commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
4e72db45afe0fc85c7e306eb056f19fca5e72bd4 | Bump to v0.4.0. | memmett/PyWENO,memmett/PyWENO,memmett/PyWENO | version.py | version.py | version = '0.4.0'
release = True
if not release:
version += '.dev'
| version = '0.3'
release = False
if not release:
version += '.a2.dev'
| bsd-3-clause | Python |
4fcd2fdb43ab331be1e15742756a4b142c436ab4 | Add docstring for lzip() | Suor/funcy | funcy/py2.py | funcy/py2.py | import sys
from .calc import *
from .colls import *
from .tree import *
from .decorators import *
from .funcolls import *
from .funcs import *
from .seqs import *
from .types import *
from .strings import *
from .flow import *
from .objects import *
from .debug import *
from .primitives import *
# Setup __all__
modules = ('calc', 'colls', 'tree', 'decorators', 'funcolls', 'funcs', 'seqs', 'types',
'strings', 'flow', 'objects', 'debug', 'primitives')
__all__ = cat(sys.modules['funcy.' + m].__all__ for m in modules)
# Python 2 style zip() for Python 3
from .cross import PY3
if PY3:
_zip = zip
def zip(*seqs):
"""List zip() version."""
return list(_zip(*seqs))
__all__ += ['zip'] # HACK: using this instead of .append() to not trigger PyCharm
else:
zip = zip
| import sys
from .calc import *
from .colls import *
from .tree import *
from .decorators import *
from .funcolls import *
from .funcs import *
from .seqs import *
from .types import *
from .strings import *
from .flow import *
from .objects import *
from .debug import *
from .primitives import *
# Setup __all__
modules = ('calc', 'colls', 'tree', 'decorators', 'funcolls', 'funcs', 'seqs', 'types',
'strings', 'flow', 'objects', 'debug', 'primitives')
__all__ = cat(sys.modules['funcy.' + m].__all__ for m in modules)
# Python 2 style zip() for Python 3
from .cross import PY3
if PY3:
_zip = zip
def zip(*seqs):
return list(_zip(*seqs))
__all__ += ['zip'] # HACK: using this instead of .append() to not trigger PyCharm
else:
zip = zip
| bsd-3-clause | Python |
739e1b169af8b1abded51eb0cd225d95d33e40f9 | Update runtests.py for compatibility with Django 1.7. | yeago/django-model-utils,nemesisdesign/django-model-utils,timmygee/django-model-utils,patrys/django-model-utils,carljm/django-model-utils,yeago/django-model-utils,timmygee/django-model-utils,carljm/django-model-utils,nemesisdesign/django-model-utils,patrys/django-model-utils | runtests.py | runtests.py | #!/usr/bin/env python
import os, sys
from django.conf import settings
import django
DEFAULT_SETTINGS = dict(
INSTALLED_APPS=(
'django.contrib.contenttypes',
'model_utils',
'model_utils.tests',
),
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3"
}
},
)
def runtests(*test_args):
if not settings.configured:
settings.configure(**DEFAULT_SETTINGS)
# Compatibility with Django 1.7's stricter initialization
if hasattr(django, 'setup'):
django.setup()
if not test_args:
test_args = ['tests']
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
from django.test.simple import DjangoTestSuiteRunner
failures = DjangoTestSuiteRunner(
verbosity=1, interactive=True, failfast=False).run_tests(test_args)
sys.exit(failures)
if __name__ == '__main__':
runtests()
| #!/usr/bin/env python
import os, sys
from django.conf import settings
if not settings.configured:
settings_dict = dict(
INSTALLED_APPS=(
'django.contrib.contenttypes',
'model_utils',
'model_utils.tests',
),
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3"
}
},
)
settings.configure(**settings_dict)
def runtests(*test_args):
if not test_args:
test_args = ['tests']
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
from django.test.simple import DjangoTestSuiteRunner
failures = DjangoTestSuiteRunner(
verbosity=1, interactive=True, failfast=False).run_tests(test_args)
sys.exit(failures)
if __name__ == '__main__':
runtests()
| bsd-3-clause | Python |
d3a0d7e2cf1685ce6c69d182faa309f228aa84c0 | use fixture to mock application | pkulev/xoinvader,pankshok/xoinvader | xoinvader/tests/conftest.py | xoinvader/tests/conftest.py | """Pytest configuration."""
import pytest
from xoinvader import application
from xoinvader import state
@pytest.fixture
def mock_application(request):
def inner():
return MockedApplication()
request.addfinalizer(MockedApplication._finalize)
return inner
@pytest.fixture
def mock_state(request, mock_application):
def inner(mock_app=False):
if mock_app:
mock_application()
application.get_current().register_state(MockedState)
return application.get_current().state
def stop():
application.get_current().deregister_state(MockedState.__name__)
if not application.get_current().states:
MockedApplication._finalize()
request.addfinalizer(stop)
return inner
class MockedApplication(application.Application):
@staticmethod
def _finalize():
app = application.get_current()
app.stop()
class MockedState(state.State):
pass
| """Pytest configuration."""
import pytest
from xoinvader import application
from xoinvader import state
@pytest.fixture
def mock_application(request):
def inner():
return MockedApplication()
request.addfinalizer(MockedApplication._finalize)
return inner
@pytest.fixture
def mock_state(request):
def inner(mock_app=False):
if mock_app:
MockedApplication()
application.get_current().register_state(MockedState)
return application.get_current().state
def stop():
application.get_current().deregister_state(MockedState.__name__)
if not application.get_current().states:
MockedApplication._finalize()
request.addfinalizer(stop)
return inner
class MockedApplication(application.Application):
@staticmethod
def _finalize():
app = application.get_current()
app.stop()
class MockedState(state.State):
pass
| mit | Python |
cca0355da80a2c136b8bf02eb43d55ff9da8e1ff | change youtube video folder and add log out | eric-huo/youtube_video_crawler | youtube_video_downloader.py | youtube_video_downloader.py | # Eric 2016/11/09
from __future__ import unicode_literals
import youtube_dl
import mysql.connector
conn = mysql.connector.connect(user='taloscar', password='taloscar', database='taloscar', host='10.161.23.57')
cursor = conn.cursor()
def get_to_download_video_urls():
urls_with_ids = query_from_mysql()
for url_with_id in urls_with_ids:
download_video(url_with_id[0], url_with_id[1])
def download_video(video_url, video_id):
ydl_opts = {
'verbose': True,
'ignoreerrors': True,
'noplaylist': True,
'sleep_interval': 10,
'max_sleep_interval': 20,
'outtmpl': '/home/youtube_videos/' + str(video_id) + '.mp4',
'progress_hooks': [output_log]
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
print 'start to download: ' + video_id
ydl.download([video_url])
def query_from_mysql():
try:
cursor.execute('select video_url, id from youtube_video where is_downloaded = 0 limit 2')
rows = cursor.fetchall()
urls_with_ids = [(row[0], row[1]) for row in rows]
return urls_with_ids
except Exception as e:
print 'Exception when query from mysql: ' + e.message
return []
def output_log(d):
if d['status'] == 'finished':
print('Done downloading: ' + d['filename'])
log_mysql_for_finished_video(d['filename'])
elif d['status'] == 'error':
print('Error downloading: ' + d['filename'])
def log_mysql_for_finished_video(video_name):
dot_position = video_name.rfind('.')
slash_position = video_name.rfind('/')
if dot_position != -1 and slash_position != -1:
video_id = video_name[slash_position + 1: dot_position]
if video_id and video_id != '':
modify_downloaded_field_mysql(video_id)
def modify_downloaded_field_mysql(video_id):
try:
cursor.execute('update youtube_video set is_downloaded = 1 where id = %(video_id)s', {'video_id': int(video_id)})
conn.commit()
except Exception as e:
print 'Exception when modify downloaded filed mysql: ' + e.message
return []
def close_mysql():
cursor.close()
conn.close()
if __name__ == '__main__':
get_to_download_video_urls()
close_mysql()
| # Eric 2016/11/09
from __future__ import unicode_literals
import youtube_dl
import mysql.connector
conn = mysql.connector.connect(user='taloscar', password='taloscar', database='taloscar', host='10.161.23.57')
cursor = conn.cursor()
def get_to_download_video_urls():
urls_with_ids = query_from_mysql()
for url_with_id in urls_with_ids:
download_video(url_with_id[0], url_with_id[1])
def download_video(video_url, video_id):
ydl_opts = {
'verbose': True,
'ignoreerrors': True,
'noplaylist': True,
'sleep_interval': 10,
'max_sleep_interval': 20,
'outtmpl': 'youtube_videos/' + str(video_id) + '.mp4',
'progress_hooks': [output_log]
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download([video_url])
def query_from_mysql():
try:
cursor.execute('select video_url, id from youtube_video where is_downloaded = 0 limit 2')
rows = cursor.fetchall()
urls_with_ids = [(row[0], row[1]) for row in rows]
return urls_with_ids
except Exception as e:
print 'Exception when query from mysql: ' + e.message
return []
def output_log(d):
if d['status'] == 'finished':
print('Done downloading: ' + d['filename'])
log_mysql_for_finished_video(d['filename'])
elif d['status'] == 'error':
print('Error downloading: ' + d['filename'])
def log_mysql_for_finished_video(video_name):
dot_position = video_name.rfind('.')
slash_position = video_name.rfind('/')
if dot_position != -1 and slash_position != -1:
video_id = video_name[slash_position + 1: dot_position]
if video_id and video_id != '':
modify_downloaded_field_mysql(video_id)
def modify_downloaded_field_mysql(video_id):
try:
cursor.execute('update youtube_video set is_downloaded = 1 where id = %(video_id)s', {'video_id': int(video_id)})
conn.commit()
except Exception as e:
print 'Exception when modify downloaded filed mysql: ' + e.message
return []
def close_mysql():
cursor.close()
conn.close()
if __name__ == '__main__':
get_to_download_video_urls()
close_mysql()
| mit | Python |
b3d00273e414913357da8e71decf04df59060281 | return login result | DBuildService/atomic-reactor,projectatomic/atomic-reactor,projectatomic/atomic-reactor,vrutkovs/atomic-reactor,jarodwilson/atomic-reactor,jarodwilson/atomic-reactor,DBuildService/atomic-reactor,jpopelka/atomic-reactor,jpopelka/atomic-reactor,fr34k8/atomic-reactor,maxamillion/atomic-reactor,vrutkovs/atomic-reactor,maxamillion/atomic-reactor,fr34k8/atomic-reactor | atomic_reactor/koji_util.py | atomic_reactor/koji_util.py | """
Copyright (c) 2016 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, unicode_literals
import koji
import logging
import os
import time
logger = logging.getLogger(__name__)
def koji_login(session,
proxyuser=None,
ssl_certs_dir=None,
krb_principal=None,
krb_keytab=None):
"""
Choose the correct login method based on the available credentials,
and call that method on the provided session object.
:param session: koji.ClientSession instance
:param proxyuser: str, proxy user
:param ssl_certs_dir: str, path to "cert", "ca", and "serverca"
:param krb_principal: str, name of Kerberos principal
:param krb_keytab: str, Kerberos keytab
:return: None
"""
kwargs = {}
if proxyuser:
kwargs['proxyuser'] = proxyuser
if ssl_certs_dir:
# Use certificates
logger.info("Using SSL certificates for Koji authentication")
result = session.ssl_login(os.path.join(ssl_certs_dir, 'cert'),
os.path.join(ssl_certs_dir, 'ca'),
os.path.join(ssl_certs_dir, 'serverca'),
**kwargs)
else:
# Use Kerberos
logger.info("Using Kerberos for Koji authentication")
if krb_principal and krb_keytab:
kwargs['principal'] = krb_principal
kwargs['keytab'] = krb_keytab
result = session.krb_login(**kwargs)
return result
class TaskWatcher(object):
def __init__(self, session, task_id, poll_interval=5):
self.session = session
self.task_id = task_id
self.poll_interval = poll_interval
def wait(self):
logger.debug("waiting for koji task %r to finish", self.task_id)
while not self.session.taskFinished(self.task_id):
time.sleep(self.poll_interval)
logger.debug("koji task is finished, getting info")
task_info = self.session.getTaskInfo(self.task_id, request=True)
self.state = koji.TASK_STATES[task_info['state']]
return self.state
def failed(self):
return self.state in ['CANCELED', 'FAILED']
| """
Copyright (c) 2016 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, unicode_literals
import koji
import logging
import os
import time
logger = logging.getLogger(__name__)
def koji_login(session,
proxyuser=None,
ssl_certs_dir=None,
krb_principal=None,
krb_keytab=None):
"""
Choose the correct login method based on the available credentials,
and call that method on the provided session object.
:param session: koji.ClientSession instance
:param proxyuser: str, proxy user
:param ssl_certs_dir: str, path to "cert", "ca", and "serverca"
:param krb_principal: str, name of Kerberos principal
:param krb_keytab: str, Kerberos keytab
:return: None
"""
kwargs = {}
if proxyuser:
kwargs['proxyuser'] = proxyuser
if ssl_certs_dir:
# Use certificates
logger.info("Using SSL certificates for Koji authentication")
session.ssl_login(os.path.join(ssl_certs_dir, 'cert'),
os.path.join(ssl_certs_dir, 'ca'),
os.path.join(ssl_certs_dir, 'serverca'),
**kwargs)
else:
# Use Kerberos
logger.info("Using Kerberos for Koji authentication")
if krb_principal and krb_keytab:
kwargs['principal'] = krb_principal
kwargs['keytab'] = krb_keytab
session.krb_login(**kwargs)
class TaskWatcher(object):
def __init__(self, session, task_id, poll_interval=5):
self.session = session
self.task_id = task_id
self.poll_interval = poll_interval
def wait(self):
logger.debug("waiting for koji task %r to finish", self.task_id)
while not self.session.taskFinished(self.task_id):
time.sleep(self.poll_interval)
logger.debug("koji task is finished, getting info")
task_info = self.session.getTaskInfo(self.task_id, request=True)
self.state = koji.TASK_STATES[task_info['state']]
return self.state
def failed(self):
return self.state in ['CANCELED', 'FAILED']
| bsd-3-clause | Python |
56efe455c0f7163704302d7732a1da61fcb76136 | Update kissfft to fix stdint.h issue on Windows | DavidNorman/tensorflow,frreiss/tensorflow-fred,yongtang/tensorflow,arborh/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,xzturn/tensorflow,tensorflow/tensorflow,freedomtan/tensorflow,frreiss/tensorflow-fred,aam-at/tensorflow,adit-chandra/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,ppwwyyxx/tensorflow,jhseu/tensorflow,aam-at/tensorflow,aldian/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,aldian/tensorflow,Intel-Corporation/tensorflow,paolodedios/tensorflow,frreiss/tensorflow-fred,xzturn/tensorflow,DavidNorman/tensorflow,sarvex/tensorflow,jhseu/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,xzturn/tensorflow,sarvex/tensorflow,karllessard/tensorflow,renyi533/tensorflow,DavidNorman/tensorflow,renyi533/tensorflow,chemelnucfin/tensorflow,gunan/tensorflow,Intel-tensorflow/tensorflow,xzturn/tensorflow,davidzchen/tensorflow,ppwwyyxx/tensorflow,davidzchen/tensorflow,gautam1858/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,adit-chandra/tensorflow,yongtang/tensorflow,xzturn/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,adit-chandra/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,adit-chandra/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow,tensorflow/tensorflow,petewarden/tensorflow,gunan/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,cxxgtxy/tensorflow,aam-at/tensorflow,jhseu/tensorflow,freedomtan/tensorflow,sarvex/tensorflow,yongtang/tensorflow,renyi533/tensorflow,renyi533/tensorflow,adit-chandra/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,jhseu/tensorflow,arborh/tensorflow,chemelnucfin/tensorflow,DavidNorman/tensorflow,Intel-Corporation/tensorflow,gautam1858/tensorflow,davidzchen/tensorflow,chemelnucfin/tensorflow
,aldian/tensorflow,aam-at/tensorflow,xzturn/tensorflow,annarev/tensorflow,aam-at/tensorflow,arborh/tensorflow,tensorflow/tensorflow,petewarden/tensorflow,arborh/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,karllessard/tensorflow,yongtang/tensorflow,ppwwyyxx/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,davidzchen/tensorflow,adit-chandra/tensorflow,DavidNorman/tensorflow,aldian/tensorflow,freedomtan/tensorflow,gunan/tensorflow,chemelnucfin/tensorflow,renyi533/tensorflow,annarev/tensorflow,gunan/tensorflow,annarev/tensorflow,ppwwyyxx/tensorflow,gautam1858/tensorflow,jhseu/tensorflow,sarvex/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,freedomtan/tensorflow,karllessard/tensorflow,chemelnucfin/tensorflow,yongtang/tensorflow,ppwwyyxx/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,chemelnucfin/tensorflow,cxxgtxy/tensorflow,karllessard/tensorflow,arborh/tensorflow,aldian/tensorflow,chemelnucfin/tensorflow,yongtang/tensorflow,renyi533/tensorflow,aldian/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,DavidNorman/tensorflow,cxxgtxy/tensorflow,tensorflow/tensorflow-pywrap_saved_model,adit-chandra/tensorflow,gunan/tensorflow,aam-at/tensorflow,ppwwyyxx/tensorflow,paolodedios/tensorflow,ppwwyyxx/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,petewarden/tensorflow,chemelnucfin/tensorflow,renyi533/tensorflow,gautam1858/tensorflow,adit-chandra/tensorflow,tensorflow/tensorflow,jhseu/tensorflow,ppwwyyxx/tensorflow,chemelnucfin/tensorflow,arborh/tensorflow,adit-chandra/tensorflow,annarev/tensorflow,davidzchen/tensorflow,xzturn/tensorflow,paolodedios/tensorflow,sarvex/tensorflow,xzturn/tensorflow,cxxgtxy/tensorflow,DavidNorman/tensorflow,DavidNorman/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,aldian/tensorflow,tensorflow/tensorflow-pywrap_tf_opti
mizer,gunan/tensorflow,xzturn/tensorflow,tensorflow/tensorflow-pywrap_saved_model,aam-at/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,aam-at/tensorflow,DavidNorman/tensorflow,gunan/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,adit-chandra/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,gunan/tensorflow,aam-at/tensorflow,DavidNorman/tensorflow,ppwwyyxx/tensorflow,davidzchen/tensorflow,gunan/tensorflow,xzturn/tensorflow,petewarden/tensorflow,petewarden/tensorflow,renyi533/tensorflow,frreiss/tensorflow-fred,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,jhseu/tensorflow,renyi533/tensorflow,xzturn/tensorflow,jhseu/tensorflow,adit-chandra/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,frreiss/tensorflow-fred,frreiss/tensorflow-fred,ppwwyyxx/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,arborh/tensorflow,jhseu/tensorflow,renyi533/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,cxxgtxy/tensorflow,aam-at/tensorflow,yongtang/tensorflow,arborh/tensorflow,frreiss/tensorflow-fred,freedomtan/tensorflow,aam-at/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,paolodedios/tensorflow,ppwwyyxx/tensorflow,chemelnucfin/tensorflow,aam-at/tensorflow,karllessard/tensorflow,sarvex/tensorflow,karllessard/tensorflow,freedomtan/tensorflow,ppwwyyxx/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,petewarden/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-Corporation/tensorflow,gunan/tensorflow,annarev/tensorflow,davidzchen/tensorflow,karllessard/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_saved_model,annarev/tensorflow,freedomtan/tensorflow,renyi533/tensorflow,freedomtan/tensorflow,annarev/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,gunan/tensorflow,aldian/tensorflow,jhse
u/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,annarev/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,petewarden/tensorflow,annarev/tensorflow,cxxgtxy/tensorflow,Intel-tensorflow/tensorflow,arborh/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,davidzchen/tensorflow,frreiss/tensorflow-fred,karllessard/tensorflow,jhseu/tensorflow,sarvex/tensorflow,tensorflow/tensorflow,Intel-Corporation/tensorflow,davidzchen/tensorflow,gunan/tensorflow,jhseu/tensorflow,Intel-Corporation/tensorflow,gautam1858/tensorflow,arborh/tensorflow,paolodedios/tensorflow,cxxgtxy/tensorflow,chemelnucfin/tensorflow,cxxgtxy/tensorflow,arborh/tensorflow,paolodedios/tensorflow,DavidNorman/tensorflow,petewarden/tensorflow,freedomtan/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,DavidNorman/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,freedomtan/tensorflow,renyi533/tensorflow,frreiss/tensorflow-fred,Intel-Corporation/tensorflow,adit-chandra/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,gautam1858/tensorflow,davidzchen/tensorflow,xzturn/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow,Intel-Corporation/tensorflow,Intel-Corporation/tensorflow,arborh/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,petewarden/tensorflow,petewarden/tensorflow,sarvex/tensorflow,davidzchen/tensorflow,annarev/tensorflow,chemelnucfin/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,annarev/tensorflow,gautam1858/tensorflow | third_party/kissfft/workspace.bzl | third_party/kissfft/workspace.bzl | """Loads the kissfft library, used by TF Lite."""
load("//third_party:repo.bzl", "third_party_http_archive")
def repo():
third_party_http_archive(
name = "kissfft",
strip_prefix = "kissfft-36dbc057604f00aacfc0288ddad57e3b21cfc1b8",
sha256 = "42b7ef406d5aa2d57a7b3b56fc44e8ad3011581692458a69958a911071efdcf2",
urls = [
"http://mirror.tensorflow.org/github.com/mborgerding/kissfft/archive/36dbc057604f00aacfc0288ddad57e3b21cfc1b8.tar.gz",
"https://github.com/mborgerding/kissfft/archive/36dbc057604f00aacfc0288ddad57e3b21cfc1b8.tar.gz",
],
build_file = "//third_party/kissfft:BUILD.bazel",
)
| """Loads the kissfft library, used by TF Lite."""
load("//third_party:repo.bzl", "third_party_http_archive")
def repo():
third_party_http_archive(
name = "kissfft",
strip_prefix = "kissfft-cddf3833fdf24fa84b79be37efdcd348cae0e39c",
sha256 = "7ba83a3da1636350472e501e3e6c3418df72466990530ea273c05fa7e3dd8635",
urls = [
"http://mirror.tensorflow.org/github.com/mborgerding/kissfft/archive/cddf3833fdf24fa84b79be37efdcd348cae0e39c.tar.gz",
"https://github.com/mborgerding/kissfft/archive/cddf3833fdf24fa84b79be37efdcd348cae0e39c.tar.gz",
],
build_file = "//third_party/kissfft:BUILD.bazel",
)
| apache-2.0 | Python |
1b1534967c6494d89d76d03c7487f5b02808bb4c | Fix mac build for XCode v6.0 | dart-archive/dart-sdk,dart-archive/dart-sdk,dart-archive/dart-sdk,dart-lang/sdk,dart-lang/sdk,dartino/dart-sdk,dartino/dart-sdk,dartino/dart-sdk,dartino/dart-sdk,dartino/dart-sdk,dart-lang/sdk,dart-archive/dart-sdk,dart-archive/dart-sdk,dartino/dart-sdk,dart-archive/dart-sdk,dart-archive/dart-sdk,dart-lang/sdk,dart-lang/sdk,dartino/dart-sdk,dart-lang/sdk,dartino/dart-sdk,dart-lang/sdk,dart-archive/dart-sdk,dart-archive/dart-sdk,dart-lang/sdk,dartino/dart-sdk | tools/gyp/find_mac_gcc_version.py | tools/gyp/find_mac_gcc_version.py | #!/usr/bin/env python
# Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import re
import subprocess
import sys
def main():
job = subprocess.Popen(['xcodebuild', '-version'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = job.communicate()
if job.returncode != 0:
print >>sys.stderr, stdout
print >>sys.stderr, stderr
raise Exception('Error %d running xcodebuild!' % job.returncode)
matches = re.findall('^Xcode (\d+)\.(\d+)(\.(\d+))?$', stdout, re.MULTILINE)
if len(matches) > 0:
major = int(matches[0][0])
minor = int(matches[0][1])
if major == 3 and minor >= 1:
return '4.2'
elif major == 4 and minor < 5:
return 'com.apple.compilers.llvmgcc42'
elif (major == 4 and minor >= 5) or major == 5 or major == 6:
# XCode seems to select the specific clang version automatically
return 'com.apple.compilers.llvm.clang.1_0'
else:
raise Exception('Unknown XCode Version "%s"' % stdout)
else:
raise Exception('Could not parse output of xcodebuild "%s"' % stdout)
if __name__ == '__main__':
if sys.platform != 'darwin':
#raise Exception("This script only runs on Mac")
# If we aren't on Mac, print out a dummy version string; it won't be used.
print 'X.X'
else:
print main()
| #!/usr/bin/env python
# Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import re
import subprocess
import sys
def main():
job = subprocess.Popen(['xcodebuild', '-version'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = job.communicate()
if job.returncode != 0:
print >>sys.stderr, stdout
print >>sys.stderr, stderr
raise Exception('Error %d running xcodebuild!' % job.returncode)
matches = re.findall('^Xcode (\d+)\.(\d+)(\.(\d+))?$', stdout, re.MULTILINE)
if len(matches) > 0:
major = int(matches[0][0])
minor = int(matches[0][1])
if major == 3 and minor >= 1:
return '4.2'
elif major == 4 and minor < 5:
return 'com.apple.compilers.llvmgcc42'
elif (major == 4 and minor >= 5) or major == 5:
# XCode seems to select the specific clang version automatically
return 'com.apple.compilers.llvm.clang.1_0'
else:
raise Exception('Unknown XCode Version "%s"' % stdout)
else:
raise Exception('Could not parse output of xcodebuild "%s"' % stdout)
if __name__ == '__main__':
if sys.platform != 'darwin':
#raise Exception("This script only runs on Mac")
# If we aren't on Mac, print out a dummy version string; it won't be used.
print 'X.X'
else:
print main()
| bsd-3-clause | Python |
172768dacc224f3c14e85f8e732209efe9ce075a | Set the default prefix for ProjectSurveys to gsoc_program. | SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange,SRabbelier/Melange | app/soc/models/project_survey.py | app/soc/models/project_survey.py | #!/usr/bin/python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the ProjectSurvey model.
"""
__authors__ = [
'"Daniel Diniz" <ajaksu@gmail.com>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from soc.models.survey import Survey
class ProjectSurvey(Survey):
"""Survey for Students that have a StudentProject.
"""
def __init__(self, *args, **kwargs):
super(ProjectSurvey, self).__init__(*args, **kwargs)
self.prefix = 'gsoc_program'
self.taking_access = 'student'
| #!/usr/bin/python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the ProjectSurvey model.
"""
__authors__ = [
'"Daniel Diniz" <ajaksu@gmail.com>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from soc.models.survey import Survey
class ProjectSurvey(Survey):
"""Survey for Students that have a StudentProject.
"""
def __init__(self, *args, **kwargs):
super(ProjectSurvey, self).__init__(*args, **kwargs)
# TODO: prefix has to be set to gsoc_program once data has been transferred
self.prefix = 'program'
self.taking_access = 'student'
| apache-2.0 | Python |
f0b8f05da951cc49eb53809e12e6287fd4ab3436 | Update urls.py | knagra/farnsworth,knagra/farnsworth,knagra/farnsworth,knagra/farnsworth | farnswiki/urls.py | farnswiki/urls.py | """
XXX:
This module is deprecated and marked for replacement.
"""
from django.conf.urls import url, patterns
from django.conf import settings
urlpatterns = patterns(
"farnswiki.views",
)
for binder in settings.WIKI_BINDERS:
urlpatterns += patterns(
"farnswiki.views",
url(binder.root + r"/page/(?P<slug>[^/]+)/$", "page", {"binder": binder}, name=binder.page_url_name),
url(binder.root + r"/page/(?P<slug>[^/]+)/edit/$", "edit", {"binder": binder}, name=binder.edit_url_name),
)
urlpatterns += patterns(
"farnswiki.views",
url(binder.root + r"/$", "all_pages_view", {"binder": binder}, name="wiki_all"),
url(binder.root + r"/add/$", "add_page_view", {"binder": binder}, name="wiki_add"),
url(binder.root + r"/page/(?P<slug>[^/]+)/history/$", "history_view", {"binder": binder}, name="wiki_history"),
)
|
from django.conf.urls import url, patterns
from django.conf import settings
urlpatterns = patterns(
"farnswiki.views",
)
for binder in settings.WIKI_BINDERS:
urlpatterns += patterns(
"farnswiki.views",
url(binder.root + r"/page/(?P<slug>[^/]+)/$", "page", {"binder": binder}, name=binder.page_url_name),
url(binder.root + r"/page/(?P<slug>[^/]+)/edit/$", "edit", {"binder": binder}, name=binder.edit_url_name),
)
urlpatterns += patterns(
"farnswiki.views",
url(binder.root + r"/$", "all_pages_view", {"binder": binder}, name="wiki_all"),
url(binder.root + r"/add/$", "add_page_view", {"binder": binder}, name="wiki_add"),
url(binder.root + r"/page/(?P<slug>[^/]+)/history/$", "history_view", {"binder": binder}, name="wiki_history"),
)
| bsd-2-clause | Python |
be1d4f0f4f0b98e14ce181a1bedd93a293a93400 | Fix test with django2 | mixcloud/django-experiments,mixcloud/django-experiments,mixcloud/django-experiments | experiments/tests/urls.py | experiments/tests/urls.py | from experiments.urls import urlpatterns
from django.conf.urls import url
from django.contrib import admin
urlpatterns += [
url(r'^admin/', admin.site.urls),
]
| from experiments.urls import urlpatterns
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns += [
url(r'^admin/', include(admin.site.urls)),
]
| mit | Python |
c2fd5b8f461f48dc00fb29db538775170d1c7abd | Correct models, allow null dates | alykhank/populate | articles/models.py | articles/models.py | from django.db import models
class Bookmark(models.Model):
    """A user's saved reference to an article (read state, favorite/archive
    flags, and the lifecycle timestamps), mirroring an external bookmark."""
    user_id = models.PositiveIntegerField(default=0)
    # How far through the article the user is (stored as a float).
    read_percent = models.FloatField(default=0.0)
    date_updated = models.DateTimeField(null=True)
    favorite = models.BooleanField()
    # External bookmark identifier (field renamed from bookmark_id).
    bk_id = models.PositiveIntegerField(default=0)
    date_archived = models.DateTimeField(null=True)
    date_opened = models.DateTimeField(null=True)
    date_added = models.DateTimeField(null=True)
    article_href = models.CharField(max_length=200)
    date_favorited = models.DateTimeField(null=True)
    archive = models.BooleanField()
    # tags

    def __unicode__(self):
        # Was self.bookmark_id, which no longer exists after the rename to
        # bk_id and raised AttributeError whenever a bookmark was printed.
        # NOTE(review): self.article relies on the reverse accessor of
        # Article.bookmark; with a plain ForeignKey the default reverse name
        # is article_set - confirm a related_name (or OneToOne) is intended.
        return "<Bookmark('%s', '%s')>" % (self.bk_id, self.article.title)
class Article(models.Model):
    """Parsed article content fetched for a bookmark (readability-style
    fields: title, author, excerpt, full content, ...)."""
    domain = models.CharField(max_length=200)
    # next_page_href
    author = models.CharField(max_length=200)
    url = models.URLField()
    lead_image_url = models.URLField()
    # content_size
    title = models.CharField(max_length=200)
    excerpt = models.CharField(max_length=200)
    word_count = models.PositiveIntegerField(default=0)
    content = models.TextField()
    date_published = models.DateTimeField(null=True)
    # Short article summary line ("dek" in publishing jargon).
    dek = models.CharField(max_length=200)
    # processed
    short_url = models.URLField()
    # article_id
    # ForeignKey (was OneToOneField), so a bookmark may own several articles.
    bookmark = models.ForeignKey(Bookmark)

    def __unicode__(self):
        return "<Article('%s', '%s')>" % (self.title, self.author)
| from django.db import models
class Bookmark(models.Model):
    """A user's saved article reference (read state, favorite/archive flags,
    lifecycle timestamps)."""
    user_id = models.PositiveIntegerField(default=0)
    read_percent = models.FloatField(default=0.0)
    date_updated = models.DateTimeField()
    favorite = models.BooleanField()
    bookmark_id = models.PositiveIntegerField(default=0)
    date_archived = models.DateTimeField()
    date_opened = models.DateTimeField()
    date_added = models.DateTimeField()
    article_href = models.CharField(max_length=200)
    date_favorited = models.DateTimeField()
    archive = models.BooleanField()
    # tags

    def __unicode__(self):
        # self.article is the reverse accessor of Article.bookmark
        # (a OneToOneField below).
        return "<Bookmark('%s', '%s')>" % (self.bookmark_id, self.article.title)
class Article(models.Model):
    """Parsed article content belonging to exactly one bookmark."""
    domain = models.CharField(max_length=200)
    # next_page_href
    author = models.CharField(max_length=200)
    url = models.URLField()
    lead_image_url = models.URLField()
    # content_size
    title = models.CharField(max_length=200)
    excerpt = models.CharField(max_length=200)
    word_count = models.PositiveIntegerField(default=0)
    content = models.TextField()
    date_published = models.DateTimeField()
    # Short article summary line ("dek" in publishing jargon).
    dek = models.CharField(max_length=200)
    # processed
    short_url = models.URLField()
    # article_id
    bookmark = models.OneToOneField(Bookmark)

    def __unicode__(self):
        return "<Article('%s', '%s')>" % (self.title, self.author)
| mit | Python |
09d3c9aa557a3d9a144b5a566f75526ceb9bf3b6 | Use the new #left stuff (with help link) in browse as well. | drougge/wwwwellpapp,drougge/wwwwellpapp,drougge/wwwwellpapp | cgi/browse.py | cgi/browse.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from os import environ, listdir, stat
from sys import exit
from common import *
from os.path import normpath, exists, join as joinpath, isfile, isdir
from hashlib import md5
def forbidden():
    # Emit a 403 response page and terminate the CGI request immediately.
    global outdata
    print "Status: 403 Forbidden"
    outdata = []  # drop any output queued before the error
    prt_head()
    prt(u'<h1>Forbidden</h1>\n',
        u'<p>You are not allowed to access this directory.</p>\n')
    prt_foot()
    finish()
    exit(0)
def md5file(fn):
    """Return the hex MD5 digest of the file at *fn*, read in 64 KiB chunks."""
    m = md5()
    # open() instead of the Python-2-only file() builtin; the b"" sentinel
    # terminates the read loop for binary streams on both Python 2 and 3
    # (the old "" sentinel never matches a py3 binary read).
    with open(fn, "rb") as fh:
        for data in iter(lambda: fh.read(65536), b""):
            m.update(data)
    return m.hexdigest()
# Serve nothing unless a browse root directory is configured.
if not client.cfg.browsebase: forbidden()

# Three slashes, because normpath is stupidly posix-compliant.
pathpart = normpath("///" + environ["PATH_INFO"])
path = normpath(client.cfg.browsebase + pathpart)
if not exists(path): forbidden()

# Collect wellpapp posts (files matched by MD5) and subdirectories.
posts = []
dirs = []
for fn in listdir(path):
    ffn = joinpath(path, fn)
    if isfile(ffn):
        m = md5file(ffn)
        p = client.get_post(m)
        if p:
            t = stat(ffn).st_mtime
            posts.append((t, p))
    elif isdir(ffn):
        dirs.append(fn)
dirs.sort()
# Offer ".." everywhere except at the browse root itself.
if pathpart != "/": dirs = [".."] + dirs

prt_head()
prt_left_head()
prt(u'<ul id="dirs">\n')
for d in dirs:
    prt(u'<li><a href="', d, u'/">', d, u'</a></li>\n')
prt(u'</ul>\n')
prt_left_foot()
prt(u'<div id="main">\n',
    u'<h1>', pathpart, u'</h1>\n',
    pagelinks(u'', 0, 0))
# Posts ordered by file mtime; prt_posts wants just the post objects.
prt_posts([p[1] for p in sorted(posts)])
prt(u'</div>\n')
prt_foot()
finish()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from os import environ, listdir, stat
from sys import exit
from common import *
from os.path import normpath, exists, join as joinpath, isfile, isdir
from hashlib import md5
def forbidden():
    # Emit a 403 response page and terminate the CGI request immediately.
    global outdata
    print "Status: 403 Forbidden"
    outdata = []  # drop any output queued before the error
    prt_head()
    prt(u'<h1>Forbidden</h1>\n',
        u'<p>You are not allowed to access this directory.</p>\n')
    prt_foot()
    finish()
    exit(0)
def md5file(fn):
    """Return the hex MD5 digest of the file at *fn*, read in 64 KiB chunks."""
    m = md5()
    # open() instead of the Python-2-only file() builtin; the b"" sentinel
    # terminates the read loop for binary streams on both Python 2 and 3.
    with open(fn, "rb") as fh:
        for data in iter(lambda: fh.read(65536), b""):
            m.update(data)
    return m.hexdigest()
# Serve nothing unless a browse root directory is configured.
if not client.cfg.browsebase: forbidden()

# Three slashes, because normpath is stupidly posix-compliant.
pathpart = normpath("///" + environ["PATH_INFO"])
path = normpath(client.cfg.browsebase + pathpart)
if not exists(path): forbidden()

# Collect wellpapp posts (files matched by MD5) and subdirectories.
posts = []
dirs = []
for fn in listdir(path):
    ffn = joinpath(path, fn)
    if isfile(ffn):
        m = md5file(ffn)
        p = client.get_post(m)
        if p:
            t = stat(ffn).st_mtime
            posts.append((t, p))
    elif isdir(ffn):
        dirs.append(fn)
dirs.sort()
# Offer ".." everywhere except at the browse root itself.
if pathpart != "/": dirs = [".."] + dirs

prt_head()
prt(u'<div id="left">\n',
    u'<ul id="dirs">\n')
for d in dirs:
    prt(u'<li><a href="', d, u'/">', d, u'</a></li>\n')
prt(u'</ul>\n',
    u'</div>\n',
    u'<div id="main">\n',
    u'<h1>', pathpart, u'</h1>\n',
    pagelinks(u'', 0, 0))
# Posts ordered by file mtime; prt_posts wants just the post objects.
prt_posts([p[1] for p in sorted(posts)])
prt(u'</div>\n')
prt_foot()
finish()
| mit | Python |
d31cbad208ed3b7ac29a5e4957d008f768462ee0 | use correct variable name | proversity-org/edx-app-ios,proversity-org/edx-app-ios,proversity-org/edx-app-ios,proversity-org/edx-app-ios,proversity-org/edx-app-ios,proversity-org/edx-app-ios | fastlane_match.py | fastlane_match.py |
# -*- coding: utf-8 -*-
#!/usr/bin/python
import subprocess
import sys
import requests
def not_in(x):
    """Return True when *x* contains none of the fastlane table markers we
    want to filter out of the scraped output."""
    markers = ('ProfileUUID', 'development', 'sigh_org')
    return not any(marker in x for marker in markers)
def run_fastlane(mode):
    """Run ``fastlane match <mode>`` and scrape the profile UUID from the
    summary table fastlane prints on stdout.

    mode: a fastlane match type ('development' or 'appstore' here).
    Returns the scraped UUID as a string.
    """
    # subprocess.check_output(['fastlane', 'create'])
    print "\t=> Mode: %s" % mode
    print "\t=> fastlane match %s" % mode
    output = subprocess.check_output(['fastlane', 'match', mode])
    # Strip all whitespace, slice out the cell between the second "ios|"
    # column marker and the "|ProfileName" header, then filter the remaining
    # "|"-separated tokens down to the UUID via not_in().
    output = ''.join(output.split()).split('ios|')[2].split('|ProfileName')[0].split('|')
    output = [x for x in output if x]
    output = [x for x in output if not_in(x)]
    output = ''.join(output)
    return output
def send_uuids(dev_uuid, store_uuid, org_code):
print "\t=> Development: %s" % dev_uuid
print "\t=> AppStore: %s" % store_uuid
authorization_key = 'e6005c3173671458fee3b322a73178ca2c900ab8b433c302dbd560ec0ed71570'
action = 'save_ios_uuids'
url = "https://consola-api/organizations/%s/circleci/webhook?authorization=%s&action=%s&devUUID=%s&storeUUID-=%s" % (org_code, authorization_key, action, dev_uuid, store_uuid)
r = requests.post(url)
print r.json()
if __name__ == '__main__':
    # Usage: fastlane_match.py <org_code>
    org_code = sys.argv[1]
    print "=> Running fastlane match"
    dev_uuid = run_fastlane('development')
    store_uuid = run_fastlane('appstore')
    print "=> Send UUIDS to Consola"
    send_uuids(dev_uuid, store_uuid, org_code);
|
# -*- coding: utf-8 -*-
#!/usr/bin/python
import subprocess
import sys
import requests
def not_in(x):
    """Return True when *x* contains none of the fastlane table markers we
    want to filter out of the scraped output."""
    markers = ('ProfileUUID', 'development', 'sigh_org')
    return not any(marker in x for marker in markers)
def run_fastlane(mode):
    """Run ``fastlane match <mode>`` and scrape the profile UUID from the
    summary table fastlane prints on stdout.

    mode: a fastlane match type ('development' or 'appstore' here).
    Returns the scraped UUID as a string.
    """
    # subprocess.check_output(['fastlane', 'create'])
    print "\t=> Mode: %s" % mode
    print "\t=> fastlane match %s" % mode
    output = subprocess.check_output(['fastlane', 'match', mode])
    # Strip all whitespace, slice out the cell between the second "ios|"
    # column marker and the "|ProfileName" header, then filter the remaining
    # "|"-separated tokens down to the UUID via not_in().
    output = ''.join(output.split()).split('ios|')[2].split('|ProfileName')[0].split('|')
    output = [x for x in output if x]
    output = [x for x in output if not_in(x)]
    output = ''.join(output)
    return output
def send_uuids(dev_uuid, store_uuid, org_code):
print "\t=> Development: %s" % dev_uuid
print "\t=> AppStore: %s" % store_uuid
authorization_key = 'e6005c3173671458fee3b322a73178ca2c900ab8b433c302dbd560ec0ed71570'
action = 'save_ios_uuids'
url = "https://consola-api/organizations/%s/circleci/webhook?authorization=%s&action=%s&devUUID=%s&storeUUID-=%s" % (org_code, authorization_key, action, devUUID, storeUUID)
r = requests.post(url)
print r.json()
if __name__ == '__main__':
    # Usage: fastlane_match.py <org_code>
    org_code = sys.argv[1]
    print "=> Running fastlane match"
    dev_uuid = run_fastlane('development')
    store_uuid = run_fastlane('appstore')
    print "=> Send UUIDS to Consola"
    send_uuids(dev_uuid, store_uuid, org_code);
| apache-2.0 | Python |
da484c074400d2d37fa8357babbf747603b10760 | fix for keyError | s-ludwig/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,jamming/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,zapov/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,methane/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,sxend/FrameworkBenchmarks,methane/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,zloster/FrameworkBenchmarks,methane/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,jamming/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,actframework/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,zapov/FrameworkBenchmarks,sxend/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,jamming/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,Jesterovskiy/FrameworkBenc
hmarks,jamming/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,methane/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,doom369/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,actframework/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,sxend/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,zloster/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,zloster/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,jamming/FrameworkBenchmarks,zloster/FrameworkBenchmarks,zloster/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,methane/FrameworkBenchmarks,zapov/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,methane/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,jamming/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,zapov/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,sxend/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,Dith3
r/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,zloster/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,sxend/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,doom369/FrameworkBenchmarks,doom369/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,zloster/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,sxend/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,zloster/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,sxend/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,doom369/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,sxend/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,zapov/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,actframework/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,steveklabnik/FrameworkB
enchmarks,sxend/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,jamming/FrameworkBenchmarks,jamming/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,sxend/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,zapov/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,sxend/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,zapov/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,zloster/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,zapov/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,actframework/FrameworkBenchmarks,jamming/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,zapov/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,methane/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,actframework/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,zloster/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,zloster/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,sxend/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,doom36
9/FrameworkBenchmarks,zloster/FrameworkBenchmarks,actframework/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,doom369/FrameworkBenchmarks,actframework/FrameworkBenchmarks,doom369/FrameworkBenchmarks,sxend/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,doom369/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,sxend/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,doom369/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,methane/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,zapov/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,jamming/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,zapov/FrameworkBenchmarks,sxend/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,zloster/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,jamming/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,sxend/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,actframework/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,sxend/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,zapov/FrameworkBenchmarks,actframework/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,jett
y-project/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,zloster/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,methane/FrameworkBenchmarks,sxend/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,actframework/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,doom369/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,zapov/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,jamming/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,actframework/FrameworkBenchmarks,doom369/FrameworkBenchmarks,zloster/FrameworkBenchmarks,actframework/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,doom369/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,jamming/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,methane/FrameworkBenchmarks,zapov/FrameworkBenchmarks,actframework/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,sanjoyde
sk/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,actframework/FrameworkBenchmarks,actframework/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,jamming/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,methane/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,actframework/FrameworkBenchmarks,methane/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,doom369/FrameworkBenchmarks,zloster/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,zloster/FrameworkBenchmarks,doom369/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,zapov/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,zapov/FrameworkBenchmarks,doom369/FrameworkBenchmarks,doom369/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,doom369/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,zloster/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,jaguililla/FrameworkBe
nchmarks,greenlaw110/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,methane/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,methane/FrameworkBenchmarks | toolset/setup/linux/setup_util.py | toolset/setup/linux/setup_util.py | import re
import os
import sys
import subprocess
import platform
from threading import Thread
from Queue import Queue, Empty
class NonBlockingStreamReader:
    '''
    Enables calling readline in a non-blocking manner with a blocking stream,
    such as the ones returned from subprocess.Popen

    Originally written by Eyal Arubas, who granted permission to use this inside TFB
    See http://eyalarubas.com/python-subproc-nonblock.html
    '''

    def __init__(self, stream, eof_message = None):
        '''
        stream: the stream to read from.
                Usually a process' stdout or stderr.
        eof_message: A message to print to stdout as soon
                as the stream's end is reached. Useful if you
                want to track the exact moment a stream terminates
        '''
        self._s = stream
        self._q = Queue()
        self._eof_message = eof_message
        # Sentinel pushed onto the queue so readline() can distinguish EOF
        # from "no data available yet" (an empty queue).
        self._poisonpill = 'MAGIC_POISONPILL_STRING'

        def _populateQueue(stream, queue):
            # Background thread body: block on readline() and hand each line
            # to the queue until the stream is exhausted.
            while True:
                line = stream.readline()
                if line: # 'data\n' or '\n'
                    queue.put(line)
                else:    # '' e.g. EOF
                    if self._eof_message:
                        sys.stdout.write(self._eof_message + '\n')
                    queue.put(self._poisonpill)
                    return

        # Daemon thread so a still-open stream never blocks interpreter exit.
        self._t = Thread(target = _populateQueue,
                args = (self._s, self._q))
        self._t.daemon = True
        self._t.start()

    def readline(self, timeout = None):
        # Non-blocking by default; with a timeout, wait up to that long.
        # Returns None when no line is available; raises EndOfStream at EOF.
        try:
            line = self._q.get(block = timeout is not None,
                    timeout = timeout)
            if line == self._poisonpill:
                raise EndOfStream
            return line
        except Empty:
            return None
class EndOfStream(Exception):
    """Raised by NonBlockingStreamReader.readline once the wrapped stream
    has hit EOF and the poison-pill sentinel is drained from the queue."""
# Replaces all text found using the regular expression to_replace with the supplied replacement.
def replace_text(file, to_replace, replacement):
    """Rewrite *file* in place, substituting every regex match of
    *to_replace* with *replacement*."""
    with open(file, "r") as src:
        original = src.read()
    with open(file, "w") as dst:
        dst.write(re.sub(to_replace, replacement, original))
# Queries the shell for the value of FWROOT
def get_fwroot():
    """Return $FWROOT when it is set to a non-empty value, else the cwd."""
    root = os.getenv('FWROOT')
    if root:
        return root
    return os.getcwd()
# Turns absolute path into path relative to FWROOT
# Assumes path is underneath FWROOT, not above
#
# Useful for clean presentation of paths
# e.g. /foo/bar/benchmarks/go/install.sh
# v.s. FWROOT/go/install.sh
def path_relative_to_root(path):
    """Strip the FWROOT prefix from *path* via bash ${D#prefix} expansion."""
    # Requires bash shell parameter expansion
    return subprocess.check_output("D=%s && printf \"${D#%s}\""%(path, get_fwroot()), shell=True, executable='/bin/bash')
| import re
import os
import sys
import subprocess
import platform
from threading import Thread
from Queue import Queue, Empty
class NonBlockingStreamReader:
    '''
    Enables calling readline in a non-blocking manner with a blocking stream,
    such as the ones returned from subprocess.Popen

    Originally written by Eyal Arubas, who granted permission to use this inside TFB
    See http://eyalarubas.com/python-subproc-nonblock.html
    '''

    def __init__(self, stream, eof_message = None):
        '''
        stream: the stream to read from.
                Usually a process' stdout or stderr.
        eof_message: A message to print to stdout as soon
                as the stream's end is reached. Useful if you
                want to track the exact moment a stream terminates
        '''
        self._s = stream
        self._q = Queue()
        self._eof_message = eof_message
        # Sentinel pushed onto the queue so readline() can distinguish EOF
        # from "no data available yet" (an empty queue).
        self._poisonpill = 'MAGIC_POISONPILL_STRING'

        def _populateQueue(stream, queue):
            # Background thread body: block on readline() and hand each line
            # to the queue until the stream is exhausted.
            while True:
                line = stream.readline()
                if line: # 'data\n' or '\n'
                    queue.put(line)
                else:    # '' e.g. EOF
                    if self._eof_message:
                        sys.stdout.write(self._eof_message + '\n')
                    queue.put(self._poisonpill)
                    return

        # Daemon thread so a still-open stream never blocks interpreter exit.
        self._t = Thread(target = _populateQueue,
                args = (self._s, self._q))
        self._t.daemon = True
        self._t.start()

    def readline(self, timeout = None):
        # Non-blocking by default; with a timeout, wait up to that long.
        # Returns None when no line is available; raises EndOfStream at EOF.
        try:
            line = self._q.get(block = timeout is not None,
                    timeout = timeout)
            if line == self._poisonpill:
                raise EndOfStream
            return line
        except Empty:
            return None
class EndOfStream(Exception):
    """Raised by NonBlockingStreamReader.readline once the wrapped stream
    has hit EOF and the poison-pill sentinel is drained from the queue."""
# Replaces all text found using the regular expression to_replace with the supplied replacement.
def replace_text(file, to_replace, replacement):
    """Rewrite *file* in place, substituting every regex match of
    *to_replace* with *replacement*."""
    with open(file, "r") as src:
        original = src.read()
    with open(file, "w") as dst:
        dst.write(re.sub(to_replace, replacement, original))
# Queries the shell for the value of FWROOT
def get_fwroot():
    """Return the framework root directory.

    Prefers the FWROOT environment variable, falling back to the current
    working directory. Uses os.getenv for the presence test: the previous
    os.environ['FWROOT'] lookup raised KeyError whenever FWROOT was unset.
    """
    if os.getenv('FWROOT'):
        return os.environ['FWROOT']
    else:
        return os.getcwd()
# Turns absolute path into path relative to FWROOT
# Assumes path is underneath FWROOT, not above
#
# Useful for clean presentation of paths
# e.g. /foo/bar/benchmarks/go/install.sh
# v.s. FWROOT/go/install.sh
def path_relative_to_root(path):
    """Strip the FWROOT prefix from *path* via bash ${D#prefix} expansion."""
    # Requires bash shell parameter expansion
    return subprocess.check_output("D=%s && printf \"${D#%s}\""%(path, get_fwroot()), shell=True, executable='/bin/bash')
| bsd-3-clause | Python |
7f2ed7a8f8d8599d945cbf00cd614bdcbda96e6d | Change rpc default to false for dump command in client | ooici/eeagent | eeagent/client.py | eeagent/client.py | import logging
import socket
from threading import Thread
import simplejson as json
from dashi.bootstrap import dashi_connect
import uuid
from eeagent.types import EEAgentLaunchType
class EEAgentClient(object):
    """Thin dashi messaging client for driving an EEAgent.

    Most operations are fire-and-forget (dashi.fire); only dump(rpc=True)
    performs a blocking dashi.call.
    """

    def __init__(self, incoming=None, CFG=None, dashi=None, ee_name=None,
            pd_name=None, handle_heartbeat=True, log=logging):
        # incoming: callback invoked with a JSON string for each heartbeat.
        # dashi: reuse an existing connection when given; otherwise connect
        # under the PD name/exchange taken from CFG.
        self.CFG = CFG
        self.ee_name = ee_name or CFG.eeagent.name

        if dashi:
            self.dashi = dashi
        else:
            self.pd_name = pd_name or CFG.pd.name
            self.exchange = CFG.server.amqp.exchange
            self.dashi = dashi_connect(self.pd_name, CFG)

        self._log = log
        self.incoming = incoming
        if handle_heartbeat:
            self.dashi.handle(self.heartbeat, "heartbeat")

    def heartbeat(self, message):
        # Forward heartbeat payloads to the registered callback as JSON text.
        self.incoming(json.dumps(message))

    def launch(self, params, round=0, run_type=EEAgentLaunchType.supd):
        # Generate a short unique process id and fire the launch request;
        # returns the (upid, round) pair for later terminate/cleanup calls.
        upid = str(uuid.uuid4()).split("-")[0]
        self.dashi.fire(self.ee_name, "launch_process", u_pid=upid, round=round, run_type=run_type, parameters=params)
        return (upid, round)

    def terminate(self, upid, round):
        self.dashi.fire(self.ee_name, "terminate_process", u_pid=upid, round=round)

    def restart(self, upid, round):
        self.dashi.fire(self.ee_name, "restart_process", u_pid=upid, round=round)

    def dump(self, rpc=False):
        # rpc=True blocks and returns the agent's state; the default just
        # fires the request and returns None.
        if rpc:
            return self.dashi.call(self.ee_name, "dump_state", rpc=rpc)
        else:
            self.dashi.fire(self.ee_name, "dump_state")

    def cleanup(self, upid, round):
        self.dashi.fire(self.ee_name, "cleanup", u_pid=upid, round=round)

    def poll(self, timeout=None, count=None):
        # With a timeout, consume exactly one message; a socket timeout is
        # treated as "nothing arrived" rather than an error.
        if timeout:
            count = 1
        try:
            self.dashi.consume(timeout=timeout, count=count)
        except socket.timeout, ex:
            pass
| import logging
import socket
from threading import Thread
import simplejson as json
from dashi.bootstrap import dashi_connect
import uuid
from eeagent.types import EEAgentLaunchType
class EEAgentClient(object):
    """Thin dashi messaging client for driving an EEAgent.

    Most operations are fire-and-forget (dashi.fire); only dump(rpc=True)
    performs a blocking dashi.call.
    """

    def __init__(self, incoming=None, CFG=None, dashi=None, ee_name=None,
            pd_name=None, handle_heartbeat=True, log=logging):
        # incoming: callback invoked with a JSON string for each heartbeat.
        # dashi: reuse an existing connection when given; otherwise connect
        # under the PD name/exchange taken from CFG.
        self.CFG = CFG
        self.ee_name = ee_name or CFG.eeagent.name

        if dashi:
            self.dashi = dashi
        else:
            self.pd_name = pd_name or CFG.pd.name
            self.exchange = CFG.server.amqp.exchange
            self.dashi = dashi_connect(self.pd_name, CFG)

        self._log = log
        self.incoming = incoming
        if handle_heartbeat:
            self.dashi.handle(self.heartbeat, "heartbeat")

    def heartbeat(self, message):
        # Forward heartbeat payloads to the registered callback as JSON text.
        self.incoming(json.dumps(message))

    def launch(self, params, round=0, run_type=EEAgentLaunchType.supd):
        # Generate a short unique process id and fire the launch request;
        # returns the (upid, round) pair for later terminate/cleanup calls.
        upid = str(uuid.uuid4()).split("-")[0]
        self.dashi.fire(self.ee_name, "launch_process", u_pid=upid, round=round, run_type=run_type, parameters=params)
        return (upid, round)

    def terminate(self, upid, round):
        self.dashi.fire(self.ee_name, "terminate_process", u_pid=upid, round=round)

    def restart(self, upid, round):
        self.dashi.fire(self.ee_name, "restart_process", u_pid=upid, round=round)

    def dump(self, rpc=False):
        # Fixed: the rpc branch was missing its closing parenthesis (a
        # SyntaxError) and hard-coded rpc=True; dump is now non-blocking by
        # default, matching the other fire-and-forget operations.
        if rpc:
            return self.dashi.call(self.ee_name, "dump_state", rpc=rpc)
        else:
            self.dashi.fire(self.ee_name, "dump_state")

    def cleanup(self, upid, round):
        self.dashi.fire(self.ee_name, "cleanup", u_pid=upid, round=round)

    def poll(self, timeout=None, count=None):
        # With a timeout, consume exactly one message; a socket timeout is
        # treated as "nothing arrived" rather than an error. (The bound
        # exception variable was unused, so the py2-only "except E, ex"
        # form is dropped.)
        if timeout:
            count = 1
        try:
            self.dashi.consume(timeout=timeout, count=count)
        except socket.timeout:
            pass
| apache-2.0 | Python |
b8934c8854ebac38adaad2487b2b5549ad50fea7 | add pop and snp options to eigenstrat2vcf.py | mathii/gdc | eigenstrat2vcf.py | eigenstrat2vcf.py | #eigenstrat (packed or unpacked) to vcf
#Writes to stdout, unlike many of these scripts.
#Usage: python eigenstrat2vcf.py -r root [options]
#Data files are root.snp, root.ind and root.geno
from __future__ import division, print_function
import argparse, gdc, pyEigenstrat
#Remember, in eigenstrat, 2 means "2 ref copies"
GT_DICT={2:"0/0", 1:"0/1", 0:"1/1", 9:"./."}
################################################################################
def parse_options():
    """
    Build the command-line parser and return the parsed arguments.
    """
    parser = argparse.ArgumentParser()
    # All options share type=str and an empty-string default.
    specs = (
        ('-r', '--root', "Root for eigenstrat files - i.e {root.snp, root.geno, root.ind}"),
        ('-i', '--inds', "File with individual samples to include, one individual per line"),
        ('-p', '--pops', "File with populations to include, one population per line"),
        ('-s', '--snps', "File with snps to include, one snp per line"),
    )
    for short_flag, long_flag, help_text in specs:
        parser.add_argument(short_flag, long_flag, type=str, default="", help=help_text)
    return parser.parse_args()
################################################################################
def main(options):
    """
    Convert an eigenstrat dataset to VCF, writing the result to stdout.
    """
    # Empty filter lists mean "no filtering" for pyEigenstrat.load.
    inds = pops = snps = []
    # Fixed: these three assignments used "==" (a no-op comparison), so the
    # filter files were read but never applied.
    # x[:-1] drops the trailing newline - assumes each line ends with one.
    if options.inds:
        inds = [x[:-1] for x in open(options.inds) if x[:-1]]
    if options.pops:
        pops = [x[:-1] for x in open(options.pops) if x[:-1]]
    if options.snps:
        snps = [x[:-1] for x in open(options.snps) if x[:-1]]
    data = pyEigenstrat.load(options.root, inds=inds, pops=pops, snps=snps)

    # Write header.
    print("##fileformat=VCFv4.0")
    print("##source=eigenstrat2vcf.py")
    print("##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">")
    print("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t"+"\t".join(data.ind["IND"]))

    # Now line by line write data
    for i, d in enumerate(data):
        this_snp = data.snp[i]
        line = "\t".join([this_snp["CHR"], str(this_snp["POS"]), this_snp["ID"],
                          this_snp["REF"], this_snp["ALT"], "100", "PASS", ".", "GT"])
        # Genotype codes: eigenstrat counts REF copies, mapped via GT_DICT.
        line = line + "\t" + "\t".join([GT_DICT[x] for x in d])
        print(line)
################################################################################
if __name__=="__main__":
options=parse_options()
main(options)
| #eigenstrat (packed or unpacked) to vcf
#Writes to stdout, unlike many of these scripts.
#Usage: python eigenstrat2vcf.py -i root
#Data files are root.snp, root.ind and root.geno
from __future__ import division, print_function
import argparse, gdc, pyEigenstrat
#Remember, in eigenstrat, 2 means "2 ref copies"
GT_DICT={2:"0/0", 1:"0/1", 0:"1/1", 9:"./."}
################################################################################
def parse_options():
"""
argparse
"""
parser=argparse.ArgumentParser()
parser.add_argument('-r', '--root', type=str, default="", help=
"Root for eigenstrat files - i.e {root.snp, root.geno, root.ind}")
parser.add_argument('-i', '--ind', type=str, default="", help=
"individual samples to include")
return parser.parse_args()
################################################################################
def main(options):
"""
Convert
"""
inds=[]
if(options.ind):
inds== [x[:-1] for x in open(options.ind) if x[:-1]]
data=pyEigenstrat.load(options.root, inds=inds)
#Write header.
print("##fileformat=VCFv4.0")
print("##source=eigenstrat2vcf.R")
print("##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">")
print("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t"+"\t".join(data.ind["IND"]))
#Now line by line write data
for i,d in enumerate(data):
this_snp=data.snp[i]
line="\t".join([this_snp["CHR"], str(this_snp["POS"]), this_snp["ID"],
this_snp["REF"], this_snp["ALT"], "100", "PASS", ".", "GT" ])
line=line+"\t"+"\t".join([GT_DICT[x] for x in d])
print(line)
################################################################################
if __name__=="__main__":
options=parse_options()
main(options)
| apache-2.0 | Python |
48e15b8f99bb0714b7ec465a0131e452c67004e5 | Fix index to list when there is no 2nd element | chengwliu/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,ultinomics/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,Fillll/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,ifduyue/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,jrmontag/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,lexual/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,ViralLeadership/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,jrmontag/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,lexual/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,aitatanit/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,noelevans/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,noelevans/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,chengwliu/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,jrmontag/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,alkalait/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,ViralLeadership/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,ViralLeadership/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,shhong/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,lexual/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,chengwliu/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,ifduyue/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,Fillll/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,alkalait/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,aitatanit/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,shhong/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,ultinomics/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,aitatanit/Probabilistic-Programming-and-Bayesian
-Methods-for-Hackers,ifduyue/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,Fillll/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,alkalait/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,ultinomics/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,shhong/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers,noelevans/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers | Chapter4_TheGreatestTheoremNeverTold/top_pic_comments.py | Chapter4_TheGreatestTheoremNeverTold/top_pic_comments.py | import sys
import numpy as np
from IPython.core.display import Image
import praw
reddit = praw.Reddit("BayesianMethodsForHackers")
subreddit = reddit.get_subreddit( "pics" )
top_submissions = subreddit.get_top()
n_pic = int( sys.argv[1] ) if len(sys.argv) > 1 else 1
i = 0
while i < n_pic:
top_submission = top_submissions.next()
while "i.imgur.com" not in top_submission.url:
#make sure it is linking to an image, not a webpage.
top_submission = top_submissions.next()
i+=1
print "Title of submission: \n", top_submission.title
top_post_url = top_submission.url
#top_submission.replace_more_comments(limit=5, threshold=0)
print top_post_url
upvotes = []
downvotes = []
contents = []
_all_comments = top_submission.comments
all_comments=[]
for comment in _all_comments:
try:
upvotes.append( comment.ups )
downvotes.append( comment.downs )
contents.append( comment.body )
except Exception as e:
continue
votes = np.array( [ upvotes, downvotes] ).T
| import sys
import numpy as np
from IPython.core.display import Image
import praw
reddit = praw.Reddit("BayesianMethodsForHackers")
subreddit = reddit.get_subreddit( "pics" )
top_submissions = subreddit.get_top()
n_pic = int( sys.argv[1] ) if sys.argv[1] else 1
i = 0
while i < n_pic:
top_submission = top_submissions.next()
while "i.imgur.com" not in top_submission.url:
#make sure it is linking to an image, not a webpage.
top_submission = top_submissions.next()
i+=1
print "Title of submission: \n", top_submission.title
top_post_url = top_submission.url
#top_submission.replace_more_comments(limit=5, threshold=0)
print top_post_url
upvotes = []
downvotes = []
contents = []
_all_comments = top_submission.comments
all_comments=[]
for comment in _all_comments:
try:
upvotes.append( comment.ups )
downvotes.append( comment.downs )
contents.append( comment.body )
except Exception as e:
continue
votes = np.array( [ upvotes, downvotes] ).T
| mit | Python |
61b363de6ac007f8109e6dd7dc960d9dda4b226e | Bump version to 0.9pbs.47 | pbs/django-filer,pbs/django-filer,pbs/django-filer,pbs/django-filer,pbs/django-filer | filer/__init__.py | filer/__init__.py | #-*- coding: utf-8 -*-
# version string following pep-0396 and pep-0386
__version__ = '0.9pbs.47' # pragma: nocover
| #-*- coding: utf-8 -*-
# version string following pep-0396 and pep-0386
__version__ = '0.9pbs.46' # pragma: nocover
| bsd-3-clause | Python |
8b8fa041d293cd092783f4b3f1870859f8f3ac25 | Bump develop to 1.1.1.post1 | divio/django-filer,jakob-o/django-filer,DylannCordel/django-filer,jakob-o/django-filer,stefanfoulis/django-filer,webu/django-filer,webu/django-filer,skirsdeda/django-filer,skirsdeda/django-filer,stefanfoulis/django-filer,DylannCordel/django-filer,jakob-o/django-filer,divio/django-filer,stefanfoulis/django-filer,webu/django-filer,skirsdeda/django-filer,skirsdeda/django-filer,DylannCordel/django-filer,jakob-o/django-filer,DylannCordel/django-filer,jakob-o/django-filer,divio/django-filer,stefanfoulis/django-filer,divio/django-filer,stefanfoulis/django-filer,skirsdeda/django-filer,webu/django-filer,DylannCordel/django-filer | filer/__init__.py | filer/__init__.py | # -*- coding: utf-8 -*-
# version string following pep-0396 and pep-0386
__version__ = '1.1.1.post1' # pragma: nocover
default_app_config = 'filer.apps.FilerConfig'
| # -*- coding: utf-8 -*-
# version string following pep-0396 and pep-0386
__version__ = '1.1.1' # pragma: nocover
default_app_config = 'filer.apps.FilerConfig'
| bsd-3-clause | Python |
a7abc13bd41e5c0a0aa80370d1f2ffdc28b11f36 | Update version.py | ambitioninc/django-entity,wesleykendall/django-entity,robdmc/django-entity,wesokes/django-entity,Wilduck/django-entity | entity/version.py | entity/version.py | __version__ = '1.8.0'
| __version__ = '1.7.5'
| mit | Python |
f31389fc6dcec7ba9c2d619729d8168ac3919439 | update included SMS | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/accounting/bootstrap/config/user_buckets_jan_2017.py | corehq/apps/accounting/bootstrap/config/user_buckets_jan_2017.py | from decimal import Decimal
from corehq.apps.accounting.models import (
FeatureType,
SoftwarePlanEdition,
)
BOOTSTRAP_CONFIG = {
(SoftwarePlanEdition.COMMUNITY, False): {
'role': 'community_plan_v1',
'product_rate': dict(),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=10, per_excess_fee=Decimal('2.00')),
FeatureType.SMS: dict(monthly_limit=0),
}
},
(SoftwarePlanEdition.STANDARD, False): {
'role': 'standard_plan_v0',
'product_rate': dict(monthly_fee=Decimal('100.00')),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=50, per_excess_fee=Decimal('2.00')),
FeatureType.SMS: dict(monthly_limit=50),
}
},
(SoftwarePlanEdition.PRO, False): {
'role': 'pro_plan_v0',
'product_rate': dict(monthly_fee=Decimal('500.00')),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=250, per_excess_fee=Decimal('2.00')),
FeatureType.SMS: dict(monthly_limit=50),
}
},
(SoftwarePlanEdition.ADVANCED, False): {
'role': 'advanced_plan_v0',
'product_rate': dict(monthly_fee=Decimal('1000.00')),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=500, per_excess_fee=Decimal('2.00')),
FeatureType.SMS: dict(monthly_limit=50),
}
},
(SoftwarePlanEdition.ADVANCED, True): {
'role': 'advanced_plan_v0',
'product_rate': dict(),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=10, per_excess_fee=Decimal('2.00')),
FeatureType.SMS: dict(monthly_limit=0),
}
}
}
| from decimal import Decimal
from corehq.apps.accounting.models import (
FeatureType,
SoftwarePlanEdition,
)
BOOTSTRAP_CONFIG = {
(SoftwarePlanEdition.COMMUNITY, False): {
'role': 'community_plan_v1',
'product_rate': dict(),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=10, per_excess_fee=Decimal('2.00')),
FeatureType.SMS: dict(monthly_limit=0),
}
},
(SoftwarePlanEdition.STANDARD, False): {
'role': 'standard_plan_v0',
'product_rate': dict(monthly_fee=Decimal('100.00')),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=50, per_excess_fee=Decimal('2.00')),
FeatureType.SMS: dict(monthly_limit=100),
}
},
(SoftwarePlanEdition.PRO, False): {
'role': 'pro_plan_v0',
'product_rate': dict(monthly_fee=Decimal('500.00')),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=250, per_excess_fee=Decimal('2.00')),
FeatureType.SMS: dict(monthly_limit=500),
}
},
(SoftwarePlanEdition.ADVANCED, False): {
'role': 'advanced_plan_v0',
'product_rate': dict(monthly_fee=Decimal('1000.00')),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=500, per_excess_fee=Decimal('2.00')),
FeatureType.SMS: dict(monthly_limit=1000),
}
},
(SoftwarePlanEdition.ADVANCED, True): {
'role': 'advanced_plan_v0',
'product_rate': dict(),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=10, per_excess_fee=Decimal('2.00')),
FeatureType.SMS: dict(monthly_limit=0),
}
}
}
| bsd-3-clause | Python |
30fb19ed837e063ac28a0afb0104a93accb511c7 | Move location data to line start | Suor/flaws | flaws/__init__.py | flaws/__init__.py | #!/usr/bin/env python
import sys
import ast
from funcy import all
import astor
from .asttools import is_write, is_use, is_constant, name_class, node_str, to_source
from .scopes import TreeLinker, ScopeBuilder
from .infer import Inferer
def slurp(filename):
    """Return the entire contents of *filename* as one string."""
    with open(filename) as handle:
        contents = handle.read()
    return contents
def main():
    """Analyze every file named on the command line and report
    undefined and never-used names."""
    for filename in sys.argv[1:]:
        print '> Analyzing %s...' % filename
        source = slurp(filename)
        tree = ast.parse(source, filename=filename)
        # print astor.dump(tree)
        # Attach parent links and scope objects to the AST, then run
        # type inference over it.
        TreeLinker().visit(tree)
        ScopeBuilder().visit(tree)
        print tree.scope
        print astor.dump(tree)
        Inferer().visit(tree)
        print to_source(tree)
        # for scope, name, nodes in top.walk():
        #     for node in nodes:
        #         print '%s = %s at %s' % (name, node.val, node_str(node))
        for scope, name, nodes in tree.scope.walk():
            node = nodes[0]
            # Only ever read, never assigned, not a builtin: undefined.
            # Skipped when a wildcard import could be supplying the name.
            if all(is_use, nodes) and not scope.is_builtin(name) and not scope.has_wildcards:
                print '%s:%d:%d: undefined variable %s' \
                    % (filename, node.lineno, node.col_offset, name)
            # Only ever written: candidate "never used" warning, with
            # escapes for __all__/_ names, explicit exports, and public
            # defs/constants in modules without an explicit export list.
            if not scope.is_class and all(is_write, nodes):
                if name == '__all__' and scope.is_module or name == '_':
                    continue
                elif scope.exports is not None and name in scope.exports:
                    continue
                elif scope.exports is None and not name.startswith('_'):
                    if isinstance(node, (ast.FunctionDef, ast.ClassDef)) or is_constant(node):
                        continue
                print '%s:%d:%d: %s %s is never used' % \
                    (filename, node.lineno, node.col_offset, name_class(node).title(), name)
if __name__ == '__main__':
main()
| #!/usr/bin/env python
import sys
import ast
from funcy import all
import astor
from .asttools import is_write, is_use, is_constant, name_class, node_str, to_source
from .scopes import TreeLinker, ScopeBuilder
from .infer import Inferer
def slurp(filename):
with open(filename) as f:
return f.read()
def main():
for filename in sys.argv[1:]:
print '> Analyzing %s...' % filename
source = slurp(filename)
tree = ast.parse(source, filename=filename)
# print astor.dump(tree)
TreeLinker().visit(tree)
ScopeBuilder().visit(tree)
print tree.scope
print astor.dump(tree)
Inferer().visit(tree)
print to_source(tree)
# for scope, name, nodes in top.walk():
# for node in nodes:
# print '%s = %s at %s' % (name, node.val, node_str(node))
for scope, name, nodes in tree.scope.walk():
node = nodes[0]
if all(is_use, nodes) and not scope.is_builtin(name) and not scope.has_wildcards:
print 'Undefined variable %s at %s:%d:%d' \
% (name, filename, node.lineno, node.col_offset)
if not scope.is_class and all(is_write, nodes):
if name == '__all__' and scope.is_module or name == '_':
continue
elif scope.exports is not None and name in scope.exports:
continue
elif scope.exports is None and not name.startswith('_'):
if isinstance(node, (ast.FunctionDef, ast.ClassDef)) or is_constant(node):
continue
print '%s %s is never used at %s:%d:%d' % \
(name_class(node).title(), name, filename, node.lineno, node.col_offset)
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
86bff7d5b1d8f25a5160c0ae4705096e391e435f | add a fuzzy option to giant bit | dit/dit,dit/dit,Autoplectic/dit,dit/dit,Autoplectic/dit,dit/dit,Autoplectic/dit,dit/dit,Autoplectic/dit,Autoplectic/dit | dit/example_dists/giant_bit.py | dit/example_dists/giant_bit.py | """
Giant bit type distributions.
"""
from __future__ import division
from itertools import product
from .. import Distribution
from ..distconst import uniform
def giant_bit(n, k, fuzzy=False):
    """
    Return a 'giant bit' distribution of size `n` and alphabet size `k`.

    Parameters
    ----------
    n : int
        The number of identical bits.
    k : int
        The number of states for each bit.
    fuzzy : bool
        If true, add some noise to the giant bit: the k constant outcomes
        share 99% of the probability mass and all other outcomes share
        the remaining 1%.

    Returns
    -------
    gb : Distribution
        The giant bit distribution.
    """
    # Number of non-constant outcomes. It is zero when n == 1 or k == 1;
    # in that case there is nothing to fuzz and the original pr2 below
    # would divide by zero, so fall back to the exact distribution.
    N = k**n - k
    if fuzzy and N > 0:
        alpha = list(map(str, range(k)))
        pr1 = 0.99/k   # mass on each of the k constant outcomes
        pr2 = 0.01/N   # mass on each of the N remaining outcomes
        outcomes = [''.join(o) for o in product(alpha, repeat=n)]
        pmf = [(pr1 if all(_ == o[0] for _ in o) else pr2) for o in outcomes]
        return Distribution(outcomes, pmf)
    else:
        return uniform([str(i)*n for i in range(k)])
| """
Giant bit type distributions.
"""
from ..distconst import uniform
def giant_bit(n, k):
"""
Return a 'giant bit' distribution of size `n` and alphabet size `k`.
Parameters
----------
n : int
The number of identical bits.
k : int
The number of states for each bit.
Returns
-------
gb : Distribution
The giant bit distribution.
"""
return uniform([str(i)*n for i in range(k)]) | bsd-3-clause | Python |
afd27c62049e87eaefbfb5f38c6b61b461656384 | Add hash and eq methods to Token | bambinos/formulae | formulae/token.py | formulae/token.py | class Token:
"""Representation of a single Token"""
def __init__(self, _type, lexeme, literal=None):
self.type = _type
self.lexeme = lexeme
self.literal = literal
def __hash__(self):
return hash((self.type, self.lexeme, self.literal))
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
return (
self.type == other.type
and self.lexeme == other.lexeme
and self.literal == other.literal
)
def __repr__(self):
string_list = [
"'type': " + str(self.type),
"'lexeme': " + str(self.lexeme),
"'literal': " + str(self.literal),
]
return "{" + ", ".join(string_list) + "}"
def __str__(self):
string_list = [
"type= " + str(self.type),
"lexeme= " + str(self.lexeme),
"literal= " + str(self.literal),
]
return "Token(" + ", ".join(string_list) + ")"
| class Token:
"""Representation of a single Token"""
def __init__(self, _type, lexeme, literal=None):
self.type = _type
self.lexeme = lexeme
self.literal = literal
def __repr__(self):
string_list = [
"'type': " + str(self.type),
"'lexeme': " + str(self.lexeme),
"'literal': " + str(self.literal),
]
return "{" + ", ".join(string_list) + "}"
def __str__(self):
string_list = [
"type= " + str(self.type),
"lexeme= " + str(self.lexeme),
"literal= " + str(self.literal),
]
return "Token(" + ", ".join(string_list) + ")"
| mit | Python |
d5466fc0b1520ccf8a2dcd1f7b5bd8c6471e3343 | Fix object as a variable name shadows internal python object, so we don't use object as a variable name | HelloLily/hellolily,HelloLily/hellolily,HelloLily/hellolily,HelloLily/hellolily | lily/tags/forms.py | lily/tags/forms.py | from django import forms
from django.contrib.contenttypes.models import ContentType
from django.db.models.query_utils import Q
from lily.tags.models import Tag
from lily.utils.fields import MultipleInputAndChoiceField
from lily.utils.widgets import InputAndSelectMultiple
class TagsFormMixin(forms.ModelForm):
    """
    Mixin that adds tags to a ModelForm.
    """
    # Free-form tag input with autocomplete; cleaned value is a list of
    # plain tag-name strings.
    tags = MultipleInputAndChoiceField(choices=[], required=False,
        widget=InputAndSelectMultiple(attrs={
            'class': 'input-and-choice-select',
        }))

    def __init__(self, *args, **kwargs):
        """
        Set the initial values for tags.
        """
        super(TagsFormMixin, self).__init__(*args, **kwargs)
        # Provide autocomplete suggestions and already linked tags.
        # Initial: tags currently attached to this instance; choices: every
        # distinct tag name ever used on this model class.
        self.fields['tags'].initial = self.instance.tags.all().values_list('name', flat=True)
        self.fields['tags'].choices = [(tag, tag) for tag in Tag.objects.filter(content_type=ContentType.objects.get_for_model(self.instance.__class__)).values_list('name', flat=True).distinct()]

    def save(self, commit=True):
        """
        Overloading super().save to create save tags and create the relationships with
        this account instance. Needs to be done here because the Tags are expected to exist
        before self.instance is saved.
        """
        # Save model instance
        instance = super(TagsFormMixin, self).save()
        # Save tags
        tags = self.cleaned_data.get('tags')
        for tag in tags:
            # Create relationship with Tag if it's a new tag
            tag_object, created = Tag.objects.get_or_create(
                name=tag,
                object_id=getattr(instance, instance.__class__._meta.pk.column),
                content_type=ContentType.objects.get_for_model(instance.__class__),
            )
            if created:
                instance.tags.add(tag_object)
        # Remove tags with any relationships to instance that weren't included in POST data
        # NOTE(review): this filter matches on object_id only (not
        # content_type) - assumes pks don't collide across tagged models;
        # verify before reuse.
        tags_to_remove = Tag.objects.filter(~Q(name__in=tags), object_id=instance.pk)
        tags_to_remove.delete()
        return instance

    class Meta:
        fields = ('tags',)
fields = ('tags',)
| from django import forms
from django.contrib.contenttypes.models import ContentType
from django.db.models.query_utils import Q
from lily.tags.models import Tag
from lily.utils.fields import MultipleInputAndChoiceField
from lily.utils.widgets import InputAndSelectMultiple
class TagsFormMixin(forms.ModelForm):
"""
Mixin that adds tags to a ModelForm.
"""
tags = MultipleInputAndChoiceField(choices=[], required=False,
widget=InputAndSelectMultiple(attrs={
'class': 'input-and-choice-select',
}))
def __init__(self, *args, **kwargs):
"""
Set the initial values for tags.
"""
super(TagsFormMixin, self).__init__(*args, **kwargs)
# Provide autocomplete suggestions and already linked tags
self.fields['tags'].initial = self.instance.tags.all().values_list('name', flat=True)
self.fields['tags'].choices = [(tag, tag) for tag in Tag.objects.filter(content_type=ContentType.objects.get_for_model(self.instance.__class__)).values_list('name', flat=True).distinct()]
def save(self, commit=True):
"""
Overloading super().save to create save tags and create the relationships with
this account instance. Needs to be done here because the Tags are expected to exist
before self.instance is saved.
"""
# Save model instance
instance = super(TagsFormMixin, self).save()
# Save tags
tags = self.cleaned_data.get('tags')
for tag in tags:
# Create relationship with Tag if it's a new tag
object, created = Tag.objects.get_or_create(name=tag,
object_id=getattr(instance, instance.__class__._meta.pk.column),
content_type=ContentType.objects.get_for_model(instance.__class__),
)
if created:
instance.tags.add(object)
# Remove tags with any relationships to instance that weren't included in POST data
tags_to_remove = Tag.objects.filter(~Q(name__in=tags), object_id=instance.pk)
tags_to_remove.delete()
return instance
class Meta:
fields = ('tags',)
| agpl-3.0 | Python |
6514e7ab874fe18f7587aecea8ad35c1767eb7db | Add pep8 fixes | lbragstad/keystone-performance,lbragstad/keystone-performance,lbragstad/keystone-performance | listener/listen.py | listener/listen.py | import argparse
import json
import time
from pygerrit import client
from pygerrit import events
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Process event stream from Gerrit.')
    parser.add_argument('-u', '--username', dest='username',
                        help='username', required=True)
    options = parser.parse_args()
    # SSH event stream against the upstream OpenStack Gerrit instance.
    gerrit = client.GerritClient(host='review.openstack.org',
                                 username=options.username,
                                 port=29418)
    print gerrit.gerrit_version()
    gerrit.start_event_stream()
    try:
        while True:
            event = gerrit.get_event()
            # Only react to "check performance" comments on keystone
            # changes; each match is dumped to disk for a separate worker.
            if isinstance(event, events.CommentAddedEvent):
                if event.change.project == 'openstack/keystone':
                    if 'check performance' in event.comment:
                        print event
                        # we have a patch set to test - write it to disk!
                        path = '/tmp/perf/'
                        fname = (path + event.change.number + '-' +
                                 event.patchset.number + '.json')
                        with open(fname, 'w') as f:
                            f.write(json.dumps(event.json))
            else:
                # No interesting event; back off to avoid busy-waiting.
                time.sleep(1)
    except KeyboardInterrupt:
        # Ctrl-C: shut the SSH event stream down cleanly before exiting.
        gerrit.stop_event_stream()
| import argparse
import json
import time
from pygerrit import client
from pygerrit import events
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Process event stream from Gerrit.')
parser.add_argument('-u', '--username', dest='username',
help='username', required=True)
options = parser.parse_args()
gerrit = client.GerritClient(host='review.openstack.org',
username=options.username,
port=29418)
print gerrit.gerrit_version()
gerrit.start_event_stream()
try:
while True:
event = gerrit.get_event()
if isinstance(event, events.CommentAddedEvent):
if event.change.project == 'openstack/keystone':
if 'check performance' in event.comment:
print event
# we have a patch set to test - write it to disk!
path = '/tmp/perf/'
fname = (path + event.change.number + '-' +
event.patchset.number +'.json')
with open(fname, 'w') as f:
f.write(json.dumps(event.json))
else:
time.sleep(1)
except KeyboardInterrupt:
gerrit.stop_event_stream()
exit()
| apache-2.0 | Python |
949eca8726c5c221f8e83e2c31ed1156d920915e | fix tables with indices starting with 1 | NightNord/ljd,jjdredd/ljd,mrexodia/ljd | ljd/ast/helpers.py | ljd/ast/helpers.py | import ljd.ast.nodes as nodes
def insert_table_record(constructor, key, value):
    """Insert a key/value pair into a table-constructor AST node.

    Constant integer keys that fit the array part go into
    ``constructor.array``; everything else becomes a TableRecord in the
    hash part (``constructor.records``).
    """
    array = constructor.array.contents
    records = constructor.records.contents
    # A MULTRES key means "append everything the previous expression
    # returned"; it always lands at the end of the hash part.
    if isinstance(key, nodes.MULTRES):
        assert len(records) == 0 \
            or isinstance(records[-1], nodes.TableRecord)
        records.append(value)
        return
    # NOTE: this "while" runs at most once - every path inside either
    # returns or breaks. It acts as an "if" that can bail out to the
    # hash-part handling below.
    while isinstance(key, nodes.Constant) \
            and key.type == key.T_INTEGER \
            and key.value >= 0:
        index = key.value
        # Lua array parts start at index 1: pad slot 0 with a nil
        # primitive so that index 1 appends correctly to an empty list.
        if index == 1 and len(array) == 0:
            record = nodes.ArrayRecord()
            record.value = nodes.Primitive()
            record.value.type = nodes.Primitive.T_NIL
            array.append(record)
        if (index > len(array)):
            # Would leave a hole in the array part - fall through to the
            # hash part instead.
            break
        record = nodes.ArrayRecord()
        record.value = value
        if len(array) == 0 or index == len(array):
            array.append(record)
        else:
            array[index] = record
        return
    # Hash part. A trailing FunctionCall/Vararg record must stay the last
    # entry of the constructor, so insert new records just before it.
    record = nodes.TableRecord()
    record.key = key
    record.value = value
    if len(records) == 0:
        records.append(record)
        return
    last = records[-1]
    if isinstance(last, (nodes.FunctionCall, nodes.Vararg)):
        records.insert(-1, record)
    else:
        records.append(record)
| import ljd.ast.nodes as nodes
def insert_table_record(constructor, key, value):
array = constructor.array.contents
records = constructor.records.contents
if isinstance(key, nodes.MULTRES):
assert len(records) == 0 \
or isinstance(records[-1], nodes.TableRecord)
records.append(value)
return
while isinstance(key, nodes.Constant) \
and key.type == key.T_INTEGER \
and key.value >= 0:
if key.value > len(array):
break
record = nodes.ArrayRecord()
record.value = value
if key.value == len(array):
array.append(record)
else:
array[key.value] = record
return
record = nodes.TableRecord()
record.key = key
record.value = value
if len(records) == 0:
records.append(record)
return
last = records[-1]
if isinstance(last, (nodes.FunctionCall, nodes.Vararg)):
records.insert(-1, record)
else:
records.append(record)
| mit | Python |
1f6811843b4e5b7d4afb2af84805f2d8dbeac639 | fix pool | fr0der1c/EveryClass-server,fr0der1c/EveryClass-server,fr0der1c/EveryClass-server,fr0der1c/EveryClass-server | everyclass/server/db/postgres.py | everyclass/server/db/postgres.py | from contextlib import contextmanager
import psycopg2
from DBUtils.PooledDB import PooledDB
from flask import current_app, has_app_context
from psycopg2.extras import register_hstore, register_uuid
from everyclass.server.config import get_config
_config = get_config()
_options = f'-c search_path={_config.POSTGRES_SCHEMA}'
def init_pool(current_application) -> None:
    """Create a connection pool and store it on the app's ``postgres`` attribute."""
    # more information at https://cito.github.io/DBUtils/UsersGuide.html
    current_application.postgres = PooledDB(creator=psycopg2,
                                            mincached=1,
                                            maxcached=4,
                                            maxconnections=4,
                                            blocking=True,
                                            **_config.POSTGRES_CONNECTION,
                                            options=_options)
@contextmanager
def pg_conn_context():
    """Yield a PostgreSQL connection (pooled inside a Flask app context,
    direct otherwise) and close it when the with-block exits."""
    if has_app_context():
        # Lazily build the pool the first time it is needed.
        if not getattr(current_app, "postgres", None):
            init_pool(current_app)
        conn = current_app.postgres.connection()
    else:
        # Outside an app context fall back to a plain connection.
        conn = psycopg2.connect(**_config.POSTGRES_CONNECTION,
                                options=_options)
    register_types(conn)
    try:
        yield conn
    finally:
        # Close even when the with-body raises; without the finally an
        # exception would leak the (pooled) connection.
        conn.close()
def register_types(conn):
    """Register uuid/hstore adapters on the underlying psycopg2 connection."""
    if has_app_context():
        real_conn = conn._con._con
        # conn is a PooledDB (or PersistentDB) connection whose _con is a
        # SteadyDB connection; the SteadyDB's _con is the raw psycopg2
        # connection object the adapters must be registered on.
    else:
        real_conn = conn
    register_uuid(conn_or_curs=real_conn)
    register_hstore(conn_or_curs=real_conn)
| from contextlib import contextmanager
import psycopg2
from DBUtils.PooledDB import PooledDB
from flask import current_app, has_app_context
from psycopg2.extras import register_hstore, register_uuid
from everyclass.server.config import get_config
_config = get_config()
_options = f'-c search_path={_config.POSTGRES_SCHEMA}'
def init_pool(current_application) -> None:
"""创建连接池,保存在 app 的 postgres 属性中"""
# more information at https://cito.github.io/DBUtils/UsersGuide.html
current_application.postgres = PooledDB(creator=psycopg2,
mincached=1,
maxcached=4,
maxconnections=4,
blocking=True,
**_config.POSTGRES_CONNECTION,
options=_options)
@contextmanager
def pg_conn_context():
if has_app_context():
if not getattr(current_app, "postgres"):
init_pool(current_app)
conn = current_app.postgres.connection()
else:
conn = psycopg2.connect(**_config.POSTGRES_CONNECTION,
options=_options)
register_types(conn)
yield conn
conn.close()
def register_types(conn):
if has_app_context():
real_conn = conn._con._con
# conn 是 PooledDB(或PersistentDB)的连接,它的 _con 是 SteadyDB。而 SteadyDB 的 _con 是原始的 psycopg2 连接对象
else:
real_conn = conn
register_uuid(conn_or_curs=real_conn)
register_hstore(conn_or_curs=real_conn)
| mpl-2.0 | Python |
1c1acbf252406d366089e9296643ae674342e43a | bump version | google/evojax | evojax/version.py | evojax/version.py | __version__ = "0.2.14"
| __version__ = "0.2.13"
| apache-2.0 | Python |
dc162d6d99b645af5d2b9aca7eae7f3b4080bfde | Update mergeSort.py | xala3pa/my-way-to-algorithms,xala3pa/my-way-to-algorithms | mergeSort/python/mergeSort.py | mergeSort/python/mergeSort.py | ######################################################
# #
# Sort algorithms - MergeSort #
# #
######################################################
import operator
def merge_sort(lst, compare=operator.lt):
    """Return a sorted copy of lst (lst itself is never modified).

    compare is a two-argument predicate: operator.lt gives ascending
    order, operator.gt descending.
    """
    if len(lst) < 2:
        return lst[:]
    # Floor division: plain '/' yields a float in Python 3, which would
    # make the slices below raise TypeError. '//' is identical in py2.
    middle = len(lst) // 2
    left = merge_sort(lst[:middle], compare)
    right = merge_sort(lst[middle:], compare)
    return merge(left, right, compare)

def merge(left, right, compare):
    """Merge two lists already sorted under compare into one sorted list."""
    result = []
    left_offset = 0
    right_offset = 0
    len_left = len(left)
    len_right = len(right)
    # Repeatedly take the "smaller" head while both lists have elements.
    while left_offset < len_left and right_offset < len_right:
        if compare(left[left_offset], right[right_offset]):
            result.append(left[left_offset])
            left_offset += 1
        else:
            result.append(right[right_offset])
            right_offset += 1
    # At most one of these slices is non-empty: drain the leftovers.
    result.extend(left[left_offset:])
    result.extend(right[right_offset:])
    return result
def test_algorithms (sorted, unsorted):
    """Assert that merge_sort(unsorted) equals sorted, then print the result."""
    # NOTE: the parameter name 'sorted' shadows the builtin inside this
    # function; it holds the expected (already sorted) list.
    expected = merge_sort(unsorted)
    assert expected == sorted
    print unsorted, ": sorted result -> ", expected
test_algorithms([1], [1])
test_algorithms([1, 2], [2, 1])
test_algorithms([1, 2, 3], [2, 3, 1])
test_algorithms([1, 2, 3, 4], [2, 3, 1, 4])
test_algorithms([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [10, 6, 2, 3, 5, 1, 7, 4, 9, 8])
test_algorithms([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], [18, 10, 19, 6, 2, 3, 5, 12, 11, 1, 13, 7, 14, 4, 9, 15, 8, 16, 17,20])
| ######################################################
# #
# Sort algorithms - MergeSort #
# #
######################################################
import operator
def merge_sort(lst, compare = operator.lt):
if len(lst) < 2:
return lst[:]
middle = len(lst)/2
left = merge_sort(lst[:middle], compare)
right = merge_sort(lst[middle:], compare)
return merge(left, right, compare)
def merge(left, right, compare):
result = []
left_offset = 0
right_offset = 0
len_left = len(left)
len_right = len(right)
while left_offset < len_left and right_offset < len_right:
if compare(left[left_offset], right[right_offset]):
result.append(left[left_offset])
left_offset += 1
else:
result.append(right[right_offset])
right_offset += 1
while left_offset < len_left:
result.append(left[left_offset])
left_offset += 1
while right_offset < len_right:
result.append(right[right_offset])
right_offset += 1
return result
def test_algorithms (sorted, unsorted):
expected = merge_sort(unsorted)
assert expected == sorted
print unsorted, ": sorted result -> ", expected
test_algorithms([1], [1])
test_algorithms([1, 2], [2, 1])
test_algorithms([1, 2, 3], [2, 3, 1])
test_algorithms([1, 2, 3, 4], [2, 3, 1, 4])
test_algorithms([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [10, 6, 2, 3, 5, 1, 7, 4, 9, 8])
test_algorithms([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], [18, 10, 19, 6, 2, 3, 5, 12, 11, 1, 13, 7, 14, 4, 9, 15, 8, 16, 17,20]) | mit | Python |
76878dbbeeb6fcff9b8d8c9b2798ee9df21923c9 | add version 0.8-81 to r-foreign (#20975) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/r-foreign/package.py | var/spack/repos/builtin/packages/r-foreign/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RForeign(RPackage):
    """Read Data Stored by 'Minitab', 'S', 'SAS', 'SPSS', 'Stata', 'Systat',
    'Weka', 'dBase', ...

    Reading and writing data stored by some versions of 'Epi Info', 'Minitab',
    'S', 'SAS', 'SPSS', 'Stata', 'Systat', 'Weka', and for reading and writing
    some 'dBase' files."""

    homepage = "https://cloud.r-project.org/package=foreign"
    url      = "https://cloud.r-project.org/src/contrib/foreign_0.8-66.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/foreign"

    # Checksums pin the exact CRAN release tarballs.
    version('0.8-81', sha256='1ae8f9f18f2a037697fa1a9060417ff255c71764f0145080b2bd23ba8262992c')
    version('0.8-72', sha256='439c17c9cd387e180b1bb640efff3ed1696b1016d0f7b3b3b884e89884488c88')
    version('0.8-70.2', sha256='ae82fad68159860b8ca75b49538406ef3d2522818e649d7ccc209c18085ef179')
    version('0.8-66', sha256='d7401e5fcab9ce6e697d3520dbb8475e229c30341c0004c4fa489c82aa4447a4')

    depends_on('r@3.0.0:', type=('build', 'run'))
    # foreign 0.8-81 and later require R >= 4.0 (see the when= constraint).
    depends_on('r@4.0.0:', when='@0.8-81:', type=('build', 'run'))
| # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RForeign(RPackage):
"""Functions for reading and writing data stored by some versions of Epi
Info, Minitab, S, SAS, SPSS, Stata, Systat and Weka and for reading and
writing some dBase files."""
homepage = "https://cloud.r-project.org/package=foreign"
url = "https://cloud.r-project.org/src/contrib/foreign_0.8-66.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/foreign"
version('0.8-72', sha256='439c17c9cd387e180b1bb640efff3ed1696b1016d0f7b3b3b884e89884488c88')
version('0.8-70.2', sha256='ae82fad68159860b8ca75b49538406ef3d2522818e649d7ccc209c18085ef179')
version('0.8-66', sha256='d7401e5fcab9ce6e697d3520dbb8475e229c30341c0004c4fa489c82aa4447a4')
depends_on('r@3.0.0:', type=('build', 'run'))
| lgpl-2.1 | Python |
6c0ea77d1d5e349329ad36aecaadff13c0ccf6c4 | Add list_url for older versions. | matthiasdiener/spack,LLNL/spack,TheTimmy/spack,iulian787/spack,tmerrick1/spack,EmreAtes/spack,iulian787/spack,mfherbst/spack,EmreAtes/spack,LLNL/spack,lgarren/spack,iulian787/spack,skosukhin/spack,iulian787/spack,lgarren/spack,lgarren/spack,matthiasdiener/spack,mfherbst/spack,LLNL/spack,matthiasdiener/spack,tmerrick1/spack,TheTimmy/spack,lgarren/spack,tmerrick1/spack,lgarren/spack,EmreAtes/spack,iulian787/spack,krafczyk/spack,krafczyk/spack,krafczyk/spack,TheTimmy/spack,skosukhin/spack,krafczyk/spack,tmerrick1/spack,matthiasdiener/spack,TheTimmy/spack,LLNL/spack,EmreAtes/spack,skosukhin/spack,matthiasdiener/spack,mfherbst/spack,krafczyk/spack,EmreAtes/spack,skosukhin/spack,TheTimmy/spack,skosukhin/spack,mfherbst/spack,LLNL/spack,tmerrick1/spack,mfherbst/spack | var/spack/repos/builtin/packages/r-packrat/package.py | var/spack/repos/builtin/packages/r-packrat/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RPackrat(Package):
    """Manage the R packages your project depends on in an isolated, portable,
    and reproducible way."""

    homepage = 'https://github.com/rstudio/packrat/'
    url      = "https://cran.r-project.org/src/contrib/packrat_0.4.7-1.tar.gz"
    # list_url lets Spack discover older releases in the CRAN archive.
    list_url = 'https://cran.r-project.org/src/contrib/Archive/packrat'

    version('0.4.7-1', '80c2413269b292ade163a70ba5053e84')

    extends('R')

    def install(self, spec, prefix):
        """Install via ``R CMD INSTALL`` into this package's R library dir."""
        R('CMD', 'INSTALL', '--library={0}'.format(self.module.r_lib_dir),
          self.stage.source_path)
| ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RPackrat(Package):
"""Manage the R packages your project depends on in an isolated, portable,
and reproducible way."""
homepage = 'https://github.com/rstudio/packrat/'
url = "https://cran.r-project.org/src/contrib/packrat_0.4.7-1.tar.gz"
version('0.4.7-1', '80c2413269b292ade163a70ba5053e84')
extends('R')
def install(self, spec, prefix):
R('CMD', 'INSTALL', '--library={0}'.format(self.module.r_lib_dir),
self.stage.source_path)
| lgpl-2.1 | Python |
d54bf5feb2ef832993ed4f5f75d073a531c357ea | Adjust title in module admin page | pvital/patchew,pvital/patchew,pvital/patchew,pvital/patchew | api/admin.py | api/admin.py | from django.contrib import admin
from .models import *
from mod import get_module
from django.contrib.auth.models import User, Group
class MessagePropertyInline(admin.TabularInline):
model = MessageProperty
extra = 0
class MessageAdmin(admin.ModelAdmin):
inlines = [
MessagePropertyInline
]
list_filter = [('is_series_head')]
class ModuleAssetInline(admin.TabularInline):
model = ModuleAsset
extra = 0
class ModuleAdmin(admin.ModelAdmin):
    """Admin page for Module objects with per-module docs and titles."""
    inlines = [
        ModuleAssetInline
    ]
    def get_fieldsets(self, request, obj=None):
        """Customize the fieldsets shown for an existing module.

        When the module has a loadable implementation, the read-only
        "name" field is removed from the first fieldset and the
        implementation class's docstring is rendered (as markdown) into
        the fieldset description.
        """
        fs = super(ModuleAdmin, self).get_fieldsets(request, obj)
        if obj:
            po = get_module(obj.name)
            if po:
                a, b = fs[0]
                # "name" is fixed once the module exists; hide it.
                b["fields"].remove("name")
                doc = type(po).__doc__
                if doc:
                    # Imported lazily: markdown is only needed here.
                    from markdown import markdown
                    b["description"] = markdown(doc)
        return fs
    def has_add_permission(self, request):
        # Modules are registered by the application, never added by hand.
        return False
    def change_view(self, request, object_id, form_url='', extra_context=None):
        """Render the change page with a "<Name> Module" page title."""
        extra_context = extra_context or {}
        q = Module.objects.filter(pk=object_id).first()
        if q:
            extra_context['title'] = "%s Module " % q.name.capitalize()
        return super(ModuleAdmin, self).change_view(
            request, object_id, form_url, extra_context=extra_context,
        )
class PatchewAdminSite(admin.AdminSite):
site_header = 'Patchew admin'
site_title = 'Patchew admin'
index_title = 'Patchew administration'
admin_site = PatchewAdminSite()
admin_site.register(Project)
admin_site.register(Message, MessageAdmin)
admin_site.register(Module, ModuleAdmin)
admin_site.register(User)
admin_site.register(Group)
| from django.contrib import admin
from .models import *
from mod import get_module
from django.contrib.auth.models import User, Group
class MessagePropertyInline(admin.TabularInline):
model = MessageProperty
extra = 0
class MessageAdmin(admin.ModelAdmin):
inlines = [
MessagePropertyInline
]
list_filter = [('is_series_head')]
class ModuleAssetInline(admin.TabularInline):
model = ModuleAsset
extra = 0
class ModuleAdmin(admin.ModelAdmin):
inlines = [
ModuleAssetInline
]
def get_fieldsets(self, request, obj=None):
fs = super(ModuleAdmin, self).get_fieldsets(request, obj)
if obj:
po = get_module(obj.name)
if po:
a, b = fs[0]
b["fields"].remove("name")
doc = type(po).__doc__
if doc:
from markdown import markdown
b["description"] = markdown(doc)
return fs
def has_add_permission(self, request):
return False
def change_view(self, request, object_id, form_url='', extra_context=None):
extra_context = extra_context or {}
q = Module.objects.filter(pk=object_id).first()
if q:
extra_context['title'] = "Configure module " + q.name
return super(ModuleAdmin, self).change_view(
request, object_id, form_url, extra_context=extra_context,
)
class PatchewAdminSite(admin.AdminSite):
site_header = 'Patchew admin'
site_title = 'Patchew admin'
index_title = 'Patchew administration'
admin_site = PatchewAdminSite()
admin_site.register(Project)
admin_site.register(Message, MessageAdmin)
admin_site.register(Module, ModuleAdmin)
admin_site.register(User)
admin_site.register(Group)
| mit | Python |
44aa2bb7924c3d18e576f165c79c6422ffedfc9a | allow listing of images not assigned to content | stadtgestalten/stadtgestalten,stadtgestalten/stadtgestalten,stadtgestalten/stadtgestalten | api/views.py | api/views.py | from . import serializers
from content import models as content_models
from rest_framework import viewsets, mixins
from django.db.models import Q
class ImageSet(mixins.CreateModelMixin, mixins.UpdateModelMixin, viewsets.ReadOnlyModelViewSet):
    """REST endpoint for images: list/retrieve plus create and update."""
    serializer_class = serializers.Image
    filter_fields = ('content', 'creator', )
    def get_queryset(self):
        """Images attached to content the user may see, or created by them.

        Ordered by descending weight.
        """
        user = self.request.user
        content = content_models.Content.objects.permitted(user)
        return content_models.Image.objects.filter(Q(content__in=content) | Q(creator=user.gestalt)).order_by('-weight')
    def has_permission(self):
        """Per-action permission checks.

        * create: allowed without a content reference; with one, requires
          the ``content.create_image`` permission on that content;
        * list: always allowed (the queryset is already filtered);
        * retrieve/update: object-level ``view_image`` / ``update_image``.
        """
        if self.action == 'create':
            content_pk = self.request.data.get('content')
            if content_pk:
                content = content_models.Content.objects.get(pk=content_pk)
                return self.request.user.has_perm('content.create_image', content)
            return True
        elif self.action == 'list':
            return True
        elif self.action == 'retrieve':
            image = self.get_object()
            return self.request.user.has_perm('content.view_image', image)
        elif self.action == 'update':
            image = self.get_object()
            return self.request.user.has_perm('content.update_image', image)
        return False
| from . import serializers
from content import models as content_models
from rest_framework import viewsets, mixins
from django.db.models import Q
class ImageSet(mixins.CreateModelMixin, mixins.UpdateModelMixin, viewsets.ReadOnlyModelViewSet):
serializer_class = serializers.Image
filter_fields = ('content', 'creator', )
def get_queryset(self):
user = self.request.user
content = content_models.Content.objects.permitted(user)
return content_models.Image.objects.filter(Q(content__in=content) | Q(creator=user.gestalt)).order_by('-weight')
def has_permission(self):
if self.action == 'create':
content_pk = self.request.data.get('content')
if content_pk:
content = content_models.Content.objects.get(pk=content_pk)
return self.request.user.has_perm('content.create_image', content)
return False
elif self.action == 'list':
return True
elif self.action == 'retrieve':
image = self.get_object()
return self.request.user.has_perm('content.view_image', image)
elif self.action == 'update':
image = self.get_object()
return self.request.user.has_perm('content.update_image', image)
return False
| agpl-3.0 | Python |
75a28cc836c7eed2017ff0b2fab0392def3e0ce2 | fix InvalidCryptoBackendError trigering a TypeError thanks dz fixes #13 | lann/python-beaker | beaker/exceptions.py | beaker/exceptions.py | """Beaker exception classes"""
class BeakerException(Exception):
pass
class CreationAbortedError(Exception):
"""Deprecated."""
class InvalidCacheBackendError(BeakerException, ImportError):
pass
class MissingCacheParameter(BeakerException):
pass
class LockError(BeakerException):
pass
class InvalidCryptoBackendError(BeakerException, ImportError):
def __init__(self):
Exception.__init__(self,
'No supported crypto implementation was found')
| """Beaker exception classes"""
class BeakerException(Exception):
pass
class CreationAbortedError(Exception):
"""deprecated."""
class InvalidCacheBackendError(BeakerException, ImportError):
pass
class MissingCacheParameter(BeakerException):
pass
class LockError(BeakerException):
pass
class InvalidCryptoBackendError(BeakerException, ImportError):
    """Raised when no usable cryptography backend could be imported."""
    def __init__(self):
        # ``self`` must be passed explicitly when calling the unbound
        # Exception.__init__; omitting it made instantiation raise a
        # TypeError instead of producing this error.
        Exception.__init__(self,
            'No supported crypto implementation was found')
034bbf1f3a3f3155583cc44d55be282ac6612746 | Index http transport | napalm-automation/napalm-logs,napalm-automation/napalm-logs | napalm_logs/transport/__init__.py | napalm_logs/transport/__init__.py | # -*- coding: utf-8 -*-
'''
napalm-logs pluggable publisher.
'''
from __future__ import absolute_import
from __future__ import unicode_literals
# Import python std lib
import logging
# Import napalm-logs pkgs
# Exceptions
from napalm_logs.exceptions import InvalidTransportException
# Transport classes
from napalm_logs.transport.cli import CLITransport
from napalm_logs.transport.log import LogTransport
from napalm_logs.transport.zeromq import ZMQTransport
# extras: require additional underlying libraries
# ~~~ Kafka ~~~
from napalm_logs.transport.kafka import HAS_KAFKA
from napalm_logs.transport.kafka import KafkaTransport
# ~~~ HTTP ~~~
from napalm_logs.transport.http import HAS_TORNADO
from napalm_logs.transport.http import HAS_REQUESTS
from napalm_logs.transport.http import HTTPTransport
# from napalm_logs.transport.rabbitmq import RabbitMQTransport
log = logging.getLogger(__file__)
TRANSPORT_LOOKUP = {
'zeromq': ZMQTransport,
'zmq': ZMQTransport,
'cli': CLITransport,
'print': CLITransport,
'console': CLITransport,
'log': LogTransport,
# 'rmq': RabbitMQransport,
# 'rabbitmq': RabbitMQransport,
'*': ZMQTransport
}
if HAS_KAFKA:
log.info('Kafka dependency seems to be installed, making kafka transport available.')
TRANSPORT_LOOKUP['kafka'] = KafkaTransport
if HAS_REQUESTS or HAS_TORNADO:
TRANSPORT_LOOKUP['http'] = HTTPTransport
def get_transport(name):
    '''
    Return the transport class registered under *name*.

    Looks *name* up in TRANSPORT_LOOKUP (populated above; optional
    transports such as kafka and http are only registered when their
    dependencies imported successfully) and raises
    InvalidTransportException for unknown or unavailable transports.
    '''
    try:
        # EAFP: attempt the lookup and translate a KeyError into the
        # package-specific exception callers are expected to handle.
        log.debug('Using %s as transport', name)
        return TRANSPORT_LOOKUP[name]
    except KeyError:
        msg = 'Transport {} is not available. Are the dependencies installed?'.format(name)
        log.error(msg, exc_info=True)
        raise InvalidTransportException(msg)
__all__ = (
'get_transport',
)
| # -*- coding: utf-8 -*-
'''
napalm-logs pluggable publisher.
'''
from __future__ import absolute_import
from __future__ import unicode_literals
# Import python std lib
import logging
# Import napalm-logs pkgs
# Exceptions
from napalm_logs.exceptions import InvalidTransportException
# Transport classes
from napalm_logs.transport.zeromq import ZMQTransport
from napalm_logs.transport.cli import CLITransport
from napalm_logs.transport.log import LogTransport
# extras: require additional underlying libraries
from napalm_logs.transport.kafka import HAS_KAFKA
from napalm_logs.transport.kafka import KafkaTransport
# from napalm_logs.transport.kafka import KafkaTransport
# from napalm_logs.transport.rabbitmq import RabbitMQTransport
log = logging.getLogger(__file__)
TRANSPORT_LOOKUP = {
'zeromq': ZMQTransport,
'zmq': ZMQTransport,
'cli': CLITransport,
'print': CLITransport,
'console': CLITransport,
'log': LogTransport,
# 'rmq': RabbitMQransport,
# 'rabbitmq': RabbitMQransport,
'*': ZMQTransport
}
if HAS_KAFKA:
log.info('Kafka dependency seems to be installed, making kafka transport available.')
TRANSPORT_LOOKUP['kafka'] = KafkaTransport
def get_transport(name):
'''
Return the transport class.
'''
try:
log.debug('Using %s as transport', name)
return TRANSPORT_LOOKUP[name]
except KeyError:
msg = 'Transport {} is not available. Are the dependencies installed?'.format(name)
log.error(msg, exc_info=True)
raise InvalidTransportException(msg)
__all__ = (
'get_transport',
)
| apache-2.0 | Python |
d012763c57450555d45385ed9b254f500388618e | Use the same normlization for whole gif | stevearm/automata | automata/render.py | automata/render.py | import matplotlib
matplotlib.use('Agg')
import matplotlib.colors
import matplotlib.pyplot as plt
import matplotlib.animation as animation
class AnimatedGif:
    """ Setup various rendering things
    """
    def __init__(self, dpi=100, colors="Purples"):
        # One tuple of image artists per appended frame.
        self.frames = []
        self.fig = plt.figure(dpi=dpi)
        plt.axis("off")
        # Name of the matplotlib colormap applied to every frame.
        self.colors = colors
        # A single shared Normalize instance is passed to every imshow
        # call so all frames are mapped onto one common color scale
        # instead of being normalized per frame.
        self.normalize = matplotlib.colors.Normalize()
        # Shape of the first appended universe; later frames must match.
        self.dimensions = None
    def append(self, universe):
        """Add one frame; *universe* is 2D, or 2D of RGB(A) triples/quads."""
        if not self.dimensions:
            if len(universe.shape) != 2 and not (len(universe.shape) == 3 and universe.shape[2] in [3, 4]):
                raise ValueError("Only handles 2D arrays of numbers, or 2D arrays of RGB(A) values")
            self.dimensions = universe.shape
        if self.dimensions != universe.shape:
            raise ValueError("Shape changed from {} to {}".format(self.dimensions, universe.shape))
        self.frames.append((plt.imshow(universe, norm=self.normalize, cmap=self.colors),))
    def render(self, filename, interval=300):
        """Write the collected frames to *filename* via imagemagick."""
        im_ani = animation.ArtistAnimation(
            self.fig, self.frames, interval=interval, repeat_delay=3000, blit=True
        )
        im_ani.save(filename, writer="imagemagick")
| import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.animation as animation
class AnimatedGif:
""" Setup various rendering things
"""
def __init__(self, dpi=100, colors="Purples"):
self.frames = []
self.fig = plt.figure(dpi=dpi)
plt.axis("off")
self.colors = colors
self.dimensions = None
def append(self, universe):
if not self.dimensions:
if len(universe.shape) != 2 and not (len(universe.shape) == 3 and universe.shape[2] in [3, 4]):
raise ValueError("Only handles 2D arrays of numbers, or 2D arrays of RGB(A) values")
self.dimensions = universe.shape
if self.dimensions != universe.shape:
raise ValueError("Shape changed from {} to {}".format(self.dimensions, universe.shape))
self.frames.append((plt.imshow(universe, cmap=self.colors),))
def render(self, filename, interval=300):
im_ani = animation.ArtistAnimation(
self.fig, self.frames, interval=interval, repeat_delay=3000, blit=True
)
im_ani.save(filename, writer="imagemagick")
| apache-2.0 | Python |
5fdb39f158fea385c25cae93e47fc91683222724 | Fix person_by_id call. | fedora-infra/python-fedora | fedora/django/__init__.py | fedora/django/__init__.py | # -*- coding: utf-8 -*-
#
# Copyright © 2009 Ignacio Vazquez-Abrams All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details. You should have
# received a copy of the GNU General Public License along with this program;
# if not, write to the Free Software Foundation, Inc., 51 Franklin Street,
# Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat trademarks that are
# incorporated in the source code or documentation are not subject to the GNU
# General Public License and may only be used or replicated with the express
# permission of Red Hat, Inc.
#
'''
.. moduleauthor:: Ignacio Vazquez-Abrams <ivazquez@fedoraproject.org>
'''
import threading
from fedora.client import ProxyClient
from django.conf import settings
connection = None
if not connection:
connection = ProxyClient(settings.FAS_URL, settings.FAS_USERAGENT,
session_as_cookie=False)
def person_by_id(userid):
if not hasattr(local, 'session_id'):
return None
sid, userinfo = connection.send_request('json/person_by_id',
req_params={'person_id': userid},
auth_params={'session_id': local.session_id})
return userinfo
local = threading.local()
| # -*- coding: utf-8 -*-
#
# Copyright © 2009 Ignacio Vazquez-Abrams All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details. You should have
# received a copy of the GNU General Public License along with this program;
# if not, write to the Free Software Foundation, Inc., 51 Franklin Street,
# Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat trademarks that are
# incorporated in the source code or documentation are not subject to the GNU
# General Public License and may only be used or replicated with the express
# permission of Red Hat, Inc.
#
'''
.. moduleauthor:: Ignacio Vazquez-Abrams <ivazquez@fedoraproject.org>
'''
import threading
from fedora.client import ProxyClient
from django.conf import settings
connection = None
if not connection:
connection = ProxyClient(settings.FAS_URL, settings.FAS_USERAGENT,
session_as_cookie=False)
def person_by_id(userid):
    """Return the FAS account information for *userid*.

    Returns None when the calling thread has no FAS session; otherwise
    queries the ``json/person_by_id`` endpoint authenticated with the
    thread-local session id.
    """
    if not hasattr(local, 'session_id'):
        return None
    # The FAS json/person_by_id endpoint takes the target account id as
    # ``person_id``; sending it as ``id`` did not match any account.
    sid, userinfo = connection.send_request('json/person_by_id',
            req_params={'person_id': userid},
            auth_params={'session_id': local.session_id})
    return userinfo
local = threading.local()
| lgpl-2.1 | Python |
174d037435f8f2f86778099472eb77c9899308e8 | add author to image admin | DylannCordel/django-filer,jakob-o/django-filer,skirsdeda/django-filer,mkoistinen/django-filer,lory87/django-filer,stefanfoulis/django-filer,o-zander/django-filer,mkoistinen/django-filer,nimbis/django-filer,nephila/django-filer,DylannCordel/django-filer,kriwil/django-filer,lory87/django-filer,lory87/django-filer,nephila/django-filer,vechorko/django-filer,mkoistinen/django-filer,nimbis/django-filer,vechorko/django-filer,sopraux/django-filer,matthiask/django-filer,belimawr/django-filer,Flight/django-filer,civicresourcegroup/django-filer,divio/django-filer,belimawr/django-filer,Flight/django-filer,webu/django-filer,webu/django-filer,kriwil/django-filer,skirsdeda/django-filer,stefanfoulis/django-filer,nephila/django-filer,civicresourcegroup/django-filer,belimawr/django-filer,lory87/django-filer,stefanfoulis/django-filer,DylannCordel/django-filer,stefanfoulis/django-filer,o-zander/django-filer,sopraux/django-filer,webu/django-filer,mkoistinen/django-filer,jakob-o/django-filer,Flight/django-filer,webu/django-filer,vechorko/django-filer,DylannCordel/django-filer,skirsdeda/django-filer,sopraux/django-filer,skirsdeda/django-filer,sopraux/django-filer,Flight/django-filer,skirsdeda/django-filer,matthiask/django-filer,DylannCordel/django-filer,matthiask/django-filer,o-zander/django-filer,civicresourcegroup/django-filer,civicresourcegroup/django-filer,belimawr/django-filer,divio/django-filer,matthiask/django-filer,nimbis/django-filer,Flight/django-filer,nimbis/django-filer,vechorko/django-filer,divio/django-filer,divio/django-filer,jakob-o/django-filer,kriwil/django-filer,stefanfoulis/django-filer,jakob-o/django-filer,kriwil/django-filer,o-zander/django-filer,jakob-o/django-filer | filer/admin/imageadmin.py | filer/admin/imageadmin.py | #-*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext as _
from filer import settings as filer_settings
from filer.admin.fileadmin import FileAdmin
from filer.models import Image
class ImageAdminForm(forms.ModelForm):
    """Admin form for Image with a subject-location (focal point) field."""
    subject_location = forms.CharField(
        max_length=64, required=False,
        label=_('Subject location'),
        help_text=_('Location of the main subject of the scene.'))
    def sidebar_image_ratio(self):
        """Return the sidebar preview ratio as a locale-independent string."""
        if self.instance:
            # this is very important. It forces the value to be returned as a
            # string and always with a "." as seperator. If the conversion
            # from float to string is done in the template, the locale will
            # be used and in some cases there would be a "," instead of ".".
            # javascript would parse that to an integer.
            return '%.6F' % self.instance.sidebar_image_ratio()
        else:
            return ''
    class Meta:
        model = Image
        exclude = ()
    class Media:
        css = {
            #'all': (settings.MEDIA_URL + 'filer/css/focal_point.css',)
        }
        js = (
            filer_settings.FILER_STATICMEDIA_PREFIX + 'js/raphael.js',
            filer_settings.FILER_STATICMEDIA_PREFIX + 'js/focal_point.js',
        )
class ImageAdmin(FileAdmin):
form = ImageAdminForm
ImageAdmin.fieldsets = ImageAdmin.build_fieldsets(
extra_main_fields=('author', 'default_alt_text', 'default_caption',),
extra_fieldsets=(
('Subject Location', {
'fields': ('subject_location',),
'classes': ('collapse',),
}),
)
)
| #-*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext as _
from filer import settings as filer_settings
from filer.admin.fileadmin import FileAdmin
from filer.models import Image
class ImageAdminForm(forms.ModelForm):
subject_location = forms.CharField(
max_length=64, required=False,
label=_('Subject location'),
help_text=_('Location of the main subject of the scene.'))
def sidebar_image_ratio(self):
if self.instance:
# this is very important. It forces the value to be returned as a
# string and always with a "." as seperator. If the conversion
# from float to string is done in the template, the locale will
# be used and in some cases there would be a "," instead of ".".
# javascript would parse that to an integer.
return '%.6F' % self.instance.sidebar_image_ratio()
else:
return ''
class Meta:
model = Image
exclude = ()
class Media:
css = {
#'all': (settings.MEDIA_URL + 'filer/css/focal_point.css',)
}
js = (
filer_settings.FILER_STATICMEDIA_PREFIX + 'js/raphael.js',
filer_settings.FILER_STATICMEDIA_PREFIX + 'js/focal_point.js',
)
class ImageAdmin(FileAdmin):
form = ImageAdminForm
ImageAdmin.fieldsets = ImageAdmin.build_fieldsets(
extra_main_fields=('default_alt_text', 'default_caption',),
extra_fieldsets=(
('Subject Location', {
'fields': ('subject_location',),
'classes': ('collapse',),
}),
)
)
| bsd-3-clause | Python |
1d4d6862429d472ae4269004d1e6fe57aa428cc1 | Bump to version 0.15.2 | nerevu/prometheus-api,nerevu/prometheus-api,nerevu/prometheus-api | app/setup.py | app/setup.py | try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
with open('../requirements.txt') as file:
requirements = file.read()
# Package metadata handed to setup().  Fixes two syntax errors that
# made this script unrunnable: a missing comma after 'zip_safe' and a
# missing ':' (plus trailing comma) on the 'platforms' entry.
config = {
    'name': 'prometheus',
    'description': 'a global asset allocation tool',
    'long_description': open('README.rst', 'rt').read(),
    'author': 'Reuben Cummings',
    'url': 'https://github.com/reubano/prometheus',
    'download_url':
        'https://github.com/reubano/prometheus/downloads/prometheus*.tgz',
    'author_email': 'reubano@gmail.com',
    'version': '0.15.2',
    'install_requires': requirements.split('\n'),
    'classifiers': ['Development Status :: 4 - Beta',
        'License :: OSI Approved :: The MIT License (MIT)',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: Microsoft :: POSIX'],
    'packages': find_packages(),
    'zip_safe': False,
    'license': 'MIT',
    'platforms': ['MacOS X', 'Windows', 'Linux'],
    'include_package_data': True}
setup(**config)
| try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
with open('../requirements.txt') as file:
requirements = file.read()
# Package metadata handed to setup().  Fixes two syntax errors that
# made this script unrunnable: a missing comma after 'zip_safe' and a
# missing ':' (plus trailing comma) on the 'platforms' entry.
config = {
    'name': 'prometheus',
    'description': 'a global asset allocation tool',
    'long_description': open('README.rst', 'rt').read(),
    'author': 'Reuben Cummings',
    'url': 'https://github.com/reubano/prometheus',
    'download_url':
        'https://github.com/reubano/prometheus/downloads/prometheus*.tgz',
    'author_email': 'reubano@gmail.com',
    'version': '0.15.1',
    'install_requires': requirements.split('\n'),
    'classifiers': ['Development Status :: 4 - Beta',
        'License :: OSI Approved :: The MIT License (MIT)',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: Microsoft :: POSIX'],
    'packages': find_packages(),
    'zip_safe': False,
    'license': 'MIT',
    'platforms': ['MacOS X', 'Windows', 'Linux'],
    'include_package_data': True}
setup(**config)
| mit | Python |
eb31c9db7e2bf083b42141e6149fc65fd26ebf17 | Use Flask 0.11.1 | macbre/wbc.macbre.net,macbre/wbc.macbre.net,macbre/wbc.macbre.net,macbre/wbc.macbre.net | app/setup.py | app/setup.py | from setuptools import setup, find_packages
# @see https://github.com/pypa/sampleproject/blob/master/setup.py
setup(
name='wbc',
version='0.0.0',
author='Maciej Brencz',
author_email='maciej.brencz@gmail.com',
description='Flask app providing WBC archives API',
url='https://github.com/macbre/wbc.macbre.net',
packages=find_packages(),
install_requires=[
'coverage==4.0.1',
'flask==0.11.1',
'monolog-python==0.1.0',
'PyMySQL==0.7.5',
'pytest==2.8.2',
],
include_package_data=True,
entry_points={
'console_scripts': [
'server=wbc.app:start'
],
}
)
| from setuptools import setup, find_packages
# @see https://github.com/pypa/sampleproject/blob/master/setup.py
setup(
name='wbc',
version='0.0.0',
author='Maciej Brencz',
author_email='maciej.brencz@gmail.com',
description='Flask app providing WBC archives API',
url='https://github.com/macbre/wbc.macbre.net',
packages=find_packages(),
install_requires=[
'coverage==4.0.1',
'flask==0.10.1',
'monolog-python==0.1.0',
'PyMySQL==0.7.5',
'pytest==2.8.2',
],
include_package_data=True,
entry_points={
'console_scripts': [
'server=wbc.app:start'
],
}
)
| mit | Python |
8536e100fd734d7293f559c890a73b527f5ab03c | fix typo | louridas/djbr,louridas/djbr | manage.py | manage.py | #!/usr/bin/env python3
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project_site.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| #!/usr/bin/env python3
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project_site.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| bsd-2-clause | Python |
30275d0677414220409e5d9fc1c9b5a8e1138955 | update info to db | hack4impact/clean-air-council,hack4impact/clean-air-council,hack4impact/clean-air-council | app/utils.py | app/utils.py | from flask import url_for
from app.models import IdlingIncident
def register_template_utils(app):
    """Register Jinja 2 helpers (called from __init__.py)."""
    @app.template_test()
    def equalto(value, other):
        # Jinja test usage: {{ value is equalto other }}
        return value == other
    @app.template_global()
    def is_hidden_field(field):
        # Imported here so wtforms is only required at template time.
        from wtforms.fields import HiddenField
        return isinstance(field, HiddenField)
    app.add_template_global(index_for_role)
def index_for_role(role):
return url_for(role.name + '.index')
def parse_to_db(db, filename):
import csv, geocoder
city_default = ', philadelphia, pennsylvania, usa'
vehicle_id_index = 8
license_plate_index = 9
location_index = 4
date_index = 0
agency_index = 6
picture_index = 13
description_index = 11
with open(filename, 'rb') as file:
reader = csv.reader(file, delimiter=',')
columns = reader.next()
for row in reader:
print row
address_text = row[location_index]
# TODO: error handling for geocoder
coordinates = geocoder.arcgis(address_text + city_default).latlng
loc = Location(
latitude=coordinates[0],
longitude=coordinates[1],
original_user_text=address_text)
db.session.add(loc)
incident = IdlingIncident(
vehicle_id=row[vehicle_id_index],
license_plate=row[license_plate_index],
location=loc,
date=row[date_index],
duration=....,
picture_url=row[picture_index],
description=row[description_index])
db.session.add(incident)
db.session.commit()
return columns
| from flask import url_for
from app.models import IdlingIncident
def register_template_utils(app):
"""Register Jinja 2 helpers (called from __init__.py)."""
@app.template_test()
def equalto(value, other):
return value == other
@app.template_global()
def is_hidden_field(field):
from wtforms.fields import HiddenField
return isinstance(field, HiddenField)
app.add_template_global(index_for_role)
def index_for_role(role):
return url_for(role.name + '.index')
def parse_to_db(db, filename):
import csv, geocoder
city_default = ', philadelphia, pennsylvania, usa'
vehicle_id_index = 7;
location_index = 3;
date_index = 0;
agency_index = 6;
picture_index = 13;
description_index = 11;
with open(filename, 'rb') as file:
reader = csv.reader(file, delimiter=',')
columns = reader.next()
for row in reader:
print row
a = address=row[location_index]
g = geocoder.arcgis(a + city_default).latlng
l = Location(lat=g[0], long=g[1], address=a)
i = IdlingIncident(vehicle_id=row[vehicle_id_index], l,
date=row[date_index], agency=row[agency_index], picture=row[picture_index],
description=row[description_index])
db.session.add(i)
db.session.commit()
return columns
| mit | Python |
005ebdf83d2ad23d6dc7c3326da4ad72f101f8db | test debug=true on new project tree | if1live/importd-boilerplate,if1live/importd-boilerplate | manage.py | manage.py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
from importd import d
import os
import sys
def get_sentry_apps():
if 'SENTRY_DSN' in os.environ:
return ('raven.contrib.django.raven_compat',)
else:
return ()
if 'gunicorn' in sys.argv[0]:
DEBUG = False
else:
DEBUG = True
DEBUG=True
d(
DEBUG=DEBUG,
INSTALLED_APPS=(
# external library
'django_nose',
# django rest framework
'rest_framework',
'rest_framework.authtoken',
'sella',
'demo',
'api',
) + get_sentry_apps(),
# django-jinja
DEFAULT_JINJA2_TEMPLATE_EXTENSION='.jinja2',
TEMPLATE_LOADERS=(
# django-jinja
'django_jinja.loaders.AppLoader',
'django_jinja.loaders.FileSystemLoader',
# django
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
),
# django-nose
TEST_RUNNER='django_nose.NoseTestSuiteRunner',
# sentry
RAVEN_CONFIG={
'dsn': os.environ['SENTRY_DSN'] if 'SENTRY_DSN' in os.environ else '',
},
# '*' or '127.0.0.1'
ALLOWED_HOSTS=['127.0.0.1'],
mounts={"demo": "/demo/", 'rest_framework': '/api/'}
)
if __name__ == "__main__":
d.main()
| #!/usr/bin/env python
#-*- coding: utf-8 -*-
from importd import d
import os
import sys
def get_sentry_apps():
if 'SENTRY_DSN' in os.environ:
return ('raven.contrib.django.raven_compat',)
else:
return ()
if 'gunicorn' in sys.argv[0]:
DEBUG = False
else:
DEBUG = True
d(
DEBUG=DEBUG,
INSTALLED_APPS=(
# external library
'django_nose',
# django rest framework
'rest_framework',
'rest_framework.authtoken',
'sella',
'demo',
'api',
) + get_sentry_apps(),
# django-jinja
DEFAULT_JINJA2_TEMPLATE_EXTENSION='.jinja2',
TEMPLATE_LOADERS=(
# django-jinja
'django_jinja.loaders.AppLoader',
'django_jinja.loaders.FileSystemLoader',
# django
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
),
# django-nose
TEST_RUNNER='django_nose.NoseTestSuiteRunner',
# sentry
RAVEN_CONFIG={
'dsn': os.environ['SENTRY_DSN'] if 'SENTRY_DSN' in os.environ else '',
},
# '*' or '127.0.0.1'
ALLOWED_HOSTS=['127.0.0.1'],
mounts={"demo": "/demo/", 'rest_framework': '/api/'}
)
if __name__ == "__main__":
d.main()
| mit | Python |
ce7c31f3dd97716051b72951c7c745dd2c63efcd | Include timestamp in audit logs | RafaelPalomar/girder,data-exp-lab/girder,RafaelPalomar/girder,data-exp-lab/girder,girder/girder,kotfic/girder,RafaelPalomar/girder,Kitware/girder,manthey/girder,jbeezley/girder,manthey/girder,girder/girder,data-exp-lab/girder,kotfic/girder,RafaelPalomar/girder,Kitware/girder,Kitware/girder,data-exp-lab/girder,kotfic/girder,girder/girder,manthey/girder,jbeezley/girder,data-exp-lab/girder,manthey/girder,girder/girder,Kitware/girder,RafaelPalomar/girder,kotfic/girder,kotfic/girder,jbeezley/girder,jbeezley/girder | plugins/audit_logs/server/__init__.py | plugins/audit_logs/server/__init__.py | import cherrypy
import datetime
import logging
from girder import auditLogger
from girder.models.model_base import Model
from girder.api.rest import getCurrentUser
class Record(Model):
def initialize(self):
self.name = 'audit_log_record'
def validate(self, doc):
return doc
class AuditLogHandler(logging.Handler):
def handle(self, record):
user = getCurrentUser()
Record().save({
'type': record.msg,
'details': record.details,
'ip': cherrypy.request.remote.ip,
'userId': user and user['_id'],
'when': datetime.datetime.utcnow()
})
def load(info):
auditLogger.addHandler(AuditLogHandler())
| import cherrypy
import logging
from girder import auditLogger
from girder.models.model_base import Model
from girder.api.rest import getCurrentUser
class Record(Model):
def initialize(self):
self.name = 'audit_log_record'
def validate(self, doc):
return doc
class AuditLogHandler(logging.Handler):
def handle(self, record):
user = getCurrentUser()
Record().save({
'type': record.msg,
'details': record.details,
'ip': cherrypy.request.remote.ip,
'userId': user and user['_id']
})
def load(info):
auditLogger.addHandler(AuditLogHandler())
| apache-2.0 | Python |
fc4d76fc60cbb929665449b461bb99fcc470acd2 | Remove second create_admin function | dylanshine/streamschool,dylanshine/streamschool | manage.py | manage.py | import datetime
from project import app, db
from project import models
from project.models import User
from flask.ext.script import Manager
from flask.ext.migrate import Migrate, MigrateCommand
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
@manager.command
def create_db():
"""Creates the db tables."""
db.create_all()
@manager.command
def drop_db():
"""Drops the db tables."""
db.drop_all()
@manager.command
def create_admin():
"""Creates the admin user."""
db.session.add(User(
email="ad@min.com",
password="admin",
admin=True,
confirmed=True,
confirmed_on=datetime.datetime.now())
)
db.session.commit()
if __name__ == '__main__':
manager.run()
| import datetime
from project import app, db
from project import models
from flask.ext.script import Manager
from flask.ext.migrate import Migrate, MigrateCommand
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
@manager.command
def create_db():
"""Creates the db tables."""
db.create_all()
@manager.command
def drop_db():
"""Drops the db tables."""
db.drop_all()
@manager.command
def create_admin():
"""Creates the admin user."""
db.session.add(User("ad@min.com", "admin"))
db.session.commit()
@manager.command
def create_admin():
"""Creates the admin user."""
db.session.add(User(
email="ad@min.com",
password="admin",
admin=True,
confirmed=True,
confirmed_on=datetime.datetime.now())
)
db.session.commit()
if __name__ == '__main__':
manager.run()
| mit | Python |
f334a9c008eb7fada006ea1a2471c3ba97cfafb3 | fix python2 syntax | lanpa/tensorboardX,lanpa/tensorboardX | tensorboardX/graph.py | tensorboardX/graph.py | from .src.graph_pb2 import GraphDef
from .src.node_def_pb2 import NodeDef
from .src.versions_pb2 import VersionDef
from .src.attr_value_pb2 import AttrValue
from .src.tensor_shape_pb2 import TensorShapeProto
import torch
def replace(name, scope):
print(type(name), name)
return '/'.join([scope[name], name])
def parse(graph):
scope = {}
for n in graph.nodes():
inputs = [i.uniqueName() for i in n.inputs()]
for i in range(1, len(inputs)):
scope[inputs[i]] = n.scopeName()
uname = next(n.outputs()).uniqueName()
assert n.scopeName() != '', '{} has empty scope name'.format(n)
scope[uname] = n.scopeName()
scope['0'] = 'input'
nodes = []
for n in graph.nodes():
attrs = {k: n[k] for k in n.attributeNames()}
attrs = str(attrs).replace("'", ' ') # singlequote will be escaped by tensorboard
inputs = [replace(i.uniqueName(), scope) for i in n.inputs()]
uname = next(n.outputs()).uniqueName()
nodes.append({'name': replace(uname, scope), 'op': n.kind(), 'inputs': inputs, 'attr': attrs})
for n in graph.inputs():
print(n.type())
uname = n.uniqueName()
nodes.append({'name': replace(uname, scope), 'op': 'Parameter', 'inputs': [], 'attr': str(n.type())})
return nodes
def graph(model, args):
with torch.onnx.set_training(model, False):
trace, _ = torch.jit.trace(model, args)
torch.onnx._optimize_trace(trace, False)
graph = trace.graph()
print(graph)
list_of_nodes = parse(graph)
nodes = []
for node in list_of_nodes:
nodes.append(
NodeDef(name=node['name'], op=node['op'], input=node['inputs'],
attr={'lanpa': AttrValue(s=node['attr'].encode(encoding='utf_8'))}))
print(nodes)
return GraphDef(node=nodes, versions=VersionDef(producer=22))
| from .src.graph_pb2 import GraphDef
from .src.node_def_pb2 import NodeDef
from .src.versions_pb2 import VersionDef
from .src.attr_value_pb2 import AttrValue
from .src.tensor_shape_pb2 import TensorShapeProto
import torch
def replace(name, scope):
print(type(name), name)
return '/'.join([scope[name], name])
def parse(graph):
scope = {}
for n in graph.nodes():
inputs = [i.uniqueName() for i in n.inputs()]
for i in range(1, len(inputs)):
scope[inputs[i]] = n.scopeName()
uname = next(n.outputs()).uniqueName()
assert n.scopeName() != '', print(n, 'has empty scope name')
scope[uname] = n.scopeName()
scope['0'] = 'input'
nodes = []
for n in graph.nodes():
attrs = {k: n[k] for k in n.attributeNames()}
attrs = str(attrs).replace("'", ' ') # singlequote will be escaped by tensorboard
inputs = [replace(i.uniqueName(), scope) for i in n.inputs()]
uname = next(n.outputs()).uniqueName()
nodes.append({'name': replace(uname, scope), 'op': n.kind(), 'inputs': inputs, 'attr': attrs})
for n in graph.inputs():
print(n.type())
uname = n.uniqueName()
nodes.append({'name': replace(uname, scope), 'op': 'Parameter', 'inputs': [], 'attr': str(n.type())})
return nodes
def graph(model, args):
with torch.onnx.set_training(model, False):
trace, _ = torch.jit.trace(model, args)
torch.onnx._optimize_trace(trace, False)
graph = trace.graph()
print(graph)
list_of_nodes = parse(graph)
nodes = []
for node in list_of_nodes:
nodes.append(
NodeDef(name=node['name'], op=node['op'], input=node['inputs'],
attr={'lanpa': AttrValue(s=node['attr'].encode(encoding='utf_8'))}))
print(nodes)
return GraphDef(node=nodes, versions=VersionDef(producer=22))
| mit | Python |
3453b7961fae365f833199c88c7395428fcdf788 | Set default max threads for multithreading to 3 | TensorPy/TensorPy,TensorPy/TensorPy | tensorpy/constants.py | tensorpy/constants.py | """ Defining constants for tensorpy use """
DOWNLOADS_FOLDER = "downloads_folder" # Folder created for downloaded files
MIN_W_H = 50 # Minimum width/height for classifying images on a page
MAX_THREADS = 3 # Total threads to spin up for classifying images on a page
MAX_IMAGES_PER_PAGE = 15 # Limit of image classifications per page
| """ Defining constants for tensorpy use """
DOWNLOADS_FOLDER = "downloads_folder" # Folder created for downloaded files
MIN_W_H = 50 # Minimum width/height for classifying images on a page
MAX_THREADS = 4 # Total threads to spin up for classifying images on a page
MAX_IMAGES_PER_PAGE = 15 # Limit of image classifications per page
| mit | Python |
749f5159eb14be492dc76624f03c3a3c863b4582 | add comment matching to sb regex | kirichoi/tellurium,kirichoi/tellurium,sys-bio/tellurium,sys-bio/tellurium | tellurium/teconverters/antimony_regex.py | tellurium/teconverters/antimony_regex.py | from __future__ import print_function, division, absolute_import
def getModelStartRegex():
""" Return the regex string for Antimony model start. Matches whole line. """
return r'^\s*\*?\s*model\s*[^()\s]+\s*(\([^)]*\))?\s*(//.*)?$'
def getFunctionStartRegex():
""" Return the regex string for Antimony model start. Matches whole line. """
return r'^\s*function\s*[^()\s]*\s*(\([^)]*\))?\s*$'
def getModelEndRegex():
""" Return the regex string for Antimony model end. Matches whole line. """
return r'^\s*end\s*$'
def getSBORegex():
""" Return the regex string for Antimony model end. Matches whole line. """
return r'^\s*([^.]+)\.sboTerm\s*=\s*(SBO:)?([0-9]+)\s*(;)?\s*$'
| from __future__ import print_function, division, absolute_import
def getModelStartRegex():
""" Return the regex string for Antimony model start. Matches whole line. """
return r'^\s*\*?\s*model\s*[^()\s]+\s*(\([^)]*\))?\s*$'
def getFunctionStartRegex():
""" Return the regex string for Antimony model start. Matches whole line. """
return r'^\s*function\s*[^()\s]*\s*(\([^)]*\))?\s*$'
def getModelEndRegex():
""" Return the regex string for Antimony model end. Matches whole line. """
return r'^\s*end\s*$'
def getSBORegex():
""" Return the regex string for Antimony model end. Matches whole line. """
return r'^\s*([^.]+)\.sboTerm\s*=\s*(SBO:)?([0-9]+)\s*(;)?\s*$'
| apache-2.0 | Python |
12afaf5e52d640379dd26e659ed20b488803a3aa | Bump version | impactlab/oeem-energy-datastore,impactlab/oeem-energy-datastore,impactlab/oeem-energy-datastore | oeem_energy_datastore/__init__.py | oeem_energy_datastore/__init__.py | from __future__ import absolute_import
from .celery import app as celery_app
__title__ = 'Open Energy Efficiency Meter Datastore'
__version__ = '0.1.2'
__author__ = 'Phil Ngo'
__license__ = 'MIT'
__copyright__ = 'Copyright 2011-2016 Open Energy Efficiency'
# Version synonym
VERSION = __version__
| from __future__ import absolute_import
from .celery import app as celery_app
__title__ = 'Open Energy Efficiency Meter Datastore'
__version__ = '0.1.1'
__author__ = 'Phil Ngo'
__license__ = 'MIT'
__copyright__ = 'Copyright 2011-2016 Open Energy Efficiency'
# Version synonym
VERSION = __version__
| mit | Python |
b2069b2b4a07d82cc6831dde0e396d7dae79d23e | Set an initial size and disallow resizing | fkmclane/AutoPidact | autopidact/view.py | autopidact/view.py | from gi.repository import Gtk, GdkPixbuf, GLib
import cv2
class View(Gtk.Window):
def __init__(self, title, camera, interval=200):
Gtk.Window.__init__(self)
self.set_title(title)
self.set_size_request(640, 480)
self.set_resizable(False)
self.cam = camera
self.img = Gtk.Image()
self.add(self.img)
GLib.timeout_add(interval, self.update)
def update(self):
if self.cam.isReady():
frame = cv2.cvtColor(self.cam.getFrame(), cv2.COLOR_BGR2RGB)
self.img.set_from_pixbuf(GdkPixbuf.Pixbuf.new_from_data(frame.data, GdkPixbuf.Colorspace.RGB, False, 8, frame.shape[1], frame.shape[0], frame.strides[0], None, None))
else:
print('not ready')
| from gi.repository import Gtk, GdkPixbuf, GLib
import cv2
class View(Gtk.Window):
def __init__(self, title, camera, interval=200):
Gtk.Window.__init__(self)
self.set_title(title)
self.cam = camera
self.img = Gtk.Image()
self.add(self.img)
GLib.timeout_add(interval, self.update)
def update(self):
if self.cam.isReady():
frame = cv2.cvtColor(self.cam.getFrame(), cv2.COLOR_BGR2RGB)
self.img.set_from_pixbuf(GdkPixbuf.Pixbuf.new_from_data(frame.data, GdkPixbuf.Colorspace.RGB, False, 8, frame.shape[1], frame.shape[0], frame.strides[0], None, None))
else:
print('not ready')
| mit | Python |
c259d806eaff2a3c806192ea522e970408613cce | Update B.py | Pouf/CodingCompetition,Pouf/CodingCompetition | Google-Code-Jam/2016-1B/B.py | Google-Code-Jam/2016-1B/B.py | import os
import sys
script = __file__
script_path = os.path.dirname(script)
script_file = os.path.basename(script)[0]
files = [f for f in os.listdir(script_path) if script_file in f and '.in' in f]
if '{}-large'.format(script_file) in str(files):
size = 'large'
elif '{}-small'.format(script_file) in str(files):
size = 'small'
elif '{}-test'.format(script_file) in str(files):
size = 'test'
else:
print('{}-test not found'.format(script_file))
sys.exit()
latest = sorted(f for f in files if size in f)[-1][:-3]
f = '{}/{}'.format(script_path, latest)
i = open(f + '.in', 'r')
o = open(f + '.out', 'w')
print(f)
T = int(i.readline())
# https://code.google.com/codejam/contest/11254486/dashboard#s=p1
# Problem B. Close Match
for x in range(T):
C, J = i.readline().rstrip().split()
c, j = '', ''
leading = 1
N = len(C)
for i in range(N):
a, b = C[i], J[i]
if i < N - 1:
a1, b1 = C[i + 1], J[i + 1]
if i < N - 1 and ('?' in a + b and '?' not in a1 + b1):
if abs(int(a1) - int(b1)) > 5:
c += [a, ['1', '0'][a1 > b1]][a == '?']
j += [b, ['0', '1'][a1 > b1]][b == '?']
else:
c += [a, [b, '0'][b == '?']][a == '?']
j += [b, [a, '0'][a == '?']][b == '?']
else:
if c == j:
c += [a, [b, '0'][b == '?']][a == '?']
j += [b, [a, '0'][a == '?']][b == '?']
elif c > j:
c += [a, '0'][a == '?']
j += [b, '9'][b == '?']
elif c < j:
c += [a, '9'][a == '?']
j += [b, '0'][b == '?']
y = '{} {}'.format(c, j)
print(C, J, c, j)
o.write('{}Case #{}: {}'.format(['', '\n'][x > 0], x + 1, y))
i.close()
o.close()
| import os
import sys
script = __file__
scriptPath = os.path.dirname(script)
scriptFile = os.path.basename(script)[0]
files = [f for f in os.listdir(scriptPath) if scriptFile in f and '.in' in f]
if '{}-large'.format(scriptFile) in str(files):
size = 'large'
elif '{}-small'.format(scriptFile) in str(files):
size = 'small'
elif '{}-test'.format(scriptFile) in str(files):
size = 'test'
else:
print('{}-test not found'.format(scriptFile))
sys.exit()
latest = sorted(f for f in files if size in f)[-1][:-3]
f = '{}/{}'.format(scriptPath, latest)
i = open(f + '.in', 'r')
o = open(f + '.out', 'w')
print(f)
T = int(i.readline())
# https://code.google.com/codejam/contest/11254486/dashboard#s=p1
# Problem B. Close Match
for x in range(T):
C, J = i.readline().rstrip().split()
c, j = '', ''
leading = 1
N = len(C)
for i in range(N):
a, b = C[i], J[i]
if i < N - 1:
a1, b1 = C[i + 1], J[i + 1]
if i < N - 1 and ('?' in a + b and '?' not in a1 + b1):
if abs(int(a1) - int(b1)) > 5:
c += [a, ['1', '0'][a1 > b1]][a == '?']
j += [b, ['0', '1'][a1 > b1]][b == '?']
else:
c += [a, [b, '0'][b == '?']][a == '?']
j += [b, [a, '0'][a == '?']][b == '?']
else:
if c == j:
c += [a, [b, '0'][b == '?']][a == '?']
j += [b, [a, '0'][a == '?']][b == '?']
elif c > j:
c += [a, '0'][a == '?']
j += [b, '9'][b == '?']
elif c < j:
c += [a, '9'][a == '?']
j += [b, '0'][b == '?']
y = '{} {}'.format(c, j)
print(C, J, c, j)
o.write('{}Case #{}: {}'.format(['', '\n'][x > 0], x + 1, y))
i.close()
o.close()
| mit | Python |
79a88b1866227542731ff555925557fb22a900d4 | bump version to 0.2.10 | dcoker/awsmfa,dcoker/awsmfa | awsmfa/_version.py | awsmfa/_version.py | """0.2.10"""
VERSION = __doc__
| """0.2.9"""
VERSION = __doc__
| apache-2.0 | Python |
499ff491a6f2d6448ff8b5238c1182cdadea2876 | Fix logging | alphagov/performanceplatform-collector,gds-attic/backdrop-collector,alphagov/performanceplatform-collector,gds-attic/backdrop-collector,alphagov/performanceplatform-collector | backdrop/collector/write.py | backdrop/collector/write.py | import datetime
import logging
import pytz
import requests
import json
class JsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
if obj.tzinfo is None:
obj = obj.replace(tzinfo=pytz.UTC)
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
class Bucket(object):
"""Client for writing to a backdrop bucket"""
def __init__(self, url, token):
self.url = url
self.token = token
def post(self, records):
headers = {
"Authorization": "Bearer %s" % self.token,
"Content-type": "application/json"
}
response = requests.post(
url=self.url,
headers=headers,
data=json.dumps(records, cls=JsonEncoder)
)
logging.debug("[Backdrop] " + response.text)
response.raise_for_status()
| import datetime
import logging
import pytz
import requests
import json
class JsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
if obj.tzinfo is None:
obj = obj.replace(tzinfo=pytz.UTC)
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
class Bucket(object):
"""Client for writing to a backdrop bucket"""
def __init__(self, url, token):
self.url = url
self.token = token
def post(self, records):
headers = {
"Authorization": "Bearer %s" % self.token,
"Content-type": "application/json"
}
response = requests.post(
url=self.url,
headers=headers,
data=json.dumps(records, cls=JsonEncoder)
)
logging.debug("[Backdrop] ", response.text)
response.raise_for_status()
| mit | Python |
63a0e47e27c33027e0ba02b2a553853c12790321 | Add the key parameter when getting entries | yeasy/CSIT_Test | base/modules/arp_handler.py | base/modules/arp_handler.py | """
CSIT test tools.
Authors: Baohua Yang@IBM, Denghui Huang@IBM
Updated: 2013-11-01
"""
import sys
sys.path.append('..')
from restlib import *
from testmodule import TestModule
sys.path.remove('..')
class ArpHandler(TestModule):
"""
Test for the arp handler.
Start 2-layer tree topology network. e.g., in Mininet, run 'sudo mn --controller=remote,ip=127.0.0.1 --mac --topo tree,2'
"""
def __init__(self, restSubContext='/controller/nb/v2/subnetservice', user=DEFAULT_USER, password=DEFAULT_PWD,
container=DEFAULT_CONTAINER, contentType='json', prefix=DEFAULT_PREFIX):
super(self.__class__, self).__init__(restSubContext, user, password, container, contentType, prefix)
def get_subnets(self):
"""
The name is suggested to match the NB API.
list all subnets and their properties.
"""
return super(self.__class__, self).get_entries('subnets','subnetConfig')
def add_subnet_gateway(self, name, body):
"""
Add a subnet gateway.
"""
super(self.__class__, self).add_entry('subnet', name, body)
def remove_subnet_gateway(self, name):
"""
Remove a subnet gateway.
"""
super(self.__class__, self).remove_entry('subnet', name)
def test_subnet_operations(self, name, body):
"""
Test subnet operations, like adding and removeing a subnet.
>>> ArpHandler().test_subnet_operations('test',{'name':'test','subnet':'10.0.0.254/8'})
True
"""
return super(self.__class__, self).test_add_remove_operations('subnets', 'subnet', name, body, 'subnetConfig')
if __name__ == '__main__':
print 'arp handler' | """
CSIT test tools.
Authors: Baohua Yang@IBM, Denghui Huang@IBM
Updated: 2013-11-01
"""
import sys
sys.path.append('..')
from restlib import *
from testmodule import TestModule
sys.path.remove('..')
class ArpHandler(TestModule):
"""
Test for the arp handler.
Start 2-layer tree topology network. e.g., in Mininet, run 'sudo mn --controller=remote,ip=127.0.0.1 --mac --topo tree,2'
"""
def __init__(self, restSubContext='/controller/nb/v2/subnetservice', user=DEFAULT_USER, password=DEFAULT_PWD,
container=DEFAULT_CONTAINER, contentType='json', prefix=DEFAULT_PREFIX):
super(self.__class__, self).__init__(restSubContext, user, password, container, contentType, prefix)
def get_subnets(self):
"""
The name is suggested to match the NB API.
list all subnets and their properties.
"""
return super(self.__class__, self).get_entries('subnets')
def add_subnet_gateway(self, name, body):
"""
Add a subnet gateway.
"""
super(self.__class__, self).add_entry('subnet', name, body)
def remove_subnet_gateway(self, name):
"""
Remove a subnet gateway.
"""
super(self.__class__, self).remove_entry('subnet', name)
def test_subnet_operations(self, name, body):
"""
Test subnet operations, like adding and removeing a subnet.
>>> ArpHandler().test_subnet_operations('test',{'name':'test','subnet':'10.0.0.254/8'})
True
"""
return super(self.__class__, self).test_add_remove_operations('subnets', 'subnet', name, body, 'subnetConfig')
if __name__ == '__main__':
print 'arp handler'
| epl-1.0 | Python |
fb01aa54032b7ab73dcd5a3e73d4ece5f36517e2 | Add comment to explain pagination handling | praekelt/familyconnect-registration,praekelt/familyconnect-registration | locations/tasks.py | locations/tasks.py | from future.standard_library import install_aliases
install_aliases() # noqa
from urllib.parse import urlparse
from celery.task import Task
from django.conf import settings
from seed_services_client import IdentityStoreApiClient
from .models import Parish
class SyncLocations(Task):
"""
Has a look at all the identity store identities, and ensures that all of
the locations assigned to identities appear in the list of locations.
"""
def get_identities(self, client):
"""
Returns an iterator over all the identities in the identity store
specified by 'client'.
"""
identities = client.get_identities()
while True:
for identity in identities.get('results', []):
yield identity
# If there is a next page, extract the querystring and get it
if identities.get('next') is not None:
qs = urlparse(identities['next']).query
identities = client.get_identities(params=qs)
else:
break
def run(self, **kwargs):
l = self.get_logger(**kwargs)
l.info('Starting location import')
imported_count = 0
client = IdentityStoreApiClient(
settings.IDENTITY_STORE_TOKEN, settings.IDENTITY_STORE_URL)
for identity in self.get_identities(client):
parish = identity.get('details', {}).get('parish')
if parish is not None:
_, created = Parish.objects.get_or_create(name=parish.title())
if created:
imported_count += 1
l.info('Imported {} locations'.format(imported_count))
return imported_count
sync_locations = SyncLocations()
| from future.standard_library import install_aliases
install_aliases() # noqa
from urllib.parse import urlparse
from celery.task import Task
from django.conf import settings
from seed_services_client import IdentityStoreApiClient
from .models import Parish
class SyncLocations(Task):
"""
Has a look at all the identity store identities, and ensures that all of
the locations assigned to identities appear in the list of locations.
"""
def get_identities(self, client):
"""
Returns an iterator over all the identities in the identity store
specified by 'client'.
"""
identities = client.get_identities()
while True:
for identity in identities.get('results', []):
yield identity
if identities.get('next') is not None:
qs = urlparse(identities['next']).query
identities = client.get_identities(params=qs)
else:
break
def run(self, **kwargs):
l = self.get_logger(**kwargs)
l.info('Starting location import')
imported_count = 0
client = IdentityStoreApiClient(
settings.IDENTITY_STORE_TOKEN, settings.IDENTITY_STORE_URL)
for identity in self.get_identities(client):
parish = identity.get('details', {}).get('parish')
if parish is not None:
_, created = Parish.objects.get_or_create(name=parish.title())
if created:
imported_count += 1
l.info('Imported {} locations'.format(imported_count))
return imported_count
sync_locations = SyncLocations()
| bsd-3-clause | Python |
a9c8741e07e2eadfd0cdebcb51922e15209c740c | add LoadTestShape to __all__ in order to fix warning "'LoadTestShape' is not declared in __all__" raised by pycharm code->inspect | locustio/locust,mbeacom/locust,locustio/locust,mbeacom/locust,locustio/locust,mbeacom/locust,locustio/locust,mbeacom/locust | locust/__init__.py | locust/__init__.py | from gevent import monkey
monkey.patch_all()
from .user.sequential_taskset import SequentialTaskSet
from .user import wait_time
from .user.task import task, tag, TaskSet
from .user.users import HttpUser, User
from .user.wait_time import between, constant, constant_pacing
from .shape import LoadTestShape
from .event import Events
events = Events()
__version__ = "1.4.3"
__all__ = (
"SequentialTaskSet",
"wait_time",
"task",
"tag",
"TaskSet",
"HttpUser",
"User",
"between",
"constant",
"constant_pacing",
"events",
"LoadTestShape",
)
# Used for raising a DeprecationWarning if old Locust/HttpLocust is used
from .util.deprecation import DeprecatedLocustClass as Locust
from .util.deprecation import DeprecatedHttpLocustClass as HttpLocust
| from gevent import monkey
monkey.patch_all()
from .user.sequential_taskset import SequentialTaskSet
from .user import wait_time
from .user.task import task, tag, TaskSet
from .user.users import HttpUser, User
from .user.wait_time import between, constant, constant_pacing
from .shape import LoadTestShape
from .event import Events
events = Events()
__version__ = "1.4.3"
__all__ = (
"SequentialTaskSet",
"wait_time",
"task",
"tag",
"TaskSet",
"HttpUser",
"User",
"between",
"constant",
"constant_pacing",
"events",
)
# Used for raising a DeprecationWarning if old Locust/HttpLocust is used
from .util.deprecation import DeprecatedLocustClass as Locust
from .util.deprecation import DeprecatedHttpLocustClass as HttpLocust
| mit | Python |
c8fbf939d3a2d3d11eb54fc3b89eef1b546551a8 | add new program urls | OKThess/website,OKThess/website,OKThess/website | main/urls.py | main/urls.py | from django.conf.urls import url
from django.contrib import admin
from . import views
app_name = 'main'
admin.site.site_header = 'OK!Thess administration'
urlpatterns = [
# /
url(r'^$', views.get_index, name='index'),
# /about/
url(r'^about/$', views.get_about, name='about'),
# /program/
url(r'^program/$', views.program_redir, name='program_redir'),
url(r'^program/teams/$', views.get_program_teams, name='program_teams'),
url(r'^program/mentors/$', views.get_program_mentors, name='program_mentors'),
url(r'^program/alumni/$', views.get_program_alumni, name='program_alumni'),
# /events/
url(r'^events/$', views.get_events, name='events'),
# /blog/
url(r'^blog/$', views.get_blog, name='blog'),
# e.g. /blog/sample-post
url(r'^blog/sample-post/$', views.get_blog_post_sample, name='post'),
# /contact/
url(r'^contact/$', views.get_contact, name='contact'),
# /apply/
url(r'^apply/$', views.apply, name='apply'),
]
| from django.conf.urls import url
from django.contrib import admin
from . import views
app_name = 'main'
admin.site.site_header = 'OK!Thess administration'
urlpatterns = [
# /
url(r'^$', views.get_index, name='index'),
# /about/
url(r'^about/$', views.get_about, name='about'),
# /program/
url(r'^program/$', views.get_program, name='program'),
# /events/
url(r'^events/$', views.get_events, name='events'),
# /blog/
url(r'^blog/$', views.get_blog, name='blog'),
# e.g. /blog/sample-post
url(r'^blog/sample-post/$', views.get_blog_post_sample, name='post'),
# /contact/
url(r'^contact/$', views.get_contact, name='contact'),
# /apply/
url(r'^apply/$', views.apply, name='apply'),
]
| mit | Python |
89206879c447613d1c780de0be089b00eb187ed3 | Remove debugging code inadvertently left in place | arista-eosplus/ansible-eos-acl,arista-eosplus/ansible-eos-acl | filter_plugins/netaddr.py | filter_plugins/netaddr.py | # Copyright (c) 2017, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
__metaclass__ = type
from netaddr import IPNetwork
def network_address(net_addr):
""" Converts a CIDR network address/mask string to a network address.
Args:
net_addr (str): a network address/mask string in
the format A.B.C.D/E.
Returns:
str: The network address specified by the input net_addr.
"""
ip = IPNetwork(net_addr)
return(str(ip.network))
class FilterModule(object):
def filters(self):
return {
'network_address': network_address,
}
| # Copyright (c) 2017, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
__metaclass__ = type
from netaddr import IPNetwork
def network_address(net_addr):
""" Converts a CIDR network address/mask string to a network address.
Args:
net_addr (str): a network address/mask string in
the format A.B.C.D/E.
Returns:
str: The network address specified by the input net_addr.
"""
ip = IPNetwork(net_addr)
import XXX
XXX.file("XXX")
return(str(ip.network))
class FilterModule(object):
def filters(self):
return {
'network_address': network_address,
}
| bsd-3-clause | Python |
6c3c03b4f9f1cd0f3fd23ebaddc007b162fdb1c2 | Update P2_phoneAndEmail.py changed pyperclip import to ATBS Ch8 | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | pythontutorials/books/AutomateTheBoringStuff/Ch07/P2_phoneAndEmail.py | pythontutorials/books/AutomateTheBoringStuff/Ch07/P2_phoneAndEmail.py | #! python3
"""Phone and email
Finds phone numbers and email addresses in the clipboard using :py:mod:`re` and
:py:mod:`pythontutorials.books.CrackingCodesWithPython.pyperclip`.
Attributes:
phoneRegex (re.compile): Regular expression object representing a phone number pattern.
emailRegex (re.compile): Regular expression object representing an email pattern.
"""
import re
from pythontutorials.books.AutomateTheBoringStuff.Ch08 import pyperclip
phoneRegex = re.compile(r'''(
(\d{3}|\(\d{3}\))? # area code (optional)
(\s|-|\.)? # separator
(\d{3}) # first 3 digits
(\s|-|\.) # separator
(\d{4}) # last 4 digits
(\s*(ext|x|ext.)\s*(\d{2,5}))? # extension (optional)
)''', re.VERBOSE)
# Create email regex
emailRegex = re.compile(r'''(
[a-zA-Z0-9._%+-]+ # username
@ # @ symbol
[a-zA-Z0-9.-]+ # domain name
(\.[a-zA-Z]{2,4}) # dot-something
)''', re.VERBOSE)
def main() -> None:
"""P2_phoneAndEmail.py
Checks clipboard text for phone numbers and emails using :py:mod:`re`. If found,
matches are copied to the clipboard and printed to terminal.
Returns:
None. Prints and copies matches to clipboard or prints status message.
"""
# Find matches in clipboard text.
text = str(pyperclip.paste())
matches = []
for groups in phoneRegex.findall(text):
phoneNum = '-'.join([groups[1], groups[3], groups[5]])
if groups[8] != '':
phoneNum += ' x' + groups[8]
matches.append(phoneNum)
for groups in emailRegex.findall(text):
matches.append(groups[0])
# Copy the results to the clipboard.
if len(matches) > 0:
pyperclip.copy('\n'.join(matches))
print('Copied to clipboard:')
print('\n'.join(matches))
else:
print('No phone numbers or email addresses found.')
if __name__ == '__main__':
main()
| #! python3
"""Phone and email
Finds phone numbers and email addresses in the clipboard using :py:mod:`re` and
:py:mod:`pythontutorials.books.CrackingCodesWithPython.pyperclip`.
Attributes:
phoneRegex (re.compile): Regular expression object representing a phone number pattern.
emailRegex (re.compile): Regular expression object representing an email pattern.
"""
import pythontutorials.books.CrackingCodesWithPython.pyperclip, re
phoneRegex = re.compile(r'''(
(\d{3}|\(\d{3}\))? # area code (optional)
(\s|-|\.)? # separator
(\d{3}) # first 3 digits
(\s|-|\.) # separator
(\d{4}) # last 4 digits
(\s*(ext|x|ext.)\s*(\d{2,5}))? # extension (optional)
)''', re.VERBOSE)
# Create email regex
emailRegex = re.compile(r'''(
[a-zA-Z0-9._%+-]+ # username
@ # @ symbol
[a-zA-Z0-9.-]+ # domain name
(\.[a-zA-Z]{2,4}) # dot-something
)''', re.VERBOSE)
def main() -> None:
"""P2_phoneAndEmail.py
Checks clipboard text for phone numbers and emails using :py:mod:`re`. If found,
matches are copied to the clipboard and printed to terminal.
Returns:
None. Prints and copies matches to clipboard or prints status message.
"""
# Find matches in clipboard text.
text = str(pythontutorials.books.CrackingCodesWithPython.pyperclip.paste())
matches = []
for groups in phoneRegex.findall(text):
phoneNum = '-'.join([groups[1], groups[3], groups[5]])
if groups[8] != '':
phoneNum += ' x' + groups[8]
matches.append(phoneNum)
for groups in emailRegex.findall(text):
matches.append(groups[0])
# Copy the results to the clipboard.
if len(matches) > 0:
pythontutorials.books.CrackingCodesWithPython.pyperclip.copy('\n'.join(matches))
print('Copied to clipboard:')
print('\n'.join(matches))
else:
print('No phone numbers or email addresses found.')
if __name__ == '__main__':
main()
| mit | Python |
5123cf157375da497397a3a411edf0a2e0aaa161 | remove unused import statement | istb-mia/miapy | miapy/data/indexexpression.py | miapy/data/indexexpression.py | import typing as t
# could maybe be replaced or wrapper with numpy.s_
class IndexExpression:
def __init__(self, indexing: t.Union[int, tuple, t.List[int], t.List[tuple]]=None,
axis: t.Union[int, tuple]=None) -> None:
self.expression = None
self.set_indexing(indexing, axis)
def set_indexing(self, indexing: t.Union[int, tuple, t.List[int], t.List[tuple]], axis: t.Union[int, tuple]=None):
if indexing is None:
self.expression = slice(None)
return
if isinstance(indexing, int) or isinstance(indexing, tuple):
indexing = [indexing]
if axis is None:
axis = tuple(range(len(indexing)))
if isinstance(axis, int):
axis = (axis,)
expr = [slice(None) for _ in range(max(axis) + 1)]
for a, index in zip(axis, indexing):
if isinstance(index, int):
expr[a] = index
elif isinstance(index, tuple):
start, stop = index
expr[a] = slice(start, stop)
# needs to be tuple otherwise exception from h5py while slicing
self.expression = tuple(expr)
def get_indexing(self):
indexing = []
for index in self.expression:
if index is None:
indexing.append(None)
elif isinstance(index, slice):
indexing.append((index.start, index.stop))
elif isinstance(index, int):
indexing.append(index)
else:
raise ValueError("only 'int', 'slice', and 'None' types possible in expression")
return indexing if len(indexing) > 1 else indexing[0]
| import typing as t
import pickle
# could maybe be replaced or wrapper with numpy.s_
class IndexExpression:
def __init__(self, indexing: t.Union[int, tuple, t.List[int], t.List[tuple]]=None,
axis: t.Union[int, tuple]=None) -> None:
self.expression = None
self.set_indexing(indexing, axis)
def set_indexing(self, indexing: t.Union[int, tuple, t.List[int], t.List[tuple]], axis: t.Union[int, tuple]=None):
if indexing is None:
self.expression = slice(None)
return
if isinstance(indexing, int) or isinstance(indexing, tuple):
indexing = [indexing]
if axis is None:
axis = tuple(range(len(indexing)))
if isinstance(axis, int):
axis = (axis,)
expr = [slice(None) for _ in range(max(axis) + 1)]
for a, index in zip(axis, indexing):
if isinstance(index, int):
expr[a] = index
elif isinstance(index, tuple):
start, stop = index
expr[a] = slice(start, stop)
# needs to be tuple otherwise exception from h5py while slicing
self.expression = tuple(expr)
def get_indexing(self):
indexing = []
for index in self.expression:
if index is None:
indexing.append(None)
elif isinstance(index, slice):
indexing.append((index.start, index.stop))
elif isinstance(index, int):
indexing.append(index)
else:
raise ValueError("only 'int', 'slice', and 'None' types possible in expression")
return indexing if len(indexing) > 1 else indexing[0]
| apache-2.0 | Python |
e0665dd33ee3953e1e20a9115c86210e5b6fcd32 | Update __init__.py | PaesslerAG/PythonMiniProbe,PaesslerAG/PythonMiniProbe | miniprobe/sensors/__init__.py | miniprobe/sensors/__init__.py | __all__ = ['Ping', 'HTTP', 'Port', 'SNMPCustom', 'CPULoad', 'Memory', 'Diskspace', 'SNMPTraffic', 'CPUTemp', 'Probehealth', 'ExternalIP', 'ADNS', 'APT', 'NMAP', 'MDADM']
| __all__ = ['Ping', 'HTTP', 'Port', 'SNMPCustom', 'CPULoad', 'Memory', 'Diskspace', 'SNMPTraffic', 'CPUTemp', 'Probehealth', 'ExternalIP', 'ADNS', 'APT', 'NMAP']
| bsd-3-clause | Python |
9f9bf9b9cccd2e39c3d567d4768d03c1a4a2c4b1 | Remove description key as it's already on README.rst | jbq/account-payment,Vauxoo/account-payment,incaser/account-payment,VitalPet/account-payment,Antiun/account-payment,alanljj/account-payment,abstract-open-solutions/account-payment,jesusVMayor/account-payment,Eficent/account-payment,open-synergy/account-payment,Endika/account-payment | account_due_list/__openerp__.py | account_due_list/__openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 Domsense srl (<http://www.domsense.com>)
# Copyright (C) 2011-2013 Agile Business Group sagl
# (<http://www.agilebg.com>)
# @author Jordi Esteve <jesteve@zikzakmedia.com>
# @author Lorenzo Battistini <lorenzo.battistini@agilebg.com>
# Ported to OpenERP 7.0 by Alex Comba <alex.comba@agilebg.com> and
# Bruno Bottacini <bruno.bottacini@dorella.com>
# Ported to Odoo by Andrea Cometa <info@andreacometa.it>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "Payments Due list",
'version': '0.1',
'category': 'Generic Modules/Payment',
'author': 'Odoo Community Association (OCA), Agile Business Group, '
'Zikzakmedia SL',
'website': 'http://www.agilebg.com',
'license': 'AGPL-3',
"depends": [
'account',
],
'conflicts': [
'account_payment_extension',
],
"data": [
'payment_view.xml',
],
"active": False,
"installable": True,
}
| # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 Domsense srl (<http://www.domsense.com>)
# Copyright (C) 2011-2013 Agile Business Group sagl
# (<http://www.agilebg.com>)
# @author Jordi Esteve <jesteve@zikzakmedia.com>
# @author Lorenzo Battistini <lorenzo.battistini@agilebg.com>
# Ported to OpenERP 7.0 by Alex Comba <alex.comba@agilebg.com> and
# Bruno Bottacini <bruno.bottacini@dorella.com>
# Ported to Odoo by Andrea Cometa <info@andreacometa.it>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "Payments Due list",
'version': '0.1',
'category': 'Generic Modules/Payment',
'description': """
A due list of pending payments. The list contains every expected payment,
generated by invoices. The list is fully filterable.
""",
'author': 'Odoo Community Association (OCA), Agile Business Group, '
'Zikzakmedia SL',
'website': 'http://www.agilebg.com',
'license': 'AGPL-3',
"depends": [
'account',
],
'conflicts': [
'account_payment_extension',
],
"data": [
'payment_view.xml',
],
"active": False,
"installable": True,
}
| agpl-3.0 | Python |
2696e71ac88276a2291732d98caade883ea8963e | clean up | chintak/image-captioning | external/score.py | external/score.py | # see https://github.com/tylin/coco-caption/
# standard evaluation code
#
import cPickle as pickle
import os
import sys
sys.path.append('../../coco-caption')
sys.path.append('../coco-caption')
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.rouge.rouge import Rouge
from pycocoevalcap.cider.cider import Cider
from pycocoevalcap.meteor.meteor import Meteor
def get_score(ref, hypo):
scorers = [
(Bleu(4),["Bleu_1","Bleu_2","Bleu_3","Bleu_4"]),
(Meteor(),"METEOR"),
(Rouge(),"ROUGE_L"),
(Cider(),"CIDEr")
]
final_scores = {}
for scorer,method in scorers:
score,scores = scorer.compute_score(ref,hypo)
if type(score)==list:
for m,s in zip(method,score):
final_scores[m] = s
else:
final_scores[method] = score
return final_scores
def evaluate(data_path='./data', split='val', get_scores=False):
reference_path = os.path.join(data_path, "%s/%s.references.pkl" %(split, split))
candidate_path = os.path.join(data_path, "%s/%s.candidate.captions.pkl" %(split, split))
# load caption data
with open(reference_path, 'rb') as f:
ref = pickle.load(f)
with open(candidate_path, 'rb') as f:
cand = pickle.load(f)
# make dictionary
hypo = {}
for i, caption in enumerate(cand):
hypo[i] = [caption]
# compute bleu score
print ref.items()[:10], hypo.items()[:10]
final_scores = get_score(ref, hypo)
# print out scores
print 'Bleu_1:\t',final_scores['Bleu_1']
print 'Bleu_2:\t',final_scores['Bleu_2']
print 'Bleu_3:\t',final_scores['Bleu_3']
print 'Bleu_4:\t',final_scores['Bleu_4']
print 'METEOR:\t',final_scores['METEOR']
print 'ROUGE_L:',final_scores['ROUGE_L']
print 'CIDEr:\t',final_scores['CIDEr']
if get_scores:
return final_scores
| import cPickle as pickle
import os
import sys
sys.path.append('../../coco-caption')
sys.path.append('../coco-caption')
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.rouge.rouge import Rouge
from pycocoevalcap.cider.cider import Cider
from pycocoevalcap.meteor.meteor import Meteor
def get_score(ref, hypo):
scorers = [
(Bleu(4),["Bleu_1","Bleu_2","Bleu_3","Bleu_4"]),
(Meteor(),"METEOR"),
(Rouge(),"ROUGE_L"),
(Cider(),"CIDEr")
]
final_scores = {}
for scorer,method in scorers:
score,scores = scorer.compute_score(ref,hypo)
if type(score)==list:
for m,s in zip(method,score):
final_scores[m] = s
else:
final_scores[method] = score
return final_scores
def evaluate(data_path='./data', split='val', get_scores=False):
reference_path = os.path.join(data_path, "%s/%s.references.pkl" %(split, split))
candidate_path = os.path.join(data_path, "%s/%s.candidate.captions.pkl" %(split, split))
# load caption data
with open(reference_path, 'rb') as f:
ref = pickle.load(f)
with open(candidate_path, 'rb') as f:
cand = pickle.load(f)
# make dictionary
hypo = {}
for i, caption in enumerate(cand):
hypo[i] = [caption]
# compute bleu score
print ref.items()[:10], hypo.items()[:10]
final_scores = get_score(ref, hypo)
# print out scores
print 'Bleu_1:\t',final_scores['Bleu_1']
print 'Bleu_2:\t',final_scores['Bleu_2']
print 'Bleu_3:\t',final_scores['Bleu_3']
print 'Bleu_4:\t',final_scores['Bleu_4']
print 'METEOR:\t',final_scores['METEOR']
print 'ROUGE_L:',final_scores['ROUGE_L']
print 'CIDEr:\t',final_scores['CIDEr']
if get_scores:
return final_scores
| mit | Python |
cf2fa1ef4c2f7b4677cbd17202d6703cdcc8cc0d | Use optparse style argument parsing. Implement a 'wait-for-pid' option on xsend, to delay sending a message until after the specified process id exits | detrout/benderjab | benderjab/xsend.py | benderjab/xsend.py | #!/usr/bin/env python
#
# Copyright 2007 Diane Trout
# This software is covered by the GNU Lesser Public License 2.1
#
import optparse
import os
import random
import sys
import time
import xmpp
from xmpp import simplexml
from benderjab.util import get_config, toJID
def connect(profile=None):
"""
Connect to the server for our jabber id
"""
jidparams = get_config(profile)
# if we have no credentials, don't bother logging in
if jidparams is None:
return
myjid=toJID(jidparams['jid'])
# construct a client instance, logging into the JIDs servername
# xmpp's default debug didn't work when I started using it
cl=xmpp.Client(myjid.getDomain(),debug=[])
connection_type = ''
connection_tries = 3
# if use_srv is true, xmpp will try to use dnspython to look up
# the right server via a DNS SRV request, this doesn't work right
# for my server
while connection_type == '' and connection_tries > 0:
connection_type = cl.connect(use_srv=False)
# wait a random length of time between 2.5 and 7.5 seconds
# if we didn't manage to connect
if connection_type == '':
time.sleep( 5 + (random.random()*5 - 2.5))
# connection failed
if connection_type == '':
raise IOError("unable to connect to" + str(cl.Server))
# try logging in
if cl.auth(myjid.getNode(),jidparams['password'], 'xsend') is None:
raise IOError("Couldn't auth:"+str(cl.lastErr))
return cl
def send(tojid, text, profile=None):
"""Quickly send a jabber message tojid
:Parameters:
- `tojid`: The Jabber ID to send to
- `text`: a string containing the message to send
- `profile`: which set of credentials to use from the config file
"""
cl = connect(profile)
# we logged in, so we can send a message
cl.send(xmpp.protocol.Message(tojid,text))
# hang up politely
cl.disconnect()
def wait_for_pid(pid, timeout=10):
"""
Wait for a process id to disappear before returning
pid is the process id to watch
time out is how long in seconds to wait between polls
"""
while True:
try:
os.kill(pid, 0)
except OSError, e:
# there is no PID, return
return
time.sleep(timeout)
def make_parser():
usage = "%prog: [options] jabber-id message..."
parser = optparse.OptionParser()
parser.add_option('--wait-for-pid', type='int',
help="Wait for a process ID to exit before sending message",
default=None)
return parser
def main(cmdline=None):
parser = make_parser()
opt, args = parser.parse_args(cmdline)
if len(args) < 2:
parser.error("Need JabberID and a message")
if opt.wait_for_pid is not None:
wait_for_pid(opt.wait_for_pid)
# parse command line arguments
tojid=args[1]
message=' '.join(args[2:])
send(tojid, message)
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
| #!/usr/bin/env python
#
# Copyright 2007 Diane Trout
# This software is covered by the GNU Lesser Public License 2.1
#
import os
import random
import sys
import time
import xmpp
from xmpp import simplexml
from benderjab.util import get_config, toJID
def connect(profile=None):
"""
Connect to the server for our jabber id
"""
jidparams = get_config(profile)
# if we have no credentials, don't bother logging in
if jidparams is None:
return
myjid=toJID(jidparams['jid'])
# construct a client instance, logging into the JIDs servername
# xmpp's default debug didn't work when I started using it
cl=xmpp.Client(myjid.getDomain(),debug=[])
connection_type = ''
connection_tries = 3
# if use_srv is true, xmpp will try to use dnspython to look up
# the right server via a DNS SRV request, this doesn't work right
# for my server
while connection_type == '' and connection_tries > 0:
connection_type = cl.connect(use_srv=False)
# wait a random length of time between 2.5 and 7.5 seconds
# if we didn't manage to connect
if connection_type == '':
time.sleep( 5 + (random.random()*5 - 2.5))
# connection failed
if connection_type == '':
raise IOError("unable to connect to" + str(cl.Server))
# try logging in
if cl.auth(myjid.getNode(),jidparams['password'], 'xsend') is None:
raise IOError("Couldn't auth:"+str(cl.lastErr))
return cl
def send(tojid, text, profile=None):
"""Quickly send a jabber message tojid
:Parameters:
- `tojid`: The Jabber ID to send to
- `text`: a string containing the message to send
- `profile`: which set of credentials to use from the config file
"""
cl = connect(profile)
# we logged in, so we can send a message
cl.send(xmpp.protocol.Message(tojid,text))
# hang up politely
cl.disconnect()
def main(args=None):
if args is None:
args = sys.argv
if len(args) < 2:
print "Syntax: xsend JID text"
return 1
# parse command line arguments
tojid=args[1]
text=' '.join(args[2:])
send(tojid, text)
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
| lgpl-2.1 | Python |
e9fc291faca8af35398b958d046e951aa8471cbf | Fix broken test since models new default ordering | Candihub/pixel,Candihub/pixel,Candihub/pixel,Candihub/pixel,Candihub/pixel | apps/core/tests/test_factories.py | apps/core/tests/test_factories.py | from .. import factories, models
from . import CoreFixturesTestCase
class AnalysisFactoryTestCase(CoreFixturesTestCase):
def test_new_factory_with_Experiments(self):
experiments = factories.ExperimentFactory.create_batch(3)
# build
analysis = factories.AnalysisFactory.build(experiments=experiments)
self.assertEqual(analysis.experiments.count(), 0)
# create
analysis = factories.AnalysisFactory(experiments=experiments)
experiments_ids = analysis.experiments.values_list(
'id', flat=True
)
expected_experiments_ids = models.Experiment.objects.values_list(
'id', flat=True
)
self.assertEqual(
list(experiments_ids),
list(expected_experiments_ids)
)
| from .. import factories
from . import CoreFixturesTestCase
class AnalysisFactoryTestCase(CoreFixturesTestCase):
def test_new_factory_with_Experiments(self):
experiments = factories.ExperimentFactory.create_batch(3)
# build
analysis = factories.AnalysisFactory.build(experiments=experiments)
self.assertEqual(analysis.experiments.count(), 0)
# create
analysis = factories.AnalysisFactory(experiments=experiments)
experiments_ids = list(
analysis.experiments.values_list('id', flat=True)
)
expected_experiments_ids = [e.id for e in experiments]
self.assertEqual(experiments_ids, expected_experiments_ids)
| bsd-3-clause | Python |
bd0f94b8a1af1f4232c7dbc09aa0bb32bf431d80 | check that multiline string literals don't expand as magic | ipython/ipython,ipython/ipython | IPython/core/tests/test_prefilter.py | IPython/core/tests/test_prefilter.py | """Tests for input manipulation machinery."""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import nose.tools as nt
from IPython.testing import tools as tt, decorators as dec
from IPython.testing.globalipapp import get_ipython
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
ip = get_ipython()
@dec.parametric
def test_prefilter():
"""Test user input conversions"""
# pairs of (raw, expected correct) input
pairs = [ ('2+2','2+2'),
('>>> 2+2','2+2'),
('>>> # This is a comment\n'
'... 2+2',
'# This is a comment\n'
'2+2'),
# Some IPython input
('In [1]: 1', '1'),
('In [2]: for i in range(5):\n'
' ...: print i,',
'for i in range(5):\n'
' print i,'),
]
for raw, correct in pairs:
yield nt.assert_equals(ip.prefilter(raw), correct)
@dec.parametric
def test_autocall_binops():
"""See https://bugs.launchpad.net/ipython/+bug/315706"""
ip.magic('autocall 2')
f = lambda x: x
ip.user_ns['f'] = f
try:
yield nt.assert_equals(ip.prefilter('f 1'),'f(1)')
for t in ['f +1', 'f -1']:
yield nt.assert_equals(ip.prefilter(t), t)
finally:
ip.magic('autocall 0')
del ip.user_ns['f']
@dec.parametric
def test_issue114():
"""Check that multiline string literals don't expand as magic
see http://github.com/ipython/ipython/issues/#issue/114"""
template = '"""\n%s\n"""'
for mgk in ip.lsmagic():
raw = template % mgk
yield nt.assert_equals(ip.prefilter(raw), raw)
| """Tests for input manipulation machinery."""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import nose.tools as nt
from IPython.testing import tools as tt, decorators as dec
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
ip = get_ipython()
@dec.parametric
def test_prefilter():
"""Test user input conversions"""
# pairs of (raw, expected correct) input
pairs = [ ('2+2','2+2'),
('>>> 2+2','2+2'),
('>>> # This is a comment\n'
'... 2+2',
'# This is a comment\n'
'2+2'),
# Some IPython input
('In [1]: 1', '1'),
('In [2]: for i in range(5):\n'
' ...: print i,',
'for i in range(5):\n'
' print i,'),
]
for raw, correct in pairs:
yield nt.assert_equals(ip.prefilter(raw), correct)
@dec.parametric
def test_autocall_binops():
"""See https://bugs.launchpad.net/ipython/+bug/315706"""
ip.magic('autocall 2')
f = lambda x: x
ip.user_ns['f'] = f
try:
yield nt.assert_equals(ip.prefilter('f 1'),'f(1)')
for t in ['f +1', 'f -1']:
yield nt.assert_equals(ip.prefilter(t), t)
finally:
ip.magic('autocall 0')
del ip.user_ns['f']
| bsd-3-clause | Python |
ae99f3b3e2dae482292492ae1ddab512549f2e7b | Improve formatting (attempt 2) | DavisNT/mopidy-alarmclock,DavisNT/mopidy-alarmclock,DavisNT/mopidy-alarmclock | mopidy_alarmclock/__init__.py | mopidy_alarmclock/__init__.py | import os
from mopidy import config, ext
from .alarm_manager import AlarmManager
from .http import MessageStore, factory_decorator
__version__ = '0.1.7'
class Extension(ext.Extension):
dist_name = 'Mopidy-AlarmClock'
ext_name = 'alarmclock'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
def get_config_schema(self):
schema = super(Extension, self).get_config_schema()
schema['def_time'] = config.String()
schema['def_playlist'] = config.String(optional=True)
schema['def_random'] = config.Boolean()
schema['def_volume'] = config.Integer()
schema['def_vol_inc_duration'] = config.Integer()
return schema
def setup(self, registry):
alarm_manager = AlarmManager()
msg_store = MessageStore()
registry.add('http:app', {
'name': self.ext_name,
'factory': factory_decorator(alarm_manager, msg_store),
})
| import os
from mopidy import config, ext
from .alarm_manager import AlarmManager
from .http import MessageStore, factory_decorator
__version__ = '0.1.7'
class Extension(ext.Extension):
dist_name = 'Mopidy-AlarmClock'
ext_name = 'alarmclock'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
def get_config_schema(self):
schema = super(Extension, self).get_config_schema()
schema['def_time'] = config.String()
schema['def_playlist'] = config.String(optional=True)
schema['def_random'] = config.Boolean()
schema['def_volume'] = config.Integer()
schema['def_vol_inc_duration'] = config.Integer()
return schema
def setup(self, registry):
alarm_manager = AlarmManager()
msg_store = MessageStore()
registry.add('http:app', {
'name': self.ext_name,
'factory': factory_decorator(alarm_manager, msg_store),
})
| apache-2.0 | Python |
e91c634c654addef2f306f756373d474f60c9910 | Fix division by zero in ProgressBar | PermutaTriangle/Permuta | permuta/misc/progressbar.py | permuta/misc/progressbar.py | import time
import sys
import math
class ProgressBar(object):
@staticmethod
def create(mx, mn=0):
ProgressBar.mn = mn
ProgressBar.mx = mx
ProgressBar.at = mn
ProgressBar.start = time.time()
ProgressBar.last = 0
sys.stderr.write('\n')
ProgressBar.progress(mn)
@staticmethod
def progress(prg=None, fin=False):
if prg is not None:
ProgressBar.at = prg
else:
ProgressBar.at = ProgressBar.at + 1
prg = ProgressBar.at
curt = time.time()
if curt - ProgressBar.last < 0.5 and not fin:
return
ProgressBar.last = curt
sys.stderr.write('\033[1F')
width = 50
prog = 1 if ProgressBar.mn == ProgressBar.mx else float(prg - ProgressBar.mn) / (ProgressBar.mx - ProgressBar.mn)
bars = int(round(prog * width))
bars = max(0, min(width, bars))
sys.stderr.write('%3d%% [%s%s]' % (round(prog * 100), '#' * bars, '-' * (width - bars)))
elapsed = curt - ProgressBar.start
# if elapsed >= 4 and prog > 0:
show_time = None
if fin:
show_time = elapsed
elif elapsed >= 2 and prog > 0:
show_time = max(0, elapsed / prog - elapsed)
if show_time is not None:
h = math.floor(show_time / 60 / 60)
show_time -= h * 60 * 60
m = math.floor(show_time / 60)
show_time -= m * 60
s = math.floor(show_time)
sys.stderr.write(' %02d:%02d:%02d' % (h,m,s))
sys.stderr.write('\n')
@staticmethod
def finish():
ProgressBar.progress(ProgressBar.mx, fin=True)
| import time
import sys
import math
class ProgressBar(object):
    """Console progress bar rendered to stderr, with class-level state.

    All state lives on the class itself, so only one bar can be active at
    a time.  Typical use: ``create(total)`` once, ``progress()`` per unit
    of work, then ``finish()``.
    """

    @staticmethod
    def create(mx, mn=0):
        """Initialise the bar for values in [mn, mx] and draw it at 0%."""
        ProgressBar.mn = mn
        ProgressBar.mx = mx
        ProgressBar.at = mn
        ProgressBar.start = time.time()
        ProgressBar.last = 0         # timestamp of last redraw (throttling)
        sys.stderr.write('\n')       # reserve the line the bar redraws over
        ProgressBar.progress(mn)

    @staticmethod
    def progress(prg=None, fin=False):
        """Redraw the bar at position *prg* (or advance by one if None).

        Redraws are throttled to one every 0.5s unless *fin* is set.  An
        ETA estimate is shown after 2s of runtime; the total elapsed time
        is shown when *fin* is true.
        """
        if prg is not None:
            ProgressBar.at = prg
        else:
            ProgressBar.at = ProgressBar.at + 1
            prg = ProgressBar.at
        curt = time.time()
        if curt - ProgressBar.last < 0.5 and not fin:
            return
        ProgressBar.last = curt
        sys.stderr.write('\033[1F')  # move cursor up one line to redraw
        width = 50
        # BUG FIX: guard mn == mx, which previously divided by zero before
        # the bar count was even computed; an empty range counts as 100%.
        prog = 1 if ProgressBar.mn == ProgressBar.mx else float(prg - ProgressBar.mn) / (ProgressBar.mx - ProgressBar.mn)
        bars = int(round(prog * width))
        bars = max(0, min(width, bars))
        sys.stderr.write('%3d%% [%s%s]' % (round(prog * 100), '#' * bars, '-' * (width - bars)))
        elapsed = curt - ProgressBar.start
        show_time = None
        if fin:
            show_time = elapsed                           # total runtime
        elif elapsed >= 2 and prog > 0:
            show_time = max(0, elapsed / prog - elapsed)  # ETA estimate
        if show_time is not None:
            h = math.floor(show_time / 60 / 60)
            show_time -= h * 60 * 60
            m = math.floor(show_time / 60)
            show_time -= m * 60
            s = math.floor(show_time)
            sys.stderr.write(' %02d:%02d:%02d' % (h, m, s))
        sys.stderr.write('\n')

    @staticmethod
    def finish():
        """Force a final redraw at 100% and print the total elapsed time."""
        ProgressBar.progress(ProgressBar.mx, fin=True)
| bsd-3-clause | Python |
1726295eab3de3759729d18d17fdbe0baba77c40 | Enable easier import | david-zwicker/video-analysis,david-zwicker/cv-mouse-burrows,david-zwicker/cv-mouse-burrows | mousetracking/hpc/__init__.py | mousetracking/hpc/__init__.py | from slurm import SlurmProject | bsd-3-clause | Python | |
96f9d8828934f61c4f1a623b7feecb9712dc83ea | Add id of Antorus raid. | PuckCh/battlenet | battlenet/enums.py | battlenet/enums.py | RACE = {
1: 'Human',
2: 'Orc',
3: 'Dwarf',
4: 'Night Elf',
5: 'Undead',
6: 'Tauren',
7: 'Gnome',
8: 'Troll',
9: 'Goblin',
10: 'Blood Elf',
11: 'Draenei',
22: 'Worgen',
24: 'Pandaren',
25: 'Pandaren',
26: 'Pandaren',
}
CLASS = {
1: 'Warrior',
2: 'Paladin',
3: 'Hunter',
4: 'Rogue',
5: 'Priest',
6: 'Death Knight',
7: 'Shaman',
8: 'Mage',
9: 'Warlock',
10: 'Monk',
11: 'Druid',
12: 'Demon Hunter',
}
QUALITY = {
1: 'Common',
2: 'Uncommon',
3: 'Rare',
4: 'Epic',
5: 'Legendary',
6: 'Artifact',
7: 'Heirloom',
}
RACE_TO_FACTION = {
1: 'Alliance',
2: 'Horde',
3: 'Alliance',
4: 'Alliance',
5: 'Horde',
6: 'Horde',
7: 'Alliance',
8: 'Horde',
9: 'Horde',
10: 'Horde',
11: 'Alliance',
22: 'Alliance',
24: '?',
25: 'Alliance',
26: 'Horde',
}
EXPANSION = {
0: ('wow', 'World of Warcraft'),
1: ('bc', 'The Burning Crusade'),
2: ('lk', 'Wrath of the Lich King'),
3: ('cata', 'Cataclysm'),
4: ('mop', 'Mists of Pandaria'),
5: ('wod', 'Warlords of Draenor'),
6: ('legion', 'Legion'),
}
RAIDS = {
'wow': (2717, 2677, 3429, 3428),
'bc': (3457, 3836, 3923, 3607, 3845, 3606, 3959, 4075),
'lk': (4603, 3456, 4493, 4500, 4273, 2159, 4722, 4812, 4987),
'cata': (5600, 5094, 5334, 5638, 5723, 5892),
'mop': (6125, 6297, 6067, 6622, 6738),
'wod': (6996, 6967, 7545),
'legion': (8026, 8440, 8025, 8524, 8638),
}
| RACE = {
1: 'Human',
2: 'Orc',
3: 'Dwarf',
4: 'Night Elf',
5: 'Undead',
6: 'Tauren',
7: 'Gnome',
8: 'Troll',
9: 'Goblin',
10: 'Blood Elf',
11: 'Draenei',
22: 'Worgen',
24: 'Pandaren',
25: 'Pandaren',
26: 'Pandaren',
}
CLASS = {
1: 'Warrior',
2: 'Paladin',
3: 'Hunter',
4: 'Rogue',
5: 'Priest',
6: 'Death Knight',
7: 'Shaman',
8: 'Mage',
9: 'Warlock',
10: 'Monk',
11: 'Druid',
12: 'Demon Hunter',
}
QUALITY = {
1: 'Common',
2: 'Uncommon',
3: 'Rare',
4: 'Epic',
5: 'Legendary',
6: 'Artifact',
7: 'Heirloom',
}
RACE_TO_FACTION = {
1: 'Alliance',
2: 'Horde',
3: 'Alliance',
4: 'Alliance',
5: 'Horde',
6: 'Horde',
7: 'Alliance',
8: 'Horde',
9: 'Horde',
10: 'Horde',
11: 'Alliance',
22: 'Alliance',
24: '?',
25: 'Alliance',
26: 'Horde',
}
EXPANSION = {
0: ('wow', 'World of Warcraft'),
1: ('bc', 'The Burning Crusade'),
2: ('lk', 'Wrath of the Lich King'),
3: ('cata', 'Cataclysm'),
4: ('mop', 'Mists of Pandaria'),
5: ('wod', 'Warlords of Draenor'),
6: ('legion', 'Legion'),
}
RAIDS = {
'wow': (2717, 2677, 3429, 3428),
'bc': (3457, 3836, 3923, 3607, 3845, 3606, 3959, 4075),
'lk': (4603, 3456, 4493, 4500, 4273, 2159, 4722, 4812, 4987),
'cata': (5600, 5094, 5334, 5638, 5723, 5892),
'mop': (6125, 6297, 6067, 6622, 6738),
'wod': (6996, 6967, 7545),
'legion': (8026, 8440, 8025, 8524),
}
| mit | Python |
50c91595f47b323081dfc44d9fdf16fbc4ba6f59 | add comment | jaebradley/nba_data | nba_data/data/season_range.py | nba_data/data/season_range.py | from nba_data.data.season import Season
# TODO: @jbradley add assertions to compare start and end
class SeasonRange:
    """Range of NBA seasons from *start* to *end* (both Season instances)."""

    def __init__(self, start, end):
        """Store the endpoints after validating their types.

        Raises TypeError instead of relying on ``assert`` so the
        validation survives ``python -O`` (asserts are stripped under
        optimization).
        """
        if not isinstance(start, Season):
            raise TypeError('start must be a Season, got %r' % type(start))
        if not isinstance(end, Season):
            raise TypeError('end must be a Season, got %r' % type(end))
        # TODO(jbradley): also validate that start does not come after end
        # -- requires an ordering on Season; confirm its comparison API.
        self.start = start
        self.end = end
| from nba_data.data.season import Season
class SeasonRange:
def __init__(self, start, end):
assert isinstance(start, Season)
assert isinstance(end, Season)
self.start = start
self.end = end
| mit | Python |
fcf8fa3368386dee3ede4575f58aa9cb57cb0649 | bump to 0.8.3 | tsuru/varnishapi,tsuru/varnishapi | feaas/__init__.py | feaas/__init__.py | # Copyright 2014 varnishapi authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
__version__ = "0.8.3"
| # Copyright 2014 varnishapi authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
__version__ = "0.8.2"
| bsd-3-clause | Python |
bab3bfb70a1da30134a0e020be31257f08a2e709 | Remove a vestige of the old zeromq implementation where we specified a callback port as a decorated parameter to the overall test case | wwitzel3/awx,wwitzel3/awx,snahelou/awx,snahelou/awx,snahelou/awx,snahelou/awx,wwitzel3/awx,wwitzel3/awx | awx/api/tests/job_tasks.py | awx/api/tests/job_tasks.py | # Copyright (c) 2014 Ansible, Inc.
# All Rights Reserved.
from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import LiveServerTestCase
from django.test.utils import override_settings
from rest_framework.test import APIClient
import mock
from awx.api.views import JobJobTasksList
from awx.main.models import Job, JobTemplate, JobEvent
from awx.main.tests.jobs import BaseJobTestMixin
# Run Celery tasks inline (eagerly) with exceptions propagated, and use the
# local Ansible transport, so the job executes inside the test process and
# failures surface directly as test failures.
@override_settings(CELERY_ALWAYS_EAGER=True,
                   CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
                   ANSIBLE_TRANSPORT='local')
class JobTasksTests(BaseJobTestMixin, LiveServerTestCase):
    """A set of tests to ensure that the job_tasks endpoint, available at
    `/api/v1/jobs/{id}/job_tasks/`, works as expected.
    """
    def setUp(self):
        # Point the internal API at the live test server spun up by
        # LiveServerTestCase so job callbacks reach this process.
        super(JobTasksTests, self).setUp()
        settings.INTERNAL_API_URL = self.live_server_url
    def test_tasks_endpoint(self):
        """Establish that the `job_tasks` endpoint shows what we expect,
        which is a rollup of information about each of the corresponding
        job events.
        """
        # Create a job
        job = self.make_job(self.jt_ops_east_run, self.user_sue, 'new')
        job.signal_start()
        # Get the initial job event.
        event = job.job_events.get(event='playbook_on_play_start')
        # Actually make the request for the job tasks.
        with self.current_user(self.user_sue):
            url = '/api/v1/jobs/%d/job_tasks/?event_id=%d' % (job.id, event.id)
            response = self.get(url)
        # Test to make sure we got back what we expected.
        # NOTE(review): the expected counts (7 hosts, all changed)
        # presumably come from the jt_ops_east_run fixture in
        # BaseJobTestMixin -- confirm there if this assertion drifts.
        result = response['results'][0]
        self.assertEqual(result['host_count'], 7)
        self.assertEqual(result['changed_count'], 7)
        self.assertFalse(result['failed'])
        self.assertTrue(result['changed'])
| # Copyright (c) 2014 Ansible, Inc.
# All Rights Reserved.
from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import LiveServerTestCase
from django.test.utils import override_settings
from rest_framework.test import APIClient
import mock
from awx.api.views import JobJobTasksList
from awx.main.models import Job, JobTemplate, JobEvent
from awx.main.tests.jobs import BaseJobTestMixin
@override_settings(CELERY_ALWAYS_EAGER=True,
CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
CALLBACK_CONSUMER_PORT='',
ANSIBLE_TRANSPORT='local')
class JobTasksTests(BaseJobTestMixin, LiveServerTestCase):
"""A set of tests to ensure that the job_tasks endpoint, available at
`/api/v1/jobs/{id}/job_tasks/`, works as expected.
"""
def setUp(self):
super(JobTasksTests, self).setUp()
settings.INTERNAL_API_URL = self.live_server_url
def test_tasks_endpoint(self):
"""Establish that the `job_tasks` endpoint shows what we expect,
which is a rollup of information about each of the corresponding
job events.
"""
# Create a job
job = self.make_job(self.jt_ops_east_run, self.user_sue, 'new')
job.signal_start()
# Get the initial job event.
event = job.job_events.get(event='playbook_on_play_start')
# Actually make the request for the job tasks.
with self.current_user(self.user_sue):
url = '/api/v1/jobs/%d/job_tasks/?event_id=%d' % (job.id, event.id)
response = self.get(url)
# Test to make sure we got back what we expected.
result = response['results'][0]
self.assertEqual(result['host_count'], 7)
self.assertEqual(result['changed_count'], 7)
self.assertFalse(result['failed'])
self.assertTrue(result['changed'])
| apache-2.0 | Python |
9a470f41174c617787a8f06e003709f1f7ba6310 | Update file.py | jeonghoonkang/BerePi,jeonghoonkang/BerePi,jeonghoonkang/BerePi,jeonghoonkang/BerePi,jeonghoonkang/BerePi,jeonghoonkang/BerePi,jeonghoonkang/BerePi | apps/bigdata/file_download/file.py | apps/bigdata/file_download/file.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Author : https://github.com/jeonghoonkang
import numpy as np
import json
import time
import datetime
import os.path
import sys
def inputfile_chck(fname):
if os.path.isfile('./'+_internet_file_name_):
with open("./"+_internet_file_name_) as data_file:
data = json.load(data_file)
if 0 : print data[0].keys()
print " previous old downlaod file exists... "
else:
print "There is no file on the disk, try to download from internet"
target_file = 'https://raw.githubusercontent.com/jeonghoonkang/BerePi'
target_file += '/master/apps/bigdata/file/2014_06_01_gps_sangic_kwangic.json'
print target_file
_response = urllib2.urlopen(target_file)
_of = open(_internet_file_name_, 'wb')
meta = _response.info()
if 0 : print meta
_fsize = int (meta.getheaders("Content-Length")[0])
_blk = (1024*8)
_cursor_ = 0
while True:
buff = _response.read(_blk)
if not buff: break
_cursor_ += len(buff)
_of.write(buff)
_pout = " download progress, %s " %(_internet_file_name_)
_pout += " %3.2f %%" %(100.0*_cursor_/_fsize)
_pout += " %s / %s " %(_cursor_, _fsize)
_pout += "\r"
sys.stdout.write(_pout)
sys.stdout.flush()
_of.close()
with open("./"+_internet_file_name_) as data_file:
data = json.load(data_file)
if 0: print data
return data
# Script entry point: prefer the pre-filtered local dataset; fall back to
# the cached/raw download handled by inputfile_chck() when it is missing.
if __name__ == "__main__":
    _internet_file_name_ = "_input_json.json"
    try :
        with open('./2014_06_01_filtered_data.json') as data_file:
            data = json.load(data_file)
    except :
        # NOTE(review): the bare except also hides JSON parse errors, not
        # just a missing file -- consider narrowing to IOError/ValueError.
        data = inputfile_chck(_internet_file_name_)
| #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import numpy as np
import json
import time
import datetime
import os.path
import sys
def inputfile_chck(fname):
if os.path.isfile('./'+_internet_file_name_):
with open("./"+_internet_file_name_) as data_file:
data = json.load(data_file)
if 0 : print data[0].keys()
print " previous old downlaod file exists... "
else:
print "There is no file on the disk, try to download from internet"
target_file = 'https://raw.githubusercontent.com/jeonghoonkang/BerePi'
target_file += '/master/apps/bigdata/file/2014_06_01_gps_sangic_kwangic.json'
print target_file
_response = urllib2.urlopen(target_file)
_of = open(_internet_file_name_, 'wb')
meta = _response.info()
if 0 : print meta
_fsize = int (meta.getheaders("Content-Length")[0])
_blk = (1024*8)
_cursor_ = 0
while True:
buff = _response.read(_blk)
if not buff: break
_cursor_ += len(buff)
_of.write(buff)
_pout = " download progress, %s " %(_internet_file_name_)
_pout += " %3.2f %%" %(100.0*_cursor_/_fsize)
_pout += " %s / %s " %(_cursor_, _fsize)
_pout += "\r"
sys.stdout.write(_pout)
sys.stdout.flush()
_of.close()
with open("./"+_internet_file_name_) as data_file:
data = json.load(data_file)
if 0: print data
return data
if __name__ == "__main__":
_internet_file_name_ = "_input_json.json"
try :
with open('./2014_06_01_filtered_data.json') as data_file:
data = json.load(data_file)
except :
data = inputfile_chck(_internet_file_name_)
| bsd-2-clause | Python |
28749b35e5e4241c744256a7d64fc696bf4eac97 | Remove debug print. | nanuxbe/djangopackages,QLGu/djangopackages,audreyr/opencomparison,QLGu/djangopackages,benracine/opencomparison,miketheman/opencomparison,pydanny/djangopackages,pydanny/djangopackages,audreyr/opencomparison,nanuxbe/djangopackages,cartwheelweb/packaginator,benracine/opencomparison,pydanny/djangopackages,nanuxbe/djangopackages,cartwheelweb/packaginator,cartwheelweb/packaginator,miketheman/opencomparison,QLGu/djangopackages | apps/package/handlers/launchpad.py | apps/package/handlers/launchpad.py | import os
from django.conf import settings
from launchpadlib.launchpad import Launchpad
from package.handlers.base_handler import BaseHandler
class LaunchpadHandler(BaseHandler):
    """Repo handler that pulls package metadata from Launchpad (bzr)."""
    title = 'Launchpad'
    url = 'https://code.launchpad.net'
    user_url = 'https://launchpad.net/~%s'
    # Branch URLs look like https://code.launchpad.net/~owner/project/branch;
    # the captured group is the project name.
    repo_regex = r'https://code.launchpad.net/~[\w\-\_]+/([\w\-\_]+)/[\w\-\_]+/{0,1}'
    slug_regex = r'https://code.launchpad.net/~[\w\-\_]+/([\w\-\_]+)/[\w\-\_]+/{0,1}'
    def pull(self, package):
        """Refresh *package* stats from Launchpad and return it.

        Uses an anonymous (read-only) launchpadlib session, with its cache
        under LAUNCHPAD_CACHE_DIR or <project root>/lp-cache.
        """
        cachedir = getattr(settings, 'LAUNCHPAD_CACHE_DIR', os.path.join(settings.PROJECT_ROOT, 'lp-cache'))
        launchpad = Launchpad.login_anonymously('djangopackages.com', 'production', cachedir)
        repo_name = package.repo_name
        branch = launchpad.branches.getByUrl(url='lp:%s' % repo_name)
        package.repo_description = branch.description or ''
        # "Forks" and "watchers" have no direct bzr equivalent; approximate
        # them with the project's branch count and the branch's subscribers.
        package.repo_forks = len(branch.project.getBranches())
        package.repo_watchers = len(branch.subscribers)
        package.participants = branch.owner.name
        return package
repo_handler = LaunchpadHandler()
| import os
from django.conf import settings
from launchpadlib.launchpad import Launchpad
from package.handlers.base_handler import BaseHandler
class LaunchpadHandler(BaseHandler):
title = 'Launchpad'
url = 'https://code.launchpad.net'
user_url = 'https://launchpad.net/~%s'
repo_regex = r'https://code.launchpad.net/~[\w\-\_]+/([\w\-\_]+)/[\w\-\_]+/{0,1}'
slug_regex = r'https://code.launchpad.net/~[\w\-\_]+/([\w\-\_]+)/[\w\-\_]+/{0,1}'
def pull(self, package):
cachedir = getattr(settings, 'LAUNCHPAD_CACHE_DIR', os.path.join(settings.PROJECT_ROOT, 'lp-cache'))
launchpad = Launchpad.login_anonymously('djangopackages.com', 'production', cachedir)
repo_name = package.repo_name
print "DEBUG: repo_name =", repo_name
branch = launchpad.branches.getByUrl(url='lp:%s' % repo_name)
package.repo_description = branch.description or ''
package.repo_forks = len(branch.project.getBranches())
package.repo_watchers = len(branch.subscribers)
package.participants = branch.owner.name
return package
repo_handler = LaunchpadHandler()
| mit | Python |
17e8ead8cc1950779dca5614d24fc89f6606de9c | Remove unnecessary print from get_tlds | jeffknupp/domain-parser,jeffknupp/domain-parser | domain_parser/domain_parser.py | domain_parser/domain_parser.py | """Parses a URL using the publicsuffix.org TLD list."""
try:
import cPickle as pickle
except:
import pickle
import urllib2
from urlparse import urlparse
TLD_URL = 'https://publicsuffix.org/list/effective_tld_names.dat'
def get_tlds():
    """Return a list of top-level domains as maintained by Mozilla and
    publicsuffix.org."""
    # Serve from the local pickle cache when present; a missing cache file
    # simply falls through to the network fetch below.
    try:
        with open('.tlds.pickle') as infile:
            return pickle.load(infile)
    except IOError:
        pass
    response = urllib2.urlopen(TLD_URL)
    if response.code != 200:
        raise RuntimeError('Unable to get list of TLDs')
    # Split the list into wildcard entries (lines starting with "*") and
    # plain TLDs, skipping the first line plus comments ("//") and blanks.
    tlds = {'starred': [], 'normal': []}
    for line in response.readlines()[1:]:
        if line.startswith('//') or line == '\n':
            continue
        if line.startswith('*'):
            tlds['starred'].append(line.strip())
        else:
            tlds['normal'].append(line.strip())
    # Cache the parsed result for subsequent calls.
    with open('.tlds.pickle', 'w') as outfile:
        pickle.dump(tlds, outfile)
    return tlds
def parse_domain(url):
    """Return a tuple (top-level domain, second-level domain, subdomains)
    for a given URI.

    Uses a list of active top-level domains to ensure long TLD's such as
    '.co.uk' are correctly treated as a single TLD. If the domain has an
    unrecognizable TLD, assumes it is one level.
    """
    if not url.startswith('http://'):
        url = 'http://' + url
    top_level_domains = get_tlds()
    parsed = urlparse(url.lower())
    hostname = parsed.netloc
    tld = ''
    tld_index = 0
    uri = hostname.split('.')
    # Walk from the left, treating the longest suffix that matches a known
    # TLD (or a wildcard rule) as the TLD.
    for index in range(len(uri)):
        tld_index = index
        tld = '.'.join(uri[index:])
        if tld in top_level_domains['normal']:
            break
        # BUG FIX: wildcard rules ("*.ck") look one label ahead, so guard
        # the lookahead -- on the final label uri[index + 1] previously
        # raised IndexError for unrecognized hosts such as "localhost".
        if index + 1 < len(uri) and \
                '.'.join(['*'] + [uri[index + 1]]) in top_level_domains['starred']:
            break
    second_level_domain = ''.join(uri[tld_index - 1:tld_index])
    subdomains = '.'.join(uri[:tld_index - 1])
    return tld, second_level_domain, subdomains
| """Parses a URL using the publicsuffix.org TLD list."""
try:
import cPickle as pickle
except:
import pickle
import urllib2
from urlparse import urlparse
TLD_URL = 'https://publicsuffix.org/list/effective_tld_names.dat'
def get_tlds():
"""Return a list of top-level domains as maintained by Mozilla and
publicsuffix.org."""
try:
with open('.tlds.pickle') as infile:
return pickle.load(infile)
except IOError:
pass
response = urllib2.urlopen(TLD_URL)
if response.code != 200:
raise RuntimeError('Unable to get list of TLDs')
tlds = {'starred': [], 'normal': []}
for line in response.readlines()[1:]:
if line.startswith('//') or line == '\n':
continue
if line.startswith('*'):
tlds['starred'].append(line.strip())
else:
tlds['normal'].append(line.strip())
with open('.tlds.pickle', 'w') as outfile:
pickle.dump(tlds, outfile)
import pprint
pprint.pprint(tlds)
return tlds
def parse_domain(url):
"""Return a tuple containing any subdomains, the second-level domain, and
the top-level domain for a given URI.
Uses a list of active top-level domains to ensure long TLD's such as
'.co.uk' are correctly treated as a single TLD. If the domain has an
unrecognizable TLD, assumes it is one level.
"""
if not url.startswith('http://'):
url = 'http://' + url
top_level_domains = get_tlds()
parsed = urlparse(url.lower())
hostname = parsed.netloc
tld = ''
tld_index = 0
uri = hostname.split('.')
for index in range(len(uri)):
tld_index = index
tld = '.'.join(uri[index:])
if tld in top_level_domains['normal']:
break
if '.'.join(['*'] + [uri[index+1]]) in top_level_domains['starred']:
break
second_level_domain = ''.join(uri[tld_index-1:tld_index])
subdomains = '.'.join(uri[:tld_index-1])
return tld, second_level_domain, subdomains
| apache-2.0 | Python |
04eb362b245103f7c65bfb22a3b7bb64d0f87b59 | Fix get_image_name for light chutes. | ParadropLabs/Paradrop,ParadropLabs/Paradrop,ParadropLabs/Paradrop | paradrop/daemon/paradrop/core/chute/service.py | paradrop/daemon/paradrop/core/chute/service.py | class Service(object):
"""
A service is a long-running process that provides chute functionality.
"""
def __init__(self,
chute=None,
name=None,
type="normal",
image=None,
command=None,
dockerfile=None,
build=None,
environment=None,
interfaces=None,
requests=None):
self.chute = chute
self.name = name
self.type = type
self.image = image
self.command = command
self.dockerfile = dockerfile
if build is None:
self.build = {}
else:
self.build = build
if environment is None:
self.environment = {}
else:
self.environment = environment
if interfaces is None:
self.interfaces = {}
else:
self.interfaces = interfaces
if requests is None:
self.requests = {}
else:
self.requests = requests
def get_container_name(self):
"""
Get the name for the service's container.
This will be a combination of the chute name and the service name.
"""
if self.name is None:
# name can be None for old-style single-service chutes where the
# container name is expected to be the name of the chute.
return self.chute.name
else:
return "{}-{}".format(self.chute.name, self.name)
def get_image_name(self):
"""
Get the name of the image to be used.
"""
# Light chute services have a shorthand image name like "python2" that
# should not be interpreted as an actual Docker image name.
if self.image is None or self.type == "light":
return "{}:{}".format(self.get_container_name(), self.chute.version)
else:
return self.image
| class Service(object):
"""
A service is a long-running process that provides chute functionality.
"""
def __init__(self,
chute=None,
name=None,
type="normal",
image=None,
command=None,
dockerfile=None,
build=None,
environment=None,
interfaces=None,
requests=None):
self.chute = chute
self.name = name
self.type = type
self.image = image
self.command = command
self.dockerfile = dockerfile
if build is None:
self.build = {}
else:
self.build = build
if environment is None:
self.environment = {}
else:
self.environment = environment
if interfaces is None:
self.interfaces = {}
else:
self.interfaces = interfaces
if requests is None:
self.requests = {}
else:
self.requests = requests
def get_container_name(self):
"""
Get the name for the service's container.
This will be a combination of the chute name and the service name.
"""
if self.name is None:
# name can be None for old-style single-service chutes where the
# container name is expected to be the name of the chute.
return self.chute.name
else:
return "{}-{}".format(self.chute.name, self.name)
def get_image_name(self):
"""
Get the name of the image to be used.
"""
if self.image is not None:
return self.image
else:
return "{}:{}".format(self.get_container_name(), self.chute.version)
| apache-2.0 | Python |
c4753b200359774aff966c4e47ba67b176f80dc3 | add missing sys import | haridsv/pip,tdsmith/pip,nthall/pip,KarelJakubec/pip,davidovich/pip,mujiansu/pip,qbdsoft/pip,chaoallsome/pip,dstufft/pip,squidsoup/pip,nthall/pip,minrk/pip,sigmavirus24/pip,caosmo/pip,natefoo/pip,supriyantomaftuh/pip,blarghmatey/pip,benesch/pip,dstufft/pip,zenlambda/pip,fiber-space/pip,caosmo/pip,benesch/pip,Gabriel439/pip,caosmo/pip,erikrose/pip,blarghmatey/pip,pradyunsg/pip,rouge8/pip,xavfernandez/pip,cjerdonek/pip,yati-sagade/pip,atdaemon/pip,mujiansu/pip,atdaemon/pip,jythontools/pip,jythontools/pip,techtonik/pip,mindw/pip,sbidoul/pip,James-Firth/pip,sbidoul/pip,pfmoore/pip,h4ck3rm1k3/pip,prasaianooz/pip,zenlambda/pip,jythontools/pip,natefoo/pip,pypa/pip,haridsv/pip,pjdelport/pip,Gabriel439/pip,Gabriel439/pip,jmagnusson/pip,natefoo/pip,ncoghlan/pip,jamezpolley/pip,qbdsoft/pip,xavfernandez/pip,luzfcb/pip,techtonik/pip,jasonkying/pip,supriyantomaftuh/pip,harrisonfeng/pip,prasaianooz/pip,yati-sagade/pip,luzfcb/pip,KarelJakubec/pip,James-Firth/pip,qbdsoft/pip,supriyantomaftuh/pip,graingert/pip,Carreau/pip,mattrobenolt/pip,willingc/pip,zorosteven/pip,wkeyword/pip,KarelJakubec/pip,minrk/pip,harrisonfeng/pip,qwcode/pip,fiber-space/pip,zorosteven/pip,fiber-space/pip,ChristopherHogan/pip,techtonik/pip,ChristopherHogan/pip,rouge8/pip,ncoghlan/pip,RonnyPfannschmidt/pip,alquerci/pip,nthall/pip,zvezdan/pip,esc/pip,mujiansu/pip,ianw/pip,patricklaw/pip,patricklaw/pip,willingc/pip,msabramo/pip,pjdelport/pip,luzfcb/pip,James-Firth/pip,habnabit/pip,sigmavirus24/pip,esc/pip,ianw/pip,squidsoup/pip,ncoghlan/pip,chaoallsome/pip,RonnyPfannschmidt/pip,yati-sagade/pip,RonnyPfannschmidt/pip,Ivoz/pip,davidovich/pip,ChristopherHogan/pip,pradyunsg/pip,alex/pip,xavfernandez/pip,pjdelport/pip,rouge8/pip,erikrose/pip,zvezdan/pip,davidovich/pip,h4ck3rm1k3/pip,graingert/pip,msabramo/pip,rbtcollins/pip,h4ck3rm1k3/pip,zvezdan/pip,chaoallsome/pip,alex/pip,habnabit/pip,habnabit/pip,squidsoup/pip,Carreau/pip,mindw/pip,prasaianooz/pip,a
tdaemon/pip,harrisonfeng/pip,erikrose/pip,tdsmith/pip,dstufft/pip,Ivoz/pip,jasonkying/pip,wkeyword/pip,benesch/pip,graingert/pip,cjerdonek/pip,haridsv/pip,tdsmith/pip,esc/pip,qwcode/pip,jasonkying/pip,mattrobenolt/pip,pfmoore/pip,jamezpolley/pip,alex/pip,jmagnusson/pip,jmagnusson/pip,blarghmatey/pip,jamezpolley/pip,zenlambda/pip,willingc/pip,wkeyword/pip,rbtcollins/pip,zorosteven/pip,alquerci/pip,sigmavirus24/pip,rbtcollins/pip,pypa/pip,mindw/pip | pip/backwardcompat/socket_create_connection.py | pip/backwardcompat/socket_create_connection.py | """
patch for py25 socket to work with http://pypi.python.org/pypi/ssl/
copy-paste from py2.6 stdlib socket.py
https://gist.github.com/zed/1347055
"""
import socket
import sys
# Reuse the stdlib sentinel when it exists; otherwise a private unique object.
_GLOBAL_DEFAULT_TIMEOUT = getattr(socket, '_GLOBAL_DEFAULT_TIMEOUT', object())


def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None):
    """Connect to *address* (a ``(host, port)`` tuple) and return the socket.

    Tries every address returned by ``getaddrinfo`` in order, applying
    *timeout* (unless left at the global default sentinel) and binding to
    *source_address* when given.  The first successful socket is returned;
    if every attempt fails, the last socket error is re-raised.
    """
    host, port = address
    last_error = None
    for family, kind, proto, _canonname, sockaddr in socket.getaddrinfo(
            host, port, 0, socket.SOCK_STREAM):
        candidate = None
        try:
            candidate = socket.socket(family, kind, proto)
            if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                candidate.settimeout(timeout)
            if source_address:
                candidate.bind(source_address)
            candidate.connect(sockaddr)
            return candidate
        except socket.error:
            # Remember the failure, release the half-made socket, and try
            # the next resolved address.  (sys.exc_info keeps this py2/3
            # compatible.)
            last_error = sys.exc_info()[1]
            if candidate is not None:
                candidate.close()
    if last_error is not None:
        raise last_error
    raise socket.error("getaddrinfo returns an empty list")
| """
patch for py25 socket to work with http://pypi.python.org/pypi/ssl/
copy-paste from py2.6 stdlib socket.py
https://gist.github.com/zed/1347055
"""
import socket
_GLOBAL_DEFAULT_TIMEOUT = getattr(socket, '_GLOBAL_DEFAULT_TIMEOUT', object())
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used.
"""
host, port = address
err = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except socket.error:
err = sys.exc_info()[1]
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise socket.error("getaddrinfo returns an empty list")
| mit | Python |
8d3ffefe6c72f5d143d6b5a1bba6177f03bfc4f2 | clean up, do ties | msullivan/advent-of-code,msullivan/advent-of-code,msullivan/advent-of-code | 2018/23b.py | 2018/23b.py | #!/usr/bin/env python2
# z3 might have python 3 bindings but they weren't on my laptop from
# the install of z3 left over from grad school so back to python 2 it
# is!
from __future__ import print_function
from z3 import *
import sys
sys.setrecursionlimit(3000)
from collections import defaultdict, deque
import sys
import re
#from dataclasses import dataclass
def extract(s):
    """Return every (possibly negative) integer appearing in *s*, in order."""
    return list(map(int, re.findall(r'-?\d+', s)))
def dist(x, y):
    """Manhattan (L1) distance between 3-D points *x* and *y*."""
    return sum(abs(x[i] - y[i]) for i in range(3))
def z3_abs(x):
    # Symbolic absolute value: builds a z3 If-expression (works on z3 Ints,
    # unlike Python's abs()).
    return If(x >= 0,x,-x)
def z3_dist(x, y):
    # Symbolic Manhattan distance between two triples of z3 Ints.
    return z3_abs(x[0] - y[0]) + z3_abs(x[1] - y[1]) + z3_abs(x[2] - y[2])
def main(args):
    # Parse one nanobot per stdin line into (radius, (x, y, z)).
    data = [extract(s.strip()) for s in sys.stdin]
    data = [(x[3], tuple(x[:-1])) for x in data]
    # Part 1: count bots in range of the strongest bot (max sorts on the
    # radius first, since it is the tuple's first element).
    m = max(data)
    in_range = [x for x in data if dist(x[1], m[1]) <= m[0]]
    print(len(in_range))
    # Part 2: ask z3 for the point in range of the most bots.
    x = Int('x')
    y = Int('y')
    z = Int('z')
    orig = (x, y, z)
    cost_expr = x * 0   # symbolic zero to seed the sum of If-terms
    for r, pos in data:
        cost_expr += If(z3_dist(orig, pos) <= r, 1, 0)
    opt = Optimize()
    print("let's go")
    opt.maximize(cost_expr)
    # I didn't do this step in my initial #2 ranking solution but I
    # suppose you should.
    # z3 does them lexicographically by default.
    opt.minimize(z3_dist((0,0,0), (x, y, z)))
    opt.check()
    model = opt.model()
    print(model)
    # The puzzle answer: Manhattan distance from the origin to that point.
    print(dist((0,0,0), (model[x].as_long(), model[y].as_long(), model[z].as_long())))
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| #!/usr/bin/env python2
# z3 might have python 3 bindings but they weren't on my laptop from
# the install of z3 left over from grad school so back to python 2 it
# is!
from __future__ import print_function
from z3 import *
import sys
sys.setrecursionlimit(3000)
from collections import defaultdict, deque
import sys
import re
#from dataclasses import dataclass
def extract(s):
return [int(x) for x in re.findall(r'-?\d+', s)]
def dist(x, y):
return abs(x[0] - y[0]) + abs(x[1] - y[1]) + abs(x[2] - y[2])
def z3_abs(x):
return If(x >= 0,x,-x)
def z3_dist(x, y):
return z3_abs(x[0] - y[0]) + z3_abs(x[1] - y[1]) + z3_abs(x[2] - y[2])
def main(args):
data = [extract(s.strip()) for s in sys.stdin]
data = [(x[3], tuple(x[:-1])) for x in data]
m = max(data)
in_range = [x for x in data if dist(x[1], m[1]) <= m[0]]
print(len(in_range))
x = Int('x')
y = Int('y')
z = Int('z')
orig = (x, y, z)
cost_expr = x * 0
for r, pos in data:
cost_expr += If(z3_dist(orig, pos) <= r, 1, 0)
opt = Optimize()
cost = Int('cost')
opt.add(cost == cost_expr)
print("let's go")
h = opt.maximize(cost)
opt.check()
opt.lower(h)
model = opt.model()
print(model)
print(dist((0,0,0), (model[x].as_long(), model[y].as_long(), model[z].as_long())))
if __name__ == '__main__':
sys.exit(main(sys.argv))
| mit | Python |
11e6879d5f6d35687dd2b3d8053c406dc49f1d75 | update version | gbrammer/grizli | grizli/version.py | grizli/version.py | # git describe --tags
__version__ = "0.6.0-52-gdb6bc8c"
| # git describe --tags
__version__ = "0.6.0-46-g0580c1c"
| mit | Python |
5abf5b657cd5ae7cfa94f3085fbe1121f7ea49c1 | Improve cli | tim-sueberkrueb/grout | grout/cli.py | grout/cli.py | # -*- coding: utf-8 -*-
import os
import click
from typing import Tuple
import grout.core
import grout.core.backend
@click.command()
@click.option('--project', type=click.Path(dir_okay=False, exists=True), help='Path to project file')
@click.option('--skip', default=None, multiple=True, help='Skip a job by its name')
@click.option('--skip-environment', flag_value=True, help='Skip environment setup')
@click.option('--backend', default='lxc', type=click.Choice(('lxc', 'docker',)), help='Container backend to use')
@click.option('--name', help='Container backend name')
@click.option('--image', help='Container backend image')
@click.option('--arch', help='Container backend arch')
# BUG FIX: this flag previously used flag_value=False, so passing
# --persistent still left ``persistent`` falsy and the container was always
# created ephemeral.  A flag that enables persistence must yield True when
# present (cf. the --skip-environment flag above).
@click.option('--persistent', flag_value=True, help='Set container persistent')
def cli(project: str = None, skip: Tuple[str] = None, skip_environment: bool = False,
        backend: str = 'lxc', name: str = None, image: str = None,
        arch: str = None, persistent: bool = False):
    """Grout a simple tool and library for continuous, clean builds.

    Grout was primarily created to be used in combination with Snapcraft.
    """
    cwd = os.getcwd()
    # Default to ./project.yaml and fail early with clear messages when the
    # declaration file or the requested backend cannot be found.
    if not project:
        project = os.path.join(cwd, 'project.yaml')
    if not os.path.isfile(project):
        raise click.ClickException('Project file "{}" does not exist.'.format(project))
    if not grout.core.backend.type_exists(backend):
        raise click.ClickException('The requested container backend "{}" could not be found.'.format(backend))
    backend_options = {
        'name': name,
        'image': image,
        'arch': arch,
        'ephemeral': not persistent
    }
    grout.core.run_declarative(
        project, backend_type=backend, backend_options=backend_options,
        skip_jobs=skip, skip_environment=skip_environment
    )
# Allow running the module directly as a script.
if __name__ == '__main__':
    cli()
| # -*- coding: utf-8 -*-
import os
import click
from typing import Tuple
import grout.core
import grout.core.backend
@click.command()
@click.option('--path', default=None, help='Path to project')
@click.option('--project-file', default='project.yaml', help='Project declaration file')
@click.option('--skip', 'skip_jobs', default=None, multiple=True, help='Skip a job by its name')
@click.option('--skip-environment', 'skip_environment', flag_value=True, help='Skip environment setup')
@click.option('--backend', 'backend_type', default='lxc', help='Container backend to use')
@click.option('--name', 'backend_name', default=None, help='Container backend name')
@click.option('--image', 'backend_image', default=None, help='Container backend image')
@click.option('--arch', 'backend_arch', default=None, help='Container backend arch')
@click.option('--persistent', 'backend_ephemeral', flag_value=False, help='Set container persistent')
def cli(path: str = None, project_file: str = 'project.yaml',
        skip_jobs: Tuple[str] = None, skip_environment: bool = False,
        backend_type: str = 'lxc', backend_name: str = None, backend_image: str = None,
        backend_arch: str = None, backend_ephemeral: bool = True):
    """Grout a simple tool and library for continuous, clean builds.
    Grout was primarily created to be used in combination with Snapcraft.
    """
    # '--persistent' stores False into backend_ephemeral (flag -> persistent
    # container); without the flag the container stays ephemeral.
    cwd = os.getcwd()
    # Project directory defaults to the caller's cwd.
    if not path:
        path = cwd
    filepath = os.path.join(path, project_file)
    # NOTE(review): filepath is not checked for existence before use; a
    # missing project file surfaces later inside run_declarative.
    if not grout.core.backend.type_exists(backend_type):
        raise click.ClickException('The requested container backend "{}" could not be found.'.format(backend_type))
    backend_options = {
        'name': backend_name,
        'image': backend_image,
        'arch': backend_arch,
        'ephemeral': backend_ephemeral
    }
    grout.core.run_declarative(
        filepath, backend_type=backend_type, backend_options=backend_options,
        skip_jobs=skip_jobs, skip_environment=skip_environment
    )
# Allow running the module directly as a script.
if __name__ == '__main__':
    cli()
| mit | Python |
131cdaf337e3bfa6524443668e67835c3ecd9e80 | Bump version to 0.3.0 | eddieantonio/paperpal,eddieantonio/paperpal | paperpal/__init__.py | paperpal/__init__.py | #!/usr/bin/env python
# -*- encoding: UTF-8 -*-
# Copyright 2016 Eddie Antonio Santos <easantos@ualberta.ca>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tool for paper management with Zotero.
"""
__version__ = '0.3.0'
__all__ = ['Zotero', 'ZoteroError']
from .zotero import Zotero, ZoteroError
| #!/usr/bin/env python
# -*- encoding: UTF-8 -*-
# Copyright 2016 Eddie Antonio Santos <easantos@ualberta.ca>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tool for paper management with Zotero.
"""
__version__ = '0.2.0'
__all__ = ['Zotero', 'ZoteroError']
from .zotero import Zotero, ZoteroError
| apache-2.0 | Python |
0db6ccd51ed354ea1bca8f7bbb71436e5984ecd4 | fix bug with exception handling in config | mitin123/PyVPN | src/config.py | src/config.py | import yaml
from vpnexcept import VPNException
class InvalidConfigException(VPNException):
    """Raised when a configuration file cannot be read or fails validation."""
    pass
class VPNConfig(object):
    """Base class for YAML-backed configuration objects.

    Subclasses declare two double-underscore class attributes:
      __default_config_file -- path used when none is given to __init__
      __conf_validators     -- iterable of predicates run by validate()

    The parsed YAML document is exposed through attribute access,
    item access ([]), and iteration over its top-level keys.
    """

    def __init__(self, path_to_config=None):
        # Fall back to the subclass-declared default path.  The previous code
        # read self.__class__.__default_config_file, which the compiler
        # mangles to _VPNConfig__default_config_file and therefore never
        # matched the attribute declared on a subclass.
        self.__path = path_to_config or self._class_attr('__default_config_file')
        self.read_config()
        #self.validate()

    def _class_attr(self, name):
        """Return the double-underscore class attribute *name* (e.g.
        '__conf_validators') as mangled by its defining class, searching the
        MRO so declarations on subclasses are found.

        Raises AttributeError when no class in the MRO declares it.
        """
        for klass in type(self).__mro__:
            # Python mangles __x inside class C to _C__x, with leading
            # underscores of the class name stripped.
            mangled = '_%s%s' % (klass.__name__.lstrip('_'), name)
            if mangled in vars(klass):
                return getattr(klass, mangled)
        raise AttributeError(name)

    def read_config(self):
        """Load and parse the YAML file into self.config.

        Raises InvalidConfigException on any I/O or parse error.
        """
        try:
            with open(self.__path) as config_file:
                # safe_load: a config file must not be able to instantiate
                # arbitrary Python objects (yaml.load without a Loader can).
                self.config = yaml.safe_load(config_file)
        except Exception:
            raise InvalidConfigException("configuration file reading failed")

    def validate(self):
        """Run every subclass-declared validator against the parsed config."""
        for validator in self._class_attr('__conf_validators'):
            if not validator(self.config):
                raise InvalidConfigException("validation failed")

    def __getattr__(self, attr):
        # Only reached when normal lookup fails; exposes top-level config
        # keys as attributes.  Refuse to recurse when 'config' itself is
        # missing (e.g. read_config raised before assigning it).
        if attr == 'config':
            raise AttributeError(attr)
        if attr in self.config:
            return self.config[attr]
        # object defines no __getattr__, so the old super() delegation only
        # produced a confusing AttributeError; raise a precise one instead.
        raise AttributeError(attr)

    def __getitem__(self, item):
        # NOTE: returns None (not KeyError) for unknown keys -- kept for
        # backward compatibility with existing callers.
        if item in self.config:
            return self.config[item]

    def __setitem__(self, key, value):
        self.config[key] = value

    def __iter__(self):
        return iter(self.config)
class VPNClientConfig(VPNConfig):
    """Client-side VPN configuration.

    Reads /etc/pyvpn/client.conf by default; the parsed document must
    provide both a ``subnet`` and a ``netmask`` key to validate.
    """
    __default_config_file = "/etc/pyvpn/client.conf"
    __conf_validators = [
        lambda conf: "subnet" in conf,
        lambda conf: "netmask" in conf,
    ]
class VPNServerConfig(VPNConfig):
    """Server-side VPN configuration.

    Reads /etc/pyvpn/server.conf by default; the parsed document must
    provide both a ``subnet`` and a ``netmask`` key to validate.
    """
    __default_config_file = "/etc/pyvpn/server.conf"
    __conf_validators = [
        lambda conf: "subnet" in conf,
        lambda conf: "netmask" in conf,
    ]
| import yaml
from vpnexcept import VPNException
class InvalidConfigException(VPNException):
    """Raised when a configuration file cannot be read or fails validation."""
    pass
class VPNConfig(object):
    """Base class for YAML-backed configuration objects; subclasses declare
    __default_config_file and __conf_validators class attributes."""
    def __init__(self, path_to_config=None):
        # NOTE(review): '__default_config_file' written here is name-mangled
        # to _VPNConfig__default_config_file, so an attribute declared on a
        # subclass (mangled with the subclass name) is never found.
        self.__path = path_to_config or self.__class__.__default_config_file
        self.read_config()
        #self.validate()
    def read_config(self):
        try:
            with open(self.__path) as config_file:
                # NOTE(review): yaml.load without an explicit Loader can
                # construct arbitrary Python objects from the file.
                self.config = yaml.load(config_file)
        except Exception:
            # Build-then-raise style: the message is attached as a .msg
            # attribute instead of being passed to the constructor.
            # NOTE(review): assumes VPNException readers look at .msg -- confirm.
            ex = InvalidConfigException()
            ex.msg = "configuration file reading failed"
            raise ex
    def validate(self):
        # Same name-mangling caveat as __init__ applies to __conf_validators.
        for validator in self.__class__.__conf_validators:
            if not validator(self.config):
                ex = InvalidConfigException()
                ex.msg = "wtf"  # placeholder message
                raise ex
    def __getattr__(self, attr):
        # Only reached when normal lookup fails; exposes top-level config
        # keys as attributes.
        if attr in self.config:
            return self.config[attr]
        else:
            # NOTE(review): object defines no __getattr__, so this raises
            # AttributeError('__getattr__') rather than AttributeError(attr).
            return super(VPNConfig, self).__getattr__(attr)
    def __getitem__(self, item):
        # Returns None (not KeyError) for unknown keys.
        if item in self.config:
            return self.config[item]
    def __setitem__(self, key, value):
        self.config[key] = value
    def __iter__(self):
        return iter(self.config)
class VPNClientConfig(VPNConfig):
    """Client configuration; defaults to /etc/pyvpn/client.conf."""
    __default_config_file = "/etc/pyvpn/client.conf"
    __conf_validators = [
        lambda c: "subnet" in c,
        lambda c: "netmask" in c,
    ]
class VPNServerConfig(VPNConfig):
    """Server configuration; defaults to /etc/pyvpn/server.conf."""
    __default_config_file = "/etc/pyvpn/server.conf"
    __conf_validators = [
        lambda c: "subnet" in c,
        lambda c: "netmask" in c,
    ]
| apache-2.0 | Python |
4c34fb65d757e2dcfabf4f8b532a712f59eb0663 | Support JSON encoding of a Link object | thisissoon/Flask-HAL,thisissoon/Flask-HAL | flask_hal/link.py | flask_hal/link.py | #!/usr/bin/env python
# encoding: utf-8
"""
flask_hal.link
==============
Implements the ``HAL`` Link specification.
"""
# Standard Libs
import json
# Attributes (besides "href") that the HAL spec allows on a link object.
VALID_LINK_ATTRS = [
    'name',
    'title',
    'type',
    'deprecation',
    'profile',
    'templated',
    'hreflang'
]


class Link(object):
    """A single ``HAL`` specification ``_links`` entry.

    Example:
        >>> from flask_hal.link import Link
        >>> l = Link('foo', 'http://foo.com/bar')
        >>> print l.to_json()
        ... '{"foo": {"href": "http://foo.com/bar"}}'
    """

    def __init__(self, rel, href, **kwargs):
        """Create a new ``Link``.

        Args:
            rel (str): The links ``rel`` or name
            href (str): The URI to the resource

        Keyword Args:
            Any attribute listed in ``VALID_LINK_ATTRS`` (name, title, type,
            deprecation, profile, templated, hreflang); unrecognised keyword
            arguments are silently ignored.
        """
        self.rel = rel
        self.href = href
        # Copy only recognised HAL attributes onto the instance (EAFP).
        for key in VALID_LINK_ATTRS:
            try:
                setattr(self, key, kwargs.pop(key))
            except KeyError:
                continue

    def to_json(self):
        """Return the ``JSON`` encoded representation of this link.

        Returns:
            str: a ``{rel: {"href": ..., ...}}`` JSON object
        """
        # "href" is the only mandatory member; optional attributes follow in
        # the canonical VALID_LINK_ATTRS order.
        body = dict(href=self.href)
        body.update(
            (key, getattr(self, key))
            for key in VALID_LINK_ATTRS
            if hasattr(self, key)
        )
        return json.dumps({self.rel: body})
| #!/usr/bin/env python
# encoding: utf-8
"""
flask_hal.link
==============
Implements the ``HAL`` Link specification.
"""
# Attributes (besides "href") that the HAL spec allows on a link object.
VALID_LINK_ATTRS = [
    'name',
    'title',
    'type',
    'deprecation',
    'profile',
    'templated',
    'hreflang'
]
class Link(object):
    """Build ``HAL`` specification ``_links`` object.
    Example:
        >>> from flask_hal.link import Link
        >>> l = Link('foo', 'http://foo.com/bar')
        >>> print l.to_json()
        ... '{"foo": {"href": "http://foo.com/bar"}}'
        >>> l.title = 'Foo'
        >>> print l.to_json()
        ... '{"foo": {"href": "http://foo.com/bar", "name": "Foo"}}'
    """
    def __init__(self, rel, href, **kwargs):
        """Initialise a new ``Link`` object.
        Args:
            rel (str): The links ``rel`` or name
            href (str): The URI to the resource
        Keyword Args:
            name (str): The links name attribute, optional
            title (str): The links title attribute, optional
            type (str): The links type attribute, optional
            deprecation (str): The deprecation attribute, optional
            profile (str): The profile attribute, optional
            templated (bool): The templated attribute, optional
            hreflang (str): The hreflang attribute, optional
        """
        self.rel = rel
        self.href = href
        # Copy only recognised HAL attributes; unknown kwargs are ignored.
        for attr in VALID_LINK_ATTRS:
            if attr in kwargs:
                setattr(self, attr, kwargs.pop(attr))
    def to_json(self):
        """Returns the ``JSON`` encoded representation of the ``Link`` object.
        """
        # NOTE(review): still a stub -- returns None despite the docstring
        # promising a JSON string (the class docstring examples cannot work
        # until this is implemented).
        pass
| unlicense | Python |
49b0b7e3b00672d17167d1cfcf974414709bb647 | Fix NullLogger | thombashi/pytablewriter | pytablewriter/_logger/_null_logger.py | pytablewriter/_logger/_null_logger.py | class NullLogger:
    # No-op stand-in for a loguru-style logger: each method mirrors the real
    # logger's signature and silently discards its arguments, so callers can
    # disable logging without guarding every call site.
    level_name = None  # real loggers expose the active level name; None = off
    def remove(self, handler_id=None):  # pragma: no cover
        pass
    def add(self, sink, **kwargs):  # pragma: no cover
        pass
    def disable(self, name):  # pragma: no cover
        pass
    def enable(self, name):  # pragma: no cover
        pass
    def critical(self, __message, *args, **kwargs):  # pragma: no cover
        pass
    def debug(self, __message, *args, **kwargs):  # pragma: no cover
        pass
    def error(self, __message, *args, **kwargs):  # pragma: no cover
        pass
    def exception(self, __message, *args, **kwargs):  # pragma: no cover
        pass
    def info(self, __message, *args, **kwargs):  # pragma: no cover
        pass
    def log(self, __level, __message, *args, **kwargs):  # pragma: no cover
        pass
    def success(self, __message, *args, **kwargs):  # pragma: no cover
        pass
    def trace(self, __message, *args, **kwargs):  # pragma: no cover
        pass
    def warning(self, __message, *args, **kwargs):  # pragma: no cover
        pass
| class NullLogger:
    # No-op stand-in for a loguru-style logger: every method accepts the real
    # logger's arguments and silently discards them.
    level_name = None  # real loggers expose the active level name; None = off
    def remove(self, handler_id=None):  # pragma: no cover
        pass
    # NOTE(review): loguru's add() takes a positional sink argument; this
    # keyword-only signature raises TypeError when called like the real one.
    def add(self, **kwargs):  # pragma: no cover
        pass
    def disable(self, name):  # pragma: no cover
        pass
    def enable(self, name):  # pragma: no cover
        pass
    def critical(self, __message, *args, **kwargs):  # pragma: no cover
        pass
    def debug(self, __message, *args, **kwargs):  # pragma: no cover
        pass
    def error(self, __message, *args, **kwargs):  # pragma: no cover
        pass
    def exception(self, __message, *args, **kwargs):  # pragma: no cover
        pass
    def info(self, __message, *args, **kwargs):  # pragma: no cover
        pass
    def log(self, __level, __message, *args, **kwargs):  # pragma: no cover
        pass
    def success(self, __message, *args, **kwargs):  # pragma: no cover
        pass
    def trace(self, __message, *args, **kwargs):  # pragma: no cover
        pass
    def warning(self, __message, *args, **kwargs):  # pragma: no cover
        pass
| mit | Python |
39813f2b63f9015393636ef91460fddd57e17558 | fix website url in manifest | acsone/server-tools,jobiols/server-tools,open-synergy/server-tools,initOS/server-tools,acsone/server-tools,acsone/server-tools,ddico/server-tools,algiopensource/server-tools,ddico/server-tools,open-synergy/server-tools,jobiols/server-tools,osiell/server-tools,initOS/server-tools,sergiocorato/server-tools,ddico/server-tools,osiell/server-tools,rossasa/server-tools,sergiocorato/server-tools,osiell/server-tools,algiopensource/server-tools,sergiocorato/server-tools,rossasa/server-tools,initOS/server-tools | attachment_metadata/__openerp__.py | attachment_metadata/__openerp__.py | # coding: utf-8
# @ 2015 Valentin CHEMIERE @ Akretion
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
# Odoo manifest for the attachment_metadata module.
{
    'name': 'Attachment Metadata',
    'version': '8.0.1.0.0',
    'author': 'Akretion,Odoo Community Association (OCA)',
    'website': 'http://www.akretion.com/',
    'license': 'AGPL-3',
    'category': 'Generic Modules',
    # No dependencies beyond the Odoo core.
    'depends': [
    ],
    # Views and ACLs loaded on install.
    'data': [
        'views/attachment_view.xml',
        'security/ir.model.access.csv',
    ],
    'installable': True,
    'application': False,
    'images': [],
}
| # coding: utf-8
# @ 2015 Valentin CHEMIERE @ Akretion
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
# Odoo manifest for the attachment_metadata module.
{
    'name': 'Attachment Metadata',
    'version': '8.0.1.0.0',
    'author': 'Akretion,Odoo Community Association (OCA)',
    # NOTE(review): URL lacks a scheme (http://) -- app-store links may break.
    'website': 'www.akretion.com',
    'license': 'AGPL-3',
    'category': 'Generic Modules',
    # No dependencies beyond the Odoo core.
    'depends': [
    ],
    # Views and ACLs loaded on install.
    'data': [
        'views/attachment_view.xml',
        'security/ir.model.access.csv',
    ],
    'installable': True,
    'application': False,
    'images': [],
}
| agpl-3.0 | Python |
2faaea457761063246d05d2e0cd03e172dec4369 | rephrase 408 | ufjfeng/leetcode-jf-soln,ufjfeng/leetcode-jf-soln | python/408_valid_word_abbreviation.py | python/408_valid_word_abbreviation.py | """
Given a non-empty string s and an abbreviation abbr, return whether the string
matches with the given abbreviation.
A string such as "word" contains only the following valid abbreviations:
["word", "1ord", "w1rd", "wo1d", "wor1", "2rd", "w2d", "wo2", "1o1d", "1or1",
"w1r1", "1o2", "2r1", "3d", "w3", "4"]
Notice that only the above abbreviations are valid abbreviations of the string
"word". Any other string is not a valid abbreviation of "word".
Note:
Assume s contains only lowercase letters and abbr contains only lowercase
letters and digits.
Example 1:
Given s = "internationalization", abbr = "i12iz4n":
Return true.
Example 2:
Given s = "apple", abbr = "a2e":
Return false.
"""
class Solution(object):
    """LeetCode 408: decide whether *abbr* is a valid abbreviation of *word*."""

    def validWordAbbreviation(self, word, abbr):
        """
        :type word: str
        :type abbr: str
        :rtype: bool
        """
        i = 0  # next position to match in word
        j = 0  # current position in abbr
        n = len(abbr)
        while j < n:
            ch = abbr[j]
            if '0' <= ch <= '9':
                # A count starting with '0' (e.g. "a01") is invalid.
                if ch == '0':
                    return False
                # Consume the whole digit run as one skip count.
                count = 0
                while j < n and '0' <= abbr[j] <= '9':
                    count = count * 10 + int(abbr[j])
                    j += 1
                i += count
            else:
                # Literal character: must match the corresponding word char.
                if i >= len(word) or word[i] != ch:
                    return False
                i += 1
                j += 1
        # Valid iff the abbreviation consumed exactly the whole word.
        return i == len(word)
assert Solution().validWordAbbreviation("a", "2") == False
assert Solution().validWordAbbreviation("word", "w2d") == True
assert Solution().validWordAbbreviation("internationalization", "i12iz4n") == True
assert Solution().validWordAbbreviation("apple", "a3e") == True
assert Solution().validWordAbbreviation("apple", "a2e") == False
print("408 all test cases passed")
| """
Given a non-empty string s and an abbreviation abbr, return whether the string
matches with the given abbreviation.
A string such as "word" contains only the following valid abbreviations:
["word", "1ord", "w1rd", "wo1d", "wor1", "2rd", "w2d", "wo2", "1o1d", "1or1",
"w1r1", "1o2", "2r1", "3d", "w3", "4"]
Notice that only the above abbreviations are valid abbreviations of the string
"word". Any other string is not a valid abbreviation of "word".
Note:
Assume s contains only lowercase letters and abbr contains only lowercase
letters and digits.
Example 1:
Given s = "internationalization", abbr = "i12iz4n":
Return true.
Example 2:
Given s = "apple", abbr = "a2e":
Return false.
"""
class Solution(object):
    def validWordAbbreviation(self, word, abbr):
        """
        :type word: str
        :type abbr: str
        :rtype: bool
        """
        # Single pass over abbr.  Invariant: `loc` is the index in `word` of
        # the last character matched so far (-1 before anything matches);
        # digit runs are buffered in `digits` and applied when a letter (or
        # the end of abbr) is reached.
        nums = set([str(i) for i in range(10)])
        digits = []
        loc = -1
        for c in abbr:
            if c in nums:
                # A skip count may not start with '0' ("a01" is invalid).
                if c == '0' and digits == []:
                    return False
                digits.append(c)
            else:
                # Flush any pending count before matching the literal letter.
                if digits:
                    loc += int("".join(digits))
                    digits = []
                loc += 1
                if loc >= len(word):
                    return False
                if c != word[loc]:
                    return False
        # Apply a trailing count; valid iff exactly the whole word was consumed.
        if digits:
            loc += int("".join(digits))
        return loc == len(word) - 1
# Import-time smoke tests; any failure raises AssertionError.
assert Solution().validWordAbbreviation("a", "2") == False
assert Solution().validWordAbbreviation("word", "w2d") == True
assert Solution().validWordAbbreviation("internationalization", "i12iz4n") == True
assert Solution().validWordAbbreviation("apple", "a3e") == True
assert Solution().validWordAbbreviation("apple", "a2e") == False
print("all cases passed")
| mit | Python |
200f3897f0b040775a5abc5c1f6a74276f6aea7a | Update local_settings.py | andymboyle/gdocs_importer | gdocs_importer/local_settings.py | gdocs_importer/local_settings.py | SECRET_KEY = 'secret_key_goes_here'
| SECRET_KEY = 'o1v*eup$vkkldvomosh(#p87x)l)vin--$nfut!5z^6dksiwi6'
| mit | Python |
f4e36132448a4a55bff5660b3f5a669e0095ecc5 | Fix up some issues with supporting schema migration | wwitzel3/awx,wwitzel3/awx,snahelou/awx,wwitzel3/awx,snahelou/awx,snahelou/awx,snahelou/awx,wwitzel3/awx | awx/main/models/activity_stream.py | awx/main/models/activity_stream.py | # Copyright (c) 2013 AnsibleWorks, Inc.
# All Rights Reserved.
from django.db import models
from django.utils.translation import ugettext_lazy as _
class ActivityStream(models.Model):
    '''
    Model used to describe activity stream (audit) events
    '''
    class Meta:
        app_label = 'main'
    # (stored key, human-readable label) pairs for the 'operation' column.
    # NOTE(review): 'disaassociate' is misspelled and, at 13 characters,
    # exceeds operation's max_length=9 -- storing it would be truncated or
    # rejected depending on the database.  Fixing it needs a data migration.
    OPERATION_CHOICES = [
        ('create', _('Entity Created')),
        ('update', _("Entity Updated")),
        ('delete', _("Entity Deleted")),
        ('associate', _("Entity Associated with another Entity")),
        ('disaassociate', _("Entity was Disassociated with another Entity"))
    ]
    # Acting user; nullable with SET_NULL so audit rows survive user deletion.
    user = models.ForeignKey('auth.User', null=True, on_delete=models.SET_NULL, related_name='activity_stream')
    operation = models.CharField(max_length=9, choices=OPERATION_CHOICES)
    timestamp = models.DateTimeField(auto_now_add=True)
    # Free-text description of the change (format decided by callers).
    changes = models.TextField(blank=True)
    # Generic reference to the primary affected object: id + type name.
    object1_id = models.PositiveIntegerField(db_index=True)
    object1_type = models.TextField()
    # Second object is optional (nullable), used for relationship events.
    object2_id = models.PositiveIntegerField(db_index=True, null=True)
    object2_type = models.TextField(null=True, blank=True)
    object_relationship_type = models.TextField(blank=True)
| # Copyright (c) 2013 AnsibleWorks, Inc.
# All Rights Reserved.
from django.db import models
class ActivityStream(models.Model):
    '''
    Model used to describe activity stream (audit) events
    '''
    # NOTE(review): '_' (ugettext) is used below but not imported anywhere in
    # this file as shown -- importing this module raises NameError.
    # 'disaassociate' is also misspelled and longer than operation's
    # max_length=9.
    OPERATION_CHOICES = [
        ('create', _('Entity Created')),
        ('update', _("Entity Updated")),
        ('delete', _("Entity Deleted")),
        ('associate', _("Entity Associated with another Entity")),
        ('disaassociate', _("Entity was Disassociated with another Entity"))
    ]
    # Acting user; nullable with SET_NULL so audit rows survive user deletion.
    user = models.ForeignKey('auth.User', null=True, on_delete=models.SET_NULL)
    operation = models.CharField(max_length=9, choices=OPERATION_CHOICES)
    timestamp = models.DateTimeField(auto_now_add=True)
    changes = models.TextField(blank=True)
    # Generic references to the affected object(s): id + type name.
    object1_id = models.PositiveIntegerField(db_index=True)
    object1_type = models.TextField()
    # NOTE(review): unlike the object1 columns, these are NOT nullable here,
    # yet a second object only exists for relationship events -- confirm.
    object2_id = models.PositiveIntegerField(db_index=True)
    object2_type = models.TextField()
    object_relationship_type = models.TextField()
| apache-2.0 | Python |
052ec48b74dc25750db055ea5ebf677c3217572a | set bill amount | ioO/billjobs | billjobs/tests/factories.py | billjobs/tests/factories.py | import factory
import factory.fuzzy
import factory.django
from django.contrib.auth.models import User
from django.db.models.signals import post_save
# factory_boy factories for building test fixtures.  post_save is muted on
# the user/profile factories -- presumably to stop a profile-creating signal
# handler from fighting the explicit SubFactory/RelatedFactory wiring below;
# confirm against billjobs' signal handlers.
@factory.django.mute_signals(post_save)
class UserProfileFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = 'billjobs.UserProfile'
    billing_address = factory.Faker('address')
    # Link back to a UserFactory; profile=None breaks the circular
    # RelatedFactory chain.
    user = factory.SubFactory(
        'billjobs.tests.factories.UserFactory', profile=None)
@factory.django.mute_signals(post_save)
class UserFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = User
        django_get_or_create = ('username',)
    username = 'steve'
    password = 'gates'
    first_name = 'Steve'
    last_name = 'Gates'
    email = 'steve.gates@billjobs.org'
    userprofile = factory.RelatedFactory(UserProfileFactory, 'user')
# Staff/superuser variant of UserFactory.
class SuperUserFactory(UserFactory):
    username = 'bill'
    password = 'jobs'
    first_name = 'Bill'
    last_name = 'Jobs'
    email = 'bill.jobs@billjobs.org'
    is_staff = True
    is_superuser = True
class ServiceFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = 'billjobs.Service'
    reference = factory.Sequence(lambda n: 'SE%03d' % n)  # SE000, SE001, ...
    price = factory.fuzzy.FuzzyInteger(100, 200, 10)
class BillFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = 'billjobs.Bill'
        django_get_or_create = ('user',)
    user = factory.SubFactory(UserFactory)
    # Random amount per instance; tests needing a fixed value should pass
    # amount=... explicitly.
    amount = factory.fuzzy.FuzzyInteger(100, 200, 10)
| import factory
import factory.fuzzy
import factory.django
from django.contrib.auth.models import User
from django.db.models.signals import post_save
# factory_boy factories for building test fixtures.  post_save is muted on
# the user/profile factories so the factories' explicit wiring below is not
# disturbed by signal handlers -- confirm against billjobs' signal handlers.
@factory.django.mute_signals(post_save)
class UserProfileFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = 'billjobs.UserProfile'
    billing_address = factory.Faker('address')
    # Link back to a UserFactory; profile=None breaks the circular chain.
    user = factory.SubFactory(
        'billjobs.tests.factories.UserFactory', profile=None)
@factory.django.mute_signals(post_save)
class UserFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = User
        django_get_or_create = ('username',)
    username = 'steve'
    password = 'gates'
    first_name = 'Steve'
    last_name = 'Gates'
    email = 'steve.gates@billjobs.org'
    userprofile = factory.RelatedFactory(UserProfileFactory, 'user')
# Staff/superuser variant of UserFactory.
class SuperUserFactory(UserFactory):
    username = 'bill'
    password = 'jobs'
    first_name = 'Bill'
    last_name = 'Jobs'
    email = 'bill.jobs@billjobs.org'
    is_staff = True
    is_superuser = True
class ServiceFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = 'billjobs.Service'
    reference = factory.Sequence(lambda n: 'SE%03d' % n)  # SE000, SE001, ...
    price = factory.fuzzy.FuzzyInteger(100, 200, 10)
class BillFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = 'billjobs.Bill'
        django_get_or_create = ('user',)
    user = factory.SubFactory(UserFactory)
| mit | Python |
681fa43db08194fae4973c300c82c3c74ac26406 | remove excess code | jpbottaro/anna | anna/main.py | anna/main.py | """Main entry point for any experiments."""
import os
import sys
import dataset.reuters21578.parser as data
import nlp.utils as nlp
from evaluation.mlc import evaluate
from model.binary_classifier import BinaryClassifierLearner as Learner
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: main.py DATA_FOLDER")
exit(1)
# Resolve data folder
data_dir = os.path.abspath(sys.argv[1])
# Fetch and preprocess dataset
train_docs, test_docs, unused_docs = data.fetch_and_parse(data_dir)
labels = []
for d in train_docs + test_docs:
for l in d.labels:
if l not in labels:
labels.append(l)
# Create and train model
model = Learner(data_dir, labels, verbose=True)
model.train(train_docs, test_docs=test_docs)
model.save()
# Predict labels for the test set
predicted_docs = model.predict(nlp.clean(test_docs))
# Print evaluation metrics
print(evaluate(test_docs, predicted_docs, labels))
| """Main entry point for any experiments."""
import os
import sys
import dataset.reuters21578.parser as data
import nlp.utils as nlp
from evaluation.mlc import evaluate
from model.binary_classifier import BinaryClassifierLearner as Learner
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: main.py DATA_FOLDER")
exit(1)
# Resolve data folder
data_dir = os.path.abspath(sys.argv[1])
# Fetch and preprocess dataset
train_docs, test_docs, unused_docs = data.fetch_and_parse(data_dir)
labels = []
for d in train_docs + test_docs:
for l in d.labels:
if l not in labels:
labels.append(l)
# Create and train model
model = Learner(data_dir, labels, verbose=True)
model.train(train_docs, test_docs=test_docs)
model.save()
# Predict labels for the test set
predicted_docs = model.predict(nlp.clean(test_docs))
for i in range(2):
test_doc = test_docs[i]
predicted_doc = predicted_docs[i]
print("Text: " + str(test_doc.text[:200]))
print("Expected Labels: " + str(test_doc.labels))
print("Predicted Labels: " + str(predicted_doc.labels))
print()
print(evaluate(test_docs, predicted_docs, labels))
| mit | Python |
0c4163a4847b2de08bbce8c29d036a8f5a6ea12b | increase lengths of username field | avlach/univbris-ocf,avlach/univbris-ocf,avlach/univbris-ocf,avlach/univbris-ocf | src/python/expedient/clearinghouse/users/models.py | src/python/expedient/clearinghouse/users/models.py | '''
Created on Dec 3, 2009
@author: jnaous
'''
from django.db import models
from django.contrib import auth
# Monkey-patch django.contrib.auth: widen User.username's max_length to 255.
# Runs once at import time and mutates the field object in place.
# NOTE(review): the database column must be widened to match -- confirm a
# corresponding schema change is applied on deployment.
auth.models.User._meta.get_field_by_name('username')[0].max_length=255
class UserProfile(models.Model):
    '''
    Additional information about a user.
    @ivar user: the user to whom this UserProfile belongs
    @type user: L{auth.models.User}
    @ivar affiliation: The organization to which the user is affiliated
    @type affiliation: L{str}
    '''
    user = models.ForeignKey(auth.models.User, unique=True)
    affiliation = models.CharField(max_length=100, default="")

    def __unicode__(self):
        # The related User row may be missing or broken; fall back to a
        # label instead of blowing up in admin listings or logs.
        try:
            return "Profile for %s" % self.user
        except Exception:
            # Was a bare 'except:', which also swallowed KeyboardInterrupt
            # and SystemExit; Exception keeps the intended best-effort
            # behaviour without masking interpreter-exit signals.
            return "No user"

    @classmethod
    def get_or_create_profile(cls, user):
        '''
        Gets the user's profile if available or creates one if one doesn't exist
        @param user: the User whose UserProfile to get or create
        @type user: L{auth.models.User}
        @return: user's profile
        @rtype: L{UserProfile}
        '''
        try:
            profile = user.get_profile()
        except UserProfile.DoesNotExist:
            # First access for this user: lazily create an empty profile.
            profile = cls.objects.create(
                user=user,
            )
        return profile
| '''
Created on Dec 3, 2009
@author: jnaous
'''
from django.db import models
from django.contrib import auth
class UserProfile(models.Model):
    '''
    Additional information about a user.
    @ivar user: the user to whom this UserProfile belongs
    @type user: L{auth.models.User}
    @ivar affiliation: The organization to which the user is affiliated
    @type affiliation: L{str}
    '''
    user = models.ForeignKey(auth.models.User, unique=True)
    affiliation = models.CharField(max_length=100, default="")
    def __unicode__(self):
        # Best-effort label: fall back when the related User row is broken.
        try:
            return "Profile for %s" % self.user
        # NOTE(review): bare 'except:' also swallows KeyboardInterrupt /
        # SystemExit; 'except Exception:' would keep the intended fallback.
        except:
            return "No user"
    @classmethod
    def get_or_create_profile(cls, user):
        '''
        Gets the user's profile if available or creates one if one doesn't exist
        @param user: the User whose UserProfile to get or create
        @type user: L{auth.models.User}
        @return: user's profile
        @rtype: L{UserProfile}
        '''
        try:
            profile = user.get_profile()
        except UserProfile.DoesNotExist:
            # First access for this user: lazily create an empty profile.
            profile = cls.objects.create(
                user=user,
            )
        return profile
| bsd-3-clause | Python |
78a02036df6d9472564cf7ffbed7cd002ccc573c | Add new features | jmstaley/sb_blog | models.py | models.py | import datetime
from django.db import models
from django.contrib.auth.models import User
from tagging.fields import TagField
class Category(models.Model):
    """A blog category; ``slug`` is the URL fragment for the category page."""
    # Fixed help_text: it previously said 'Maximum 250 characters' while the
    # field actually allows 255 -- the user-facing hint was wrong.
    title = models.CharField(max_length=255,
                             help_text='Maximum 255 characters')
    slug = models.SlugField(unique=True,
                            help_text='Automatically generated from title')
    description = models.TextField(blank=True)

    class Meta:
        ordering = ['title']
        verbose_name_plural = 'Categories'

    def __unicode__(self):
        return self.title

    def get_absolute_url(self):
        return '/categories/%s' % self.slug
class Entry(models.Model):
    # Publication workflow states stored in 'status'.
    LIVE_STATUS = 1
    DRAFT_STATUS = 2
    STATUS_CHOICES = (
        (LIVE_STATUS, 'Live'),
        (DRAFT_STATUS, 'Draft')
    )
    author = models.ForeignKey(User)
    title = models.CharField(max_length=250)
    excerpt = models.TextField(blank=True)
    body = models.TextField()
    # Callable default: evaluated when the entry is created, not at import.
    pub_date = models.DateTimeField(default=datetime.datetime.now)
    # Slug only needs to be unique among entries sharing a publication date.
    slug = models.SlugField(unique_for_date='pub_date')
    enable_comments = models.BooleanField(default=True)
    featured = models.BooleanField(default=False)
    status = models.IntegerField(choices=STATUS_CHOICES, default=LIVE_STATUS)
    categories = models.ManyToManyField(Category)
    tags = TagField()  # django-tagging free-form tag list
    # NOTE(review): unlike Category, no __unicode__ / Meta ordering here.
| import datetime
from django.db import models
class Category(models.Model):
    # NOTE(review): help text says 250 but max_length is 255 -- one of the
    # two is wrong.
    title = models.CharField(max_length=255,
                             help_text='Maximum 250 characters')
    slug = models.SlugField(unique=True,
                            help_text='Automatically generated from title')
    description = models.TextField(blank=True)
    class Meta:
        ordering = ['title']
        verbose_name_plural = 'Categories'
    def __unicode__(self):
        return self.title
    def get_absolute_url(self):
        return '/categories/%s' % self.slug
class Entry(models.Model):
    title = models.CharField(max_length=250)
    excerpt = models.TextField(blank=True)
    body = models.TextField()
    # Callable default: evaluated per save, not at import time.
    pub_date = models.DateTimeField(default=datetime.datetime.now)
    # Unique only among entries published on the same day.
    slug = models.SlugField(unique_for_date='pub_date')
| mit | Python |
b8c5dc2d6db40ea6c3a0baee6dc67b66006ad76f | Fix node_count default | reactiveops/pentagon,reactiveops/pentagon,reactiveops/pentagon | pentagon/defaults.py | pentagon/defaults.py | from datetime import datetime
class AWSPentagonDefaults(object):
    """Default settings for AWS-backed Pentagon projects, grouped by concern
    (SSH key names, Kubernetes cluster options, VPC layout)."""
    # Names of the SSH keypairs generated for each environment/role.
    ssh = {
        'admin_vpn_key': 'admin-vpn',
        'production_kube_key': 'production-kube',
        'production_private_key': 'production-private',
        'working_kube_key': 'working-kube',
        'working_private_key': 'working-private',
    }
    # Kubernetes cluster defaults -- presumably consumed by kops templates;
    # confirm against the template layer.
    kubernetes = {
        'authorization': {'rbac': {}},
        'kubernetes_version': '1.10.8',
        'master_node_image': '379101102735/debian-stretch-hvm-x86_64-gp2-2018-10-01-66564',
        'master_node_type': 't2.medium',
        'network_cidr': '172.20.0.0/16',
        'network_mask': 24,
        'networking': {'flannel': {'backend': 'vxlan'}},
        # JSON string: IAM policy letting nodes drive the cluster autoscaler.
        'node_additional_policies': '[{"Effect": "Allow","Action": ["autoscaling:DescribeAutoScalingGroups", "autoscaling:DescribeAutoScalingInstances", "autoscaling:DescribeTags", "autoscaling:SetDesiredCapacity", "autoscaling:TerminateInstanceInAutoScalingGroup"],"Resource": "*"}]',
        'node_count': 1,
        'node_root_volume_size': 200,
        'production_third_octet': 16,
        'ssh_key_path': '~/.ssh/id_rsa.pub',
        'third_octet_increment': 1,
        'third_octet': 16,
        'v_log_level': 10,
        'worker_node_image': '379101102735/debian-stretch-hvm-x86_64-gp2-2018-10-01-66564',
        'worker_node_type': 't2.medium',
        'working_third_octet': 24,
    }
    # VPC layout defaults.
    # NOTE: vpc_name is computed once at import time (today's date), not per use.
    vpc = {
        'aws_availability_zone_count': 3,
        'vpc_cidr_base': '172.20',
        'vpc_name': datetime.today().strftime('%Y%m%d'),
    }
| from datetime import datetime
class AWSPentagonDefaults(object):
    """Default settings for AWS-backed Pentagon projects, grouped by concern
    (SSH key names, Kubernetes cluster options, VPC layout)."""
    # Names of the SSH keypairs generated for each environment/role.
    ssh = {
        'admin_vpn_key': 'admin-vpn',
        'production_kube_key': 'production-kube',
        'production_private_key': 'production-private',
        'working_kube_key': 'working-kube',
        'working_private_key': 'working-private',
    }
    # Kubernetes cluster defaults -- presumably consumed by kops templates;
    # confirm against the template layer.
    kubernetes = {
        'authorization': {'rbac': {}},
        'kubernetes_version': '1.10.8',
        'master_node_image': '379101102735/debian-stretch-hvm-x86_64-gp2-2018-10-01-66564',
        'master_node_type': 't2.medium',
        'network_cidr': '172.20.0.0/16',
        'network_mask': 24,
        'networking': {'flannel': {'backend': 'vxlan'}},
        # JSON string: IAM policy letting nodes drive the cluster autoscaler.
        'node_additional_policies': '[{"Effect": "Allow","Action": ["autoscaling:DescribeAutoScalingGroups", "autoscaling:DescribeAutoScalingInstances", "autoscaling:DescribeTags", "autoscaling:SetDesiredCapacity", "autoscaling:TerminateInstanceInAutoScalingGroup"],"Resource": "*"}]',
        'node_count': 3,
        'node_root_volume_size': 200,
        'production_third_octet': 16,
        'ssh_key_path': '~/.ssh/id_rsa.pub',
        'third_octet_increment': 1,
        'third_octet': 16,
        'v_log_level': 10,
        'worker_node_image': '379101102735/debian-stretch-hvm-x86_64-gp2-2018-10-01-66564',
        'worker_node_type': 't2.medium',
        'working_third_octet': 24,
    }
    # VPC layout defaults.
    # NOTE: vpc_name is computed once at import time (today's date), not per use.
    vpc = {
        'aws_availability_zone_count': 3,
        'vpc_cidr_base': '172.20',
        'vpc_name': datetime.today().strftime('%Y%m%d'),
    }
| apache-2.0 | Python |
20604001e280a445d7c25bac6eb31b1f0512c20f | Fix argv handling in Python transitive closure example | lyogavin/spark,caneGuy/spark,esi-mineset/spark,cloud-fan/spark,jrshust/spark,ddna1021/spark,WeichenXu123/spark,WindCanDie/spark,ron8hu/spark,pronix/spark,kevinyu98/spark,actuaryzhang/spark,facaiy/spark,stanzhai/spark,wzhfy/spark,tejasapatil/spark,jrshust/spark,metamx/spark,eyalfa/spark,akopich/spark,jrshust/spark,andrewor14/spark,HyukjinKwon/spark,gengliangwang/spark,andrewor14/iolap,akopich/spark,someorz/spark,ptkool/spark,wzhfy/spark,pgandhi999/spark,SnappyDataInc/spark,someorz/spark,loneknightpy/spark,wgpshashank/spark,debugger87/spark,saltstar/spark,taroplus/spark,huang1900/spark,cin/spark,tejasapatil/spark,dongjoon-hyun/spark,loneknightpy/spark,ron8hu/spark,shuangshuangwang/spark,kimoonkim/spark,lyogavin/spark,akopich/spark,saltstar/spark,1haodian/spark,BryanCutler/spark,caneGuy/spark,liutang123/spark,ptkool/spark,jianran/spark,wangmiao1981/spark,cin/spark,wgpshashank/spark,guoxiaolongzte/spark,shuangshuangwang/spark,bravo-zhang/spark,techaddict/spark,mzl9039/spark,yanboliang/spark,pronix/spark,rezasafi/spark,xflin/spark,liyichao/spark,zero323/spark,shubhamchopra/spark,darionyaphet/spark,chuckchen/spark,hhbyyh/spark,aray/spark,apache/spark,wangyum/spark,sahilTakiar/spark,jkbradley/spark,nilsgrabbert/spark,MLnick/spark,szhem/spark,wzhfy/spark,BryanCutler/spark,haowu80s/spark,LantaoJin/spark,tejasapatil/spark,bravo-zhang/spark,zhouyejoe/spark,HyukjinKwon/spark,skonto/spark,nlalevee/spark,bOOm-X/spark,bOOm-X/spark,bdrillard/spark,jlopezmalla/spark,Panos-Bletsos/spark-cost-model-optimizer,sureshthalamati/spark,milliman/spark,cin/spark,jlopezmalla/spark,sureshthalamati/spark,wzhfy/spark,techaddict/spark,maropu/spark,nlalevee/spark,patrick-nicholson/spark,big-pegasus/spark,cloud-fan/spark,mzl9039/spark,taroplus/spark,bravo-zhang/spark,guoxiaolongzte/spark,HyukjinKwon/spark,vinodkc/spark,cin/spark,shuangshuangwang/spark,wangmiao1981/spark,matthewfranglen/spark,1haodian/
spark,wzhfy/spark,lvdongr/spark,ajaysaini725/spark,setjet/spark,dongjoon-hyun/spark,koeninger/spark,chuckchen/spark,sachintyagi22/spark,witgo/spark,zuotingbing/spark,debugger87/spark,ibm-research-ireland/sparkoscope,aosagie/spark,brad-kaiser/spark,map222/spark,techaddict/spark,sureshthalamati/spark,HyukjinKwon/spark,1haodian/spark,nchammas/spark,gioenn/xSpark,hhbyyh/spark,nchammas/spark,mkolod/incubator-spark,ron8hu/spark,dbtsai/spark,adrian-ionescu/apache-spark,bdrillard/spark,ConeyLiu/spark,gengliangwang/spark,apache-spark-on-k8s/spark,actuaryzhang/spark,aokolnychyi/spark,ron8hu/spark,rezasafi/spark,ioana-delaney/spark,mahak/spark,joseph-torres/spark,skonto/spark,jkbradley/spark,SHASHANKB/spark,liutang123/spark,icexelloss/spark,ueshin/apache-spark,janewangfb/spark,metamx/spark,esi-mineset/spark,janewangfb/spark,panfengfeng/spark,panfengfeng/spark,holdenk/spark,taroplus/spark,metamx/spark,nilsgrabbert/spark,kimoonkim/spark,bOOm-X/spark,liutang123/spark,tejasapatil/spark,WeichenXu123/spark,akopich/spark,mahak/spark,big-pegasus/spark,patrick-nicholson/spark,andrewor14/spark,WindCanDie/spark,Aegeaner/spark,mzl9039/spark,shaneknapp/spark,caneGuy/spark,hhbyyh/spark,poffuomo/spark,panfengfeng/spark,poffuomo/spark,zuotingbing/spark,andrewor14/iolap,LantaoJin/spark,cloud-fan/spark,LantaoJin/spark,maropu/spark,yanboliang/spark,sachintyagi22/spark,chuckchen/spark,cloudera/spark,SnappyDataInc/spark,aokolnychyi/spark,cloud-fan/spark,big-pegasus/spark,maropu/spark,zzcclp/spark,ueshin/apache-spark,byakuinss/spark,WindCanDie/spark,vinodkc/spark,ddna1021/spark,ioana-delaney/spark,joseph-torres/spark,WeichenXu123/spark,jkbradley/spark,rednaxelafx/apache-spark,wangmiao1981/spark,aosagie/spark,andrewor14/iolap,kissmetrics/spark,akopich/spark,taroplus/spark,highfei2011/spark,rednaxelafx/apache-spark,gioenn/xSpark,gioenn/xSpark,brad-kaiser/spark,ptkool/spark,holdenk/spark,SnappyDataInc/spark,poffuomo/spark,techaddict/spark,alunarbeach/spark,holdenk/spark,andrewor14/spark,jkbradley/spar
k,loneknightpy/spark,dhruve/spark,ueshin/apache-spark,JerryLead/spark,zero323/spark,saltstar/spark,cloudera/spark,rekhajoshm/spark,lyogavin/spark,ibm-research-ireland/sparkoscope,shubhamchopra/spark,big-pegasus/spark,xflin/spark,map222/spark,apache-spark-on-k8s/spark,MLnick/spark,jiangxb1987/spark,ajaysaini725/spark,pgandhi999/spark,Aegeaner/spark,gengliangwang/spark,rjpower/spark,cin/spark,haowu80s/spark,shubhamchopra/spark,spark-test/spark,MLnick/spark,narahari92/spark,WindCanDie/spark,dbtsai/spark,huang1900/spark,mike0sv/spark,poffuomo/spark,apache/spark,mdespriee/spark,zzcclp/spark,andrewor14/iolap,apache/spark,wangmiao1981/spark,huang1900/spark,aray/spark,sahilTakiar/spark,szhem/spark,koeninger/spark,huang1900/spark,SnappyDataInc/spark,saturday-shi/spark,darionyaphet/spark,pgandhi999/spark,WeichenXu123/spark,mahak/spark,JerryLead/spark,kiszk/spark,srowen/spark,Panos-Bletsos/spark-cost-model-optimizer,WeichenXu123/spark,vax11780/spark,adrian-ionescu/apache-spark,xuanyuanking/spark,maropu/spark,jianran/spark,panfengfeng/spark,mdespriee/spark,debugger87/spark,ptkool/spark,sryza/spark,markhamstra/spark,hvanhovell/spark,JoshRosen/spark,maropu/spark,shaneknapp/spark,bdrillard/spark,Panos-Bletsos/spark-cost-model-optimizer,icexelloss/spark,hhbyyh/spark,rekhajoshm/spark,jlopezmalla/spark,rekhajoshm/spark,publicRoman/spark,haowu80s/spark,vax11780/spark,ahnqirage/spark,rezasafi/spark,wangyum/spark,minixalpha/spark,brad-kaiser/spark,Aegeaner/spark,witgo/spark,spark-test/spark,vax11780/spark,UndeadBaneGitHub/spark,lvdongr/spark,lxsmnv/spark,srowen/spark,cin/spark,highfei2011/spark,SHASHANKB/spark,JerryLead/spark,kissmetrics/spark,shaneknapp/spark,dbtsai/spark,MLnick/spark,wangmiao1981/spark,hvanhovell/spark,ueshin/apache-spark,jrshust/spark,dbtsai/spark,MLnick/spark,apache-spark-on-k8s/spark,srowen/spark,ddna1021/spark,rikima/spark,SHASHANKB/spark,michalsenkyr/spark,sahilTakiar/spark,stanzhai/spark,andrewor14/spark,saltstar/spark,spark-test/spark,metamx/spark,rednaxelafx/a
pache-spark,nlalevee/spark,eyalfa/spark,apache/spark,ptkool/spark,zuotingbing/spark,ajaysaini725/spark,zero323/spark,minixalpha/spark,minixalpha/spark,setjet/spark,liyichao/spark,minixalpha/spark,ConeyLiu/spark,szhem/spark,witgo/spark,ahnqirage/spark,tengpeng/spark,icexelloss/spark,janewangfb/spark,michalsenkyr/spark,chuckchen/spark,goldmedal/spark,alunarbeach/spark,jrshust/spark,zhouyejoe/spark,aosagie/spark,saturday-shi/spark,liutang123/spark,tejasapatil/spark,alunarbeach/spark,guoxiaolongzte/spark,lyogavin/spark,actuaryzhang/spark,goldmedal/spark,kiszk/spark,srowen/spark,jrshust/spark,stanzhai/spark,sachintyagi22/spark,nchammas/spark,nlalevee/spark,mkolod/incubator-spark,gioenn/xSpark,publicRoman/spark,techaddict/spark,dongjoon-hyun/spark,jlopezmalla/spark,ron8hu/spark,kevinyu98/spark,liyichao/spark,markhamstra/spark,narahari92/spark,UndeadBaneGitHub/spark,jianran/spark,adrian-ionescu/apache-spark,lvdongr/spark,yanboliang/spark,zhouyejoe/spark,dbtsai/spark,map222/spark,chuckchen/spark,ddna1021/spark,dbtsai/spark,rednaxelafx/apache-spark,apache-spark-on-k8s/spark,narahari92/spark,eyalfa/spark,aray/spark,Panos-Bletsos/spark-cost-model-optimizer,skonto/spark,zzcclp/spark,sryza/spark,JoshRosen/spark,esi-mineset/spark,liyichao/spark,jrshust/spark,setjet/spark,loneknightpy/spark,andrewor14/spark,esi-mineset/spark,lxsmnv/spark,mkolod/incubator-spark,actuaryzhang/spark,BryanCutler/spark,apache/spark,shubhamchopra/spark,hvanhovell/spark,loneknightpy/spark,ericvandenbergfb/spark,mike0sv/spark,alunarbeach/spark,BryanCutler/spark,esi-mineset/spark,michalsenkyr/spark,xuanyuanking/spark,jianran/spark,szhem/spark,saturday-shi/spark,mike0sv/spark,michalsenkyr/spark,vinodkc/spark,ajaysaini725/spark,hvanhovell/spark,saturday-shi/spark,Panos-Bletsos/spark-cost-model-optimizer,facaiy/spark,markhamstra/spark,cloud-fan/spark,wzhfy/spark,ioana-delaney/spark,jiangxb1987/spark,dotunolafunmiloye/spark,facaiy/spark,publicRoman/spark,lyogavin/spark,ConeyLiu/spark,kiszk/spark,sryza/spark,som
eorz/spark,janewangfb/spark,spark-test/spark,xflin/spark,ptkool/spark,adrian-ionescu/apache-spark,zzcclp/spark,loneknightpy/spark,ibm-research-ireland/sparkoscope,map222/spark,aokolnychyi/spark,jiangxb1987/spark,lxsmnv/spark,jiangxb1987/spark,cloudera/spark,nchammas/spark,eyalfa/spark,byakuinss/spark,wangmiao1981/spark,mkolod/incubator-spark,wangmiao1981/spark,sachintyagi22/spark,sureshthalamati/spark,xuanyuanking/spark,dhruve/spark,wgpshashank/spark,liyichao/spark,brad-kaiser/spark,goldmedal/spark,ericvandenbergfb/spark,ajaysaini725/spark,WindCanDie/spark,kiszk/spark,mike0sv/spark,debugger87/spark,jkbradley/spark,patrick-nicholson/spark,tengpeng/spark,map222/spark,jiangxb1987/spark,someorz/spark,ioana-delaney/spark,maropu/spark,koeninger/spark,rekhajoshm/spark,SnappyDataInc/spark,mahak/spark,hhbyyh/spark,UndeadBaneGitHub/spark,dhruve/spark,pronix/spark,someorz/spark,sureshthalamati/spark,kevinyu98/spark,nilsgrabbert/spark,jianran/spark,markhamstra/spark,chuckchen/spark,pgandhi999/spark,haowu80s/spark,WeichenXu123/spark,LantaoJin/spark,vinodkc/spark,xflin/spark,LantaoJin/spark,szhem/spark,jiangxb1987/spark,huang1900/spark,mike0sv/spark,kissmetrics/spark,hvanhovell/spark,kissmetrics/spark,tengpeng/spark,patrick-nicholson/spark,hhbyyh/spark,rikima/spark,wangyum/spark,Panos-Bletsos/spark-cost-model-optimizer,witgo/spark,Aegeaner/spark,ptkool/spark,apache-spark-on-k8s/spark,nchammas/spark,highfei2011/spark,joseph-torres/spark,wgpshashank/spark,1haodian/spark,patrick-nicholson/spark,saltstar/spark,rekhajoshm/spark,jlopezmalla/spark,UndeadBaneGitHub/spark,dbtsai/spark,zero323/spark,tejasapatil/spark,gioenn/xSpark,maropu/spark,SHASHANKB/spark,dhruve/spark,ibm-research-ireland/sparkoscope,JoshRosen/spark,mahak/spark,zuotingbing/spark,jkbradley/spark,brad-kaiser/spark,wangyum/spark,cloud-fan/spark,mike0sv/spark,lvdongr/spark,aosagie/spark,xuanyuanking/spark,taroplus/spark,skonto/spark,andrewor14/spark,publicRoman/spark,milliman/spark,apache-spark-on-k8s/spark,kissmetrics/spa
rk,bdrillard/spark,skonto/spark,ioana-delaney/spark,minixalpha/spark,patrick-nicholson/spark,UndeadBaneGitHub/spark,taroplus/spark,minixalpha/spark,pronix/spark,rednaxelafx/apache-spark,mdespriee/spark,dotunolafunmiloye/spark,bravo-zhang/spark,ericvandenbergfb/spark,ron8hu/spark,techaddict/spark,shubhamchopra/spark,sureshthalamati/spark,lxsmnv/spark,michalsenkyr/spark,rikima/spark,actuaryzhang/spark,taroplus/spark,poffuomo/spark,mzl9039/spark,ConeyLiu/spark,saturday-shi/spark,nlalevee/spark,cloud-fan/spark,bdrillard/spark,ajaysaini725/spark,dongjoon-hyun/spark,lvdongr/spark,aray/spark,gengliangwang/spark,JerryLead/spark,nlalevee/spark,shuangshuangwang/spark,haowu80s/spark,esi-mineset/spark,skonto/spark,map222/spark,rikima/spark,stanzhai/spark,UndeadBaneGitHub/spark,sachintyagi22/spark,mzl9039/spark,ericvandenbergfb/spark,kevinyu98/spark,JerryLead/spark,panfengfeng/spark,aosagie/spark,sryza/spark,darionyaphet/spark,MLnick/spark,debugger87/spark,dhruve/spark,sachintyagi22/spark,kiszk/spark,alunarbeach/spark,xflin/spark,big-pegasus/spark,bravo-zhang/spark,nilsgrabbert/spark,pgandhi999/spark,narahari92/spark,ahnqirage/spark,jlopezmalla/spark,sachintyagi22/spark,aray/spark,hvanhovell/spark,panfengfeng/spark,janewangfb/spark,map222/spark,gengliangwang/spark,byakuinss/spark,zhouyejoe/spark,shuangshuangwang/spark,koeninger/spark,sryza/spark,xflin/spark,ahnqirage/spark,vax11780/spark,ahnqirage/spark,adrian-ionescu/apache-spark,zhouyejoe/spark,SHASHANKB/spark,zero323/spark,rekhajoshm/spark,rjpower/spark,eyalfa/spark,BryanCutler/spark,hvanhovell/spark,esi-mineset/spark,jiangxb1987/spark,vinodkc/spark,apache-spark-on-k8s/spark,rezasafi/spark,joseph-torres/spark,rednaxelafx/apache-spark,srowen/spark,saltstar/spark,dongjoon-hyun/spark,brad-kaiser/spark,SnappyDataInc/spark,andrewor14/iolap,rekhajoshm/spark,holdenk/spark,narahari92/spark,zhouyejoe/spark,narahari92/spark,vinodkc/spark,rjpower/spark,tengpeng/spark,zero323/spark,rednaxelafx/apache-spark,ibm-research-ireland/sparkoscop
e,facaiy/spark,xuanyuanking/spark,xuanyuanking/spark,jkbradley/spark,pronix/spark,liyichao/spark,sahilTakiar/spark,BryanCutler/spark,dotunolafunmiloye/spark,dongjoon-hyun/spark,ConeyLiu/spark,dongjoon-hyun/spark,mdespriee/spark,metamx/spark,1haodian/spark,mdespriee/spark,michalsenkyr/spark,publicRoman/spark,brad-kaiser/spark,JoshRosen/spark,aray/spark,yanboliang/spark,JerryLead/spark,andrewor14/spark,liutang123/spark,nlalevee/spark,darionyaphet/spark,koeninger/spark,Panos-Bletsos/spark-cost-model-optimizer,holdenk/spark,ueshin/apache-spark,aokolnychyi/spark,nchammas/spark,pgandhi999/spark,liutang123/spark,adrian-ionescu/apache-spark,apache/spark,vinodkc/spark,gioenn/xSpark,byakuinss/spark,zero323/spark,yanboliang/spark,setjet/spark,WindCanDie/spark,bOOm-X/spark,spark-test/spark,ueshin/apache-spark,guoxiaolongzte/spark,rjpower/spark,akopich/spark,lxsmnv/spark,facaiy/spark,techaddict/spark,liyichao/spark,srowen/spark,MLnick/spark,stanzhai/spark,Aegeaner/spark,sureshthalamati/spark,kimoonkim/spark,highfei2011/spark,dhruve/spark,JoshRosen/spark,caneGuy/spark,kiszk/spark,jlopezmalla/spark,goldmedal/spark,zhouyejoe/spark,shaneknapp/spark,ahnqirage/spark,cloudera/spark,LantaoJin/spark,ConeyLiu/spark,caneGuy/spark,aray/spark,Aegeaner/spark,SHASHANKB/spark,chuckchen/spark,goldmedal/spark,markhamstra/spark,setjet/spark,HyukjinKwon/spark,nchammas/spark,huang1900/spark,pgandhi999/spark,ajaysaini725/spark,milliman/spark,darionyaphet/spark,ron8hu/spark,saturday-shi/spark,hhbyyh/spark,bOOm-X/spark,ConeyLiu/spark,shubhamchopra/spark,tengpeng/spark,andrewor14/iolap,JoshRosen/spark,wzhfy/spark,facaiy/spark,Aegeaner/spark,szhem/spark,rezasafi/spark,ioana-delaney/spark,xflin/spark,skonto/spark,spark-test/spark,bravo-zhang/spark,goldmedal/spark,gengliangwang/spark,rikima/spark,rikima/spark,darionyaphet/spark,janewangfb/spark,ddna1021/spark,stanzhai/spark,stanzhai/spark,poffuomo/spark,WindCanDie/spark,witgo/spark,dotunolafunmiloye/spark,aosagie/spark,ddna1021/spark,ueshin/apache-spark,mz
l9039/spark,SHASHANKB/spark,milliman/spark,actuaryzhang/spark,JoshRosen/spark,icexelloss/spark,BryanCutler/spark,tengpeng/spark,big-pegasus/spark,caneGuy/spark,liutang123/spark,jianran/spark,joseph-torres/spark,gioenn/xSpark,nilsgrabbert/spark,ericvandenbergfb/spark,vax11780/spark,shaneknapp/spark,jianran/spark,rikima/spark,LantaoJin/spark,goldmedal/spark,guoxiaolongzte/spark,ahnqirage/spark,bravo-zhang/spark,saltstar/spark,dhruve/spark,shaneknapp/spark,setjet/spark,guoxiaolongzte/spark,mahak/spark,shubhamchopra/spark,kiszk/spark,publicRoman/spark,kimoonkim/spark,someorz/spark,zzcclp/spark,wangyum/spark,tejasapatil/spark,akopich/spark,lxsmnv/spark,rjpower/spark,ericvandenbergfb/spark,bOOm-X/spark,ibm-research-ireland/sparkoscope,markhamstra/spark,JerryLead/spark,mkolod/incubator-spark,SnappyDataInc/spark,andrewor14/iolap,poffuomo/spark,yanboliang/spark,sahilTakiar/spark,caneGuy/spark,ddna1021/spark,someorz/spark,sahilTakiar/spark,kimoonkim/spark,mdespriee/spark,guoxiaolongzte/spark,rezasafi/spark,kimoonkim/spark,kevinyu98/spark,highfei2011/spark,ibm-research-ireland/sparkoscope,zzcclp/spark,wangyum/spark,zuotingbing/spark,patrick-nicholson/spark,icexelloss/spark,szhem/spark,byakuinss/spark,panfengfeng/spark,haowu80s/spark,zuotingbing/spark,debugger87/spark,publicRoman/spark,kimoonkim/spark,kissmetrics/spark,pronix/spark,nilsgrabbert/spark,kevinyu98/spark,mahak/spark,actuaryzhang/spark,tengpeng/spark,1haodian/spark,holdenk/spark,janewangfb/spark,alunarbeach/spark,holdenk/spark,debugger87/spark,milliman/spark,eyalfa/spark,milliman/spark,nilsgrabbert/spark,aokolnychyi/spark,WeichenXu123/spark,wangyum/spark,spark-test/spark,minixalpha/spark,zzcclp/spark,ioana-delaney/spark,rezasafi/spark,big-pegasus/spark,huang1900/spark,witgo/spark,metamx/spark,lxsmnv/spark,srowen/spark,highfei2011/spark,bdrillard/spark,markhamstra/spark,byakuinss/spark,highfei2011/spark,UndeadBaneGitHub/spark,byakuinss/spark,facaiy/spark,cin/spark,xuanyuanking/spark,sahilTakiar/spark,icexelloss/spark,
mike0sv/spark,alunarbeach/spark,HyukjinKwon/spark,michalsenkyr/spark,kissmetrics/spark,loneknightpy/spark,mdespriee/spark,witgo/spark,bOOm-X/spark,shaneknapp/spark,joseph-torres/spark,yanboliang/spark,adrian-ionescu/apache-spark,metamx/spark,shuangshuangwang/spark,1haodian/spark,aosagie/spark,zuotingbing/spark,lvdongr/spark,ericvandenbergfb/spark,cloudera/spark,aokolnychyi/spark,aokolnychyi/spark,apache/spark,narahari92/spark,bdrillard/spark,shuangshuangwang/spark,HyukjinKwon/spark,kevinyu98/spark,lvdongr/spark,eyalfa/spark,gengliangwang/spark,joseph-torres/spark,setjet/spark,milliman/spark,mzl9039/spark,icexelloss/spark,saturday-shi/spark,darionyaphet/spark | python/examples/transitive_closure.py | python/examples/transitive_closure.py | import sys
from random import Random
from pyspark import SparkContext
numEdges = 200
numVertices = 100
rand = Random(42)


def generateGraph():
    """Build a random directed graph as a set of numEdges distinct edges.

    Vertices are integers in [0, numVertices); self-loops (src == dst)
    are excluded. Uses the module-level seeded RNG, so the first call is
    deterministic.
    """
    edges = set()
    while len(edges) < numEdges:
        # Endpoints must be drawn from the vertex range [0, numVertices).
        # The original code used numEdges (200) as the upper bound, which
        # produced vertex ids outside the declared 100-vertex set.
        src = rand.randrange(0, numVertices)
        dst = rand.randrange(0, numVertices)
        if src != dst:
            edges.add((src, dst))
    return edges
if __name__ == "__main__":
    # Require at least the Spark master URL on the command line.
    if len(sys.argv) == 1:
        print >> sys.stderr, \
            "Usage: PythonTC <master> [<slices>]"
        exit(-1)
    sc = SparkContext(sys.argv[1], "PythonTC")
    # Optional second argument: number of RDD partitions (defaults to 2).
    slices = int(sys.argv[2]) if len(sys.argv) > 2 else 2
    tc = sc.parallelize(generateGraph(), slices).cache()
    # Linear transitive closure: each round grows paths by one edge,
    # by joining the graph's edges with the already-discovered paths.
    # e.g. join the path (y, z) from the TC with the edge (x, y) from
    # the graph to obtain the path (x, z).
    # Because join() joins on keys, the edges are stored in reversed order.
    edges = tc.map(lambda (x, y): (y, x))
    oldCount = 0L
    nextCount = tc.count()
    while True:
        oldCount = nextCount
        # Perform the join, obtaining an RDD of (y, (z, x)) pairs,
        # then project the result to obtain the new (x, z) paths.
        new_edges = tc.join(edges).map(lambda (_, (a, b)): (b, a))
        tc = tc.union(new_edges).distinct().cache()
        nextCount = tc.count()
        # Fixed point reached: no new paths were discovered this round.
        if nextCount == oldCount:
            break
    print "TC has %i edges" % tc.count()
| import sys
from random import Random
from pyspark import SparkContext
numEdges = 200
numVertices = 100
rand = Random(42)


def generateGraph():
    """Build a random directed graph as a set of numEdges distinct edges.

    Vertices are integers in [0, numVertices); self-loops (src == dst)
    are excluded. Uses the module-level seeded RNG, so the first call is
    deterministic.
    """
    edges = set()
    while len(edges) < numEdges:
        # Endpoints must be drawn from the vertex range [0, numVertices).
        # The original code used numEdges (200) as the upper bound, which
        # produced vertex ids outside the declared 100-vertex set.
        src = rand.randrange(0, numVertices)
        dst = rand.randrange(0, numVertices)
        if src != dst:
            edges.add((src, dst))
    return edges
if __name__ == "__main__":
if len(sys.argv) == 1:
print >> sys.stderr, \
"Usage: PythonTC <master> [<slices>]"
exit(-1)
sc = SparkContext(sys.argv[1], "PythonTC")
slices = sys.argv[2] if len(sys.argv) > 2 else 2
tc = sc.parallelize(generateGraph(), slices).cache()
# Linear transitive closure: each round grows paths by one edge,
# by joining the graph's edges with the already-discovered paths.
# e.g. join the path (y, z) from the TC with the edge (x, y) from
# the graph to obtain the path (x, z).
# Because join() joins on keys, the edges are stored in reversed order.
edges = tc.map(lambda (x, y): (y, x))
oldCount = 0L
nextCount = tc.count()
while True:
oldCount = nextCount
# Perform the join, obtaining an RDD of (y, (z, x)) pairs,
# then project the result to obtain the new (x, z) paths.
new_edges = tc.join(edges).map(lambda (_, (a, b)): (b, a))
tc = tc.union(new_edges).distinct().cache()
nextCount = tc.count()
if nextCount == oldCount:
break
print "TC has %i edges" % tc.count()
| apache-2.0 | Python |
8eb5cc80f003c5802b9bc3e87d77c1deee220610 | update admin | blueicepl/django-permissions | permissions/admin.py | permissions/admin.py | from django.contrib import admin
from permissions.models import ObjectPermission, PrincipalRoleRelation, Role, Permission
class ObjectPermissionAdmin(admin.ModelAdmin):
    """Admin listing for per-object permission grants (role + permission + object)."""
    list_display = ('pk', 'role', 'permission', 'content')
    list_filter = ('role', 'permission')


admin.site.register(ObjectPermission, ObjectPermissionAdmin)
class PermissionAdmin(admin.ModelAdmin):
    """Admin listing for Permission objects.

    The computed ``types`` column shows every content type the
    permission is attached to.
    """
    list_display = ('codename', 'name', 'types')
    search_fields = ('codename', 'name')

    def types(self, obj):
        """Return a comma-separated list of associated content type names."""
        type_names = (content_type.name for content_type in obj.content_types.all())
        return ','.join(type_names)


admin.site.register(Permission, PermissionAdmin)
class RoleAdmin(admin.ModelAdmin):
    """Admin listing for Role objects.

    The computed ``perms`` column shows the codenames of the role's
    global permissions.
    """
    list_display = ('codename', 'name', 'perms')
    search_fields = ('codename', 'name')

    def perms(self, obj):
        """Return a comma-separated list of global permission codenames."""
        codenames = (perm.codename for perm in obj.global_permissions.all())
        return ','.join(codenames)


admin.site.register(Role, RoleAdmin)
class PrincipalRoleRelationAdmin(admin.ModelAdmin):
    """Admin for assignments of roles to principals (users or groups)."""
    list_display = ('pk', 'role', 'user', 'group', 'content')
    list_filter = ('role', 'group')
    # Raw-id widgets avoid rendering huge <select> dropdowns for these
    # relations on large databases.
    raw_id_fields = ('role', 'user', 'group')


admin.site.register(PrincipalRoleRelation, PrincipalRoleRelationAdmin)
| from django.contrib import admin
from permissions.models import ObjectPermission, PrincipalRoleRelation, Role, Permission
class ObjectPermissionAdmin(admin.ModelAdmin):
    """Admin listing for per-object permission grants (role + permission + object)."""
    list_display = ('pk', 'role', 'permission', 'content')
    list_filter = ('role', 'permission')


admin.site.register(ObjectPermission, ObjectPermissionAdmin)
class PermissionAdmin(admin.ModelAdmin):
    """Admin listing for Permission objects.

    The computed ``types`` column shows every content type the
    permission is attached to.
    """
    list_display = ('codename', 'name', 'types')
    search_fields = ('codename', 'name')

    def types(self, obj):
        """Return a comma-separated list of associated content type names."""
        type_names = (content_type.name for content_type in obj.content_types.all())
        return ','.join(type_names)


admin.site.register(Permission, PermissionAdmin)
class RoleAdmin(admin.ModelAdmin):
    """Admin listing for Role objects.

    The computed ``perms`` column shows the codenames of the role's
    global permissions.
    """
    list_display = ('codename', 'name', 'perms')
    search_fields = ('codename', 'name')

    def perms(self, obj):
        """Return a comma-separated list of global permission codenames."""
        codenames = (perm.codename for perm in obj.global_permissions.all())
        return ','.join(codenames)


admin.site.register(Role, RoleAdmin)
class PrincipalRoleRelationAdmin(admin.ModelAdmin):
    """Admin for assignments of roles to principals (users or groups)."""
    list_display = ('pk', 'role', 'user', 'group', 'content')
    list_filter = ('role', 'group')
    # 'content' removed from raw_id_fields: raw_id_fields may only name
    # ForeignKey/ManyToMany model fields, and 'content' appears to be a
    # generic relation here (it is shown via list_display), so including
    # it fails Django's admin validation. TODO confirm against
    # permissions.models.
    raw_id_fields = ('role', 'user', 'group')


admin.site.register(PrincipalRoleRelation, PrincipalRoleRelationAdmin)
| bsd-3-clause | Python |
1bcf8cba05749a83f86c1cd930d3c08c237f3b52 | add INSTALLED_APPS comment | bungoume/django-template | project_name/project_name/settings/__init__.py | project_name/project_name/settings/__init__.py | """
Django settings for {{ project_name }} project.
For more information on this file, see
https://docs.djangoproject.com/en/{{ docs_version }}/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os.path import dirname
# Repository root: three dirname() calls climb from settings/__init__.py
# up past the settings package and the project package.
BASE_DIR = dirname(dirname(dirname(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/{{ docs_version }}/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '{{ secret_key }}'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Host/domain names this site may serve; must be populated when DEBUG is off.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party applications
    # Project applications
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = '{{ project_name }}.urls'
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
# Database
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#databases
# Default: a local SQLite file at the repository root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/{{ docs_version }}/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/{{ docs_version }}/howto/static-files/
STATIC_URL = '/static/'
| """
Django settings for {{ project_name }} project.
For more information on this file, see
https://docs.djangoproject.com/en/{{ docs_version }}/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os.path import dirname
# Repository root: three dirname() calls climb from settings/__init__.py
# up past the settings package and the project package.
BASE_DIR = dirname(dirname(dirname(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/{{ docs_version }}/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '{{ secret_key }}'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Host/domain names this site may serve; must be populated when DEBUG is off.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party and project-specific applications get appended here.
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = '{{ project_name }}.urls'
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
# Database
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#databases
# Default: a local SQLite file at the repository root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/{{ docs_version }}/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/{{ docs_version }}/howto/static-files/
STATIC_URL = '/static/'
| mit | Python |
69ba02dafebe0c6e2e6e32b1926d53ddb3188707 | fix spelling in log message | genome/flow-core,genome/flow-core,genome/flow-core | flow/util/exit.py | flow/util/exit.py | import logging
import os
import psutil
import signal
import time
LOG = logging.getLogger(__name__)

_SIGNAL_TIMEOUT = 10


def exit_process(exit_code, child_signals=(signal.SIGINT, signal.SIGTERM)):
    """Shut down child processes, then terminate immediately with exit_code.

    Each signal in child_signals is sent to the direct children in turn,
    waiting up to _SIGNAL_TIMEOUT seconds after each round; anything still
    alive (children and grandchildren) is then SIGKILLed before the
    process exits via os._exit (no cleanup handlers run).

    The default is a tuple rather than a list to avoid a mutable default
    argument.
    """
    # Typo fix: was 'Exitting'; matches the spelling of the final message.
    LOG.info('Exiting process: signalling children.')
    for signum in child_signals:
        _signal_child_processes(signum, timeout=_SIGNAL_TIMEOUT)

    _signal_child_processes(signal.SIGKILL, recursive=True,
            timeout=_SIGNAL_TIMEOUT)

    LOG.info('Children killed, exiting with code %d', exit_code)
    os._exit(exit_code)
def _signal_child_processes(signum, recursive=False, timeout=_SIGNAL_TIMEOUT):
    """Deliver signum to every child of this process, then wait for them."""
    current = psutil.Process(os.getpid())
    children = current.get_children(recursive=recursive)
    for proc in children:
        proc.send_signal(signum)
    _wait_children(timeout, recursive=recursive)
def _wait_children(timeout, recursive=False):
    """Wait until child processes exit or ~timeout seconds elapse overall."""
    deadline = time.time() + timeout
    me = psutil.Process(os.getpid())
    for child in me.get_children(recursive=recursive):
        remaining = max(0, deadline - time.time())
        try:
            child.wait(remaining)
        except psutil.TimeoutExpired:
            # Overall budget exhausted; stop waiting on the rest.
            break
| import logging
import os
import psutil
import signal
import time
LOG = logging.getLogger(__name__)

_SIGNAL_TIMEOUT = 10


def exit_process(exit_code, child_signals=(signal.SIGINT, signal.SIGTERM)):
    """Shut down child processes, then terminate immediately with exit_code.

    Each signal in child_signals is sent to the direct children in turn,
    waiting up to _SIGNAL_TIMEOUT seconds after each round; anything still
    alive (children and grandchildren) is then SIGKILLed before the
    process exits via os._exit (no cleanup handlers run).

    The default is a tuple rather than a list to avoid a mutable default
    argument.
    """
    # Typo fixes below: 'Exitting'/'exitting' -> 'Exiting'/'exiting'.
    LOG.info('Exiting process: signalling children.')
    for signum in child_signals:
        _signal_child_processes(signum, timeout=_SIGNAL_TIMEOUT)

    _signal_child_processes(signal.SIGKILL, recursive=True,
            timeout=_SIGNAL_TIMEOUT)

    LOG.info('Children killed, exiting with code %d', exit_code)
    os._exit(exit_code)
def _signal_child_processes(signum, recursive=False, timeout=_SIGNAL_TIMEOUT):
    """Deliver signum to every child of this process, then wait for them."""
    current = psutil.Process(os.getpid())
    children = current.get_children(recursive=recursive)
    for proc in children:
        proc.send_signal(signum)
    _wait_children(timeout, recursive=recursive)
def _wait_children(timeout, recursive=False):
    """Wait until child processes exit or ~timeout seconds elapse overall."""
    deadline = time.time() + timeout
    me = psutil.Process(os.getpid())
    for child in me.get_children(recursive=recursive):
        remaining = max(0, deadline - time.time())
        try:
            child.wait(remaining)
        except psutil.TimeoutExpired:
            # Overall budget exhausted; stop waiting on the rest.
            break
| agpl-3.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.