commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
cfabd36edd10819151caa25e8a30ef2938a55905
|
add django-compat as requirement
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from os.path import join, dirname
from setuptools import setup, find_packages
import organizations as app
def long_description():
try:
return open(join(dirname(__file__), 'README.rst')).read()
except IOError:
return "LONG_DESCRIPTION Error"
setup(
author="Ben Lopatin + arteria GmbH",
author_email="ben.lopatin@wellfireinteractive.com",
name='django-ar-organizations',
version=app.__version__,
description='Group accounts for Django',
long_description=long_description(),
url='https://github.com/wellfire/django-organizations/',
license='BSD License',
platforms=['OS Independent'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
install_requires=[
'Django>=1.4',
'django-extensions>=0.9',
],
#test_suite='tests.runtests.runtests',
include_package_data=True,
packages=find_packages(exclude=["tests.tests", "tests.test_app", "tests"]),
zip_safe=False
)
|
Python
| 0.000001
|
@@ -1086,16 +1086,48 @@
%3E=0.9',%0A
+ 'django-compat%3E=1.0.2',%0A
%5D,%0A
@@ -1130,16 +1130,17 @@
%5D,%0A #
+
test_sui
|
d0002645b0f36ec9fbebaadfb0fa7717f1431ef4
|
improve generated form view in code
|
openerp/addons/base/module/wizard/base_module_upgrade.py
|
openerp/addons/base/module/wizard/base_module_upgrade.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import pooler
from openerp.osv import osv, fields
from openerp.tools.translate import _
class base_module_upgrade(osv.osv_memory):
""" Module Upgrade """
_name = "base.module.upgrade"
_description = "Module Upgrade"
_columns = {
'module_info': fields.text('Modules to Update',readonly=True),
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
res = super(base_module_upgrade, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)
if view_type != 'form':
return res
context = {} if context is None else context
record_id = context and context.get('active_id', False) or False
active_model = context.get('active_model')
if (not record_id) or (not active_model):
return res
ids = self.get_module_list(cr, uid, context=context)
if not ids:
res['arch'] = '''<form string="Apply Scheduled Upgrades" version="7.0">
<header>
<button name="config" string="Start configuration" type="object" class="oe_highlight_on_dirty"/>
or
<button string="Cancel" class="oe_link" special="cancel"/>
</header>
<group>
<separator string="System update completed" colspan="4"/>
<label align="0.0" string="The selected modules have been updated / installed !" colspan="4"/>
<label align="0.0" string="We suggest to reload the page to see the new menus." colspan="4"/>
</group>
</form>'''
return res
def get_module_list(self, cr, uid, context=None):
mod_obj = self.pool.get('ir.module.module')
ids = mod_obj.search(cr, uid, [
('state', 'in', ['to upgrade', 'to remove', 'to install'])])
return ids
def default_get(self, cr, uid, fields, context=None):
mod_obj = self.pool.get('ir.module.module')
ids = self.get_module_list(cr, uid, context=context)
res = mod_obj.read(cr, uid, ids, ['name','state'], context)
return {'module_info': '\n'.join(map(lambda x: x['name']+' : '+x['state'], res))}
def upgrade_module(self, cr, uid, ids, context=None):
ir_module = self.pool.get('ir.module.module')
# install/upgrade: double-check preconditions
ids = ir_module.search(cr, uid, [('state', 'in', ['to upgrade', 'to install'])])
if ids:
cr.execute("""SELECT d.name FROM ir_module_module m
JOIN ir_module_module_dependency d ON (m.id = d.module_id)
LEFT JOIN ir_module_module m2 ON (d.name = m2.name)
WHERE m.id in %s and (m2.state IS NULL or m2.state IN %s)""",
(tuple(ids), ('uninstalled',)))
unmet_packages = [x[0] for x in cr.fetchall()]
if unmet_packages:
raise osv.except_osv(_('Unmet dependency !'),
_('Following modules are not installed or unknown: %s') % ('\n\n' + '\n'.join(unmet_packages)))
ir_module.download(cr, uid, ids, context=context)
cr.commit() # save before re-creating cursor below
pooler.restart_pool(cr.dbname, update_module=True)
ir_model_data = self.pool.get('ir.model.data')
__, res_id = ir_model_data.get_object_reference(cr, uid, 'base', 'view_base_module_upgrade_install')
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'base.module.upgrade',
'views': [(res_id, 'form')],
'view_id': False,
'type': 'ir.actions.act_window',
'target': 'new',
}
def config(self, cr, uid, ids, context=None):
return self.pool.get('res.config').next(cr, uid, [], context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Python
| 0
|
@@ -2006,19 +2006,36 @@
rch'%5D =
-'''
+%22%22%22%0A
%3Cform st
@@ -2101,28 +2101,16 @@
-
%3Cheader%3E
@@ -2130,36 +2130,24 @@
-
-
%3Cbutton name
@@ -2222,26 +2222,12 @@
ight
-_on_dirty%22/%3E%0A
+%22/%3E%0A
@@ -2250,24 +2250,11 @@
- or%0A
+or%0A
@@ -2265,38 +2265,32 @@
-
%3Cbutton string=%22
@@ -2356,218 +2356,110 @@
- %3C/header%3E%0A %3Cgroup%3E%0A %3Cseparator string=%22System update completed%22 colspan=%224%22/%3E%0A %3Clabel align=%220.0%22
+%3C/header%3E%0A %3Cseparator string=%22System update completed%22/%3E%0A %3Clabel
str
@@ -2516,28 +2516,16 @@
alled !%22
- colspan=%224%22
/%3E%0A
@@ -2527,32 +2527,47 @@
+ %3Cnewline/%3E%0A
@@ -2580,20 +2580,8 @@
abel
- align=%220.0%22
str
@@ -2641,104 +2641,50 @@
us.%22
- colspan=%224%22/%3E%0A %3C/group%3E%0A %3C/form%3E'''%0A
+/%3E%0A %3C/form%3E%0A %22%22%22
%0A
|
103418e32d77161dacb67eabea559403b62e819b
|
Add test to confirm proper handling of org extensions in site_persistence
|
tests/test_model_persistence.py
|
tests/test_model_persistence.py
|
from tests import TestCase
from flask_webtest import SessionScope
from shutil import rmtree
from tempfile import mkdtemp
from portal.config.model_persistence import ModelPersistence
from portal.database import db
from portal.models.app_text import AppText
from portal.models.communication_request import CommunicationRequest
from portal.models.coding import Coding
from portal.models.organization import Organization
from portal.system_uri import SNOMED
class TestModelPersistence(TestCase):
def setUp(self):
super(TestModelPersistence, self).setUp()
self.tmpdir = mkdtemp()
def tearDown(self):
super(TestModelPersistence, self).tearDown()
rmtree(self.tmpdir)
def test_adjust_sequence(self):
mp = ModelPersistence(Organization, sequence_name='organizations_id_seq')
# Insert dummy row w/ large id, confirm
# the sequence becomes greater
id = 10000
dummy = Organization(id=id, name='big id')
with SessionScope(db):
db.session.add(dummy)
db.session.commit()
mp.update_sequence()
currval = db.engine.execute(
"SELECT CURRVAL('{}')".format(
'organizations_id_seq')).fetchone()[0]
self.assertTrue(currval > id)
def test_identifier_lookup(self):
# setup a minimal communication request
from tests.test_communication import mock_communication_request
from tests.test_assessment_status import mock_tnth_questionnairebanks
from portal.system_uri import TRUENTH_CR_NAME
from portal.models.identifier import Identifier
mock_tnth_questionnairebanks()
cr = mock_communication_request('symptom_tracker_recurring', '{"days": 14}')
cr.identifiers.append(Identifier(value='2 week ST', system=TRUENTH_CR_NAME))
with SessionScope(db):
db.session.add(cr)
db.session.commit()
cr = db.session.merge(cr)
self.assertEquals(cr.identifiers.count(), 1)
data = cr.as_fhir()
mp = ModelPersistence(
CommunicationRequest, sequence_name='communication_requests_id_seq',
lookup_field='identifier')
new_obj = CommunicationRequest.from_fhir(data)
match, field_description = mp.lookup_existing(new_obj=new_obj, new_data=data)
self.assertEquals(match.name, cr.name)
def test_composite_key(self):
known_coding = Coding(
system=SNOMED, code='26294005',
display='Radical prostatectomy (nerve-sparing)').add_if_not_found(True)
mp = ModelPersistence(
Coding, sequence_name='codings_id_seq', lookup_field=('system', 'code'))
data = known_coding.as_fhir()
# Modify only the `display` - composite keys should still match
modified_data = data.copy()
modified_data['display'] = 'Radical prostatectomy'
modified = Coding.from_fhir(data)
match, _ = mp.lookup_existing(new_obj=modified, new_data=modified_data)
self.assertEquals(data, match.as_fhir())
# Import and see the change
updated = mp.update(modified_data)
self.assertEquals(modified, updated)
# Export and verify
serial = mp.serialize()
self.assertTrue(modified_data in serial)
def test_delete_unnamed(self):
keeper = AppText(name='keep me', custom_text='worthy')
mp = ModelPersistence(
AppText, lookup_field='name', sequence_name='apptext_id_seq')
with SessionScope(db):
db.session.add(keeper)
db.session.commit()
mp.export(target_dir=self.tmpdir)
# Add another app text, expecting it'll be removed
bogus = AppText(name='temp', custom_text='not worthy')
with SessionScope(db):
db.session.add(bogus)
db.session.commit()
# Import w/ keep_unmentioned and expect both
mp.import_(keep_unmentioned=True, target_dir=self.tmpdir)
self.assertEquals(AppText.query.count(), 2)
# Now import, and expect only keeper to remain
mp.import_(keep_unmentioned=False, target_dir=self.tmpdir)
self.assertEquals(AppText.query.count(), 1)
self.assertEquals(AppText.query.first().name, 'keep me')
|
Python
| 0
|
@@ -207,16 +207,65 @@
port db%0A
+from portal.models.locale import LocaleConstants%0A
from por
@@ -4329,8 +4329,1079 @@
ep me')%0A
+%0A def test_delete_extension(self):%0A org = Organization(name='testy')%0A org.timezone = 'Asia/Tokyo' # stored in an extension%0A with SessionScope(db):%0A db.session.add(org)%0A db.session.commit()%0A org = db.session.merge(org)%0A mp = ModelPersistence(%0A Organization, lookup_field='id',%0A sequence_name='organizations_id_seq'%0A )%0A mp.export(self.tmpdir)%0A%0A # Add an additional extension to the org, make sure%0A # they are deleted when importing again from%0A # persistence that doesn't include them%0A%0A org.locales.append(LocaleConstants().AmericanEnglish)%0A with SessionScope(db):%0A db.session.commit()%0A org = db.session.merge(org)%0A self.assertTrue(len(org.as_fhir()%5B'extension'%5D) %3E 1)%0A%0A mp.import_(keep_unmentioned=False, target_dir=self.tmpdir)%0A org = Organization.query.filter(Organization.name == 'testy').one()%0A self.assertEquals(org.locales.count(), 0)%0A self.assertEquals(org.timezone, 'Asia/Tokyo')%0A
|
333df12d64b7d0724a90c155858e3a8421967aa0
|
Add test for copy_reads_file()
|
tests/samples/test_fake.py
|
tests/samples/test_fake.py
|
import os
import pytest
from virtool.samples.fake import create_fake_sample, create_fake_samples
from virtool.fake.wrapper import FakerWrapper
from virtool.samples.db import LIST_PROJECTION
@pytest.fixture
def app(dbi, pg, run_in_thread, tmp_path):
return {
"db": dbi,
"fake": FakerWrapper(),
"pg": pg,
"run_in_thread": run_in_thread,
"settings": {
"data_path": tmp_path
},
}
@pytest.mark.parametrize("paired", [True, False])
@pytest.mark.parametrize("finalized", [True, False])
async def test_create_fake_unpaired(paired, finalized, app, snapshot,
static_time):
fake_sample = await create_fake_sample(app,
"sample_1",
"bob",
paired=paired,
finalized=finalized)
for key in LIST_PROJECTION:
assert key in fake_sample
if finalized is True:
assert len(fake_sample["reads"]) == (2 if paired else 1)
assert fake_sample["ready"] is True
snapshot.assert_match(fake_sample)
async def test_create_fake_samples(app, snapshot, dbi, static_time):
samples = await create_fake_samples(app)
assert len(samples) == 3
for sample in samples:
snapshot.assert_match(sample)
assert os.listdir(app["settings"]["data_path"] / "samples" / "LB1U6zCj") == ["reads_1.fq.gz"]
assert set(os.listdir(app["settings"]["data_path"] / "samples" / "2x6YnyMt")) == {"reads_1.fq.gz", "reads_2.fq.gz"}
|
Python
| 0
|
@@ -18,16 +18,17 @@
pytest%0A
+%0A
from vir
@@ -91,16 +91,50 @@
_samples
+, copy_reads_file, READ_FILES_PATH
%0Afrom vi
@@ -1649,8 +1649,269 @@
fq.gz%22%7D%0A
+%0A%0Aasync def test_copy_reads_file(app):%0A file_path = READ_FILES_PATH / %22paired_1.fq.gz%22%0A%0A await copy_reads_file(app, file_path, %22reads_1.fq.gz%22, %22sample_1%22)%0A%0A assert os.listdir(app%5B%22settings%22%5D%5B%22data_path%22%5D / %22samples%22 / %22sample_1%22) == %5B%22reads_1.fq.gz%22%5D%0A
|
458748b5354f9ef6c5bf077f773d92c83dd73961
|
Add pytest-mock to dev requirements
|
setup.py
|
setup.py
|
"""
Build Cutadapt.
"""
import sys
import os.path
from setuptools import setup, Extension, find_packages
from distutils.version import LooseVersion
from distutils.command.sdist import sdist as _sdist
from distutils.command.build_ext import build_ext as _build_ext
MIN_CYTHON_VERSION = '0.28'
if sys.version_info[:2] < (3, 5):
sys.stdout.write('You need at least Python 3.5\n')
sys.exit(1)
def no_cythonize(extensions, **_ignore):
"""
Change file extensions from .pyx to .c or .cpp.
Copied from Cython documentation
"""
for extension in extensions:
sources = []
for sfile in extension.sources:
path, ext = os.path.splitext(sfile)
if ext in ('.pyx', '.py'):
if extension.language == 'c++':
ext = '.cpp'
else:
ext = '.c'
sfile = path + ext
sources.append(sfile)
extension.sources[:] = sources
def check_cython_version():
"""Exit if Cython was not found or is too old"""
try:
from Cython import __version__ as cyversion
except ImportError:
sys.stdout.write(
"ERROR: Cython is not installed. Install at least Cython version " +
str(MIN_CYTHON_VERSION) + " to continue.\n")
sys.exit(1)
if LooseVersion(cyversion) < LooseVersion(MIN_CYTHON_VERSION):
sys.stdout.write(
"ERROR: Your Cython is at version '" + str(cyversion) +
"', but at least version " + str(MIN_CYTHON_VERSION) + " is required.\n")
sys.exit(1)
extensions = [
Extension('cutadapt._align', sources=['src/cutadapt/_align.pyx']),
Extension('cutadapt.qualtrim', sources=['src/cutadapt/qualtrim.pyx']),
]
class BuildExt(_build_ext):
def run(self):
# If we encounter a PKG-INFO file, then this is likely a .tar.gz/.zip
# file retrieved from PyPI that already includes the pre-cythonized
# extension modules, and then we do not need to run cythonize().
if os.path.exists('PKG-INFO'):
no_cythonize(extensions)
else:
# Otherwise, this is a 'developer copy' of the code, and then the
# only sensible thing is to require Cython to be installed.
check_cython_version()
from Cython.Build import cythonize
self.extensions = cythonize(self.extensions)
super().run()
class SDist(_sdist):
def run(self):
# Make sure the compiled Cython files in the distribution are up-to-date
from Cython.Build import cythonize
check_cython_version()
cythonize(extensions)
super().run()
encoding_arg = {'encoding': 'utf-8'} if sys.version_info[0] >= 3 else dict()
with open('README.rst', **encoding_arg) as f:
long_description = f.read()
setup(
name='cutadapt',
setup_requires=['setuptools_scm'], # Support pip versions that don't know about pyproject.toml
use_scm_version={'write_to': 'src/cutadapt/_version.py'},
author='Marcel Martin',
author_email='marcel.martin@scilifelab.se',
url='https://cutadapt.readthedocs.io/',
description='trim adapters from high-throughput sequencing reads',
long_description=long_description,
license='MIT',
cmdclass={'build_ext': BuildExt, 'sdist': SDist},
ext_modules=extensions,
package_dir={'': 'src'},
packages=find_packages('src'),
entry_points={'console_scripts': ['cutadapt = cutadapt.__main__:main']},
install_requires=[
'dnaio~=0.4.1',
'xopen~=0.8.4',
],
extras_require={
'dev': ['Cython', 'pytest', 'pytest-timeout', 'sphinx', 'sphinx_issues'],
},
python_requires='>=3.5',
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Cython",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Bio-Informatics"
],
)
|
Python
| 0
|
@@ -3647,16 +3647,31 @@
imeout',
+ 'pytest-mock',
'sphinx
|
e6551e977482003da2e0708a3aa0b408e2033ae7
|
remove the type
|
redash/query_runner/prometheus.py
|
redash/query_runner/prometheus.py
|
import requests
from datetime import datetime
from urlparse import parse_qs
from redash.query_runner import BaseQueryRunner, register, TYPE_DATETIME, TYPE_STRING
from redash.utils import json_dumps
def get_instant_rows(metrics_data):
rows = []
for metric in metrics_data:
row_data = metric['metric']
timestamp, value = metric['value']
date_time = datetime.fromtimestamp(timestamp)
row_data.update({"timestamp": date_time, "value": value})
rows.append(row_data)
return rows
def get_range_rows(metrics_data):
rows = []
for metric in metrics_data:
ts_values = metric['values']
metric_labels = metric['metric']
for values in ts_values:
row_data = metric_labels.copy()
timestamp, value = values
date_time = datetime.fromtimestamp(timestamp)
row_data.update({'timestamp': date_time, 'value': value})
rows.append(row_data)
return rows
class Prometheus(BaseQueryRunner):
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {
'url': {
'type': 'string',
'title': 'Prometheus API URL'
}
},
"required": ["url"]
}
@classmethod
def annotate_query(cls):
return False
@classmethod
def type(cls):
return "Prometheus"
def test_connection(self):
resp = requests.get(self.configuration.get("url", None))
return resp.ok
def get_schema(self, get_stats=False):
base_url = self.configuration["url"]
metrics_path = '/api/v1/label/__name__/values'
response = requests.get(base_url + metrics_path)
response.raise_for_status()
data = response.json()['data']
schema = {}
for name in data:
schema[name] = {'name': name}
return schema.values()
def run_query(self, query, user):
"""
Query Syntax, actually it is the URL query string.
Check the Prometheus HTTP API for the details of the supported query string.
https://prometheus.io/docs/prometheus/latest/querying/api/
example: instant query
query=http_requests_total
example: range query
query=http_requests_total&start=2018-01-20T00:00:00.000Z&end=2018-01-25T00:00:00.000Z&step=60s
example: until now range query
query=http_requests_total&start=2018-01-20T00:00:00.000Z&step=60s
query=http_requests_total&start=2018-01-20T00:00:00.000Z&end=now&step=60s
"""
base_url = self.configuration["url"]
columns = [
{
'friendly_name': 'timestamp',
'type': TYPE_DATETIME,
'name': 'timestamp'
},
{
'friendly_name': 'value',
'type': TYPE_STRING,
'name': 'value'
},
]
try:
error = None
query = query.strip()
# for backward compatibility
query = 'query={}'.format(query) if not query.startswith('query=') else query
payload = parse_qs(query)
query_type = 'query_range' if 'step' in payload.keys() else 'query'
# for the range of until now
if query_type == 'query_range' and ('end' not in payload.keys() or 'now' in payload['end']):
date_now = datetime.now()
payload.update({"end": [date_now.isoformat("T") + "Z"]})
api_endpoint = base_url + '/api/v1/{}'.format(query_type)
response = requests.get(api_endpoint, params=payload)
response.raise_for_status()
metrics = response.json()['data']['result']
if len(metrics) == 0:
return None, 'query result is empty.'
metric_labels = metrics[0]['metric'].keys()
for label_name in metric_labels:
columns.append({
'friendly_name': label_name,
'type': TYPE_STRING,
'name': label_name
})
if query_type == 'query_range':
rows = get_range_rows(metrics)
else:
rows = get_instant_rows(metrics)
json_data = json_dumps(
{
'rows': rows,
'columns': columns
}
)
except requests.RequestException as e:
return None, str(e)
except KeyboardInterrupt:
error = "Query cancelled by user."
json_data = None
return json_data, error
register(Prometheus)
|
Python
| 0.999999
|
@@ -1406,73 +1406,8 @@
se%0A%0A
- @classmethod%0A def type(cls):%0A return %22Prometheus%22%0A%0A
|
e822a1c863d5ff2b37f1123f2a5fae63061f7d44
|
fix heartbeat origin
|
alert-sqs/alert-sqs.py
|
alert-sqs/alert-sqs.py
|
#!/usr/bin/env python
import settings
from alert import Alert, Heartbeat, ApiClient
from kombu import BrokerConnection
from Queue import Empty
__version__ = '3.0.0'
from kombu.utils.debug import setup_logging
# setup_logging(loglevel='DEBUG', loggers=[''])
def main():
broker_url = getattr(settings, 'broker_url', 'sqs://')
transport_options = getattr(settings, 'transport_options', {'region': 'eu-west-1'})
sqs_queue = getattr(settings, 'sqs_queue', 'alerta')
connection = BrokerConnection(broker_url, transport_options=transport_options)
queue = connection.SimpleQueue(sqs_queue)
api = ApiClient()
while True:
try:
message = queue.get(block=True, timeout=20)
api.send_alert(Alert(**message.payload))
message.ack()
except Empty:
pass
except (KeyboardInterrupt, SystemExit):
break
api.send_heartbeat(Heartbeat(origin='alert-sqs', tags=[__version__]))
queue.close()
if __name__ == '__main__':
main()
|
Python
| 0.000003
|
@@ -16,16 +16,26 @@
python%0A%0A
+import os%0A
import s
@@ -722,16 +722,50 @@
out=20)%0A
+ print message.payload%0A
@@ -995,17 +995,36 @@
lert-sqs
-'
+/%25s' %25 os.uname()%5B1%5D
, tags=%5B
|
da006dee5771313c5e67f0ce8150bb3a216a0697
|
Bump the minor version number to reflect the relatively large scale removal of functionality.
|
PyFVCOM/__init__.py
|
PyFVCOM/__init__.py
|
"""
The FVCOM Python toolbox (PyFVCOM)
"""
__version__ = '1.3.4'
__author__ = 'Pierre Cazenave'
__credits__ = ['Pierre Cazenave']
__license__ = 'MIT'
__maintainer__ = 'Pierre Cazenave'
__email__ = 'pica@pml.ac.uk'
import inspect
from warnings import warn
# Import everything!
from PyFVCOM import buoy_tools
from PyFVCOM import cst_tools
from PyFVCOM import ctd_tools
from PyFVCOM import grid_tools
from PyFVCOM import ll2utm
from PyFVCOM import ocean_tools
from PyFVCOM import stats_tools
from PyFVCOM import tide_tools
from PyFVCOM import tidal_ellipse
from PyFVCOM import process_results
from PyFVCOM import read_results
|
Python
| 0
|
@@ -59,11 +59,11 @@
'1.
-3.4
+4.0
'%0A__
|
7c66a0b34806af9cf1ac6722318534643dea3865
|
Add classifiers
|
setup.py
|
setup.py
|
from __future__ import with_statement
import os.path
import setuptools
import sqlitebiter
REQUIREMENT_DIR = "requirements"
with open("README.rst") as fp:
long_description = fp.read()
with open(os.path.join("docs", "pages", "introduction", "summary.txt")) as f:
summary = f.read()
with open(os.path.join(REQUIREMENT_DIR, "requirements.txt")) as f:
install_requires = [line.strip() for line in f if line.strip()]
with open(os.path.join(REQUIREMENT_DIR, "test_requirements.txt")) as f:
tests_require = [line.strip() for line in f if line.strip()]
setuptools.setup(
name="sqlitebiter",
version=sqlitebiter.VERSION,
url="https://github.com/thombashi/sqlitebiter",
bugtrack_url="https://github.com/thombashi/sqlitebiter/issues",
author="Tsuyoshi Hombashi",
author_email="gogogo.vm@gmail.com",
description=summary,
include_package_data=True,
install_requires=install_requires,
keywords=["SQLite", "converter", "CSV", "JSON", "Excel", "Google Sheets"],
license="MIT License",
long_description=long_description,
packages=setuptools.find_packages(exclude=['test*']),
setup_requires=["pytest-runner"],
tests_require=tests_require,
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Database",
],
entry_points={
"console_scripts": [
"sqlitebiter=sqlitebiter.sqlitebiter:cmd",
],
}
)
|
Python
| 0.000907
|
@@ -1394,24 +1394,117 @@
T License%22,%0A
+ %22Operating System :: Microsoft%22,%0A %22Operating System :: Microsoft :: Windows%22,%0A
%22Ope
|
be10ac8d89f68244c2a3a69208e68ebd92b7a7a6
|
Capitalize Luminoso
|
luminoso_api/cli.py
|
luminoso_api/cli.py
|
import argparse
import csv
import json
import os
import sys
from signal import signal, SIGPIPE, SIG_DFL
from urllib.parse import urlparse
from .client import LuminosoClient
from .constants import URL_BASE
from .errors import LuminosoError
# Python raises IOError when reading process (such as `head`) closes a pipe.
# Setting SIG_DFL as the SIGPIPE handler prevents this program from crashing.
signal(SIGPIPE, SIG_DFL)
DESCRIPTION = "Access the luminoso API via the command line."
USAGE = """
Supply an HTTP verb and a path, with optional parameters.
Output is returned as JSON, CSV, or an error message.
Parameters may be specified in one of three ways:
A user-friendly key=value parameter list:
-p 'key=value' -p 'key2=value'
A JSON object from the command line:
-j '{"key": "value", "key2": "value"}'
A file containing a JSON object:
filename.json
Parameter options may be combined. A JSON object on the command line is merged
over one in a file, and -p options are merged over both.
GET and DELETE requests append the parameters to the URL.
POST, PUT, and PATCH send the given parameters as the body of the
request with Content-Type set to 'application/json'.
"""
def _print_csv(result):
"""Print a JSON list of JSON objects in CSV format."""
first_line = result[0]
w = csv.DictWriter(sys.stdout, fieldnames=sorted(first_line.keys()))
w.writeheader()
for line in result:
w.writerow(line)
def _read_params(input_file, json_body, p_params):
"""Read parameters from input file, -j, and -p arguments, in that order."""
params = {}
try:
if input_file:
params.update(json.load(input_file))
if json_body is not None:
params.update(json.loads(json_body))
except ValueError as e:
raise ValueError("input is not valid JSON: %s" % e)
try:
params.update({p.split('=', 1)[0]: p.split('=', 1)[1] for p in p_params})
except IndexError:
raise ValueError("--param arguments must have key=value format")
return params
def _main():
parser = argparse.ArgumentParser(
description=DESCRIPTION, epilog=USAGE,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-b', '--base-url', default=URL_BASE,
help="API root url, default: %s" % URL_BASE)
parser.add_argument('-t', '--token', help="API authentication token")
parser.add_argument('-s', '--save-token', action='store_true',
help="save --token for --base-url to"
" ~/.luminoso/tokens.json")
parser.add_argument('-p', '--param', action='append', default=[],
help="key=value parameters")
parser.add_argument('-j', '--json-body', help="JSON object parameter")
parser.add_argument('-c', '--csv', action='store_true',
help="print output in CSV format")
parser.add_argument('method',
choices=['get', 'post', 'put', 'patch', 'delete'])
parser.add_argument('path')
parser.add_argument('input_file', nargs='?', type=open)
args = parser.parse_args()
if args.save_token:
if not args.token:
raise Exception("error: no token provided")
LuminosoClient.save_token(args.token,
domain=urlparse(args.base_url).netloc)
client = LuminosoClient.connect(url=args.base_url, token=args.token)
if args.method == 'delete':
confirm = input('confirm %s %s? [Y/n] ' % (args.method, args.path))
if confirm not in ('', 'y', 'Y'):
sys.exit(os.EX_OK)
params = _read_params(args.input_file, args.json_body, args.param)
func = getattr(client, args.method)
result = func(args.path, **params)
if args.csv:
try:
_print_csv(result)
except TypeError as e:
raise ValueError("output not able to be displayed as CSV. %s" % e)
else:
print(json.dumps(result, sort_keys=True, indent=4))
def main():
try:
_main()
except (Exception, LuminosoError) as e:
print("lumi-api: %s" % e, file=sys.stderr)
sys.exit(1)
|
Python
| 0.999999
|
@@ -442,17 +442,17 @@
ess the
-l
+L
uminoso
|
0e5f1609dec977b3a5103add4807dedaab33632a
|
add tracer into the test
|
tests/test_weightless_tracer.py
|
tests/test_weightless_tracer.py
|
from gusto import *
from firedrake import PeriodicIntervalMesh, ExtrudedMesh, Expression, \
SpatialCoordinate, Constant, as_vector
from math import pi
import json
import pytest
def setup_tracer(dirname):
# declare grid shape, with length L and height H
L = 1000.
H = 1000.
nlayers = int(H / 100.)
ncolumns = int(L / 100.)
# make mesh
m = PeriodicIntervalMesh(ncolumns, L)
mesh = ExtrudedMesh(m, layers = nlayers, layer_height = H / nlayers)
fieldlist = ['u', 'rho', 'theta']
timestepping = TimesteppingParameters(dt = 1.0, maxk = 4, maxi = 1)
output = OutputParameters(dirname=dirname+"/tracer",
dumpfreq = 1,
#dumplist = ['u'],
perturbation_fields=['theta', 'rho'])
parameters = CompressibleParameters()
diagnostics = Diagnostics(*fieldlist)
state = State(mesh, vertical_degree = 1, horizontal_degree = 1,
family="CG",
timestepping = timestepping,
output = output,
parameters = parameters,
diagnostics = diagnostics,
fieldlist = fieldlist)
# Initial conditions
u0 = state.fields("u")
rho0 = state.fields("rho")
theta0 = state.fields("theta")
# spaces
Vu = u0.function_space()
Vt = theta0.function_space()
Vr = rho0.function_space()
# Isentropic background state
Tsurf = 300.
thetab = Constant(Tsurf)
theta_b = Function(Vt).interpolate(thetab)
rho_b = Function(Vr)
# Calculate initial rho
compressible_hydrostatic_balance(state, theta_b, rho_b, solve_for_rho=True)
# set up perturbation to theta
x = SpatialCoordinate(mesh)
theta_pert = Function(Vt).interpolate(Expression("sqrt(pow(x[0]-xc,2)+pow(x[1]-zc,2))" +
"> rc ? 0.0 : 0.25*(1. + cos((pi/rc)*" +
"(sqrt(pow((x[0]-xc),2)+pow((x[1]-zc),2)))))",
xc=500., zc=350., rc=250.))
theta0.interpolate(theta_b + theta_pert)
rho0.interpolate(rho_b)
state.initialise({'u': u0, 'rho': rho0, 'theta': theta0})
state.set_reference_profiles({'rho': rho_b, 'theta': theta_b})
# set up advection schemes
ueqn = EulerPoincare(state, Vu)
rhoeqn = AdvectionEquation(state, Vr, equation_form = "continuity")
thetaeqn = SUPGAdvection(state, Vt,
supg_params = {"dg_direction":"horizontal"},
equation_form = "advective")
# build advection dictionary
advection_dict = {}
advection_dict["u"] = ThetaMethod(state, u0, ueqn)
advection_dict["rho"] = SSPRK3(state, rho0, rhoeqn)
advection_dict["theta"] = SSPRK3(state, theta0, thetaeqn)
# Set up linear solver
schur_params = {'pc_type': 'fieldsplit',
'pc_fieldsplit_type': 'schur',
'ksp_type': 'gmres',
'ksp_monitor_true_residual': True,
'ksp_max_it': 100,
'ksp_gmres_restart': 50,
'pc_fieldsplit_schur_fact_type': 'FULL',
'pc_fieldsplit_schur_precondition': 'selfp',
'fieldsplit_0_ksp_type': 'richardson',
'fieldsplit_0_ksp_max_it': 5,
'fieldsplit_0_pc_type': 'bjacobi',
'fieldsplit_0_sub_pc_type': 'ilu',
'fieldsplit_1_ksp_type': 'richardson',
'fieldsplit_1_ksp_max_it': 5,
"fieldsplit_1_ksp_monitor_true_residual": True,
'fieldsplit_1_pc_type': 'gamg',
'fieldsplit_1_pc_gamg_sym_graph': True,
'fieldsplit_1_mg_levels_ksp_type': 'chebyshev',
'fieldsplit_1_mg_levels_ksp_chebyshev_estimate_eigenvalues': True,
'fieldsplit_1_mg_levels_ksp_chebyshev_estimate_eigenvalues_random': True,
'fieldsplit_1_mg_levels_ksp_max_it': 5,
'fieldsplit_1_mg_levels_pc_type': 'bjacobi',
'fieldsplit_1_mg_levels_sub_pc_type': 'ilu'}
linear_solver = CompressibleSolver(state, params = schur_params)
compressible_forcing = CompressibleForcing(state)
# build time stepper
stepper = Timestepper(state, advection_dict, linear_solver,
compressible_forcing)
return stepper, 5.0
def run_tracer(dirname):
stepper, tmax = setup_tracer(dirname)
stepper.run(t=0, tmax=tmax)
def test_tracer_setup(tmpdir):
dirname = str(tmpdir)
run_tracer(dirname)
with open(path.join(dirname, "tracer/diagnostics.json"), "r") as f:
data = json.load(f)
print data.keys()
# Dl2 = data["D_error"]["l2"][-1]/data["D"]["l2"][0]
# ul2 = data["u_error"]["l2"][-1]/data["u"]["l2"][0]
# assert Dl2 < 5.e-4
|
Python
| 0.000001
|
@@ -717,17 +717,16 @@
-#
dumplist
@@ -848,50 +848,8 @@
rs()
-%0A diagnostics = Diagnostics(*fieldlist)
%0A%0A
@@ -1074,53 +1074,8 @@
rs,%0A
- diagnostics = diagnostics,%0A
@@ -1122,25 +1122,29 @@
#
-Initial condition
+declare initial field
s%0A
@@ -1338,24 +1338,94 @@
on_space()%0A%0A
+ # declare tracer field%0A tracer0 = state.fields(%22tracer%22, Vt)%0A%0A
# Isentr
@@ -1667,17 +1667,19 @@
_for_rho
-=
+ =
True)%0A%0A
@@ -2185,16 +2185,52 @@
e(rho_b)
+%0A tracer0.interpolate(theta_pert)
%0A%0A st
@@ -2282,16 +2282,36 @@
: theta0
+, 'tracer' : tracer0
%7D)%0A s
@@ -2920,16 +2920,80 @@
etaeqn)%0A
+ advection_dict%5B%22tracer%22%5D = SSPRK3(state, tracer0, thetaeqn)%0A
%0A%0A #
@@ -4558,17 +4558,17 @@
tepper,
-5
+2
.0%0A%0A%0Adef
|
5ca5dbd9113fe97223764f647806a81eebcf5d70
|
fix it pls
|
alexBot/cogs/sugery.py
|
alexBot/cogs/sugery.py
|
import logging
import math
from typing import TYPE_CHECKING
import aiohttp
from discord.ext import tasks
from alexBot.classes import SugeryZone, Thresholds
from ..tools import Cog, get_json
if TYPE_CHECKING:
from bot import Bot
log = logging.getLogger(__name__)
# https://github.com/nightscout/cgm-remote-monitor/blob/0aed5c93a08b2483e4bb53f988b347a34b55321a/lib/plugins/direction.js#L53
DIR2CHAR = {
"NONE": '⇼',
"TripleUp": '⤊',
"DoubleUp": '⇈',
"SingleUp": '↑',
"FortyFiveUp": '↗',
"Flat": '→',
"FortyFiveDown": '↘',
"SingleDown": '↓',
"DoubleDown": '⇊',
"TripleDown": '⤋',
'NOT COMPUTABLE': '-',
'RATE OUT OF RANGE': '⇕',
}
class Sugery(Cog):
def __init__(self, bot: "Bot"):
super().__init__(bot)
self.sugery_update.start()
@tasks.loop(minutes=5)
async def sugery_update(self):
for user in self.bot.config.suggery:
async with aiohttp.ClientSession() as session:
data = await get_json(session, f"{user.baseURL}/api/v1/entries/current.json")
device = await get_json(session, f"{user.baseURL}/api/v1/deviceStatus.json")
log.debug(f"fetching {user.user}'s current data..")
try:
sgv = data[0]['sgv']
direction = data[0]['direction']
battery = device[0]['uploader']['battery']
except IndexError:
continue
log.debug(f"{sgv=}, {user.thresholds=}")
name = None
zone = None
if sgv <= user.thresholds.veryLow:
zone = SugeryZone.VERYLOW
elif user.thresholds.veryLow <= sgv <= user.thresholds.low:
zone = SugeryZone.LOW
elif user.thresholds.low <= sgv <= user.thresholds.high:
zone = SugeryZone.NORMAL
elif user.thresholds.high <= sgv <= user.thresholds.veryHigh:
zone = SugeryZone.HIGH
elif user.thresholds.veryHigh <= sgv:
zone = SugeryZone.VERYHIGH
name = f"{user.names[zone]} {DIR2CHAR[direction]}"
member = self.bot.get_guild(user.guild).get_member(user.user)
if zone != user.lastGroup:
await member.send(
f"Hi! your sugery zone is now `{zone.name.lower()}`.\n"
f"your SGV is currently {sgv}.\n"
f"additionally, your phone battery is {battery}. \n"
f"the direction is {direction} ({DIR2CHAR[direction]})"
)
if battery < 20 and not zone == user.lastGroup:
await member.send(f"ur battery dyin friendo: {battery}%")
user.lastGroup = zone
if member.nick == f"{name} (\N{BATTERY}{r' ⡀⣀⣄⣤⣦⣶⣷⣿'[math.ceil(battery * 0.09)]})":
continue
await member.edit(
nick=f"{name} (\N{BATTERY}{r' ⡀⣀⣄⣤⣦⣶⣷⣿'[math.ceil(battery * 0.09)]})",
reason="user's bloodsuger group or direction changed",
)
@sugery_update.before_loop
async def before_sugery(self):
for user in self.bot.config.suggery:
async with aiohttp.ClientSession() as session:
data = await get_json(session, f"{user.baseURL}/api/v1/status.json")
log.debug(f"fetching {user.user}..")
t = data['settings']['thresholds']
user.thresholds = Thresholds(
veryHigh=t['bgHigh'],
high=t['bgTargetTop'],
low=t['bgTargetBottom'],
veryLow=t['bgLow'],
)
await self.bot.wait_until_ready()
def cog_unload(self):
self.sugery_update.cancel()
def setup(bot):
bot.add_cog(Sugery(bot))
|
Python
| 0.000262
|
@@ -683,16 +683,121 @@
'%E2%87%95',%0A%7D%0A%0A
+BATTERYINDICATORS = %22 %5CU00002840%5CU000028c0%5CU000028c4%5CU000028e4%5CU000028e6%5CU000028f6%5CU000028f7%5CU000028ff%22%0A%0A
%0Aclass S
@@ -2973,138 +2973,8 @@
ne%0A%0A
- if member.nick == f%22%7Bname%7D (%5CN%7BBATTERY%7D%7Br' %E2%A1%80%E2%A3%80%E2%A3%84%E2%A3%A4%E2%A3%A6%E2%A3%B6%E2%A3%B7%E2%A3%BF'%5Bmath.ceil(battery * 0.09)%5D%7D)%22:%0A continue%0A%0A
@@ -3055,20 +3055,25 @@
RY%7D%7B
-r' %E2%A1%80%E2%A3%80%E2%A3%84%E2%A3%A4%E2%A3%A6%E2%A3%B6%E2%A3%B7%E2%A3%BF'
+BATTERYINDICATORS
%5Bmat
|
cddf9b83383adfc41e80c441b4f8f3219893cc86
|
Bump version for release
|
setup.py
|
setup.py
|
# #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
from setuptools import setup, find_packages
def parse_requirements():
"""
Rudimentary parser for the `requirements.txt` file
We just want to separate regular packages from links to pass them to the
`install_requires` and `dependency_links` params of the `setup()`
function properly.
"""
try:
requirements = \
map(str.strip, local_file('requirements.txt'))
except IOError:
raise RuntimeError("Couldn't find the `requirements.txt' file :(")
links = []
pkgs = []
for req in requirements:
if not req:
continue
if 'http:' in req or 'https:' in req:
links.append(req)
name, version = re.findall("\#egg=([^\-]+)-(.+$)", req)[0]
pkgs.append('{0}=={1}'.format(name, version))
else:
pkgs.append(req)
return pkgs, links
local_file = lambda f: \
open(os.path.join(os.path.dirname(__file__), f)).readlines()
#install_requires, dependency_links = parse_requirements()
if __name__ == '__main__':
packages = find_packages(exclude=['*tests*'])
print packages
setup(
name="pyrelic",
license="GPL",
version='0.6.0',
description=u'Python API Wrapper for NewRelic API',
author=u'Andrew Gross',
author_email=u'andrew.w.gross@gmail.com',
package_data={ "pyrelic.packages.requests": ["*.pem"] },
include_package_data=True,
url='https://github.com/andrewgross/pyrelic',
packages=packages,
classifiers=(
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Natural Language :: English',
'Operating System :: Microsoft',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
)
)
|
Python
| 0
|
@@ -1266,17 +1266,17 @@
on='0.6.
-0
+1
',%0A
|
6cdae4d9f11616c2e2b56917811f31e049bdc688
|
Support missing aafigure module
|
aafig/sphinxcontrib/aafig.py
|
aafig/sphinxcontrib/aafig.py
|
# -*- coding: utf-8 -*-
"""
sphinxcontrib.aafig
~~~~~~~~~~~~~~~~~~~
Allow embeded ASCII art to be rendered as nice looking images
using the aafigure reStructuredText extension.
See the README file for details.
:copyright: Copyright 2009 by Leandro Lucarella <llucax@gmail.com> \
(based on sphinxcontrib.mscgen).
:license: BSD, see LICENSE for details.
"""
import posixpath
from os import path
try:
from hashlib import sha1 as sha
except ImportError:
from sha import sha
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.errors import SphinxError
from sphinx.util import ensuredir
from sphinx.util.compat import Directive
import aafigure
def merge_defaults(options, config):
# merge default options
for (k, v) in config.aafig_default_options.items():
if k not in options:
options[k] = v
return options
def get_basename(text, options, prefix='aafig'):
options = options.copy()
if 'format' in options:
del options['format']
hashkey = text.encode('utf-8') + str(options)
id = sha(hashkey).hexdigest()
return '%s-%s' % (prefix, id)
class AafigError(SphinxError):
category = 'aafig error'
class AafigDirective(directives.images.Image):
"""
Directive to insert an ASCII art figure to be rendered by aafigure.
"""
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
own_option_spec = dict(
line_width = float,
background = str,
foreground = str,
fill = str,
aspect = directives.nonnegative_int,
textual = directives.flag,
proportional = directives.flag,
)
option_spec = directives.images.Image.option_spec.copy()
option_spec.update(own_option_spec)
def run(self):
aafig_options = dict()
image_attrs = dict()
own_options_keys = self.own_option_spec.keys() + ['scale']
for (k, v) in self.options.items():
if k in own_options_keys:
# convert flags to booleans
if v is None:
v = True
# convert percentage to float
if k == 'scale' or k == 'aspec':
v = float(v) / 100
aafig_options[k] = v
del self.options[k]
self.arguments = ['']
(image_node,) = directives.images.Image.run(self)
if isinstance(image_node, nodes.system_message):
return [image_node]
text = '\n'.join(self.content)
image_node.aafig = dict(options = aafig_options, text = text)
return [image_node]
def render_aafig_images(app, doctree):
for img in doctree.traverse(nodes.image):
if not hasattr(img, 'aafig'):
continue
options = img.aafig['options']
text = img.aafig['text']
format = app.builder.format
merge_defaults(options, app.builder.config)
try:
try:
options['format'] = app.builder.config.aafig_format[format]
except:
app.builder.warn('unsupported builder format "%s", please '
'add a custom entry in aafig_format config option '
'for this builder' % format)
img.replace_self(nodes.literal_block(text, text))
continue
fname, outfn, id, extra = render_aafigure(app, text, options)
except AafigError, exc:
app.builder.warn('aafigure error: ' + str(exc))
img.replace_self(nodes.literal_block(text, text))
continue
img['uri'] = fname
# FIXME: find some way to avoid this hack in aafigure
if extra:
(width, height) = [x.split('"')[1] for x in extra.split()]
(img['width'], img['height']) = (width, height)
def render_aafigure(app, text, options):
"""
Render an ASCII art figure into the requested format output file.
"""
fname = get_basename(text, options)
fname = '%s.%s' % (get_basename(text, options), options['format'])
if hasattr(app.builder, 'imgpath'):
# HTML
relfn = posixpath.join(app.builder.imgpath, fname)
outfn = path.join(app.builder.outdir, '_images', fname)
else:
# LaTeX
relfn = fname
outfn = path.join(app.builder.outdir, fname)
metadata_fname = '%s.aafig' % outfn
try:
if path.isfile(outfn):
extra = None
if options['format'].lower() == 'svg':
f = None
try:
try:
f = file(metadata_fname, 'r')
extra = f.read()
except:
raise AafigError()
finally:
if f is not None:
f.close()
return relfn, outfn, id, extra
except AafigError:
pass
ensuredir(path.dirname(outfn))
try:
(visitor, output) = aafigure.render(text, outfn, options)
output.close()
except aafigure.UnsupportedFormatError, e:
raise AafigError(str(e))
extra = None
if options['format'].lower() == 'svg':
extra = visitor.get_size_attrs()
f = file(metadata_fname, 'w')
f.write(extra)
f.close()
return relfn, outfn, id, extra
def setup(app):
app.add_directive('aafig', AafigDirective)
app.connect('doctree-read', render_aafig_images)
app.add_config_value('aafig_format', dict(html='svg', latex='pdf'), 'html')
app.add_config_value('aafig_default_options', dict(), 'html')
|
Python
| 0
|
@@ -698,16 +698,25 @@
ective%0A%0A
+try:%0A
import a
@@ -723,16 +723,56 @@
afigure%0A
+except ImportError:%0A aafigure = None%0A
%0A%0Adef me
@@ -4103,16 +4103,100 @@
%22%22%22%0A%0A
+ if aafigure is None:%0A raise AafigError('aafigure module not installed')%0A%0A
fnam
|
55d9ed499d842246c74bc72ff0e141fa22fde9d8
|
add numpexpr dependency
|
setup.py
|
setup.py
|
from setuptools import setup
import codecs
import os
import re
# to release:
# python setup.py register sdist bdist_egg upload
here = os.path.abspath(os.path.dirname(__file__))
# Read the version number from a source file.
# Why read it, and not import?
# see https://groups.google.com/d/topic/pypa-dev/0PkjVpcxTzQ/discussion
# https://github.com/pypa/sampleproject/blob/master/setup.py
def find_version(*file_paths):
# Open in Latin-1 so that we avoid encoding errors.
# Use codecs.open for Python 2 compatibility
with codecs.open(os.path.join(here, *file_paths), 'r', 'latin1') as f:
version_file = f.read()
# The version line must have the form
# __version__ = 'ver'
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
# Get the long description from the relevant file
with codecs.open('README.md', encoding='utf-8') as f:
long_description = f.read()
setup(
name='pyiso',
packages=['pyiso'],
version=find_version('pyiso', '__init__.py'),
description='Python client libraries for ISO and other power grid data sources.',
long_description=long_description,
author='Anna Schneider',
author_email='anna@watttime.org',
url='https://github.com/WattTime/pyiso',
license='Apache',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
test_suite = 'nose.collector',
install_requires=[
'beautifulsoup4',
'pandas==0.14.1',
'python-dateutil',
'pytz',
'requests',
'xlrd',
'selenium',
'celery>=3.1',
],
)
|
Python
| 0
|
@@ -2334,20 +2334,39 @@
lery%3E=3.1',%0A
+ 'numexpr',%0A
%5D,%0A)
|
f096dee1623936ed06340df1ee081a1f77eb8b77
|
Simplify plugin info declaration
|
pyexcel_xlsx/__init__.py
|
pyexcel_xlsx/__init__.py
|
"""
pyexcel_xlsx
~~~~~~~~~~~~~~~~~~~
The lower level xlsx file format handler using openpyxl
:copyright: (c) 2015-2017 by Onni Software Ltd & its contributors
:license: New BSD License
"""
# flake8: noqa
# this line has to be place above all else
# because of dynamic import
__FILE_TYPE__ = 'xlsx'
__pyexcel_io_plugins__ = [
{
'plugin_type': 'pyexcel-io reader',
'submodule': 'xlsxr',
'file_types': [__FILE_TYPE__, 'xlsm'],
'stream_type': 'binary'
},
{
'plugin_type': 'pyexcel-io writer',
'submodule': 'xlsxw',
'file_types': [__FILE_TYPE__, 'xlsm'],
'stream_type': 'binary'
},
]
from pyexcel_io.io import get_data as read_data, isstream, store_data as write_data
def save_data(afile, data, file_type=None, **keywords):
"""standalone module function for writing module supported file type"""
if isstream(afile) and file_type is None:
file_type = __FILE_TYPE__
write_data(afile, data, file_type=file_type, **keywords)
def get_data(afile, file_type=None, **keywords):
"""standalone module function for reading module supported file type"""
if isstream(afile) and file_type is None:
file_type = __FILE_TYPE__
return read_data(afile, file_type=file_type, **keywords)
|
Python
| 0.000001
|
@@ -290,16 +290,144 @@
import%0A
+from pyexcel_io.plugins import IORegistry%0Afrom pyexcel_io.io import get_data as read_data, isstream, store_data as write_data%0A%0A%0A
__FILE_T
@@ -470,69 +470,47 @@
_ =
-%5B%0A %7B%0A 'plugin_type': 'pyexcel-io
+IORegistry(__name__).add_a_
reader
-',%0A
+(%0A
- '
subm
@@ -510,27 +510,25 @@
submodule
-':
+=
'xlsxr',%0A
@@ -524,29 +524,24 @@
xlsxr',%0A
-
- '
file_types':
@@ -530,35 +530,33 @@
,%0A file_types
-':
+=
%5B__FILE_TYPE__,
@@ -560,37 +560,32 @@
_, 'xlsm'%5D,%0A
-
- '
stream_type': 'b
@@ -571,35 +571,33 @@
%0A stream_type
-':
+=
'binary'%0A %7D,%0A
@@ -593,74 +593,28 @@
ry'%0A
- %7D,%0A %7B%0A 'plugin_type': 'pyexcel-io
+).add_a_
writer
-',
+(
%0A
- '
subm
@@ -618,19 +618,17 @@
ubmodule
-':
+=
'xlsxw',
@@ -632,21 +632,16 @@
w',%0A
-
- '
file_typ
@@ -642,19 +642,17 @@
le_types
-':
+=
%5B__FILE_
@@ -676,13 +676,8 @@
- '
stre
@@ -687,11 +687,9 @@
type
-':
+=
'bin
@@ -697,102 +697,9 @@
ry'%0A
- %7D,%0A%5D%0A%0A%0Afrom pyexcel_io.io import get_data as read_data, isstream, store_data as write_data
+)
%0A%0A%0Ad
|
f1ab27dcb52212c3c818c3ef6d9be9410610c2d6
|
make these tests pass, please :)
|
tests/test_base_scraper.py
|
tests/test_base_scraper.py
|
from unittest import TestCase
from statscraper import BaseScraper, Dataset, Dimension, ROOT
class Scraper(BaseScraper):
def _fetch_itemslist(self, item):
yield Dataset("Dataset_1")
yield Dataset("Dataset_2")
yield Dataset("Dataset_3")
def _fetch_dimensions(self, dataset):
yield Dimension(u"date")
yield Dimension(u"municipality")
def _fetch_data(self, dataset, query=None):
yield {
"date": "2017-08-10",
"municipality": "Robertsfors",
"value": 127
}
class TestBaseScraper(TestCase):
def test_init(self):
""" Extending the basescraper """
scraper = Scraper()
self.assertTrue(scraper.current_item.id == ROOT)
def test_inspect_item(self):
""" Fecthing items from an itemlist """
scraper = Scraper()
self.assertTrue(scraper.items[0] == scraper.items.get("Dataset_1"))
def test_select_item(self):
scraper = Scraper()
scraper.select("Dataset_1")
self.assertTrue(isinstance(scraper.current_item, Dataset))
def test_fetch_dataset(self):
scraper = Scraper()
dataset = scraper.items[0]
self.assertTrue(dataset.data[0]["municipality"] == "Robertsfors")
|
Python
| 0.000001
|
@@ -1110,162 +1110,732 @@
est_
-fetch_dataset(self):%0A scraper = Scraper()%0A dataset = scraper.items%5B0%5D%0A self.assertTrue(dataset.data%5B0%5D%5B%22municipality%22%5D == %22Robertsfors%22
+select_missing_item(self):%0A # Should throw something like a KeyError?%0A scraper = Scraper()%0A scraper.select(%22non_existing_item%22)%0A%0A def test_fetch_dataset(self):%0A scraper = Scraper()%0A dataset = scraper.items%5B0%5D%0A self.assertTrue(dataset.data%5B0%5D%5B%22municipality%22%5D == %22Robertsfors%22)%0A%0A def test_select_dimension(self):%0A # I want to be able to select a%0A scraper = Scraper()%0A scraper.select(%22Dataset_1%22)%0A dataset = scraper.current_item%0A dim = dataset.dimension(%22date%22)%0A self.assertTrue(isinstance(dim, Dimension))%0A%0A # Or is %22select%22 a better method name?%0A dim = dataset.get(%22date%22)%0A self.assertTrue(isinstance(dim, Dimension)
)%0A
|
066299ce0aa6174c2b7c1070d801cbf540932697
|
fix some python 3 issues
|
tests/test_cachemanager.py
|
tests/test_cachemanager.py
|
import time
from datetime import datetime
from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options
defaults = {'cache.data_dir':'./cache', 'cache.type':'dbm', 'cache.expire': 2}
def teardown():
import shutil
shutil.rmtree('./cache', True)
def make_cache_obj(**kwargs):
opts = defaults.copy()
opts.update(kwargs)
cache = CacheManager(**parse_cache_config_options(opts))
return cache
def make_region_cached_func():
global _cache_obj
opts = {}
opts['cache.regions'] = 'short_term, long_term'
opts['cache.short_term.expire'] = '2'
cache = make_cache_obj(**opts)
@cache.region('short_term', 'region_loader')
def load(person):
now = datetime.now()
return "Hi there %s, its currently %s" % (person, now)
_cache_obj = cache
return load
def make_cached_func():
global _cache_obj
cache = make_cache_obj()
@cache.cache('loader')
def load(person):
now = datetime.now()
return "Hi there %s, its currently %s" % (person, now)
_cache_obj = cache
return load
def test_parse_doesnt_allow_none():
opts = {}
opts['cache.regions'] = 'short_term, long_term'
for region, params in parse_cache_config_options(opts)['cache_regions'].iteritems():
for k, v in params.iteritems():
assert v != 'None', k
def test_parse_doesnt_allow_empty_region_name():
opts = {}
opts['cache.regions'] = ''
regions = parse_cache_config_options(opts)['cache_regions']
assert len(regions) == 0
def test_decorators():
for func in (make_region_cached_func, make_cached_func):
yield check_decorator, func()
def check_decorator(func):
result = func('Fred')
assert 'Fred' in result
result2 = func('Fred')
assert result == result2
result3 = func('George')
assert 'George' in result3
result4 = func('George')
assert result3 == result4
time.sleep(2)
result2 = func('Fred')
assert result != result2
def test_check_invalidate_region():
func = make_region_cached_func()
result = func('Fred')
assert 'Fred' in result
result2 = func('Fred')
assert result == result2
_cache_obj.region_invalidate(func, None, 'region_loader', 'Fred')
result3 = func('Fred')
assert result3 != result2
result2 = func('Fred')
assert result3 == result2
# Invalidate a non-existent key
_cache_obj.region_invalidate(func, None, 'region_loader', 'Fredd')
assert result3 == result2
def test_check_invalidate():
func = make_cached_func()
result = func('Fred')
assert 'Fred' in result
result2 = func('Fred')
assert result == result2
_cache_obj.invalidate(func, 'loader', 'Fred')
result3 = func('Fred')
assert result3 != result2
result2 = func('Fred')
assert result3 == result2
# Invalidate a non-existent key
_cache_obj.invalidate(func, 'loader', 'Fredd')
assert result3 == result2
def test_long_name():
func = make_cached_func()
name = 'Fred' * 250
result = func(name)
assert name in result
result2 = func(name)
assert result == result2
# This won't actually invalidate it since the key won't be sha'd
_cache_obj.invalidate(func, 'loader', name, key_length=8000)
result3 = func(name)
assert result3 == result2
# And now this should invalidate it
_cache_obj.invalidate(func, 'loader', name)
result4 = func(name)
assert result3 != result4
|
Python
| 0.000069
|
@@ -1271,20 +1271,16 @@
gions'%5D.
-iter
items():
@@ -1311,12 +1311,8 @@
ams.
-iter
item
|
408ef23f0227650c77dbaf3efae0dd569fb076dd
|
update version for release
|
setup.py
|
setup.py
|
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
import distribute_setup
distribute_setup.use_setuptools()
from setuptools import setup
setup(
name='rst2html5-tools',
version='0.2.5',
author='Mariano Guerra',
description="Transform reStructuredText documents to html5 + twitter's bootstrap css, deck.js or reveal.js",
author_email='luismarianoguerra@gmail.com',
url='https://github.com/marianoguerra/rst2html5',
long_description=open('README.rst').read(),
packages=['html5css3'],
package_data={'html5css3': ['thirdparty/*/*.*']},
include_package_data=True,
install_requires=['docutils'],
entry_points={
'console_scripts': [
'rst2html5 = html5css3.main:main',
]},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Documentation',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing',
'Topic :: Text Processing :: Markup',
'Topic :: Utilities',
],
)
|
Python
| 0
|
@@ -225,17 +225,17 @@
on='0.2.
-5
+6
',%0A a
|
239a4bad9c9ba249625d7a77da084df38a5b7d4b
|
Allow the residual plugin to function when nsteps = 1.
|
pyfr/plugins/residual.py
|
pyfr/plugins/residual.py
|
# -*- coding: utf-8 -*-
import numpy as np
from pyfr.mpiutil import get_comm_rank_root, get_mpi
from pyfr.plugins.base import BasePlugin, init_csv
class ResidualPlugin(BasePlugin):
name = 'residual'
systems = ['*']
def __init__(self, intg, cfgsect, suffix):
super().__init__(intg, cfgsect, suffix)
comm, rank, root = get_comm_rank_root()
# Output frequency
self.nsteps = self.cfg.getint(cfgsect, 'nsteps')
# The root rank needs to open the output file
if rank == root:
header = ['t'] + intg.system.elementscls.convarmap[self.ndims]
# Open
self.outf = init_csv(self.cfg, cfgsect, ','.join(header))
def __call__(self, intg):
# If an output is due next step
if (intg.nacptsteps + 1) % self.nsteps == 0:
self._prev = [s.copy() for s in intg.soln]
self._tprev = intg.tcurr
# If an output is due this step
elif intg.nacptsteps % self.nsteps == 0:
# MPI info
comm, rank, root = get_comm_rank_root()
# Previous and current solution
prev = self._prev
curr = intg.soln
# Square of the residual vector for each variable
resid = sum(np.linalg.norm(p - c, axis=(0, 2))**2
for p, c in zip(prev, curr))
# Reduce and, if we are the root rank, output
if rank != root:
comm.Reduce(resid, None, op=get_mpi('sum'), root=root)
else:
comm.Reduce(get_mpi('in_place'), resid, op=get_mpi('sum'),
root=root)
# Normalise
resid = np.sqrt(resid) / (intg.tcurr - self._tprev)
# Build the row
row = [intg.tcurr] + resid.tolist()
# Write
print(','.join(str(r) for r in row), file=self.outf)
# Flush to disk
self.outf.flush()
del self._prev, self._tprev
|
Python
| 0.000001
|
@@ -704,218 +704,122 @@
-def __call__(
+ # Call our
self
-,
in
-tg):%0A # If an output is due next step%0A if (intg.nacptsteps + 1) %25 self.nsteps == 0:%0A self._prev = %5Bs.copy() for s in intg.soln%5D%0A self._tprev = intg.tcurr
+ case output is needed after the first step%0A self(intg)%0A%0A def __call__(self, intg):
%0A
@@ -863,18 +863,16 @@
-el
if intg.
@@ -900,16 +900,36 @@
eps == 0
+ and intg.nacptsteps
:%0A
@@ -1950,12 +1950,198 @@
self._tprev%0A
+%0A # If an output is due next step%0A if (intg.nacptsteps + 1) %25 self.nsteps == 0:%0A self._prev = %5Bs.copy() for s in intg.soln%5D%0A self._tprev = intg.tcurr%0A
|
77dc6134be66bf16e346d6120c361ca2b11899f3
|
Add events
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
setup(name='findatapy',
version='0.05',
description='Market data library',
author='Saeed Amen',
author_email='saeed@cuemacro.com',
license='Apache 2.0',
keywords = ['pandas', 'data', 'Bloomberg', 'tick', 'stocks', 'equities'],
url = 'https://github.com/cuemacro/findatapy',
packages = find_packages(),
include_package_data = True,
install_requires = ['pandas',
'twython',
'pytz',
'requests',
'numpy'],
zip_safe=False)
|
Python
| 0.00006
|
@@ -601,16 +601,63 @@
'numpy'
+,%0A 'pandas_datareader'
%5D,%0A%09 zi
|
e385a57804329356a2f4e7c44532cfa052441555
|
Fix test data broken due to updated behavior of PyFile#getImportBlock()
|
python/testData/refactoring/move/relativeImportsInsideMovedModule/after/src/subpkg1/mod1.py
|
python/testData/refactoring/move/relativeImportsInsideMovedModule/after/src/subpkg1/mod1.py
|
from pkg1 import subpkg2
from pkg1.subpkg2 import mod2
from pkg1.subpkg2.mod2 import VAR
from . import mod3
# malformed imports
from
from import
from pkg1.subpkg2 import
# absolute imports
import pkg1.subpkg2 as foo
from pkg1 import subpkg2 as bar
print(subpkg2, mod3, mod2, foo, bar, VAR)
|
Python
| 0
|
@@ -1,89 +1,66 @@
-from pkg1 import subpkg2%0Afrom pkg1.subpkg2 import mod2%0Afrom pkg1.subpkg2.mod2
+import%0Afrom%0Afrom%0A%0Aimport pkg1.subpkg2 as foo%0Afrom pkg1
import
VAR%0A
@@ -59,26 +59,33 @@
ort
-VAR
+subpkg2
%0Afrom
-.
+pkg1
import
mod3
@@ -84,140 +84,126 @@
ort
-mod3%0A%0A# malformed imports%0Afrom%0Afrom import%0Afrom pkg1.subpkg2 import%0A%0A# absolute imports%0Aimport pkg1.subpkg2 as foo
+subpkg2 as bar%0Afrom pkg1.subpkg2 import%0Afrom pkg1.subpkg2 import mod2%0Afrom pkg1.subpkg2.mod2 import VAR
%0Afrom
-pkg1
+.
import
subp
@@ -198,30 +198,20 @@
import
-subpkg2 as bar
+mod3
%0A%0Aprint(
|
72c669d71b797268870f00e2aa1c00018bcd638b
|
add local_asn test
|
tests/versions/base/test_bgp.py
|
tests/versions/base/test_bgp.py
|
Python
| 0.000001
|
@@ -0,0 +1,1767 @@
+#!/usr/bin/env python%0A%22%22%22%0ACopyright 2015 Brocade Communications Inc.%0A%0ALicensed under the Apache License, Version 2.0 (the %22License%22);%0Ayou may not use this file except in compliance with the License.%0AYou may obtain a copy of the License at%0A%0Ahttp://www.apache.org/licenses/LICENSE-2.0%0A%0AUnless required by applicable law or agreed to in writing, software%0Adistributed under the License is distributed on an %22AS IS%22 BASIS,%0AWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0ASee the License for the specific language governing permissions and%0Alimitations under the License.%0A%22%22%22%0A%0A%0Aimport unittest%0Aimport xml.etree.ElementTree as ET%0Aimport pynos.versions.base.bgp%0Aimport pynos.utilities%0A%0A%0Aclass TestBGP(unittest.TestCase):%0A %22%22%22%0A BGP unit tests. Compare expected XML to generated XML.%0A %22%22%22%0A def setUp(self):%0A self.bgp = pynos.versions.base.bgp.BGP(pynos.utilities.return_xml)%0A%0A def test_local_asn(self):%0A expected = '%3Cconfig%3E'%5C%0A '%3Crbridge-id xmlns=%22urn:brocade.com:mgmt:brocade-rbridge%22%3E'%5C%0A '%3Crbridge-id%3E2%3C/rbridge-id%3E%3Crouter%3E'%5C%0A '%3Cbgp xmlns=%22urn:brocade.com:mgmt:brocade-bgp%22%3E'%5C%0A '%3Cvrf-name%3Ex%3C/vrf-name%3E'%5C%0A '%3Crouter-bgp-cmds-holder%3E%3Crouter-bgp-attributes%3E'%5C%0A '%3Clocal-as%3E65535%3C/local-as%3E%3C/router-bgp-attributes%3E'%5C%0A '%3C/router-bgp-cmds-holder%3E%3C/bgp%3E%3C/router%3E%3C/rbridge-id%3E'%5C%0A '%3C/config%3E'%0A result = self.bgp.local_asn(local_as='65535', rbridge_id='2', vrf='x')%0A result = ET.tostring(result)%0A self.assertEquals(expected, result)%0A%0A def test_local_asn_exception(self):%0A with self.assertRaises(KeyError):%0A self.bgp.local_asn(rbridge='2', vrf='x')%0A
|
|
5a7f89735345ab0ff2fae68a28ad8d21e35e6751
|
use selenium to detect invisible element
|
core/drivers/extract/driver/spiders/form.py
|
core/drivers/extract/driver/spiders/form.py
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from driver.items import InputItem, FormItem
class FormSpider(CrawlSpider):
name = "form"
allowed_domains = ["127.0.0.1"]
def __init__(self, *args, **kwargs):
super(FormSpider, self).__init__(*args, **kwargs)
self.start_urls = [kwargs.get('start_url')]
follow = True if kwargs.get('follow') == 'true' else False
self.rules = (
Rule (SgmlLinkExtractor(allow=('')), callback='parse_form', follow=follow),
)
super(FormSpider, self)._compile_rules()
def parse_form(self, response):
for sel in response.xpath('//form'):
formItem = FormItem()
formItem['action'] = ''
try:
formItem['action'] = sel.xpath('@action').extract()[0]
except:
pass
formItem['url'] = response.url
formItem['method'] = ''
try:
formItem['method'] = sel.xpath('@method').extract()[0].lower()
except:
pass
formItem['inputs'] = []
for ip in sel.xpath('//input[@type="text" or @type="password" or @type="email"]|//textarea'):
try:
id = ip.xpath('@id').extract()[0]
except:
id = ''
name = ip.xpath('@name').extract()[0]
try:
type = ip.xpath('@type').extract()[0]
except:
type = ''
inputItem = InputItem()
inputItem['id'] = id
inputItem['name'] = name
inputItem['type'] = type
inputItem['value'] = ''
formItem['inputs'].append(inputItem)
yield formItem
|
Python
| 0.000002
|
@@ -187,16 +187,84 @@
FormItem
+%0Afrom selenium import webdriver%0Afrom db_webcrawler.settings import *
%0A%0Aclass
@@ -752,44 +752,336 @@
)%0A%0A
-%0A def parse_form(self, response):
+ try:%0A service_args = %5B%0A '--proxy=' + HTTP_PROXY,%0A '--proxy-type=http',%0A %5D%0A except:%0A service_args = None%0A self.browser = webdriver.PhantomJS(service_args=service_args)%0A%0A %0A def parse_form(self, response):%0A self.browser.get(response.url)
%0A
@@ -1806,24 +1806,215 @@
id = ''%0A
+ if id != '':%0A input_element = self.browser.find_element_by_id(id)%0A if not input_element.is_displayed():%0A continue%0A
|
eb80dc47f09dd186c0b438ff6d864c27090c83cc
|
Force element attributes to be a dict, workaround for 2.4 xml sax bug.
|
pyglet/layout/content.py
|
pyglet/layout/content.py
|
#!/usr/bin/env python
'''Describes document content (element) tree.
Use a module from pyglet.layout.builders to create the tree.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from pyglet.layout.css import *
__all__ = ['Document',
'DocumentListener',
'ContentElement',
'AnonymousElement',
'AnonymousTextElement',
]
class DocumentListener(object):
def on_set_root(self, root):
pass
def on_element_modified(self, element):
pass
def on_element_style_modified(self, element):
pass
class Document(object):
root = None
stylesheets = ()
title = ''
def __init__(self):
self.stylesheets = []
self.listeners = []
self.element_ids = {}
def add_listener(self, listener):
self.listeners.append(listener)
def remove_listener(self, listener):
self.listeners.remove(listener)
def set_root(self, root):
self.root = root
for l in self.listeners:
l.on_set_root(root)
def get_element(self, id):
return self.element_ids.get(id, None)
def element_modified(self, element):
'''Notify that an element's children or text have changed.
'''
for l in self.listeners:
l.on_element_modified(element)
def element_style_modified(self, element):
'''Notify that the element's style has changed.
'''
for l in self.listeners:
l.on_element_style_modified(element)
class ContentElementStyle(object):
'''The style of an element is proxied by this class, which provides
a friendly dict-like interface for changing properties.
'''
__slots__ = ['element']
def __init__(self, element):
self.element = element
def __getitem__(self, key):
if not self.element.element_declaration_set:
return None
for decl in self.element.element_declaration_set.declarations:
if decl.property == key:
return ''.join([str(v) for v in decl.values])
def __setitem__(self, key, value):
element = self.element
if element.element_declaration_set:
declarations = \
[d for d in element.element_declaration_set.declarations \
if d.property != key]
else:
declarations = []
if type(value) in (str, unicode):
value = parse_style_expression(value)
elif type(value) != list:
value = [list]
decl = Declaration(key, value, None)
declarations.append(decl)
# XXX Can't reuse old DeclarationSet because there is already a
# StyleNode that uses it. Unfortunately this can mean we leak
# memory as the StyleTree retains a reference to a potentially
# unused node. This will build incrementally for all changes
# to element style. One solution could be to maintain a declaration
# set cache.
element.element_declaration_set = DeclarationSet(declarations)
element.document.element_style_modified(element)
def __delitem__(self, key):
element = self.element
if element.element_declaration_set:
element.element_declaration_set = DeclarationSet(
[d for d in element.element_declaration_set.declarations \
if d.property != key])
element.document.element_style_modified(element)
def __contains__(self, key):
element = self.element
if not element.element_declaration_set:
return False
return len([d for d in element.element_declaration_set.declarations \
if d.property == key]) != 0
def __str__(self):
if not self.element.element_declaration_set:
return ''
return '; '.join([str(d) for d in \
self.element.element_declaration_set.declarations])
class ContentElement(SelectableElement):
# Either there are children or text; not both. AnonymousTextElements
# are created where necessary.
children = ()
text = ''
is_anonymous = False
element_declaration_set = None # style from style attribute
intrinsic_declaration_set = None # style on HTML presentation elements
frame = None
def __init__(self, document, name, attributes, parent, previous_sibling):
self.document = document
self.name = name
self.attributes = attributes
self.parent = parent
self.previous_sibling = previous_sibling
def add_child(self, element):
if self.text:
# Anonymous inline boxes, 9.2.2.1
anon = AnonymousTextElement(self.text, self, None)
self.text = None
self.children = [anon, element]
elif not self.children:
self.children = [element]
else:
self.children.append(element)
self.document.element_modified(self)
def add_text(self, text):
if self.children and type(self.children[-1]) == AnonymousTextElement:
self.children[-1].add_text(text)
elif self.children:
# Anonymous inline boxes, 9.2.2.1
anon = AnonymousTextElement(text, self, self.children[-1])
self.children.append(anon)
else:
self.text += text
self.document.element_modified(self)
def set_element_style(self, style):
self.element_declaration_set = parse_style_declaration_set(style)
self.document.element_style_modified(self)
style = property(lambda self: ContentElementStyle(self),
set_element_style)
def pprint(self, indent=''):
import textwrap
print '\n'.join(textwrap.wrap(repr(self),
initial_indent=indent,
subsequent_indent=indent))
for child in self.children:
child.pprint(indent + ' ')
if self.text:
print '\n'.join(textwrap.wrap(repr(self.text),
initial_indent=(indent+' '),
subsequent_indent=(indent+' ')))
class AnonymousElement(ContentElement):
is_anonymous = True
attributes = {}
def __init__(self, parent):
self.parent = parent
self.computed_properties = {}
def short_repr(self):
return '<%s>' % self.__class__.__name__
def __repr__(self):
return '<%s>(parent=%s)' % \
(self.__class__.__name__, self.parent.short_repr())
class AnonymousTextElement(AnonymousElement):
def __init__(self, text, parent, previous_sibling):
self.text = text
self.parent = parent
self.previous_sibling = previous_sibling
self.computed_properties = {}
def add_child(self, element):
assert False, "Can't add child to text element."
def add_text(self, text):
self.text += text
|
Python
| 0.998915
|
@@ -4473,45 +4473,8 @@
ame%0A
- self.attributes = attributes%0A
@@ -4548,16 +4548,214 @@
ibling%0A%0A
+ # Make attributes more like a dict%0A #self.attributes = attributes%0A self.attributes = %7B%7D%0A for key, value in attributes.items():%0A self.attributes%5Bkey%5D = value%0A%0A
def
|
02def5a9881e315c0ab4eb894b207502e0231df2
|
fix manage error
|
src/hammr/utils/generics_utils.py
|
src/hammr/utils/generics_utils.py
|
# To change this license header, choose License Headers in Project Properties.
# To change this template file, choose Tools | Templates
# and open the template in the editor.
import json
import sys
import re
import traceback
from os.path import expanduser
import os
import urllib
from uforge.objects.xsd0 import *
import download_utils
import printer
__author__="UShareSoft"
def extract_id(uri):
elements = uri.split("/");
return elements[len(elements) - 1];
def check_mandatory_stack(stack):
if not "name" in stack:
printer.out("no attribute [name] for [stack]", printer.ERROR)
return
if not "version" in stack:
printer.out("no attribute [version] for [stack]", printer.ERROR)
return
if not "os" in stack:
printer.out("no attribute [os] for [stack]", printer.ERROR)
return
else:
if not "name" in stack["os"]:
printer.out("no attribute [name] for [os]", printer.ERROR)
return
if not "version" in stack["os"]:
printer.out("no attribute [version] for [os]", printer.ERROR)
return
if not "arch" in stack["os"]:
printer.out("no attribute [arch] for [os]", printer.ERROR)
return
return stack
def check_mandatory_builders(builders):
return builders
#TODO
def check_mandatory_generate_scan(builders):
for builder in builders:
if not "installation" in builder:
printer.out("no attribute installation in builder", printer.ERROR)
return
if not "diskSize" in builder["installation"]:
printer.out("no attribute diskSize in the installation part of builder", printer.ERROR)
return
if not "hardwareSettings" in builder:
printer.out("no attribute hardwareSettings in builder", printer.ERROR)
return
if not "memory" in builder["hardwareSettings"]:
printer.out("no attribute diskSize in the memory part of hardwareSettings", printer.ERROR)
return
return builders
def check_mandatory_create_account(iterables, type):
#iterables can be builders or accounts
for iterable in iterables:
if type=="builders":
if not "account" in iterable:
printer.out("no attribute account in builder", printer.ERROR)
return
if not "type" in iterable and not "type" in iterable["account"]:
printer.out("no attribute type in builder", printer.ERROR)
return
if "file" in iterable["account"]:
file = get_file(iterable["account"]["file"])
if file is None:
return 2
data = check_json_syntax(file)
if data is None:
return 2
if "accounts" in data:
return check_mandatory_create_account(data["accounts"], "accounts")
if type=="accounts":
if not "type" in iterable:
printer.out("no attribute type in accounts", printer.ERROR)
return
#TODO
return iterables
def check_json_syntax(file):
try:
printer.out("Validating the template file ["+file+"] ...")
json_data=open(file)
data = json.load(json_data)
json_data.close()
printer.out("Syntax of template file ["+file+"] is ok", printer.OK)
return data
except ValueError as e:
printer.out("Syntax of template file ["+file+"] FAILED", printer.ERROR)
printer.out("JSON parsing error: "+str(e))
return
except IOError as e:
printer.out("File error: "+e.strerror, printer.ERROR)
return
def validate_json_file(file):
try:
data = check_json_syntax(file)
if data is None:
return
#check manadatory fields
if "stack" in data:
stack=check_mandatory_stack(data["stack"])
if stack is None:
return
#else:
# print "No stack section find in the template file"
# return
if "builders" in data:
check_mandatory_builders(data["builders"])
return data
except ValueError as e:
printer.out("JSON parsing error: "+str(e), printer.ERROR)
printer.out("Syntax of template file ["+file+"]: FAILED")
except IOError as e:
printer.out("unknown error template json file", printer.ERROR)
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is one of "yes" or "no".
"""
valid = {"yes":True, "y":True, "ye":True,
"no":False, "n":False}
if default == None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
printer.out(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
printer.out("Please respond with 'yes' or 'no' "\
"(or 'y' or 'n').\n")
def remove_special_chars(string):
return (re.sub('[-]', '_', string)).lower()
def print_uforge_exception(e):
if type(e.args[0]) is uForgeError:
return "UForge Error '"+str(e.args[0].statusCode)+"' with method: "+e.args[0].requestMethod+" "+e.args[0].requestUri+"\n"+"Message:\n\t"+e.args[0].errorMsg
else:
traceback.print_exc()
def oder_list_object_by(objects, attribute):
if type(attribute) is str:
return sorted(objects, key=lambda x: getattr(x, attribute).lower(), reverse=False)
return objects
def get_uforge_url_from_ws_url(ws_url):
if ws_url[-1:]!='/':
return ws_url.rpartition('/')[0]
else:
return ws_url[:-1].rpartition('/')[0]
def get_home_dir():
return expanduser("~")
def get_hammr_dir():
dir = get_home_dir()+os.sep+".hammr"
if not os.path.isdir(dir):
os.mkdir(dir)
return dir
def get_remote_regex():
return 'http|ftp|svn'
def get_file(uri):
try:
regexp = re.compile(get_remote_regex())
if regexp.search(uri) is not None:
print "Downloadling file "+os.path.basename(uri)+": "
dlUtils = download_utils.Download()
file, headers = urllib.urlretrieve(uri, reporthook=dlUtils.progress_update)
dlUtils.progress_finish()
else:
file, headers = urllib.urlretrieve(uri)
return file
except Exception, e:
print("error downloading "+uri+": "+ str(e))
return
def remove_URI_forbidden_char(string):
chars= ' '
return re.sub(chars, '_', string)
def create_user_ssh_key(api, login, sshKey):
if not "name" in sshKey:
printer.out("sshKey name not found in builder", printer.ERROR)
return 2
if not "publicKey" in sshKey:
printer.out("publicKey in sshKey not found in builder", printer.ERROR)
return 2
mySshKey = sshKey()
mySshKey.name=sshKey["name"]
mySshKey.publicKey=sshKey["publicKey"]
key = self.api.Users(login).Sshkeys().Create(mySshKey)
if key is None:
printer.out("Impossible to create sshKey ["+mySshKey.name+"]", printer.ERROR)
return 2
return key
|
Python
| 0.000001
|
@@ -7006,24 +7006,42 @@
%0A if
+len(e.args)%3E1 and
type(e.args%5B
|
0200aef612dc236cc211cc089fa05d1a5e669c0f
|
Refactor HospitalEquipment
|
ambulances/viewsets.py
|
ambulances/viewsets.py
|
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404
from rest_framework import viewsets
from rest_framework import mixins
from rest_framework import generics
from rest_framework import filters
from rest_framework.permissions import IsAuthenticated
from rest_framework import permissions
from .models import Ambulance, Hospital, Profile, HospitalEquipment
from .serializers import ExtendedProfileSerializer, \
AmbulanceSerializer, HospitalSerializer, HospitalEquipmentSerializer
# Django REST Framework Viewsets
class IsUserOrAdminOrSuper(permissions.BasePermission):
"""
Only user or staff can see or modify
"""
def has_object_permission(self, request, view, obj):
return (request.user.is_superuser or
request.user.is_staff or
obj.user == request.user)
# Profile viewset
class ProfileViewSet(mixins.RetrieveModelMixin,
viewsets.GenericViewSet):
queryset = Profile.objects.all()
serializer_class = ExtendedProfileSerializer
permission_classes = (permissions.IsAuthenticated,
IsUserOrAdminOrSuper,)
lookup_field = 'user__username'
# AmbulancePermission
class AmbulancePermissionViewSet(viewsets.GenericViewSet):
queryset = Ambulance.objects.all()
def get_queryset(self):
#print('@get_queryset {}({})'.format(self.request.user,
# self.request.method))
# return all ambulances if superuser
user = self.request.user
if user.is_superuser:
return self.queryset
# return nothing if anonymous
if user.is_anonymous:
raise PermissionDenied()
# print('> METHOD = {}'.format(self.request.method))
# otherwise only return ambulances that the user can read or write to
if self.request.method == 'GET':
# ambulances that the user can read
can_do = user.profile.ambulances.filter(can_read=True).values('ambulance_id')
elif (self.request.method == 'PUT' or
self.request.method == 'PATCH' or
self.request.method == 'DELETE'):
# ambulances that the user can write to
can_do = user.profile.ambulances.filter(can_write=True).values('ambulance_id')
else:
raise PermissionDenied()
#print('> user = {}, can_do = {}'.format(user, can_do))
#print('> ambulances = {}'.format(Ambulance.objects.all()))
#print('> filtered ambulances = {}'.format(Ambulance.objects.filter(id__in=can_do)))
return self.queryset.filter(id__in=can_do)
# HospitalPermission
class HospitalPermissionViewSet(viewsets.GenericViewSet):
queryset = Hospital.objects.all()
def get_queryset(self):
#print('@get_queryset {}({})'.format(self.request.user,
# self.request.method))
# return all hospitals if superuser
user = self.request.user
if user.is_superuser:
return self.queryset
# return nothing if anonymous
if user.is_anonymous:
raise PermissionDenied()
#print('> METHOD = {}'.format(self.request.method))
# otherwise only return hospitals that the user can read or write to
if self.request.method == 'GET':
# hospitals that the user can read
can_do = user.profile.hospitals.filter(can_read=True).values('hospital_id')
elif (self.request.method == 'PUT' or
self.request.method == 'PATCH' or
self.request.method == 'DELETE'):
# hospitals that the user can write to
can_do = user.profile.hospitals.filter(can_write=True).values('hospital_id')
else:
raise PermissionDenied()
#print('> user = {}, can_do = {}'.format(user, can_do))
#print('> hospitals = {}'.format(Hospital.objects.all()))
#print('> filtered hospitals = {}'.format(Hospital.objects.filter(id__in=can_do)))
return self.queryset.filter(id__in=can_do)
# Ambulance viewset
class AmbulanceViewSet(mixins.ListModelMixin,
mixins.RetrieveModelMixin,
mixins.CreateModelMixin,
mixins.UpdateModelMixin,
AmbulancePermissionViewSet):
serializer_class = AmbulanceSerializer
def perform_create(self, serializer):
#print('@perform_create')
serializer.save(updated_by=self.request.user)
def perform_update(self, serializer):
#print('@perform_update')
serializer.save(updated_by=self.request.user)
# Hospital viewset
class HospitalViewSet(mixins.ListModelMixin,
mixins.RetrieveModelMixin,
mixins.CreateModelMixin,
mixins.UpdateModelMixin,
HospitalPermissionViewSet):
serializer_class = HospitalSerializer
def perform_create(self, serializer):
serializer.save(updated_by=self.request.user)
def perform_update(self, serializer):
serializer.save(updated_by=self.request.user)
# HospitalEquipment viewset
class HospitalEquipmentViewSet(mixins.RetrieveModelMixin,
viewsets.GenericViewSet):
queryset = HospitalEquipment.objects.all()
serializer_class = HospitalEquipmentSerializer
lookup_field = 'equipment__name'
lookup_fields = ('hospital_id', 'equipment__name')
def get_queryset(self):
#print('@get_queryset {}({})'.format(self.request.user,
# self.request.method))
# return all hospitals if superuser
user = self.request.user
if user.is_superuser:
return HospitalEquipment.objects.all()
# return nothing if anonymous
if user.is_anonymous:
raise PermissionDenied()
#print('> METHOD = {}'.format(self.request.method))
# otherwise only return hospitals that the user can read or write to
if self.request.method == 'GET':
# hospitals that the user can read
can_do = user.profile.hospitals.filter(can_read=True).values('hospital_id')
elif (self.request.method == 'PUT' or
self.request.method == 'PATCH' or
self.request.method == 'DELETE'):
# hospitals that the user can write to
can_do = user.profile.hospitals.filter(can_write=True).values('hospital_id')
else:
raise PermissionDenied()
#print('> user = {}, can_do = {}'.format(user, can_do))
#print('> hospitals = {}'.format(HospitalEquipment.objects.all()))
#print('> filtered hospitals = {}'.format(HospitalEquipment.objects.filter(id__in=can_do)))
return HospitalEquipment.objects.filter(id__in=can_do)
# make sure both fields are looked up
def get_object(self):
queryset = self.get_queryset()
filter = {}
for field in self.lookup_fields:
filter[field] = self.kwargs[field]
obj = get_object_or_404(queryset, **filter)
self.check_object_permissions(self.request, obj)
return obj
|
Python
| 0
|
@@ -5394,32 +5394,34 @@
-viewsets.Generic
+HospitalPermission
ViewSet)
@@ -5622,1410 +5622,8 @@
')%0A%0A
- def get_queryset(self):%0A%0A #print('@get_queryset %7B%7D(%7B%7D)'.format(self.request.user,%0A # self.request.method))%0A %0A # return all hospitals if superuser%0A user = self.request.user%0A if user.is_superuser:%0A return HospitalEquipment.objects.all()%0A%0A # return nothing if anonymous%0A if user.is_anonymous:%0A raise PermissionDenied()%0A%0A #print('%3E METHOD = %7B%7D'.format(self.request.method))%0A # otherwise only return hospitals that the user can read or write to%0A if self.request.method == 'GET':%0A # hospitals that the user can read%0A can_do = user.profile.hospitals.filter(can_read=True).values('hospital_id')%0A%0A elif (self.request.method == 'PUT' or%0A self.request.method == 'PATCH' or%0A self.request.method == 'DELETE'):%0A # hospitals that the user can write to%0A can_do = user.profile.hospitals.filter(can_write=True).values('hospital_id')%0A %0A else:%0A raise PermissionDenied()%0A%0A #print('%3E user = %7B%7D, can_do = %7B%7D'.format(user, can_do))%0A #print('%3E hospitals = %7B%7D'.format(HospitalEquipment.objects.all()))%0A #print('%3E filtered hospitals = %7B%7D'.format(HospitalEquipment.objects.filter(id__in=can_do)))%0A return HospitalEquipment.objects.filter(id__in=can_do)%0A %0A
|
0b3843e159a09345f68158b7b5fb1967bff9da0a
|
remove unnecessary_outputs option
|
fmriprep/run_workflow.py
|
fmriprep/run_workflow.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: oesteban
# @Date: 2015-11-19 16:44:27
# @Last Modified by: oesteban
# @Last Modified time: 2016-10-05 15:03:18
"""
fMRI preprocessing workflow
=====
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import os.path as op
import glob
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
from multiprocessing import cpu_count
def main():
"""Entry point"""
from fmriprep import __version__
parser = ArgumentParser(description='fMRI Preprocessing workflow',
formatter_class=RawTextHelpFormatter)
# Arguments as specified by BIDS-Apps
# required, positional arguments
# IMPORTANT: they must go directly with the parser object
parser.add_argument('bids_dir', action='store', default=os.getcwd())
parser.add_argument('output_dir', action='store',
default=op.join(os.getcwd(), 'out'))
parser.add_argument('analysis_level', choices=['participant'])
# optional arguments
parser.add_argument('--participant_label', action='store', nargs='+')
parser.add_argument('-v', '--version', action='version',
version='fmriprep v{}'.format(__version__))
# Other options
g_input = parser.add_argument_group('fMRIprep specific arguments')
g_input.add_argument('-s', '--session-id', action='store', default='single_session')
g_input.add_argument('-r', '--run-id', action='store', default='single_run')
g_input.add_argument('-d', '--data-type', action='store', choices=['anat', 'func'])
g_input.add_argument('--debug', action='store_true', default=False,
help='run debug version of workflow')
g_input.add_argument('--nthreads', action='store', default=0,
type=int, help='number of threads')
g_input.add_argument('--write-graph', action='store_true', default=False,
help='Write workflow graph.')
g_input.add_argument('--use-plugin', action='store', default=None,
help='nipype plugin configuration file')
g_input.add_argument('-w', '--work-dir', action='store',
default=op.join(os.getcwd(), 'work'))
g_input.add_argument('-t', '--workflow-type', default='ds005', required=False,
action='store', choices=['ds005', 'ds054', 'HPC', 'spiral'],
help='workflow type, a monkeypatch while it is not automatically identified')
# ANTs options
g_ants = parser.add_argument_group('specific settings for ANTs registrations')
g_ants.add_argument('--ants-nthreads', action='store', type=int,
help='number of threads that will be set in ANTs processes')
g_ants.add_argument('--skull-strip-ants', action='store_true', default=False,
help='use ANTs-based skull-stripping')
opts = parser.parse_args()
create_workflow(opts)
def create_workflow(opts):
import logging
from nipype import config as ncfg
from fmriprep.utils import make_folder
from fmriprep.viz.reports import run_reports
from fmriprep.workflows import base as fwb
from fmriprep.workflows.base import base_workflow_enumerator
settings = {
'bids_root': op.abspath(opts.bids_dir),
'write_graph': opts.write_graph,
'nthreads': opts.nthreads,
'debug': opts.debug,
'skull_strip_ants': opts.skull_strip_ants,
'output_dir': op.abspath(opts.output_dir),
'work_dir': op.abspath(opts.work_dir),
'remove_unnecessary_outputs': False
}
# set up logger
logger = logging.getLogger('cli')
if opts.debug:
settings['ants_t1-mni_settings'] = 't1-mni_registration_test'
logger.setLevel(logging.DEBUG)
if opts.ants_nthreads is not None:
settings['ants_threads'] = opts.ants_nthreads
log_dir = op.join(settings['output_dir'], 'log')
derivatives = op.join(settings['output_dir'], 'derivatives')
# Check and create output and working directories
# Using make_folder to prevent https://github.com/poldracklab/mriqc/issues/111
make_folder(settings['output_dir'])
make_folder(settings['work_dir'])
make_folder(derivatives)
make_folder(log_dir)
logger.addHandler(logging.FileHandler(op.join(log_dir, 'run_workflow')))
# Set nipype config
ncfg.update_config({
'logging': {'log_directory': log_dir, 'log_to_file': True},
'execution': {'crashdump_dir': log_dir}
})
# nipype plugin configuration
plugin_settings = {'plugin': 'Linear'}
if opts.use_plugin is not None:
from yaml import load as loadyml
with open(opts.use_plugin) as f:
plugin_settings = loadyml(f)
else:
# Setup multiprocessing
if settings['nthreads'] == 0:
settings['nthreads'] = cpu_count()
if settings['nthreads'] > 1:
plugin_settings['plugin'] = 'MultiProc'
plugin_settings['plugin_args'] = {'n_procs': settings['nthreads']}
# Determine subjects to be processed
subject_list = opts.participant_label
if subject_list is None or not subject_list:
subject_list = [op.basename(subdir)[4:] for subdir in glob.glob(
op.join(settings['bids_root'], 'sub-*'))]
logger.info('Subject list: %s', ', '.join(subject_list))
# Build main workflow and run
preproc_wf = base_workflow_enumerator(subject_list, settings=settings)
preproc_wf.base_dir = settings['work_dir']
preproc_wf.run(**plugin_settings)
if opts.write_graph:
preproc_wf.write_graph()
run_reports(settings['output_dir'])
if __name__ == '__main__':
main()
|
Python
| 0.000342
|
@@ -3641,53 +3641,8 @@
dir)
-,%0A 'remove_unnecessary_outputs': False
%0A
|
434b57778e7cd75702e72dafc8c2c5efce0b1b86
|
Update test requirements
|
setup.py
|
setup.py
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
import sys
import os
import re
from setuptools import setup
from setuptools.command.test import test as TestCommand
kwargs = {}
requires = []
packages = [
"github3",
"github3.gists",
"github3.repos",
"github3.issues",
"github3.search",
]
kwargs['tests_require'] = ['betamax >=0.1.6', 'pytest']
if sys.version_info < (3, 0):
kwargs['tests_require'].append('unittest2==0.5.1')
if sys.version_info < (3, 3):
kwargs['tests_require'].append('mock == 1.0.1')
if sys.argv[-1] in ("submit", "publish"):
os.system("python setup.py bdist_wheel sdist upload")
sys.exit()
requires.extend(["requests >= 2.0", "uritemplate.py >= 0.2.0"])
__version__ = ''
with open('github3/__init__.py', 'r') as fd:
reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
for line in fd:
m = reg.match(line)
if m:
__version__ = m.group(1)
break
if not __version__:
raise RuntimeError('Cannot find version information')
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(
name="github3.py",
version=__version__,
description=("Python wrapper for the GitHub API"
"(http://developer.github.com/v3)"),
long_description="\n\n".join([open("README.rst").read(),
open("HISTORY.rst").read()]),
license=open('LICENSE').read(),
author="Ian Cordasco",
author_email="graffatcolmingov@gmail.com",
url="https://github3py.readthedocs.org",
packages=packages,
install_requires=requires,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: CPython',
],
extras_require={'test': kwargs['tests_require']},
cmdclass={'test': PyTest},
**kwargs
)
|
Python
| 0
|
@@ -345,11 +345,11 @@
%3E=0.
-1.6
+2.0
', '
@@ -432,16 +432,17 @@
nittest2
+
==0.5.1'
@@ -516,17 +516,16 @@
'mock ==
-
1.0.1')%0A
|
f8da511cb61072b566ebd0113edd125395b8d422
|
Fix connection
|
tests/test_reconnection.py
|
tests/test_reconnection.py
|
"""
Collection of test cases to test connection module.
"""
from nose.tools import assert_true, assert_false, assert_equal, raises
import datajoint as dj
import numpy as np
from datajoint import DataJointError
from . import CONN_INFO, PREFIX
class TestReconnect:
"""
test reconnection
"""
@classmethod
def setup_class(cls):
cls.conn = dj.conn(reset=True, **CONN_INFO)
def test_close(self):
assert_true(self.conn.is_connected, "Connection should be alive")
self.conn.close()
assert_false(self.conn.is_connected, "Connection should now be closed")
def test_reconnect(self):
assert_true(self.conn.is_connected, "Connection should be alive")
self.conn.close()
self.conn.query('SHOW DATABASES;', reconnect=True).fetchall()
assert_true(self.conn.is_connected, "Connection should be alive")
@raises(DataJointError)
def reconnect_throws_error_in_transaction(self):
assert_true(self.conn.is_connected, "Connection should be alive")
self.conn.close()
with self.conn.transaction:
self.conn.query('SHOW DATABASES;', reconnect=True).fetchall()
|
Python
| 0.000006
|
@@ -307,44 +307,22 @@
-@classmethod%0A def setup_class(cls
+def setup(self
):%0A
@@ -332,11 +332,12 @@
-cls
+self
.con
|
be56cb9f15e7ea0348937c9c86518786e138e023
|
update setup.py
|
setup.py
|
setup.py
|
from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
setup(name='kaspar_gui',
version='0.1',
description='Internet based Front-End for the KASPAR Robot',
long_description=readme(),
classifiers=[
'Development Status :: 1 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
],
url='http://github.com/uh-nmb/KASPAR',
author='Nathan Burke',
author_email='n.burke@natbur.com',
license='MIT',
packages=['kasparGUI'],
install_requires=[
'dateutil',
'gevent',
'flask',
'werkzeug'
'flask-restless',
'flask-sqlalchemy',
'flask-socketio',
'sqlalchemy',
'robotActionController'
],
dependancy_links=['git+ssh://git@github.com/uh-nmb/robotActionController'],
data_files=[('/etc/init.d', ['systemConfigs/etc/init.d/kasparweb', ]),
('/etc/rsyslog.d', ['systemConfigs/etc/rsyslog.d/kaspar_log.conf', ]),
('/udev/rules.d', ['systemConfigs/udev/rules.d/98-keyPad.rules', 'systemConfigs/udev/rules.d/98-serial.rules', 'systemConfigs/udev/rules.d/99-input.rules'])],
include_package_data=True,
zip_safe=False)
|
Python
| 0.000001
|
@@ -634,16 +634,23 @@
'
+python-
dateutil
@@ -714,16 +714,17 @@
erkzeug'
+,
%0D%0A
|
e2e6cdac88ee03f78713ac4a50d0003a471a0027
|
Add Python 3.9 to the list of supported versions.
|
setup.py
|
setup.py
|
from setuptools import setup
long_description = open('README.rst').read()
setup(
name="celery-redbeat",
description="A Celery Beat Scheduler using Redis for persistent storage",
long_description=long_description,
version="2.0.0",
url="https://github.com/sibson/redbeat",
license="Apache License, Version 2.0",
author="Marc Sibson",
author_email="sibson+redbeat@gmail.com",
keywords="python celery beat redis".split(),
packages=["redbeat"],
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Topic :: System :: Distributed Computing',
'Topic :: Software Development :: Object Brokering',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Operating System :: OS Independent',
],
install_requires=['redis>=3.2', 'celery>=4.2', 'python-dateutil', 'tenacity'],
tests_require=['pytest'],
)
|
Python
| 0
|
@@ -959,16 +959,65 @@
: 3.8',%0A
+ 'Programming Language :: Python :: 3.9',%0A
|
81475691a3fe745a66b69ebe4bda55d8ea4fd399
|
add unit test for Processor().validate
|
tests/test_scrapyscript.py
|
tests/test_scrapyscript.py
|
import unittest
import pytest
from scrapy.settings import Settings
from scrapy.spiders import Spider
import scrapy
from scrapyscript import Job, Processor, ScrapyScriptException
class MyItem(scrapy.Item):
bot = scrapy.Field()
data = scrapy.Field()
class MySpider(Spider):
name = "myspider"
def start_requests(self):
yield scrapy.Request(self.url)
def parse(self, response):
title = response.xpath("//title/text()").extract_first()
return MyItem(bot=self.settings["BOT_NAME"], data=title)
class BigSpider(Spider):
name = "bigspider"
def start_requests(self):
yield scrapy.Request(self.url)
def parse(self, response):
longstr = "x" * 1073741824 * 2 # 2gb
return longstr
class BadSpider(Spider):
name = "badspider"
def start_requests(self):
yield scrapy.Request("http://www.python.org")
def parse(self, response):
return True
class ParamReturnSpider(Spider):
name = "paramreturnspider"
start_urls = ["http://www.python.org"]
def __init__(self, category=None, *args, **kwargs):
super(ParamReturnSpider, self).__init__(*args, **kwargs)
self.category = category
def parse(self, response):
return dict(category=self.category, fruit=self.fruit)
class MyItemSpider(Spider):
name = "myitemspider"
def start_requests(self):
yield scrapy.Request(self.url)
def parse(self, response):
title = response.xpath("//title/text()").extract_first()
return MyItem(data=title + "x" * 1048576)
class ScrapyScriptTests(unittest.TestCase):
def test_create_valid_job(self):
spider = MySpider
job = Job(spider)
self.assertIsInstance(job, Job)
def test_parameters_passed_to_spider(self):
spider = ParamReturnSpider
job = Job(spider, "cat1", fruit="banana")
result = Processor().run(job)
self.assertEqual(result, [dict(category="cat1", fruit="banana")])
def test_no_spider_provided(self):
self.assertRaises(TypeError, Job)
def test_settings_flow_through_to_spider(self):
settings = Settings()
settings["BOT_NAME"] = "alpha"
job = Job(MySpider, url="http://www.python.org")
results = Processor(settings=settings).run(job)
self.assertEqual(results[0]["bot"], "alpha")
def test_multiple_jobs(self):
jobs = [
Job(MySpider, url="http://www.python.org"),
Job(MySpider, url="http://www.github.com"),
]
results = Processor().run(jobs)
data = [item["data"].lower() for item in results]
self.assertEqual(any("python" in s for s in data), True)
self.assertEqual(any("github" in s for s in data), True)
self.assertEqual(len(results), 2)
def test_bad_return_value(self):
job = Job(BadSpider, url="http://www.python.org")
results = Processor().run(job)
self.assertEqual(results, [])
def test_big_return_value(self):
job = Job(BigSpider, url="http://www.python.org")
results = Processor().run(job)
self.assertEqual(results, [])
# larger, long running jobs can deadlock see https://github.com/jschnurr/scrapyscript/issues/3
@pytest.mark.timeout(30)
def test_for_deadlock(self):
jobs = [Job(MyItemSpider, url="http://www.python.org") for i in range(50)]
results = Processor().run(jobs)
self.assertEqual(len(results), 50)
class ProcessorTests(unittest.TestCase):
def test_item_scraped(self):
p = Processor()
p._item_scraped("test")
self.assertEqual(p.items[0], "test")
if __name__ == "__main__":
unittest.main()
|
Python
| 0.000003
|
@@ -3662,16 +3662,215 @@
test%22)%0A%0A
+ def test_job_validate(self):%0A jobs = %5BJob(BigSpider, url=%22http://www.python.org%22), %22not a Job%22%5D%0A p = Processor()%0A self.assertRaises(ScrapyScriptException, p.validate, jobs)%0A%0A
%0Aif __na
|
38a555cb1eb2a5d0170ff1aab70fb0f2f01d6b2b
|
add x and y title options
|
pyfluka/utils/Plotter.py
|
pyfluka/utils/Plotter.py
|
import numpy as np
import os.path
from itertools import izip
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm, Normalize
def get_axes_range(axisdata):
start, end, nbins = axisdata
step = (end - start) / nbins
print start, end, nbins, step
return np.arange(start, end + step / 2., step)
# todo: duplicate of definition in UsrbinReader
def pack_data(dataraw, axesdata):
try:
bin_shape = (axesdata[0][2], axesdata[1][2], axesdata[2][2]) # x,y,z
except:
bin_shape = (axesdata[0][2], axesdata[1][2]) # x,y,z
reverse_bin_shape = list(bin_shape)
reverse_bin_shape.reverse()
try:
return np.reshape(np.array(dataraw), reverse_bin_shape).transpose()
except:
return np.reshape(np.array(dataraw[:-1]), reverse_bin_shape).transpose()
class Plotter(object):
def __init__(self, output_dir=".", file_format="png"):
"""
Constructor
:param output_dir (Optional[str]): Output directory. Defaults to current directory.
:param file_format: (Optional[str]): File format plots are stored. Defaults to png.
:return:
"""
self.outputDir = output_dir
self.format = file_format
def plot_matrix(self, mat, axesdata,
out_filename=None,
use_log=True,
vmin_log=None,
vmax_log=None,
aspect_ratio_equal=True,
geometry_data=None,
vmin=None,
vmax=None):
x = get_axes_range(axesdata[2])
y = get_axes_range(axesdata[1])
if use_log:
plt.pcolor(x, y, mat.astype(float), norm=LogNorm(vmin=vmin_log, vmax=vmax_log))
else:
plt.pcolor(x, y, mat[0], norm=Normalize(vmin=vmin, vmax=vmax))
plt.xlim(axesdata[0][0], axesdata[0][1])
plt.ylim(axesdata[1][0], axesdata[1][1])
if aspect_ratio_equal:
plt.axes().set_aspect('equal')
if geometry_data is not None:
for x, y in izip(*geometry_data):
plt.plot(x, y, 'k-', linewidth=2)
if out_filename:
plt.savefig(os.path.join(self.outputDir, out_filename), format=self.format)
return plt
def plot_matrix_short(self, data, axesdata, selection, transpose=False, use_log=True, vmin_log=None, vmax_log=None,
aspect_ratio_equal=True, geometry_data=None, savefilename=None, vmin=None, vmax=None):
selecteddata = data[selection]
selectedaxesdata = [axesdata[i] for (i, selectionItem) in enumerate(selection) if selectionItem == Ellipsis]
print selectedaxesdata
if transpose:
selecteddata = selecteddata.transpose()
else:
selectedaxesdata.reverse()
return self.plot_matrix(selecteddata, selectedaxesdata, use_log, vmin_log, vmax_log, aspect_ratio_equal,
geometry_data, savefilename, vmin=vmin, vmax=vmax)
class PlotConfig(object):
def __init__(self, name, kwargs):
self.name = name
for attr, val in kwargs.items():
self.__setattr__(attr, val)
def __eq__(self, other):
lhs = {(k, v) for k, v in self.__dict__.items() if not k == 'name'}
rhs = {(k, v) for k, v in other.__dict__.items() if not k == 'name'}
return lhs == rhs
|
Python
| 0.000067
|
@@ -1259,16 +1259,54 @@
esdata,%0A
+ plot_config=None,%0A
@@ -1332,24 +1332,24 @@
ename=None,%0A
-
@@ -2175,16 +2175,175 @@
idth=2)%0A
+ if %22xtitle%22 in plot_config:%0A plt.xlabel(plot_config.xtitle)%0A if %22ytitle%22 in plot_config:%0A plt.ylabel(plot_config.ytitle)%0A%0A
@@ -3342,16 +3342,16 @@
tems():%0A
-
@@ -3383,16 +3383,86 @@
, val)%0A%0A
+ def __contains__(self, item):%0A return hasattr(self, item)%0A%0A
def
|
29c40e1e5048c5f8d76486020be6464de0e2adc7
|
add more dependency
|
setup.py
|
setup.py
|
from setuptools import find_packages
from setuptools import setup
install_requires = [
'numpy',
'theano',
]
setup(
name="TheFramework",
version="0.0.1",
description="A nn lib",
packages=find_packages(),
include_package_data=False,
zip_safe=False,
install_requires=install_requires,
)
|
Python
| 0
|
@@ -108,16 +108,42 @@
heano',%0A
+ 'pyyaml',%0A 'h5py',%0A
%5D%0A%0Asetup
|
e3c794718d715f3226680beea9ba3ad2f3e344d9
|
test coverage improvement
|
tests/test_sheet_update.py
|
tests/test_sheet_update.py
|
import pyexcel
import os
import datetime
class TestReader:
def setUp(self):
"""
Make a test csv file as:
a,b,c,d
e,f,g,h
i,j,1.1,1
"""
self.testfile = "testcsv.csv"
self.rows = 3
w = pyexcel.Writer(self.testfile)
data=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 1.1, 1]
w.write_row(data[:4])
w.write_row(data[4:8])
w.write_row(data[8:12])
w.close()
def test_update_a_cell(self):
r = pyexcel.readers.PlainReader(self.testfile)
r.cell_value(0,0,'k')
assert r[0][0] == 'k'
d = datetime.date(2014, 10, 1)
r.cell_value(0,1,d)
assert isinstance(r[0][1], datetime.date) is True
assert r[0][1].strftime("%d/%m/%y") == "01/10/14"
def test_update_a_cell_with_a_filter(self):
"""
Filter the sheet first and then update the filtered now
with the filter, you can set its value. then clear
the filters, the value stays with the cell. so if you want
to save the change with original data, please clear the filter
first
"""
r = pyexcel.FilterableReader(self.testfile)
r.filter(pyexcel.filters.ColumnFilter([0, 2]))
r.cell_value(2, 1, "k")
assert r[2][1] == "k"
r.clear_filters()
assert r[2][3] == "k"
def test_set_item(self):
r = pyexcel.Reader(self.testfile)
content = ['r', 's', 't', 'o']
r[1] = content
assert r[1] == ['r', 's', 't', 'o']
content2 = [1, 2, 3, 4]
r[1:] = content2
assert r[2] == [1, 2, 3, 4]
content3 = [True, False, True, False]
r[0:0] = content3
assert r[0] == [True, False, True, False]
r[0:2:1] = [1, 1, 1, 1]
assert r[0] == [1, 1, 1, 1]
assert r[1] == [1, 1, 1, 1]
assert r[2] == [1, 2, 3, 4]
try:
r[2:1] = ['e', 'r', 'r', 'o']
assert 1==2
except ValueError:
assert 1==1
def test_extend_rows(self):
r = pyexcel.PlainReader(self.testfile)
content = [['r', 's', 't', 'o'],
[1, 2, 3, 4],
[True],
[1.1, 2.2, 3.3, 4.4, 5.5]]
r.extend_rows(content)
assert r[3] == ['r', 's', 't', 'o']
assert r[4] == [1, 2, 3, 4]
assert r[5] == [True, "", "", ""]
assert r[6] == [1.1, 2.2, 3.3, 4.4]
try:
r2 = pyexcel.Reader(self.testfile)
content = [['r', 's', 't', 'o'],
[1, 2, 3, 4],
[True],
[1.1, 2.2, 3.3, 4.4, 5.5]]
r2.extend_rows(content)
assert 1==2
except NotImplementedError:
assert 1==1
def test_delete_rows(self):
r = pyexcel.PlainReader(self.testfile)
r.delete_rows([0,1])
assert r[0] == ['i', 'j', 1.1, 1]
try:
r2 = pyexcel.Reader(self.testfile)
r2.delete_rows([1,2])
assert 1==2
except NotImplementedError:
assert 1==1
def tearDown(self):
if os.path.exists(self.testfile):
os.unlink(self.testfile)
|
Python
| 0
|
@@ -2804,16 +2804,230 @@
ert 1==1
+%0A%0A def test_set_column_at(self):%0A r = pyexcel.PlainReader(self.testfile)%0A try:%0A r.set_column_at(1,%5B11,1%5D, 1000)%0A assert 1==2%0A except ValueError:%0A assert 1==1
%0A
@@ -3187,32 +3187,152 @@
1%5D%0A try:%0A
+ r.delete_rows(%22hi%22)%0A assert 1==2%0A except ValueError:%0A assert 1==1%0A try:%0A
r2 =
|
67e6036c564f4e2eb9acf650acf5c33813af3003
|
make serve_image return an image
|
views.py
|
views.py
|
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def hello_world():
return render_template('index.html')
"""@app.route('/<username>')
def hello_world2(username):
return 'Hello %s' % username"""
if __name__ == '__main__':
app.run(debug=True)
|
Python
| 0.999961
|
@@ -33,16 +33,69 @@
template
+, make_response%0Afrom PIL import Image%0Aimport StringIO
%0Aapp = F
@@ -189,19 +189,16 @@
tml')%0A%0A%0A
-%22%22%22
@app.rou
@@ -207,83 +207,330 @@
('/%3C
-username%3E')%0Adef hello_world2(username):%0A return 'Hello %25s' %25 username%22%22%22
+width%3Ex%3Cheight%3E')%0A@app.route('/%3Cwidth%3EX%3Cheight%3E')%0Adef serve_image(width, height):%0A stringfile = StringIO.StringIO()%0A im = Image.open(%22static/images/annie.jpg%22)%0A im.save(stringfile, 'JPEG')%0A response = make_response(stringfile.getvalue())%0A response.headers%5B%22Content-Type%22%5D = %22image/jpeg%22%0A return response%0A
%0A%0Aif
@@ -576,10 +576,26 @@
bug=True
+, host='0.0.0.0'
)%0A
|
7ec9f01dac80c4249a33306d05884843239c8707
|
enforce correct order of pages exported
|
pages/admin/actions.py
|
pages/admin/actions.py
|
from django.utils.translation import ugettext_lazy as _
from django.http import HttpResponse
from django.utils import simplejson
from django.conf import settings as global_settings
from django.db import transaction
from django.shortcuts import redirect, render_to_response
from django.template import RequestContext
from pages import settings
from pages.http import get_language_from_request
from pages.utils import get_placeholders
from pages.models import Page
JSON_PAGE_EXPORT_NAME = 'gerbi_cms_page_export_version'
JSON_PAGE_EXPORT_VERSION = 1
JSON_PAGE_EXPORT_FILENAME = 'cms_pages.json'
# make it readable -- there are better ways to save space
JSON_PAGE_EXPORT_INDENT = 2
def export_pages_as_json(modeladmin, request, queryset):
response = HttpResponse(mimetype="application/json")
response['Content-Disposition'] = 'attachment; filename=%s' % (
JSON_PAGE_EXPORT_FILENAME,)
response.write(simplejson.dumps(
{JSON_PAGE_EXPORT_NAME: JSON_PAGE_EXPORT_VERSION,
'pages': [page.dump_json_data() for page in queryset]},
indent=JSON_PAGE_EXPORT_INDENT))
return response
export_pages_as_json.short_description = _("Export pages as JSON")
@transaction.commit_on_success
def import_pages_from_json(request,
template_name='admin/pages/page/import_pages.html'):
try:
d = simplejson.load(request.FILES['json'])
except KeyError:
return redirect('admin:page-index')
try:
errors = validate_pages_json_data(d, get_language_from_request(request))
except KeyError, e:
errors = [_('JSON file is invalid: %s') % (e.args[0],)]
pages_created = []
if not errors:
for p in d['pages']:
pages_created.append(
Page.objects.create_and_update_from_json_data(p, request.user))
return render_to_response(template_name, {
'errors': errors,
'pages_created': pages_created,
'app_label': 'pages',
'opts': Page._meta,
}, RequestContext(request))
def validate_pages_json_data(d, preferred_lang):
"""
Check if an import of d will succeed, and return errors.
errors is a list of strings. The import should proceed only if errors
is empty.
"""
errors = []
seen_complete_slugs = dict(
(lang[0], set()) for lang in settings.PAGE_LANGUAGES)
valid_templates = set(t[0] for t in settings.get_page_templates())
valid_templates.add(global_settings.PAGE_DEFAULT_TEMPLATE)
if d[JSON_PAGE_EXPORT_NAME] != JSON_PAGE_EXPORT_VERSION:
return [_('Unsupported file version: %s') % repr(
d[JSON_PAGE_EXPORT_NAME])], []
pages = d['pages']
for p in pages:
# use the complete slug as a way to identify pages in errors
slug = p['complete_slug'].get(preferred_lang, None)
seen_parent = False
for lang, s in p['complete_slug'].items():
if lang not in seen_complete_slugs:
continue
seen_complete_slugs[lang].add(s)
if '/' not in s: # root level, no parent req'd
seen_parent = True
if not seen_parent:
parent_slug, ignore = s.rsplit('/', 1)
if parent_slug in seen_complete_slugs[lang]:
seen_parent = True
elif Page.objects.from_path(parent_slug, lang,
exclude_drafts=False):
# parent not included, but exists on site
seen_parent = True
if not slug:
slug = s
if not slug:
errors.append(_("%s has no common language with this site")
% (p['complete_slug'].values()[0],))
continue
if not seen_parent:
errors.append(_("%s did not include its parent page and a matching"
" one was not found on this site") % (slug,))
if p['template'] not in valid_templates:
errors.append(_("%s uses a template not found on this site: %s")
% (slug, p['template']))
continue
if set(p.name for p in get_placeholders(p['template'])) != set(
p['content'].keys()):
errors.append(_("%s template contents are different than our "
"template: %s") % (slug, p['template']))
continue
return errors
|
Python
| 0.000002
|
@@ -894,16 +894,109 @@
ENAME,)%0A
+ # selection may be in the wrong order%0A queryset = queryset.order_by('tree_id', 'lft')%0A
resp
|
c9c91af31d60c6e9f0eaa971c52985418a4707d3
|
update whitelist for osd slow op wrn
|
teuthology/suite/placeholder.py
|
teuthology/suite/placeholder.py
|
import copy
class Placeholder(object):
"""
A placeholder for use with substitute_placeholders. Simply has a 'name'
attribute.
"""
def __init__(self, name):
self.name = name
def substitute_placeholders(input_dict, values_dict):
"""
Replace any Placeholder instances with values named in values_dict. In the
case of None values, the key is omitted from the result.
Searches through nested dicts.
:param input_dict: A dict which may contain one or more Placeholder
instances as values.
:param values_dict: A dict, with keys matching the 'name' attributes of all
of the Placeholder instances in the input_dict, and
values to be substituted.
:returns: The modified input_dict
"""
input_dict = copy.deepcopy(input_dict)
def _substitute(input_dict, values_dict):
for key, value in input_dict.items():
if isinstance(value, dict):
_substitute(value, values_dict)
elif isinstance(value, Placeholder):
if values_dict[value.name] is None:
del input_dict[key]
continue
# If there is a Placeholder without a corresponding entry in
# values_dict, we will hit a KeyError - we want this.
input_dict[key] = values_dict[value.name]
return input_dict
return _substitute(input_dict, values_dict)
# Template for the config that becomes the base for each generated job config
dict_templ = {
'branch': Placeholder('ceph_branch'),
'sha1': Placeholder('ceph_hash'),
'teuthology_branch': Placeholder('teuthology_branch'),
'archive_upload': Placeholder('archive_upload'),
'archive_upload_key': Placeholder('archive_upload_key'),
'machine_type': Placeholder('machine_type'),
'nuke-on-error': True,
'os_type': Placeholder('distro'),
'os_version': Placeholder('distro_version'),
'overrides': {
'admin_socket': {
'branch': Placeholder('ceph_branch'),
},
'ceph': {
'conf': {
'mon': {
'debug mon': 20,
'debug ms': 1,
'debug paxos': 20},
'osd': {
'debug filestore': 20,
'debug journal': 20,
'debug ms': 1,
'debug osd': 25
}
},
'log-whitelist': ['slow request'],
'sha1': Placeholder('ceph_hash'),
},
'ceph-deploy': {
'conf': {
'client': {
'log file': '/var/log/ceph/ceph-$name.$pid.log'
},
'mon': {
'osd default pool size': 2
}
}
},
'install': {
'ceph': {
'sha1': Placeholder('ceph_hash'),
}
},
'workunit': {
'branch': Placeholder('suite_branch'),
'sha1': Placeholder('suite_hash'),
}
},
'repo': Placeholder('ceph_repo'),
'suite': Placeholder('suite'),
'suite_repo': Placeholder('suite_repo'),
'suite_relpath': Placeholder('suite_relpath'),
'suite_branch': Placeholder('suite_branch'),
'suite_sha1': Placeholder('suite_hash'),
'tasks': [],
}
|
Python
| 0
|
@@ -2542,16 +2542,32 @@
request'
+, '%5C(SLOW_OPS%5C)'
%5D,%0A
|
c3572887921c5afad5115811afa401cee925cb0b
|
fix input array
|
chainer/functions/loss/sigmoid_cross_entropy.py
|
chainer/functions/loss/sigmoid_cross_entropy.py
|
import numpy
from chainer import cuda
from chainer import function
from chainer.functions.activation import sigmoid
from chainer import utils
from chainer.utils import type_check
class SigmoidCrossEntropy(function.Function):
"""Sigmoid activation followed by a sigmoid cross entropy loss."""
ignore_label = -1
def __init__(self, normalize=True, reduce='mean'):
self.normalize = normalize
if reduce not in ('mean', 'no'):
raise ValueError(
"only 'mean' and 'no' are valid for 'reduce', but '%s' is "
'given' % reduce)
self.reduce = reduce
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
x_type, t_type = in_types
type_check.expect(
x_type.dtype == numpy.float32,
t_type.dtype == numpy.int32,
x_type.shape == t_type.shape
)
def forward(self, inputs):
xp = cuda.get_array_module(*inputs)
x, t = inputs
self.ignore_mask = (t != self.ignore_label)
# stable computation of the cross entropy.
loss = -(
self.ignore_mask *
(x * (t - (x >= 0)) - xp.log1p(xp.exp(-xp.abs(x)))))
if not self.reduce == 'mean':
return utils.force_array(loss.astype(x.dtype)),
if self.normalize:
count = xp.maximum(1, self.ignore_mask.sum())
else:
count = max(1, len(x))
self.count = count
return utils.force_array(
xp.divide(xp.sum(loss), self.count, dtype=x.dtype)),
def backward(self, inputs, grad_outputs):
xp = cuda.get_array_module(*inputs)
x, t = inputs
gloss = grad_outputs[0]
y, = sigmoid.Sigmoid().forward((x,))
if self.reduce == 'mean':
gx = xp.divide(
gloss * self.ignore_mask * (y - t), self.count,
dtype=y.dtype)
else:
gx = (gloss * self.ignore_mask * (y - t)).astype(y.dtype)
return gx, None
def sigmoid_cross_entropy(
x, t, normalize=True, reduce='mean'):
"""Computes cross entropy loss for pre-sigmoid activations.
Args:
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): A variable object holding a matrix whose
(i, j)-th element indicates the unnormalized log probability of
the j-th unit at the i-th example.
t (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): Variable holding an int32 vector of ground
truth labels. If ``t[i] == -1``, corresponding ``x[i]`` is ignored.
Loss is zero if all ground truth labels are ``-1``.
normalize (bool): Variable holding a boolean value which
determines the normalization constant. If true, this function
normalizes the cross entropy loss across all instances. If else,
it only normalizes along a batch size.
reduce (str): Variable holding a ``str`` which
determines whether to reduce the shape of the input.
If it is ``'mean'``, it computes the sum of cross entropy
and normalize it according to ``normalize`` option.
If is is ``'no'``, this function computes cross entropy for each
instance and does not normalize it (``normalize`` option is
ignored). In this case, the loss value of the ignored instance,
which has ``-1`` as its target value, is set to ``0``.
Returns:
Variable: A variable object holding an array of the cross entropy.
If ``reduce`` is ``'mean'``, it is a scalar array.
If ``reduce`` is ``'no'``, the shape is same as ``x``.
.. note::
This function is differentiable only by ``x``.
.. admonition:: Example
>>> x = np.array([[-2.0, 3.0, 0.5], [5.0, 2.0, -0.5]]).astype('f')
>>> x
array([[-2. , 3. , 0.5],
[ 5. , 2. , -0.5]], dtype=float32)
>>> t = np.array([[0, -1, 0], [1, 1, -1]]).astype('i')
>>> t
array([[ 0, -1, 0],
[ 1, 1, -1]], dtype=int32)
>>> F.sigmoid_cross_entropy(x, t)
variable(0.308662086725235)
>>> F.sigmoid_cross_entropy(x, t, normalize=False)
variable(0.61732417345047)
>>> y = F.sigmoid_cross_entropy(x, t, reduce='no')
>>> y.shape
(2, 3)
>>> y.data
array([[ 0.126928 , 0. , 0.97407699],
[ 0.00671535, 0.126928 , -0. ]], dtype=float32)
"""
return SigmoidCrossEntropy(normalize, reduce)(x, t)
|
Python
| 0.000802
|
@@ -4071,17 +4071,16 @@
ay(%5B%5B0,
--
1, 0%5D, %5B
@@ -4137,17 +4137,17 @@
y(%5B%5B 0,
--
+
1, 0%5D,%0A
@@ -4254,22 +4254,24 @@
e(0.
-30866208672523
+2566471397876739
5)%0A
@@ -4352,20 +4352,22 @@
(0.6
-17324
+416
17
+8
345
-04
+68023
7)%0A
@@ -4510,24 +4510,24 @@
8 , 0.
-
+04858735
, 0.974
|
802c2b7c99554f1caf9c9ebf1e17935f3717e402
|
Fix pnsl module description (#776)
|
pajbot/modules/pnsl.py
|
pajbot/modules/pnsl.py
|
import logging
import requests
from pajbot.models.command import Command
from pajbot.modules import BaseModule
from pajbot.modules import ModuleSetting
log = logging.getLogger(__name__)
class PNSLModule(BaseModule):
ID = __name__.split(".")[-1]
NAME = "Run P&SL lists"
DESCRIPTION = "Run P&SL lists through !pnslrun command"
CATEGORY = "Moderation"
SETTINGS = [
ModuleSetting(
key="level",
label="Level required to use the command",
type="number",
required=True,
placeholder="",
default=750,
constraints={"min_value": 500, "max_value": 2000},
),
ModuleSetting(
key="per_chunk",
label="How many lines to process per chunk",
type="number",
required=True,
placeholder="",
default=30,
constraints={"min_value": 1, "max_value": 500},
),
ModuleSetting(
key="chunk_delay",
label="Delay between chunks (in seconds)",
type="number",
required=True,
placeholder="",
default=30,
constraints={"min_value": 5, "max_value": 60},
),
]
def __init__(self, bot):
super().__init__(bot)
self.pnsl_token = None
if bot:
if "pnsl" in bot.config:
self.pnsl_token = bot.config["pnsl"].get("token", None)
def run_pnsl(self, bot, source, message, event, args):
base_url = "https://bot.tetyys.com/api/v1/BotLists"
if not self.pnsl_token:
bot.whisper(source, f"Missing P&SL token in config.ini. talk to @{bot.admin} BabyRage")
return False
guid = message.replace("https://bot.tetyys.com/BotList/", "")
headers = {"Authorization": f"Bearer {self.pnsl_token}"}
res = requests.get(base_url + "/" + guid, headers=headers)
if not res.ok:
error_data = res.json()
bot.whisper(source, f"Something went wrong with the P&SL request: {error_data['errors']['Guid'][0]}")
return False
privmsg_list = res.text.splitlines()
log.info(f"[P&SL] User {source.name} running list {guid} with {len(privmsg_list)} entries")
bot.privmsg_arr_chunked(
privmsg_list, per_chunk=self.settings["per_chunk"], chunk_delay=self.settings["chunk_delay"]
)
def load_commands(self, **options):
self.commands["runpnsl"] = Command.raw_command(
self.run_pnsl,
delay_all=20,
delay_user=20,
level=self.settings["level"],
description="Run a P&SL list",
command="runpnsl",
)
|
Python
| 0
|
@@ -322,16 +322,20 @@
ugh
-!
+the !run
pnsl
-run
com
|
2efd23708a1e5198802056cd7b7c3c27c2fbdb08
|
Use `super()` in `_print_message()`.
|
shcol/cli.py
|
shcol/cli.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013-2016, Sebastian Linke
# Released under the Simplified BSD license
# (see LICENSE file for details).
"""
The command-line interface for `shcol`.
"""
import argparse
import sys
import traceback
from . import __version__, config, helpers, highlevel
__all__ = ['main']
class ArgumentParser(argparse.ArgumentParser):
"""
Implementation for generating help text and command-line parsing.
"""
def __init__(
self, prog_name, version, stdin=config.INPUT_STREAM,
stdout=config.TERMINAL_STREAM, stderr=config.ERROR_STREAM
):
"""
Initialize the parser.
`prog_name` defines the program name used when displaying information.
`version` should be a string containing the program's version.
`stdin`, `stdout` and `stderr` are the streams to use. They should be
file-like objects defining at least a `.read()` and a `.write()` method.
"""
self.version_string = '{} {}'.format(prog_name, version)
argparse.ArgumentParser.__init__(
self, prog=prog_name, formatter_class=argparse.RawTextHelpFormatter,
description='Generate columnized output for given string items.\n\n'
'Columize and sort:\n'
'%(prog)s -S foo bar baz\n\n'
'Columnize command output on Linux (Debian):\n'
'%(prog)s -S -c0 < /proc/modules\n'
'dpkg --get-selections \'python3*\' | %(prog)s -c0 -s4\n\n'
'Columnize process names on Windows PowerShell:\n'
'(ps).name | %(prog)s -U'
)
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.init_arguments()
def print_message(self, message, stream=None):
"""
Print `message` to `stream`.
If `stream` is `sys.stdout` or `sys.stderr` then it is translated to its
corresponding instance variable (`self.stdout` or `self.stderr`).
This technique makes it possible to control the output streams of some
internal methods defined by the superclass. Most of them do not support
setting alternative streams directly. Instead, they invoke methods that
are based on `_print_message()`. By redefining that method, which does
expect the stream to be used as a parameter, the new implementation is
able to map those stream references to alternative streams.
Note that the actual work is done by `_print_message`. The "public"
version of it (`print_message`) is used a thin wrapper around it.
"""
self._print_message(message, stream)
def _print_message(self, message, stream):
"""
Redefined internal method called by the superclass. See docstring of
`print_message()` for details.
"""
if stream is sys.stdout:
stream = self.stdout
elif stream is sys.stderr:
stream = self.stderr
argparse.ArgumentParser._print_message(self, message, file=stream)
def init_arguments(self):
"""
Argument names and the whole CLI-syntax are defined by this method.
"""
self.add_argument(
'items', nargs='*', metavar='item',
help='an item to columnize\n'
'(read from stdin if item arguments are not present)'
)
self.add_argument(
'-s', '--spacing', metavar='N', type=helpers.num,
default=config.SPACING,
help='number of blanks between two columns (default: {})'
.format(config.SPACING)
)
self.add_argument(
'-w', '--width', metavar='N', type=helpers.num,
help='maximal amount of characters per line\n'
'(use terminal width by default)'
)
self.add_argument(
'-e', '--extra-sep', metavar='C', type=str,
default=config.EXTRA_SEP,
help='additional character between columns (default: {})'
.format(config.EXTRA_SEP)
)
self.add_argument(
'-c', '--column', metavar='N', type=helpers.num, dest='column',
help='choose a specific column per line via an index value\n'
'(indices start at 0, column separator is whitespace)\n'
'will only work when items are supplied via stdin'
)
self.add_argument(
'-F', '--filter', metavar='P', dest='pattern',
help='only columnize items which match the pattern P\n'
'(P should include wildcard symbols such as "?" or "*")'
)
self.add_argument(
'-S', '--sort', action='store_true', default=config.SORT_ITEMS,
help='sort the items'
)
self.add_argument(
'-U', '--unique', action='store_true', default=config.MAKE_UNIQUE,
help='process only the first occurrence of an item\n'
'(i.e. doublets are eliminated)'
)
self.add_argument(
'-v', '--version', action='version', version=self.version_string
)
def parse_args(self, args=None, namespace=None):
""""
Parse given arguments and return a Namespace-object that represents the
result of parsing.
`args` should be a sequence of strings. If this is `None`, then
`sys.argv` is used instead.
`namespace` defines the Namespace to use. If this is `None`, then a new
Namespace is created.
See the stdlib's `argparse`-module documentation for more details, since
this is a redefined method of `argparse.ArgumentParser`.
"""
args = argparse.ArgumentParser.parse_args(self, args, namespace)
if args.items:
if args.column is not None:
msg = 'can\'t use --column when items are given as arguments'
self.error(msg)
encoding = sys.getfilesystemencoding()
else:
input_stream = getattr(self.stdin, 'buffer', self.stdin)
args.items = helpers.get_lines(input_stream)
if args.column is not None:
args.items = helpers.get_column(args.column, args.items)
encoding = config.ENCODING
args.items = list(helpers.get_strings(args.items, encoding))
return args
def main(args=None, prog_name='shcol', version=__version__):
"""
Parse command-line arguments and invoke `highlevel.print_columnized()`
with the result.
`args` defines the arguments to parse. If this is `None` then `sys.argv[1:]`
is used instead.
`prog_name` defines the program name to use for the help text.
`version` should be a string containing the program's version.
If an exception occurs during running this function then its message (if
any) will be written to standard error and the interpreter is requested to
shut down (i.e. it exits with an error code if `SystemExit` is not caught).
"""
parser = ArgumentParser(prog_name, version)
try:
args = parser.parse_args(args)
highlevel.print_columnized(
args.items, spacing=args.spacing, line_width=args.width,
extra_sep=args.extra_sep, pattern=args.pattern,
make_unique=args.unique, sort_items=args.sort
)
except KeyboardInterrupt:
parser.exit(1)
except Exception as exc:
config.LOGGER.error(traceback.format_exc())
parser.error(exc)
|
Python
| 0.000001
|
@@ -2974,39 +2974,39 @@
-argparse.ArgumentParser
+super(type(self), self)
._print_
@@ -3005,38 +3005,32 @@
._print_message(
-self,
message, file=st
|
34d85cacb6e32601a8107613a8e4a15d8beffc8b
|
Drop not implemented node requests
|
code/marv/marv_node/io.py
|
code/marv/marv_node/io.py
|
# Copyright 2016 - 2018 Ternaris.
# SPDX-License-Identifier: AGPL-3.0-only
from collections import namedtuple
from numbers import Integral
from .mixins import Keyed, Request, Task
from .stream import Handle
class Abort(Exception):
pass
def create_stream(name, **header):
"""Create a stream for publishing messages.
All keyword arguments will be used to form the header.
"""
assert isinstance(name, str), name
return CreateStream(parent=None, name=name, group=False, header=header)
def create_group(name, **header):
assert isinstance(name, str), name
return CreateStream(parent=None, name=name, group=True, header=header)
def fork(name, inputs, group):
assert isinstance(name, str), name
assert inputs
return Fork(name, inputs, group)
def get_logger():
return GetLogger()
def get_requested():
return GetRequested()
def get_stream(node, name='default', setid=None):
return GetStream(setid, node, name)
def make_file(name):
assert isinstance(name, str)
return MakeFile(None, name)
def pull(handle, enumerate=False):
"""Pull next message for handle.
Args:
handle: A :class:`.stream.Handle` or GroupHandle.
enumerate (bool): boolean to indicate whether a tuple ``(idx, msg)``
should be returned, not unlike Python's enumerate().
Returns:
A :class:`Pull` task to be yielded. Marv will send the
corresponding message as soon as it is available. For groups
this message will be a handle to a member of the
group. Members of groups are either streams or groups.
Examples:
Pulling (enumerated) message from stream::
msg = yield marv.pull(stream)
idx, msg = yield marv.pull(stream, enumerate=True)
Pulling stream from group and message from stream::
stream = yield marv.pull(group) # a group of streams
msg = yield marv.pull(stream)
"""
assert isinstance(handle, Handle), handle
return Pull(handle, enumerate)
def pull_all(*handles):
"""Pull next message of all handles."""
return PullAll(handles)
def push(msg):
return Push(msg)
def set_header(**header):
    """Set the header of a stream or group.

    If a node is configured to have a header, the header needs to be
    set before yielding any messages or creating group members. Once a
    header is set, a handle is created and dependent nodes can be
    instantiated. For streams without headers this happens right away.

    Example::

        @marv.node(header=True)
        def node():
            yield marv.set_header(title='Title')
    """
    return SetHeader(header)
# Plain request payloads exchanged with the scheduler; dispatch is on type.
CreateStream = namedtuple('CreateStream', 'parent name group header')
Fork = namedtuple('Fork', 'name inputs group')
GetLogger = namedtuple('GetLogger', '')
GetRequested = namedtuple('GetRequested', '')
GetStream = namedtuple('GetStream', 'setid node name')
MakeFile = namedtuple('MakeFile', 'handle name')
Pull = namedtuple('Pull', 'handle enumerate')
PullAll = namedtuple('PullAll', 'handles')
PullAny = namedtuple('PullAny', 'handles')
PullSome = namedtuple('PullSome', 'handles')
Push = namedtuple('Push', 'output')
SetHeader = namedtuple('SetHeader', 'header')
# TODO: Rename
# Register each payload as a virtual subclass of the Request ABC so
# isinstance(obj, Request) holds without actual inheritance.
Request.register(Pull)
Request.register(PullAll)
Request.register(PullAny)
Request.register(PullSome)
Request.register(Push)
Request.register(SetHeader)
Request.register(CreateStream)
Request.register(Fork)
Request.register(GetLogger)
Request.register(GetRequested)
Request.register(GetStream)
Request.register(MakeFile)
class Signal(Task):  # pylint: disable=too-few-public-methods
    """Base class for scheduler control signals; repr is the upper-cased class name."""
    def __repr__(self):
        return type(self).__name__.upper()
class Next(Signal):  # pylint: disable=too-few-public-methods
    """Instruct to send next pending task."""
    __slots__ = ()  # stateless; used as a singleton (NEXT below)
class Paused(Signal):  # pylint: disable=too-few-public-methods
    """Indicate a generator has paused."""
    __slots__ = ()  # stateless; used as a singleton (PAUSED below)
class Resume(Signal):  # pylint: disable=too-few-public-methods
    """Instruct a generator to resume."""
    __slots__ = ()  # stateless; used as a singleton (RESUME below)
class TheEnd(Signal):  # pylint: disable=too-few-public-methods
    """Indicate the end of a stream, resulting in None being sent into consumers."""
    __slots__ = ()  # stateless; used as a singleton (THEEND below)
# Singleton signal instances; compared by identity throughout the scheduler.
NEXT = Next()
PAUSED = Paused()
RESUME = Resume()
THEEND = TheEnd()
class MsgRequest(Task, Keyed):
    """Request for message number *idx* of the stream behind *handle*.

    Keyed by ``(handle, idx)``.  The requestor is stored so the scheduler
    can route the answer back; it is not used within this class.

    NOTE(review): ``_requestor`` is not listed in ``__slots__``; this only
    works if a base class provides ``__dict__`` -- confirm against the
    Task/Keyed mixins.
    """

    __slots__ = ('_handle', '_idx', '__weakref__')

    @property
    def key(self):
        # Identity used by the Keyed mixin: stream handle plus message index.
        return (self._handle, self._idx)

    @property
    def handle(self):
        return self._handle

    @property
    def idx(self):
        return self._idx

    def __init__(self, handle, idx, requestor):
        assert isinstance(idx, Integral), idx
        self._handle = handle
        self._idx = idx
        self._requestor = requestor

    def __iter__(self):
        # Allows "handle, idx = msg_request" unpacking.
        return iter(self.key)

    def __repr__(self):
        return f'MsgRequest({self._handle}, {self._idx!r})'
|
Python
| 0.000001
|
@@ -3095,96 +3095,8 @@
s')%0A
-PullAny = namedtuple('PullAny', 'handles')%0APullSome = namedtuple('PullSome', 'handles')%0A
Push
@@ -3242,61 +3242,8 @@
ll)%0A
-Request.register(PullAny)%0ARequest.register(PullSome)%0A
Requ
|
b14f520fbb15c9f455339631ed90b0d926befb27
|
Bump version
|
setup.py
|
setup.py
|
# encoding: utf-8

import io
import sys
import os.path
import setuptools

# Directories holding the package summary and pip requirement files.
MISC_DIR = "misc"
REQUIREMENT_DIR = "requirements"

# Long description / summary are read from files so the text lives in one place.
with io.open("README.rst", encoding="utf8") as f:
    long_description = f.read()

with io.open(os.path.join(MISC_DIR, "summary.txt"), encoding="utf8") as f:
    summary = f.read()

with open(os.path.join(REQUIREMENT_DIR, "requirements.txt")) as f:
    install_requires = [line.strip() for line in f if line.strip()]

with open(os.path.join(REQUIREMENT_DIR, "test_requirements.txt")) as f:
    tests_require = [line.strip() for line in f if line.strip()]

# Only require pytest-runner when a test-related command was actually invoked.
needs_pytest = set(["pytest", "test", "ptr"]).intersection(sys.argv)
pytest_runner = ["pytest-runner"] if needs_pytest else []

author = "Tsuyoshi Hombashi"
email = "gogogo.vm@gmail.com"
project_name = "DataProperty"

setuptools.setup(
    name=project_name,
    version="0.13.0",
    url="https://github.com/thombashi/" + project_name,
    # NOTE(review): 'bugtrack_url' is not a standard setuptools keyword --
    # confirm it is intentional (setuptools ignores unknown keywords with a warning).
    bugtrack_url="https://github.com/thombashi/{:s}/issues".format(
        project_name),
    author=author,
    author_email=email,
    description=summary,
    include_package_data=True,
    install_requires=install_requires,
    keywords=["data", "property"],
    license="MIT License",
    long_description=long_description,
    maintainer=author,
    maintainer_email=email,
    packages=setuptools.find_packages(exclude=["test*"]),
    setup_requires=[] + pytest_runner,
    tests_require=tests_require,
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Topic :: Software Development :: Libraries",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
)
|
Python
| 0
|
@@ -853,17 +853,17 @@
n=%220.13.
-0
+1
%22,%0A u
|
ee28b56ea88962857321150d74b2b7c25eb7d26d
|
fix folder creation issue
|
code_repo/ansible_repo.py
|
code_repo/ansible_repo.py
|
import json
import os
import re
import shutil
from git import Repo, RemoteProgress
from ansible_objects import *
class InvalidGitRepoUrlException(Exception):
    """Raised when a git repository URL does not match the expected pattern.

    The offending URL is available as ``git_repo_url``.
    """

    def __init__(self, git_repo_url):
        # Forward the URL to Exception so str(exc) shows it instead of
        # being empty (the original never called the base initializer).
        super(InvalidGitRepoUrlException, self).__init__(git_repo_url)
        self.git_repo_url = git_repo_url
class Progress(RemoteProgress):
    # NOTE(review): ``callback_func`` is never assigned anywhere in this
    # module; callers appear expected to set it (see the commented-out usage
    # in AnsibleRepo.load_from_git).  Confirm against git.RemoteProgress
    # before relying on this class -- as written, update() raises
    # AttributeError unless callback_func was injected externally.
    def update(self, *args):
        self.callback_func(self)
class AnsibleRepoEncoder(JSONEncoder):
    """JSON encoder turning an AnsibleRepo into a plain serializable dict.

    NOTE(review): the bare name ``JSONEncoder`` (and the nested encoders)
    presumably come from the star import of ``ansible_objects`` --
    ``json.JSONEncoder`` is not imported under that name here; confirm.
    """

    def default(self, o):
        # Nested objects are serialized with their own encoders and
        # round-tripped through json so the result is plain dicts/lists.
        return {
            "name": o.name,
            "localDirectory": o.local_repo_path,
            "layout": o.get_layout(),
            "gitRepoUrl": o.git_repo_url,
            "gitBranch": o.git_branch,
            "inventories": json.loads(json.dumps(o.inventories, cls=AnsibleInventoryEncoder)),
            "playbooks": json.loads(json.dumps(o.playbooks, cls=AnsilePlaybookEncoder)),
            "roles": json.loads(json.dumps(o.roles, cls=AnsibleRoleEncoder))
        }
class AnsibleRepo(object):
    """A local clone of an Ansible git repository plus extracted metadata.

    The repo can be rebuilt from JSON, loaded from an existing local
    checkout, or cloned from a remote git URL.  ``generate_metadata`` walks
    the checkout according to a layout description and populates ``roles``,
    ``playbooks`` and ``inventories``.
    """

    def __init__(self):
        """
        Initialize an AnsibleRepo object.

        :return: The instance
        """
        self._log_path = '/etc/ansible-townhall/logs/'
        # scheme, optional user@, host, optional :port, then a path ending in
        # ``.git``; the final group captures the bare repository name.
        # (Raw string: same pattern as before, without invalid-escape warnings.)
        self._regex = r'(\w+://)(.+@)*([\w\d\.]+)(:[\d]+){0,1}/*(.*/(.*)\.git)'
        self.name = None
        self.local_repo_path = None
        self.git_repo_url = None
        self.git_branch = None
        self._git_repo_obj = None
        self._layout = None
        self.roles = None
        self.playbooks = None
        self.inventories = None

    def load_from_json(self, json_data):
        """
        Rebuild the object from json.

        :param json_data: dict as produced by AnsibleRepoEncoder
        :return: None
        """
        self.name = json_data['name']
        self.local_repo_path = json_data['localDirectory']
        self.git_repo_url = json_data['gitRepoUrl']
        self.git_branch = json_data['gitBranch']
        self._layout = json_data['layout']
        self._git_repo_obj = Repo(self.local_repo_path)

    def load_from_local(self, local_repo_path, layout):
        """Populate from an existing checkout; URL and branch are read from git."""
        self.name = local_repo_path.rstrip('/').split('/')[-1]
        self.local_repo_path = local_repo_path
        self._layout = layout
        self._git_repo_obj = Repo(local_repo_path)
        self.git_repo_url = self._git_repo_obj.remote().config_reader.get('url')
        self.git_branch = self._git_repo_obj.active_branch.name

    def load_from_git(self, git_repo_url, layout, git_branch='master', local_base_path='/etc/ansible-townhall/repos/'):
        """
        Clone the specified code repo to local store and generate metadata for it.

        :param git_repo_url: The URL of the git repo
        :param git_branch: The git branch to clone
        :param local_base_path: The local base path to clone the git repo to
        :return: True if everything is OK.
        """
        self.git_repo_url = git_repo_url
        # Guard against callers explicitly passing a falsy branch/base path.
        if git_branch:
            self.git_branch = git_branch
        else:
            self.git_branch = 'master'
        self._layout = layout
        self.name = self._get_repo_name(self.git_repo_url)
        if not local_base_path:
            local_base_path = '/etc/ansible-townhall/repos/'
        self.local_repo_path = ''.join([local_base_path, self.name])
        self._create_repo_directory(self.local_repo_path)
        # def write_gitprogress(progress):
        #     with open(''.join([self._log_path, 'gitlog.', self.name]), 'a') as f:
        #         f.write(progress._cur_line + '\n')
        # progress = Progress(write_gitprogress)
        self._git_repo_obj = Repo.clone_from(self.git_repo_url, self.local_repo_path, branch=self.git_branch,
                                             progress=None)

    def get_layout(self):
        """Return the layout description this repo was loaded with."""
        return self._layout

    def generate_metadata(self):
        """
        Generate the metadata for the repo against the configured layout.

        :return: None; populates roles, playbooks and inventories.
        """
        self.roles = self._process_roles(self._layout)
        self.playbooks = self._process_playbooks(self._layout)
        self.inventories = self._process_inventory(self._layout)

    def _process_inventory(self, layout):
        # 'filenames' pattern: a comma-separated list of inventory file names
        # relative to the layout root.
        inventories = dict()
        if 'inventory' not in layout:
            return None
        pattern_type = layout['inventory']['type']
        if pattern_type == 'filenames':
            for filename in layout['inventory']['pattern'].split(','):
                inv_path = ''.join([self.local_repo_path, layout['root'], filename])
                inventories[filename] = AnsibleInventory(filename, inv_path)
        return inventories

    def _process_playbooks(self, layout):
        # 'regex' pattern: every file in the layout root whose name matches.
        playbooks = dict()
        if 'playbooks' not in layout:
            return None
        pattern_type = layout['playbooks']['type']
        if pattern_type == 'regex':
            playbooks_dir = ''.join([self.local_repo_path, layout['root']])
            for item in os.listdir(playbooks_dir):
                playbook_path = ''.join([playbooks_dir, item])
                if os.path.isfile(playbook_path) and re.match(layout['playbooks']['pattern'], item):
                    key = item.split('.')[0]
                    playbooks[key] = AnsiblePlaybook(key, playbook_path)
        return playbooks

    def _process_roles(self, layout):
        # 'dirnames' pattern: each listed directory under the layout root
        # holds one role per sub-directory.
        roles = dict()
        if 'roles' not in layout:
            return None
        pattern_type = layout['roles']['type']
        if pattern_type == 'dirnames':
            for dir_role in layout['roles']['pattern'].split(','):
                roles_dir = ''.join([self.local_repo_path, layout['root'], dir_role])
                for directory in os.listdir(roles_dir):
                    role = AnsibleRole(''.join([roles_dir, directory]))
                    roles[role.name] = role
        return roles

    @staticmethod
    def _create_repo_directory(local_repo_path):
        """Create an empty directory at *local_repo_path*, replacing any old one.

        BUG FIX: os.mkdir failed when intermediate directories were missing;
        os.makedirs creates the whole path.
        """
        if os.path.exists(local_repo_path):
            shutil.rmtree(local_repo_path)
        os.makedirs(local_repo_path)

    def _get_repo_name(self, git_repo_url):
        """Return the bare repository name parsed from *git_repo_url*.

        :raises InvalidGitRepoUrlException: if the URL does not match.
        """
        m = re.match(self._regex, git_repo_url)
        if m:
            return m.groups()[-1]
        else:
            raise InvalidGitRepoUrlException(git_repo_url)
|
Python
| 0
|
@@ -5955,12 +5955,15 @@
os.m
-k
+ake
dir
+s
(loc
@@ -6190,10 +6190,8 @@
po_url)%0A
-%0A%0A
|
9c07d26072c15147e47c15edd5c4d356686b14d7
|
Upgrade these.
|
setup.py
|
setup.py
|
import os
from setuptools import setup, find_packages

# VERSION and README live next to setup.py; resolve them relative to this
# file so installs started from other directories still find them.
VERSION = os.path.join(os.path.dirname(__file__), 'VERSION')
VERSION = open(VERSION, 'r').read().strip()
README = os.path.join(os.path.dirname(__file__), 'README.rst')
README = open(README, 'r').read().strip()

setup(
    name='grano-client',
    version=VERSION,
    description="Client library for grano, a social network analysis tool.",
    long_description=README,
    classifiers=[
    ],
    keywords='data client rest grano sna ddj journalism',
    author='Code for Africa',
    author_email='support@codeforafrica.org',
    url='https://github.com/granoproject/grano-client',
    license='MIT',
    packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
    zip_safe=False,
    install_requires=[
        "requests>=2.2.0",
        "PyYAML==3.10"
    ],
    tests_require=[],
    entry_points=\
    """ """,
)
|
Python
| 0
|
@@ -614,20 +614,21 @@
com/
-granoproject
+CodeForAfrica
/gra
@@ -797,11 +797,12 @@
%3E=2.
-2.0
+17.3
%22,%0A
@@ -819,14 +819,14 @@
YAML
-=
+%3E
=3.1
-0
+2
%22%0A
|
7689719e0ba8f577acbe5d919828a1abc5437be4
|
update version
|
setup.py
|
setup.py
|
from setuptools import setup

# Packaging metadata for the lunchboy CLI; installs the 'lunch' and 'menu'
# scripts from bin/.
setup(name='lunchboy',
      version='0.1',
      description='Lunch without #lunch',
      url='http://github.com/lisunshiny/lunchboy',
      author='Liann Sun',
      author_email='liann@appboy.com',
      license='MIT',
      packages=['lunchboy'],
      install_requires=['Scrapy'],
      scripts=['bin/lunch', 'bin/menu'],
      zip_safe=False)
|
Python
| 0
|
@@ -68,9 +68,9 @@
='0.
-1
+2
',%0A
|
f3f2408370e76ec8338bfc1f816ca875c75acf5c
|
remove ez_setup
|
setup.py
|
setup.py
|
#!/usr/bin/env python

# NOTE(review): ez_setup.py must be shipped next to this file for the
# bootstrap below to work; confirm before removing it.
import ez_setup
ez_setup.use_setuptools()

from setuptools import setup, find_packages

setup(
    name='yandextank',
    version='1.7.7',
    description='a performance measurement tool',
    longer_description='''
Yandex.Tank is a performance measurement and load testing automatization tool.
It uses other load generators such as JMeter, ab or phantom inside of it for
load generation and provides a common configuration system for them and
analytic tools for the results they produce.
''',
    maintainer='Alexey Lavrenuke (load testing)',
    maintainer_email='direvius@yandex-team.ru',
    url='http://yandex.github.io/yandex-tank/',
    packages=find_packages(exclude=["tests"]),
    install_requires=[
        'psutil',
        'ipaddr',
        'progressbar',
        'importlib',
    ],
    license='LGPLv2',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)',
        'Operating System :: POSIX',
        'Topic :: Software Development :: Quality Assurance',
        'Topic :: Software Development :: Testing',
        'Topic :: Software Development :: Testing :: Traffic Generation',
    ],
    # Installs the 'yandex-tank' command line entry point.
    entry_points={
        'console_scripts': [
            'yandex-tank = yandextank.core.cli:main',
        ],
    },
    # Per-plugin config files shipped alongside the code.
    package_data={
        'yandextank.core': ['config/*'],
        'yandextank.plugins.GraphiteUploader': ['config/*'],
        'yandextank.plugins.JMeter': ['config/*'],
        'yandextank.plugins.Monitoring': ['config/*'],
        'yandextank.plugins.Phantom': ['config/*'],
        'yandextank.plugins.TipsAndTricks': ['config/*'],
    },
)
|
Python
| 0.000014
|
@@ -20,51 +20,8 @@
on%0A%0A
-import ez_setup%0Aez_setup.use_setuptools()%0A%0A
from
@@ -108,17 +108,17 @@
on='1.7.
-7
+8
',%0A d
|
60b310d8fbd6b6130b4e8f23d20fc374eee65c74
|
Bump version
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
version = '0.3.1b1'
requires = [
'setuptools >= 2.2',
'eduid-userdb >= 0.0.5',
]
# Flavours
webapp_requires = [
'Flask>=0.12,<0.13',
'pysaml2 >= 4.0.3rc1', # version sync with dashboard to avoid pip catastrophes
'redis >= 2.10.5',
'pwgen == 0.4',
'vccs_client >= 0.4.1',
'PyNaCl >= 1.0.1',
'python-etcd >= 0.4.3',
'PyYAML >= 3.11',
'bleach>=2.0.0',
'marshmallow>=2.10,<2.11',
'Flask-Mail == 0.9.1',
'eduid_msg >= 0.10.2',
'eduid-am >= 0.6.2b2',
'statsd==3.2.1',
]
webapp_extras = webapp_requires + []
idp_requires = [
'pysaml2 >= 1.2.0beta2',
'redis >= 2.10.5',
'vccs_client >= 0.4.2',
'PyNaCl >= 1.0.1',
'statsd==3.2.1',
]
idp_extras = idp_requires + []
# No dependecies flavor, let the importing application handle dependencies
nodeps_requires = requires
test_requires = [
'mock == 1.0.1',
]
testing_extras = test_requires + webapp_extras + [
'nose',
'coverage',
'nosexcover',
]
long_description = open('README.txt').read()
setup(name='eduid-common',
version=version,
description="Common code for eduID applications",
long_description=long_description,
classifiers=[
"Programming Language :: Python",
],
keywords='',
author='SUNET',
author_email='',
url='https://github.com/SUNET/',
license='bsd',
packages=find_packages('src'),
package_dir={'': 'src'},
namespace_packages=['eduid_common'],
include_package_data=True,
zip_safe=False,
install_requires=requires,
tests_require=test_requires,
extras_require={
'testing': testing_extras,
'webapp': webapp_extras,
'idp': idp_extras,
'nodeps': []
},
entry_points="""
""",
)
|
Python
| 0
|
@@ -56,17 +56,17 @@
'0.3.1b
-1
+2
'%0A%0Arequi
|
785ea363f33551b2751f509bc92af59c9f144094
|
return setup.py to 0.3.dev
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import glob
import os
import sys

import ah_bootstrap
from setuptools import setup

# A dirty hack to get around some early import/configurations ambiguities:
# mark the interpreter as "inside astropy setup" before importing the package.
if sys.version_info[0] >= 3:
    import builtins
else:
    import __builtin__ as builtins
builtins._ASTROPY_SETUP_ = True

from astropy_helpers.setup_helpers import (
    register_commands, adjust_compiler, get_debug_option, get_package_info)
from astropy_helpers.git_helpers import get_git_devstr
from astropy_helpers.version_helpers import generate_version_py

# Get some values from the setup.cfg
from distutils import config
conf = config.ConfigParser()
conf.read(['setup.cfg'])
metadata = dict(conf.items('metadata'))

PACKAGENAME = metadata.get('package_name', 'packagename')
DESCRIPTION = metadata.get('description', 'Astropy affiliated package')
AUTHOR = metadata.get('author', '')
AUTHOR_EMAIL = metadata.get('author_email', '')
LICENSE = metadata.get('license', 'unknown')
URL = metadata.get('url', 'http://astropy.org')

# Get the long description from the package's docstring
__import__(PACKAGENAME)
package = sys.modules[PACKAGENAME]
LONG_DESCRIPTION = package.__doc__

# Store the package name in a built-in variable so it's easy
# to get from other parts of the setup infrastructure
builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME

# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)
VERSION = '0.2.3'

# Indicates if this version is a release version
RELEASE = 'dev' not in VERSION

if not RELEASE:
    VERSION += get_git_devstr(False)

# Populate the dict of setup command overrides; this should be done before
# invoking any other functionality from distutils since it can potentially
# modify distutils' behavior.
cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)

# Adjust the compiler in case the default on this platform is to use a
# broken one.
adjust_compiler(PACKAGENAME)

# Freeze build information in version.py
generate_version_py(PACKAGENAME, VERSION, RELEASE,
                    get_debug_option(PACKAGENAME))

# Treat everything in scripts except README.rst as a script to be installed
scripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))
           if os.path.basename(fname) != 'README.rst']

# Get configuration information from all of the various subpackages.
# See the docstring for setup_helpers.update_package_files for more
# details.
package_info = get_package_info()

# Add the project-global data
package_info['package_data'].setdefault(PACKAGENAME, [])
package_info['package_data'][PACKAGENAME].append('data/*')

# Currently the only entry points installed by Astropy are hooks to
# zest.releaser for doing Astropy's releases
entry_points = {}
for hook in [('prereleaser', 'middle'), ('releaser', 'middle'),
             ('postreleaser', 'before'), ('postreleaser', 'middle')]:
    hook_ep = 'zest.releaser.' + '.'.join(hook)
    hook_name = 'astroquery.release.' + '.'.join(hook)
    hook_func = 'astropy.utils.release:' + '_'.join(hook)
    entry_points[hook_ep] = ['%s = %s' % (hook_name, hook_func)]

# Include all .c files, recursively, including those generated by
# Cython, since we can not do this in MANIFEST.in with a "dynamic"
# directory name.
c_files = []
for root, dirs, files in os.walk(PACKAGENAME):
    for filename in files:
        if filename.endswith('.c'):
            c_files.append(
                os.path.join(
                    os.path.relpath(root, PACKAGENAME), filename))
package_info['package_data'][PACKAGENAME].extend(c_files)

required_packages = ['astropy>=0.4.1', 'requests>=2.4.1', 'keyring', 'beautifulsoup4',
                     'html5lib']

setup(name=PACKAGENAME,
      version=VERSION,
      description=DESCRIPTION,
      scripts=scripts,
      requires=['astropy', 'requests', 'keyring', 'beautifulsoup4',
                'html5lib'],
      install_requires=required_packages,
      include_package_data=True,
      provides=[PACKAGENAME],
      license=LICENSE,
      cmdclass=cmdclassd,
      zip_safe=False,
      use_2to3=False,
      **package_info
      )
|
Python
| 0.000016
|
@@ -1479,11 +1479,13 @@
'0.
-2.3
+3.dev
'%0A%0A#
|
0abe13f8157dc12e70da4c10149c4ab94dfb2a2e
|
fix failing test
|
pyramid_mailer/mailer.py
|
pyramid_mailer/mailer.py
|
import smtplib
from repoze.sendmail.mailer import SMTPMailer
from repoze.sendmail.delivery import DirectMailDelivery
from repoze.sendmail.delivery import QueuedMailDelivery
class DummyMailer(object):
    """
    In-memory mailer used for example in unit tests.

    Messages sent via :meth:`send` or :meth:`send_immediately` accumulate
    in the **outbox** list; messages sent via :meth:`send_to_queue`
    accumulate in the **queue** list.
    """

    def __init__(self):
        self.outbox = []
        self.queue = []

    def send(self, message):
        """
        Mocks sending a transactional message by appending it to **outbox**.

        :param message: a **Message** instance.
        """
        self.outbox.append(message)

    def send_immediately(self, message, fail_silently=False):
        """
        Mocks sending an immediate (non-transactional) message by appending
        it to **outbox**.

        :param message: a **Message** instance.
        :param fail_silently: accepted for interface compatibility; ignored.
        """
        self.outbox.append(message)

    def send_to_queue(self, message):
        """
        Mocks sending to a maildir queue by appending the message to **queue**.

        :param message: a **Message** instance.
        """
        self.queue.append(message)
class SMTP_SSLMailer(SMTPMailer):
    """
    Subclass of SMTPMailer enabling SSL.
    """

    # Resolved once at class-definition time: smtplib.SMTP_SSL exists only
    # on Python >= 2.6; older interpreters fall back to plain SMTP and
    # remember that SSL is unsupported.
    try:
        # support disabled if pre-2.6
        smtp = smtplib.SMTP_SSL
        ssl_support = True
    except AttributeError:
        smtp = smtplib.SMTP
        ssl_support = False

    def __init__(self, *args, **kwargs):
        # keyfile/certfile belong to this subclass; the rest is forwarded
        # to SMTPMailer unchanged.
        self.keyfile = kwargs.pop('keyfile', None)
        self.certfile = kwargs.pop('certfile', None)
        super(SMTP_SSLMailer, self).__init__(*args, **kwargs)

    def smtp_factory(self):
        # Without SMTP_SSL support, delegate to the plain SMTPMailer factory.
        if self.ssl_support is False:
            return super(SMTP_SSLMailer, self).smtp_factory()

        connection = self.smtp(self.hostname, str(self.port),
                               keyfile=self.keyfile,
                               certfile=self.certfile)
        connection.set_debuglevel(self.debug_smtp)
        return connection
class Mailer(object):
    """
    Manages sending of email messages.

    :param host: SMTP hostname
    :param port: SMTP port
    :param username: SMTP username
    :param password: SMPT password
    :param tls: use TLS
    :param ssl: use SSL
    :param keyfile: SSL key file
    :param certfile: SSL certificate file
    :param queue_path: path to maildir for queued messages
    :param default_sender: default "from" address
    :param debug: SMTP debug level
    """

    def __init__(self,
                 host='localhost',
                 port=25,
                 username=None,
                 password=None,
                 tls=False,
                 ssl=False,
                 keyfile=None,
                 certfile=None,
                 queue_path=None,
                 default_sender=None,
                 debug=0):

        # SSL requires the SMTP_SSLMailer subclass (key/cert aware);
        # otherwise the plain repoze.sendmail SMTPMailer is used.
        if ssl:
            self.smtp_mailer = SMTP_SSLMailer(
                hostname=host,
                port=port,
                username=username,
                password=password,
                no_tls=not(tls),
                force_tls=tls,
                debug_smtp=debug,
                keyfile=keyfile,
                certfile=certfile)
        else:
            self.smtp_mailer = SMTPMailer(
                hostname=host,
                port=port,
                username=username,
                password=password,
                no_tls=not(tls),
                force_tls=tls,
                debug_smtp=debug)

        self.direct_delivery = DirectMailDelivery(self.smtp_mailer)

        # Queued delivery is only available when a maildir path was given;
        # send_to_queue() raises otherwise.
        if queue_path:
            self.queue_delivery = QueuedMailDelivery(queue_path)
        else:
            self.queue_delivery = None

        self.default_sender = default_sender

    @classmethod
    def from_settings(cls, settings, prefix='mail.'):
        """
        Creates a new instance of **Message** from settings dict.

        :param settings: a settings dict-like
        :param prefix: prefix separating **pyramid_mailer** settings
        """
        settings = settings or {}
        # Only prefixed settings with a known key are forwarded; the prefix
        # is stripped so keys match the constructor's parameter names.
        kwarg_names = [prefix + k for k in (
                       'host', 'port', 'username',
                       'password', 'tls', 'ssl', 'keyfile',
                       'certfile', 'queue_path', 'debug')]
        size = len(prefix)
        kwargs = dict(((k[size:], settings[k]) for k in settings.keys() if
                       k in kwarg_names))
        return cls(**kwargs)

    def send(self, message):
        """
        Sends a message. The message is handled inside a transaction, so
        in case of failure (or the message fails) the message will not be sent.

        :param message: a **Message** instance.
        """
        return self.direct_delivery.send(*self._message_args(message))

    def send_immediately(self, message, fail_silently=False):
        """
        Sends a message immediately, outside the transaction manager.

        If there is a connection error to the mail server this will have to
        be handled manually. However if you pass ``fail_silently`` the error
        will be swallowed.

        :param message: a **Message** instance.
        :param fail_silently: silently handle connection errors.
        """
        try:
            return self.smtp_mailer.send(*self._message_args(message))
        except smtplib.socket.error, e:
            # Python 2 syntax; only socket-level errors are swallowed,
            # and only when the caller opted in via fail_silently.
            if not fail_silently:
                raise

    def send_to_queue(self, message):
        """
        Adds a message to a maildir queue.

        In order to handle this, the setting **mail.queue_path** must be
        provided and must point to a valid maildir.

        :param message: a **Message** instance.
        """
        if not self.queue_delivery:
            raise RuntimeError, "No queue_path provided"
        return self.queue_delivery.send(*self._message_args(message))

    def _message_args(self, message):
        # NOTE: mutates message.sender in place when it is unset, filling in
        # the mailer-wide default sender.
        message.sender = message.sender or self.default_sender
        return (message.sender,
                message.recipients,
                message.to_message())
|
Python
| 0.000006
|
@@ -908,32 +908,60 @@
outbox** list.%0A%0A
+ :versionadded: 0.3%0A%0A
:param m
@@ -5351,16 +5351,44 @@
lowed.%0A%0A
+ :versionadded: 0.3%0A%0A
|
83b51969d55a81c34cae483d11901fe90e1c2fa9
|
fix importlib for rtfd
|
pyrealsense/importlib.py
|
pyrealsense/importlib.py
|
# -*- coding: utf-8 -*-
# Licensed under the Apache-2.0 License, see LICENSE for details.

"""This module loads rsutilwrapper and librealsense library."""

import ctypes
import sys
import os
import warnings

os_name = sys.platform

# Platform-specific shared-library naming: prefix and file extension.
lrs_prefix_mapping = {'darwin': 'lib', 'linux': 'lib', 'linux2': 'lib', 'win32': ''}
lrs_suffix_mapping = {'darwin': '.dylib', 'linux': '.so', 'linux2': '.so', 'win32': '.dll'}

try:
    lrs_prefix = lrs_prefix_mapping[os_name]
    lrs_suffix = lrs_suffix_mapping[os_name]
except KeyError:
    raise OSError('OS not supported.')

## import C lib
try:
    lrs = ctypes.CDLL(lrs_prefix + 'realsense' + lrs_suffix)
except OSError:
    # BUG FIX: 'warnings' used to be imported only inside this handler, so
    # the handler below raised NameError whenever librealsense loaded fine
    # but rsutilwrapper was missing.  It is now imported at module top.
    warnings.warn("librealsense not found.")
    lrs = None

## try import since docs will crash here
try:
    from . import rsutilwrapper
except ImportError:
    warnings.warn("rsutilwrapper not found.")
    rsutilwrapper = None
|
Python
| 0
|
@@ -183,16 +183,32 @@
mport os
+%0Aimport warnings
%0A%0Aos_nam
@@ -653,28 +653,8 @@
or:%0A
- import warnings%0A
@@ -763,15 +763,8 @@
%0A
- from .
imp
|
238dd56b20418178ac8b4357ac70491b73b52dda
|
Add new interface.
|
pykeg/core/Interfaces.py
|
pykeg/core/Interfaces.py
|
"""
This library defines a set of interfaces used by parts of the kegbot.
In general, the interfaces defined here are nothing more than a well-known
class name and one or more function prototypes, which define the interface.
Modules wishing to advertise implementation of one or more of these interfaces
may do so by subclassing that interface. An implementation of a particular
interface must interface all functions defined by that interface.
"""
class AbstractInterfaceError(Exception):
    """Raised by interface methods that an implementation failed to override."""
    pass
class IRelay:
    """ Relay interface """

    # Status constants; implementations presumably return these from Status().
    STATUS_ENABLED = 1
    STATUS_DISABLED = 0
    STATUS_UNKNOWN = -1

    def Enable(self):
        """Enable the relay (implementation-defined)."""
        raise AbstractInterfaceError

    def Disable(self):
        """Disable the relay (implementation-defined)."""
        raise AbstractInterfaceError

    def Status(self):
        """Return the relay status, presumably one of the STATUS_* constants."""
        raise AbstractInterfaceError
class ITemperatureSensor:
    """Interface for a temperature sensor."""

    def SensorName(self):
        """ Return a descriptive string name """
        raise AbstractInterfaceError

    def GetTemperature(self):
        """
        Get the last recorded temperature.

        Returns a tuple of (float temp_in_c, float last_reading_timestamp). If
        last_reading_timestamp is not none, then it is the approximate timestamp
        of the last temperature reading.
        """
        raise AbstractInterfaceError
class IFlowmeter:
    """Interface for a flowmeter."""

    def GetTicks(self):
        """
        Get monotonically increasing tick value. Returns integer.
        """
        raise AbstractInterfaceError
class IAuthDevice:
    """ Interface for an access control device """

    def AuthorizedUsers(self):
        """ Return a list of all newly authorized users """
        raise AbstractInterfaceError
class IDisplayDevice:
    """ A device that can handle alerts """

    def Activity(self):
        """ Register that some activity has occured at this instant in time """
        raise AbstractInterfaceError

    def Alert(self, message):
        """ A string message to raise """
        raise AbstractInterfaceError
class IFlowListener:
    """ Something that can listen to flow events """

    def FlowStart(self, flow):
        """ Called when a flow is started """
        raise AbstractInterfaceError

    def FlowUpdate(self, flow):
        """ Called periodically during the life of a flow """
        raise AbstractInterfaceError

    def FlowEnd(self, flow, drink):
        """ Called at the end of a flow """
        raise AbstractInterfaceError
class IThermoListener:
    """ Something interested in periodic temperature events """

    def ThermoUpdate(self, sensor, temperature):
        """Called with the sensor and its latest temperature reading."""
        raise AbstractInterfaceError
|
Python
| 0
|
@@ -2473,28 +2473,110 @@
ise AbstractInterfaceError%0A%0A
+class IEventListener:%0A def PostEvent(self, ev):%0A raise AbstractInterfaceError%0A
|
85ed80a230530daae913ec289466a186238b68a9
|
Version 1.1.45
|
setup.py
|
setup.py
|
"""
The setup package to install the SeleniumBase Test Framework plugins
"""
from setuptools import setup, find_packages # noqa
setup(
name='seleniumbase',
version='1.1.44',
url='http://seleniumbase.com',
author='Michael Mintz',
author_email='@mintzworld',
maintainer='Michael Mintz',
description='Reliable Browser Automation - http://seleniumbase.com',
license='The MIT License',
install_requires=[
'selenium>=2.53.2',
'nose==1.3.7',
'pytest==2.8.7',
'flake8==2.5.4',
'requests==2.9.1',
'urllib3==1.14',
'BeautifulSoup==3.2.1',
'unittest2==1.1.0',
'chardet==2.3.0',
'simplejson==3.8.2',
'boto==2.39.0',
'ipdb==0.8.1',
'pyvirtualdisplay==0.1.5',
],
packages=['seleniumbase',
'seleniumbase.core',
'seleniumbase.plugins',
'seleniumbase.fixtures',
'seleniumbase.common',
'seleniumbase.config'],
entry_points={
'nose.plugins': [
'base_plugin = seleniumbase.plugins.base_plugin:Base',
'selenium = seleniumbase.plugins.selenium_plugin:SeleniumBrowser',
'page_source = seleniumbase.plugins.page_source:PageSource',
'screen_shots = seleniumbase.plugins.screen_shots:ScreenShots',
'test_info = seleniumbase.plugins.basic_test_info:BasicTestInfo',
('db_reporting = '
'seleniumbase.plugins.db_reporting_plugin:DBReporting'),
's3_logging = seleniumbase.plugins.s3_logging_plugin:S3Logging',
('hipchat_reporting = seleniumbase.plugins'
'.hipchat_reporting_plugin:HipchatReporting'),
]
}
)
|
Python
| 0
|
@@ -174,17 +174,17 @@
n='1.1.4
-4
+5
',%0A u
|
2e5c50039bc91c6de267804ab5374e82cbd3af5a
|
Fix serial.load to use joblib if available
|
pylearn2/utils/serial.py
|
pylearn2/utils/serial.py
|
import cPickle
import pickle
import numpy as np
import os
import time
import warnings
import sys
from pylearn2.utils.string_utils import preprocess
from cPickle import BadPickleGet
io = None
hdf_reader = None
def load(filepath, recurse_depth=0):
    """Load a serialized object from *filepath*.

    Dispatches on extension: .npy via numpy, .mat via scipy.io (falling
    back to h5py for HDF5-based MATLAB files), everything else via cPickle.
    On pickle failure the read is retried with exponential backoff, on the
    assumption the file may still be mid-write (e.g. over a network fs).

    NOTE(review): the file handle leaks if cPickle.load raises -- confirm
    before tightening, since the retry path reopens the file anyway.
    """
    if recurse_depth == 0:
        filepath = preprocess(filepath)
        if filepath.endswith('.npy'):
            return np.load(filepath)

        if filepath.endswith('.mat'):
            # scipy.io is imported lazily and cached in the module-level
            # 'io' global to keep import time down.
            global io
            if io is None:
                import scipy.io
                io = scipy.io
            try:
                return io.loadmat(filepath)
            except NotImplementedError, nei:
                # scipy cannot read MATLAB v7.3 files; those are HDF5,
                # so fall back to h5py (also lazily imported and cached).
                if str(nei).find('HDF reader') != -1:
                    global hdf_reader
                    if hdf_reader is None:
                        import h5py
                        hdf_reader = h5py
                    return hdf_reader.File(filepath)
                else:
                    raise
            #this code should never be reached
            assert False

    def exponential_backoff():
        # Wait 0.5 * 2**depth seconds between attempts; after 10 tries,
        # fall back to reading the raw bytes and unpickling from memory.
        if recurse_depth > 9:
            print ('Max number of tries exceeded while trying to open ' +
                   filepath)
            print 'attempting to open via reading string'
            f = open(filepath, 'rb')
            lines = f.readlines()
            f.close()
            content = ''.join(lines)
            return cPickle.loads(content)
        else:
            nsec = 0.5 * (2.0 ** float(recurse_depth))
            print "Waiting " + str(nsec) + " seconds and trying again"
            time.sleep(nsec)
            return load(filepath, recurse_depth + 1)

    try:
        f = open(filepath, 'rb')
        obj = cPickle.load(f)
        f.close()
        return obj
    except BadPickleGet, e:
        print ('Failed to open ' + str(filepath) +
               ' due to BadPickleGet with exception string ' + str(e))
        return exponential_backoff()
    except EOFError, e:
        print ('Failed to open ' + str(filepath) +
               ' due to EOFError with exception string ' + str(e))
        return exponential_backoff()
    except ValueError, e:
        print ('Failed to open ' + str(filepath) +
               ' due to ValueError with string ' + str(e))
        return exponential_backoff()
    except Exception, e:
        #assert False
        exc_str = str(e)
        if len(exc_str) > 0:
            import pdb
            tb = pdb.traceback.format_exc()
            raise Exception("Couldn't open '" + str(filepath) +
                            "' due to: " + str(type(e)) + ', ' + str(e) +
                            ". Orig traceback:\n" + tb)
        else:
            # Some exceptions stringify to '': re-run the failing load
            # outside try/except so the interpreter prints the real error.
            print ("Couldn't open '" + str(filepath) +
                   "' and exception has no string. Opening it again outside "
                   "the try/catch so you can see whatever error it prints on "
                   "its own.")
            f = open(filepath, 'rb')
            obj = cPickle.load(f)
            f.close()
            return obj
def save(filepath, obj):
filepath = preprocess(filepath)
try:
_save(filepath, obj)
except RuntimeError, e:
""" Sometimes for large theano graphs, pickle/cPickle exceed the
maximum recursion depth. This seems to me like a fundamental
design flaw in pickle/cPickle. The workaround I employ here
is the one recommended to someone who had a similar problem
on stackexchange:
http://stackoverflow.com/questions/2134706/hitting-maximum-recursion-depth-using-pythons-pickle-cpickle
Obviously this does not scale and could cause a crash
but I don't see another solution short of writing our
own implementation of pickle.
"""
if str(e).find('recursion') != -1:
warnings.warn('pylearn2.utils.save encountered the following '
'error: ' + str(e) +
'\nAttempting to resolve this error by calling ' +
'sys.setrecusionlimit and retrying')
old_limit = sys.getrecursionlimit()
try:
sys.setrecursionlimit(50000)
_save(filepath, obj)
finally:
sys.setrecursionlimit(old_limit)
def _save(filepath, obj):
try:
import joblib
joblib_available = True
except ImportError:
joblib_available = False
if filepath.endswith('.npy'):
np.save(filepath, obj)
return
assert filepath.endswith('.pkl')
save_dir = os.path.dirname(filepath)
if not os.path.exists(save_dir) or not os.path.isdir(save_dir):
raise IOError("save path %s is not an existing directory" % save_dir)
elif not os.access(save_dir, os.W_OK):
raise IOError("permission error creating %s" % filepath)
try:
if joblib_available:
joblib.dump(obj, filepath)
else:
with open(filepath, 'wb') as filehandle:
cPickle.dump(obj, filehandle)
except Exception, e:
print "cPickle has failed to write an object to "+filepath
if str(e).find('maximum recursion depth exceeded') != -1:
raise
try:
print 'retrying with pickle'
f = open(filepath, "wb")
pickle.dump(obj, f)
f.close()
except Exception, e2:
try:
f.close()
except:
pass
if str(e) == '' and str(e2) == '':
print (
'neither cPickle nor pickle could write to ' + str(filepath)
)
print (
'moreover, neither of them raised an exception that '
'can be converted to a string'
)
print (
'now re-attempting to write with cPickle outside the '
'try/catch loop so you can see if it prints anything '
'when it dies'
)
f = open(filepath, 'wb')
cPickle.dump(obj, f)
f.close()
print ('Somehow or other, the file write worked once '
'we quit using the try/catch.')
else:
if str(e2) == 'env':
raise
import pdb
tb = pdb.traceback.format_exc()
raise Exception(str(obj) +
' could not be written to '+
str(filepath) +
' by cPickle due to '+str(e)+
' nor by pickle due to '+str(e2)+
'. \nTraceback '+ tb)
print ('Warning: ' + str(filepath) +
' was written by pickle instead of cPickle, due to '
+ str(e) +
' (perhaps your object is really big?)')
def clone_via_serialize(obj):
str = cPickle.dumps(obj)
return cPickle.loads(str)
def to_string(obj):
return cPickle.dumps(obj)
def mkdir(filepath):
try:
os.makedirs(filepath)
except:
print ("couldn't make directory '" + str(filepath) +
"', maybe it already exists")
|
Python
| 0
|
@@ -241,16 +241,136 @@
pth=0):%0A
+ try:%0A import joblib%0A joblib_available = True%0A except ImportError:%0A joblib_available = False%0A
if r
@@ -1693,35 +1693,73 @@
try:%0A
+i
f
-=
+not joblib_available:%0A with
open(filepath,
@@ -1763,25 +1763,34 @@
h, 'rb')
-%0A
+ as f:%0A
obj = cP
@@ -1773,32 +1773,37 @@
s f:%0A
+
obj = cPickle.lo
@@ -1812,32 +1812,68 @@
(f)%0A
-f.close(
+else:%0A obj = joblib.load(filepath
)%0A re
|
e4cc67edc7570bfe3ca7cecfa147ed88d79a978d
|
Version bump.
|
setup.py
|
setup.py
|
import os
from setuptools import setup, find_packages
VERSION = '0.2'
if __name__ == '__main__':
setup(
name = 'django-tastypie-mongoengine',
version = VERSION,
description = "MongoEngine support for django-tastypie.",
long_description = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
author = 'Matevz',
author_email = 'matevz.mihalic@gmail.com',
url = 'https://github.com/mitar/django-tastypie-mongoengine',
keywords = "REST RESTful tastypie mongo mongodb mongoengine django",
license = 'AGPLv3',
packages = find_packages(exclude=('*.tests', '*.tests.*', 'tests.*', 'tests')),
classifiers = (
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
),
zip_safe = False,
install_requires = (
'Django>=1.4',
'django-tastypie>=0.9.11',
'mongoengine>=0.6.9',
),
test_suite = 'tests.runtests.runtests',
tests_require = (
'Django>=1.4',
'django-tastypie>=0.9.11',
'mongoengine>=0.6.9',
),
)
|
Python
| 0
|
@@ -63,16 +63,18 @@
N = '0.2
+.1
'%0A%0Aif __
|
71b7faf519a45de7fc349930cf2d4268e27ae36c
|
Bump version to 0.8.0
|
setup.py
|
setup.py
|
import os
import fnmatch
from setuptools import setup, find_packages
from codecs import open
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def schema_files():
'''Return all CSV and JSON files paths in datapackage/schemas
The paths are relative to ./datapackage
'''
def recursive_glob(path, patterns):
results = []
for base, dirs, files in os.walk(path):
matching_files = []
for pattern in patterns:
matching_files.extend(fnmatch.filter(files, pattern))
results.extend(os.path.join(base, f) for f in matching_files)
return results
base_folder = 'datapackage'
remove_base_folder = lambda path: path[len(base_folder) + 1:]
path = os.path.join(base_folder, 'schemas')
files_paths = recursive_glob(path, ['*.csv', '*.json'])
return [remove_base_folder(f) for f in files_paths]
setup(
name='datapackage',
version='0.7.0',
description=(
'Utilities to work with Data Packages as defined on dataprotocols.org'
),
long_description=long_description,
url='https://github.com/frictionlessdata/datapackage-py',
author='Open Knowledge Foundation',
author_email='info@okfn.org',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='data dataprotocols jsontableschema frictionlessdata datascience',
packages=find_packages(exclude=['tests']),
package_data={'datapackage': schema_files()},
install_requires=[
'six >= 1.10.0',
'requests >= 2.8.0',
'jsonschema >= 2.5.1',
'tabulator >= 0.4.0',
'jsontableschema >= 0.5.1',
'unicodecsv>=0.14',
],
)
|
Python
| 0
|
@@ -1036,17 +1036,17 @@
sion='0.
-7
+8
.0',%0A
|
b0878122e5ef212592a678f61698d726a7f8d768
|
Fix query string order
|
post.py
|
post.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cgi
import sqlite3
import time
import config
def fs2dict(fs):
'''Field strage to dict'''
params = {}
for k in fs.keys():
params[k] = fs[k].value
return params
def valid(qs):
required_keys = ['title', 'comment', 'posted_by', 'latitude', 'longitude']
return all([qs.has_key(k) for k in required_keys])
def post(title, comment, posted_by, latitude, longitude):
rate = 0
created_at = int(time.time())
updated_at = created_at
sql = u'insert into posts (id, title, comment, posted_by, rate, latitude, longitude, created_at, updated_at) values (null,?,?,?,?,?,?,?,?);'
con = sqlite3.connect(config.db_path, isolation_level=None)
con.execute(sql, (title, comment, posted_by, rate, latitude, longitude, created_at, updated_at))
con.close()
if __name__ == '__main__':
qs = fs2dict(cgi.FieldStorage())
if valid(qs):
query_string = [qs[k].decode('utf-8') for k in ['latitude', 'longitude', 'title', 'comment', 'posted_by']]
post(*query_string)
result = '{"message": "Successfully posted!"}'
else:
result = '{"message": "Invalid query string"}'
import utils
utils.cgi_header()
print result
|
Python
| 0.999999
|
@@ -915,16 +915,86 @@
age())%0A%0A
+ keys = %5B'title', 'comment', 'posted_by', 'latitude', 'longitude'%5D%0A
if v
@@ -1062,66 +1062,12 @@
in
-%5B'latitude', 'longitude', 'title', 'comment', 'posted_by'%5D
+keys
%5D%0A
|
084893374cf5a1585f8b7c18747ec8b11e0c0ce4
|
Update 02-02_cleanse.py
|
scikit/src/nosql/02-02_cleanse.py
|
scikit/src/nosql/02-02_cleanse.py
|
import commons, sys, os
import logging as log
import pandas as pd
import xgboost as xgb
import numpy as np
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report, confusion_matrix
def add_activations(df):
for ind in commons.indicators:
log.info("Adding activations for {}".format(ind))
ind_prev = ind + "_1"
res = df[ind].sub(df[ind_prev])
res[res < 0] = 0
df["act_" + ind] = res.fillna(0)
return df
|
Python
| 0
|
@@ -571,16 +571,20 @@
llna(0)%0A
+
return d
|
d7a8192c5f1bbb8fc076ceef3a6b835cd37050d8
|
update classifiers
|
setup.py
|
setup.py
|
#!/usr/bin/env python
#from setuptools import setup
from setuptools.command.bdist_rpm import bdist_rpm
from distutils.core import setup
import os
try:
from sphinx.setup_command import BuildDoc as _BuildDoc
class BuildDoc(_BuildDoc):
def finalize_options(self):
super().finalize_options()
if not self.project:
self.project = self.distribution.name
if not self.version:
self.version = self.distribution.version
except ImportError:
BuildDoc = None
def readme():
with open(os.path.join('README')) as r:
return r.read()
setup(
name="sqltoolchain",
version="0.0.1",
description='The toolkit to make work with SQL easier',
packages=["sqltoolchain", "sqltoolchain.syntax"],
requires=["pyparsing"],
author="@bg",
author_email='gaifullinbf@gmail.com',
maintainer='@bg',
maintainer_email='gaifullinbf@gmail.com',
url='https://github.com/WebSQL/toolkit',
license='MIT',
long_description=readme(),
classifiers=[
"Development Status :: 5 - Beta",
"Environment :: Other Environment",
"License :: OSI Approved :: GNU General Public License (GPL)",
"Operating System :: MacOS :: MacOS X",
"Operating System :: OS Independent",
"Operating System :: POSIX",
"Operating System :: POSIX :: Linux",
"Operating System :: Unix",
"Programming Language :: C",
"Programming Language :: Python3",
"Topic :: Database",
"Topic :: Database :: Database Engines/Servers",
],
entry_points={
'console_scripts': [
'sql-pygen=sqltoolchain.pygen:main',
'sql-preprocessor=sqltoolchain.preprocessor:main',
],
}
)
|
Python
| 0.000002
|
@@ -1087,9 +1087,9 @@
::
-5
+4
- B
@@ -1178,34 +1178,19 @@
::
-GNU General Public
+MIT
License
(GP
@@ -1189,14 +1189,8 @@
ense
- (GPL)
%22,%0A
@@ -1438,9 +1438,68 @@
::
-C
+Python :: 3%22,%0A %22Programming Language :: Python :: 3.3
%22,%0A
@@ -1536,17 +1536,23 @@
: Python
-3
+ :: 3.4
%22,%0A
|
b3066ad8e5af59d12a8b28f0e6b69e0305535094
|
edit doc
|
setup.py
|
setup.py
|
from distutils.core import setup
setup(
name = "nicosearch",
py_modules=['nicosearch'],
version = "0.0.3",
license = open('./LICENSE').read(),
download_url = "http://backloglib.googlecode.com/files/backloglib-0.1.1.tar.g://github.com/ymizushi/nicosearch/archive/master.zip",
platforms = ['POSIX'],
description = "https://github.com/ymizushi/nicosearch",
author = "ymizushi",
author_email = "mizushi@gmail.com",
url = "https://github.com/ymizushi/nicosearch",
keywords = ["search", "niconico"],
classifiers = [
'License :: OSI Approved :: MIT License',
"Programming Language :: Python",
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"Topic :: Utilities",
"Topic :: Software Development",
],
long_description = open('README.md').read()
)
|
Python
| 0
|
@@ -119,9 +119,9 @@
0.0.
-3
+4
%22,%0A
@@ -139,32 +139,21 @@
e =
-open('./LICENSE').read()
+'MIT License'
,%0A
|
1102293fd73c4091fd21b011d4e790da6df23031
|
remove README deps
|
setup.py
|
setup.py
|
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages
from setuptools import setup
DEPENDENCIES = [
'google-auth==0.8.0',
'googleapis-common-protos==1.5.0',
'grpcio==1.1.0',
'requests==2.13.0',
'requests-oauthlib==0.8.0',
'six==1.10.0',
'urllib3[secure]==1.20',
]
with open('README.md', 'r') as f:
long_description = f.read()
def load_test_suite():
import unittest
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('tests', pattern='test_*.py')
return test_suite
setup(
name='google-assistant',
version='0.0.1',
author='Google Embedded Assistant team',
author_email='proppy@google.com',
description='Google Embedded Assistant Sample client',
long_description=long_description,
url='TODO(proppy) add external repo url',
packages=find_packages(exclude=('tests')),
namespace_packages=('googlesamples',),
install_requires=DEPENDENCIES,
extras_require={
'MAIN': ['tqdm==4.11.2', 'PyAudio==0.2.10']
},
setup_requires=['flake8'],
tests_require=['flake8'],
test_suite='setup.load_test_suite',
entry_points={
'console_scripts': [
'googlesamples-assistant'
'=googlesamples.assistant.__main__:main [MAIN]'
],
},
license='Apache 2.0',
keywords='google assistant client sample',
classifiers=(
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Operating System :: OS Independent',
'Topic :: Internet :: WWW/HTTP',
),
)
|
Python
| 0.000004
|
@@ -852,76 +852,8 @@
%0A%5D%0A%0A
-with open('README.md', 'r') as f:%0A long_description = f.read()%0A%0A%0A
def
@@ -1243,32 +1243,57 @@
ription=
-long_description
+'Google Embedded Assistant Sample client'
,%0A ur
|
588769c12207a01903b7947598e3bef1bd993d8b
|
Version bump.
|
setup.py
|
setup.py
|
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
try:
# Workaround for http://bugs.python.org/issue15881
import multiprocessing
except ImportError:
pass
VERSION = '0.4.1'
if __name__ == '__main__':
setup(
name = 'django-tastypie-mongoengine',
version = VERSION,
description = "MongoEngine support for django-tastypie.",
long_description = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
author = 'Mitar',
author_email = 'mitar.django@tnode.com',
url = 'https://github.com/mitar/django-tastypie-mongoengine',
keywords = "REST RESTful tastypie mongo mongodb mongoengine django",
license = 'AGPLv3',
packages = find_packages(exclude=('*.tests', '*.tests.*', 'tests.*', 'tests')),
classifiers = (
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
),
zip_safe = False,
install_requires = (
'Django>=1.4',
'django-tastypie>=0.9.12',
'mongoengine>=0.6.11,<0.8.2',
),
test_suite = 'tests.runtests.runtests',
tests_require = (
'Django>=1.4',
'django-tastypie>=0.9.12',
'mongoengine>=0.6.11,<0.8.2',
'nose',
),
)
|
Python
| 0
|
@@ -204,17 +204,17 @@
= '0.4.
-1
+2
'%0A%0Aif __
|
81c5d5eea267cd35517bae1ed50d4bdeb8b3a62c
|
clean up interface class
|
pymba/vimba_interface.py
|
pymba/vimba_interface.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from . import vimba_structure as structs
from .vimba_object import VimbaObject
from .vimba_exception import VimbaException
from .vimba_dll import VimbaDLL
from ctypes import *
# interface features are automatically readable as object attributes.
class VimbaInterface(VimbaObject):
"""
A Vimba interface object. This class provides the minimal access
to Vimba functions required to control the interface.
"""
@property
def interfaceIdString(self):
return self._interfaceIdString
# own handle is inherited as self._handle
def __init__(self, interfaceIdString):
# call super constructor
super(VimbaInterface, self).__init__()
# set ID
self._interfaceIdString = interfaceIdString
def openInterface(self):
"""
Open the interface.
"""
errorCode = VimbaDLL.interfaceOpen(self._interfaceIdString,
byref(self._handle))
if errorCode != 0:
raise VimbaException(errorCode)
def closeInterface(self):
"""
Close the interface.
"""
errorCode = VimbaDLL.interfaceClose(self._handle)
if errorCode != 0:
raise VimbaException(errorCode)
|
Python
| 0.000001
|
@@ -1,107 +1,29 @@
-# -*- coding: utf-8 -*-%0Afrom __future__ import absolute_import%0Afrom . import vimba_structure as structs
+from ctypes import byref%0A
%0Afro
@@ -111,25 +111,16 @@
om .
-vimba_dll
import
Vimb
@@ -119,108 +119,15 @@
ort
-V
+v
imba
-DLL%0Afrom ctypes import *%0A%0A# interface features are automatically readable as object attributes.
+_c
%0A%0A%0Ac
@@ -160,17 +160,16 @@
bject):%0A
-%0A
%22%22%22%0A
@@ -312,325 +312,172 @@
-@property%0A def interfaceIdString(self):%0A return self._interfaceIdS
+def __init__(self, id_string: str):%0A self._id_string = id_s
tring%0A
-%0A
-# own handle is inherited as self._handle%0A def __init__(self, interfaceIdString):%0A%0A # call super constructor%0A super(VimbaInterface, self).__init__()%0A%0A # set ID%0A self._interfaceIdString = interfaceIdS
+ super().__init__()%0A%0A @property%0A def id_string(self):%0A return self._id_s
trin
@@ -491,25 +491,16 @@
def open
-Interface
(self):%0A
@@ -564,32 +564,31 @@
error
-Code
=
-V
+v
imba
-DLL.
+_c.vmb_
interfac
@@ -588,17 +588,18 @@
nterface
-O
+_o
pen(self
@@ -605,19 +605,11 @@
f._i
-nterfaceIdS
+d_s
trin
@@ -687,33 +687,24 @@
if error
-Code != 0
:%0A
@@ -731,20 +731,16 @@
on(error
-Code
)%0A%0A d
@@ -747,25 +747,16 @@
ef close
-Interface
(self):%0A
@@ -825,24 +825,23 @@
rror
-Code
=
-V
+v
imba
-DLL.
+_c.vmb_
inte
@@ -845,17 +845,18 @@
nterface
-C
+_c
lose(sel
@@ -886,17 +886,8 @@
rror
-Code != 0
:%0A
@@ -926,10 +926,6 @@
rror
-Code
)%0A
|
440c8e679b5939da0f5e32342440f7151c11bb61
|
Add checking value of "XWALK_OS_ANDROID" during parsing xwalk deps
|
tools/generate_gclient-xwalk.py
|
tools/generate_gclient-xwalk.py
|
#!/usr/bin/env python
# Copyright (c) 2013 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script is responsible for generating .gclient-xwalk in the top-level
source directory from DEPS.xwalk.
User-configurable values such as |cache_dir| are fetched from .gclient instead.
"""
import logging
import optparse
import os
import pprint
CROSSWALK_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
GCLIENT_ROOT = os.path.dirname(os.path.dirname(CROSSWALK_ROOT))
def ParseGClientConfig():
"""
Parses the top-level .gclient file (NOT .gclient-xwalk) and returns the
values set there as a dictionary.
"""
with open(os.path.join(GCLIENT_ROOT, '.gclient')) as dot_gclient:
config = {}
exec(dot_gclient, config)
return config
def GenerateGClientXWalk(options):
with open(os.path.join(CROSSWALK_ROOT, 'DEPS.xwalk')) as deps_file:
deps_contents = deps_file.read()
if 'XWALK_OS_ANDROID' in os.environ:
deps_contents += 'target_os = [\'android\']\n'
gclient_config = ParseGClientConfig()
if options.cache_dir:
logging.warning('--cache_dir is deprecated and will be removed in '
'Crosswalk 8. You should set cache_dir in .gclient '
'instead.')
cache_dir = options.cache_dir
else:
cache_dir = gclient_config.get('cache_dir')
deps_contents += 'cache_dir = %s\n' % pprint.pformat(cache_dir)
with open(os.path.join(GCLIENT_ROOT, '.gclient-xwalk'), 'w') as gclient_file:
gclient_file.write(deps_contents)
def main():
option_parser = optparse.OptionParser()
# TODO(rakuco): Remove in Crosswalk 8.
option_parser.add_option('--cache-dir',
help='DEPRECATED Set "cache_dir" in .gclient-xwalk '
'to this directory, so that all git '
'repositories are cached there.')
options, _ = option_parser.parse_args()
GenerateGClientXWalk(options)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -1006,16 +1006,31 @@
)%0A%0A if
+os.environ.get(
'XWALK_O
@@ -1043,22 +1043,16 @@
OID'
- in os.environ
+) == '1'
:%0A
|
d1c62e413eeefb105538d5f8b53bc58441951535
|
change class names to hint their C-library agents
|
pymmrouting/datamodel.py
|
pymmrouting/datamodel.py
|
"""
Data adapter for reading and parsing multimodal transportation networks and
related abstraction of facilities
"""
from ctypes import *
class Edge(Structure):
pass
Edge._fields_ = [("mode_id", c_int),
("length", c_double),
("length_factor", c_double),
("speed_factor", c_double),
("from_vertex_id", c_longlong),
("to_vertex_id", c_longlong),
("adjNext", POINTER(Edge))]
class Vertex(Structure):
pass
Vertex._fields_ = [("id", c_longlong),
("temp_cost", c_double),
("distance", c_double),
("elapsed_time", c_double),
("walking_distance", c_double),
("walking_time", c_double),
("parent", POINTER(Vertex)),
("outdegree", c_int),
("outgoing", POINTER(Edge)),
("status", c_int),
("next", POINTER(Vertex))]
VERTEX_VALIDATION_CHECKER = CFUNCTYPE(c_int, POINTER(Vertex))
"""
from pymmspa4pg import connect_db, create_routing_plan, set_mode, \
set_public_transit_mode, set_cost_factor, parse, dispose, \
set_switch_condition, set_switching_constraint, set_target_constraint
class MultimodalNetwork(object):
# Represent multimodal transportation networks in memory
def __init__(self):
self.stub = ""
def connect_db(self, conn_info):
if connect_db(conn_info) != 0:
raise Exception("Connect to database error!")
def assemble_networks(self, plan):
create_routing_plan(len(plan.mode_list), len(plan.public_transit_set))
# set mode list
i = 0
for mode in plan.mode_list:
set_mode(i, mode)
i += 1
# set switch conditions and constraints if the plan is multimodal
if len(plan.mode_list) > 1:
for i in range(len(plan.mode_list) - 1):
set_switch_condition(i, plan.switch_condition_list[i])
set_switching_constraint(i, plan.switch_constraint_list[i])
# set public transit modes if there are
if plan.has_public_transit:
i = 0
for mode in plan.public_transit_set:
set_public_transit_mode(i, mode)
i += 1
set_target_constraint(plan.target_constraint)
set_cost_factor(plan.cost_factor)
if parse() != 0:
raise Exception("Assembling multimodal networks failed!")
def disassemble_networks(self):
dispose()
"""
|
Python
| 0
|
@@ -135,9 +135,66 @@
ort
-*
+Structure, c_int, c_double, c_longlong, POINTER, CFUNCTYPE
%0A%0A%0Ac
@@ -198,16 +198,17 @@
%0A%0Aclass
+C
Edge(Str
@@ -226,16 +226,17 @@
pass%0A%0A
+C
Edge._fi
@@ -263,32 +263,33 @@
c_int),%0A
+
@@ -328,32 +328,33 @@
+
(%22length_factor%22
@@ -359,32 +359,33 @@
r%22, c_double),%0A
+
@@ -432,16 +432,17 @@
+
(%22from_v
@@ -457,32 +457,33 @@
%22, c_longlong),%0A
+
@@ -507,32 +507,33 @@
c_longlong),%0A
+
@@ -556,24 +556,25 @@
POINTER(
+C
Edge))%5D%0A%0Acla
@@ -570,22 +570,24 @@
dge))%5D%0A%0A
+%0A
class
+C
Vertex(S
@@ -607,16 +607,17 @@
pass%0A%0A
+C
Vertex._
@@ -680,16 +680,17 @@
+
(%22temp_c
@@ -705,32 +705,33 @@
c_double),%0A
+
@@ -784,16 +784,17 @@
+
(%22elapse
@@ -809,32 +809,33 @@
c_double),%0A
+
@@ -880,32 +880,33 @@
+
(%22walking_time%22,
@@ -940,16 +940,17 @@
+
(%22parent
@@ -962,32 +962,33 @@
POINTER(
+C
Vertex)),%0A
@@ -996,24 +996,25 @@
+
(%22outdegree%22
@@ -1022,32 +1022,33 @@
c_int),%0A
+
@@ -1083,16 +1083,17 @@
TER(
+C
Edge)),%0A
@@ -1084,24 +1084,25 @@
ER(CEdge)),%0A
+
@@ -1129,32 +1129,33 @@
c_int),%0A
+
@@ -1178,32 +1178,33 @@
POINTER(
+C
Vertex))%5D%0A%0AVERTE
@@ -1251,16 +1251,17 @@
POINTER(
+C
Vertex))
|
413f628a750c59cf2ced27738513497adfc779c1
|
Implement agent deletion.
|
pynessus/models/agent.py
|
pynessus/models/agent.py
|
"""
Copyright 2014 Quentin Kaiser
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from nessusobject import NessusObject
class Agent(NessusObject):
"""
A Nessus scanning agent.
Attributes:
id(int): identification
name(str): group's name
_Google Python Style Guide:
http://google-styleguide.googlecode.com/svn/trunk/pyguide.html
"""
def __init__(self, server):
"""Constructor"""
super(Agent, self).__init__(server)
self._distros = None
self._id = None
self._ip = None
self._last_scanned = None
self._name = None
self._platform = None
self._token = None
self._uuid = None
self._scanner_id = None
@staticmethod
def list():
"""
Returns the agents list.
Params:
Returns:
"""
return
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = int(value)
@property
def uuid(self):
return self._uuid
@uuid.setter
def uuid(self, value):
self._uuid = str(value)
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = str(value)
@property
def ip(self):
return self._ip
@ip.setter
def ip(self, value):
self._ip = str(value)
@property
def last_scanned(self):
return self._last_scanned
@last_scanned.setter
def last_scanned(self, value):
self._last_scanned = str(value)
@property
def platform(self):
return self._platform
@platform.setter
def platform(self, value):
self._platform = int(value)
@property
def token(self):
return self._token
@token.setter
def token(self, value):
self._token = str(value)
@property
def scanner_id(self):
return self._scanner_id
@scanner_id.setter
def scanner_id(self, value):
self._scanner_id = int(value)
|
Python
| 0
|
@@ -1339,16 +1339,534 @@
return%0A%0A
+ def delete(self):%0A %22%22%22%0A Delete the agent instance.%0A Params:%0A Returns:%0A %22%22%22%0A if self._server.server_version%5B0%5D == %226%22:%0A response = self._server._api_request(%0A %22DELETE%22,%0A %22/scanners/%25d/agents/%25d%22 %25 (self._scanner_id, self.id),%0A %22%22%0A )%0A if response is not None:%0A return True%0A else:%0A return False%0A else:%0A raise Exception(%22Not supported.%22)%0A%0A
@pro
|
f16195aa8b1569e0260c48d4159b7d2ce0ea2fab
|
add Top._to_proto convenience function
|
python/caffe/net_spec.py
|
python/caffe/net_spec.py
|
"""Python net specification.
This module provides a way to write nets directly in Python, using a natural,
functional style. See examples/python_nets/caffenet.py for an example.
Currently this works as a thin wrapper around the Python protobuf interface,
with layers and parameters automatically generated for the "layers" and
"params" pseudo-modules, which are actually objects using __getattr__ magic
to generate protobuf messages.
Note that when using to_proto or Top.to_proto, names of intermediate blobs will
be automatically generated. To explicitly specify blob names, use the NetSpec
class -- assign to its attributes directly to name layers, and call
NetSpec.to_proto to serialize all assigned layers.
This interface is expected to continue to evolve as Caffe gains new capabilities
for specifying nets. In particular, the automatically generated layer names
are not guaranteed to be forward-compatible.
"""
from collections import OrderedDict, Counter
from .proto import caffe_pb2
from google import protobuf
import six
def param_name_dict():
"""Find out the correspondence between layer names and parameter names."""
layer = caffe_pb2.LayerParameter()
# get all parameter names (typically underscore case) and corresponding
# type names (typically camel case), which contain the layer names
# (note that not all parameters correspond to layers, but we'll ignore that)
param_names = [s for s in dir(layer) if s.endswith('_param')]
param_type_names = [type(getattr(layer, s)).__name__ for s in param_names]
# strip the final '_param' or 'Parameter'
param_names = [s[:-len('_param')] for s in param_names]
param_type_names = [s[:-len('Parameter')] for s in param_type_names]
return dict(zip(param_type_names, param_names))
def to_proto(*tops):
"""Generate a NetParameter that contains all layers needed to compute
all arguments."""
layers = OrderedDict()
autonames = Counter()
for top in tops:
top.fn._to_proto(layers, {}, autonames)
net = caffe_pb2.NetParameter()
net.layer.extend(layers.values())
return net
def assign_proto(proto, name, val):
"""Assign a Python object to a protobuf message, based on the Python
type (in recursive fashion). Lists become repeated fields/messages, dicts
become messages, and other types are assigned directly."""
if isinstance(val, list):
if isinstance(val[0], dict):
for item in val:
proto_item = getattr(proto, name).add()
for k, v in six.iteritems(item):
assign_proto(proto_item, k, v)
else:
getattr(proto, name).extend(val)
elif isinstance(val, dict):
for k, v in six.iteritems(val):
assign_proto(getattr(proto, name), k, v)
else:
setattr(proto, name, val)
class Top(object):
"""A Top specifies a single output blob (which could be one of several
produced by a layer.)"""
def __init__(self, fn, n):
self.fn = fn
self.n = n
def to_proto(self):
"""Generate a NetParameter that contains all layers needed to compute
this top."""
return to_proto(self)
class Function(object):
"""A Function specifies a layer, its parameters, and its inputs (which
are Tops from other layers)."""
def __init__(self, type_name, inputs, params):
self.type_name = type_name
self.inputs = inputs
self.params = params
self.ntop = self.params.get('ntop', 1)
# use del to make sure kwargs are not double-processed as layer params
if 'ntop' in self.params:
del self.params['ntop']
self.in_place = self.params.get('in_place', False)
if 'in_place' in self.params:
del self.params['in_place']
self.tops = tuple(Top(self, n) for n in range(self.ntop))
def _get_name(self, top, names, autonames):
if top not in names:
autonames[top.fn.type_name] += 1
names[top] = top.fn.type_name + str(autonames[top.fn.type_name])
return names[top]
def _to_proto(self, layers, names, autonames):
if self in layers:
return
bottom_names = []
for inp in self.inputs:
inp.fn._to_proto(layers, names, autonames)
bottom_names.append(layers[inp.fn].top[inp.n])
layer = caffe_pb2.LayerParameter()
layer.type = self.type_name
layer.bottom.extend(bottom_names)
if self.in_place:
layer.top.extend(layer.bottom)
else:
for top in self.tops:
layer.top.append(self._get_name(top, names, autonames))
layer.name = self._get_name(self.tops[0], names, autonames)
for k, v in six.iteritems(self.params):
# special case to handle generic *params
if k.endswith('param'):
assign_proto(layer, k, v)
else:
try:
assign_proto(getattr(layer,
_param_names[self.type_name] + '_param'), k, v)
except (AttributeError, KeyError):
assign_proto(layer, k, v)
layers[self] = layer
class NetSpec(object):
"""A NetSpec contains a set of Tops (assigned directly as attributes).
Calling NetSpec.to_proto generates a NetParameter containing all of the
layers needed to produce all of the assigned Tops, using the assigned
names."""
def __init__(self):
super(NetSpec, self).__setattr__('tops', OrderedDict())
def __setattr__(self, name, value):
self.tops[name] = value
def __getattr__(self, name):
return self.tops[name]
def to_proto(self):
names = {v: k for k, v in six.iteritems(self.tops)}
autonames = Counter()
layers = OrderedDict()
for name, top in six.iteritems(self.tops):
top.fn._to_proto(layers, names, autonames)
net = caffe_pb2.NetParameter()
net.layer.extend(layers.values())
return net
class Layers(object):
"""A Layers object is a pseudo-module which generates functions that specify
layers; e.g., Layers().Convolution(bottom, kernel_size=3) will produce a Top
specifying a 3x3 convolution applied to bottom."""
def __getattr__(self, name):
def layer_fn(*args, **kwargs):
fn = Function(name, args, kwargs)
if fn.ntop == 1:
return fn.tops[0]
else:
return fn.tops
return layer_fn
class Parameters(object):
"""A Parameters object is a pseudo-module which generates constants used
in layer parameters; e.g., Parameters().Pooling.MAX is the value used
to specify max pooling."""
def __getattr__(self, name):
class Param:
def __getattr__(self, param_name):
return getattr(getattr(caffe_pb2, name + 'Parameter'), param_name)
return Param()
_param_names = param_name_dict()
layers = Layers()
params = Parameters()
|
Python
| 0
|
@@ -3193,16 +3193,127 @@
(self)%0A%0A
+ def _to_proto(self, layers, names, autonames):%0A return self.fn._to_proto(layers, names, autonames)%0A%0A
%0Aclass F
@@ -4382,19 +4382,16 @@
inp.
-fn.
_to_prot
@@ -6018,35 +6018,32 @@
top.
-fn.
_to_proto(layers
|
3c7758ce4f4ee844212e0dc86e3e35a5ea34d13f
|
Update setup.py
|
setup.py
|
setup.py
|
from cx_Freeze import setup, Executable
build_exe_options = {
"bin_includes": [
"libssl.so",
"libz.so"
],
"bin_path_includes": [
"/usr/lib/x86_64-linux-gnu"
],
"include_files": [
("client/dist", "client"),
"LICENSE",
"templates",
"readme.md"
],
"includes": [
"asyncio.base_events"
],
"packages": [
"asyncio",
"idna",
"gzip",
"motor",
"numpy",
"uvloop",
"sentry_sdk",
"ssl"
]
}
options = {
"build_exe": build_exe_options
}
executables = [
Executable('run.py', base="Console")
]
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7"
]
setup(name="virtool", executables=executables, options=options, classifiers=classifiers)
|
Python
| 0.000001
|
@@ -2,17 +2,18 @@
rom
-cx_Freeze
+setuptools
imp
@@ -25,239 +25,310 @@
etup
-, Executable%0A%0Abuild_exe_options = %7B%0A %22bin_includes%22: %5B%0A %22libssl.so%22,%0A %22libz.so%22
+%0A%0Aclassifiers=%5B%0A %22Programming Language :: Python :: 3.7%22,%0A %22Programming Language :: Python :: 3.8%22,%0A%5D%0A%0Asetup(%0A name=%22virtool%22,%0A classifiers=classifiers,%0A install_requires=%5B%0A %22aiofiles%22,
%0A
-%5D,%0A
%22
-bin_path_includes%22: %5B%0A %22/usr/lib/x86_64-linux-gnu%22
+aiohttp%22,%0A %22aiojobs%22,
%0A
-%5D,%0A
%22
-include_files%22: %5B%0A (%22client/
+aionotify%22,%0A %22aiore
dis
-t
%22,
- %22client%22)
+%0A %22arrow%22
,%0A
@@ -338,15 +338,14 @@
%22
-LICENSE
+bcrypt
%22,%0A
@@ -356,17 +356,17 @@
%22
-templates
+biopython
%22,%0A
@@ -377,98 +377,78 @@
%22
-readme.md%22
+Cerberus%22,
%0A
-%5D,%0A
%22
-includes%22: %5B%0A %22asyncio.base_events%22
+cchardet%22,%0A %22click%22,
%0A
-%5D,%0A
%22
-packages%22: %5B
+coloredlogs%22,
%0A
@@ -457,15 +457,16 @@
%22
-asyncio
+coverage
%22,%0A
@@ -477,12 +477,18 @@
%22
-idna
+dictdiffer
%22,%0A
@@ -499,12 +499,12 @@
%22
-gzip
+Mako
%22,%0A
@@ -532,13 +532,14 @@
%22
-numpy
+psutil
%22,%0A
@@ -550,14 +550,14 @@
%22
-uvloop
+semver
%22,%0A
@@ -570,17 +570,17 @@
%22sentry
-_
+-
sdk%22,%0A
@@ -590,323 +590,169 @@
%22
-ssl%22%0A %5D%0A%7D%0A%0Aoptions = %7B%0A %22build_exe%22: build_exe_options%0A%7D%0A%0Aexecutables = %5B%0A Executable('run.py', base=%22Console%22)%0A%5D%0A%0Aclassifiers=%5B%0A %22Programming Language :: Python :: 3.6%22,%0A %22Programming Language :: Python :: 3.7%22%0A%5D%0A%0Asetup(name=%22virtool%22, executables=executables, options=options, classifiers=classifiers
+uvloop%22,%0A %22visvalingamwyatt%22%0A %5D,%0A py_modules=%5B%22virtool%22%5D,%0A entry_points='''%0A %5Bconsole_scripts%5D%0A virtool=virtool.config:entry%0A '''%0A
)%0A
|
5377d3a817c8a03a205e9557b4614f59e8877416
|
update Peercoin network parameters
|
pypeerassets/networks.py
|
pypeerassets/networks.py
|
from collections import namedtuple
from decimal import Decimal
from btcpy.structs.transaction import TxOut
from btcpy.structs.script import NulldataScript
from pypeerassets.exceptions import UnsupportedNetwork
class PeercoinTxOut(TxOut):
def get_dust_threshold(self, size_to_relay_fee) -> float:
if isinstance(self.script_pubkey, NulldataScript):
return 0
return 0.01
# constants to be consumed by the backend
Constants = namedtuple('Constants', [
'name',
'shortname',
'base58_prefixes',
'base58_raw_prefixes',
'bech32_hrp',
'bech32_net',
'xkeys_prefix',
'xpub_version',
'xprv_version',
'wif_prefix',
'from_unit',
'to_unit',
'min_tx_fee',
'tx_timestamp',
'tx_out_cls',
'op_return_max_bytes'
])
'''
Network name should be lowercase, for testnet append "-testnet".
For abbreviation prefix testnet of the network with "t".
'''
PeercoinMainnet = Constants(
name='peercoin',
shortname='ppc',
base58_prefixes={
'P': 'p2pkh',
'p': 'p2sh',
},
base58_raw_prefixes={
'p2pkh': bytearray(b'\x37'),
'p2sh': bytearray(b'\x75'),
},
bech32_hrp='bc',
bech32_net='mainnet',
xkeys_prefix='x',
xpub_version=b'\x04\x88\xb2\x1e',
xprv_version=b'\x04\x88\xad\xe4',
wif_prefix=0xb7,
from_unit=Decimal('1e-6'),
to_unit=Decimal('1e6'),
min_tx_fee=Decimal(0.01),
tx_timestamp=True,
tx_out_cls=PeercoinTxOut,
op_return_max_bytes=80
)
PeercoinTestnet = Constants(
name='peercoin-testnet',
shortname='tppc',
base58_prefixes={
'm': 'p2pkh',
'n': 'p2pkh',
},
base58_raw_prefixes={
'p2pkh': bytearray(b'\x6f'),
'p2sh': bytearray(b'\xc4'),
},
bech32_hrp='tb',
bech32_net='testnet',
xkeys_prefix='t',
xpub_version=b'\x04\x35\x87\xcf',
xprv_version=b'\x04\x35\x83\x94',
wif_prefix=0xef,
from_unit=Decimal('1e-6'),
to_unit=Decimal('1e6'),
min_tx_fee=Decimal(0.001),
tx_timestamp=True,
tx_out_cls=PeercoinTxOut,
op_return_max_bytes=256
)
networks = (PeercoinMainnet, PeercoinTestnet,)
def net_query(name: str) -> Constants:
'''Find the NetworkParams for a network by its long or short name. Raises
UnsupportedNetwork if no NetworkParams is found.
'''
for net_params in networks:
if name in (net_params.name, net_params.shortname,):
return net_params
raise UnsupportedNetwork
|
Python
| 0.000001
|
@@ -1509,10 +1509,11 @@
tes=
-80
+256
%0A)%0A%0A
@@ -2022,17 +2022,16 @@
cimal(0.
-0
01),%0A
|
707c0a38d6cac6f3f481e8e3c401eee61477a4bf
|
print diagnostics
|
scripts/bg_ols_multiprocessing.py
|
scripts/bg_ols_multiprocessing.py
|
"""
author: Marusa Zerjal 2019 - 07 - 29
Determine background overlaps using means and covariances for both
background and stars.
Covariance matrices for the background are Identity*bandwidth.
Parameters
----------
background_means: [nstars,6] float array_like
Phase-space positions of some star set that greatly envelops points
in question. Typically contents of gaia_xyzuvw.npy, or the output of
>> tabletool.build_data_dict_from_table(
'../data/gaia_cartesian_full_6d_table.fits',
historical=True)['means']
star_means: [npoints,6] float array_like
Phase-space positions of stellar data that we are fitting components to
star_covs: [npoints,6,6] float array_like
Phase-space covariances of stellar data that we are fitting components to
Output is a file with ln_bg_ols. Same order as input datafile.
No return.
bg_lnols: [nstars] float array_like
Background log overlaps of stars with background probability density
function.
Notes
-----
We invert the vertical values (Z and U) because the typical background
density should be symmetric along the vertical axis, and this distances
stars from their siblings. I.e. association stars aren't assigned
higher background overlaps by virtue of being an association star.
Edits
-----
TC 2019-05-28: changed signature such that it follows similar usage as
get_kernel_densitites
"""
from __future__ import print_function, division
import numpy as np
import itertools
from mpi4py import MPI
import time
import logging
# The placement of logsumexp varies wildly between scipy versions
import scipy
_SCIPY_VERSION= [int(v.split('rc')[0])
for v in scipy.__version__.split('.')]
if _SCIPY_VERSION[0] == 0 and _SCIPY_VERSION[1] < 10:
from scipy.maxentropy import logsumexp
elif ((_SCIPY_VERSION[0] == 1 and _SCIPY_VERSION[1] >= 3) or
_SCIPY_VERSION[0] > 1):
from scipy.special import logsumexp
else:
from scipy.misc import logsumexp
import sys
sys.path.insert(0, '..')
from chronostar import tabletool
try:
print('Using C implementation')
#from _overlap import get_lnoverlaps
from chronostar._overlap import get_lnoverlaps
except:
print("WARNING: Couldn't import C implementation, using slow pythonic overlap instead")
logging.info("WARNING: Couldn't import C implementation, using slow pythonic overlap instead")
from chronostar.likelihood import slow_get_lnoverlaps as get_lnoverlaps
def log_message(msg, symbol='.', surround=False):
"""Little formatting helper"""
res = '{}{:^40}{}'.format(5*symbol, msg, 5*symbol)
if surround:
res = '\n{}\n{}\n{}'.format(50*symbol, res, 50*symbol)
logging.info(res)
comm = MPI.COMM_WORLD
size=comm.Get_size()
rank=comm.Get_rank()
if rank == 0:
# PREPARE STELLAR DATA
datafile = '../scocen/data_table_cartesian_100k.fits' # SHOULD BE CARTESIAN
data_table = tabletool.read(datafile)
historical = 'c_XU' in data_table.colnames
#data_table = data_table[:20] #TODO for testing
print('DATA_TABLE READ', len(data_table))
data_dict = tabletool.build_data_dict_from_table(
data_table,
get_background_overlaps=False, # bg overlap not available yet
historical=historical,
)
star_means = data_dict['means']
star_covs = data_dict['covs']
# PREPARE BACKGROUND DATA
print('Read background Gaia data')
background_means = tabletool.build_data_dict_from_table(
'/home/tcrun/chronostar/data/gaia_cartesian_full_6d_table.fits',
only_means=True,
)
# Inverting the vertical values
star_means = np.copy(star_means)
star_means[:, 2] *= -1
star_means[:, 5] *= -1
# Background covs with bandwidth using Scott's rule
d = 6.0 # number of dimensions
nstars = background_means.shape[0]
bandwidth = nstars**(-1.0 / (d + 4.0))
background_cov = np.cov(background_means.T) * bandwidth ** 2
background_covs = np.array(nstars * [background_cov]) # same cov for every star
# SPLIT DATA into multiple processes
indices_chunks = np.array_split(range(len(star_means)), size)
star_means = [star_means[i] for i in indices_chunks]
star_covs = [star_covs[i] for i in indices_chunks]
#TODO: delete the time line
time_start = time.time()
else:
nstars=None
star_means=None
star_covs=None
background_means=None
background_covs=None
# BROADCAST CONSTANTS
nstars = comm.bcast(nstars, root=0)
background_means = comm.bcast(background_means, root=0)
background_covs = comm.bcast(background_covs, root=0)
# SCATTER DATA
star_means = comm.scatter(star_means, root=0)
star_covs = comm.scatter(star_covs, root=0)
#print(rank, len(star_means))
# EVERY PROCESS DOES THIS FOR ITS DATA
bg_ln_ols=[]
for star_cov, star_mean in zip(star_covs, star_means):
try:
bg_lnol = get_lnoverlaps(star_cov, star_mean, background_covs,
background_means, nstars)
bg_lnol = logsumexp(bg_lnol) # sum in linear space
except:
# TC: Changed sign to negative (surely if it fails, we want it to
# have a neglible background overlap?
print('bg ln overlap failed, setting it to -inf')
bg_lnol = -np.inf
bg_ln_ols.append(bg_lnol)
#print(rank, bg_ln_ols)
# GATHER DATA
bg_ln_ols_result = comm.gather(bg_ln_ols, root=0)
if rank == 0:
bg_ln_ols_result = list(itertools.chain.from_iterable(bg_ln_ols_result))
np.savetxt('bgols_multiprocessing.dat', bg_ln_ols_result)
time_end = time.time()
print(rank, 'done', time_end - time_start)
#print('master collected: ', bg_ln_ols_result)
|
Python
| 0.000001
|
@@ -4272,16 +4272,35 @@
me line%0A
+ print('Start')%0A
time
|
3d02b8368b6fa43bf66600110c22da323590ec0b
|
Bump flake8-bugbear from 19.3.0 to 19.8.0
|
setup.py
|
setup.py
|
# -*- coding: utf-8 -*-
import codecs
import re
import sys
from setuptools import setup
INSTALL_REQUIRES = ["click>=4.0", "click-completion>=0.3.1", "click-didyoumean>=0.0.3"]
if "win32" in str(sys.platform).lower():
# Terminal colors for Windows
INSTALL_REQUIRES.append("colorama>=0.2.4")
EXTRAS_REQUIRE = {
"tests": [
"pytest",
'IPython<6; python_version < "3"',
'IPython==6.5.0; python_version >= "3"',
],
"lint": [
"flake8==3.7.8",
'flake8-bugbear==19.3.0; python_version >= "3.5"',
"pre-commit==1.18.0",
],
}
EXTRAS_REQUIRE["dev"] = EXTRAS_REQUIRE["tests"] + EXTRAS_REQUIRE["lint"] + ["tox"]
def find_version(fname):
"""Attempts to find the version number in the file names fname.
Raises RuntimeError if not found.
"""
version = ""
with codecs.open(fname, "r", encoding="utf-8") as fp:
reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
for line in fp:
m = reg.match(line)
if m:
version = m.group(1)
break
if not version:
raise RuntimeError("Cannot find version information")
return version
def read(fname):
with codecs.open(fname, "r", encoding="utf-8") as fp:
content = fp.read()
return content
setup(
name="doitlive",
version=find_version("doitlive/__version__.py"),
description="Because sometimes you need to do it live.",
long_description=read("README.rst"),
author="Steven Loria",
author_email="sloria1@gmail.com",
url="https://github.com/sloria/doitlive",
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
license="MIT",
zip_safe=False,
keywords="doitlive cli live coding presentations shell",
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Environment :: Console",
],
packages=["doitlive"],
entry_points={"console_scripts": ["doitlive = doitlive.cli:cli"]},
tests_require=["pytest"],
project_urls={
"Bug Reports": "https://github.com/sloria/doitlive/issues",
"Source": "https://github.com/sloria/doitlive/",
},
)
|
Python
| 0.000001
|
@@ -513,17 +513,17 @@
ear==19.
-3
+8
.0; pyth
|
2dbae58c0b520c3e59f587fafd766b8a39de661b
|
Make A, not A9 files the default.
|
python/hxActor/subaru.py
|
python/hxActor/subaru.py
|
import logging
import multiprocessing
import socket
import astropy.io.fits as pyfits
headerAddr = 'rhodey', 6666
def fetchSeqno(prefix='A9', instrument='CRS'):
""" Request frame_id from Gen2. """
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
query = "seqno %s\n" % (prefix)
logging.info("sending query: %s ", query[:-1])
try:
# Connect to server and send data
sock.connect(headerAddr)
sock.sendall(query)
except Exception as e:
logging.error("failed to send: %s" % (e))
received = ""
return '%s%s%0*d' % (instrument, prefix, 9-len(prefix), 9999)
logging.debug("sent query: %s ", query[:-1])
try:
received = ""
while True:
# Receive data from the server and shut down
oneBlock = sock.recv(1024)
logging.debug("received: %s", oneBlock)
received = received + oneBlock
if len(received) >= 12:
break
except Exception as e:
logging.error("failed to read: %s" % (e))
received = ""
finally:
sock.close()
logging.debug("final received: %s", received)
return received
def fetchHeader(fullHeader=True, frameid=9999, mode=1, itime=0.0):
"""Request FITS cards from the Gen2 side. """
try:
gen2Frameid = "CRSA%08d" % (frameid)
except:
gen2Frameid = 'None'
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
query = "hdr %s %s %0.2f %s\n" % (gen2Frameid, mode, itime, fullHeader)
logging.info("sending query: %s ", query[:-1])
try:
# Connect to server and send data
sock.connect(headerAddr)
sock.sendall(query)
except Exception as e:
logging.error("failed to send: %s" % (e))
received = ""
return pyfits.Header()
logging.debug("sent query: %s ", query[:-1])
try:
received = ""
while True:
# Receive data from the server and shut down
oneBlock = sock.recv(2880)
logging.debug("received: %s", oneBlock)
received = received + oneBlock
if received.strip().endswith('END'):
break
except Exception as e:
logging.error("failed to read: %s" % (e))
received = ""
finally:
sock.close()
logging.debug("final received: %s", len(received) / 80.0)
hdr = pyfits.Header.fromstring(received)
logging.info("read %d bytes, %0.4f blocks, header len=%d" % (len(received), len(received) / 2880.0, len(hdr)))
return hdr
class FetchHeader(multiprocessing.Process):
def __init__(self, logger=None, fullHeader=True, timeLimit=15, frameId=9999, itime=0.0):
super(FetchHeader, self).__init__(name="FetchHeader")
self.daemon = True
self.q = multiprocessing.Queue()
self.timeLimit = timeLimit
self.frameId = frameId
self.itime = itime
self.fullHeader = fullHeader
if logger is None:
self.logger = logging.getLogger('fetchHeader')
self.logger.setLevel(logging.DEBUG)
self.logger.debug('inited process %s (frameId=%s)' % (self.name, frameId))
def run(self):
self.logger.info('starting process %s (%s)' % (self.name, self.frameId))
try:
hdr = fetchHeader(self.fullHeader,self.frameId, mode=1, itime=self.itime)
hdrString = hdr.tostring()
except Exception as e:
self.logger.warn('fetchHeader failed: %s', e)
self.q.put(pyfits.Header().tostring())
return
self.logger.info('header: %s' % (len(hdrString)))
self.q.put(hdrString)
|
Python
| 0
|
@@ -133,17 +133,16 @@
refix='A
-9
', instr
|
6cee45e34feed064f81535b314a29c48a49b7089
|
fix returned value in fibre for RSS API
|
python/marvin/api/rss.py
|
python/marvin/api/rss.py
|
#!/usr/bin/env python
# encoding: utf-8
#
# rss.py
#
# Licensed under a 3-clause BSD license.
#
# Revision history:
# 11 Apr 2016 J. Sánchez-Gallego
# Initial version
from __future__ import division
from __future__ import print_function
from flask import jsonify
from flask_classy import route
from marvin.tools.rss import RSS
from marvin.api.base import BaseView, arg_validate as av
from marvin.core.exceptions import MarvinError
from marvin.utils.general import parseIdentifier
def _getRSS(name, **kwargs):
"""Retrieves a RSS Marvin object."""
rss = None
results = {}
# Pop the release to remove a duplicate input to Maps
release = kwargs.pop('release', None)
# parse name into either mangaid or plateifu
try:
idtype = parseIdentifier(name)
except Exception as e:
results['error'] = 'Failed to parse input name {0}: {1}'.format(name, str(e))
return rss, results
try:
if idtype == 'plateifu':
plateifu = name
mangaid = None
elif idtype == 'mangaid':
mangaid = name
plateifu = None
else:
raise MarvinError('invalid plateifu or mangaid: {0}'.format(idtype))
rss = RSS(mangaid=mangaid, plateifu=plateifu, mode='local', release=release)
results['status'] = 1
except Exception as e:
results['error'] = 'Failed to retrieve RSS {0}: {1}'.format(name, str(e))
return rss, results
class RSSView(BaseView):
"""Class describing API calls related to RSS files."""
route_base = '/rss/'
@route('/<name>/', methods=['GET', 'POST'], endpoint='getRSS')
@av.check_args()
def get(self, args, name):
"""This method performs a get request at the url route /rss/<id>.
.. :quickref: RSS; Get an RSS given a plate-ifu or mangaid
:param name: The name of the cube as plate-ifu or mangaid
:form release: the release of MaNGA
:resjson int status: status of response. 1 if good, -1 if bad.
:resjson string error: error message, null if None
:resjson json inconfig: json of incoming configuration
:resjson json utahconfig: json of outcoming configuration
:resjson string traceback: traceback of an error, null if None
:resjson json data: dictionary of returned data
:json string empty: the data dict is empty
:resheader Content-Type: application/json
:statuscode 200: no error
:statuscode 422: invalid input parameters
**Example request**:
.. sourcecode:: http
GET /marvin2/api/rss/8485-1901/ HTTP/1.1
Host: api.sdss.org
Accept: application/json, */*
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"status": 1,
"error": null,
"inconfig": {"release": "MPL-5"},
"utahconfig": {"release": "MPL-5", "mode": "local"},
"traceback": null,
"data": {}
}
"""
# Pop any args we don't want going into Rss
args = self._pop_args(args, arglist='name')
rss, results = _getRSS(name, **args)
self.update_results(results)
if rss:
# For now we don't return anything here, maybe later.
self.results['data'] = {}
return jsonify(self.results)
@route('/<name>/fibers/', methods=['GET', 'POST'], endpoint='getRSSAllFibers')
@av.check_args()
def getAllFibers(self, args, name):
"""Returns a list of all the flux, ivar, mask, and wavelength arrays for all fibres.
.. :quickref: RSS; Get a list of flux, ivar, mask, and wavelength arrays for all fibers
:param name: The name of the cube as plate-ifu or mangaid
:form release: the release of MaNGA
:resjson int status: status of response. 1 if good, -1 if bad.
:resjson string error: error message, null if None
:resjson json inconfig: json of incoming configuration
:resjson json utahconfig: json of outcoming configuration
:resjson string traceback: traceback of an error, null if None
:resjson json data: dictionary of returned data
:json list rssfiber: the flux, ivar, mask arrays for the given rssfiber index
:json list wavelength: the wavelength arrays for all fibers
:resheader Content-Type: application/json
:statuscode 200: no error
:statuscode 422: invalid input parameters
**Example request**:
.. sourcecode:: http
GET /marvin2/api/rss/8485-1901/fibers/ HTTP/1.1
Host: api.sdss.org
Accept: application/json, */*
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"status": 1,
"error": null,
"inconfig": {"release": "MPL-5"},
"utahconfig": {"release": "MPL-5", "mode": "local"},
"traceback": null,
"data": {"wavelength": [3621.6, 3622.43, 3623.26, ...],
"0": [flux, ivar, mask],
"1": [flux, ivar, mask],
...
"170": [flux, ivar, mask]
}
}
"""
# Pop any args we don't want going into Rss
args = self._pop_args(args, arglist='name')
rss, results = _getRSS(name, **args)
self.update_results(results)
if rss:
self.results['data'] = {}
self.results['data']['wavelength'] = rss[0].wavelength.tolist()
for ii, fiber in enumerate(rss):
flux = fiber.flux.tolist()
ivar = fiber.ivar.tolist()
mask = fiber.mask.tolist()
self.results['data'][ii] = [flux, ivar, mask]
return jsonify(self.results)
|
Python
| 0
|
@@ -5787,20 +5787,21 @@
= fiber.
-flux
+value
.tolist(
|
b31f6cc920a99fe4e4d17f823d4e2b24f7ea7e6a
|
bump version
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from distutils.command.install import INSTALL_SCHEMES
from os.path import dirname, join, abspath
from setuptools import setup
from setuptools.command.install import install
for scheme in INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
setup_args = {
'cmdclass': {'install': install},
'name': 'nerodia',
'version': "0.1.0",
'license': 'MIT',
'description': 'Python port of WATIR',
'long_description': open(join(abspath(dirname(__file__)), 'README.rst')).read(),
'url': 'https://github.com/lmtierney/nerodia',
'classifiers': ['Intended Audience :: Developers',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Libraries',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'],
'install_requires': ['selenium', 'six'],
'package_dir': {'nerodia': 'nerodia'},
'packages': ['nerodia',
'nerodia.elements',
'nerodia.locators',
'nerodia.locators.button',
'nerodia.locators.cell',
'nerodia.locators.element',
'nerodia.locators.row',
'nerodia.locators.text_area',
'nerodia.locators.text_field',
'nerodia.wait'],
'package_data': {
'nerodia.atoms': ['*.js']
},
'data_files': [('nerodia/atoms', ['nerodia/atoms/fireEvent.js']),
('nerodia/atoms', ['nerodia/atoms/getInnerHtml.js']),
('nerodia/atoms', ['nerodia/atoms/getOuterHtml.js']),
('nerodia/atoms', ['nerodia/atoms/selectText.js'])],
'zip_safe': False
}
setup(**setup_args)
|
Python
| 0
|
@@ -367,17 +367,17 @@
': %220.1.
-0
+3
%22,%0A '
|
7d97f7e6d7c467fda4b2aea4d028ee376f9c71d3
|
Bump version.
|
setup.py
|
setup.py
|
from distutils.core import setup
setup(name='pyrc',
version='0.6.0',
description='Simple, clean Python IRC library',
author='David Peter',
author_email='david.a.peter@gmail.com',
url='http://github.com/sarenji/pyrc',
packages=['pyrc', 'pyrc/utils'],
classifiers=[
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Environment :: Console',
'Development Status :: 3 - Alpha',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
Python
| 0
|
@@ -64,17 +64,17 @@
on='0.6.
-0
+1
',%0A
|
0e2593f863b56ffeb4df4e8fbec9d5d7866574de
|
Remove unused code
|
pysswords/db/database.py
|
pysswords/db/database.py
|
import fnmatch
import os
import re
import yaml
from pysswords.crypt import create_keyring, getgpg, is_encrypted
from .credential import (
Credential,
CredentialNotFoundError,
CredentialExistsError,
content,
expandpath,
exists,
clean,
asstring
)
from pysswords.python_two import makedirs
class Database(object):
def __init__(self, path):
self.path = path
self.keys_path = os.path.join(self.path, ".keys")
self.gpg = getgpg(self.keys_path)
@classmethod
def create(cls, path, passphrase):
os.makedirs(path)
create_keyring(os.path.join(path, ".keys"), passphrase)
return Database(path)
@property
def credentials(self):
creds = []
for root, dirnames, filenames in os.walk(self.path):
for filename in fnmatch.filter(filenames, '*.pyssword'):
with open(os.path.join(root, filename)) as f:
creds.append(yaml.load(f))
return creds
def key(self, private=False):
key = next(k for k in self.gpg.list_keys(secret=private))
return key.get("fingerprint")
def build_credential(self, name, login, password, comment, encrypt=True):
if encrypt and not is_encrypted(password):
password = self.encrypt(password)
return Credential(
name=name,
login=login,
password=password,
comment=comment
)
def write_credential(self, credential):
if exists(self.path, credential.name, credential.login):
raise CredentialExistsError()
cred_path = expandpath(self.path, credential.name, credential.login)
makedirs(os.path.dirname(cred_path), exist_ok=True)
with open(cred_path, "w") as f:
f.write(content(credential))
return cred_path
def add(self, name, login, password, comment):
credential = self.build_credential(name, login, password, comment)
self.write_credential(credential)
return credential
def update(self, name, login, to_update):
found = self.get(name, login)
if not found:
raise CredentialNotFoundError()
updated = []
for credential in found:
new_credential = self.build_credential(
name=to_update.get("name", credential.name),
login=to_update.get("login", credential.login),
password=to_update.get("password", credential.password),
comment=to_update.get("comment", credential.comment),
encrypt=True if to_update.get("password") else False
)
self.remove(credential.name, credential.login)
self.add(
name=new_credential.name,
login=new_credential.login,
password=new_credential.password,
comment=new_credential.comment,
)
updated.append(new_credential)
return updated
def remove(self, name, login):
found = self.get(name, login)
if not found:
raise CredentialNotFoundError()
for credential in found:
clean(self.path, credential.name, credential.login)
def get(self, name, login=None):
found = [c for c in self.credentials
if c.name == name and ((login is None) or c.login == login)]
if not found:
raise CredentialNotFoundError()
else:
return found
def search(self, query):
rgx = re.compile(query)
return [c for c in self.credentials if rgx.search(asstring(c))]
def encrypt(self, text):
encrypted = self.gpg.encrypt(
text,
self.key(),
cipher_algo="AES256")
return str(encrypted)
def decrypt(self, text, passphrase):
decrypted = str(self.gpg.decrypt(text, passphrase=passphrase))
return decrypted
def check(self, passphrase):
sign = self.gpg.sign(
"testing",
default_key=self.key(True),
passphrase=passphrase
)
return True if sign else False
|
Python
| 0.000006
|
@@ -2132,75 +2132,8 @@
in)%0A
- if not found:%0A raise CredentialNotFoundError()%0A%0A
@@ -3008,75 +3008,8 @@
in)%0A
- if not found:%0A raise CredentialNotFoundError()%0A%0A
|
4ed7c876e825b6fa28d31ed257ecbd0023cff605
|
handle missing db
|
pytest_cagoule/select.py
|
pytest_cagoule/select.py
|
from itertools import chain
import os
import re
import sqlite3
import six
from . import DB_FILE
spec_re = re.compile(
r'(?P<filename>[^:]+)(:(?P<start_line>\d+))?(-(?P<end_line>\d+))?'
)
def parse_spec(spec):
match = spec_re.match(spec)
if match is None:
return []
matches = match.groupdict()
filename = matches['filename']
start_line = matches.get('start_line')
if start_line is not None:
start_line = int(start_line)
end_line = matches.get('end_line')
if end_line is not None:
end_line = int(end_line)
return filename, start_line, end_line
def get_query(specs):
query_list = []
params_list = []
for spec in specs:
query, params = get_spec_filter(spec)
query_list.append(query)
params_list.append(params)
if query_list:
clauses = '\n OR '.join(map("({})".format, query_list))
filters = """
WHERE
{}
""".format(clauses)
else:
return None, None
full_params = tuple(chain(*params_list))
full_query = """
SELECT DISTINCT(node_id) FROM coverage
{}
ORDER BY node_id
""".format(filters)
return full_query, full_params
def get_spec_filter(spec):
# TODO: find where to best do this
if isinstance(spec, six.string_types):
spec = parse_spec(spec)
filename, start_line, end_line = spec
filename = os.path.abspath(filename)
lines_query, line_params = get_line_number_filter(start_line, end_line)
query = 'filename = ? ' + lines_query
params = (filename,) + line_params
return query, params
def get_line_number_filter(start_line, end_line):
if start_line is None:
return '', ()
if end_line is None:
end_line = start_line
lines = tuple(range(start_line, end_line + 1))
query = 'AND ({})'.format(
' OR '.join('line = ?' for line in lines)
)
return query, lines
def get_nodes_from_db(specs):
query, params = get_query(specs)
if query is None:
return []
connection = sqlite3.connect(DB_FILE)
cursor = connection.cursor()
cursor.execute(query, params)
return list(node_id for (node_id,) in cursor.fetchall())
def get_node_ids(specs):
return get_nodes_from_db(specs)
|
Python
| 0.000014
|
@@ -2063,16 +2063,71 @@
urn %5B%5D%0A%0A
+ if not os.path.exists(DB_FILE):%0A return %5B%5D%0A%0A
conn
|
91b7b88e8659d80ecc49c0e4c86ce0935a785b3b
|
Version 0.2
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
setup(name='vscode-launcher-tray',
version='0.1.0',
description='A system tray tool to launch Visual Studio code quickly.',
author='Yan-ren Tsai',
author_email='elleryq@gmail.com',
url='https://github.com/elleryq/vscode-launcher-tray',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Utilities',
# Pick your license as you wish (should match"license"above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='tools',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# install_requires=['PyQt5'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
# include_package_data=True, # If use MANIFEST.in
package_data={
'': ['*.txt', '*.rst', '*.md'],
# And include any *.msg files found in the 'hello' package, too:
# 'hello': ['*.msg'],
'vscode_launcher_tray': ['pixmaps/*.png', 'data/*.desktop'],
},
data_files=[
('share/applications', ['vscode_launcher_tray/data/vscode-launcher-tray.desktop']),
('share/pixmaps', ['vscode_launcher_tray/pixmaps/vscode-launcher-tray.png']),
],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'vscode-launcher-tray=vscode_launcher_tray:main',
],
},
)
|
Python
| 0.000001
|
@@ -205,9 +205,9 @@
='0.
-1
+2
.0',
|
30423ad07fa5613eecfe140364aa6c2c3a0f94cd
|
Fix import.
|
cumulusci/utils/yaml/tests/test_model_parser.py
|
cumulusci/utils/yaml/tests/test_model_parser.py
|
from io import StringIO
from unittest.mock import Mock
import pytest
from cumulusci.utils.yaml.model_parser import (
CCIDictModel,
CCIModel,
ValidationError,
Field,
)
class Foo(CCIModel):
bar: str = None
fields_ = Field([], alias="fields")
class Document(CCIModel):
__root__: Foo
class TestCCIModel:
def test_fields_property(self):
# JSON is YAML. Strange but true.
foo = Document.parse_from_yaml(StringIO("{bar: 'blah'}"))
assert type(foo) == Foo
assert foo.fields_ == []
assert foo.fields == []
foo = Document.parse_from_yaml(StringIO("{bar: 'blah', fields: [1,2]}"))
assert foo.fields == [1, 2]
foo.fields = ["a", "b"]
assert foo.fields == ["a", "b"]
def test_parse_from_dict(self):
assert Document.parse_obj({"bar": "blah"})
def test_validate_data__success(self):
assert Document.validate_data({"bar": "blah"})
def test_validate_data__without_error_handler(self):
assert not Document.validate_data({"foo": "fail"}, context="pytest")
def test_validate_data__with_error_handler(self):
lf = Mock()
assert not Document.validate_data(
{"foo": "fail"}, context="pytest", on_error=lf
)
lf.assert_called()
assert "pytest" in str(lf.mock_calls[0][1][0])
assert "foo" in str(lf.mock_calls[0][1][0])
def test_validate_on_error_param(self):
with pytest.raises(Exception) as e:
assert not Document.validate_data({"qqq": "zzz"}, on_error="barn")
assert e.value.__class__ in [ValueError, TypeError]
def test_getattr_missing(self):
with pytest.raises(AttributeError):
x = Document.parse_obj({})
assert x
x.foo
def test_error_messages(self):
class FooWithError(CCIModel):
bar: int = None
class DocumentWithError(CCIModel):
__root__: FooWithError
s = StringIO("{bar: 'blah'}")
s.name = "some_filename"
with pytest.raises(ValidationError) as e:
DocumentWithError.parse_from_yaml(s)
assert "some_filename" in str(e.value)
def test_error_messages__nested(self):
class Foo(CCIModel):
bar: int # required
class Bar(CCIModel):
foo: Foo = None
class Baz(CCIModel):
bar: Bar = None
class Document(CCIModel):
__root__: Baz
s = StringIO("{bar: {foo: {}}}")
s.name = "some_filename"
with pytest.raises(ValidationError) as e:
Document.parse_from_yaml(s)
assert "some_filename" in str(e.value)
def test_fields_no_alias(self):
class Foo(CCIDictModel):
bar: str = None
x = Foo.parse_obj({})
assert x
with pytest.raises(AttributeError):
x.fields
class TestCCIDictModel:
def test_fields_items(self):
class Foo(CCIDictModel):
bar: str = None
fields_ = Field([], alias="fields")
class Document(CCIDictModel):
__root__: Foo
# JSON is YAML. Strange but true.
foo = Document.parse_from_yaml(StringIO("{bar: 'blah'}"))
assert type(foo) == Foo
assert foo["fields"] == []
foo = Document.parse_from_yaml(StringIO("{bar: 'blah', fields: [1,2]}"))
assert foo["fields"] == [1, 2]
foo["fields"] = ["a", "b"]
assert foo["fields"] == ["a", "b"]
def test_getitem_missing(self):
class Foo(CCIDictModel):
bar: str = None
fields_ = Field([], alias="fields")
x = Foo.parse_obj({})
assert x
with pytest.raises(IndexError):
x["foo"]
assert "bar" in x
assert "fields" in x
assert x["fields"] == []
def test_get(self):
class Foo(CCIDictModel):
bar: str = None
fields_ = Field([], alias="fields")
x = Foo.parse_obj({"bar": "q"})
assert x.get("bar") == x.bar == x["bar"] == "q"
assert x.get("xyzzy", 0) == 0
assert x.get("xyzzy") is None
assert x.get("fields") == []
def test_del(self):
class Foo(CCIDictModel):
bar: str = None
fields_ = Field([], alias="fields")
x = Foo.parse_obj({"bar": "q"})
assert x["bar"] == x.bar == "q"
assert "bar" in x
del x["bar"]
assert "bar" not in x
assert x.get("bar") is None
assert x["fields"] == x.fields == []
assert "fields" in x
del x["fields"]
assert "fields" not in x
assert x.get("fields") is None
|
Python
| 0
|
@@ -168,20 +168,36 @@
or,%0A
-
+)%0Afrom pydantic import
Field
-,%0A)
%0A%0A%0Ac
|
b3890ace8f1f6d849e8bd6b01465b3388629335b
|
add aes param
|
toughradius/manage/webserver.py
|
toughradius/manage/webserver.py
|
#!/usr/bin/env python
#coding:utf-8
import sys
import os
import time
import importlib
import cyclone.web
from twisted.python import log
from twisted.internet import reactor
from mako.lookup import TemplateLookup
from sqlalchemy.orm import scoped_session, sessionmaker
from toughlib import logger, utils, dispatch
from toughradius.manage import models
from toughlib.dbengine import get_engine
from toughlib.permit import permit, load_handlers
from toughradius.manage.settings import *
from toughlib import db_session as session
from toughlib import db_cache as cache
from toughlib import dispatch
from toughlib.db_backup import DBBackup
import toughradius
class WebManageServer(cyclone.web.Application):
def __init__(self, config=None, dbengine=None, **kwargs):
self.config = config
settings = dict(
cookie_secret="12oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
login_url="/admin/login",
template_path=os.path.join(os.path.dirname(__file__), "views"),
static_path=os.path.join(os.path.dirname(toughradius.__file__), "static"),
xsrf_cookies=True,
config=self.config,
debug=self.config.system.debug,
xheaders=True,
)
self.tp_lookup = TemplateLookup(
directories=[settings['template_path']],
default_filters=['decode.utf8'],
input_encoding='utf-8',
output_encoding='utf-8',
encoding_errors='replace',
module_directory="/tmp/admin"
)
self.db_engine = dbengine or get_engine(config)
self.db = scoped_session(sessionmaker(bind=self.db_engine, autocommit=False, autoflush=False))
self.session_manager = session.SessionManager(settings["cookie_secret"], self.db_engine, 600)
self.mcache = cache.CacheManager(self.db_engine)
self.db_backup = DBBackup(models.get_metadata(self.db_engine), excludes=[
'tr_online','system_session','system_cache','tr_ticket'])
self.aes = utils.AESCipher(key=self.config.system.secret)
permit.add_route(cyclone.web.StaticFileHandler,
r"/admin/backup/download/(.*)",
u"下载数据",
MenuSys,
handle_params={"path": self.config.database.backup_path},
order=1.0405)
self.init_route()
# cache event init
dispatch.register(self.mcache)
# app event init
event_path = os.path.join(os.path.abspath(os.path.dirname(toughradius.manage.events.__file__)))
pkg_prefix="toughradius.manage.events"
self.load_events(event_path,pkg_prefix)
cyclone.web.Application.__init__(self, permit.all_handlers, **settings)
def init_route(self):
handler_path = os.path.join(os.path.abspath(os.path.dirname(__file__)))
load_handlers(handler_path=handler_path, pkg_prefix="toughradius.manage",
excludes=['views','webserver','radius'])
conn = self.db()
try:
oprs = conn.query(models.TrOperator)
for opr in oprs:
if opr.operator_type > 0:
for rule in self.db.query(models.TrOperatorRule).filter_by(operator_name=opr.operator_name):
permit.bind_opr(rule.operator_name, rule.rule_path)
elif opr.operator_type == 0: # 超级管理员授权所有
permit.bind_super(opr.operator_name)
except Exception as err:
dispatch.pub(logger.EVENT_ERROR,"init route error , %s" % str(err))
finally:
conn.close()
def load_events(self,event_path=None,pkg_prefix=None):
_excludes = ['__init__','settings']
evs = set(os.path.splitext(it)[0] for it in os.listdir(event_path))
evs = [it for it in evs if it not in _excludes]
for ev in evs:
try:
sub_module = os.path.join(event_path, ev)
if os.path.isdir(sub_module):
dispatch.pub(logger.EVENT_INFO,'load sub event %s' % ev)
self.load_events(
event_path=sub_module,
pkg_prefix="{0}.{1}".format(pkg_prefix, ev)
)
_ev = "{0}.{1}".format(pkg_prefix, ev)
dispatch.pub(logger.EVENT_INFO,'load_event %s' % _ev)
dispatch.register(importlib.import_module(_ev).__call__(
dbengine=self.db_engine, mcache=self.mcache))
except Exception as err:
dispatch.pub(logger.EVENT_EXCEPTION,err)
dispatch.pub(logger.EVENT_ERROR,"%s, skip event %s.%s" % (str(err),pkg_prefix,ev))
continue
def run(config, dbengine):
app = WebManageServer(config, dbengine)
reactor.listenTCP(int(config.admin.port), app, interface=config.admin.host)
|
Python
| 0.000001
|
@@ -4534,16 +4534,30 @@
f.mcache
+, aes=self.aes
))%0A
|
6c1750336e09e6ed2a48413aedc1142d8d7dd39f
|
Remove font tag.
|
machines/stealth.py
|
machines/stealth.py
|
# coding: utf-8
from pcounter import pcounter, util
COUNT_INDEX_STEALTH_CHANCETIME = pcounter.COUNT_INDEX.USER
def init():
return pcounter.ICounter("stealth", switchon_handler,
switchoff_handler,
output_handler)
def switchon_handler(cbittype, iostatus, counts, history):
if cbittype == pcounter.USBIO_BIT.COUNT:
counts[pcounter.COUNT_INDEX.COUNT] += 1
if not util.bit_is_enable(iostatus, pcounter.USBIO_BIT.CHANCE):
counts[pcounter.COUNT_INDEX.TOTALCOUNT] += 1
if cbittype == pcounter.USBIO_BIT.BONUS:
counts[pcounter.COUNT_INDEX.BONUS] += 1
counts[COUNT_INDEX_STEALTH_CHANCETIME] = 1
if util.bit_is_enable(iostatus, pcounter.USBIO_BIT.CHANCE): # チャンス中なら
counts[pcounter.COUNT_INDEX.CHAIN] += 1
if cbittype == pcounter.USBIO_BIT.CHANCE:
counts[pcounter.COUNT_INDEX.CHANCE] += 1
history.append((None, counts[pcounter.COUNT_INDEX.COUNT]))
if cbittype == pcounter.USBIO_BIT.SBONUS:
counts[pcounter.COUNT_INDEX.SBONUS] += 1
def switchoff_handler(cbittype, iostatus, counts, history):
if cbittype == pcounter.USBIO_BIT.BONUS:
counts[pcounter.COUNT_INDEX.COUNT] = 0
if cbittype == pcounter.USBIO_BIT.CHANCE:
counts[pcounter.COUNT_INDEX.CHAIN] = 0
counts[COUNT_INDEX_STEALTH_CHANCETIME] = 0
def output_handler(counts, history):
display_data = {
'nowcount' : util.decolate_number(counts[pcounter.COUNT_INDEX.COUNT], 3),
'totalcount' : util.decolate_number(counts[pcounter.COUNT_INDEX.TOTALCOUNT], 4),
'bonus' : util.decolate_number(counts[pcounter.COUNT_INDEX.BONUS], 2),
'firstbonus' : util.decolate_number(counts[pcounter.COUNT_INDEX.CHANCE], 2),
'bonusrate' : util.gen_bonusrate(counts[pcounter.COUNT_INDEX.TOTALCOUNT],
counts[pcounter.COUNT_INDEX.CHANCE]),
'chain' : util.gen_chain(counts[pcounter.COUNT_INDEX.CHAIN], "Chain"),
'history' : util.gen_history(history, 3, sep=' ', isfill=True),
}
gamecount_fmt = (
'<u>START</u>\n'
'<span size="x-large">{nowcount}</span>/{totalcount}\n'
'<u>BONUS</u>\n'
'<span size="large">{bonus}</span>/{firstbonus}({bonusrate})'
'{chain}'
)
if counts[COUNT_INDEX_STEALTH_CHANCETIME] == 1:
gamecount_fmt = '<span color="#ffff33">' + gamecount_fmt + '</span>'
return ''.join((
'<span font-desc="Sui Generis Regular 12">',
gamecount_fmt,
'</span>'
)).format(**display_data)
|
Python
| 0
|
@@ -174,17 +174,16 @@
handler,
-
%0A
@@ -1717,18 +1717,16 @@
CE%5D, 2),
-
%0A 'bo
@@ -1796,17 +1796,16 @@
LCOUNT%5D,
-
%0A
@@ -2379,136 +2379,21 @@
urn
-''.join((%0A '%3Cspan font-desc=%22Sui Generis Regular 12%22%3E',%0A gamecount_fmt, %0A '%3C/span%3E'%0A ))
+gamecount_fmt
.for
|
b8d377f564d3d650048bc4b20a231a280de92cfe
|
Update setup
|
setup.py
|
setup.py
|
#! /usr/bin/env python
#
# Copyright (C) 2015-2016 Jacob Graving <jgraving@gmail.com>
import os
# temporarily redirect config directory to prevent matplotlib importing
# testing that for writeable directory which results in sandbox error in
# certain easy_install versions
os.environ["MPLCONFIGDIR"] = "."
DESCRIPTION = "pinpoint: behavioral tracking using Python and OpenCV"
LONG_DESCRIPTION = """\
pinpoint is a Python library for generating and tracking 2D barcode tags.
The library uses numpy and matplotlib to generate barcode tags and uses OpenCV to automatically track each tag.
It provides a high-level API for the automated measurement of animal behavior and locomotion.
"""
DISTNAME = 'pinpoint'
MAINTAINER = 'Jacob Graving'
MAINTAINER_EMAIL = 'jgraving@gmail.com'
URL = 'http://jakegraving.com'
LICENSE = 'BSD (3-clause)'
DOWNLOAD_URL = 'https://jgraving@github.com/jgraving/pinpoint.git'
VERSION = '0.0.1'
try:
from setuptools import setup
_has_setuptools = True
except ImportError:
from distutils.core import setup
def check_dependencies():
install_requires = []
# Make sure dependencies exist
try:
import numpy
except ImportError:
install_requires.append('numpy')
try:
import scipy
except ImportError:
install_requires.append('scipy')
try:
import matplotlib
except ImportError:
install_requires.append('matplotlib')
try:
import cv2
except ImportError:
install_requires.append('cv2')
try:
import pickle
except ImportError:
install_requires.append('pickle')
try:
import glob
except ImportError:
install_requires.append('glob')
try:
import sklearn
except ImportError:
install_requires.append('sklearn')
try:
import h5py
except ImportError:
install_requires.append('h5py')
try:
import numba
except ImportError:
install_requires.append('numba')
try:
import types
except ImportError:
install_requires.append('types')
try:
import warnings
except ImportError:
install_requires.append('warnings')
return install_requires
if __name__ == "__main__":
install_requires = check_dependencies()
setup(name=DISTNAME,
author=MAINTAINER,
author_email=MAINTAINER_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
install_requires=install_requires,
packages=['pinpoint'],
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: BSD License',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Multimedia :: Video'
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'],
)
|
Python
| 0.000001
|
@@ -819,22 +819,35 @@
= '
-BSD (3-clause)
+Apache Software License 2.0
'%0ADO
@@ -2980,11 +2980,23 @@
::
-BSD
+Apache Software
Lic
|
3451498e718c59349f7ba91bf9f8cec19f202484
|
Fix typo
|
warpq.py
|
warpq.py
|
"""
WARP-Q: Quality Prediction For Generative Neural Speech Codecs
This code is to run the WARP-Q speech quality metric described in our papers:
[1] W. A. Jassim, J. Skoglund, M. Chinen, and A. Hines, “Speech quality assessment
with WARP‐Q: From similarity to subsequence dynamic time warp cost,”
IET Signal Processing, 1– 21 (2022)
[2] W. A. Jassim, J. Skoglund, M. Chinen, and A. Hines, “WARP-Q: Quality prediction
for generative neural speech codecs,” ICASSP 2021 - 2021 IEEE International
Conference on Acoustics, Speech and Signal Processing (ICASSP), 2021, pp. 401-405
Warning: While this code has been tested and commented giving invalid input
files may cause unexpected results and will not be caught by robust exception
handling or validation checking. It will just fail or give you the wrong answer.
Dr Wissam Jassim
wissam.a.jassim@gmail.com
November 7, 2022
"""
# Load libraries
from WARPQ.WARPQmetric import warpqMetric
import pandas as pd
import seaborn as sns
from scipy.stats import pearsonr, spearmanr
import argparse
import os
from tqdm import tqdm
'''
###############################################################################
###################### Main Test Function ####################################
###############################################################################
'''
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, help='Either predict_file or predict_csv')
parser.add_argument('--csv_input', type=str, #default='./audio_paths.csv',
help='''Path of a csv file which contains
paths and info of the input audio files. The csv file consists of four columns:
- Ref_Wave: path of reference (original) audio file
- Test_Wave: path of test (degraded) audio file
- MOS: subjective rating score (optional, for plotting only)
- Codec: type of speech codec, condition, or noise type (optional, for plotting only)
See ./audio_samples.csv as an example of this file.''')
parser.add_argument('--org', type=str, help='Path of the original (reference) speech file')
parser.add_argument('--deg', type=str, help='Path of the degraded (processed) speech file')
parser.add_argument('--sr', type=int, default=16000, help='Sampling frequency of speech signals in Hz. Only two sr values are currently supported: 16000 and 8000 Hz')
parser.add_argument('--n_mfcc', type=int, default=13, help='Number of MFCCs')
parser.add_argument('--fmax', type=int, default=5000, help='Cutoff frequency for MFCC in Hz')
parser.add_argument('--patch_size', type=float, default=0.4, help='Size of MFCC patch in seconds')
parser.add_argument('--sigma', type=list, default=[[1,0],[0,3],[1,3]], help='Step size conditon for DTW')
parser.add_argument('--apply_vad', type=bool, default=True, help='Condition for using vad algorithm')
parser.add_argument('--mapping_model', type=str, required=True, help='File name of pretrained model to map raw WARP-Q scores onto MOS')
parser.add_argument('--csv_output', type=str, help='Path and name of a csv file to save WARP-Q results')
parser.add_argument('--getPlots', type=bool, default=True, help='To plot the predicted scores vs MOS. If True, MOS and Codec type should be provided in the input csv file')
args = parser.parse_args()
args = vars(args)
if args['mode'] == 'predict_csv':
if args['csv_input'] is None:
raise ValueError('--csv_file argument with input csv file name is required')
if args['csv_output'] is None:
raise ValueError('--csv_output argument with output csv file name is required')
elif args['mode'] == 'predict_file':
if args['org'] is None:
raise ValueError('--org argument with path to input original speech file is required')
if args['deg'] is None:
raise ValueError('--deg argument with path to input degraded speech file is required')
else:
raise NotImplementedError('--mode argument given is not available')
def main(args):
# Object of WARP-Q class
warpq = warpqMetric(args)
warpq_rawScore = [] # List to add WARP-Q scores
warpq_mappedScore = []
if args['mode'] == 'predict_csv':
# Load path of speech files stored in a csv file
# The csv file consists of data with four columns: Ref_Wave, Test_Wave, MOS, and Codec
df = pd.read_csv(args['csv_input'], index_col=None)
# Iterative process
for index, row in tqdm(df.iterrows(), total = df.shape[0], desc="Compute quality sores..."):
rawScore, mappedScore = warpq.evaluate(ref_path = row['Ref_Wave'], test_path = row['Test_Wave'])
warpq_rawScore.append(rawScore)
warpq_mappedScore.append(mappedScore)
# Add computed score to the same csv file
df['Raw WARP-Q Score'] = warpq_rawScore
df['Mapped WARP-Q Score'] = warpq_mappedScore
# Save the results
if not os.path.exists(os.path.dirname(args['csv_output'])):
os.makedirs(os.path.dirname(args['csv_output']))
df.to_csv(args['csv_output'], index = None)
if args['getPlots']:
# Compute per-sample Pearsonr and Spearmanr correlation coefficients for raw scores
pearson_coef, p_pearson = pearsonr(df['Raw WARP-Q Score'], df['MOS'])
Spearmanr_coef, p_spearman = spearmanr(df['Raw WARP-Q Score'], df['MOS'])
sns.relplot(x="MOS", y="Raw WARP-Q Score", hue="Codec", palette="muted",
data=df).fig.suptitle('Correlations: Pearsonr= '+ str(round(pearson_coef,2)) +
', Spearman='+str(round(Spearmanr_coef,2)))
# Compute per-sample Pearsonr and Spearmanr correlation coefficients for mapped scores
pearson_coef, p_value = pearsonr(df['Mapped WARP-Q Score'], df['MOS'])
Spearmanr_coef, p_spearman = spearmanr(df['Mapped WARP-Q Score'], df['MOS'])
sns.relplot(x="MOS", y="Mapped WARP-Q Score", hue="Codec", palette="muted",
data=df).fig.suptitle('Correlations: Pearsonr= '+ str(round(pearson_coef,2)) +
', Spearman='+str(round(Spearmanr_coef,2)))
print('\nResults are saved in ' + args['csv_output'])
else: #predict_file mode
print("Compute quality sores...")
warpq_rawScore, warpq_mappedScore = warpq.evaluate(args['org'], args['deg'])
print('\nRaw WARP-Q score (lower rating means better quality): ' + str(warpq_rawScore))
print('Mapped WARP-Q score (higher rating means better quality): ' + str(warpq_mappedScore))
print('Done!')
if __name__ == '__main__':
main(args)
|
Python
| 0.999999
|
@@ -6849,32 +6849,33 @@
ompute quality s
+c
ores...%22)%0D%0A
@@ -7257,20 +7257,22 @@
in(args)%0D%0A %0D%0A
+%0D%0A
|
5da10d6e9916af224acfaa5150a0c151d4af42f9
|
version 0.4.1 bump
|
setup.py
|
setup.py
|
#!/usr/bin/env python
import os
from setuptools import setup
from setuptools.command.install import install
LONG_DESCRIPTION = ""
with open(os.path.join(os.path.dirname(__file__), 'README.md'), 'r') as f:
LONG_DESCRIPTION = f.read()
setup(
name='shadho',
version='0.4.1',
description='Hyperparameter optimizer with distributed hardware at heart',
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
url='https://github.com/jeffkinnison/shadho',
author='Jeff Kinnison',
author_email='jkinniso@nd.edu',
python_requires='>=3.5',
packages=['shadho',
'shadho.installers',
'shadho.managers',
'shadho.workers',],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Operating System :: POSIX',
'Operating System :: Unix',
],
keywords='machine_learning hyperparameters distributed_computing',
install_requires=[
'numpy',
'scipy',
'scikit-learn',
'pyrameter'
],
tests_require=['pytest'],
include_package_data=True,
)
|
Python
| 0
|
@@ -280,9 +280,9 @@
0.4.
-1
+2
',%0A
|
ec4bbc6c6b766ac1c530cf3f1b4ebab40c60fe01
|
Update instrument.py
|
fx_collect/instrument.py
|
fx_collect/instrument.py
|
class InstrumentAttributes(object):
def __init__(
self, broker, instrument, time_frames,
market_status, last_update, utc_now, wk_str, wk_end
):
# Start of Trading Week
self.utc_now = utc_now
self.wk_str = wk_str
self.wk_end = wk_end
self.str_hour = wk_str.hour
self.td = wk_str.hour - 22
# Passport
self.instrument = instrument
self.market_status = market_status
self.last_update = last_update
self.time_frames = time_frames
# Time frame storage dict
self.attrib = {}
for time_frame in time_frames:
self.attrib[time_frame] = {
'db_min' : None,
'db_max' : None,
'finbar' : None
}
def update_instrument_status(
self, lastupdate, market_status, utc_now
):
self.utc_now = utc_now
self.last_update = lastupdate
self.market_status = market_status
def _update_database_datetime(
self, time_frame, pdfm, pdto
):
if pdfm < self.attrib[time_frame]['db_min']:
self.attrib[time_frame]['db_min'] = pdfm
if pdto >= self.attrib[time_frame]['db_max']:
self.attrib[time_frame]['db_max'] = pdto
|
Python
| 0
|
@@ -986,17 +986,16 @@
def
-_
update_d
|
c2be940ea7c0a11bc0ffb5660d5f902bbaee29d6
|
Fix closure binding problem
|
patchboard/resource.py
|
patchboard/resource.py
|
# resource.py
#
# Copyright 2014 BitVault.
from __future__ import print_function
import json
from action import Action
from exception import PatchboardError
class ResourceType(type):
"""A metaclass for resource classes."""
# Must override to supply default arguments
def __new__(cls, name, patchboard, definition, schema, mapping):
return type.__new__(cls, name, (Resource,), {})
def __init__(cls, name, patchboard, definition, schema, mapping):
setattr(cls, 'api', classmethod(lambda(self_): patchboard.api))
setattr(cls, 'schema', classmethod(lambda(self_): schema))
setattr(cls, 'mapping', classmethod(lambda(self_): mapping))
if schema:
if u'properties' in schema:
for name, schema_def in schema[u'properties'].iteritems():
setattr(
cls,
name,
lambda(self): self.attributes[name])
if schema.get(u'additionalProperties', False) is not False:
def additional_fn(self, name, *args):
# TODO: see if this is the right implementation; the
# ruby code intercepts calls *before* normal lookup,
# so possibly this should use __getattribute__. OTOH
# that may just be an artifact of Ruby and not part
# of the design.
try:
return self.attributes[name]
except KeyError:
raise AttributeError
setattr(cls, '__getattr__', additional_fn)
setattr(
cls,
'generate_url',
classmethod(
lambda(self_, params): mapping.generate_url(params)))
for name, action in definition[u'actions'].iteritems():
action = Action(patchboard, name, action)
def action_fn(self, *args):
return action.request(self, self.url, *args)
setattr(cls, name, action_fn)
# Must be called last
super(ResourceType, cls).__init__(name, (Resource,), {})
class Resource(object):
"""Base class for resources"""
@classmethod
def decorate(cls, instance, attributes):
# TODO: non destructive decoration
# TODO: add some sort of validation for the input attributes.
try:
class_schema = cls.schema
except AttributeError:
class_schema = None
if class_schema and u'properties' in class_schema:
context = instance.context
properties = class_schema[u'properties']
for key, sub_schema in properties.iteritems():
if key not in attributes:
next
value = attributes[key]
mapping = cls.api.find_mapping(sub_schema)
if mapping:
if mapping.query:
# TODO: find a way to define this at runtime,
# not once for every instance.
def fn(self, params={}):
params[u'url'] = value[u'url']
url = mapping.generate_url(params)
return mapping.cls(context, {u'url': url})
setattr(instance, key, fn)
else:
attributes[key] = mapping.cls(context, value)
else:
attributes[key] = cls.api.decorate(
context,
sub_schema,
value)
return attributes
def __init__(self, context, attributes={}):
self.context = context
self.attributes = Resource.decorate(self, attributes)
self.url = self.attributes[u'url']
# TODO: implement
#def __str__(self):
def __len__(self):
return len(self.attributes)
def __getitem__(self, key):
return self.attributes[key]
def __setitem__(self, key, value):
self.attributes[key] = value
#def __delitem__(self, key):
# del self.attributes[key]
def __contains__(self, obj):
return (obj in self.attributes)
def curl(self):
raise PatchboardError(u"Resource.curl() not implemented")
def to_hash(self):
return self.attributes
def to_json(self):
return json.dumps(self.attributes)
|
Python
| 0.000002
|
@@ -1916,24 +1916,58 @@
e, action)%0A%0A
+ def bind(action):%0A
@@ -1986,32 +1986,36 @@
n(self, *args):%0A
+
@@ -2058,16 +2058,49 @@
, *args)
+%0A return action_fn
%0A%0A
@@ -2124,25 +2124,28 @@
, name,
+bind(
action
-_fn
+)
)%0A%0A
|
cd3711cf8e7d34085544f7f622bb77fb89072a4d
|
make websockets respect listening address and port
|
pywebdlmon/controller.py
|
pywebdlmon/controller.py
|
#!/usr/bin/env python
import json
import os.path
from twisted.python import log
from twisted.web import server
from twisted.web.static import File as StaticFile
from txroutes import Dispatcher
from autobahn.websocket import WebSocketServerFactory
from mako import exceptions
from pywebdlmon.model import UnknownInstance, UnknownStation, UnknownFormat
from pywebdlmon import wsmagic
from pywebdlmon.ws import RequestishProtocol
class UnknownTransport(Exception): pass
class WebsocketsError(Exception): pass
def is_sync(transport):
if transport == 'http':
return True
elif transport == 'ws':
return False
raise UnknownTransport(transport)
class Controller(object):
def __init__(self, cfg, instances):
self.cfg = cfg
self.instances = instances
self._ws_factory = WebSocketServerFactory("ws://0.0.0.0:6999")
self._ws_factory.protocol = RequestishProtocol
def _error(self, request, format, code, msg):
# TODO return JSON error object for json queries
request.setHeader("content-type", "text/html")
request.setHeader("response-code", code)
if format == 'json':
err = dict(error=dict(code=code, msg=msg))
buffer = json.dumps(err)
else:
template = self.cfg.templates.get_template('error.html')
buffer = str(template.render(cfg=self.cfg, code=code, msg=msg))
request.write(buffer)
request.finish()
return server.NOT_DONE_YET
def root(self, request):
return self.cfg.templates.get_template('index.html').render().encode('utf-8')
def static(self, request, file):
# TODO santize file
return StaticFile(os.path.join('static', file)).render(request)
def _handler_helper(inner_func):
def wrapper_func(self, request, format, transport, *args, **kwargs):
if not isinstance(request, RequestishProtocol):
if request.getHeader('Upgrade') == 'websocket':
# Change the connection from HTTP to websockets.
request = wsmagic.upgrade(request, self._ws_factory)
if not hasattr(request, 'repeat'):
# Handlers can query this flag to change their behavior for the
# first request, e.g. to send the current state first then
# subsequent updates.
request.repeat = False
try:
deferred = inner_func(self, request, format, transport, *args, **kwargs)
except UnknownInstance, e:
return self._error(request, format, 404, "Unknown DLMon Instance '%s'" % e)
except UnknownStation, e:
return self._error(request, format, 404, "Unknown Station: '%s'" % e)
except UnknownFormat, e:
return self._error(request, format, 400, "Unknown Format: '%s'" % e)
except UnknownTransport, e:
return self._error(request, format, 400, "Unknown Transport: '%s'" % e)
request.repeat = True
def cb(buffer):
assert buffer is not None
request.setHeader("response-code", 200)
if format == 'json':
request.setHeader("content-type", "application/json")
# JSONP magic
if request.args.has_key('callback'):
request.setHeader("content-type", "application/javascript")
buffer = request.args['callback'][0] + '(' + buffer + ')'
elif format == 'html':
request.setHeader("content-type", "text/html")
else:
return self._error(request, format, 400, "Unknown Format: '%s'" % format)
request.write(buffer)
if isinstance(request, RequestishProtocol):
wrapper_func(self, request, format, transport, *args, **kwargs)
else:
request.finish()
return server.NOT_DONE_YET
deferred.addCallback(cb)
return server.NOT_DONE_YET
return wrapper_func
@_handler_helper
def station_list(self, request, format, transport, instance):
instance = self.instances.get_instance(instance)
if request.repeat:
deferred = instance.station_list.get_format(format, immediate=is_sync(transport))
else:
deferred = instance.station_list.get_format(format, immediate=True)
return deferred
@_handler_helper
def station_status(self, request, format, transport, instance, station):
instance = self.instances.get_instance(instance)
station = instance.instance_status.get_station(station)
if request.repeat:
deferred = station.get_format(format, immediate=is_sync(transport))
else:
# Send full status immediately.
deferred = station.get_format(format, immediate=True)
return deferred
@_handler_helper
def instances_handler(self, request, format, transport):
# This data is static during runtime.
deferred = self.instances.get_format(format, immediate=True)
return deferred
@_handler_helper
def instance_status(self, request, format, transport, instance):
instance = self.instances.get_instance(instance)
if request.repeat:
deferred = instance.instance_update.get_format(format, immediate=is_sync(transport))
else:
# Send full status immediately.
deferred = instance.instance_status.get_format(format, immediate=True)
return deferred
def get_dispatcher(cfg, instances):
c = Controller(cfg, instances)
d = Dispatcher()
def connect(name, url):
d.connect(name, url, c, action=name)
connect('root', '/')
connect('static', '/static/{file}')
connect('instances_handler', '/{transport}/dlmon/instances{.format}')
connect('instance_status', '/{transport}/dlmon/instances/{instance}/status{.format}')
connect('station_list', '/{transport}/dlmon/instances/{instance}/stations{.format}')
connect('station_status', '/{transport}/dlmon/instances/{instance}/stations/{station}/status{.format}')
return d
|
Python
| 0
|
@@ -853,21 +853,45 @@
s://
-0.0.0.0:6999%22
+%25s:%25d%22 %25 (cfg.bind_address, cfg.port)
)%0A
|
5cbc61943b3488719c3e0de2596ce64458935538
|
add include_package_data to setup.py
|
setup.py
|
setup.py
|
from os.path import join, dirname
with open(join(dirname(__file__), 'scrapyd/VERSION')) as f:
version = f.read().strip()
setup_args = {
'name': 'Scrapyd',
'version': version,
'url': 'https://github.com/scrapy/scrapyd',
'description': 'A service for running Scrapy spiders, with an HTTP API',
'long_description': open('README.rst').read(),
'author': 'Scrapy developers',
'maintainer': 'Scrapy developers',
'maintainer_email': 'info@scrapy.org',
'license': 'BSD',
'packages': ['scrapyd'],
'scripts': ['bin/scrapyd'],
'classifiers': [
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: No Input/Output (Daemon)',
'Topic :: Internet :: WWW/HTTP',
],
}
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
else:
setup_args['install_requires'] = ['Twisted>=8.0', 'Scrapy>=0.17']
setup(**setup_args)
|
Python
| 0.000001
|
@@ -554,24 +554,58 @@
/scrapyd'%5D,%0A
+ 'include_package_data': True,%0A
'classif
|
d15f7314f7c899b4c91532a07604a6728bc8d45d
|
Drop support for Python 3.4
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from __future__ import print_function
import os
import subprocess
import sys
import contextlib
from distutils.command.build_ext import build_ext
from distutils.sysconfig import get_python_inc
from distutils import ccompiler, msvccompiler
try:
from setuptools import Extension, setup
except ImportError:
from distutils.core import Extension, setup
PACKAGES = [
'preshed',
'preshed.tests']
MOD_NAMES = [
'preshed.maps',
'preshed.counter']
# By subclassing build_extensions we have the actual compiler that will be used which is really known only after finalize_options
# http://stackoverflow.com/questions/724664/python-distutils-how-to-get-a-compiler-that-is-going-to-be-used
compile_options = {'msvc' : ['/Ox', '/EHsc'],
'other' : ['-O3', '-Wno-strict-prototypes', '-Wno-unused-function']}
link_options = {'msvc' : [],
'other' : []}
class build_ext_options:
def build_options(self):
for e in self.extensions:
e.extra_compile_args = compile_options.get(
self.compiler.compiler_type, compile_options['other'])
for e in self.extensions:
e.extra_link_args = link_options.get(
self.compiler.compiler_type, link_options['other'])
class build_ext_subclass(build_ext, build_ext_options):
def build_extensions(self):
build_ext_options.build_options(self)
build_ext.build_extensions(self)
def generate_cython(root, source):
print('Cythonizing sources')
p = subprocess.call([sys.executable,
os.path.join(root, 'bin', 'cythonize.py'),
source])
if p != 0:
raise RuntimeError('Running cythonize failed')
def is_source_release(path):
return os.path.exists(os.path.join(path, 'PKG-INFO'))
def clean(path):
for name in MOD_NAMES:
name = name.replace('.', '/')
for ext in ['.so', '.html', '.cpp', '.c']:
file_path = os.path.join(path, name + ext)
if os.path.exists(file_path):
os.unlink(file_path)
@contextlib.contextmanager
def chdir(new_dir):
old_dir = os.getcwd()
try:
os.chdir(new_dir)
sys.path.insert(0, new_dir)
yield
finally:
del sys.path[0]
os.chdir(old_dir)
def setup_package():
root = os.path.abspath(os.path.dirname(__file__))
if len(sys.argv) > 1 and sys.argv[1] == 'clean':
return clean(root)
with chdir(root):
with open(os.path.join(root, 'preshed', 'about.py')) as f:
about = {}
exec(f.read(), about)
with open(os.path.join(root, 'README.rst')) as f:
readme = f.read()
include_dirs = [
get_python_inc(plat_specific=True),
]
if (ccompiler.new_compiler().compiler_type == 'msvc'
and msvccompiler.get_build_version() == 9):
include_dirs.append(os.path.join(root, 'include', 'msvc9'))
ext_modules = []
for mod_name in MOD_NAMES:
mod_path = mod_name.replace('.', '/') + '.cpp'
ext_modules.append(
Extension(mod_name, [mod_path],
language='c++', include_dirs=include_dirs))
if not is_source_release(root):
generate_cython(root, 'preshed')
setup(
name=about['__title__'],
zip_safe=False,
packages=PACKAGES,
package_data={'': ['*.pyx', '*.pxd']},
description=about['__summary__'],
long_description=readme,
author=about['__author__'],
author_email=about['__email__'],
version=about['__version__'],
url=about['__uri__'],
license=about['__license__'],
ext_modules=ext_modules,
setup_requires=['wheel>=0.32.0,<0.33.0'],
install_requires=['cymem>=2.0.2,<2.1.0'],
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Cython',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering'],
cmdclass = {
'build_ext': build_ext_subclass},
)
if __name__ == '__main__':
setup_package()
|
Python
| 0.000002
|
@@ -4460,65 +4460,8 @@
7',%0A
- 'Programming Language :: Python :: 3.4',%0A
|
ab1ed886552ed737e6da3373e80a1e0c4c319cac
|
Disable Main File Feature
|
director/projects/templatetags/source_extras.py
|
director/projects/templatetags/source_extras.py
|
import typing
from os.path import splitext
from django import template
from django.urls import reverse
from lib.google_docs_facade import build_google_document_url
from projects.project_models import Project
from projects.source_item_models import DirectoryListEntry, DirectoryEntryType
from projects.source_models import DiskSource, GoogleDocsSource
register = template.Library()
@register.simple_tag
def source_path(project: Project, directory_entry: DirectoryListEntry) -> str:
if isinstance(directory_entry.source, GoogleDocsSource):
source = typing.cast(GoogleDocsSource, directory_entry.source)
return build_google_document_url(source.doc_id)
if directory_entry.is_directory and directory_entry.path:
view_name = 'project_files_path'
view_args = project.pk, directory_entry.path
return reverse(view_name, args=view_args)
if directory_entry.type == DirectoryEntryType.FILE:
if isinstance(directory_entry.source, DiskSource):
return reverse('disk_file_source_open', args=(project.pk, directory_entry.path))
else:
return reverse('file_source_open', args=(project.pk, directory_entry.source.pk, directory_entry.path))
return ""
@register.simple_tag
def download_url(project: Project, directory_entry: DirectoryListEntry) -> str:
if directory_entry.is_directory:
return ''
if isinstance(directory_entry.source, DiskSource):
return reverse('disk_file_source_download', args=(project.pk, directory_entry.path))
else:
return reverse('file_source_download', args=(project.pk, directory_entry.source.pk, directory_entry.path))
def mimetype_text_editable(mimetype: str) -> bool:
if mimetype in ('Unknown', 'application/javascript', 'application/json', 'application/xml'):
return True
return '/' in mimetype and mimetype.split('/')[0] == 'text'
@register.filter
def is_text_editable(directory_entry: typing.Any) -> bool:
if not isinstance(directory_entry, DirectoryListEntry):
return False
directory_entry = typing.cast(DirectoryListEntry, directory_entry)
if isinstance(directory_entry.source, GoogleDocsSource):
return True
if directory_entry.is_directory or not directory_entry.mimetype:
return False
return mimetype_text_editable(directory_entry.mimetype)
@register.filter
def is_main_file(directory_entry: typing.Any, project: typing.Any) -> bool:
if not isinstance(directory_entry, DirectoryListEntry):
return False
directory_entry = typing.cast(DirectoryListEntry, directory_entry)
if not isinstance(project, Project):
return False
project = typing.cast(Project, project)
return directory_entry.path == project.main_file_path
@register.filter
def can_be_main_file(directory_entry: typing.Any, project: typing.Any) -> bool:
if not isinstance(directory_entry, DirectoryListEntry):
return False
directory_entry = typing.cast(DirectoryListEntry, directory_entry)
if not isinstance(project, Project):
return False
project = typing.cast(Project, project)
if directory_entry.is_directory:
return False
return not is_main_file(directory_entry, project)
@register.filter
def edit_menu_text(directory_entry: typing.Any) -> str:
if not isinstance(directory_entry, DirectoryListEntry):
return ''
directory_entry = typing.cast(DirectoryListEntry, directory_entry)
if isinstance(directory_entry.source, GoogleDocsSource):
return 'Open in Google Docs'
return 'Open in Code Editor'
@register.filter
def file_icon(directory_entry: typing.Any) -> str:
if not isinstance(directory_entry, DirectoryListEntry):
return ''
directory_entry = typing.cast(DirectoryListEntry, directory_entry)
if directory_entry.is_directory:
return 'folder'
name, ext = splitext(directory_entry.name)
if ext:
ext = ext[1:].lower()
if ext in ['py', 'r', 'rmd', 'json', 'js', 'xml', 'html']:
return 'file-code'
return 'file'
|
Python
| 0.000037
|
@@ -2865,32 +2865,97 @@
g.Any) -%3E bool:%0A
+ return False # TODO: Main File Feature is disabled for now%0A%0A
if not isins
|
fe3f75d20f215230e4c02f40b97b634cb468a096
|
Fix indexing in format
|
python/compare_recon1.py
|
python/compare_recon1.py
|
#!/usr/bin/env python
import glob
import hashlib
import os
import sys
import tempfile
import tarfile
import subprocess
import shutil
VOL_DIRS = ['mri', 'mri/orig', 'mri/tranforms']
SURFACE_DIRS = ['surf']
VOLUMES = ['rawavg.mgz',
'orig.mgz',
'nu.mgz',
'T1.mgz',
'brainmask.mgz']
APARCS = ['aparc.a2009s', 'aparc']
STATS_FILES = ['aseg.stats']
def run_command(command):
"""
Run a command and return results
:param command: command to run
:return: a tuple (signal, exit code)
"""
dev_null = open(os.devnull, 'w')
retval = subprocess.call(command, stdout=dev_null, stderr=dev_null, shell=True)
return get_exitcode(retval)
def get_exitcode(return_code):
"""
Calculate and return exit code from a return code
:param return_code: code in os.wait format
:return: a tuple (signal, exit code)
"""
signal = return_code & 0x00FF
exitcode = (return_code & 0xFF00) >> 8
return signal, exitcode
def compare_volumes(subject1_dir, subject2_dir):
"""
Compare the volumes generated by Freesurfer for two inputs and
indicate whether they are the same or different
:param subject1_dir: path to files for first subject
:param subject2_dir: path to files for second subject
:return: True if files have different volumes, False otherwise
"""
differences = False
sys.stdout.write("Comparing volumes\n")
subj1_files = os.listdir(subject1_dir)
subj2_files = os.listdir(subject2_dir)
if len(subj1_files) != len(subj2_files):
sys.stdout.write("Number of files in " +
"{0} ".format(subject1_dir) +
"and {1} differ\n".format(subject2_dir))
for volume in VOLUMES:
sys.stdout.write("Comparing volume {0}... ".format(volume))
volume_1 = os.path.join(subject1_dir,
"mri",
"{0}".format(volume))
volume_2 = os.path.join(subject2_dir,
"mri",
"{0}".format(volume))
cmd = "mri_diff --thresh 0 "
cmd += "{0} {1}".format(volume_1, volume_2)
signal, exit_code = run_command(cmd)
if signal != 0:
differences = True
sys.stdout.write("Signal {0} occurred\n".format(signal))
if exit_code != 0:
differences = True
if exit_code == 0:
sys.stdout.write("OK\n")
elif exit_code == 1:
sys.stdout.write("An error occurred\n")
elif exit_code == 101:
sys.stdout.write("Files differ in dimension\n")
elif exit_code == 102:
sys.stdout.write("Files differ in resolution\n")
elif exit_code == 103:
sys.stdout.write("Files differ in acquisition parameters\n")
elif exit_code == 104:
sys.stdout.write("Files differ in geometry\n")
elif exit_code == 105:
sys.stdout.write("Files differ in precision\n")
elif exit_code == 106:
sys.stdout.write("Files differ in pixel data\n")
elif exit_code == 107:
sys.stdout.write("Files differ in orientation\n")
else:
differences = True
sys.stdout.write("Files differ, "
"exit code: {0}\n".format(exit_code))
sys.stdout.write("Comparing seg overlap... ")
return differences
def main(work_dir):
"""
Compare two MRI results and list any differences
:param work_dir: directory to use as a working directory
:return: exit code (0 on success, 1 on failure)
"""
sys.stdout.write("Using {0} as scratch dir\n".format(work_dir))
input_1 = sys.argv[1]
input_2 = sys.argv[2]
sys.stdout.write("Extracting tarballs from {0} and {1}\n".format(input_1,
input_2))
input_1_tarball = tarfile.open(input_1, 'r:*')
input_2_tarball = tarfile.open(input_2, 'r:*')
subject1 = input_1_tarball.getmembers()[0].path
subject2 = input_2_tarball.getmembers()[0].path
subject1_dir = os.path.join(work_dir, 'input1')
subject2_dir = os.path.join(work_dir, 'input2')
input_1_tarball.extractall(subject1_dir)
input_2_tarball.extractall(subject2_dir)
input_1_dir = os.path.join(subject1_dir, subject1)
input_2_dir = os.path.join(subject2_dir, subject2)
# setup symlinks for parcellation comparisons
subjects_dir = os.path.join(work_dir, "subjects")
os.mkdir(subjects_dir)
os.symlink(input_1_dir, os.path.join(subjects_dir, 'subject1'))
os.symlink(input_2_dir, os.path.join(subjects_dir, 'subject2'))
# Do comparisons
inputs_different = False
inputs_different |= compare_volumes(input_1_dir, input_2_dir)
if inputs_different:
sys.stdout.write("Differences between the two files!")
else:
sys.stdout.write("Files check out!")
return 0
if __name__ == '__main__':
try:
scratch_dir = tempfile.mkdtemp()
sys.exit(main(scratch_dir))
finally:
shutil.rmtree(scratch_dir)
|
Python
| 0.99961
|
@@ -1692,17 +1692,17 @@
%22and %7B
-1
+0
%7D differ
|
a6effe7080fb66f7bd4e930727ed5d1ecff21523
|
Fix setup requirements to not contain transitional dependencies and exact versions
|
setup.py
|
setup.py
|
from setuptools import setup
from pytui.settings import VERSION
setup(
name='pytest-ui',
description='Text User Interface for running python tests',
version=VERSION,
license='MIT',
platforms=['linux', 'osx', 'win32'],
packages=['pytui'],
url='https://github.com/martinsmid/pytest-ui',
author_email='martin.smid@gmail.com',
author='Martin Smid',
entry_points={
'console_scripts': [
'pytui = pytui.ui:main',
]
},
install_requires=[
'attrs==17.4.0',
'future==0.16.0',
'pluggy==0.6.0',
'py==1.5.2',
'pytest==3.4.1',
'six==1.11.0',
'tblib==1.3.2',
'urwid==2.0.1',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: Testing',
'Topic :: Utilities',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
)
|
Python
| 0
|
@@ -515,141 +515,32 @@
'
-attrs==17.4.0',%0A 'future==0.16.0',%0A 'pluggy==0.6.0',%0A 'py==1.5.2',%0A 'pytest==3.4.1',%0A 'six==1.11.0
+future',%0A 'pytest
',%0A
@@ -556,15 +556,8 @@
blib
-==1.3.2
',%0A
@@ -573,15 +573,8 @@
rwid
-==2.0.1
',%0A
|
131cead153dd29cacf03fbf841f26fc85482b57c
|
Set version redactor 0.2 on setup file
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from setuptools import setup, find_packages
import opps
install_requires = ["Django==1.5",
"south>=0.7",
"Pillow==1.7.8",
"thumbor==3.7.1",
"django-tagging==0.3.1",
"django-redis",
"django-redactor"]
classifiers = ["Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Framework :: Django",
'Programming Language :: Python',
"Programming Language :: Python :: 2.7",
"Operating System :: OS Independent",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
'Topic :: Software Development :: Libraries :: Python Modules',]
try:
long_description = open('README.md').read()
except:
long_description = opps.__description__
setup(name='opps',
version = opps.__version__,
description = opps.__description__,
long_description = long_description,
classifiers = classifiers,
keywords = 'opps cms django apps magazines websites',
author = opps.__author__,
author_email = opps.__email__,
url = 'http://oppsproject.org',
download_url = "https://github.com/avelino/opps/tarball/master",
license = opps.__license__,
packages = find_packages(exclude=('doc',)),
package_dir = {'opps': 'opps'},
install_requires = install_requires,
include_package_data = True,
)
|
Python
| 0
|
@@ -360,16 +360,21 @@
redactor
+==0.2
%22%5D%0A%0Aclas
|
f99728387b87787f7f05b9878cd85c8128461d3f
|
remove previous builds
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = '\n' + f.read()
about = {}
with open(os.path.join(here, "pipenv", "__version__.py")) as f:
exec(f.read(), about)
if sys.argv[-1] == "publish":
os.system("python setup.py sdist bdist_wheel upload")
sys.exit()
required = [
'virtualenv',
'pew>=0.1.26'
]
if sys.version_info < (2, 7):
required.append('requests[security]')
required.append('ordereddict')
# https://pypi.python.org/pypi/stdeb/0.8.5#quickstart-2-just-tell-me-the-fastest-way-to-make-a-deb
class DebCommand(Command):
"""Support for setup.py deb"""
description = 'Build and publish the .deb package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self.status(u'Creating debian mainfest…')
os.system('python setup.py --command-packages=stdeb.command sdist_dsc')
self.status(u'Building .deb…')
os.chdir('deb_dist/pipenv-{0}'.format(about['__version__']))
os.system('dpkg-buildpackage -rfakeroot -uc -us')
class UploadCommand(Command):
"""Support setup.py publish."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except FileNotFoundError:
pass
self.status('Building Source distribution…')
os.system('{0} setup.py sdist'.format(sys.executable))
self.status('Uploading the package to PyPi via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
setup(
name='pipenv',
version=about['__version__'],
description='Python Development Workflow for Humans.',
long_description=long_description,
author='Kenneth Reitz',
author_email='me@kennethreitz.org',
url='https://github.com/pypa/pipenv',
packages=find_packages(exclude=['tests']),
entry_points={
'console_scripts': ['pipenv=pipenv:cli'],
},
install_requires=required,
include_package_data=True,
license='MIT',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
cmdclass={
'upload': UploadCommand,
'deb': DebCommand
},
)
|
Python
| 0
|
@@ -1155,32 +1155,200 @@
def run(self):%0A
+ try:%0A self.status('Removing previous builds%E2%80%A6')%0A rmtree(os.path.join(here, 'deb_dist'))%0A except FileNotFoundError:%0A pass%0A
self.sta
|
0aee4834970714c399f1375830400ff66104bd92
|
fix setup.py
|
setup.py
|
setup.py
|
#!/usr/bin/env python
import os
import re
import sys
from codecs import open
from setuptools import setup
from setuptools.command.test import test as TestCommand
here = os.path.abspath(os.path.dirname(__file__))
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass into py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
# 'setup.py publish' shortcut.
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
packages = [
'requests',
'requests.packages',
'requests.packages.chardet',
'requests.packages.chardet.cli',
'requests.packages.idna',
'requests.packages.urllib3',
'requests.packages.urllib3.packages',
'requests.packages.urllib3.contrib',
'requests.packages.urllib3.util',
'requests.packages.urllib3.packages.ssl_match_hostname',
'requests.packages.urllib3.packages.backports',
'requests.packages.urllib3.contrib._securetransport',
]
requires = []
test_requirements = ['pytest>=2.8.0', 'pytest-httpbin==0.0.7', 'pytest-cov', 'pytest-mock']
about = {}
with open(os.path.join(here, 'requests', '__version__.py')) as f:
exec(f.read(), about)
with open('README.rst', 'r', 'utf-8') as f:
readme = f.read()
with open('HISTORY.rst', 'r', 'utf-8') as f:
history = f.read()
setup(
name=about['__title__'],
version=about['__version__'],
description=about['__description__'],
long_description=readme + '\n\n' + history,
author=about['__author__'],
author_email=about['__author_email__'],
url=about['__url__'],
packages=packages,
package_data={'': ['LICENSE', 'NOTICE'], 'requests': ['*.pem']},
package_dir={'requests': 'requests'},
include_package_data=True,
install_requires=requires,
license=about['__license__'],
zip_safe=False,
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
),
cmdclass={'test': PyTest},
tests_require=test_requirements,
extras_require={
'security': ['pyOpenSSL>=0.14', 'cryptography>=1.3.4', 'idna>=2.0.0'],
'socks': ['PySocks>=1.5.6, !=1.5.7'],
'socks:sys_platform == "win32" and (python_version == "2.7" or python_version == "2.6")': ['win_inet_pton'],
},
)
|
Python
| 0.000001
|
@@ -781,14 +781,56 @@
ist
-upload
+bdist_wheel')%0A os.system('twine upload dist/*
')%0A
|
72e4efe764dfcb85b633e59fbebd3aa82a95f6de
|
Use setuptools.
|
setup.py
|
setup.py
|
from distutils.core import setup
from sentinel import __version__ as VERSION
from codecs import open
with open('README.rst', encoding='UTF-8') as readme:
long_description = readme.read()
setup(
name='sentinel',
version=VERSION,
url='https://github.com/eddieantonio/sentinel',
license='MIT',
author='Eddie Antonio Santos',
author_email='easantos@ualberta.ca',
description='Create sentinel and singleton objects',
long_description=long_description,
py_modules=['sentinel'],
platforms='any',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python',
],
download_url = 'https://github.com/eddieantonio/sentinel/tarball/v' + VERSION,
)
|
Python
| 0
|
@@ -2,22 +2,18 @@
rom
-distutils.core
+setuptools
imp
|
85cf93e99c78c3091e8a19b34338db90fba01773
|
set default # of hts to 2
|
python/khmer/__init__.py
|
python/khmer/__init__.py
|
__version__ = "0.2"
import _khmer
from _khmer import new_ktable
from _khmer import new_hashtable
from _khmer import _new_hashbits
from _khmer import new_readmask
from _khmer import new_minmax
from _khmer import consume_genome
from _khmer import forward_hash, forward_hash_no_rc, reverse_hash
from _khmer import set_reporting_callback
from _khmer import do_intersection_partition
from filter_utils import filter_fasta_file_any, filter_fasta_file_all, filter_fasta_file_limit_n
###
def new_hashbits(k, starting_size, n_tables=8):
primes = get_n_primes_above_x(n_tables, starting_size)
print primes
# primes = [ 22906493, 22906519, 22906561, 22906567,
# 22906619, 22906649, 22906657, 22906661 ]
return _new_hashbits(k, primes)
def _default_reporting_callback(info, n_reads, other):
print '...', info, n_reads, other
def reset_reporting_callback():
set_reporting_callback(_default_reporting_callback)
reset_reporting_callback()
###
class KmerCount(object):
def __init__(self, size, report_zero=False):
self._kt = new_ktable(size)
self.report_zero = report_zero
def consume(self, seq):
self._kt.consume(seq)
def _get_pairs(self):
kt = self._kt
size = kt.n_entries()
for i in range(0, size):
count = kt.get(i)
if count or self.report_zero:
kmer = kt.reverse_hash(i)
yield kmer, kt.get(i)
pairs = property(_get_pairs)
def __getitem__(self, k):
return self._kt.get(k)
def is_prime(n):
'''
checks if a number is prime
'''
if n < 2:
return False
if n == 2:
return True
if n % 2 == 0:
return False
for x in range(3, int(n**0.5)+1, 2):
if n % x == 0:
return False
return True
def get_n_primes_near_x(n, x):
'''
steps backward until n primes (other than 2) have been
found that are smaller than x.
'''
primes = []
i = x-1
if i % 2 == 0:
i -= 1
while len(primes) != n and i > 0:
if is_prime(i):
primes.append(i)
i -= 2
return primes
def get_n_primes_above_x(n, x):
'''
steps forward until n primes (other than 2) have been
found that are smaller than x.
'''
primes = []
i = x+1
if i % 2 == 0:
i += 1
while len(primes) != n and i > 0:
if is_prime(i):
primes.append(i)
i += 2
return primes
# from http://www.rsok.com/~jrm/printprimes.html
PRIMES_1m = [1000003, 1009837]
PRIMES_100m = [100009979, 100000007]
PRIMES_1b = [1000000007, 1000000919]
PRIMES_2b = [1999999973, 1999999943]
PRIMES_4b = [4000000007, 4000000009]
PRIMES_8b = [8000000011, 8000000051]
class HashtableIntersect(object):
def __init__(self, k, size1, size2):
self._kh1 = new_hashtable(k, size1)
self._kh2 = new_hashtable(k, size2)
def consume(self, seq):
self._kh1.consume(seq)
self._kh2.consume(seq)
def get_min_count(self, seq):
return min(self._kh1.get_min_count(seq),
self._kh2.get_min_count(seq))
def get_max_count(self, seq):
return min(self._kh1.get_max_count(seq),
self._kh2.get_max_count(seq))
|
Python
| 0.000003
|
@@ -521,17 +521,17 @@
_tables=
-8
+2
):%0A p
@@ -605,121 +605,8 @@
mes%0A
-# primes = %5B 22906493, 22906519, 22906561, 22906567,%0A# 22906619, 22906649, 22906657, 22906661 %5D%0A
|
1bfb63c704ae9d947310c8f0f8250ef43aae6217
|
Update setup.py
|
setup.py
|
setup.py
|
from setuptools import setup
import rainwaveclient
setup(
name='python-rainwave-client',
version=rainwaveclient.__version__,
author=rainwaveclient.__author__,
author_email='william@subtlecoolness.com',
url='https://gutter.readthedocs.org/',
description='Python Rainwave client library',
packages=['rainwaveclient'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries'
],
license=open('LICENSE').read()
)
|
Python
| 0.000001
|
@@ -235,30 +235,54 @@
://g
-utter.readthedocs.org/
+ithub.com/williamjacksn/python-rainwave-client
',%0A
@@ -308,17 +308,8 @@
hon
-Rainwave
clie
@@ -318,16 +318,29 @@
library
+ for Rainwave
',%0A p
|
32c4ac486ded1ef4d4e37f182072bb1a3350db0c
|
Update 1.1 -> 1.2
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from setuptools import setup, find_packages
import os
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name='junit-xml',
author='Brian Beyer',
author_email='brian@kyr.us',
url='https://github.com/kyrus/python-junit-xml',
license='MIT',
packages=find_packages(),
description='Creates JUnit XML test result documents that can be read by tools such as Jenkins',
long_description=read('README.rst'),
version = "1.1",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: Freely Distributable',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Testing',
],
)
|
Python
| 0.000003
|
@@ -476,17 +476,17 @@
on = %221.
-1
+2
%22,%0A%09clas
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.