commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
8157af3da0e535074b18c76f0e5391d8cac806e8 | Add error field to expected JSON | whats_fresh/whats_fresh_api/tests/views/test_stories.py | whats_fresh/whats_fresh_api/tests/views/test_stories.py | from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from whats_fresh_api.models import *
from django.contrib.gis.db import models
import json
class StoriesTestCase(TestCase):
fixtures = ['whats_fresh_api/tests/testdata/test_fixtures.json']
def setUp(self):
self.expected_json = """"
{
error": {
"error_status": false,
"error_name": null,
"error_text": null,
"error_level": null
},
{
"story": "These are the voyages of the Starfish Enterblub; her five year mission -- to seek out new fish and new fishilizations..."
}
}"""
def test_url_endpoint(self):
url = reverse('story-details', kwargs={'id': '1'})
self.assertEqual(url, '/stories/1')
def test_json_equals(self):
c = Client()
response = c.get(reverse('story-details', kwargs={'id': '1'})).content
parsed_answer = json.loads(response)
expected_answer = json.loads(self.expected_json)
self.assertTrue(parsed_answer == expected_answer)
| from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from whats_fresh_api.models import *
from django.contrib.gis.db import models
import json
class StoriesTestCase(TestCase):
fixtures = ['whats_fresh_api/tests/testdata/test_fixtures.json']
def setUp(self):
self.expected_json = """
{
"story": "These are the voyages of the Starfish Enterblub; her five year mission -- to seek out new fish and new fishilizations..."
}"""
def test_url_endpoint(self):
url = reverse('story-details', kwargs={'id': '1'})
self.assertEqual(url, '/stories/1')
def test_json_equals(self):
c = Client()
response = c.get(reverse('story-details', kwargs={'id': '1'})).content
parsed_answer = json.loads(response)
expected_answer = json.loads(self.expected_json)
self.assertTrue(parsed_answer == expected_answer)
| Python | 0.000001 |
feab9b1067a42a6d5d8586361ab1d02f1844aa7e | Remove unused imports | tests/integration/api/conftest.py | tests/integration/api/conftest.py | """
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
API-specific fixtures
"""
import pytest
from .helpers import assemble_authorization_header
API_TOKEN = 'just-say-PLEASE!'
@pytest.fixture(scope='package')
# `admin_app` fixture is required because it sets up the database.
def api_app(admin_app, make_admin_app):
config_overrides = {
'API_TOKEN': API_TOKEN,
'SERVER_NAME': 'api.acmecon.test',
}
app = make_admin_app(**config_overrides)
with app.app_context():
yield app
@pytest.fixture(scope='package')
def api_client(api_app):
"""Provide a test HTTP client against the API."""
return api_app.test_client()
@pytest.fixture(scope='package')
def api_client_authz_header():
"""Provide a test HTTP client against the API."""
return assemble_authorization_header(API_TOKEN)
| """
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
API-specific fixtures
"""
import pytest
from tests.conftest import CONFIG_PATH_DATA_KEY
from tests.helpers import create_admin_app
from .helpers import assemble_authorization_header
API_TOKEN = 'just-say-PLEASE!'
@pytest.fixture(scope='package')
# `admin_app` fixture is required because it sets up the database.
def api_app(admin_app, make_admin_app):
config_overrides = {
'API_TOKEN': API_TOKEN,
'SERVER_NAME': 'api.acmecon.test',
}
app = make_admin_app(**config_overrides)
with app.app_context():
yield app
@pytest.fixture(scope='package')
def api_client(api_app):
"""Provide a test HTTP client against the API."""
return api_app.test_client()
@pytest.fixture(scope='package')
def api_client_authz_header():
"""Provide a test HTTP client against the API."""
return assemble_authorization_header(API_TOKEN)
| Python | 0.000001 |
f2139cad673ee50f027164bda80d86979d5ce7a0 | Add more imports for further functionality | passenger_wsgi.py | passenger_wsgi.py | import os
import sys
try:
from flask import Flask
import flask_login
from flask_restless import APIManager
from flask_sqlalchemy import SQLAlchemy
import requests
except ImportError:
INTERP = "venv/bin/python"
if os.path.relpath(sys.executable, os.getcwd()) != INTERP:
try:
os.execl(INTERP, INTERP, *sys.argv)
except OSError:
sys.exit("Could not find virtual environment. Run `:~$ ./setup.sh`")
else:
sys.exit("Could not find requirements. Are they all included in requirements.txt? Run `:~$ ./setup.sh`")
application = Flask(__name__)
@application.route("/")
def index():
return "Hello, world!"
| import os
import sys
try:
from flask import Flask, render_template, send_file, Response
import requests
except ImportError:
INTERP = "venv/bin/python"
if os.path.relpath(sys.executable, os.getcwd()) != INTERP:
try:
os.execl(INTERP, INTERP, *sys.argv)
except OSError:
sys.exit("Could not find virtual environment. Run `:~$ ./setup.sh`")
else:
sys.exit("Could not find requirements. Are they all included in requirements.txt? Run `:~$ ./setup.sh`")
application = Flask(__name__)
@application.route("/")
def index():
return "Hello, world!"
| Python | 0 |
62ec46d6dddf1eb0054861d886ab6493d56670d5 | Switch `open()` for `salt.utils.fopen()` | tests/integration/shell/syndic.py | tests/integration/shell/syndic.py | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
tests.integration.shell.syndic
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import python libs
import os
import yaml
import signal
import shutil
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
import salt.utils
class SyndicTest(integration.ShellCase, integration.ShellCaseCommonTestsMixIn):
_call_binary_ = 'salt-syndic'
def test_issue_7754(self):
old_cwd = os.getcwd()
config_dir = os.path.join(integration.TMP, 'issue-7754')
if not os.path.isdir(config_dir):
os.makedirs(config_dir)
os.chdir(config_dir)
for fname in ('master', 'minion'):
pid_path = os.path.join(config_dir, '{0}.pid'.format(fname))
with salt.utils.fopen(self.get_config_file_path(fname), 'r') as fhr:
config = yaml.load(fhr.read())
config['log_file'] = config['syndic_log_file'] = 'file:///tmp/log/LOG_LOCAL3'
config['root_dir'] = config_dir
if 'ret_port' in config:
config['ret_port'] = int(config['ret_port']) + 10
config['publish_port'] = int(config['publish_port']) + 10
with salt.utils.fopen(os.path.join(config_dir, fname), 'w') as fhw:
fhw.write(
yaml.dump(config, default_flow_style=False)
)
ret = self.run_script(
self._call_binary_,
'--config-dir={0} --pid-file={1} -l debug'.format(
config_dir,
pid_path
),
timeout=5,
catch_stderr=True,
with_retcode=True
)
# Now kill it if still running
if os.path.exists(pid_path):
with salt.utils.fopen(pid_path) as fhr:
try:
os.kill(int(fhr.read()), signal.SIGKILL)
except OSError:
pass
try:
self.assertFalse(os.path.isdir(os.path.join(config_dir, 'file:')))
self.assertIn(
'Failed to setup the Syslog logging handler', '\n'.join(ret[1])
)
self.assertEqual(ret[2], 2)
finally:
os.chdir(old_cwd)
if os.path.isdir(config_dir):
shutil.rmtree(config_dir)
if __name__ == '__main__':
from integration import run_tests
run_tests(SyndicTest)
| # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
tests.integration.shell.syndic
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import python libs
import os
import yaml
import signal
import shutil
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
import salt.utils
class SyndicTest(integration.ShellCase, integration.ShellCaseCommonTestsMixIn):
_call_binary_ = 'salt-syndic'
def test_issue_7754(self):
old_cwd = os.getcwd()
config_dir = os.path.join(integration.TMP, 'issue-7754')
if not os.path.isdir(config_dir):
os.makedirs(config_dir)
os.chdir(config_dir)
for fname in ('master', 'minion'):
pid_path = os.path.join(config_dir, '{0}.pid'.format(fname))
with salt.utils.fopen(self.get_config_file_path(fname), 'r') as fhr:
config = yaml.load(fhr.read())
config['log_file'] = config['syndic_log_file'] = 'file:///tmp/log/LOG_LOCAL3'
config['root_dir'] = config_dir
if 'ret_port' in config:
config['ret_port'] = int(config['ret_port']) + 10
config['publish_port'] = int(config['publish_port']) + 10
with salt.utils.fopen(os.path.join(config_dir, fname), 'w') as fhw:
fhw.write(
yaml.dump(config, default_flow_style=False)
)
ret = self.run_script(
self._call_binary_,
'--config-dir={0} --pid-file={1} -l debug'.format(
config_dir,
pid_path
),
timeout=5,
catch_stderr=True,
with_retcode=True
)
# Now kill it if still running
if os.path.exists(pid_path):
try:
os.kill(int(open(pid_path).read()), signal.SIGKILL)
except OSError:
pass
try:
self.assertFalse(os.path.isdir(os.path.join(config_dir, 'file:')))
self.assertIn(
'Failed to setup the Syslog logging handler', '\n'.join(ret[1])
)
self.assertEqual(ret[2], 2)
finally:
os.chdir(old_cwd)
if os.path.isdir(config_dir):
shutil.rmtree(config_dir)
if __name__ == '__main__':
from integration import run_tests
run_tests(SyndicTest)
| Python | 0 |
e019a2b5de66dbbc0ed76942824ec3d33bcac6fd | Add integration test for @returns | tests/integration/test_returns.py | tests/integration/test_returns.py | # Standard library imports
import collections
# Local imports.
import uplink
# Constants
BASE_URL = "https://api.github.com/"
# Schemas
User = collections.namedtuple("User", "id name")
Repo = collections.namedtuple("Repo", "owner name")
# Converters
@uplink.loads(User)
def user_reader(cls, response):
return cls(**response.json())
@uplink.loads.from_json(Repo)
def repo_json_reader(cls, json):
return cls(**json)
@uplink.dumps.to_json(Repo)
def repo_json_writer(_, repo):
return {"owner": repo.owner, "name": repo.name}
# Service
class GitHub(uplink.Consumer):
@uplink.returns(User)
@uplink.get("/users/{user}")
def get_user(self, user):
pass
@uplink.returns.from_json(type=Repo)
@uplink.get("/users/{user}/repos/{repo}")
def get_repo(self, user, repo):
pass
@uplink.returns.from_json(type=uplink.types.List[Repo], key="data")
@uplink.get("/users/{user}/repos")
def get_repos(self, user):
pass
@uplink.json
@uplink.post("/users/{user}/repos", args={"repo": uplink.Body(Repo)})
def create_repo(self, user, repo):
pass
# Tests
def test_returns_with_type(mock_client, mock_response):
# Setup
mock_response.with_json({"id": 123, "name": "prkumar"})
mock_client.with_response(mock_response)
github = GitHub(
base_url=BASE_URL, client=mock_client, converters=user_reader
)
# Run
user = github.get_user("prkumar")
# Verify
assert User(id=123, name="prkumar") == user
def test_returns_json_with_type(mock_client, mock_response):
# Setup
mock_response.with_json({"owner": "prkumar", "name": "uplink"})
mock_client.with_response(mock_response)
github = GitHub(
base_url=BASE_URL, client=mock_client, converters=repo_json_reader
)
# Run
repo = github.get_repo("prkumar", "uplink")
# Verify
assert Repo(owner="prkumar", name="uplink") == repo
def test_returns_json_with_list(mock_client, mock_response):
# Setup
mock_response.with_json(
{
"data": [
{"owner": "prkumar", "name": "uplink"},
{"owner": "prkumar", "name": "uplink-protobuf"},
],
"errors": [],
}
)
mock_client.with_response(mock_response)
github = GitHub(
base_url=BASE_URL, client=mock_client, converters=repo_json_reader
)
# Run
repo = github.get_repos("prkumar")
# Verify
assert [
Repo(owner="prkumar", name="uplink"),
Repo(owner="prkumar", name="uplink-protobuf"),
] == repo
def test_post_json(mock_client):
# Setup
github = GitHub(
base_url=BASE_URL, client=mock_client, converters=repo_json_writer
)
github.create_repo("prkumar", Repo(owner="prkumar", name="uplink"))
request = mock_client.history[0]
assert request.json == {"owner": "prkumar", "name": "uplink"}
| # Standard library imports
import collections
# Local imports.
import uplink
# Constants
BASE_URL = "https://api.github.com/"
# Schemas
Repo = collections.namedtuple("Repo", "owner name")
# Converters
@uplink.loads.from_json(Repo)
def repo_loader(cls, json):
return cls(**json)
@uplink.dumps.to_json(Repo)
def repo_dumper(_, repo):
return {"owner": repo.owner, "name": repo.name}
# Service
class GitHub(uplink.Consumer):
@uplink.returns.from_json(type=Repo)
@uplink.get("/users/{user}/repos/{repo}")
def get_repo(self, user, repo):
pass
@uplink.returns.from_json(type=uplink.types.List[Repo], key="data")
@uplink.get("/users/{user}/repos")
def get_repos(self, user):
pass
@uplink.json
@uplink.post("/users/{user}/repos", args={"repo": uplink.Body(Repo)})
def create_repo(self, user, repo):
pass
# Tests
def test_returns_json_with_type(mock_client, mock_response):
# Setup
mock_response.with_json({"owner": "prkumar", "name": "uplink"})
mock_client.with_response(mock_response)
github = GitHub(
base_url=BASE_URL, client=mock_client, converters=repo_loader
)
# Run
repo = github.get_repo("prkumar", "uplink")
# Verify
assert Repo(owner="prkumar", name="uplink") == repo
def test_returns_json_with_list(mock_client, mock_response):
# Setup
mock_response.with_json(
{
"data": [
{"owner": "prkumar", "name": "uplink"},
{"owner": "prkumar", "name": "uplink-protobuf"},
],
"errors": [],
}
)
mock_client.with_response(mock_response)
github = GitHub(
base_url=BASE_URL, client=mock_client, converters=repo_loader
)
# Run
repo = github.get_repos("prkumar")
# Verify
assert [
Repo(owner="prkumar", name="uplink"),
Repo(owner="prkumar", name="uplink-protobuf"),
] == repo
def test_post_json(mock_client):
# Setup
github = GitHub(
base_url=BASE_URL, client=mock_client, converters=repo_dumper
)
github.create_repo("prkumar", Repo(owner="prkumar", name="uplink"))
request = mock_client.history[0]
assert request.json == {"owner": "prkumar", "name": "uplink"}
| Python | 0 |
f4e6f2c6eb77876b646da14805ee496b0b25f0bc | Support PortOpt from oslo.cfg | dragonflow/common/common_params.py | dragonflow/common/common_params.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from neutron.i18n import _
df_opts = [
cfg.StrOpt('remote_db_ip',
default='127.0.0.1',
help=_('The remote db server ip address')),
cfg.PortOpt('remote_db_port',
default=4001,
help=_('The remote db server port')),
cfg.StrOpt('nb_db_class',
default='dragonflow.db.drivers.etcd_db_driver.EtcdDbDriver',
help=_('The driver class for the NB DB driver')),
cfg.StrOpt('local_ip',
default='127.0.0.1',
help=_('Local host IP')),
cfg.StrOpt('tunnel_type',
default='geneve',
help=_('The encapsulation type for the tunnel')),
cfg.StrOpt('apps_list',
default='l2_app.L2App,l3_app.L3App',
help=_('List of openflow applications classes to load')),
cfg.BoolOpt('use_centralized_ipv6_DHCP',
default=False,
help=_("Enable IPv6 DHCP by using DHCP agent"))
]
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from neutron.i18n import _
df_opts = [
cfg.StrOpt('remote_db_ip',
default='127.0.0.1',
help=_('The remote db server ip address')),
cfg.IntOpt('remote_db_port',
default=4001,
help=_('The remote db server port')),
cfg.StrOpt('nb_db_class',
default='dragonflow.db.drivers.etcd_db_driver.EtcdDbDriver',
help=_('The driver class for the NB DB driver')),
cfg.StrOpt('local_ip',
default='127.0.0.1',
help=_('Local host IP')),
cfg.StrOpt('tunnel_type',
default='geneve',
help=_('The encapsulation type for the tunnel')),
cfg.StrOpt('apps_list',
default='l2_app.L2App,l3_app.L3App',
help=_('List of openflow applications classes to load')),
cfg.BoolOpt('use_centralized_ipv6_DHCP',
default=False,
help=_("Enable IPv6 DHCP by using DHCP agent"))
]
| Python | 0.000001 |
4ec09eb10aa352175769cc00f189ece719802ea6 | remove temperature for now | lled.py | lled.py | #!/usr/bin/env python
"""Mookfist LimitlessLED Control
This tool can be used to control your LimitlessLED based lights.
Usage:
lled.py fade <start> <end> (--group=<GROUP>)... [options]
lled.py fadec <start> <end> (--group=<GROUP>)... [options]
lled.py fadeb <startb> <endb> <startc> <endc> (--group=<GROUP>)... [options]
lled.py on (--group=<group>)... [options]
lled.py off (--group=<group>)... [options]
lled.py color <color> (--group=<GROUP>)... [options]
lled.py colorcycle (--group=<GROUP>)... [options]
lled.py rgb <r> <g> <b> (--group=<GROUP>)... [options]
lled.py white (--group=<GROUP>)... [options]
lled.py brightness <brightness> (--group=<GROUP>)... [options]
lled.py scan [options]
Options:
-h --bridge-ip=HOST IP / Hostname of the bridge
-p --bridge-port=PORT Port number of the bridge (defaults to 8899 or 5987)
--bridge-version=VERSION Bridge version (defaults to 4)
-g GROUP --group=GROUP Group number (defaults to 1)
--bulb=BULB Bulb type
-r RC --repeat=RC Number of times to repeat a command
--pause=PAUSE Number of milliseconds to wait between commands
--debug Enable debugging output
-h --help Show this help
--help-bulbtypes Display possible bulb type values
"""
import logging
from docopt import docopt
from mookfist_lled_controller.cli import configure_logger
from mookfist_lled_controller.cli import Main
def main():
"""Main function!"""
arguments = docopt(__doc__, version='Mookfist LimitlessLED Control 0.0.1')
configure_logger(arguments['--debug'])
log = logging.getLogger('lled')
log.info('Welcome to the Mookfist LimitlessLED Controller')
try:
m = Main(arguments)
m.run()
except KeyboardInterrupt:
log.warning('Stopping')
if __name__ == '__main__':
main()
| #!/usr/bin/env python
"""Mookfist LimitlessLED Control
This tool can be used to control your LimitlessLED based lights.
Usage:
lled.py fade <start> <end> (--group=<GROUP>)... [options]
lled.py fadec <start> <end> (--group=<GROUP>)... [options]
lled.py fadeb <startb> <endb> <startc> <endc> (--group=<GROUP>)... [options]
lled.py on (--group=<group>)... [options]
lled.py off (--group=<group>)... [options]
lled.py color <color> (--group=<GROUP>)... [options]
lled.py colorcycle (--group=<GROUP>)... [options]
lled.py rgb <r> <g> <b> (--group=<GROUP>)... [options]
lled.py white (--group=<GROUP>)... [options]
lled.py brightness <brightness> (--group=<GROUP>)... [options]
lled.py temperature <temp> (--group=<GROUP>)... [options]
lled.py scan [options]
Options:
-h --bridge-ip=HOST IP / Hostname of the bridge
-p --bridge-port=PORT Port number of the bridge (defaults to 8899 or 5987)
--bridge-version=VERSION Bridge version (defaults to 4)
-g GROUP --group=GROUP Group number (defaults to 1)
--bulb=BULB Bulb type
-r RC --repeat=RC Number of times to repeat a command
--pause=PAUSE Number of milliseconds to wait between commands
--debug Enable debugging output
-h --help Show this help
--help-bulbtypes Display possible bulb type values
"""
import logging
from docopt import docopt
from mookfist_lled_controller.cli import configure_logger
from mookfist_lled_controller.cli import Main
def main():
"""Main function!"""
arguments = docopt(__doc__, version='Mookfist LimitlessLED Control 0.0.1')
configure_logger(arguments['--debug'])
log = logging.getLogger('lled')
log.info('Welcome to the Mookfist LimitlessLED Controller')
try:
m = Main(arguments)
m.run()
except KeyboardInterrupt:
log.warning('Stopping')
if __name__ == '__main__':
main()
| Python | 0 |
a324e8de7dc0bcb1676a8ae506d139f05751b233 | fix lint for tests | tests/test_relation_identifier.py | tests/test_relation_identifier.py | from __future__ import absolute_import
import pytest
from catpy.client import ConnectorRelation, CatmaidClient
from catpy.applications import RelationIdentifier
from tests.common import relation_identifier, connectors_types # noqa
def test_from_id(relation_identifier): # noqa
assert relation_identifier.from_id(0) == ConnectorRelation.presynaptic_to
def test_to_id(relation_identifier): # noqa
assert relation_identifier.to_id(ConnectorRelation.presynaptic_to) == 0
@pytest.fixture
def real_relation_identifier(credentials):
return RelationIdentifier(CatmaidClient(**credentials))
def populate_relid(relid):
relid._get_dict(False, None)
relid._get_dict(True, None)
def test_from_id_real(real_relation_identifier):
populate_relid(real_relation_identifier)
assert real_relation_identifier.id_to_relation
def test_to_id_real(real_relation_identifier):
populate_relid(real_relation_identifier)
assert real_relation_identifier.relation_to_id
| from __future__ import absolute_import
import pytest
from catpy.client import ConnectorRelation, CatmaidClient
from catpy.applications import RelationIdentifier
from tests.common import relation_identifier, connectors_types # noqa
def test_from_id(relation_identifier): # noqa
assert relation_identifier.from_id(0) == ConnectorRelation.presynaptic_to
def test_to_id(relation_identifier): # noqa
assert relation_identifier.to_id(ConnectorRelation.presynaptic_to) == 0
@pytest.fixture
def real_relation_identifier(credentials):
return RelationIdentifier(CatmaidClient(**credentials))
def populate_relid(relation_identifier):
relation_identifier._get_dict(False, None)
relation_identifier._get_dict(True, None)
def test_from_id_real(real_relation_identifier):
populate_relid(real_relation_identifier)
assert real_relation_identifier.id_to_relation
def test_to_id_real(real_relation_identifier):
populate_relid(real_relation_identifier)
assert real_relation_identifier.relation_to_id
| Python | 0.000001 |
ad4b9ffb7292a5b810df033088008cd503bc1169 | Add pre-fabricated fake PyPI envs at the top. | tests/unit/test_spec_resolving.py | tests/unit/test_spec_resolving.py | import unittest
from piptools.datastructures import SpecSet
from piptools.package_manager import FakePackageManager
def print_specset(specset, round):
print('After round #%s:' % (round,))
for spec in specset:
print(' - %s' % (spec.description(),))
simple = {
'foo-0.1': ['bar'],
'bar-1.2': ['qux', 'simplejson'],
'qux-0.1': ['simplejson<2.6'],
'simplejson-2.4.0': [],
'simplejson-2.6.2': [],
}
class TestDependencyResolving(unittest.TestCase):
def test_find_dependencies_simple(self):
"""A simple scenario for finding dependencies."""
pkgmgr = FakePackageManager(simple)
spec_set = SpecSet()
spec_set.add_spec('foo')
round = 1
print_specset(spec_set, round)
while True:
round += 1
new_deps = []
for spec in spec_set.normalize():
name, version = pkgmgr.find_best_match(spec)
new_deps += pkgmgr.get_dependencies(name, version)
if not new_deps:
break
# TODO: We should detect whether adding the new_deps really
# "changes anything" to the spec set. In order words: if no
# significant new constraints are added, we're done
# XXX: FIXME: Current, we "just stop" after X rounds (to prevent
# endless loops), but obviously this is not the correct impl!
if round > 4:
break
spec_set.add_specs(new_deps)
print_specset(spec_set, round)
# Print the final result:
print_specset(spec_set.normalize(), 'final')
spec_set = spec_set.normalize()
self.assertItemsEqual(['foo', 'qux', 'bar', 'simplejson<2.6'], map(str, spec_set))
| import unittest
from piptools.datastructures import SpecSet
from piptools.package_manager import FakePackageManager
def print_specset(specset, round):
print('After round #%s:' % (round,))
for spec in specset:
print(' - %s' % (spec.description(),))
class TestDependencyResolving(unittest.TestCase):
def test_find_dependencies_simple(self):
"""A simple scenario for finding dependencies."""
content = {
'foo-0.1': ['bar'],
'bar-1.2': ['qux', 'simplejson'],
'qux-0.1': ['simplejson<2.6'],
'simplejson-2.4.0': [],
'simplejson-2.6.2': [],
}
pkgmgr = FakePackageManager(content)
spec_set = SpecSet()
spec_set.add_spec('foo')
round = 1
print_specset(spec_set, round)
while True:
round += 1
new_deps = []
for spec in spec_set.normalize():
name, version = pkgmgr.find_best_match(spec)
new_deps += pkgmgr.get_dependencies(name, version)
if not new_deps:
break
# TODO: We should detect whether adding the new_deps really
# "changes anything" to the spec set. In order words: if no
# significant new constraints are added, we're done
# XXX: FIXME: Current, we "just stop" after X rounds (to prevent
# endless loops), but obviously this is not the correct impl!
if round > 4:
break
spec_set.add_specs(new_deps)
print_specset(spec_set, round)
# Print the final result:
print_specset(spec_set.normalize(), 'final')
spec_set = spec_set.normalize()
self.assertItemsEqual(['foo', 'qux', 'bar', 'simplejson<2.6'], map(str, spec_set))
| Python | 0 |
bbfa9c3135ebdc5a99257d62556b691f8c87a26c | Update irrigate.py | device/src/irrigate.py | device/src/irrigate.py | #!/usr/bin/env python
#In this project, I use a servo to simulate the water tap.
#Roating to 90 angle suggest that the water tap is open, and 0 angle means close.
#Pin connection:
#deep red <--> GND
#red <--> VCC
#yellow <--> signal(X1)
#Update!!!!!
#Use real water pump(RS360) to irrigate the plants, need to use relay to drive the pump which is powered by 5V power.
#
from pyb import Servo
servo=Servo(1) # X1
def irrigate_start():
servo.angle(90)
def irrigate_stop():
servo.angle(0)
| #!/usr/bin/env python
#In this project, I use a servo to simulate the water tap.
#Roating to 90 angle suggest that the water tap is open, and 0 angle means close.
#Pin connection:
#deep red <--> GND
#red <--> VCC
#yellow <--> signal(X1)
from pyb import Servo
servo=Servo(1) # X1
def irrigate_start():
servo.angle(90)
def irrigate_stop():
servo.angle(0)
| Python | 0.000001 |
173d7ffefe10e8896055bd5b41272c2d0a1f8889 | Update version to 0.1.6 for upcoming release | pdblp/_version.py | pdblp/_version.py | __version__ = "0.1.6"
| __version__ = "0.1.5"
| Python | 0 |
b87ebc9dbbc33928345a83ac8ea0ce71806ac024 | simplify play down to wall and standard defense | soccer/gameplay/plays/Defend_Restart_Defensive/BasicDefendRestartDefensive.py | soccer/gameplay/plays/Defend_Restart_Defensive/BasicDefendRestartDefensive.py | import main
import robocup
import behavior
import constants
import enum
import standard_play
import tactics.positions.submissive_goalie as submissive_goalie
import tactics.positions.submissive_defender as submissive_defender
import evaluation.opponent as eval_opp
import tactics.positions.wing_defender as wing_defender
import skills.mark as mark
import tactics.wall as wall
import situational_play_selection
## Restart that uses standard defense and uses the remaining
# robots to form a wall
#
class BasicDefendRestartDefensive(standard_play.StandardPlay):
_situationList = [
situational_play_selection.SituationalPlaySelector.Situation.DEFEND_RESTART_DEFENSIVE
] # yapf: disable
def __init__(self, num_defenders=2):
super().__init__(continuous=True)
self.num_defenders = num_defenders
self.add_transition(behavior.Behavior.State.start,
behavior.Behavior.State.running, lambda: True,
'Immediately')
self.add_subbehavior(wall.Wall(), 'wall', required=False)
| import main
import robocup
import behavior
import constants
import enum
import standard_play
import tactics.positions.submissive_goalie as submissive_goalie
import tactics.positions.submissive_defender as submissive_defender
import evaluation.opponent as eval_opp
import tactics.positions.wing_defender as wing_defender
import skills.mark as mark
import tactics.defense
import situational_play_selection
## Play that uses submissive defenders to defend
# an attack close to our goal.
#
# By default, we will use standard defense (two submissive
# defenders, one goalie) and additional marking robots.
#
class BasicDefendRestartDefensive(standard_play.StandardPlay):
_situationList = [
situational_play_selection.SituationalPlaySelector.Situation.DEFEND_RESTART_DEFENSIVE
] # yapf: disable
def __init__(self, num_defenders=2):
super().__init__(continuous=True)
self.num_defenders = num_defenders
self.add_transition(behavior.Behavior.State.start,
behavior.Behavior.State.running, lambda: True,
'Immediately')
for i in range(num_defenders):
self.add_subbehavior(mark.Mark(), 'mark' + str(i), required=False)
# Keep track of which robots are currently being defended
self.defended = {}
for i in range(len(main.their_robots())):
self.defended[i] = False
def execute_running(self):
for i in range(len(main.their_robots())):
if not eval_opp.is_marked(main.their_robots()[i].pos):
self.defended[i] = False
# mark highest threat robot
for i in range(self.num_defenders):
mark_bhvr = self.subbehavior_with_name('mark' + str(i))
threat_found = False
for threat_pt, _, _ in eval_opp.get_threat_list([mark_bhvr]):
print(threat_pt)
closest_opp = eval_opp.get_closest_opponent(threat_pt)
if not threat_found and (closest_opp.pos - main.ball().pos).mag() > constants.Field.CenterRadius + constants.Robot.Radius * 2:
print((closest_opp.pos - main.ball().pos).mag())
# print(constants.Field.CenterRadius)
mark_bhvr.mark_robot = closest_opp
threat_found = True
| Python | 0.000027 |
abae242bbcdc3eefcd0ab1ff29f660f89d47db1a | Add absolute URL for Surprises | mirigata/surprise/models.py | mirigata/surprise/models.py | from django.core.urlresolvers import reverse
from django.db import models
class Surprise(models.Model):
link = models.URLField(max_length=500)
description = models.TextField(max_length=1000)
def get_absolute_url(self):
return reverse('surprise-detail', kwargs={"pk": self.id})
| from django.db import models
class Surprise(models.Model):
link = models.URLField(max_length=500)
description = models.TextField(max_length=1000)
| Python | 0 |
c0fdbf78fcc6b74086cc40e8e0deb273dee6d03c | Update BUILD_OSS to 4666. | src/data/version/mozc_version_template.bzl | src/data/version/mozc_version_template.bzl | # Copyright 2010-2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
MAJOR = 2
MINOR = 26
# BUILD number used for the OSS version.
BUILD_OSS = 4666
# Number to be increased. This value may be replaced by other tools.
BUILD = BUILD_OSS
# Represent the platform and release channel.
REVISION = 100
REVISION_MACOS = REVISION + 1
# This version represents the version of Mozc IME engine (converter, predictor,
# etc.). This version info is included both in the Mozc server and in the Mozc
# data set file so that the Mozc server can accept only the compatible version
# of data set file. The engine version must be incremented when:
# * POS matcher definition and/or conversion models were changed,
# * New data are added to the data set file, and/or
# * Any changes that loose data compatibility are made.
ENGINE_VERSION = 24
# This version is used to manage the data version and is included only in the
# data set file. DATA_VERSION can be incremented without updating
# ENGINE_VERSION as long as it's compatible with the engine.
# This version should be reset to 0 when ENGINE_VERSION is incremented.
DATA_VERSION = 10
| # Copyright 2010-2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
MAJOR = 2
MINOR = 26
# BUILD number used for the OSS version.
BUILD_OSS = 4660
# Number to be increased. This value may be replaced by other tools.
BUILD = BUILD_OSS
# Represent the platform and release channel.
REVISION = 100
REVISION_MACOS = REVISION + 1
# This version represents the version of Mozc IME engine (converter, predictor,
# etc.). This version info is included both in the Mozc server and in the Mozc
# data set file so that the Mozc server can accept only the compatible version
# of data set file. The engine version must be incremented when:
# * POS matcher definition and/or conversion models were changed,
# * New data are added to the data set file, and/or
# * Any changes that loose data compatibility are made.
ENGINE_VERSION = 24
# This version is used to manage the data version and is included only in the
# data set file. DATA_VERSION can be incremented without updating
# ENGINE_VERSION as long as it's compatible with the engine.
# This version should be reset to 0 when ENGINE_VERSION is incremented.
DATA_VERSION = 10
| Python | 0 |
c5225c00191595b6d1a824ee808465e0c488769b | Add missing arg which didn't make it because of the bad merge conflict resolution. | st2stream/st2stream/controllers/v1/stream.py | st2stream/st2stream/controllers/v1/stream.py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from st2common import log as logging
from st2common.router import Response
from st2common.util.jsonify import json_encode
from st2stream.listener import get_listener
LOG = logging.getLogger(__name__)
def format(gen):
message = '''event: %s\ndata: %s\n\n'''
for pack in gen:
if not pack:
# Note: gunicorn wsgi handler expect bytes, not unicode
yield six.binary_type('\n')
else:
(event, body) = pack
# Note: gunicorn wsgi handler expect bytes, not unicode
yield six.binary_type(message % (event, json_encode(body, indent=None)))
class StreamController(object):
def get_all(self, requester_user):
def make_response():
res = Response(content_type='text/event-stream',
app_iter=format(get_listener().generator()))
return res
stream = make_response()
return stream
stream_controller = StreamController()
| # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from st2common import log as logging
from st2common.router import Response
from st2common.util.jsonify import json_encode
from st2stream.listener import get_listener
LOG = logging.getLogger(__name__)
def format(gen):
message = '''event: %s\ndata: %s\n\n'''
for pack in gen:
if not pack:
# Note: gunicorn wsgi handler expect bytes, not unicode
yield six.binary_type('\n')
else:
(event, body) = pack
# Note: gunicorn wsgi handler expect bytes, not unicode
yield six.binary_type(message % (event, json_encode(body, indent=None)))
class StreamController(object):
def get_all(self):
def make_response():
res = Response(content_type='text/event-stream',
app_iter=format(get_listener().generator()))
return res
stream = make_response()
return stream
stream_controller = StreamController()
| Python | 0 |
1df3dc91f71bf2a02b059d414ea5b041a382f1ad | change CSS selectors | shot.py | shot.py | # -*- coding: utf-8 -*-
import redis
import urllib2
from bs4 import BeautifulSoup
from datetime import datetime
url = 'http://www.x-kom.pl'
FORMAT_DATETIME = '%Y-%m-%d %H:%M:%S.%f'
redis_server = redis.Redis(host='localhost', port=6379)
def get_number(number):
return float(number.strip().split()[0].replace(',', '.'))
def get_element(soup, tag, class_name):
return soup.find(tag, {'class': class_name}).get_text()
def get_data(url):
html = urllib2.urlopen(url).read()
soup = BeautifulSoup(html, 'html.parser')
title = get_element(soup, 'p', 'product-name')
price = get_element(soup, 'div', 'new-price')
price_first = get_element(soup, 'div', 'old-price')
return { 'title': title.encode('utf-8'), 'price': get_number(price), 'price_first': get_number(price_first), 'date': datetime.now()}
def save_to_db():
item = get_data(url)
date = item['date'].strftime(FORMAT_DATETIME)
redis_server.hmset(date, item)
def show_all():
keys = redis_server.keys()
for i, key in enumerate(keys):
print '{}: {}'.format(i, redis_server.hgetall(key))
if __name__ == '__main__':
save_to_db()
# show_all()
| # -*- coding: utf-8 -*-
import redis
import urllib2
from bs4 import BeautifulSoup
from datetime import datetime
url = 'http://www.x-kom.pl'
FORMAT_DATETIME = '%Y-%m-%d %H:%M:%S.%f'
redis_server = redis.Redis(host='localhost', port=6379)
def get_number(number):
return float(number.strip().split()[0].replace(',', '.'))
def get_element(soup, tag, class_name):
return soup.find(tag, {'class': class_name}).get_text()
def get_data(url):
html = urllib2.urlopen(url).read()
soup = BeautifulSoup(html, 'html.parser')
title = get_element(soup, 'div', 'killer-product-title')
price = get_element(soup, 'div', 'killer-price')
price_first = get_element(soup, 'div', 'discount-price')
return { 'title': title.encode('utf-8'), 'price': get_number(price), 'price_first': get_number(price_first), 'date': datetime.now()}
def save_to_db():
item = get_data(url)
date = item['date'].strftime(FORMAT_DATETIME)
redis_server.hmset(date, item)
def show_all():
keys = redis_server.keys()
for i, key in enumerate(keys):
print '{}: {}'.format(i, redis_server.hgetall(key))
if __name__ == '__main__':
save_to_db()
# show_all()
| Python | 0.000001 |
8b944f04ebf9b635029182a3137e9368edafe9d2 | Handle exception for bad search strings | pgsearch/utils.py | pgsearch/utils.py | from django.contrib.postgres.search import SearchVector, SearchRank, SearchQuery
import shlex
import string
def parseSearchString(search_string):
try:
search_strings = shlex.split(search_string)
translator = str.maketrans({key: None for key in string.punctuation})
search_strings = [s.translate(translator) for s in search_strings]
except:
search_strings = []
return search_strings
def createSearchQuery(list_of_terms):
if len(list_of_terms) > 0:
q = SearchQuery(list_of_terms[0])
for term in list_of_terms[1:]:
q = q & SearchQuery(term)
return q
else:
return None
def searchPostgresDB(search_string, Table, config, rank, *fields):
list_of_terms = parseSearchString(search_string)
search_query = createSearchQuery(list_of_terms)
if rank == True:
vector = SearchVector(*fields, config=config)
objs = Table.objects.annotate(rank=SearchRank(vector, search_query)).\
order_by('-rank')
else:
objs = Table.objects.annotate(search=SearchVector(*fields,
config=config),).\
filter(search=search_query)
return objs
| from django.contrib.postgres.search import SearchVector, SearchRank, SearchQuery
import shlex
import string
def parseSearchString(search_string):
search_strings = shlex.split(search_string)
translator = str.maketrans({key: None for key in string.punctuation})
search_strings = [s.translate(translator) for s in search_strings]
return search_strings
def createSearchQuery(list_of_terms):
if len(list_of_terms) > 0:
q = SearchQuery(list_of_terms[0])
for term in list_of_terms[1:]:
q = q & SearchQuery(term)
return q
else:
return None
def searchPostgresDB(search_string, Table, config, rank, *fields):
list_of_terms = parseSearchString(search_string)
search_query = createSearchQuery(list_of_terms)
if rank == True:
vector = SearchVector(*fields, config=config)
objs = Table.objects.annotate(rank=SearchRank(vector, search_query)).\
order_by('-rank')
else:
objs = Table.objects.annotate(search=SearchVector(*fields,
config=config),).\
filter(search=search_query)
return objs
| Python | 0.000006 |
6df0e3efd239f7be073057ede44033dc95064a23 | Fix StringIO import | teuthology/task/tests/test_run.py | teuthology/task/tests/test_run.py | import logging
import pytest
from io import StringIO
from teuthology.exceptions import CommandFailedError
log = logging.getLogger(__name__)
class TestRun(object):
"""
Tests to see if we can make remote procedure calls to the current cluster
"""
def test_command_failed_label(self, ctx, config):
result = ""
try:
ctx.cluster.run(
args=["python", "-c", "assert False"],
label="working as expected, nothing to see here"
)
except CommandFailedError as e:
result = str(e)
assert "working as expected" in result
def test_command_failed_no_label(self, ctx, config):
with pytest.raises(CommandFailedError):
ctx.cluster.run(
args=["python", "-c", "assert False"],
)
def test_command_success(self, ctx, config):
result = StringIO()
ctx.cluster.run(
args=["python", "-c", "print('hi')"],
stdout=result
)
assert result.getvalue().strip() == "hi"
| import logging
import pytest
from StringIO import StringIO
from teuthology.exceptions import CommandFailedError
log = logging.getLogger(__name__)
class TestRun(object):
"""
Tests to see if we can make remote procedure calls to the current cluster
"""
def test_command_failed_label(self, ctx, config):
result = ""
try:
ctx.cluster.run(
args=["python", "-c", "assert False"],
label="working as expected, nothing to see here"
)
except CommandFailedError as e:
result = str(e)
assert "working as expected" in result
def test_command_failed_no_label(self, ctx, config):
with pytest.raises(CommandFailedError):
ctx.cluster.run(
args=["python", "-c", "assert False"],
)
def test_command_success(self, ctx, config):
result = StringIO()
ctx.cluster.run(
args=["python", "-c", "print('hi')"],
stdout=result
)
assert result.getvalue().strip() == "hi"
| Python | 0.000001 |
3c1a658195145ff1c0f20b677c50f5932e5ac66a | fix yield statement | dusty/compiler/compose/__init__.py | dusty/compiler/compose/__init__.py | import yaml
import pprint
from .. import get_assembled_specs
from ...source import repo_path
from ..port_spec import port_spec_document
from ... import constants
def write_compose_file():
compose_dict = get_compose_dict()
print pprint.pformat(compose_dict)
with open("{}/docker-compose.yml".format(constants.COMPOSE_DIR), 'w') as f:
f.write(yaml.dump(compose_dict, default_flow_style=False, width=10000))
yield "Written to {}".format("{}/docker-compose.yml".format(constants.COMPOSE_DIR)).encode('utf-8')
def get_compose_dict():
assembled_specs = get_assembled_specs()
port_specs = port_spec_document(assembled_specs)
compose_dict = {}
for app_name in assembled_specs['apps'].keys():
compose_dict[app_name] = _composed_app_dict(app_name, assembled_specs, port_specs)
for service_name in assembled_specs.get('services', []):
compose_dict[service_name] = _composed_service_dict(service_name, assembled_specs)
return compose_dict
def _composed_app_dict(app_name, assembled_specs, port_specs):
app_spec = assembled_specs['apps'][app_name]
compose_bundle = app_spec.get("compose", {})
compose_bundle['image'] = app_spec['image']
compose_bundle['command'] = _compile_docker_command(app_spec)
compose_bundle['links'] = app_spec.get('depends', {}).get('services', [])
compose_bundle['volumes'] = _get_compose_volumes(app_name, assembled_specs)
port_str = _get_ports_list(app_name, port_specs)
if port_str:
compose_bundle['ports'] = port_str
return compose_bundle
def _composed_service_dict(service_name, assembled_specs):
return assembled_specs['services'][service_name]
def _get_ports_list(app_name, port_specs):
if app_name not in port_specs['docker_compose']:
return None
return ["{}:{}".format(port_specs['docker_compose'][app_name]['mapped_host_port'],
port_specs['docker_compose'][app_name]['in_container_port'])]
def _compile_docker_command(app_spec):
first_run_file = constants.FIRST_RUN_FILE
command = []
command.append("export PATH=$PATH:{}".format(_container_code_path(app_spec)))
command.append("if [ ! -f {} ]".format(first_run_file))
once_command = app_spec['commands'].get("once", "")
command.append("then touch {}; fi".format(first_run_file))
if once_command:
command.append(once_command)
command.append(app_spec['commands']['always'])
return "bash -c \"{}\"".format('; '.join(command))
def _get_compose_volumes(app_name, assembled_specs):
app_spec = assembled_specs['apps'][app_name]
volumes = []
volumes.append(_get_app_volume_mount(app_spec))
volumes += _get_libs_volume_mounts(app_name, assembled_specs)
return volumes
def _get_app_volume_mount(app_spec):
app_repo_path = repo_path(app_spec['repo'])
return "{}:{}".format(app_repo_path, _container_code_path(app_spec))
def _container_code_path(spec):
return "/gc/{}".format(spec['repo'].split('/')[-1])
def _get_libs_volume_mounts(app_name, assembled_specs):
volumes = []
for lib_name in assembled_specs['apps'][app_name].get('depends', {}).get('libs', []):
lib_spec = assembled_specs['libs'][lib_name]
lib_repo_path = repo_path(lib_spec['repo'])
volumes.append("{}:{}".format(lib_repo_path, _container_code_path(lib_spec)))
return volumes
| import yaml
import pprint
from .. import get_assembled_specs
from ...source import repo_path
from ..port_spec import port_spec_document
from ... import constants
def write_compose_file():
compose_dict = get_compose_dict()
print pprint.pformat(compose_dict)
with open("{}/docker-compose.yml".format(constants.COMPOSE_DIR), 'w') as f:
f.write(yaml.dump(compose_dict, default_flow_style=False, width=10000))
yield "Written to {}".format(constants.COMPOSE_YML_PATH).encode('utf-8')
def get_compose_dict():
assembled_specs = get_assembled_specs()
port_specs = port_spec_document(assembled_specs)
compose_dict = {}
for app_name in assembled_specs['apps'].keys():
compose_dict[app_name] = _composed_app_dict(app_name, assembled_specs, port_specs)
for service_name in assembled_specs.get('services', []):
compose_dict[service_name] = _composed_service_dict(service_name, assembled_specs)
return compose_dict
def _composed_app_dict(app_name, assembled_specs, port_specs):
app_spec = assembled_specs['apps'][app_name]
compose_bundle = app_spec.get("compose", {})
compose_bundle['image'] = app_spec['image']
compose_bundle['command'] = _compile_docker_command(app_spec)
compose_bundle['links'] = app_spec.get('depends', {}).get('services', [])
compose_bundle['volumes'] = _get_compose_volumes(app_name, assembled_specs)
port_str = _get_ports_list(app_name, port_specs)
if port_str:
compose_bundle['ports'] = port_str
return compose_bundle
def _composed_service_dict(service_name, assembled_specs):
return assembled_specs['services'][service_name]
def _get_ports_list(app_name, port_specs):
if app_name not in port_specs['docker_compose']:
return None
return ["{}:{}".format(port_specs['docker_compose'][app_name]['mapped_host_port'],
port_specs['docker_compose'][app_name]['in_container_port'])]
def _compile_docker_command(app_spec):
first_run_file = constants.FIRST_RUN_FILE
command = []
command.append("export PATH=$PATH:{}".format(_container_code_path(app_spec)))
command.append("if [ ! -f {} ]".format(first_run_file))
once_command = app_spec['commands'].get("once", "")
command.append("then touch {}; fi".format(first_run_file))
if once_command:
command.append(once_command)
command.append(app_spec['commands']['always'])
return "bash -c \"{}\"".format('; '.join(command))
def _get_compose_volumes(app_name, assembled_specs):
app_spec = assembled_specs['apps'][app_name]
volumes = []
volumes.append(_get_app_volume_mount(app_spec))
volumes += _get_libs_volume_mounts(app_name, assembled_specs)
return volumes
def _get_app_volume_mount(app_spec):
app_repo_path = repo_path(app_spec['repo'])
return "{}:{}".format(app_repo_path, _container_code_path(app_spec))
def _container_code_path(spec):
return "/gc/{}".format(spec['repo'].split('/')[-1])
def _get_libs_volume_mounts(app_name, assembled_specs):
volumes = []
for lib_name in assembled_specs['apps'][app_name].get('depends', {}).get('libs', []):
lib_spec = assembled_specs['libs'][lib_name]
lib_repo_path = repo_path(lib_spec['repo'])
volumes.append("{}:{}".format(lib_repo_path, _container_code_path(lib_spec)))
return volumes
| Python | 0.000001 |
cc5c52084fedf172d11534a465e155b8948da9b7 | Add support for command arguments | skal.py | skal.py | # Copyright 2012 Loop Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = '0.0.4'
__project_url__ = 'https://github.com/looplab/skal'
import sys
import errno
import argparse
import inspect
import types
class SkalApp(object):
"""A base class for command-subcommand apps.
This class is meant to be used as a base class for the actual application
class in which methods are defined that represents the subcommands.
Consider a simple case:
>>> class MyApp(SkalApp):
... @command
... def first(self):
... print("first")
...
>>> app = MyApp()
>>> app.run()
This will create a simple app which has one method that is made a command
by uisng the @command decorator. If run from the command line it will
respond to a call like this: "python myapp.py first"
"""
def __init__(self):
"""Creates the argparser using metadata from decorators.
"""
main_module = sys.modules['__main__']
version = ''
if hasattr(main_module, '__version__'):
version = str(main_module.__version__)
# Add main parser and help
self.__argparser = argparse.ArgumentParser(description = self.__doc__)
self.__argparser.add_argument(
'--version',
action = 'version',
version = ('%(prog)s v' + version))
# Add all global arguments from the __args__ dictionary
if hasattr(self.__class__, '__args__'):
_add_arguments(self.__class__.__args__, self.__argparser)
# Add all subcommands by introspection
self.__subparser = self.__argparser.add_subparsers(dest = 'command')
methods = inspect.getmembers(self.__class__, inspect.ismethod)
for name, method in methods:
if (hasattr(method, '_args')):
command = self.__subparser.add_parser(
name, help = inspect.getdoc(method))
_add_arguments(method._args, command)
bound_method = types.MethodType(method, self, self.__class__)
command.set_defaults(cmd = bound_method)
def run(self, args = None):
"""Applicatin starting point.
This will run the associated method/function/module or print a help
list if it's an unknown keyword or the syntax is incorrect.
The suggested usage is as an argument to sys.exit():
>>> sys.exit(app.run())
Keyword arguments:
args -- Custom application arguments (default sys.argv)
"""
self.args = self.__argparser.parse_args(args = args)
try:
if 'cmd' in self.args:
return self.args.cmd()
except KeyboardInterrupt:
return errno.EINTR
def command(func_or_args = None):
"""Decorator to tell Skal that the method/function is a command.
"""
def decorator(f):
f._args = args
return f
if type(func_or_args) == type(decorator):
args = {}
return decorator(func_or_args)
args = func_or_args
return decorator
def default():
"""Decorator to tell Skal that the method/function is the default.
"""
raise NotImplementedError
def _add_arguments(args, argparser):
for k in args:
arg = []
if type(k) == str:
arg.append(k)
elif type(k) == tuple:
short, full = k
if type(short) == str:
arg.append(short)
if type(full) == str:
arg.append(full)
options = args[k]
argparser.add_argument(*arg, **options)
| # Copyright 2012 Loop Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Implement tests and remove sloppy test class and main
# TODO: Detect subcommands from another module
# TODO: Detect subcommands from each module in a package
# TODO: Don't crash app if a subcommand is broken, just don't add it
# TODO: Create decorators for each subcommand to export
__version__ = '0.0.3'
__project_url__ = 'https://github.com/looplab/skal'
import sys
import errno
import argparse
import inspect
import types
class SkalApp(object):
"""A base class for command-subcommand apps.
This class is meant to be used as a base class for the actual application
class in which methods are defined that represents the subcommands.
Consider a simple case:
>>> class MyApp(SkalApp):
... @command
... def first(self):
... print("first")
...
>>> app = MyApp()
>>> app.run()
This will create a simple app which has one method that is made a command
by uisng the @command decorator. If run from the command line it will
respond to a call like this: "python myapp.py first"
"""
def __init__(self):
"""Creates the argparser using metadata from decorators.
"""
main_module = sys.modules['__main__']
version = ''
if hasattr(main_module, '__version__'):
version = str(main_module.__version__)
self.__argparser = argparse.ArgumentParser(description = self.__doc__)
self.__argparser.add_argument(
'--version',
action = 'version',
version = ('%(prog)s v' + version))
if hasattr(self.__class__, '__skal__'):
for k in self.__class__.__skal__:
arg = []
if type(k) == str:
arg.append(k)
elif type(k) == tuple:
short, full = k
if type(short) == str:
arg.append(short)
if type(full) == str:
arg.append(full)
options = self.__class__.__skal__[k]
self.__argparser.add_argument(*arg, **options)
# Add all subcommands by introspection
self.__subparser = self.__argparser.add_subparsers(dest = 'command')
methods = inspect.getmembers(self.__class__, inspect.ismethod)
for name, method in methods:
if (hasattr(method, 'skal_meta')):
command = self.__subparser.add_parser(
name, help = inspect.getdoc(method))
bound_method = types.MethodType(method, self, self.__class__)
command.set_defaults(cmd = bound_method)
def run(self, args = None):
"""Applicatin starting point.
This will run the associated method/function/module or print a help
list if it's an unknown keyword or the syntax is incorrect.
The suggested usage is as an argument to sys.exit():
>>> sys.exit(app.run())
Keyword arguments:
args -- Custom application arguments (default sys.argv)
"""
self.args = self.__argparser.parse_args(args = args)
try:
if 'cmd' in self.args:
return self.args.cmd()
except KeyboardInterrupt:
return errno.EINTR
def command(f):
"""Decorator to tell Skal that the method/function is a command.
"""
f.skal_meta = {}
return f
| Python | 0.000113 |
8e24d3139c11428cda1e07da62ff007be9c77424 | Add convenience method. | abilian/testing/__init__.py | abilian/testing/__init__.py | """Base stuff for testing.
"""
import os
import subprocess
import requests
assert not 'twill' in subprocess.__file__
from flask.ext.testing import TestCase
from abilian.application import Application
__all__ = ['TestConfig', 'BaseTestCase']
class TestConfig(object):
SQLALCHEMY_DATABASE_URI = "sqlite://"
SQLALCHEMY_ECHO = False
TESTING = True
SECRET_KEY = "SECRET"
CSRF_ENABLED = False
class BaseTestCase(TestCase):
config_class = TestConfig
application_class = Application
def create_app(self):
config = self.config_class()
self.app = self.application_class(config)
return self.app
def setUp(self):
self.app.create_db()
self.session = self.db.session
def tearDown(self):
self.db.session.remove()
self.db.drop_all()
self.db.engine.dispose()
@property
def db(self):
return self.app.extensions['sqlalchemy'].db
# Useful for debugging
def dump_routes(self):
rules = list(self.app.url_map.iter_rules())
rules.sort(key=lambda x: x.rule)
for rule in rules:
print rule, rule.methods, rule.endpoint
def assert_302(self, response):
self.assert_status(response, 302)
#
# Validates HTML if asked by the config or the Unix environment
#
def get(self, url, validate=True):
response = self.client.get(url)
if not validate or response != 200:
return response
validator_url = self.app.config.get('VALIDATOR_URL') \
or os.environ.get('VALIDATOR_URL')
if not validator_url:
return response
content_type = response.headers['Content-Type']
if content_type.split(';')[0].strip() != 'text/html':
return response
return self.validate(url, response.data, content_type, validator_url)
# TODO: post(), put(), etc.
def assert_valid(self, response):
validator_url = self.app.config.get('VALIDATOR_URL') \
or os.environ.get('VALIDATOR_URL')
if validator_url:
self.validate(None, response.data,
response.headers['Content-Type'], validator_url)
def validate(self, url, content, content_type, validator_url):
response = requests.post(validator_url + '?out=json', content,
headers={'Content-Type': content_type})
body = response.json()
for message in body['messages']:
if message['type'] == 'error':
detail = u'on line %s [%s]\n%s' % (
message['lastLine'],
message['extract'],
message['message'])
self.fail((u'Got a validation error for %r:\n%s' %
(url, detail)).encode('utf-8'))
| """Base stuff for testing.
"""
import os
import subprocess
import requests
assert not 'twill' in subprocess.__file__
from flask.ext.testing import TestCase
from abilian.application import Application
__all__ = ['TestConfig', 'BaseTestCase']
class TestConfig(object):
SQLALCHEMY_DATABASE_URI = "sqlite://"
SQLALCHEMY_ECHO = False
TESTING = True
SECRET_KEY = "SECRET"
CSRF_ENABLED = False
class BaseTestCase(TestCase):
config_class = TestConfig
application_class = Application
def create_app(self):
config = self.config_class()
self.app = self.application_class(config)
return self.app
def setUp(self):
self.app.create_db()
self.session = self.db.session
def tearDown(self):
self.db.session.remove()
self.db.drop_all()
self.db.engine.dispose()
@property
def db(self):
return self.app.extensions['sqlalchemy'].db
# Useful for debugging
def dump_routes(self):
rules = list(self.app.url_map.iter_rules())
rules.sort(key=lambda x: x.rule)
for rule in rules:
print rule, rule.methods, rule.endpoint
#
# Validates HTML if asked by the config or the Unix environment
#
def get(self, url, validate=True):
response = self.client.get(url)
if not validate or response != 200:
return response
validator_url = self.app.config.get('VALIDATOR_URL') \
or os.environ.get('VALIDATOR_URL')
if not validator_url:
return response
content_type = response.headers['Content-Type']
if content_type.split(';')[0].strip() != 'text/html':
return response
return self.validate(url, response.data, content_type, validator_url)
# TODO: post(), put(), etc.
def assert_valid(self, response):
validator_url = self.app.config.get('VALIDATOR_URL') \
or os.environ.get('VALIDATOR_URL')
if validator_url:
self.validate(None, response.data,
response.headers['Content-Type'], validator_url)
def validate(self, url, content, content_type, validator_url):
response = requests.post(validator_url + '?out=json', content,
headers={'Content-Type': content_type})
body = response.json()
for message in body['messages']:
if message['type'] == 'error':
detail = u'on line %s [%s]\n%s' % (
message['lastLine'],
message['extract'],
message['message'])
self.fail((u'Got a validation error for %r:\n%s' %
(url, detail)).encode('utf-8'))
| Python | 0 |
434f5d394cc9f70962abc8c6ba19b596e6647b4c | Reformat and update copyright. | spotseeker_server/test/hours/get.py | spotseeker_server/test/hours/get.py | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.test import TestCase
from django.conf import settings
from django.test.client import Client
from spotseeker_server.models import Spot, SpotAvailableHours
import simplejson as json
from django.test.utils import override_settings
from mock import patch
from spotseeker_server import models
@override_settings(SPOTSEEKER_AUTH_MODULE="spotseeker_server.auth.all_ok")
class SpotHoursGETTest(TestCase):
def setUp(self):
spot = Spot.objects.create(name="This spot has available hours")
# Intentionally out of order - make sure windows are sorted, not
# just in db happenstance order
hours2 = SpotAvailableHours.objects.create(
spot=spot, day="m", start_time="11:00", end_time="14:00"
)
hours1 = SpotAvailableHours.objects.create(
spot=spot, day="m", start_time="00:00", end_time="10:00"
)
hours3 = SpotAvailableHours.objects.create(
spot=spot, day="t", start_time="11:00", end_time="14:00"
)
hours4 = SpotAvailableHours.objects.create(
spot=spot, day="w", start_time="11:00", end_time="14:00"
)
hours5 = SpotAvailableHours.objects.create(
spot=spot, day="th", start_time="11:00", end_time="14:00"
)
hours6 = SpotAvailableHours.objects.create(
spot=spot, day="f", start_time="11:00", end_time="14:00"
)
# Saturday is intentionally missing
hours8 = SpotAvailableHours.objects.create(
spot=spot, day="su", start_time="11:00", end_time="14:00"
)
self.spot = spot
def test_hours(self):
"""Tests that a Spot's available hours can be retrieved successfully.
"""
c = Client()
url = "/api/v1/spot/%s" % self.spot.pk
response = c.get(url)
spot_dict = json.loads(response.content)
valid_data = {
"monday": [["00:00", "10:00"], ["11:00", "14:00"]],
"tuesday": [["11:00", "14:00"]],
"wednesday": [["11:00", "14:00"]],
"thursday": [["11:00", "14:00"]],
"friday": [["11:00", "14:00"]],
"saturday": [],
"sunday": [["11:00", "14:00"]],
}
available_hours = spot_dict["available_hours"]
self.assertEqual(available_hours, valid_data)
| # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
""" Copyright 2012, 2013 UW Information Technology, University of Washington
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.test import TestCase
from django.conf import settings
from django.test.client import Client
from spotseeker_server.models import Spot, SpotAvailableHours
import simplejson as json
from django.test.utils import override_settings
from mock import patch
from spotseeker_server import models
@override_settings(SPOTSEEKER_AUTH_MODULE='spotseeker_server.auth.all_ok')
class SpotHoursGETTest(TestCase):
def setUp(self):
spot = Spot.objects.create(name="This spot has available hours")
# Intentionally out of order - make sure windows are sorted, not
# just in db happenstance order
hours2 = SpotAvailableHours.objects.create(spot=spot,
day="m",
start_time="11:00",
end_time="14:00")
hours1 = SpotAvailableHours.objects.create(spot=spot,
day="m",
start_time="00:00",
end_time="10:00")
hours3 = SpotAvailableHours.objects.create(spot=spot,
day="t",
start_time="11:00",
end_time="14:00")
hours4 = SpotAvailableHours.objects.create(spot=spot,
day="w",
start_time="11:00",
end_time="14:00")
hours5 = SpotAvailableHours.objects.create(spot=spot,
day="th",
start_time="11:00",
end_time="14:00")
hours6 = SpotAvailableHours.objects.create(spot=spot,
day="f",
start_time="11:00",
end_time="14:00")
# Saturday is intentionally missing
hours8 = SpotAvailableHours.objects.create(spot=spot,
day="su",
start_time="11:00",
end_time="14:00")
self.spot = spot
def test_hours(self):
""" Tests that a Spot's available hours can be retrieved successfully.
"""
c = Client()
url = "/api/v1/spot/%s" % self.spot.pk
response = c.get(url)
spot_dict = json.loads(response.content)
valid_data = {
'monday': [["00:00", "10:00"], ["11:00", "14:00"]],
'tuesday': [["11:00", "14:00"]],
'wednesday': [["11:00", "14:00"]],
'thursday': [["11:00", "14:00"]],
'friday': [["11:00", "14:00"]],
'saturday': [],
'sunday': [["11:00", "14:00"]],
}
available_hours = spot_dict["available_hours"]
self.assertEqual(available_hours, valid_data)
| Python | 0 |
7292b2d276db056870993a108466fccc18debcae | Update count-different-palindromic-subsequences.py | Python/count-different-palindromic-subsequences.py | Python/count-different-palindromic-subsequences.py | # Time: O(n^2)
# Space: O(n^2)
# Given a string S, find the number of different non-empty palindromic subsequences in S,
# and return that number modulo 10^9 + 7.
#
# A subsequence of a string S is obtained by deleting 0 or more characters from S.
#
# A sequence is palindromic if it is equal to the sequence reversed.
#
# Two sequences A_1, A_2, ... and B_1, B_2, ... are different if there is some i for which A_i != B_i.
#
# Example 1:
# Input:
# S = 'bccb'
# Output: 6
# Explanation:
# The 6 different non-empty palindromic subsequences are 'b', 'c', 'bb', 'cc', 'bcb', 'bccb'.
# Note that 'bcb' is counted only once, even though it occurs twice.
#
# Example 2:
# Input:
# S = 'abcdabcdabcdabcdabcdabcdabcdabcddcbadcbadcbadcbadcbadcbadcbadcba'
# Output: 104860361
#
# Explanation:
# There are 3104860382 different non-empty palindromic subsequences, which is 104860361 modulo 10^9 + 7.
# Note:
# - The length of S will be in the range [1, 1000].
# - Each character S[i] will be in the set {'a', 'b', 'c', 'd'}.
class Solution(object):
def countPalindromicSubsequences(self, S):
"""
:type S: str
:rtype: int
"""
def dp(i, j, prv, nxt, lookup):
if lookup[i][j] is not None:
return lookup[i][j]
result = 1
if i <= j:
for x in xrange(4):
i0 = nxt[i][x]
j0 = prv[j][x]
if i <= i0 <= j:
result = (result + 1) % P
if None < i0 < j0:
result = (result + dp(i0+1, j0-1, prv, nxt, lookup)) % P
result %= P
lookup[i][j] = result
return result
prv = [None] * len(S)
nxt = [None] * len(S)
last = [None] * 4
for i in xrange(len(S)):
last[ord(S[i])-ord('a')] = i
prv[i] = tuple(last)
last = [None] * 4
for i in reversed(xrange(len(S))):
last[ord(S[i])-ord('a')] = i
nxt[i] = tuple(last)
P = 10**9 + 7
lookup = [[None] * len(S) for _ in xrange(len(S))]
return dp(0, len(S)-1, prv, nxt, lookup) - 1
| # Time: O(n^2)
# Space: O(n^2)
class Solution(object):
def countPalindromicSubsequences(self, S):
"""
:type S: str
:rtype: int
"""
def dp(i, j, prv, nxt, lookup):
if lookup[i][j] is not None:
return lookup[i][j]
result = 1
if i <= j:
for x in xrange(4):
i0 = nxt[i][x]
j0 = prv[j][x]
if i <= i0 <= j:
result = (result + 1) % P
if None < i0 < j0:
result = (result + dp(i0+1, j0-1, prv, nxt, lookup)) % P
result %= P
lookup[i][j] = result
return result
prv = [None] * len(S)
nxt = [None] * len(S)
last = [None] * 4
for i in xrange(len(S)):
last[ord(S[i])-ord('a')] = i
prv[i] = tuple(last)
last = [None] * 4
for i in reversed(xrange(len(S))):
last[ord(S[i])-ord('a')] = i
nxt[i] = tuple(last)
P = 10**9 + 7
lookup = [[None] * len(S) for _ in xrange(len(S))]
return dp(0, len(S)-1, prv, nxt, lookup) - 1
| Python | 0.000001 |
358de4c3ce20569e217b1caf5c25ce826b536bbc | Reformat datastructuretools | supriya/tools/datastructuretools/__init__.py | supriya/tools/datastructuretools/__init__.py | # -*- encoding: utf-8 -*-
r"""
Tools for working with generic datastructures.
"""
from abjad.tools import systemtools
systemtools.ImportManager.import_structured_package(
__path__[0],
globals(),
)
| # -*- encoding: utf-8 -*-
r'''
Tools for working with generic datastructures.
'''
from abjad.tools import systemtools
systemtools.ImportManager.import_structured_package(
__path__[0],
globals(),
)
| Python | 0.000001 |
0e2bc29486fc1e09b6d90ccdbe21095f73848d48 | remove the event listener check | speakerbot/listenable.py | speakerbot/listenable.py | from dynamic_class import Singleton
class NotEventException(Exception):
pass
class GlobalEventDispatcher(object):
"""not quite there yet"""
__metaclass__ = Singleton
def __init__(self):
pass
def event(method):
"""Must be called first in a decorator chain, otherwise we lose the correct name property"""
def wrapped(*args, **kwargs):
self = args[0]
if self.dispatch_events(self._interrogators, method.__name__, *args, **kwargs):
#Self will be removed and put back in the run_manglers routine.
args, kwargs = self.run_manglers(method.__name__, *args, **kwargs)
result = method(*args, **kwargs)
kwargs["event_result"] = result
self.dispatch_events(self._listeners, method.__name__, *args, **kwargs)
return result
wrapped.is_event = True
method.is_event = True
return wrapped
def listenable(klass):
"""
Class decorator to implement a lightweight event-dispatch model.
@listenable on the class
@event on the method you want to monitor
listeners must implement the function signature of the event exactly (or take *args, **kwargs generically),
plus a special argument called "event_result" that contains the return value of the method invocation.
TODO: Make it work with other decorators, inheritance
"""
def _attach(self, event, func, handler_collection_name):
#Hell is interacting with other people's code.
#if not hasattr(getattr(self, event), "is_event"):
# raise NotEventException("This method hasn't been decorated as an event listener")
handler_collection = getattr(self, handler_collection_name)
handlers = handler_collection.get(event, [])
handlers.append(func)
handler_collection[event] = handlers
setattr(self, handler_collection_name, handler_collection)
def attach_interrogator(self, event, interrogator):
_attach(self, event, interrogator, "_interrogators")
def attach_listener(self, event, listener):
_attach(self, event, listener, "_listeners")
def attach_mangler(self, event, listener):
_attach(self, event, listener, "_manglers")
def run_manglers(self, method_name, *args, **kwargs):
old_self = args[0] #Get the self reference
args = args[1:] #Remove the self reference
for mangler in self._manglers.get(method_name, []):
try:
#pop off the instance information. We just want the function signature
args, kwargs = mangler(*args, **kwargs)
except Exception as e:
print "Argument mangler %s failed with exception %s. It reported the following: %s" % (mangler.__name__, e.__class__.__name__, str(e))
args = list(args)
args.insert(0, old_self)
args = tuple(args)
return args, kwargs
def dispatch_events(self, handler_collection, method_name, *args, **kwargs):
please_do_continue = True
for handler in handler_collection.get(method_name, []):
try:
#pop off the instance information. We just want the function signature
please_do_continue = handler(*args[1:], **kwargs)
if please_do_continue == None:
please_do_continue = True
if not please_do_continue:
print "The event processing was cancelled by %s" % handler.__name__
break
except Exception as e:
print "Event listener %s failed with exception %s. It reported the following: %s" % (handler.__name__, e.__class__.__name__, str(e))
return please_do_continue
setattr(klass, "_listeners", {})
setattr(klass, "_interrogators", {})
setattr(klass, "_manglers", {})
setattr(klass, "attach_listener", attach_listener)
setattr(klass, "attach_interrogator", attach_interrogator)
setattr(klass, "attach_mangler", attach_mangler)
setattr(klass, "dispatch_events", dispatch_events)
setattr(klass, "run_manglers", run_manglers)
return klass | from dynamic_class import Singleton
class NotEventException(Exception):
pass
class GlobalEventDispatcher(object):
"""not quite there yet"""
__metaclass__ = Singleton
def __init__(self):
pass
def event(method):
"""Must be called first in a decorator chain, otherwise we lose the correct name property"""
def wrapped(*args, **kwargs):
self = args[0]
if self.dispatch_events(self._interrogators, method.__name__, *args, **kwargs):
#Self will be removed and put back in the run_manglers routine.
args, kwargs = self.run_manglers(method.__name__, *args, **kwargs)
result = method(*args, **kwargs)
kwargs["event_result"] = result
self.dispatch_events(self._listeners, method.__name__, *args, **kwargs)
return result
wrapped.is_event = True
method.is_event = True
return wrapped
def listenable(klass):
"""
Class decorator to implement a lightweight event-dispatch model.
@listenable on the class
@event on the method you want to monitor
listeners must implement the function signature of the event exactly (or take *args, **kwargs generically),
plus a special argument called "event_result" that contains the return value of the method invocation.
TODO: Make it work with other decorators, inheritance
"""
def _attach(self, event, func, handler_collection_name):
if not hasattr(getattr(self, event), "is_event"):
raise NotEventException("This method hasn't been decorated as an event listener")
handler_collection = getattr(self, handler_collection_name)
handlers = handler_collection.get(event, [])
handlers.append(func)
handler_collection[event] = handlers
setattr(self, handler_collection_name, handler_collection)
def attach_interrogator(self, event, interrogator):
_attach(self, event, interrogator, "_interrogators")
def attach_listener(self, event, listener):
_attach(self, event, listener, "_listeners")
def attach_mangler(self, event, listener):
_attach(self, event, listener, "_manglers")
def run_manglers(self, method_name, *args, **kwargs):
old_self = args[0] #Get the self reference
args = args[1:] #Remove the self reference
for mangler in self._manglers.get(method_name, []):
try:
#pop off the instance information. We just want the function signature
args, kwargs = mangler(*args, **kwargs)
except Exception as e:
print "Argument mangler %s failed with exception %s. It reported the following: %s" % (mangler.__name__, e.__class__.__name__, str(e))
args = list(args)
args.insert(0, old_self)
args = tuple(args)
return args, kwargs
def dispatch_events(self, handler_collection, method_name, *args, **kwargs):
please_do_continue = True
for handler in handler_collection.get(method_name, []):
try:
#pop off the instance information. We just want the function signature
please_do_continue = handler(*args[1:], **kwargs)
if please_do_continue == None:
please_do_continue = True
if not please_do_continue:
print "The event processing was cancelled by %s" % handler.__name__
break
except Exception as e:
print "Event listener %s failed with exception %s. It reported the following: %s" % (handler.__name__, e.__class__.__name__, str(e))
return please_do_continue
setattr(klass, "_listeners", {})
setattr(klass, "_interrogators", {})
setattr(klass, "_manglers", {})
setattr(klass, "attach_listener", attach_listener)
setattr(klass, "attach_interrogator", attach_interrogator)
setattr(klass, "attach_mangler", attach_mangler)
setattr(klass, "dispatch_events", dispatch_events)
setattr(klass, "run_manglers", run_manglers)
return klass | Python | 0.000004 |
79fd01202255e0b00ca2fe90834dbd4e15dd84bc | Print NVIDIA license notice only when actually downloading the CUDA headers repository. | third_party/cuda/dependencies.bzl | third_party/cuda/dependencies.bzl | """CUDA headers repository."""
def _download_nvidia_headers(repository_ctx, output, url, sha256, strip_prefix):
# Keep the mirror up-to-date manually (see b/154869892) with:
# /google/bin/releases/tensorflow-devinfra-team/cli_tools/tf_mirror <url>
repository_ctx.download_and_extract(
url = [
"http://gitlab.com/nvidia/headers/" + url,
"http://mirror.tensorflow.org/gitlab.com/nvidia/headers/" + url,
],
output = output,
sha256 = sha256,
stripPrefix = strip_prefix,
)
def _cuda_headers_impl(repository_ctx):
build_file = Label("//third_party/cuda:cuda_headers.BUILD")
print("\n\033[22;33mNOTICE:\033[0m The following command will download " +
"NVIDIA proprietary software. By using the software you agree to " +
"comply with the terms of the license agreement that accompanies " +
"the software. If you do not agree to the terms of the license " +
"agreement, do not use the software.")
tag = "cuda-10-2"
for name, sha256 in [
("cublas", "9537c3e89a85ea0082217e326cd8e03420f7723e05c98d730d80bda8b230c81b"),
("cudart", "8a203bd87a2fde37608e8bc3c0c9347b40586906c613b6bef0bfc3995ff40099"),
("cufft", "bac1602183022c7a9c3e13078fcac59e4eee0390afe99c3c7348c894a97e19dd"),
("cusolver", "68e049c1d27ad3558cddd9ad82cf885b6789f1f01934f9b60340c391fa8e6279"),
("misc", "5e208a8e0f25c9df41121f0502eadae903fa64f808437516198004bdbf6af04b"),
]:
url = "cuda-individual/{name}/-/archive/{tag}/{name}-{tag}.tar.gz".format(name = name, tag = tag)
strip_prefix = "{name}-{tag}".format(name = name, tag = tag)
_download_nvidia_headers(repository_ctx, "cuda", url, sha256, strip_prefix)
repository_ctx.symlink(build_file, "BUILD")
def _cudnn_headers_impl(repository_ctx):
build_file = Label("//third_party/cuda:cudnn_headers.BUILD")
tag = "v7.6.5"
url = "cudnn/-/archive/{tag}/cudnn-{tag}.tar.gz".format(tag = tag)
strip_prefix = "cudnn-{tag}".format(tag = tag)
sha256 = "ef45f4649328da678285b8ce589a8296cedcc93819ffdbb5eea5346a0619a766"
_download_nvidia_headers(repository_ctx, "cudnn", url, sha256, strip_prefix)
repository_ctx.symlink(build_file, "BUILD")
_cuda_headers = repository_rule(
implementation = _cuda_headers_impl,
# remotable = True,
)
_cudnn_headers = repository_rule(
implementation = _cudnn_headers_impl,
# remotable = True,
)
def cuda_dependencies():
_cuda_headers(name = "cuda_headers")
_cudnn_headers(name = "cudnn_headers")
| """CUDA headers repository."""
def _download_nvidia_headers(repository_ctx, output, url, sha256, strip_prefix):
# Keep the mirror up-to-date manually (see b/154869892) with:
# /google/bin/releases/tensorflow-devinfra-team/cli_tools/tf_mirror <url>
repository_ctx.download_and_extract(
url = [
"http://gitlab.com/nvidia/headers/" + url,
"http://mirror.tensorflow.org/gitlab.com/nvidia/headers/" + url,
],
output = output,
sha256 = sha256,
stripPrefix = strip_prefix,
)
def _cuda_headers_impl(repository_ctx):
tag = "cuda-10-2"
for name, sha256 in [
("cublas", "9537c3e89a85ea0082217e326cd8e03420f7723e05c98d730d80bda8b230c81b"),
("cudart", "8a203bd87a2fde37608e8bc3c0c9347b40586906c613b6bef0bfc3995ff40099"),
("cufft", "bac1602183022c7a9c3e13078fcac59e4eee0390afe99c3c7348c894a97e19dd"),
("cusolver", "68e049c1d27ad3558cddd9ad82cf885b6789f1f01934f9b60340c391fa8e6279"),
("misc", "5e208a8e0f25c9df41121f0502eadae903fa64f808437516198004bdbf6af04b"),
]:
url = "cuda-individual/{name}/-/archive/{tag}/{name}-{tag}.tar.gz".format(name = name, tag = tag)
strip_prefix = "{name}-{tag}".format(name = name, tag = tag)
_download_nvidia_headers(repository_ctx, "cuda", url, sha256, strip_prefix)
repository_ctx.symlink(Label("//third_party/cuda:cuda_headers.BUILD"), "BUILD")
def _cudnn_headers_impl(repository_ctx):
tag = "v7.6.5"
url = "cudnn/-/archive/{tag}/cudnn-{tag}.tar.gz".format(tag = tag)
strip_prefix = "cudnn-{tag}".format(tag = tag)
sha256 = "ef45f4649328da678285b8ce589a8296cedcc93819ffdbb5eea5346a0619a766"
_download_nvidia_headers(repository_ctx, "cudnn", url, sha256, strip_prefix)
repository_ctx.symlink(Label("//third_party/cuda:cudnn_headers.BUILD"), "BUILD")
_cuda_headers = repository_rule(
implementation = _cuda_headers_impl,
# remotable = True,
)
_cudnn_headers = repository_rule(
implementation = _cudnn_headers_impl,
# remotable = True,
)
def cuda_dependencies():
print("The following command will download NVIDIA proprietary " +
"software. By using the software you agree to comply with the " +
"terms of the license agreement that accompanies the software. " +
"If you do not agree to the terms of the license agreement, do " +
"not use the software.")
_cuda_headers(name = "cuda_headers")
_cudnn_headers(name = "cudnn_headers")
| Python | 0 |
9098692bf431b4947da96dc054fe8e1559e27aa5 | Update hexagon_nn_headers to v1.10.3.1.3 Changes Includes: * Support soc_id:371 * New method exposed that returns the version of hexagon_nn used in libhexagon_interface.so | third_party/hexagon/workspace.bzl | third_party/hexagon/workspace.bzl | """Loads the Hexagon NN Header files library, used by TF Lite."""
load("//third_party:repo.bzl", "third_party_http_archive")
def repo():
third_party_http_archive(
name = "hexagon_nn",
sha256 = "281d46b47f7191f03a8a4071c4c8d2af9409bb9d59573dc2e42f04c4fd61f1fd",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.cloud.google.com/download.tensorflow.org/tflite/hexagon_nn_headers_v1.10.3.1.3.tgz",
],
build_file = "//third_party/hexagon:BUILD",
)
| """Loads the Hexagon NN Header files library, used by TF Lite."""
load("//third_party:repo.bzl", "third_party_http_archive")
def repo():
third_party_http_archive(
name = "hexagon_nn",
sha256 = "4cbf3c18834e24b1f64cc507f9c2f22b4fe576c6ff938d55faced5d8f1bddf62",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.cloud.google.com/download.tensorflow.org/tflite/hexagon_nn_headers_v1.10.3.1.2.tgz",
],
build_file = "//third_party/hexagon:BUILD",
)
| Python | 0 |
2a2224a2babaf20919c0091bcfd4b6109eadcecb | Fix issue with internal user and auditor | polyaxon/api/repos/views.py | polyaxon/api/repos/views.py | import logging
import os
from rest_framework.generics import RetrieveUpdateDestroyAPIView, get_object_or_404
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.settings import api_settings
from django.conf import settings
from django.http import Http404, HttpResponseServerError
import auditor
from api.repos.serializers import RepoSerializer
from api.repos.tasks import handle_new_files
from api.utils.views import ProtectedView, UploadView
from db.models.repos import Repo
from event_manager.events.repo import REPO_CREATED, REPO_DOWNLOADED
from libs.permissions.authentication import InternalAuthentication, is_internal_user
from libs.permissions.internal import IsAuthenticatedOrInternal, IsInternal
from libs.permissions.projects import get_permissible_project
from libs.repos import git
from libs.repos.git import set_git_repo
logger = logging.getLogger(__name__)
class RepoDetailView(RetrieveUpdateDestroyAPIView):
queryset = Repo.objects.all()
serializer_class = RepoSerializer
permission_classes = (IsAuthenticated,)
def get_object(self):
return get_object_or_404(Repo, project=get_permissible_project(view=self))
class DownloadFilesView(ProtectedView):
HANDLE_UNAUTHENTICATED = False
authentication_classes = api_settings.DEFAULT_AUTHENTICATION_CLASSES + [
InternalAuthentication,
]
permission_classes = (IsAuthenticatedOrInternal, )
def get_object(self):
project = get_permissible_project(view=self)
try:
repo = Repo.objects.get(project=project)
except Repo.DoesNotExist:
raise Http404('Repo does not exist.')
if not is_internal_user(self.request.user):
auditor.record(event_type=REPO_DOWNLOADED, instance=repo, actor_id=self.request.user.id)
return repo
def get(self, request, *args, **kwargs):
repo = self.get_object()
archive_path, archive_name = git.archive_repo(repo.git, repo.project.name)
return self.redirect(path='/archived_repos/{}'.format(archive_name))
class UploadFilesView(UploadView):
def get_object(self):
project = get_permissible_project(view=self)
if project.has_notebook:
self.permission_denied(
self.request,
'The Project `{}` is currently running a Notebook. '
'You must stop it before uploading a new version of the code.'.format(project.name))
repo, created = Repo.objects.get_or_create(project=project)
if not created and not os.path.isdir(repo.user_path):
set_git_repo(repo)
else:
auditor.record(event_type=REPO_CREATED, instance=repo, actor_id=self.request.user.id)
return repo
def put(self, request, *args, **kwargs):
user = request.user
repo = self.get_object()
path = os.path.join(settings.UPLOAD_ROOT, user.username)
if not os.path.exists(path):
os.makedirs(path)
try:
tar_file_name = self._handle_posted_data(request=request,
filename='{}.tar.gz'.format(repo.project.name),
directory=path,
upload_filename='repo')
except (IOError, os.error) as e: # pragma: no cover
logger.warning(
'IOError while trying to save posted data (%s): %s', e.errno, e.strerror)
return HttpResponseServerError()
json_data = self._handle_json_data(request)
is_async = json_data.get('async')
if is_async is False:
file_handler = handle_new_files
else:
file_handler = handle_new_files.delay
file_handler(user_id=user.id, repo_id=repo.id, tar_file_name=tar_file_name)
# do some stuff with uploaded file
return Response(status=204)
| import logging
import os
from rest_framework.generics import RetrieveUpdateDestroyAPIView, get_object_or_404
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.settings import api_settings
from django.conf import settings
from django.http import Http404, HttpResponseServerError
import auditor
from api.repos.serializers import RepoSerializer
from api.repos.tasks import handle_new_files
from api.utils.views import ProtectedView, UploadView
from db.models.repos import Repo
from event_manager.events.repo import REPO_CREATED, REPO_DOWNLOADED
from libs.permissions.authentication import InternalAuthentication, is_internal_user
from libs.permissions.internal import IsAuthenticatedOrInternal, IsInternal
from libs.permissions.projects import get_permissible_project
from libs.repos import git
from libs.repos.git import set_git_repo
logger = logging.getLogger(__name__)
class RepoDetailView(RetrieveUpdateDestroyAPIView):
queryset = Repo.objects.all()
serializer_class = RepoSerializer
permission_classes = (IsAuthenticated,)
def get_object(self):
return get_object_or_404(Repo, project=get_permissible_project(view=self))
class DownloadFilesView(ProtectedView):
HANDLE_UNAUTHENTICATED = False
authentication_classes = api_settings.DEFAULT_AUTHENTICATION_CLASSES + [
InternalAuthentication,
]
permission_classes = (IsAuthenticatedOrInternal, )
def get_object(self):
project = get_permissible_project(view=self)
try:
repo = Repo.objects.get(project=project)
except Repo.DoesNotExist:
raise Http404('Repo does not exist.')
if is_internal_user(self.request.user):
auditor.record(event_type=REPO_DOWNLOADED, instance=repo, actor_id=self.request.user.id)
return repo
def get(self, request, *args, **kwargs):
repo = self.get_object()
archive_path, archive_name = git.archive_repo(repo.git, repo.project.name)
return self.redirect(path='/archived_repos/{}'.format(archive_name))
class UploadFilesView(UploadView):
def get_object(self):
project = get_permissible_project(view=self)
if project.has_notebook:
self.permission_denied(
self.request,
'The Project `{}` is currently running a Notebook. '
'You must stop it before uploading a new version of the code.'.format(project.name))
repo, created = Repo.objects.get_or_create(project=project)
if not created and not os.path.isdir(repo.user_path):
set_git_repo(repo)
else:
auditor.record(event_type=REPO_CREATED, instance=repo, actor_id=self.request.user.id)
return repo
def put(self, request, *args, **kwargs):
user = request.user
repo = self.get_object()
path = os.path.join(settings.UPLOAD_ROOT, user.username)
if not os.path.exists(path):
os.makedirs(path)
try:
tar_file_name = self._handle_posted_data(request=request,
filename='{}.tar.gz'.format(repo.project.name),
directory=path,
upload_filename='repo')
except (IOError, os.error) as e: # pragma: no cover
logger.warning(
'IOError while trying to save posted data (%s): %s', e.errno, e.strerror)
return HttpResponseServerError()
json_data = self._handle_json_data(request)
is_async = json_data.get('async')
if is_async is False:
file_handler = handle_new_files
else:
file_handler = handle_new_files.delay
file_handler(user_id=user.id, repo_id=repo.id, tar_file_name=tar_file_name)
# do some stuff with uploaded file
return Response(status=204)
| Python | 0.000004 |
86391ed76c49578321c026187f159c53c2cf4ed1 | Fix slack welcome message display bug and add user handle | orchestra/slack.py | orchestra/slack.py | import base64
from uuid import uuid1
from django.conf import settings
import slacker
from orchestra.utils.settings import run_if
class SlackService(object):
"""
Wrapper slack service to allow easy swapping and mocking out of API.
"""
def __init__(self, api_key):
self._service = slacker.Slacker(api_key)
for attr_name in ('chat', 'groups', 'users'):
setattr(self, attr_name, getattr(self._service, attr_name))
@run_if('SLACK_EXPERTS')
def add_worker_to_project_team(worker, project):
slack = SlackService(settings.SLACK_EXPERTS_API_KEY)
try:
user_id = slack.users.get_user_id(worker.slack_username)
response = slack.groups.invite(project.slack_group_id, user_id)
if not response.body.get('already_in_group'):
welcome_message = (
'<@{}|{}> has been added to the team. '
'Welcome aboard!').format(user_id, worker.slack_username)
slack.chat.post_message(project.slack_group_id, welcome_message)
except:
# TODO(jrbotros): for now, using slack on a per-worker basis is
# optional; we'll want to rethink this in the future
pass
@run_if('SLACK_EXPERTS')
def create_project_slack_group(project):
"""
Create slack channel for project team communication
"""
slack = SlackService(settings.SLACK_EXPERTS_API_KEY)
response = slack.groups.create(_project_slack_group_name(project))
project.slack_group_id = response.body['group']['id']
slack.groups.set_topic(project.slack_group_id, project.short_description)
slack.groups.set_purpose(project.slack_group_id,
'Discussing work on `{}`'.format(
project.short_description))
project.save()
return project.slack_group_id
def _project_slack_group_name(project):
"""
Return a unique identifier for project slack groups; must fit into slack's
21 char limit for group names.
"""
return base64.b64encode(uuid1().bytes)
| import base64
from uuid import uuid1
from django.conf import settings
import slacker
from orchestra.utils.settings import run_if
class SlackService(object):
"""
Wrapper slack service to allow easy swapping and mocking out of API.
"""
def __init__(self, api_key):
self._service = slacker.Slacker(api_key)
for attr_name in ('chat', 'groups', 'users'):
setattr(self, attr_name, getattr(self._service, attr_name))
@run_if('SLACK_EXPERTS')
def add_worker_to_project_team(worker, project):
slack = SlackService(settings.SLACK_EXPERTS_API_KEY)
try:
response = slack.groups.invite(project.slack_group_id,
slack.users.get_user_id(
worker.slack_username))
if not response.body['already_in_group']:
welcome_message = ('{} has been added to the team. '
'Welcome aboard!').format(worker.user.username)
slack.chat.post_message(project.slack_group_id, welcome_message)
except:
# TODO(jrbotros): for now, using slack on a per-worker basis is
# optional; we'll want to rethink this in the future
pass
@run_if('SLACK_EXPERTS')
def create_project_slack_group(project):
"""
Create slack channel for project team communication
"""
slack = SlackService(settings.SLACK_EXPERTS_API_KEY)
response = slack.groups.create(_project_slack_group_name(project))
project.slack_group_id = response.body['group']['id']
slack.groups.set_topic(project.slack_group_id, project.short_description)
slack.groups.set_purpose(project.slack_group_id,
'Discussing work on `{}`'.format(
project.short_description))
project.save()
return project.slack_group_id
def _project_slack_group_name(project):
"""
Return a unique identifier for project slack groups; must fit into slack's
21 char limit for group names.
"""
return base64.b64encode(uuid1().bytes)
| Python | 0 |
e7b50269a6d83234b283f769265bf474666b6cd2 | Update project model with property has_description | polyaxon/projects/models.py | polyaxon/projects/models.py | import uuid
from django.conf import settings
from django.core.validators import validate_slug
from django.db import models
from libs.blacklist import validate_blacklist_name
from libs.models import DescribableModel, DiffModel
class Project(DiffModel, DescribableModel):
"""A model that represents a set of experiments to solve a specific problem."""
uuid = models.UUIDField(
default=uuid.uuid4,
editable=False,
unique=True,
null=False)
name = models.CharField(
max_length=256,
validators=[validate_slug, validate_blacklist_name])
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name='projects')
is_public = models.BooleanField(
default=True,
help_text='If project is public or private.')
def __str__(self):
return self.unique_name
class Meta:
unique_together = (('user', 'name'),)
@property
def unique_name(self):
return '{}.{}'.format(self.user.username, self.name)
@property
def has_code(self):
return hasattr(self, 'repo')
@property
def has_description(self):
return bool(self.description)
@property
def tensorboard(self):
if settings.DEPLOY_RUNNER:
return self.tensorboard_jobs.last()
return None
@property
def notebook(self):
if settings.DEPLOY_RUNNER:
return self.notebook_jobs.last()
return None
@property
def has_tensorboard(self):
tensorboard = self.tensorboard
return tensorboard and tensorboard.is_running
@property
def has_notebook(self):
notebook = self.notebook
return notebook and notebook.is_running
| import uuid
from django.conf import settings
from django.core.validators import validate_slug
from django.db import models
from libs.blacklist import validate_blacklist_name
from libs.models import DescribableModel, DiffModel
class Project(DiffModel, DescribableModel):
"""A model that represents a set of experiments to solve a specific problem."""
uuid = models.UUIDField(
default=uuid.uuid4,
editable=False,
unique=True,
null=False)
name = models.CharField(
max_length=256,
validators=[validate_slug, validate_blacklist_name])
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
related_name='projects')
is_public = models.BooleanField(
default=True,
help_text='If project is public or private.')
def __str__(self):
return self.unique_name
class Meta:
unique_together = (('user', 'name'),)
@property
def unique_name(self):
return '{}.{}'.format(self.user.username, self.name)
@property
def has_code(self):
return hasattr(self, 'repo')
@property
def tensorboard(self):
if settings.DEPLOY_RUNNER:
return self.tensorboard_jobs.last()
return None
@property
def notebook(self):
if settings.DEPLOY_RUNNER:
return self.notebook_jobs.last()
return None
@property
def has_tensorboard(self):
tensorboard = self.tensorboard
return tensorboard and tensorboard.is_running
@property
def has_notebook(self):
notebook = self.notebook
return notebook and notebook.is_running
| Python | 0 |
76bf774f3af2fb4fc2518945944b9f64c413712a | Simplify "cursor" function in "misc" module | autoload/breeze/utils/misc.py | autoload/breeze/utils/misc.py | # -*- coding: utf-8 -*-
"""
breeze.utils.misc
~~~~~~~~~~~~~~~~~
This module defines various utility functions and some tiny wrappers
around vim functions.
"""
import vim
import breeze.utils.settings
def echom(msg):
"""Gives a simple feedback to the user via the command line."""
vim.command('echom "[breeze] {0}"'.format(msg.replace('"', '\"')))
def echov(msg):
"""Gives a feedback only if g:breeze_verbosity = 1."""
if breeze.utils.settings.get("verbosity", bool):
echom(msg)
def cursor(target=None):
"""Moves the cursor or returs the current cursor position."""
if not target:
return vim.current.window.cursor
else:
vim.current.window.cursor = target
def window_bundaries():
"""Returns the top and bottom lines number for the current window."""
curr_pos = cursor()
scrolloff = vim.eval("&scrolloff")
vim.command("setlocal scrolloff=0")
# :help keepjumps -> Moving around in {command} does not change the '',
# '. and '^ marks, the jumplist or the changelist.
vim.command("keepjumps normal! H")
top = cursor()[0]
vim.command("keepjumps normal! L")
bot = cursor()[0]
# restore position and changed options
cursor(curr_pos)
vim.command("setlocal scrolloff={0}".format(scrolloff))
return top, bot
def highlight(group, patt, priority=10):
"""Wraps the matchadd() vim function."""
vim.eval("matchadd('{0}', '{1}', {2})".format(
group, patt, priority))
def subst_char(buffer, v, row, col):
"""Substitutes a character in the buffer with the given character at the
given position. Return the substituted character."""
if row >= len(buffer):
raise ValueError("row index out of bound")
new_line = list(buffer[row])
if col >= len(new_line):
raise ValueError("column index out of bound")
old = buffer[row][col]
new_line[col] = v
buffer[row] = "".join(new_line)
return old
def clear_highlighting():
"""Clears Breeze highlightings."""
for match in vim.eval("getmatches()"):
if match['group'] in ('BreezeJumpMark', 'BreezeShade', 'BreezeHl'):
vim.command("call matchdelete({0})".format(match['id']))
| # -*- coding: utf-8 -*-
"""
breeze.utils.misc
~~~~~~~~~~~~~~~~~
This module defines various utility functions and some tiny wrappers
around vim functions.
"""
import vim
import breeze.utils.settings
def echom(msg):
"""Gives a simple feedback to the user via the command line."""
vim.command('echom "[breeze] {0}"'.format(msg.replace('"', '\"')))
def echov(msg):
"""Gives a feedback only if g:breeze_verbosity = 1."""
if breeze.utils.settings.get("verbosity", bool):
echom(msg)
def cursor(target=None, kj=False):
"""Moves the cursor.
If the kj parameter is set to True, then the command behaves as following:
:help keepjumps -> Moving around in {command} does not change the '', '.
and '^ marks, the jumplist or the changelist...
"""
if not target:
return vim.current.window.cursor
vim.command("{0}call cursor({1}, {2})".format(
"keepjumps " if kj else "", target[0], target[1]))
def window_bundaries():
"""Returns the top and bottom lines number for the current window."""
curr_pos = cursor()
scrolloff = vim.eval("&scrolloff")
vim.command("setlocal scrolloff=0")
# :help keepjumps -> Moving around in {command} does not change the '',
# '. and '^ marks, the jumplist or the changelist.
vim.command("keepjumps normal! H")
top = cursor()[0]
vim.command("keepjumps normal! L")
bot = cursor()[0]
# restore position and changed options
cursor(curr_pos)
vim.command("setlocal scrolloff={0}".format(scrolloff))
return top, bot
def highlight(group, patt, priority=10):
"""Wraps the matchadd() vim function."""
vim.eval("matchadd('{0}', '{1}', {2})".format(
group, patt, priority))
def subst_char(buffer, v, row, col):
"""Substitutes a character in the buffer with the given character at the
given position. Return the substituted character."""
if row >= len(buffer):
raise ValueError("row index out of bound")
new_line = list(buffer[row])
if col >= len(new_line):
raise ValueError("column index out of bound")
old = buffer[row][col]
new_line[col] = v
buffer[row] = "".join(new_line)
return old
def clear_highlighting():
"""Clears Breeze highlightings."""
for match in vim.eval("getmatches()"):
if match['group'] in ('BreezeJumpMark', 'BreezeShade', 'BreezeHl'):
vim.command("call matchdelete({0})".format(match['id']))
| Python | 0.000291 |
0b311b67e1cf5831a6e1af317409fc6e854e8ce6 | Remove debug artifacts | emission_events/scraper/scraper.py | emission_events/scraper/scraper.py | from datetime import datetime
from bs4 import BeautifulSoup
class Scraper(object):
def __init__(self, html, tracking_number):
self.html = html
self.soup = BeautifulSoup(html)
self.tracking_number = tracking_number
def __call__(self):
tds = self.soup.table.find_all('td')
metas = self.soup.find_all('meta')
began_date = self.parse_date(tds[5].string.strip())
ended_date = self.parse_date(tds[7].string.strip())
return {
'tracking_number': self.tracking_number,
'dc_date_meta': self.get_dc_date_meta(metas),
'regulated_entity_name': self.clean(tds[0].string, 30),
'physical_location': self.clean(tds[1].string),
'regulated_entity_rn_number': self.clean(tds[2].string, 50),
'city_county': self.clean(tds[3].string, 50),
'type_of_air_emissions_event': self.clean(tds[4].string, 50).upper(),
'based_on_the': self.clean(tds[6].string, 50).upper(),
'event_began': self.clean(tds[5].string, 30),
'event_ended': self.clean(tds[7].string, 30),
'cause': self.clean(tds[8].string),
'action_taken': self.clean(tds[9].string),
'emissions_estimation_method': self.clean(tds[10].string),
'city': self.get_city(tds[3].string),
'county': self.get_county(tds[3].string),
'began_date': began_date,
'ended_date': ended_date,
'duration': self.get_duration(began_date, ended_date),
'dc_date': self.parse_date(self.get_dc_date_meta(metas))
}
def clean(self, cad, limit=200):
if cad == None:
return ''
else:
return cad.strip()[0:limit]
def get_dc_date_meta(self, metas):
for meta in metas:
try:
if meta['name'] == 'DC.Date':
return meta['content']
except KeyError:
pass
return None
def get_city(self, cad):
city = cad.split(',')[0].strip()
if city == '':
return None
else:
return city
def get_county(self, cad):
county = cad.split(',')[1].strip()
if county == '':
return None
else:
return county
def parse_date(self, cad):
try:
if len(cad.split()) == 2:
return datetime.strptime(cad, "%m/%d/%Y %I:%M%p")
else:
return datetime.strptime(cad, "%m/%d/%Y")
except ValueError:
print "fallo: " + cad
return None
def get_duration(self, begin, end):
if begin == None or end == None:
return None
else:
return (end - begin).total_seconds()/3600
| from datetime import datetime
from bs4 import BeautifulSoup
class Scraper(object):
def __init__(self, html, tracking_number):
self.html = html
self.soup = BeautifulSoup(html)
self.tracking_number = tracking_number
def __call__(self):
tds = self.soup.table.find_all('td')
metas = self.soup.find_all('meta')
began_date = self.parse_date(tds[5].string.strip())
ended_date = self.parse_date(tds[7].string.strip())
return {
'tracking_number': self.tracking_number,
'dc_date_meta': self.get_dc_date_meta(metas),
'regulated_entity_name': self.clean(tds[0].string, 30),
'physical_location': self.clean(tds[1].string),
'regulated_entity_rn_number': self.clean(tds[2].string, 50),
'city_county': self.clean(tds[3].string, 50),
'type_of_air_emissions_event': self.clean(tds[4].string, 50).upper(),
'based_on_the': self.clean(tds[6].string, 50).upper(),
'event_began': self.clean(tds[5].string, 30),
'event_ended': self.clean(tds[7].string, 30),
'cause': self.clean(tds[8].string),
'action_taken': self.clean(tds[9].string),
'emissions_estimation_method': self.clean(tds[10].string),
'city': self.get_city(tds[3].string),
'county': self.get_county(tds[3].string),
'began_date': began_date,
'ended_date': ended_date,
'duration': self.get_duration(began_date, ended_date)
}
def clean(self, cad, limit=200):
if cad == None:
return ''
else:
return cad.strip()[0:limit]
def get_dc_date_meta(self, metas):
for meta in metas:
try:
if meta['name'] == 'DC.Date':
return meta['content']
except KeyError:
pass
return None
def get_city(self, cad):
city = cad.split(',')[0].strip()
if city == '':
return None
else:
return city
def get_county(self, cad):
county = cad.split(',')[1].strip()
if county == '':
return None
else:
return county
def parse_date(self, cad):
try:
if len(cad.split()) == 2:
print "parsing " + cad
print datetime.strptime(cad, "%m/%d/%Y %I:%M%p")
return datetime.strptime(cad, "%m/%d/%Y %I:%M%p")
else:
print "parsing (short) " + cad
print datetime.strptime(cad, "%m/%d/%Y")
return datetime.strptime(cad, "%m/%d/%Y")
except ValueError:
print "fallo: " + cad
return None
def get_duration(self, begin, end):
if begin == None or end == None:
return None
else:
return (end - begin).total_seconds()/3600
| Python | 0.000001 |
bd6eec33e59e3d46e5da931fbe9e1094bbb7c0bb | Add all primitives to known interactions. | enactiveagents/experiment/basic.py | enactiveagents/experiment/basic.py | """
Module to build experiments (worlds, agents, etc.).
"""
import model.interaction
import model.agent
import experiment
class BasicExperiment(experiment.Experiment):
world_representation = [
"wwwwwwwwwwwwwww",
"w.............w",
"w.wwwwwww.....w",
"w.......wwwww.w",
"w.wwwww.......w",
"w.w.......w...w",
"w.w.wwwww.w...w",
"w.w.w...w.ww.ww",
"w.www.....w...w",
"w.....wwwww.a.w",
"wwwwwwwwwwwwwww"
]
def __init__(self):
super(BasicExperiment, self).__init__()
# Parse world
self.world = self.parse_world(self.world_representation)
# Set up primitives
step = model.interaction.PrimitiveInteraction("Step")
turn_right = model.interaction.PrimitiveInteraction("Turn Right")
turn_left = model.interaction.PrimitiveInteraction("Turn Left")
feel = model.interaction.PrimitiveInteraction("Feel")
no_feel = model.interaction.PrimitiveInteraction("No Feel")
bump = model.interaction.PrimitiveInteraction("Bump")
# Define environment logic for primitives, these functions will be
# registered to the primitive interactions and will be called once
# the agent attempts to enact the primitive interaction.
# The function can manipulate the world and the agents.
# The return value is the actual enacted interaction (i.e., can be
# different form the attempted interaction).
def _step(world, agent, interaction):
if world.can_step(agent):
agent.step()
return step
else:
return bump
def _turn_right(world, agent, interaction):
agent.add_rotation(-90)
return turn_right
def _turn_left(world, agent, interaction):
agent.add_rotation(90)
return turn_left
def _feel(world, agent, interaction):
if world.can_step(agent):
return no_feel
else:
return feel
# Register the previously defined functions.
enact_logic = {}
enact_logic[step] = _step
enact_logic[turn_right] = _turn_right
enact_logic[turn_left] = _turn_left
enact_logic[feel] = _feel
# Set primitives known/enactable by the agents.
primitives = []
primitives.append(step)
primitives.append(turn_right)
primitives.append(turn_left)
primitives.append(feel)
primitives.append(no_feel)
primitives.append(bump)
# Set intrinsic motivation values.
motivation = {}
motivation[step] = 7
motivation[turn_right] = -1
motivation[turn_left] = -1
motivation[feel] = 0
motivation[no_feel] = -1
motivation[bump] = -10
for entity in self.world.get_entities():
if isinstance(entity, model.agent.Agent):
self.world.add_enact_logic(entity, enact_logic)
entity.set_primitives(primitives)
entity.set_motivation(motivation)
def get_world(self):
return self.world | """
Module to build experiments (worlds, agents, etc.).
"""
import model.interaction
import model.agent
import experiment
class BasicExperiment(experiment.Experiment):
world_representation = [
"wwwwwwwwwwwwwww",
"w.............w",
"w.wwwwwww.....w",
"w.......wwwww.w",
"w.wwwww.......w",
"w.w.......w...w",
"w.w.wwwww.w...w",
"w.w.w...w.ww.ww",
"w.www.....w...w",
"w.....wwwww.a.w",
"wwwwwwwwwwwwwww"
]
def __init__(self):
super(BasicExperiment, self).__init__()
# Parse world
self.world = self.parse_world(self.world_representation)
# Set up primitives
step = model.interaction.PrimitiveInteraction("Step")
turn_right = model.interaction.PrimitiveInteraction("Turn Right")
turn_left = model.interaction.PrimitiveInteraction("Turn Left")
feel = model.interaction.PrimitiveInteraction("Feel")
no_feel = model.interaction.PrimitiveInteraction("No Feel")
bump = model.interaction.PrimitiveInteraction("Bump")
# Define environment logic for primitives, these functions will be
# registered to the primitive interactions and will be called once
# the agent attempts to enact the primitive interaction.
# The function can manipulate the world and the agents.
# The return value is the actual enacted interaction (i.e., can be
# different form the attempted interaction).
def _step(world, agent, interaction):
if world.can_step(agent):
agent.step()
return step
else:
return bump
def _turn_right(world, agent, interaction):
agent.add_rotation(-90)
return turn_right
def _turn_left(world, agent, interaction):
agent.add_rotation(90)
return turn_left
def _feel(world, agent, interaction):
if world.can_step(agent):
return no_feel
else:
return feel
# Register the previously defined functions.
enact_logic = {}
enact_logic[step] = _step
enact_logic[turn_right] = _turn_right
enact_logic[turn_left] = _turn_left
enact_logic[feel] = _feel
# Set primitives known/enactable by the agents.
primitives = []
primitives.append(step)
primitives.append(turn_right)
primitives.append(turn_left)
primitives.append(feel)
# Set intrinsic motivation values.
motivation = {}
motivation[step] = 7
motivation[turn_right] = -1
motivation[turn_left] = -1
motivation[feel] = 0
motivation[no_feel] = -1
motivation[bump] = -10
for entity in self.world.get_entities():
if isinstance(entity, model.agent.Agent):
self.world.add_enact_logic(entity, enact_logic)
entity.set_primitives(primitives)
entity.set_motivation(motivation)
def get_world(self):
return self.world | Python | 0 |
8da02c7c4ad382f4e7a2f7a017b32c0cff51547e | set limit of tw id over 5 letters | build_attendee.py | build_attendee.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from pyquery import PyQuery as pq
import json
if __name__ == "__main__":
## ref: pyquery
# https://media.readthedocs.org/pdf/pyquery/latest/pyquery.pdf
data = dict()
file = open('data/attendees.json', "w")
dom = pq(url='https://2016.europe.wordcamp.org/attendees/')
entries = dom.find('ul.tix-attendee-list')
for x in entries('li'):
twitter_name = pq(x).find('a.tix-attendee-twitter').text()
full_name = pq(x).find('div.tix-attendee-name').text()
if twitter_name != None:
# have more than 3 characters ?
if len(twitter_name) > 4:
data[full_name.lower()] = twitter_name
json.dump(data, file, indent=2)
file.close()
file = open('data/list_of_attendees', "w")
for x in data.keys():
file.write(x.encode('utf8'))
file.write("\n")
file.close()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from pyquery import PyQuery as pq
import json
if __name__ == "__main__":
## ref: pyquery
# https://media.readthedocs.org/pdf/pyquery/latest/pyquery.pdf
data = dict()
file = open('data/attendees.json', "w")
dom = pq(url='https://2016.europe.wordcamp.org/attendees/')
entries = dom.find('ul.tix-attendee-list')
for x in entries('li'):
twitter_name = pq(x).find('a.tix-attendee-twitter').text()
full_name = pq(x).find('div.tix-attendee-name').text()
if twitter_name != None:
# have more than 3 characters ?
if len(twitter_name) > 3:
data[full_name.lower()] = twitter_name
json.dump(data, file, indent=2)
file.close()
file = open('data/list_of_attendees', "w")
for x in data.keys():
file.write(x.encode('utf8'))
file.write("\n")
file.close()
| Python | 0.000011 |
dc54a12bfd2124e7203270940928e47198ed914e | bump version | bulbs/__init__.py | bulbs/__init__.py | __version__ = "0.6.24"
| __version__ = "0.6.23"
| Python | 0 |
64383b6d8095f27af775d3c6030b22ee36055b29 | Change summoner example function name, add params | examples/summoner.py | examples/summoner.py | import cassiopeia as cass
from cassiopeia.core import Summoner
def print_summoner(name: str, region: str):
summoner = Summoner(name=name, region=region)
print("Name:", summoner.name)
print("ID:", summoner.id)
print("Account ID:", summoner.account.id)
print("Level:", summoner.level)
print("Revision date:", summoner.revision_date)
print("Profile icon ID:", summoner.profile_icon.id)
print("Profile icon name:", summoner.profile_icon.name)
print("Profile icon URL:", summoner.profile_icon.url)
print("Profile icon image:", summoner.profile_icon.image)
# These are equivalent ways of obtaining a Summoner.
# Note that the region defaults to NA.
# summoner = cass.get_summoner(name)
# summoner = cass.get_summoner(name=summoner.name)
# summoner = cass.get_summoner(id=summoner.id)
# summoner = cass.get_summoner(account_id=summoner.account.id)
if __name__ == "__main__":
print_summoner("Kalturi", "NA")
| import cassiopeia as cass
from cassiopeia.core import Summoner
def test_cass():
name = "Kalturi"
me = Summoner(name=name)
print("Name:", me.name)
print("Id:", me.id)
print("Account id:", me.account.id)
print("Level:", me.level)
print("Revision date:", me.revision_date)
print("Profile icon id:", me.profile_icon.id)
print("Profile icon name:", me.profile_icon.name)
print("Profile icon url:", me.profile_icon.url)
print("Profile icon image:", me.profile_icon.image)
name = me.name
id = me.id
account_id = me.account.id
me = cass.get_summoner(name)
me = cass.get_summoner(name=name)
me = cass.get_summoner(id=id)
me = cass.get_summoner(account_id=account_id)
if __name__ == "__main__":
test_cass()
| Python | 0 |
69091ea58fcd67c61dae3837eb0b9261825d44b3 | Use except as notation | examples/tor_info.py | examples/tor_info.py | #!/usr/bin/env python
# Simple usage example of TorInfo. This class does some magic so that
# once it's set up, all the attributes it has (or appears to) are
# GETINFO ones, in a heirarchy. So where GETINFO specifies
# "net/listeners/dns" TorInfo will have a "net" attribute that
# contains at least "listeners", etcetera. The leaves are all methods
# which return a Deferred. If the corresponding GETINFO takes an
# argument, so does the leaf.
#
# Go straight to "setup_complete" for the goods -- this is called
# after TorInfo and the underlying TorControlProtocol are set up.
#
# If you want to issue multiple GETINFO calls in one network
# transaction, you'll have to use TorControlProtocol's get_info
# instead.
from __future__ import print_function
import sys
from twisted.internet import reactor, defer
from txtorcon import TorInfo, build_local_tor_connection
def error(x):
print("ERROR", x)
return x
@defer.inlineCallbacks
def recursive_dump(indent, obj, depth=0):
if callable(obj):
try:
print("%s: " % obj, end=' ')
sys.stdout.flush()
if obj.takes_arg:
v = yield obj('arrrrrg')
v = yield obj()
v = v.replace('\n', '\\')
if len(v) > 60:
v = v[:50] + '...' + v[-7:]
except Exception as e:
v = 'ERROR: ' + str(e)
print(v)
else:
indent = indent + ' '
for x in obj:
yield recursive_dump(indent, x, depth + 1)
@defer.inlineCallbacks
def setup_complete(info):
print("Top-Level Things:", dir(info))
if True:
# some examples of getting specific GETINFO callbacks
v = yield info.version()
ip = yield info.ip_to_country('1.2.3.4')
boot_phase = yield info.status.bootstrap_phase()
ns = yield info.ns.name('moria1')
guards = yield info.entry_guards()
print('version:', v)
print('1.2.3.4 is in', ip)
print('bootstrap-phase:', boot_phase)
print('moria1:', ns)
print('entry guards:', guards)
# now we dump everything, one at a time
d = recursive_dump('', info)
d.addCallback(lambda x: reactor.stop())
d.addErrback(error)
def setup_failed(arg):
print("SETUP FAILED", arg)
reactor.stop()
def bootstrap(c):
info = TorInfo(c)
info.post_bootstrap.addCallback(setup_complete).addErrback(setup_failed)
d = build_local_tor_connection(reactor, build_state=False)
# do not use addCallbacks() here, in case bootstrap has an error
d.addCallback(bootstrap).addErrback(setup_failed)
reactor.run()
| #!/usr/bin/env python
# Simple usage example of TorInfo. This class does some magic so that
# once it's set up, all the attributes it has (or appears to) are
# GETINFO ones, in a heirarchy. So where GETINFO specifies
# "net/listeners/dns" TorInfo will have a "net" attribute that
# contains at least "listeners", etcetera. The leaves are all methods
# which return a Deferred. If the corresponding GETINFO takes an
# argument, so does the leaf.
#
# Go straight to "setup_complete" for the goods -- this is called
# after TorInfo and the underlying TorControlProtocol are set up.
#
# If you want to issue multiple GETINFO calls in one network
# transaction, you'll have to use TorControlProtocol's get_info
# instead.
from __future__ import print_function
import sys
from twisted.internet import reactor, defer
from txtorcon import TorInfo, build_local_tor_connection
def error(x):
print("ERROR", x)
return x
@defer.inlineCallbacks
def recursive_dump(indent, obj, depth=0):
if callable(obj):
try:
print("%s: " % obj, end=' ')
sys.stdout.flush()
if obj.takes_arg:
v = yield obj('arrrrrg')
v = yield obj()
v = v.replace('\n', '\\')
if len(v) > 60:
v = v[:50] + '...' + v[-7:]
except Exception, e:
v = 'ERROR: ' + str(e)
print(v)
else:
indent = indent + ' '
for x in obj:
yield recursive_dump(indent, x, depth + 1)
@defer.inlineCallbacks
def setup_complete(info):
print("Top-Level Things:", dir(info))
if True:
# some examples of getting specific GETINFO callbacks
v = yield info.version()
ip = yield info.ip_to_country('1.2.3.4')
boot_phase = yield info.status.bootstrap_phase()
ns = yield info.ns.name('moria1')
guards = yield info.entry_guards()
print('version:', v)
print('1.2.3.4 is in', ip)
print('bootstrap-phase:', boot_phase)
print('moria1:', ns)
print('entry guards:', guards)
# now we dump everything, one at a time
d = recursive_dump('', info)
d.addCallback(lambda x: reactor.stop())
d.addErrback(error)
def setup_failed(arg):
print("SETUP FAILED", arg)
reactor.stop()
def bootstrap(c):
info = TorInfo(c)
info.post_bootstrap.addCallback(setup_complete).addErrback(setup_failed)
d = build_local_tor_connection(reactor, build_state=False)
# do not use addCallbacks() here, in case bootstrap has an error
d.addCallback(bootstrap).addErrback(setup_failed)
reactor.run()
| Python | 0.000023 |
b077df615eb4354f416877cc2857fb9848e158eb | Fix get_sort_by_toggle to work with QueryDicts with multiple values | saleor/core/templatetags/shop.py | saleor/core/templatetags/shop.py | from __future__ import unicode_literals
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
from django.template import Library
from django.utils.http import urlencode
register = Library()
@register.filter
def slice(items, group_size=1):
args = [iter(items)] * group_size
return (filter(None, group)
for group in zip_longest(*args, fillvalue=None))
@register.simple_tag(takes_context=True)
def get_sort_by_url(context, field, descending=False):
request = context['request']
request_get = request.GET.dict()
if descending:
request_get['sort_by'] = '-' + field
else:
request_get['sort_by'] = field
return '%s?%s' % (request.path, urlencode(request_get))
@register.simple_tag(takes_context=True)
def get_sort_by_url_toggle(context, field):
request = context['request']
request_get = request.GET.copy()
if field == request_get.get('sort_by'):
new_sort_by = u'-%s' % field # descending sort
else:
new_sort_by = field # ascending sort
request_get['sort_by'] = new_sort_by
return '%s?%s' % (request.path, request_get.urlencode())
| from __future__ import unicode_literals
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
from django.template import Library
from django.utils.http import urlencode
register = Library()
@register.filter
def slice(items, group_size=1):
args = [iter(items)] * group_size
return (filter(None, group)
for group in zip_longest(*args, fillvalue=None))
@register.simple_tag(takes_context=True)
def get_sort_by_url(context, field, descending=False):
request = context['request']
request_get = request.GET.dict()
if descending:
request_get['sort_by'] = '-' + field
else:
request_get['sort_by'] = field
return '%s?%s' % (request.path, urlencode(request_get))
@register.simple_tag(takes_context=True)
def get_sort_by_url_toggle(context, field):
request = context['request']
request_get = request.GET.dict()
if field == request_get.get('sort_by'):
new_sort_by = '-%s' % field # descending sort
else:
new_sort_by = field # ascending sort
request_get['sort_by'] = new_sort_by
return '%s?%s' % (request.path, urlencode(request_get))
| Python | 0 |
dab8e1af4091a18a6251668b9c2475ee6b1e8f66 | Fix diffuse.explicit() for constant non-zero extrapolation | phi/physics/diffuse.py | phi/physics/diffuse.py | """
Functions to simulate diffusion processes on `phi.field.Field` objects.
"""
from phi import math
from phi.field import Grid, Field, laplace, solve_linear, jit_compile_linear
from phi.field._field import FieldType
from phi.field._grid import GridType
from phi.math import copy_with
def explicit(field: FieldType,
diffusivity: float or math.Tensor or Field,
dt: float or math.Tensor,
substeps: int = 1) -> FieldType:
"""
Simulate a finite-time diffusion process of the form dF/dt = α · ΔF on a given `Field` FieldType with diffusion coefficient α.
If `field` is periodic (set via `extrapolation='periodic'`), diffusion may be simulated in Fourier space.
Otherwise, finite differencing is used to approximate the
Args:
field: CenteredGrid, StaggeredGrid or ConstantField
diffusivity: Diffusion per time. `diffusion_amount = diffusivity * dt`
dt: Time interval. `diffusion_amount = diffusivity * dt`
substeps: number of iterations to use (Default value = 1)
field: FieldType:
Returns:
Diffused field of same type as `field`.
"""
amount = diffusivity * dt
if isinstance(amount, Field):
amount = amount.at(field)
ext = field.extrapolation
for i in range(substeps):
field += amount / substeps * laplace(field).with_extrapolation(ext)
field = field.with_extrapolation(ext)
return field
def implicit(field: FieldType,
diffusivity: float or math.Tensor or Field,
dt: float or math.Tensor,
order: int = 1,
solve=math.Solve('CG', 1e-5, 0)) -> FieldType:
"""
Diffusion by solving a linear system of equations.
Args:
order: Order of method, 1=first order. This translates to `substeps` for the explicit sharpening.
field: `phi.field.Field` to diffuse.
diffusivity: Diffusion per time. `diffusion_amount = diffusivity * dt`
dt: Time interval. `diffusion_amount = diffusivity * dt`
solve:
Returns:
Diffused field of same type as `field`.
"""
@jit_compile_linear
def sharpen(x):
return explicit(x, diffusivity, -dt, substeps=order)
if not solve.x0:
solve = copy_with(solve, x0=field)
return solve_linear(sharpen, y=field, solve=solve)
def fourier(field: GridType,
diffusivity: float or math.Tensor,
dt: float or math.Tensor) -> FieldType:
"""
Exact diffusion of a periodic field in frequency space.
For non-periodic fields or non-constant diffusivity, use another diffusion function such as `explicit()`.
Args:
field:
diffusivity: Diffusion per time. `diffusion_amount = diffusivity * dt`
dt: Time interval. `diffusion_amount = diffusivity * dt`
Returns:
Diffused field of same type as `field`.
"""
assert isinstance(field, Grid), "Cannot diffuse field of type '%s'" % type(field)
assert field.extrapolation == math.extrapolation.PERIODIC, "Fourier diffusion can only be applied to periodic fields."
amount = diffusivity * dt
k = math.fftfreq(field.resolution)
k2 = math.vec_squared(k)
fft_laplace = -(2 * math.PI) ** 2 * k2
diffuse_kernel = math.exp(fft_laplace * amount)
result_k = math.fft(field.values) * diffuse_kernel
result_values = math.real(math.ifft(result_k))
return field.with_values(result_values)
| """
Functions to simulate diffusion processes on `phi.field.Field` objects.
"""
from phi import math
from phi.field import Grid, Field, laplace, solve_linear, jit_compile_linear
from phi.field._field import FieldType
from phi.field._grid import GridType
from phi.math import copy_with
def explicit(field: FieldType,
diffusivity: float or math.Tensor or Field,
dt: float or math.Tensor,
substeps: int = 1) -> FieldType:
"""
Simulate a finite-time diffusion process of the form dF/dt = α · ΔF on a given `Field` FieldType with diffusion coefficient α.
If `field` is periodic (set via `extrapolation='periodic'`), diffusion may be simulated in Fourier space.
Otherwise, finite differencing is used to approximate the
Args:
field: CenteredGrid, StaggeredGrid or ConstantField
diffusivity: Diffusion per time. `diffusion_amount = diffusivity * dt`
dt: Time interval. `diffusion_amount = diffusivity * dt`
substeps: number of iterations to use (Default value = 1)
field: FieldType:
Returns:
Diffused field of same type as `field`.
"""
amount = diffusivity * dt
if isinstance(amount, Field):
amount = amount.at(field)
for i in range(substeps):
field += amount / substeps * laplace(field).with_extrapolation(field.extrapolation)
return field
def implicit(field: FieldType,
diffusivity: float or math.Tensor or Field,
dt: float or math.Tensor,
order: int = 1,
solve=math.Solve('CG', 1e-5, 0)) -> FieldType:
"""
Diffusion by solving a linear system of equations.
Args:
order: Order of method, 1=first order. This translates to `substeps` for the explicit sharpening.
field: `phi.field.Field` to diffuse.
diffusivity: Diffusion per time. `diffusion_amount = diffusivity * dt`
dt: Time interval. `diffusion_amount = diffusivity * dt`
solve:
Returns:
Diffused field of same type as `field`.
"""
@jit_compile_linear
def sharpen(x):
return explicit(x, diffusivity, -dt, substeps=order)
if not solve.x0:
solve = copy_with(solve, x0=field)
return solve_linear(sharpen, y=field, solve=solve)
def fourier(field: GridType,
diffusivity: float or math.Tensor,
dt: float or math.Tensor) -> FieldType:
"""
Exact diffusion of a periodic field in frequency space.
For non-periodic fields or non-constant diffusivity, use another diffusion function such as `explicit()`.
Args:
field:
diffusivity: Diffusion per time. `diffusion_amount = diffusivity * dt`
dt: Time interval. `diffusion_amount = diffusivity * dt`
Returns:
Diffused field of same type as `field`.
"""
assert isinstance(field, Grid), "Cannot diffuse field of type '%s'" % type(field)
assert field.extrapolation == math.extrapolation.PERIODIC, "Fourier diffusion can only be applied to periodic fields."
amount = diffusivity * dt
k = math.fftfreq(field.resolution)
k2 = math.vec_squared(k)
fft_laplace = -(2 * math.PI) ** 2 * k2
diffuse_kernel = math.exp(fft_laplace * amount)
result_k = math.fft(field.values) * diffuse_kernel
result_values = math.real(math.ifft(result_k))
return field.with_values(result_values)
| Python | 0.000012 |
3e62a39892c231419ac09310808d95cb42b4f69f | add python solution for valid_parentheses | python/valid_parentheses.py | python/valid_parentheses.py |
# validate parentheses of string
import sys
inputChars = [ x for x in sys.argv[1] ]
openParens = ('(', '[', '{')
closeParens = (')', ']', '}')
parenPairs = {
')': '(',
']': '[',
'}': '{'
}
parenHistory = []
for c in inputChars:
if c in openParens:
parenHistory.append(c)
elif c in closeParens:
if len(parenHistory) == 0 or parenHistory.pop() != parenPairs[c]:
print False
sys.exit(1)
print True if len(parenHistory) == 0 else False
| Python | 0.00115 | |
8ebe99ec5e944edaf7e0999222f1f1a54b07e5a4 | Fix restart_needed | salt/states/win_servermanager.py | salt/states/win_servermanager.py | # -*- coding: utf-8 -*-
'''
Manage Windows features via the ServerManager powershell module
'''
# Import salt modules
import salt.utils
def __virtual__():
'''
Load only if win_servermanager is loaded
'''
return 'win_servermanager' if 'win_servermanager.install' in __salt__ else False
def installed(name, recurse=False, force=False):
'''
Install the windows feature
name:
short name of the feature (the right column in win_servermanager.list_available)
recurse:
install all sub-features as well
force:
if the feature is installed but on of its sub-features are not installed set this to True to force
the installation of the sub-features
Note:
Some features require reboot after un/installation. If so, until the server is restarted
other features can not be installed!
Example:
Run ``salt MinionName win_servermanager.list_available`` to get a list of available roles and features. Use
the name in the right column. Do not use the role or feature names mentioned in the PKGMGR documentation. In
this example for IIS-WebServerRole the name to be used is Web-Server.
.. code-block:: yaml
ISWebserverRole:
win_servermanager.installed:
- force: True
- recurse: True
- name: Web-Server
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
# Determine if the feature is installed
old = __salt__['win_servermanager.list_installed']()
if name not in old:
ret['comment'] = '{0} will be installed recurse={1}'.format(name, recurse)
elif force and recurse:
ret['comment'] = '{0} already installed but might install sub-features'.format(name)
else:
ret['comment'] = 'The feature {0} is already installed'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
return ret
# Install the features
status = __salt__['win_servermanager.install'](name, recurse)
ret['result'] = status['Success']
if not ret['result']:
ret['comment'] = 'Failed to install {0}: {1}'.format(name, ret['changes']['feature']['ExitCode'])
if 'already installed' not in status['DisplayName']:
ret['changes']['feature'] = status
ret['restart_needed'] = status['RestartNeeded']
new = __salt__['win_servermanager.list_installed']()
ret['changes']['features'] = salt.utils.compare_dicts(old, new)
return ret
def removed(name):
'''
Remove the windows feature
name:
short name of the feature (the right column in win_servermanager.list_available)
.. note::
Some features require a reboot after uninstallation. If so the feature will not be completely uninstalled until
the server is restarted.
Example:
Run ``salt MinionName win_servermanager.list_installed`` to get a list of all features installed. Use the top
name listed for each feature, not the indented one. Do not use the role or feature names mentioned in the
PKGMGR documentation.
.. code-block:: yaml
ISWebserverRole:
win_servermanager.removed:
- name: Web-Server
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
# Determine if the feature is installed
old = __salt__['win_servermanager.list_installed']()
if name in old:
ret['comment'] = '{0} will be removed'.format(name)
else:
ret['comment'] = 'The feature {0} is not installed'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
return ret
# Remove the features
status = __salt__['win_servermanager.remove'](name)
ret['result'] = status['Success']
if not ret['result']:
ret['comment'] = 'Failed to uninstall the feature {0}'.format(ret['changes']['feature']['ExitCode'])
ret['restart_needed'] = status['RestartNeeded']
new = __salt__['win_servermanager.list_installed']()
ret['changes']['features'] = salt.utils.compare_dicts(old, new)
return ret
| # -*- coding: utf-8 -*-
'''
Manage Windows features via the ServerManager powershell module
'''
# Import salt modules
import salt.utils
def __virtual__():
'''
Load only if win_servermanager is loaded
'''
return 'win_servermanager' if 'win_servermanager.install' in __salt__ else False
def installed(name, recurse=False, force=False):
'''
Install the windows feature
name:
short name of the feature (the right column in win_servermanager.list_available)
recurse:
install all sub-features as well
force:
if the feature is installed but on of its sub-features are not installed set this to True to force
the installation of the sub-features
Note:
Some features require reboot after un/installation. If so, until the server is restarted
other features can not be installed!
Example:
Run ``salt MinionName win_servermanager.list_available`` to get a list of available roles and features. Use
the name in the right column. Do not use the role or feature names mentioned in the PKGMGR documentation. In
this example for IIS-WebServerRole the name to be used is Web-Server.
.. code-block:: yaml
ISWebserverRole:
win_servermanager.installed:
- force: True
- recurse: True
- name: Web-Server
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
# Determine if the feature is installed
old = __salt__['win_servermanager.list_installed']()
if name not in old:
ret['comment'] = '{0} will be installed recurse={1}'.format(name, recurse)
elif force and recurse:
ret['comment'] = '{0} already installed but might install sub-features'.format(name)
else:
ret['comment'] = 'The feature {0} is already installed'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
return ret
# Install the features
status = __salt__['win_servermanager.install'](name, recurse)
ret['result'] = status['Success']
if not ret['result']:
ret['comment'] = 'Failed to install {0}: {1}'.format(name, ret['changes']['feature']['ExitCode'])
if 'already installed' not in status['DisplayName']:
ret['changes']['feature'] = status
ret['changes']['restart_needed'] = status['RestartNeeded']
new = __salt__['win_servermanager.list_installed']()
ret['changes']['features'] = salt.utils.compare_dicts(old, new)
return ret
def removed(name):
'''
Remove the windows feature
name:
short name of the feature (the right column in win_servermanager.list_available)
.. note::
Some features require a reboot after uninstallation. If so the feature will not be completely uninstalled until
the server is restarted.
Example:
Run ``salt MinionName win_servermanager.list_installed`` to get a list of all features installed. Use the top
name listed for each feature, not the indented one. Do not use the role or feature names mentioned in the
PKGMGR documentation.
.. code-block:: yaml
ISWebserverRole:
win_servermanager.removed:
- name: Web-Server
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
# Determine if the feature is installed
old = __salt__['win_servermanager.list_installed']()
if name in old:
ret['comment'] = '{0} will be removed'.format(name)
else:
ret['comment'] = 'The feature {0} is not installed'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
return ret
# Remove the features
status = __salt__['win_servermanager.remove'](name)
ret['result'] = status['Success']
if not ret['result']:
ret['comment'] = 'Failed to uninstall the feature {0}'.format(ret['changes']['feature']['ExitCode'])
ret['changes']['restart_needed'] = status['RestartNeeded']
new = __salt__['win_servermanager.list_installed']()
ret['changes']['features'] = salt.utils.compare_dicts(old, new)
return ret
| Python | 0.000002 |
60f753e736827f61607e10d160b7e7bab75b77cc | update pyasn version for workers | pipeline/setup.py | pipeline/setup.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dependency setup for beam remote workers."""
import setuptools
setuptools.setup(
name='censoredplanet-analysis',
version='0.0.1',
install_requires=['pyasn==1.6.1'],
packages=setuptools.find_packages(),
url='https://github.com/Jigsaw-Code/censoredplanet-analysis',
author='Sarah Laplante',
author_email='laplante@google.com')
| # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dependency setup for beam remote workers."""
import setuptools
setuptools.setup(
name='censoredplanet-analysis',
version='0.0.1',
install_requires=['pyasn==1.6.0b1'],
packages=setuptools.find_packages(),
url='https://github.com/Jigsaw-Code/censoredplanet-analysis',
author='Sarah Laplante',
author_email='laplante@google.com')
| Python | 0 |
33240ac6581188e363d2e4e21753a3071f57df86 | fix default source | pipenv/project.py | pipenv/project.py | import os
import toml
from . import _pipfile as pipfile
from .utils import format_toml, multi_split
from .utils import convert_deps_from_pip, convert_deps_to_pip
class Project(object):
"""docstring for Project"""
def __init__(self):
super(Project, self).__init__()
@property
def name(self):
return self.pipfile_location.split(os.sep)[-2]
@property
def pipfile_exists(self):
return bool(self.pipfile_location)
@property
def virtualenv_exists(self):
return os.path.isdir(self.virtualenv_location)
@property
def virtualenv_location(self):
return os.sep.join(self.pipfile_location.split(os.sep)[:-1] + ['.venv'])
@property
def pipfile_location(self):
try:
return pipfile.Pipfile.find()
except RuntimeError:
return None
@property
def parsed_pipfile(self):
with open(self.pipfile_location, 'r') as f:
return toml.load(f)
@property
def lockfile_location(self):
return '{0}.lock'.format(self.pipfile_location)
@property
def lockfile_exists(self):
return os.path.isfile(self.lockfile_location)
def create_pipfile(self):
data = {u'source': [{u'url': u'https://pypi.python.org/simple', u'verify_ssl': True}], u'packages': {}, 'dev-packages': {}}
with open('Pipfile', 'w') as f:
f.write(toml.dumps(data))
@property
def source(self):
if 'source' in self.parsed_pipfile:
return self.parsed_pipfile['source'][0]
else:
return [{u'url': u'https://pypi.python.org/simple', u'verify_ssl': True}][0]
def remove_package_from_pipfile(self, package_name, dev=False):
pipfile_path = pipfile.Pipfile.find()
# Read and append Pipfile.
p = self.parsed_pipfile
key = 'dev-packages' if dev else 'packages'
if key in p:
if package_name in p[key]:
del p[key][package_name]
# Write Pipfile.
data = format_toml(toml.dumps(p))
with open(pipfile_path, 'w') as f:
f.write(data)
def add_package_to_pipfile(self, package_name, dev=False):
# Lower-case package name.
package_name = package_name.lower()
# Find the Pipfile.
pipfile_path = pipfile.Pipfile.find()
# Read and append Pipfile.
p = self.parsed_pipfile
key = 'dev-packages' if dev else 'packages'
# Set empty group if it doesn't exist yet.
if key not in p:
p[key] = {}
package = convert_deps_from_pip(package_name)
package_name = [k for k in package.keys()][0]
# Add the package to the group.
p[key][package_name] = package[package_name]
# Write Pipfile.
data = format_toml(toml.dumps(p))
with open(pipfile_path, 'w') as f:
f.write(data)
| import os
import toml
from . import _pipfile as pipfile
from .utils import format_toml, multi_split
from .utils import convert_deps_from_pip, convert_deps_to_pip
class Project(object):
"""docstring for Project"""
def __init__(self):
super(Project, self).__init__()
@property
def name(self):
return self.pipfile_location.split(os.sep)[-2]
@property
def pipfile_exists(self):
return bool(self.pipfile_location)
@property
def virtualenv_exists(self):
return os.path.isdir(self.virtualenv_location)
@property
def virtualenv_location(self):
return os.sep.join(self.pipfile_location.split(os.sep)[:-1] + ['.venv'])
@property
def pipfile_location(self):
try:
return pipfile.Pipfile.find()
except RuntimeError:
return None
@property
def parsed_pipfile(self):
with open(self.pipfile_location, 'r') as f:
return toml.load(f)
@property
def lockfile_location(self):
return '{0}.lock'.format(self.pipfile_location)
@property
def lockfile_exists(self):
return os.path.isfile(self.lockfile_location)
def create_pipfile(self):
data = {u'source': [{u'url': u'https://pypi.org/', u'verify_ssl': True}], u'packages': {}, 'dev-packages': {}}
with open('Pipfile', 'w') as f:
f.write(toml.dumps(data))
@property
def source(self):
if 'source' in self.parsed_pipfile:
return self.parsed_pipfile['source'][0]
else:
return [{u'url': u'https://pypi.org/', u'verify_ssl': True}][0]
@staticmethod
def remove_package_from_pipfile(package_name, dev=False):
pipfile_path = pipfile.Pipfile.find()
# Read and append Pipfile.
with open(pipfile_path, 'r') as f:
p = toml.loads(f.read())
key = 'dev-packages' if dev else 'packages'
if key in p:
if package_name in p[key]:
del p[key][package_name]
# Write Pipfile.
data = format_toml(toml.dumps(p))
with open(pipfile_path, 'w') as f:
f.write(data)
@staticmethod
def add_package_to_pipfile(package_name, dev=False):
# Lower-case package name.
package_name = package_name.lower()
# Find the Pipfile.
pipfile_path = pipfile.Pipfile.find()
# Read and append Pipfile.
with open(pipfile_path, 'r') as f:
p = toml.loads(f.read())
key = 'dev-packages' if dev else 'packages'
# Set empty group if it doesn't exist yet.
if key not in p:
p[key] = {}
package = convert_deps_from_pip(package_name)
package_name = [k for k in package.keys()][0]
# Add the package to the group.
p[key][package_name] = package[package_name]
# Write Pipfile.
data = format_toml(toml.dumps(p))
with open(pipfile_path, 'w') as f:
f.write(data)
| Python | 0.000001 |
7842919b2af368c640363b4e4e05144049b111ba | Remove BaseMail dependency on User object | ovp_core/emails.py | ovp_core/emails.py | from django.core.mail import EmailMultiAlternatives
from django.template import Context, Template
from django.template.loader import get_template
from django.conf import settings
import threading
class EmailThread(threading.Thread):
def __init__(self, msg):
self.msg = msg
threading.Thread.__init__(self)
def run (self):
return self.msg.send() > 0
class BaseMail:
"""
This class is responsible for firing emails
"""
from_email = ''
def __init__(self, email_address, async_mail=None):
self.email_address = email_address
self.async_mail = async_mail
def sendEmail(self, template_name, subject, context):
ctx = Context(context)
text_content = get_template('email/{}.txt'.format(template_name)).render(ctx)
html_content = get_template('email/{}.html'.format(template_name)).render(ctx)
msg = EmailMultiAlternatives(subject, text_content, self.from_email, [self.email_address])
msg.attach_alternative(text_content, "text/plain")
msg.attach_alternative(html_content, "text/html")
if self.async_mail:
async_flag="async"
else:
async_flag=getattr(settings, "DEFAULT_SEND_EMAIL", "async")
if async_flag == "async":
t = EmailThread(msg)
t.start()
return t
else:
return msg.send() > 0
| from django.core.mail import EmailMultiAlternatives
from django.template import Context, Template
from django.template.loader import get_template
from django.conf import settings
import threading
class EmailThread(threading.Thread):
def __init__(self, msg):
self.msg = msg
threading.Thread.__init__(self)
def run (self):
return self.msg.send() > 0
class BaseMail:
"""
This class is responsible for firing emails
"""
from_email = ''
def __init__(self, user, async_mail=None):
self.user = user
self.async_mail = async_mail
def sendEmail(self, template_name, subject, context):
ctx = Context(context)
text_content = get_template('email/{}.txt'.format(template_name)).render(ctx)
html_content = get_template('email/{}.html'.format(template_name)).render(ctx)
msg = EmailMultiAlternatives(subject, text_content, self.from_email, [self.user.email])
msg.attach_alternative(text_content, "text/plain")
msg.attach_alternative(html_content, "text/html")
if self.async_mail:
async_flag="async"
else:
async_flag=getattr(settings, "DEFAULT_SEND_EMAIL", "async")
if async_flag == "async":
t = EmailThread(msg)
t.start()
return t
else:
return msg.send() > 0
| Python | 0 |
3c9de69112c8158877e4b0060ef0ab89c083f376 | Build 1.14.0.1 package for Windows | packages/custom.py | packages/custom.py | # -*- Mode: Python -*- vi:si:et:sw=4:sts=4:ts=4:syntax=python
from cerbero.packages import package
from cerbero.enums import License
class GStreamer:
url = "http://gstreamer.freedesktop.org"
version = '1.14.0.1'
vendor = 'GStreamer Project'
licenses = [License.LGPL]
org = 'org.freedesktop.gstreamer'
| # -*- Mode: Python -*- vi:si:et:sw=4:sts=4:ts=4:syntax=python
from cerbero.packages import package
from cerbero.enums import License
class GStreamer:
url = "http://gstreamer.freedesktop.org"
version = '1.14.0'
vendor = 'GStreamer Project'
licenses = [License.LGPL]
org = 'org.freedesktop.gstreamer'
| Python | 0 |
2250fdef5528bb59ca2c3218110d637484737659 | fix pilutil.imresize test. Patch by Mark Wiebe. | scipy/misc/tests/test_pilutil.py | scipy/misc/tests/test_pilutil.py | import os.path
import numpy as np
from numpy.testing import assert_, assert_equal, \
dec, decorate_methods, TestCase, run_module_suite
try:
import PIL.Image
except ImportError:
_have_PIL = False
else:
_have_PIL = True
import scipy.misc.pilutil as pilutil
# Function / method decorator for skipping PIL tests on import failure
_pilskip = dec.skipif(not _have_PIL, 'Need to import PIL for this test')
datapath = os.path.dirname(__file__)
class TestPILUtil(TestCase):
def test_imresize(self):
im = np.random.random((10,20))
for T in np.sctypes['float'] + [float]:
# 1.1 rounds to below 1.1 for float16, 1.101 works
im1 = pilutil.imresize(im,T(1.101))
assert_equal(im1.shape,(11,22))
def test_imresize2(self):
im = np.random.random((20,30))
im2 = pilutil.imresize(im, (30,40), interp='bicubic')
assert_equal(im2.shape, (30,40))
def test_imresize3(self):
im = np.random.random((15,30))
im2 = pilutil.imresize(im, (30,60), interp='nearest')
assert_equal(im2.shape, (30,60))
def test_bytescale(self):
x = np.array([0,1,2],np.uint8)
y = np.array([0,1,2])
assert_equal(pilutil.bytescale(x),x)
assert_equal(pilutil.bytescale(y),[0,127,255])
def tst_fromimage(filename, irange):
img = pilutil.fromimage(PIL.Image.open(filename))
imin,imax = irange
assert_(img.min() >= imin)
assert_(img.max() <= imax)
@_pilskip
def test_fromimage():
''' Test generator for parametric tests '''
data = {'icon.png':(0,255),
'icon_mono.png':(0,2),
'icon_mono_flat.png':(0,1)}
for fn, irange in data.iteritems():
yield tst_fromimage, os.path.join(datapath,'data',fn), irange
decorate_methods(TestPILUtil, _pilskip)
if __name__ == "__main__":
run_module_suite()
| import os.path
import numpy as np
from numpy.testing import assert_, assert_equal, \
dec, decorate_methods, TestCase, run_module_suite
try:
import PIL.Image
except ImportError:
_have_PIL = False
else:
_have_PIL = True
import scipy.misc.pilutil as pilutil
# Function / method decorator for skipping PIL tests on import failure
_pilskip = dec.skipif(not _have_PIL, 'Need to import PIL for this test')
datapath = os.path.dirname(__file__)
class TestPILUtil(TestCase):
def test_imresize(self):
im = np.random.random((10,20))
for T in np.sctypes['float'] + [float]:
im1 = pilutil.imresize(im,T(1.1))
assert_equal(im1.shape,(11,22))
def test_imresize2(self):
im = np.random.random((20,30))
im2 = pilutil.imresize(im, (30,40), interp='bicubic')
assert_equal(im2.shape, (30,40))
def test_imresize3(self):
im = np.random.random((15,30))
im2 = pilutil.imresize(im, (30,60), interp='nearest')
assert_equal(im2.shape, (30,60))
def test_bytescale(self):
x = np.array([0,1,2],np.uint8)
y = np.array([0,1,2])
assert_equal(pilutil.bytescale(x),x)
assert_equal(pilutil.bytescale(y),[0,127,255])
def tst_fromimage(filename, irange):
img = pilutil.fromimage(PIL.Image.open(filename))
imin,imax = irange
assert_(img.min() >= imin)
assert_(img.max() <= imax)
@_pilskip
def test_fromimage():
''' Test generator for parametric tests '''
data = {'icon.png':(0,255),
'icon_mono.png':(0,2),
'icon_mono_flat.png':(0,1)}
for fn, irange in data.iteritems():
yield tst_fromimage, os.path.join(datapath,'data',fn), irange
decorate_methods(TestPILUtil, _pilskip)
if __name__ == "__main__":
run_module_suite()
| Python | 0 |
b3573faeff22f220990ea2c97a7c9eae26429258 | add parse for application/json | tornado-sqlalchemy-example/app.py | tornado-sqlalchemy-example/app.py | # -*- coding: utf-8 -*-
import os
import tornado.web
import tornado.options
import tornado.ioloop
from db import db
from model import User
from tornado.escape import json_decode, to_unicode
class BaseHandler(tornado.web.RequestHandler):
@property
def db(self):
return self.application.db
def get_json_argument(self, name, default=None):
"""当Content-Type的值为application/json, 解析请求参数"""
args = json_decode(self.request.body)
name = to_unicode(name)
if name in args:
return args[name]
elif default is not None:
return default
else:
raise tornado.web.MissingArgumentError(name)
class IndexHandler(BaseHandler):
def get(self):
data = self.db.query(User).all()
a = User(username="test", password="test")
self.db.add(a)
data1 = self.db.query(User).all()
for d in data:
self.write("user: %s\n" % d.username)
self.write("==================")
for d in data1:
self.write("second %s" % d.username)
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", IndexHandler),
]
settings = dict(
debug=True,
static_path=os.path.join(os.path.dirname(__file__), "static"),
template_path=os.path.join(os.path.dirname(__file__), "templates")
)
tornado.web.Application.__init__(self, handlers, **settings)
self.db = db
if __name__ == '__main__':
tornado.options.parse_command_line()
Application().listen(8000)
tornado.ioloop.IOLoop.instance().start()
| import os
import tornado.web
import tornado.options
import tornado.ioloop
from db import db
from model import User
class BaseHandler(tornado.web.RequestHandler):
@property
def db(self):
return self.application.db
class IndexHandler(BaseHandler):
def get(self):
data = self.db.query(User).all()
a = User(username="test", password="test")
self.db.add(a)
data1 = self.db.query(User).all()
for d in data:
self.write("user: %s\n" % d.username)
self.write("==================")
for d in data1:
self.write("second %s" % d.username)
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", IndexHandler),
]
settings = dict(
debug=True,
static_path=os.path.join(os.path.dirname(__file__), "static"),
template_path=os.path.join(os.path.dirname(__file__), "templates")
)
tornado.web.Application.__init__(self, handlers, **settings)
self.db = db
if __name__ == '__main__':
tornado.options.parse_command_line()
Application().listen(8000)
tornado.ioloop.IOLoop.instance().start()
| Python | 0.000001 |
3056cf737ae0b6717073a03a6e01addfb1415416 | is_project is *not* a uuid | scrapi/processing/osf/hashing.py | scrapi/processing/osf/hashing.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unicodedata
import string
import hashlib
def get_id(doc):
return normalize_string(doc['id']['serviceID'])
def get_source(doc):
return normalize_string(doc['source'])
def get_doi(doc):
return normalize_string(doc['id']['doi'] + get_title(doc))
def normalize_string(astring): # helper function for grab_ funcs; takes a unicode string
astring = astring.lower()
# stop words - happy to add more
stopwords = ['the', 'a', 'an', 'about', 'do', 'does', 'what', 'who', 'it', 'to', 'has', 'had', 'in', 'by']
for word in stopwords:
word = ' ' + word + ' '
astring = astring.replace(word, ' ')
# docs.python.org/2/library/unicodedata.html
# TODO: this may not work for some unicode characters; I dealt w/ known special cases
# (when it fails to transliterate, it replaces with '')
astring = astring.replace(u'æ', u'ae')
astring = astring.replace(u'Æ', u'Ae')
astring = astring.replace(u'ß', u'ss') # assumes good transliteration
astring = astring.replace(u'—', u'')
bstring = unicodedata.normalize('NFKD', astring).encode('ascii','ignore')
exclude = set(string.punctuation)
exclude.add(' ')
exclude.add('\n')
bstring = ''.join(ch for ch in bstring if ch not in exclude)
return bstring # returns the essence of the string, as a string
def get_contributors(doc):
contributors = doc['contributors'] # this is a list
namelist = ''
for contributor in contributors:
fullname = contributor['given'] + contributor['family']
namelist += fullname
namelist = sorted(namelist) # alphabetical order, in case contrib order varies by source
namelist = ''.join(namelist)
namelist = normalize_string(namelist)
namelist = hashlib.md5(namelist).hexdigest() # should be shorter as md5 than full list
return normalize_string(namelist) # returns a list of strings
def get_title(doc):
title = doc['title']
title = normalize_string(title)
title = hashlib.md5(title).hexdigest() # should be shorter on average than full title
return title
def is_project(doc):
return ';isProject:true'
REPORT_HASH_FUNCTIONS = [get_title, get_contributors, get_doi, get_id]
RESOURCE_HASH_FUNCTIONS = [get_title, get_contributors]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unicodedata
import string
import hashlib
def get_id(doc):
return normalize_string(doc['id']['serviceID'])
def get_source(doc):
return normalize_string(doc['source'])
def get_doi(doc):
return normalize_string(doc['id']['doi'] + get_title(doc))
def normalize_string(astring): # helper function for grab_ funcs; takes a unicode string
astring = astring.lower()
# stop words - happy to add more
stopwords = ['the', 'a', 'an', 'about', 'do', 'does', 'what', 'who', 'it', 'to', 'has', 'had', 'in', 'by']
for word in stopwords:
word = ' ' + word + ' '
astring = astring.replace(word, ' ')
# docs.python.org/2/library/unicodedata.html
# TODO: this may not work for some unicode characters; I dealt w/ known special cases
# (when it fails to transliterate, it replaces with '')
astring = astring.replace(u'æ', u'ae')
astring = astring.replace(u'Æ', u'Ae')
astring = astring.replace(u'ß', u'ss') # assumes good transliteration
astring = astring.replace(u'—', u'')
bstring = unicodedata.normalize('NFKD', astring).encode('ascii','ignore')
exclude = set(string.punctuation)
exclude.add(' ')
exclude.add('\n')
bstring = ''.join(ch for ch in bstring if ch not in exclude)
return bstring # returns the essence of the string, as a string
def get_contributors(doc):
contributors = doc['contributors'] # this is a list
namelist = ''
for contributor in contributors:
fullname = contributor['given'] + contributor['family']
namelist += fullname
namelist = sorted(namelist) # alphabetical order, in case contrib order varies by source
namelist = ''.join(namelist)
namelist = normalize_string(namelist)
namelist = hashlib.md5(namelist).hexdigest() # should be shorter as md5 than full list
return normalize_string(namelist) # returns a list of strings
def get_title(doc):
title = doc['title']
title = normalize_string(title)
title = hashlib.md5(title).hexdigest() # should be shorter on average than full title
return title
def is_project(doc):
return ';isProject:true'
REPORT_HASH_FUNCTIONS = [get_title, get_contributors, get_doi, get_id]
RESOURCE_HASH_FUNCTIONS = [get_title, get_contributors, is_project]
| Python | 0.999386 |
60c355182f5e2d6a049f763031ffd15c57539a18 | add views as a figshare metric | totalimpact/providers/figshare.py | totalimpact/providers/figshare.py | from totalimpact.providers import provider
from totalimpact.providers.provider import Provider, ProviderContentMalformedError
import simplejson
import logging
logger = logging.getLogger('ti.providers.figshare')
class Figshare(Provider):
example_id = ("doi", "10.6084/m9.figshare.92393")
url = "http://figshare.com"
descr = "Make all of your research outputs sharable, citable and visible in the browser for free."
biblio_url_template = "http://api.figshare.com/v1/articles/%s"
aliases_url_template = "http://api.figshare.com/v1/articles/%s"
metrics_url_template = "http://api.figshare.com/v1/articles/%s"
provenance_url_template = "http://dx.doi.org/%s"
static_meta_dict = {
"shares": {
"display_name": "shares",
"provider": "figshare",
"provider_url": "http://figshare.com",
"description": "The number of times this has been shared",
"icon": "http://figshare.com/static/img/favicon.png",
},
"downloads": {
"display_name": "downloads",
"provider": "figshare",
"provider_url": "http://figshare.com",
"description": "The number of times this has been downloaded",
"icon": "http://figshare.com/static/img/favicon.png",
},
"views": {
"display_name": "views",
"provider": "figshare",
"provider_url": "http://figshare.com",
"description": "The number of times this item has been viewed",
"icon": "http://figshare.com/static/img/favicon.png",
}
}
def __init__(self):
super(Figshare, self).__init__()
def is_relevant_alias(self, alias):
(namespace, nid) = alias
is_figshare_doi = (namespace == "doi") and (".figshare." in nid.lower())
return is_figshare_doi
def _extract_item(self, page, id):
data = provider._load_json(page)
if not data:
return {}
item = data["items"][0]
if item["doi"] == self._get_templated_url(self.provenance_url_template, id, "provenance"):
return item
else:
return {}
def _extract_biblio(self, page, id=None):
dict_of_keylists = {
'title' : ['title'],
'authors' : ['authors'],
'published_date' : ['published_date'],
'url' : ['doi']
}
item = self._extract_item(page, id)
biblio_dict = provider._extract_from_data_dict(item, dict_of_keylists)
if "published_date" in biblio_dict:
biblio_dict["year"] = biblio_dict["published_date"][-4:]
del biblio_dict["published_date"]
if "authors" in biblio_dict:
biblio_dict["authors"] = ", ".join(author["last_name"] for author in biblio_dict["authors"])
return biblio_dict
def _extract_aliases(self, page, id=None):
dict_of_keylists = {
'title' : ['title'],
'url' : ['doi']
}
item = self._extract_item(page, id)
aliases_dict = provider._extract_from_data_dict(item, dict_of_keylists)
if aliases_dict:
aliases_list = [(namespace, nid) for (namespace, nid) in aliases_dict.iteritems()]
else:
aliases_list = []
return aliases_list
def _extract_metrics(self, page, status_code=200, id=None):
if status_code != 200:
if status_code == 404:
return {}
else:
raise(self._get_error(status_code))
dict_of_keylists = {
'figshare:shares' : ['shares'],
'figshare:downloads' : ['downloads'],
'figshare:views' : ['views']
}
item = self._extract_item(page, id)
metrics_dict = provider._extract_from_data_dict(item, dict_of_keylists)
return metrics_dict
| from totalimpact.providers import provider
from totalimpact.providers.provider import Provider, ProviderContentMalformedError
import simplejson
import logging
logger = logging.getLogger('ti.providers.figshare')
class Figshare(Provider):
example_id = ("doi", "10.6084/m9.figshare.92393")
url = "http://figshare.com"
descr = "Make all of your research outputs sharable, citable and visible in the browser for free."
biblio_url_template = "http://api.figshare.com/v1/articles/%s"
aliases_url_template = "http://api.figshare.com/v1/articles/%s"
metrics_url_template = "http://api.figshare.com/v1/articles/%s"
provenance_url_template = "http://dx.doi.org/%s"
static_meta_dict = {
"shares": {
"display_name": "shares",
"provider": "figshare",
"provider_url": "http://figshare.com",
"description": "The number of times this has been shared",
"icon": "http://figshare.com/static/img/favicon.png",
},
"downloads": {
"display_name": "downloads",
"provider": "figshare",
"provider_url": "http://figshare.com",
"description": "The number of times this has been downloaded",
"icon": "http://figshare.com/static/img/favicon.png",
}
}
def __init__(self):
super(Figshare, self).__init__()
def is_relevant_alias(self, alias):
(namespace, nid) = alias
is_figshare_doi = (namespace == "doi") and (".figshare." in nid.lower())
return is_figshare_doi
def _extract_item(self, page, id):
data = provider._load_json(page)
if not data:
return {}
item = data["items"][0]
if item["doi"] == self._get_templated_url(self.provenance_url_template, id, "provenance"):
return item
else:
return {}
def _extract_biblio(self, page, id=None):
dict_of_keylists = {
'title' : ['title'],
'authors' : ['authors'],
'published_date' : ['published_date'],
'url' : ['doi']
}
item = self._extract_item(page, id)
biblio_dict = provider._extract_from_data_dict(item, dict_of_keylists)
if "published_date" in biblio_dict:
biblio_dict["year"] = biblio_dict["published_date"][-4:]
del biblio_dict["published_date"]
if "authors" in biblio_dict:
biblio_dict["authors"] = ", ".join(author["last_name"] for author in biblio_dict["authors"])
return biblio_dict
def _extract_aliases(self, page, id=None):
dict_of_keylists = {
'title' : ['title'],
'url' : ['doi']
}
item = self._extract_item(page, id)
aliases_dict = provider._extract_from_data_dict(item, dict_of_keylists)
if aliases_dict:
aliases_list = [(namespace, nid) for (namespace, nid) in aliases_dict.iteritems()]
else:
aliases_list = []
return aliases_list
def _extract_metrics(self, page, status_code=200, id=None):
if status_code != 200:
if status_code == 404:
return {}
else:
raise(self._get_error(status_code))
dict_of_keylists = {
'figshare:shares' : ['shares'],
'figshare:downloads' : ['downloads']
}
item = self._extract_item(page, id)
metrics_dict = provider._extract_from_data_dict(item, dict_of_keylists)
return metrics_dict
| Python | 0 |
bf81484b7fd55e6383ae8e0f103e5e69ddea430e | Update utils.py | academictorrents/utils.py | academictorrents/utils.py | import hashlib
import os
import json
import datetime
import calendar
import time
def convert_bytes_to_decimal(headerBytes):
size = 0
power = len(headerBytes) - 1
for ch in headerBytes:
if isinstance(ch, int):
size += ch * 256 ** power
else:
size += int(ord(ch)) * 256 ** power
power -= 1
return size
def sha1_hash(string):
"""Return 20-byte sha1 hash of string."""
return hashlib.sha1(string).digest()
def get_timestamp_filename():
return clean_path("~/.academictorrents_timestamps.json")
def get_datastore(datastore="", path_to_config_file="~/.academictorrents.config"):
if datastore:
datastore = clean_path(datastore)
else:
datastore = json.loads(open(clean_path(path_to_config_file)).read()).get("datastore", os.getcwd() + "/datastore/")
if datastore[-1] != "/":
datastore = datastore + "/"
return datastore
def clean_path(path=None):
if path.startswith("~"):
return os.path.expanduser(path)
else:
return os.path.abspath(path)
def write_timestamp(at_hash):
filename = get_timestamp_filename()
try:
f = open(filename, 'r')
timestamps = json.load(f)
f.close()
except Exception:
timestamps = {}
timestamps[at_hash] = int(datetime.datetime.timestamp(datetime.datetime.now()))
f = open(filename, 'w')
json.dump(timestamps, f)
def read_timestamp(at_hash):
filename = get_timestamp_filename()
try:
f = open(filename, 'r')
timestamp = json.load(f).get(at_hash, 0)
f.close()
except Exception:
timestamp = 0
return timestamp
def timestamp_is_within_30_days(timestamp):
seconds_in_a_month = 86400 * 30
if timestamp > int(calendar.timegm(time.gmtime())) - seconds_in_a_month:
return True
return False
def timestamp_is_within_10_seconds(timestamp):
ten_seconds = 10
if timestamp > int(calendar.timegm(time.gmtime())) - ten_seconds:
return True
return False
def filenames_present(torrent):
return torrent.contents['info']['name'] in os.listdir(torrent.datastore)
| import hashlib
import os
import json
import datetime
import calendar
import time
def convert_bytes_to_decimal(headerBytes):
size = 0
power = len(headerBytes) - 1
for ch in headerBytes:
if isinstance(ch, int):
size += ch * 256 ** power
else:
size += int(ord(ch)) * 256 ** power
power -= 1
return size
def sha1_hash(string):
"""Return 20-byte sha1 hash of string."""
return hashlib.sha1(string).digest()
def get_timestamp_filename():
return clean_path("~/.academictorrents_timestamps.json")
def get_datastore(datastore="", path_to_config_file="~/.academictorrents.config"):
if datastore:
datastore = clean_path(datastore)
else:
datastore = json.loads(open(clean_path(path_to_config_file)).read()).get("datastore", os.getcwd() + "/datastore/")
if datastore[-1] != "/":
datastore = datastore + "/"
return datastore
def clean_path(path=None):
if path.startswith("~"):
return os.path.expanduser(path)
else:
return os.path.abspath(path)
def write_timestamp(at_hash):
filename = get_timestamp_filename()
try:
f = open(filename, 'r')
timestamps = json.load(f)
f.close()
except Exception:
timestamps = {}
timestamps[at_hash] = int(datetime.datetime.now().strftime("%s"))
f = open(filename, 'w')
json.dump(timestamps, f)
def read_timestamp(at_hash):
filename = get_timestamp_filename()
try:
f = open(filename, 'r')
timestamp = json.load(f).get(at_hash, 0)
f.close()
except Exception:
timestamp = 0
return timestamp
def timestamp_is_within_30_days(timestamp):
seconds_in_a_month = 86400 * 30
if timestamp > int(calendar.timegm(time.gmtime())) - seconds_in_a_month:
return True
return False
def timestamp_is_within_10_seconds(timestamp):
ten_seconds = 10
if timestamp > int(calendar.timegm(time.gmtime())) - ten_seconds:
return True
return False
def filenames_present(torrent):
return torrent.contents['info']['name'] in os.listdir(torrent.datastore)
| Python | 0.000001 |
2b5ac57fd02e5e20f738f9060456542f69eeff95 | Bump version to 4.0.0a12 | platformio/__init__.py | platformio/__init__.py | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 0, "0a12")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"An open source ecosystem for IoT development. "
"Cross-platform IDE and unified debugger. "
"Remote unit testing and firmware updates. "
"Arduino, ARM mbed, Espressif (ESP8266/ESP32), STM32, PIC32, nRF51/nRF52, "
"FPGA, CMSIS, SPL, AVR, Samsung ARTIK, libOpenCM3")
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "contact@platformio.org"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
| # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 0, "0a11")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"An open source ecosystem for IoT development. "
"Cross-platform IDE and unified debugger. "
"Remote unit testing and firmware updates. "
"Arduino, ARM mbed, Espressif (ESP8266/ESP32), STM32, PIC32, nRF51/nRF52, "
"FPGA, CMSIS, SPL, AVR, Samsung ARTIK, libOpenCM3")
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "contact@platformio.org"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
| Python | 0 |
4e3cb4354c49101f29d64e4e5c59e347f95d98c9 | Fix way to create login_url in dashboard test | tempest/scenario/test_dashboard_basic_ops.py | tempest/scenario/test_dashboard_basic_ops.py | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import html_parser as HTMLParser
from six.moves.urllib import parse
from six.moves.urllib import request
from tempest import config
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
class HorizonHTMLParser(HTMLParser.HTMLParser):
csrf_token = None
region = None
login = None
def _find_name(self, attrs, name):
for attrpair in attrs:
if attrpair[0] == 'name' and attrpair[1] == name:
return True
return False
def _find_value(self, attrs):
for attrpair in attrs:
if attrpair[0] == 'value':
return attrpair[1]
return None
def _find_attr_value(self, attrs, attr_name):
for attrpair in attrs:
if attrpair[0] == attr_name:
return attrpair[1]
return None
def handle_starttag(self, tag, attrs):
if tag == 'input':
if self._find_name(attrs, 'csrfmiddlewaretoken'):
self.csrf_token = self._find_value(attrs)
if self._find_name(attrs, 'region'):
self.region = self._find_value(attrs)
if tag == 'form':
self.login = self._find_attr_value(attrs, 'action')
class TestDashboardBasicOps(manager.ScenarioTest):
"""
This is a basic scenario test:
* checks that the login page is available
* logs in as a regular user
* checks that the user home page loads without error
"""
@classmethod
def skip_checks(cls):
super(TestDashboardBasicOps, cls).skip_checks()
if not CONF.service_available.horizon:
raise cls.skipException("Horizon support is required")
@classmethod
def setup_credentials(cls):
cls.set_network_resources()
super(TestDashboardBasicOps, cls).setup_credentials()
def check_login_page(self):
response = request.urlopen(CONF.dashboard.dashboard_url)
self.assertIn("id_username", response.read())
def user_login(self, username, password):
self.opener = request.build_opener(request.HTTPCookieProcessor())
response = self.opener.open(CONF.dashboard.dashboard_url).read()
# Grab the CSRF token and default region
parser = HorizonHTMLParser()
parser.feed(response)
# construct login url for dashboard, discovery accommodates non-/ web
# root for dashboard
login_url = parse.urljoin(CONF.dashboard.dashboard_url, parser.login)
# Prepare login form request
req = request.Request(login_url)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
req.add_header('Referer', CONF.dashboard.dashboard_url)
params = {'username': username,
'password': password,
'region': parser.region,
'csrfmiddlewaretoken': parser.csrf_token}
self.opener.open(req, parse.urlencode(params))
def check_home_page(self):
response = self.opener.open(CONF.dashboard.dashboard_url)
self.assertIn('Overview', response.read())
@test.idempotent_id('4f8851b1-0e69-482b-b63b-84c6e76f6c80')
@test.services('dashboard')
def test_basic_scenario(self):
creds = self.os.credentials
self.check_login_page()
self.user_login(creds.username, creds.password)
self.check_home_page()
| # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import html_parser as HTMLParser
from six.moves.urllib import parse
from six.moves.urllib import request
from tempest import config
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
class HorizonHTMLParser(HTMLParser.HTMLParser):
csrf_token = None
region = None
login = None
def _find_name(self, attrs, name):
for attrpair in attrs:
if attrpair[0] == 'name' and attrpair[1] == name:
return True
return False
def _find_value(self, attrs):
for attrpair in attrs:
if attrpair[0] == 'value':
return attrpair[1]
return None
def _find_attr_value(self, attrs, attr_name):
for attrpair in attrs:
if attrpair[0] == attr_name:
return attrpair[1]
return None
def handle_starttag(self, tag, attrs):
if tag == 'input':
if self._find_name(attrs, 'csrfmiddlewaretoken'):
self.csrf_token = self._find_value(attrs)
if self._find_name(attrs, 'region'):
self.region = self._find_value(attrs)
if tag == 'form':
self.login = self._find_attr_value(attrs, 'action')
class TestDashboardBasicOps(manager.ScenarioTest):
"""
This is a basic scenario test:
* checks that the login page is available
* logs in as a regular user
* checks that the user home page loads without error
"""
@classmethod
def skip_checks(cls):
super(TestDashboardBasicOps, cls).skip_checks()
if not CONF.service_available.horizon:
raise cls.skipException("Horizon support is required")
@classmethod
def setup_credentials(cls):
cls.set_network_resources()
super(TestDashboardBasicOps, cls).setup_credentials()
def check_login_page(self):
response = request.urlopen(CONF.dashboard.dashboard_url)
self.assertIn("id_username", response.read())
def user_login(self, username, password):
self.opener = request.build_opener(request.HTTPCookieProcessor())
response = self.opener.open(CONF.dashboard.dashboard_url).read()
# Grab the CSRF token and default region
parser = HorizonHTMLParser()
parser.feed(response)
# construct login url for dashboard, discovery accommodates non-/ web
# root for dashboard
login_url = CONF.dashboard.dashboard_url + parser.login[1:]
# Prepare login form request
req = request.Request(login_url)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
req.add_header('Referer', CONF.dashboard.dashboard_url)
params = {'username': username,
'password': password,
'region': parser.region,
'csrfmiddlewaretoken': parser.csrf_token}
self.opener.open(req, parse.urlencode(params))
def check_home_page(self):
response = self.opener.open(CONF.dashboard.dashboard_url)
self.assertIn('Overview', response.read())
@test.idempotent_id('4f8851b1-0e69-482b-b63b-84c6e76f6c80')
@test.services('dashboard')
def test_basic_scenario(self):
creds = self.os.credentials
self.check_login_page()
self.user_login(creds.username, creds.password)
self.check_home_page()
| Python | 0.000456 |
82a1bcd4bd104ca2b45cb5dc93a44e4a16d1cbe3 | add more QC options and colorized output for quicker review | scripts/asos/archive_quantity.py | scripts/asos/archive_quantity.py | """ Create a simple prinout of observation quanity in the database """
import datetime
now = datetime.datetime.utcnow()
import numpy
counts = numpy.zeros((120,12))
mslp = numpy.zeros((120,12))
metar = numpy.zeros((120,12))
import iemdb
ASOS = iemdb.connect('asos', bypass=True)
acursor = ASOS.cursor()
import sys
stid = sys.argv[1]
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
acursor.execute("""SELECT extract(year from valid) as yr,
extract(month from valid) as mo, count(*),
sum(case when mslp is null or mslp < 1 then 1 else 0 end),
sum(case when metar is null or metar = '' then 1 else 0 end)
from alldata WHERE
station = %s GROUP by yr, mo ORDER by yr ASC, mo ASC""", (stid,))
for row in acursor:
counts[int(row[0]-1900),int(row[1]-1)] = row[2]
mslp[int(row[0]-1900),int(row[1]-1)] = row[3]
metar[int(row[0]-1900),int(row[1]-1)] = row[4]
def d(hits, total):
if total == 0:
return " N/A"
val = hits / float(total)
c1 = bcolors.ENDC
if val > 0.5:
c1 = bcolors.FAIL
return "%s%.2f%s" % (c1, val, bcolors.ENDC)
print 'Observation Count For %s' % (stid,)
print 'YEAR JAN FEB MAR APR MAY JUN JUL AUG SEP OCT NOV DEC'
output = False
for i in range(120):
year = 1900 + i
if year > now.year:
continue
if not output and numpy.max(counts[i,:]) == 0:
continue
output = True
if len(sys.argv) < 3:
print "%s %4i %4i %4i %4i %4i %4i %4i %4i %4i %4i %4i %4i" % (year,
counts[i,0],counts[i,1],counts[i,2],counts[i,3],
counts[i,4],counts[i,5],counts[i,6],counts[i,7],
counts[i,8],counts[i,9],counts[i,10],counts[i,11])
else:
if sys.argv[2] == 'metar':
data = metar
else:
data = mslp
print "%s %4s %4s %4s %4s %4s %4s %4s %4s %4s %4s %4s %4s" % (year,
d(data[i,0], counts[i,0]),
d(data[i,1], counts[i,1]),
d(data[i,2], counts[i,2]),
d(data[i,3], counts[i,3]),
d(data[i,4], counts[i,4]),
d(data[i,5], counts[i,5]),
d(data[i,6], counts[i,6]),
d(data[i,7], counts[i,7]),
d(data[i,8], counts[i,8]),
d(data[i,9], counts[i,9]),
d(data[i,10], counts[i,10]),
d(data[i,11], counts[i,11]))
| """ Create a simple prinout of observation quanity in the database """
import datetime
now = datetime.datetime.utcnow()
import numpy
counts = numpy.zeros((120,12))
import iemdb
ASOS = iemdb.connect('asos', bypass=True)
acursor = ASOS.cursor()
import sys
stid = sys.argv[1]
acursor.execute("""SELECT extract(year from valid) as yr,
extract(month from valid) as mo, count(*) from alldata WHERE
station = %s GROUP by yr, mo ORDER by yr ASC, mo ASC""", (stid,))
for row in acursor:
counts[int(row[0]-1900),int(row[1]-1)] = row[2]
print 'Observation Count For %s' % (stid,)
print 'YEAR JAN FEB MAR APR MAY JUN JUL AUG SEP OCT NOV DEC'
output = False
for i in range(120):
year = 1900 + i
if year > now.year:
continue
if not output and numpy.max(counts[i,:]) == 0:
continue
output = True
print "%s %4i %4i %4i %4i %4i %4i %4i %4i %4i %4i %4i %4i" % (year,
counts[i,0],counts[i,1],counts[i,2],counts[i,3],
counts[i,4],counts[i,5],counts[i,6],counts[i,7],
counts[i,8],counts[i,9],counts[i,10],counts[i,11])
| Python | 0 |
546d8fc8b41de424a76beb03c6530a7cf505a6a3 | add orca EarthLocation | km3pipe/constants.py | km3pipe/constants.py | # coding=utf-8
# Filename: constants.py
# pylint: disable=C0103
# pragma: no cover
"""
The constants used in KM3Pipe.
"""
from __future__ import division, absolute_import, print_function
# TODO: this module should be refactored soon!
import math
__author__ = "Tamas Gal"
__copyright__ = "Copyright 2016, Tamas Gal and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "Tamas Gal"
__email__ = "tgal@km3net.de"
__status__ = "Development"
# Detector related parameters
arca_frame_duration = 0.1 # s
orca_frame_duration = 0.1 # s
c = 2.99792458e8 # m/s
n_water_antares_phase = 1.3499
n_water_antares_group = 1.3797
n_water_km3net_group = 1.3787
n_water_antares = n_water_antares_group
theta_cherenkov_water_antares = math.acos(1 / n_water_antares_phase)
theta_cherenkov_water_km3net = math.acos(1 / n_water_km3net_group)
c_water_antares = c / n_water_antares_group
c_water_km3net = c / n_water_km3net_group
# Math
pi = math.pi
e = math.e
# Default values for time residuals
dt_window_l = -15 # ns
dt_window_h = +25 # ns
orca_coords = (42 + (48/60), 6 + (2/60)) # (n, e) / degree
orca_height = -2450 # m
| # coding=utf-8
# Filename: constants.py
# pylint: disable=C0103
# pragma: no cover
"""
The constants used in KM3Pipe.
"""
from __future__ import division, absolute_import, print_function
# TODO: this module should be refactored soon!
import math
__author__ = "Tamas Gal"
__copyright__ = "Copyright 2016, Tamas Gal and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "Tamas Gal"
__email__ = "tgal@km3net.de"
__status__ = "Development"
# Detector related parameters
arca_frame_duration = 0.1 # s
orca_frame_duration = 0.1 # s
c = 2.99792458e8 # m/s
n_water_antares_phase = 1.3499
n_water_antares_group = 1.3797
n_water_km3net_group = 1.3787
n_water_antares = n_water_antares_group
theta_cherenkov_water_antares = math.acos(1 / n_water_antares_phase)
theta_cherenkov_water_km3net = math.acos(1 / n_water_km3net_group)
c_water_antares = c / n_water_antares_group
c_water_km3net = c / n_water_km3net_group
# Math
pi = math.pi
e = math.e
# Default values for time residuals
dt_window_l = -15 # ns
dt_window_h = +25 # ns
| Python | 0.000037 |
ce47d219076dc2ff36c58db1d91ba349b9968d61 | Update test_bandits.py | bandits/tests/test_bandits.py | bandits/tests/test_bandits.py | from sklearn.utils.testing import assert_equal
import numpy as np
import pytest
@pytest.mark.fast_test
def dummy_test():
"""
Quick test to build with Circle CI.
"""
x = 2 + 2
assert_equal(x, 4)
| from sklearn.utils.testing import assert_equal
import numpy as np
import pytest
print("Hello tests!")
| Python | 0.000001 |
19df1f99c1d6f50c49ac390c772a0f2fe45efabc | improve estimator to use gridded analysis when neighbor query fails | scripts/coop/estimate_missing.py | scripts/coop/estimate_missing.py | """
Crude data estimator!
"""
import sys
import numpy as np
import network
import psycopg2.extras
import netCDF4
import datetime
from pyiem import iemre
from pyiem.datatypes import temperature
# Database Connection
COOP = psycopg2.connect(database='coop', host='iemdb')
ccursor = COOP.cursor(cursor_factory=psycopg2.extras.DictCursor)
ccursor2 = COOP.cursor()
state = sys.argv[1]
nt = network.Table("%sCLIMATE" % (state.upper(),))
vnameconv = {'high': 'high_tmpk', 'low': 'low_tmpk', 'precip': 'p01d'}
# We'll care about our nearest 11 stations, arbitrary
friends = {}
weights = {}
for station in nt.sts.keys():
sql = """select id, ST_distance(geom, 'SRID=4326;POINT(%s %s)') from stations
WHERE network = '%sCLIMATE' and id != '%s'
and archive_begin < '1951-01-01' and
substr(id, 3, 1) != 'C' and substr(id, 3,4) != '0000'
ORDER by st_distance
ASC LIMIT 11""" % (nt.sts[station]['lon'], nt.sts[station]['lat'],
state.upper(), station)
ccursor.execute( sql )
friends[station] = []
weights[station] = []
for row in ccursor:
friends[station].append( row[0] )
weights[station].append( 1.0 / row[1] )
weights[station] = np.array( weights[station] )
def do_var(varname):
"""
Run our estimator for a given variable
"""
currentnc = None
sql = """select day, station from alldata_%s WHERE %s IS NULL
and day >= '1893-01-01' ORDER by day ASC""" % (state.lower(), varname)
ccursor.execute( sql )
for row in ccursor:
day = row[0]
station = row[1]
if not nt.sts.has_key(station):
continue
sql = """SELECT station, %s from alldata_%s WHERE %s is not NULL
and station in %s and day = '%s'""" % (varname, state, varname,
tuple(friends[station]), day)
ccursor2.execute(sql)
weight = []
value = []
for row2 in ccursor2:
idx = friends[station].index(row2[0])
weight.append( weights[station][idx] )
value.append( row2[1] )
if len(weight) < 3:
# Nearest neighbors failed, so lets look at our grided analysis
# and sample from it
if currentnc is None or currentnc.title.find(str(day.year)) == -1:
currentnc = netCDF4.Dataset(("/mesonet/data/iemre/"
+"%s_mw_daily.nc") % (day.year,))
tidx = iemre.daily_offset(datetime.datetime(day.year, day.month,
day.day))
iidx, jidx = iemre.find_ij(nt.sts[station]['lon'],
nt.sts[station]['lat'])
iemreval = currentnc.variables[vnameconv[varname]][tidx, jidx,
iidx]
if varname in ('high', 'low'):
interp = temperature(iemreval, 'K').value('F')
else:
interp = iemreval / 24.5
print '--> Neighbor failure, %s %s %s' % (station, day, varname)
else:
mass = sum(weight)
interp = np.sum(np.array(weight) * np.array(value) / mass)
dataformat = '%.2f'
if varname in ['high', 'low']:
dataformat = '%.0f'
print 'Set station: %s day: %s varname: %s value: %s' % (station,
day, varname, dataformat % (interp,))
sql = """UPDATE alldata_%s SET estimated = true, %s = %s WHERE
station = '%s' and day = '%s'""" % (state.lower(), varname,
dataformat % (interp,), station, day)
sql = sql.replace(' nan ', ' null ')
ccursor2.execute( sql )
def main():
for varname in ['high', 'low', 'precip']:
do_var(varname)
ccursor2.close()
COOP.commit()
if __name__ == '__main__':
main() | """
Crude data estimator!
"""
import sys
import iemdb
import numpy
import network
import psycopg2.extras
# Database Connection
COOP = iemdb.connect('coop', bypass=True)
ccursor = COOP.cursor(cursor_factory=psycopg2.extras.DictCursor)
ccursor2 = COOP.cursor()
state = sys.argv[1]
nt = network.Table("%sCLIMATE" % (state.upper(),))
# We'll care about our nearest 11 stations, arbitrary
friends = {}
weights = {}
for station in nt.sts.keys():
sql = """select id, ST_distance(geom, 'SRID=4326;POINT(%s %s)') from stations
WHERE network = '%sCLIMATE' and id != '%s'
and archive_begin < '1951-01-01' and
substr(id, 3, 1) != 'C' and substr(id, 3,4) != '0000'
ORDER by st_distance
ASC LIMIT 11""" % (nt.sts[station]['lon'], nt.sts[station]['lat'],
state.upper(), station)
ccursor.execute( sql )
friends[station] = []
weights[station] = []
for row in ccursor:
friends[station].append( row[0] )
weights[station].append( 1.0 / row[1] )
weights[station] = numpy.array( weights[station] )
def do_var(varname):
"""
Run our estimator for a given variable
"""
sql = """select day, station from alldata_%s WHERE %s IS NULL
and day >= '1893-01-01'""" % (state.lower(), varname)
ccursor.execute( sql )
for row in ccursor:
day = row[0]
station = row[1]
if not nt.sts.has_key(station):
continue
sql = """SELECT station, %s from alldata_%s WHERE %s is not NULL
and station in %s and day = '%s'""" % (varname, state, varname,
tuple(friends[station]), day)
ccursor2.execute(sql)
weight = []
value = []
for row2 in ccursor2:
idx = friends[station].index(row2[0])
weight.append( weights[station][idx] )
value.append( row2[1] )
if len(weight) < 3:
print 'Not Enough Data Found station: %s day: %s var: %s' % (
station, day, varname)
continue
mass = sum(weight)
interp = numpy.sum(numpy.array(weight) * numpy.array(value) / mass)
dataformat = '%.2f'
if varname in ['high', 'low']:
dataformat = '%.0f'
print 'Set station: %s day: %s varname: %s value: %s' % (station,
day, varname, dataformat % (interp,))
sql = """UPDATE alldata_%s SET estimated = true, %s = %s WHERE
station = '%s' and day = '%s'""" % (state.lower(), varname,
dataformat % (interp,), station, day)
ccursor2.execute( sql )
def main():
for varname in ['high', 'low', 'precip']:
do_var(varname)
ccursor2.close()
COOP.commit()
if __name__ == '__main__':
main() | Python | 0 |
cbb5290e42f738025fb11f4745a35bda71968f1f | Add support for Lovelace dashboards (#342) | pychromecast/controllers/homeassistant.py | pychromecast/controllers/homeassistant.py | """
Controller to interface with Home Assistant
"""
from ..config import APP_HOME_ASSISTANT
from . import BaseController
APP_NAMESPACE = "urn:x-cast:com.nabucasa.hast"
class HomeAssistantController(BaseController):
""" Controller to interact with Home Assistant. """
def __init__(
self,
hass_url,
client_id,
refresh_token,
app_namespace=APP_NAMESPACE,
app_id=APP_HOME_ASSISTANT,
):
super().__init__(app_namespace, app_id)
self.hass_url = hass_url
self.client_id = client_id
self.refresh_token = refresh_token
# {
# connected: boolean;
# showDemo: boolean;
# hassUrl?: string;
# lovelacePath?: string | number | null;
# }
self.status = None
self._on_connect = []
@property
def hass_connected(self):
"""Return if connected to Home Assistant."""
return (
self.status is not None
and self.status["connected"]
and self.status["hassUrl"] == self.hass_url
)
def channel_connected(self):
""" Called when a channel has been openend that supports the
namespace of this controller. """
self.get_status()
def channel_disconnected(self):
""" Called when a channel is disconnected. """
self.status = None
def receive_message(self, message, data):
"""Called when a message is received."""
if data.get("type") == "receiver_status":
was_connected = self.hass_connected
self.status = data
if was_connected or not self.hass_connected:
return True
# We just got connected, call the callbacks.
while self._on_connect:
self._on_connect.pop()()
return True
return False
def connect_hass(self, callback_function=None):
"""Connect to Home Assistant."""
self._on_connect.append(callback_function)
self.send_message(
{
"type": "connect",
"refreshToken": self.refresh_token,
"clientId": self.client_id,
"hassUrl": self.hass_url,
}
)
def show_demo(self):
"""Show the demo."""
self.send_message({"type": "show_demo"})
def get_status(self, callback_function=None):
"""Get status of Home Assistant Cast."""
self.send_connected_message(
{"type": "get_status"}, callback_function=callback_function
)
def show_lovelace_view(self, view_path, url_path=None, callback_function=None):
"""Show a Lovelace UI."""
self.send_connected_message(
{"type": "show_lovelace_view", "viewPath": view_path, "urlPath": url_path},
callback_function=callback_function,
)
def send_connected_message(self, data, callback_function=None):
"""Send a message to a connected Home Assistant Cast"""
if self.hass_connected:
self.send_message_nocheck(data, callback_function=callback_function)
return
self.connect_hass(
lambda: self.send_message_nocheck(data, callback_function=callback_function)
)
| """
Controller to interface with Home Assistant
"""
from ..config import APP_HOME_ASSISTANT
from . import BaseController
APP_NAMESPACE = "urn:x-cast:com.nabucasa.hast"
class HomeAssistantController(BaseController):
""" Controller to interact with Home Assistant. """
def __init__(
self,
hass_url,
client_id,
refresh_token,
app_namespace=APP_NAMESPACE,
app_id=APP_HOME_ASSISTANT,
):
super().__init__(app_namespace, app_id)
self.hass_url = hass_url
self.client_id = client_id
self.refresh_token = refresh_token
# {
# connected: boolean;
# showDemo: boolean;
# hassUrl?: string;
# lovelacePath?: string | number | null;
# }
self.status = None
self._on_connect = []
@property
def hass_connected(self):
"""Return if connected to Home Assistant."""
return (
self.status is not None
and self.status["connected"]
and self.status["hassUrl"] == self.hass_url
)
def channel_connected(self):
""" Called when a channel has been openend that supports the
namespace of this controller. """
self.get_status()
def channel_disconnected(self):
""" Called when a channel is disconnected. """
self.status = None
def receive_message(self, message, data):
"""Called when a message is received."""
if data.get("type") == "receiver_status":
was_connected = self.hass_connected
self.status = data
if was_connected or not self.hass_connected:
return True
# We just got connected, call the callbacks.
while self._on_connect:
self._on_connect.pop()()
return True
return False
def connect_hass(self, callback_function=None):
"""Connect to Home Assistant."""
self._on_connect.append(callback_function)
self.send_message(
{
"type": "connect",
"refreshToken": self.refresh_token,
"clientId": self.client_id,
"hassUrl": self.hass_url,
}
)
def show_demo(self):
"""Show the demo."""
self.send_message({"type": "show_demo"})
def get_status(self, callback_function=None):
"""Get status of Home Assistant Cast."""
self.send_connected_message(
{"type": "get_status"}, callback_function=callback_function
)
def show_lovelace_view(self, view_path, callback_function=None):
"""Show a Lovelace UI."""
self.send_connected_message(
{"type": "show_lovelace_view", "viewPath": view_path},
callback_function=callback_function,
)
def send_connected_message(self, data, callback_function=None):
"""Send a message to a connected Home Assistant Cast"""
if self.hass_connected:
self.send_message_nocheck(data, callback_function=callback_function)
return
self.connect_hass(
lambda: self.send_message_nocheck(data, callback_function=callback_function)
)
| Python | 0 |
1e247dace112ce6def2bedf2f3ab864835ed7e06 | enforce that source.yaml files have to specify a version attribute | src/rosdistro/source_file.py | src/rosdistro/source_file.py | # Software License Agreement (BSD License)
#
# Copyright (c) 2013, Open Source Robotics Foundation, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Open Source Robotics Foundation, Inc. nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from .repository import Repository
class SourceFile(object):
_type = 'source'
def __init__(self, name, data):
self.name = name
assert 'type' in data, "Expected file type is '%s'" % SourceFile._type
assert data['type'] == SourceFile._type, "Expected file type is '%s', not '%s'" % (SourceFile._type, data['type'])
assert 'version' in data, "Source file for '%s' lacks required version information" % self.name
assert int(data['version']) == 1, "Unable to handle '%s' format version '%d', please update rosdistro" % (SourceFile._type, int(data['version']))
self.version = int(data['version'])
self.repositories = {}
if 'repositories' in data:
for repo_name in data['repositories']:
repo_data = data['repositories'][repo_name]
try:
assert 'version' in repo_data, "Repository '%s' lacks required version information" % repo_name
repo = Repository(repo_name, repo_data)
except AssertionError as e:
e.args = [("Source file '%s': %s" % (self.name, a) if i == 0 else a) for i, a in enumerate(e.args)]
raise e
self.repositories[repo_name] = repo
def get_data(self):
data = {}
data['type'] = SourceFile._type
data['version'] = self.version
data['repositories'] = {}
for repo_name in sorted(self.repositories):
repo = self.repositories[repo_name]
data['repositories'][repo_name] = repo.get_data()
return data
| # Software License Agreement (BSD License)
#
# Copyright (c) 2013, Open Source Robotics Foundation, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Open Source Robotics Foundation, Inc. nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from .repository import Repository
class SourceFile(object):
_type = 'source'
def __init__(self, name, data):
self.name = name
assert 'type' in data, "Expected file type is '%s'" % SourceFile._type
assert data['type'] == SourceFile._type, "Expected file type is '%s', not '%s'" % (SourceFile._type, data['type'])
assert 'version' in data, "Source file for '%s' lacks required version information" % self.name
assert int(data['version']) == 1, "Unable to handle '%s' format version '%d', please update rosdistro" % (SourceFile._type, int(data['version']))
self.version = int(data['version'])
self.repositories = {}
if 'repositories' in data:
for repo_name in data['repositories']:
repo_data = data['repositories'][repo_name]
try:
repo = Repository(repo_name, repo_data)
except AssertionError as e:
e.args = [("Source file '%s': %s" % (self.name, a) if i == 0 else a) for i, a in enumerate(e.args)]
raise e
self.repositories[repo_name] = repo
def get_data(self):
data = {}
data['type'] = SourceFile._type
data['version'] = self.version
data['repositories'] = {}
for repo_name in sorted(self.repositories):
repo = self.repositories[repo_name]
data['repositories'][repo_name] = repo.get_data()
return data
| Python | 0.000001 |
92a269d95006f991aa65456d413776a6d6d0a93c | remove unused import | pyramid_oauth2_provider/authentication.py | pyramid_oauth2_provider/authentication.py | #
# Copyright (c) Elliot Peele <elliot@bentlogic.net>
#
# This program is distributed under the terms of the MIT License as found
# in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/mit-license.php.
#
# This program is distributed in the hope that it will be useful, but
# without any waranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the MIT License for full details.
#
import logging
from zope.interface import implementer
from pyramid.interfaces import IAuthenticationPolicy
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authentication import CallbackAuthenticationPolicy
from pyramid.httpexceptions import HTTPBadRequest
from .models import Oauth2Token
from .models import DBSession as db
from .errors import InvalidToken
from .util import getClientCredentials
log = logging.getLogger('pyramid_oauth2_provider.authentication')
@implementer(IAuthenticationPolicy)
class OauthAuthenticationPolicy(CallbackAuthenticationPolicy):
def _isOauth(self, request):
return bool(getClientCredentials(request))
def unauthenticated_userid(self, request):
token_type, token = getClientCredentials(request)
if token_type != 'bearer':
return None
auth_token = db.query(Oauth2Token).filter_by(access_token=token).first()
if not auth_token:
raise HTTPBadRequest(InvalidToken())
return auth_token.user_id
def remember(self, request, principal, **kw):
"""
I don't think there is anything to do for an oauth request here.
"""
def forget(self, request):
"""
You could revoke the access token on a call to forget.
"""
@implementer(IAuthenticationPolicy)
class OauthTktAuthenticationPolicy(OauthAuthenticationPolicy,
AuthTktAuthenticationPolicy):
def __init__(self, *args, **kwargs):
OauthAuthenticationPolicy.__init__(self)
AuthTktAuthenticationPolicy.__init__(self, *args, **kwargs)
def unauthenticated_userid(self, request):
if self._isOauth(request):
return OauthAuthenticationPolicy.unauthenticated_userid(
self, request)
else:
return AuthTktAuthenticationPolicy.unauthenticated_userid(
self, request)
def remember(self, request, principal, **kw):
if self._isOauth(request):
return OauthAuthenticationPolicy.remember(
self, request, principal, **kw)
else:
return AuthTktAuthenticationPolicy.remember(
self, request, principal, **kw)
def forget(self, request):
if self._isOauth(request):
return OauthAuthenticationPolicy.forget(
self, request)
else:
return AuthTktAuthenticationPolicy.forget(
self, request)
| #
# Copyright (c) Elliot Peele <elliot@bentlogic.net>
#
# This program is distributed under the terms of the MIT License as found
# in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/mit-license.php.
#
# This program is distributed in the hope that it will be useful, but
# without any waranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the MIT License for full details.
#
import base64
import logging
from zope.interface import implementer
from pyramid.interfaces import IAuthenticationPolicy
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authentication import CallbackAuthenticationPolicy
from pyramid.httpexceptions import HTTPBadRequest
from .models import Oauth2Token
from .models import DBSession as db
from .errors import InvalidToken
from .util import getClientCredentials
log = logging.getLogger('pyramid_oauth2_provider.authentication')
@implementer(IAuthenticationPolicy)
class OauthAuthenticationPolicy(CallbackAuthenticationPolicy):
def _isOauth(self, request):
return bool(getClientCredentials(request))
def unauthenticated_userid(self, request):
token_type, token = getClientCredentials(request)
if token_type != 'bearer':
return None
auth_token = db.query(Oauth2Token).filter_by(access_token=token).first()
if not auth_token:
raise HTTPBadRequest(InvalidToken())
return auth_token.user_id
def remember(self, request, principal, **kw):
"""
I don't think there is anything to do for an oauth request here.
"""
def forget(self, request):
"""
You could revoke the access token on a call to forget.
"""
@implementer(IAuthenticationPolicy)
class OauthTktAuthenticationPolicy(OauthAuthenticationPolicy,
AuthTktAuthenticationPolicy):
def __init__(self, *args, **kwargs):
OauthAuthenticationPolicy.__init__(self)
AuthTktAuthenticationPolicy.__init__(self, *args, **kwargs)
def unauthenticated_userid(self, request):
if self._isOauth(request):
return OauthAuthenticationPolicy.unauthenticated_userid(
self, request)
else:
return AuthTktAuthenticationPolicy.unauthenticated_userid(
self, request)
def remember(self, request, principal, **kw):
if self._isOauth(request):
return OauthAuthenticationPolicy.remember(
self, request, principal, **kw)
else:
return AuthTktAuthenticationPolicy.remember(
self, request, principal, **kw)
def forget(self, request):
if self._isOauth(request):
return OauthAuthenticationPolicy.forget(
self, request)
else:
return AuthTktAuthenticationPolicy.forget(
self, request)
| Python | 0.000001 |
2652919c8d2e6fad8f7b3d47f5e82528b4b5214e | Write the last point for plot completeness | plots/monotone.py | plots/monotone.py |
# MONOTONE
# Produce a monotonically decreasing output plot from noisy data
# Input: columns: t x
# Output: columns: t_i x_i , sampled such that x_i <= x_j
# for j > i.
from string import *
import sys
# Set PYTHONPATH=$PWD
from plottools import *
if len(sys.argv) != 3:
abort("usage: <input file> <output file>")
input_file = sys.argv[1]
output_file = sys.argv[2]
val_loss_min = sys.float_info.max
with open(input_file, "r") as fp_i, \
open(output_file, "w") as fp_o:
for line in fp_i:
(t, val_loss_string) = split(line)
val_loss = float(val_loss_string)
if val_loss < val_loss_min:
val_loss_min = val_loss
fp_o.write("%s, %f\n" % (t, val_loss_min))
# Ensure the last data point is written for the plot:
if val_loss >= val_loss_min:
fp_o.write("%s, %f\n" % (t, val_loss_min))
|
# MONOTONE
# Produce a monotonically decreasing output plot from noisy data
# Input: columns: t x
# Output: columns: t_i x_i , sampled such that x_i <= x_j
# for j > i.
from string import *
import sys
# Set PYTHONPATH=$PWD
from plottools import *
if len(sys.argv) != 3:
abort("usage: <input file> <output file>")
input_file = sys.argv[1]
output_file = sys.argv[2]
val_loss_min = sys.float_info.max
with open(input_file, "r") as fp_i, \
open(output_file, "w") as fp_o:
for line in fp_i:
(t, val_loss_string) = split(line)
val_loss = float(val_loss_string)
if val_loss < val_loss_min:
val_loss_min = val_loss
fp_o.write("%s, %f\n" % (t, val_loss_min))
| Python | 0.999273 |
58ee8882fdbdef01f36859f0ed40afc346518690 | Add test for double backward | tests/chainer_tests/functions_tests/array_tests/test_flip.py | tests/chainer_tests/functions_tests/array_tests/test_flip.py | import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check
@testing.parameterize(*testing.product_dict(
[
{'shape': (1,), 'axis': 0},
{'shape': (2, 3, 4), 'axis': 0},
{'shape': (2, 3, 4), 'axis': 1},
{'shape': (2, 3, 4), 'axis': 2},
{'shape': (2, 3, 4), 'axis': -3},
{'shape': (2, 3, 4), 'axis': -2},
{'shape': (2, 3, 4), 'axis': -1},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
))
class TestFlip(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.ggx = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.check_double_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
if self.dtype == numpy.float16:
self.check_double_backward_options.update(dtype=numpy.float64)
def check_forward(self, x_data, axis):
x = chainer.Variable(x_data)
y = functions.flip(x, axis)
testing.assert_allclose(y.data, numpy.flip(x_data, axis))
def test_forward_cpu(self):
self.check_forward(self.x, self.axis)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x), self.axis)
def check_backward(self, x_data, axis, y_grad):
gradient_check.check_backward(lambda x: functions.flip(x, axis),
x_data, y_grad, dtype=numpy.float64)
def test_backward_cpu(self):
self.check_backward(self.x, self.axis, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(
cuda.to_gpu(self.x), self.axis, cuda.to_gpu(self.gy))
def check_double_backward(self, x_data, axis, y_grad, x_grad_grad):
def f(x):
x = functions.flip(x, axis)
return x * x
gradient_check.check_double_backward(
f, x_data, y_grad, x_grad_grad,
**self.check_double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.axis, self.gy, self.ggx)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(cuda.to_gpu(self.x), self.axis,
cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggx))
@testing.parameterize(
{'axis': 3},
{'axis': -4},
)
class TestFlipInvalidTypeAxis(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f')
def check_type_error(self, x):
with self.assertRaises(type_check.InvalidType):
functions.flip(x, self.axis)
def test_type_error_cpu(self):
self.check_type_error(self.x)
@attr.gpu
def test_type_error_gpu(self):
self.check_type_error(cuda.to_gpu(self.x))
class TestFlipInvalidTypeError(unittest.TestCase):
def test_invalid_axis(self):
with self.assertRaises(TypeError):
functions.Flip('a')
testing.run_module(__name__, __file__)
| import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check
@testing.parameterize(*testing.product_dict(
[
{'shape': (1,), 'axis': 0},
{'shape': (2, 3, 4), 'axis': 0},
{'shape': (2, 3, 4), 'axis': 1},
{'shape': (2, 3, 4), 'axis': 2},
{'shape': (2, 3, 4), 'axis': -3},
{'shape': (2, 3, 4), 'axis': -2},
{'shape': (2, 3, 4), 'axis': -1},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
))
class TestFlip(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(0, 1, self.shape).astype(self.dtype)
self.g = numpy.random.uniform(0, 1, self.shape).astype(self.dtype)
def check_forward(self, x_data, axis):
x = chainer.Variable(x_data)
y = functions.flip(x, axis)
testing.assert_allclose(y.data, numpy.flip(x_data, axis))
def test_forward_cpu(self):
self.check_forward(self.x, self.axis)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x), self.axis)
def check_backward(self, x_data, axis, y_grad):
gradient_check.check_backward(lambda x: functions.flip(x, axis),
x_data, y_grad, dtype=numpy.float64)
def test_backward_cpu(self):
self.check_backward(self.x, self.axis, self.g)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(
cuda.to_gpu(self.x), self.axis, cuda.to_gpu(self.g))
@testing.parameterize(
{'axis': 3},
{'axis': -4},
)
class TestFlipInvalidTypeAxis(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f')
def check_type_error(self, x):
with self.assertRaises(type_check.InvalidType):
functions.flip(x, self.axis)
def test_type_error_cpu(self):
self.check_type_error(self.x)
@attr.gpu
def test_type_error_gpu(self):
self.check_type_error(cuda.to_gpu(self.x))
class TestFlipInvalidTypeError(unittest.TestCase):
def test_invalid_axis(self):
with self.assertRaises(TypeError):
functions.Flip('a')
testing.run_module(__name__, __file__)
| Python | 0.000007 |
6482c485982fe5039574eab797b46d5f1b93bacc | Refactor populate script | finance/management/commands/populate.py | finance/management/commands/populate.py | import random
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
import factory
from accounts.factories import UserFactory
from books.factories import TransactionFactory
class Command(BaseCommand):
help = "Popoulates databse with dummy data"
def handle(self, *args, **options):
if not User.objects.filter(username='admin'):
self.create_admin()
else:
self.admin = User.objects.get(username='admin')
print("admin user already exists")
self.create_transactions()
def create_admin(self):
# Factory creates simple user, so ``is_staff`` is set later
self.admin = UserFactory(username='admin', password='asdasd')
self.admin.is_staff = True
self.admin.save()
print("admin user have been created successfully")
def create_transactions(self):
TransactionFactory.create_batch(
10,
amount=factory.Sequence(lambda n: random.randrange(0, 10)),
category=random.randrange(0, 2), # random range from 0 to 1
user=self.admin,
)
print("Transactions for admin created")
| import random
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.db import IntegrityError
import factory
from accounts.factories import UserFactory
from books.factories import TransactionFactory
class Command(BaseCommand):
help = "Popoulates databse with dummy data"
def handle(self, *args, **options):
# Factory creates simple user, so ``is_staff`` is set later
try:
admin = UserFactory(username='admin', password='asdasd')
admin.is_staff = True
admin.save()
print("admin user have been created successfully")
except IntegrityError:
admin = User.objects.get(username='admin')
print("admin user already exists")
TransactionFactory.create_batch(
10,
price=factory.Sequence(lambda n: random.randrange(0, 10)),
user=admin,
)
print("Transactions for admin created")
| Python | 0 |
87405b65ca4f6848a3e7ec0a63369658d09cd0d5 | print debug messages to stderr, not stdout | fasttsne/__init__.py | fasttsne/__init__.py | import scipy.linalg as la
import numpy as np
import time
import sys
from fasttsne import _TSNE as TSNE
def timed_reducer(f):
def f2(data, d, mode, **kwargs):
t = time.time()
print >> sys.stderr, "Reducing to %dd using %s..." % (d, f.__name__)
if mode == 1:
from sklearn.preprocessing import Normalizer
data = Normalizer().fit_transform(data)
X = f(data, d, mode, **kwargs)
print >> sys.stderr, "%s -> %s. Took %.1fs" % (data.shape, X.shape, time.time() - t)
return X
return f2
@timed_reducer
def sparse_encode(data, d, mode, alpha=500):
import sklearn.decomposition as deco
print "finding dict..."
code, dictionary, errors = deco.dict_learning(data[:1000], d, alpha, verbose=True)
print code, dictionary, errors
return deco.sparse_encode(data, dictionary)
@timed_reducer
def pca_reduce(data, pca_d, mode, algorithm='RandomizedPCA'):
import sklearn.decomposition as deco
alg = getattr(deco, algorithm)
print "pca..."
pca = alg(n_components=pca_d)
X = pca.fit_transform(data)
return X
@timed_reducer
def whitened_pca_reduce(data, pca_d, mode):
import sklearn.decomposition as deco
print "pca..."
pca = deco.RandomizedPCA(pca_d, whiten=True)
X = pca.fit_transform(data)
return X
def fast_tsne(data, pca_d=None, d=2, perplexity=30., theta=0.5, mode=0, normalise=0,
whiten=0):
"""
Run Barnes-Hut T-SNE on _data_.
@param data The data.
@param pca_d The dimensionality of data is reduced via PCA
to this dimensionality.
@param d The embedding dimensionality. Must be fixed to
2.
@param perplexity The perplexity controls the effective number of
neighbors.
@param theta Degree of BH optimisation (0-1; higher -> faster, worse).
@param mode 0: Euclidean; 1: normalised Euclidean.
@param normalise Normalise mean around zero.
@param whiten Whiten when doing PCA.
"""
# inplace!!
if normalise_mean:
print >> sys.stderr, "normalising..."
data = data - data.mean(axis=0)
if not pca_d or pca_d > data.shape[1]:
X = data
elif whiten:
X = whitened_pca_reduce(data, pca_d, mode)
del data
else:
X = pca_reduce(data, pca_d, mode)
del data
N, vlen = X.shape
print >> sys.stderr, X.shape
tsne = TSNE()
Y = tsne.run(X, N, vlen, d, perplexity, theta, mode)
return Y
| import scipy.linalg as la
import numpy as np
import time
from fasttsne import _TSNE as TSNE
def timed_reducer(f):
def f2(data, d, mode, **kwargs):
t = time.time()
print "Reducing to %dd using %s..." % (d, f.__name__)
if mode == 1:
from sklearn.preprocessing import Normalizer
data = Normalizer().fit_transform(data)
X = f(data, d, mode, **kwargs)
print "%s -> %s. Took %.1fs" % (data.shape, X.shape, time.time() - t)
return X
return f2
@timed_reducer
def sparse_encode(data, d, mode, alpha=500):
import sklearn.decomposition as deco
print "finding dict..."
code, dictionary, errors = deco.dict_learning(data[:1000], d, alpha, verbose=True)
print code, dictionary, errors
return deco.sparse_encode(data, dictionary)
@timed_reducer
def pca_reduce(data, pca_d, mode, algorithm='RandomizedPCA'):
import sklearn.decomposition as deco
alg = getattr(deco, algorithm)
print "pca..."
pca = alg(n_components=pca_d)
X = pca.fit_transform(data)
return X
@timed_reducer
def whitened_pca_reduce(data, pca_d, mode):
import sklearn.decomposition as deco
print "pca..."
pca = deco.RandomizedPCA(pca_d, whiten=True)
X = pca.fit_transform(data)
return X
def fast_tsne(data, pca_d=None, d=2, perplexity=30., theta=0.5, mode=0, normalise=0,
whiten=0):
"""
Run Barnes-Hut T-SNE on _data_.
@param data The data.
@param pca_d The dimensionality of data is reduced via PCA
to this dimensionality.
@param d The embedding dimensionality. Must be fixed to
2.
@param perplexity The perplexity controls the effective number of
neighbors.
@param theta Degree of BH optimisation (0-1; higher -> faster, worse).
@param mode 0: Euclidean; 1: normalised Euclidean.
@param normalise Normalise mean around zero.
@param whiten Whiten when doing PCA.
"""
# inplace!!
if normalise:
print "normalising..."
data = data - data.mean(axis=0)
if not pca_d or pca_d > data.shape[1]:
X = data
elif whiten:
X = whitened_pca_reduce(data, pca_d, mode)
del data
else:
X = pca_reduce(data, pca_d, mode)
del data
N, vlen = X.shape
print X.shape
tsne = TSNE()
Y = tsne.run(X, N, vlen, d, perplexity, theta, mode)
return Y
| Python | 0.998474 |
300f0b0101587aacaad9791ba3617dae75ed96ad | Fix apple_trailers plugin | flexget/plugins/input/apple_trailers.py | flexget/plugins/input/apple_trailers.py | from __future__ import unicode_literals, division, absolute_import
import logging
import urlparse
import re
from flexget.entry import Entry
from flexget.plugin import priority, register_plugin, get_plugin_by_name, DependencyError
from flexget.utils.cached_input import cached
from flexget.utils.requests import RequestException
from flexget.utils.soup import get_soup
try:
from flexget.plugins.input.rss import InputRSS
except ImportError:
raise DependencyError(issued_by='apple_trailers', missing='rss')
log = logging.getLogger('apple_trailers')
class AppleTrailers(InputRSS):
"""
Adds support for Apple.com movie trailers.
apple_trailers: 480p
Choice of quality is one of: ipod, '320', '480', 640w, 480p, 720p, 1080p
"""
rss_url = 'http://trailers.apple.com/trailers/home/rss/newtrailers.rss'
qualities = ['480p', '720p']
schema = {'enum': qualities}
# Run before headers plugin
@priority(135)
def on_task_start(self, task, config):
# TODO: Resolve user-agent in a way that doesn't involve modifying the task config.
# make sure we have dependencies available, will throw DependencyError if not
get_plugin_by_name('headers')
# configure them
task.config['headers'] = {'User-Agent': 'QuickTime/7.6.6'}
@priority(127)
@cached('apple_trailers')
def on_task_input(self, task, config):
# use rss plugin
# since we have to do 2 page lookups per trailer, use all_entries False to lighten load
rss_config = {'url': self.rss_url, 'all_entries': False}
rss_entries = super(AppleTrailers, self).on_task_input(task, rss_config)
# Multiple entries can point to the same movie page (trailer 1, clip1, etc.)
trailers = {}
for entry in rss_entries:
url = entry['original_url']
trailers.setdefault(url, []).append(entry['title'])
result = []
if config == '720p':
url_extension = 'includes/extralarge.html'
else:
url_extension = 'includes/large.html'
for url, titles in trailers.iteritems():
inc_url = url + url_extension
try:
page = task.requests.get(inc_url)
except RequestException as err:
log.warning("RequestsException when opening playlist page: %s" % err)
continue
soup = get_soup(page.text)
for title in titles:
trailer = soup.find(text=title.split(' - ')[1])
if not trailer:
log.debug('did not find trailer link')
continue
trailers_link = trailer.find_parent('a')
if not trailers_link:
log.debug('did not find trailer link')
continue
try:
page = task.requests.get(urlparse.urljoin(url, trailers_link['href']))
except RequestException as e:
log.debug('error getting trailers page')
continue
trailer_soup = get_soup(page.text)
link = trailer_soup.find('a', attrs={'class': 'movieLink'})
if not link:
log.debug('could not find download link')
continue
# Need to add an 'h' in front of the resolution
entry_url = link['href']
entry_url = entry_url[:entry_url.find(config + '.mov')] + 'h%s.mov' % config
result.append(Entry(title, entry_url))
return result
register_plugin(AppleTrailers, 'apple_trailers', api_ver=2)
| from __future__ import unicode_literals, division, absolute_import
import logging
import re
from urllib2 import HTTPError
from flexget.entry import Entry
from flexget.plugin import priority, register_plugin, get_plugin_by_name, DependencyError
from flexget.utils.cached_input import cached
from flexget.utils.tools import urlopener
from flexget.utils.soup import get_soup
try:
from flexget.plugins.input.rss import InputRSS
except ImportError:
raise DependencyError(issued_by='apple_trailers', missing='rss')
log = logging.getLogger('apple_trailers')
class AppleTrailers(InputRSS):
"""
Adds support for Apple.com movie trailers.
apple_trailers: 480p
Choice of quality is one of: ipod, '320', '480', 640w, 480p, 720p, 1080p
"""
rss_url = 'http://trailers.apple.com/trailers/home/rss/newtrailers.rss'
qualities = ['ipod', 320, '320', 480, '480', '640w', '480p', '720p', '1080p']
schema = {'enum': qualities}
# Run before headers plugin
@priority(135)
def on_task_start(self, task, config):
# TODO: Resolve user-agent in a way that doesn't involve modifying the task config.
# make sure we have dependencies available, will throw DependencyError if not
get_plugin_by_name('headers')
# configure them
task.config['headers'] = {'User-Agent': 'QuickTime/7.6.6'}
self.quality = str(config)
@priority(127)
@cached('apple_trailers')
def on_task_input(self, task, config):
# use rss plugin
rss_config = {'url': self.rss_url}
rss_entries = super(AppleTrailers, self).on_task_input(task, rss_config)
# Multiple entries can point to the same movie page (trailer 1, clip
# 1, etc.)
entries = {}
for entry in rss_entries:
url = entry['original_url']
if url in entries:
continue
else:
title = entry['title']
entries[url] = title[:title.rfind('-')].rstrip()
result = []
for url, title in entries.iteritems():
inc_url = url + 'includes/playlists/web.inc'
try:
page = urlopener(inc_url, log)
except HTTPError, err:
log.warning("HTTPError when opening playlist page: %d %s" % (err.code, err.reason))
continue
soup = get_soup(page)
links = soup.find_all('a', attrs={'class': 'target-quicktimeplayer', 'href': re.compile(r'_h?480p\.mov$')})
for link in links:
url = link.get('href')
url = url[:url.rfind('_')]
quality = self.quality.lower()
if quality == 'ipod':
url += '_i320.m4v'
else:
url += '_h' + quality + '.mov'
entry = Entry()
entry['url'] = url
entry['title'] = title
match = re.search(r'.*/([^?#]*)', url)
entry['filename'] = match.group(1)
result.append(entry)
log.debug('found trailer %s', url)
return result
register_plugin(AppleTrailers, 'apple_trailers', api_ver=2)
| Python | 0 |
17037f53d3b3a54456892a986e1a199d381b5074 | Use absolute_import in markdown.py, to fix import problem. | pokedex/db/markdown.py | pokedex/db/markdown.py | # encoding: utf8
u"""Implements the markup used for description and effect text in the database.
The language used is a variation of Markdown and Markdown Extra. There are
docs for each at http://daringfireball.net/projects/markdown/ and
http://michelf.com/projects/php-markdown/extra/ respectively.
Pokédex links are represented with the extended syntax `[name]{type}`, e.g.,
`[Eevee]{pokemon}`. The actual code that parses these is in spline-pokedex.
"""
from __future__ import absolute_import
import markdown
import sqlalchemy.types
class MarkdownString(object):
"""Wraps a Markdown string. Stringifies to the original text, but .as_html
will return an HTML rendering.
To add extensions to the rendering (which is necessary for rendering links
correctly, and which spline-pokedex does), you must append to this class's
`markdown_extensions` list. Yep, that's gross.
"""
markdown_extensions = ['extra']
def __init__(self, source_text):
self.source_text = source_text
self._as_html = None
def __unicode__(self):
return self.source_text
@property
def as_html(self):
"""Returns the string as HTML4."""
if self._as_html:
return self._as_html
md = markdown.Markdown(
extensions=self.markdown_extensions,
safe_mode='escape',
output_format='xhtml1',
)
self._as_html = md.convert(self.source_text)
return self._as_html
@property
def as_text(self):
"""Returns the string in a plaintext-friendly form.
At the moment, this is just the original source text.
"""
return self.source_text
class MoveEffectProperty(object):
"""Property that wraps a move effect. Used like this:
MoveClass.effect = MoveEffectProperty('effect')
some_move.effect # returns a MarkdownString
some_move.effect.as_html # returns a chunk of HTML
This class also performs simple substitution on the effect, replacing
`$effect_chance` with the move's actual effect chance.
"""
def __init__(self, effect_column):
self.effect_column = effect_column
def __get__(self, move, move_class):
effect_text = getattr(move.move_effect, self.effect_column)
effect_text = effect_text.replace(
u'$effect_chance',
unicode(move.effect_chance),
)
return MarkdownString(effect_text)
class MarkdownColumn(sqlalchemy.types.TypeDecorator):
"""Generic SQLAlchemy column type for Markdown text.
Do NOT use this for move effects! They need to know what move they belong
to so they can fill in, e.g., effect chances. Use the MoveEffectProperty
property class above.
"""
impl = sqlalchemy.types.Unicode
def process_bind_param(self, value, dialect):
if not isinstance(value, basestring):
# Can't assign, e.g., MarkdownString objects yet
raise NotImplementedError
return unicode(value)
def process_result_value(self, value, dialect):
return MarkdownString(value)
| # encoding: utf8
u"""Implements the markup used for description and effect text in the database.
The language used is a variation of Markdown and Markdown Extra. There are
docs for each at http://daringfireball.net/projects/markdown/ and
http://michelf.com/projects/php-markdown/extra/ respectively.
Pokédex links are represented with the extended syntax `[name]{type}`, e.g.,
`[Eevee]{pokemon}`. The actual code that parses these is in spline-pokedex.
"""
import markdown
import sqlalchemy.types
class MarkdownString(object):
"""Wraps a Markdown string. Stringifies to the original text, but .as_html
will return an HTML rendering.
To add extensions to the rendering (which is necessary for rendering links
correctly, and which spline-pokedex does), you must append to this class's
`markdown_extensions` list. Yep, that's gross.
"""
markdown_extensions = ['extra']
def __init__(self, source_text):
self.source_text = source_text
self._as_html = None
def __unicode__(self):
return self.source_text
@property
def as_html(self):
"""Returns the string as HTML4."""
if self._as_html:
return self._as_html
md = markdown.Markdown(
extensions=self.markdown_extensions,
safe_mode='escape',
output_format='xhtml1',
)
self._as_html = md.convert(self.source_text)
return self._as_html
@property
def as_text(self):
"""Returns the string in a plaintext-friendly form.
At the moment, this is just the original source text.
"""
return self.source_text
class MoveEffectProperty(object):
"""Property that wraps a move effect. Used like this:
MoveClass.effect = MoveEffectProperty('effect')
some_move.effect # returns a MarkdownString
some_move.effect.as_html # returns a chunk of HTML
This class also performs simple substitution on the effect, replacing
`$effect_chance` with the move's actual effect chance.
"""
def __init__(self, effect_column):
self.effect_column = effect_column
def __get__(self, move, move_class):
effect_text = getattr(move.move_effect, self.effect_column)
effect_text = effect_text.replace(
u'$effect_chance',
unicode(move.effect_chance),
)
return MarkdownString(effect_text)
class MarkdownColumn(sqlalchemy.types.TypeDecorator):
"""Generic SQLAlchemy column type for Markdown text.
Do NOT use this for move effects! They need to know what move they belong
to so they can fill in, e.g., effect chances. Use the MoveEffectProperty
property class above.
"""
impl = sqlalchemy.types.Unicode
def process_bind_param(self, value, dialect):
if not isinstance(value, basestring):
# Can't assign, e.g., MarkdownString objects yet
raise NotImplementedError
return unicode(value)
def process_result_value(self, value, dialect):
return MarkdownString(value)
| Python | 0 |
306c735f863d3fe6a0922a433a7cdd1d21bdd772 | fix unit test | flumotion/test/test_feedcomponent010.py | flumotion/test/test_feedcomponent010.py | # -*- Mode: Python; test-case-name: flumotion.test.test_feedcomponent010 -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
import time
from twisted.trial import unittest
import common
from twisted.python import failure
from twisted.internet import defer
from flumotion.component import feedcomponent010 as fc
class TestFeeder(unittest.TestCase):
def setUp(self):
self.feeder = fc.Feeder('video:default')
def test_clientConnected(self):
clientId = '/default/muxer-video'
self.feeder.clientConnected(clientId, 3)
clients = self.feeder.getClients()
self.failUnless(3 in clients.keys())
client = clients[3]
self.assertEquals(client.uiState.get('clientId'), clientId)
def testReconnect(self):
clientId = '/default/muxer-video'
# connect
c = self.feeder.clientConnected(clientId, 3)
# verify some stuff
self.clientAssertStats(c, 0, 0, 0, 0, 1)
# read 10 bytes, drop 1 buffer
c.setStats((10, None, None, None, time.time(), 1))
self.clientAssertStats(c, 10, 1, 10, 1, 1)
# disconnect
self.feeder.clientDisconnected(3)
self.clientAssertStats(c, 0, 0, 10, 1, 1)
# connect again
self.feeder.clientConnected(clientId, 3)
self.clientAssertStats(c, 0, 0, 10, 1, 2)
# read 20 bytes, drop 2 buffers
c.setStats((20, None, None, None, time.time(), 2))
self.clientAssertStats(c, 20, 2, 30, 3, 2)
def clientAssertEquals(self, client, key, value):
self.assertEquals(client.uiState.get(key), value)
def clientAssertStats(self, client, brc, bdc, brt, bdt, reconnects):
self.clientAssertEquals(client, 'bytesReadCurrent', brc)
self.clientAssertEquals(client, 'buffersDroppedCurrent', bdc)
self.clientAssertEquals(client, 'bytesReadTotal', brt)
self.clientAssertEquals(client, 'buffersDroppedTotal', bdt)
self.clientAssertEquals(client, 'reconnects', reconnects)
if __name__ == '__main__':
unittest.main()
| # -*- Mode: Python; test-case-name: flumotion.test.test_feedcomponent010 -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
from twisted.trial import unittest
import common
from twisted.python import failure
from twisted.internet import defer
from flumotion.component import feedcomponent010 as fc
class TestFeeder(unittest.TestCase):
def setUp(self):
self.feeder = fc.Feeder('video:default')
def test_clientConnected(self):
clientId = '/default/muxer-video'
self.feeder.clientConnected(clientId, 3)
clients = self.feeder.getClients()
self.failUnless(3 in clients.keys())
client = clients[3]
self.assertEquals(client.uiState.get('clientId'), clientId)
def testReconnect(self):
clientId = '/default/muxer-video'
# connect
c = self.feeder.clientConnected(clientId, 3)
# verify some stuff
self.clientAssertStats(c, 0, 0, 0, 0, 1)
# read 10 bytes, drop 1 buffer
c.setStats((10, None, None, None, None, 1))
self.clientAssertStats(c, 10, 1, 10, 1, 1)
# disconnect
self.feeder.clientDisconnected(3)
self.clientAssertStats(c, 0, 0, 10, 1, 1)
# connect again
self.feeder.clientConnected(clientId, 3)
self.clientAssertStats(c, 0, 0, 10, 1, 2)
# read 20 bytes, drop 2 buffers
c.setStats((20, None, None, None, None, 2))
self.clientAssertStats(c, 20, 2, 30, 3, 2)
def clientAssertEquals(self, client, key, value):
self.assertEquals(client.uiState.get(key), value)
def clientAssertStats(self, client, brc, bdc, brt, bdt, reconnects):
self.clientAssertEquals(client, 'bytesReadCurrent', brc)
self.clientAssertEquals(client, 'buffersDroppedCurrent', bdc)
self.clientAssertEquals(client, 'bytesReadTotal', brt)
self.clientAssertEquals(client, 'buffersDroppedTotal', bdt)
self.clientAssertEquals(client, 'reconnects', reconnects)
if __name__ == '__main__':
unittest.main()
| Python | 0.000001 |
1afce678dec65bf3c6445322ff7961c7aca05f56 | add more error checking for couchbase python client removal | api/code/src/main/python/stratuslab/installator/CouchbaseClient.py | api/code/src/main/python/stratuslab/installator/CouchbaseClient.py | #
# Copyright (c) 2013, Centre National de la Recherche Scientifique
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import string
import os.path
from random import choice
import stratuslab.system.SystemFactory as SystemFactory
from stratuslab.installator.Installator import Installator
from stratuslab import Util
from stratuslab.Util import printError
class CouchbaseClient(Installator):
def __init__(self, configHolder):
configHolder.assign(self)
self.system = SystemFactory.getSystem(self.frontendSystem, configHolder)
self._pkgs = ['libcouchbase2-libevent', 'libcouchbase-devel']
self._deps = ['python-pip', 'gcc']
self._repofile = '/etc/yum.repos.d/couchbase.repo'
self._repourl = 'http://packages.couchbase.com/rpm/couchbase-centos62-x86_64.repo'
def _installFrontend(self):
self._installPackages()
def _setupFrontend(self):
pass
def _startServicesFrontend(self):
pass
def _installPackages(self):
Util.printStep('Setting up Couchbase yum repository')
cmd = 'curl --output %s %s' % (self._repofile, self._repourl)
self._executeExitOnError(cmd)
Util.printStep('Removing Couchbase python client')
try:
cmd = 'pip uninstall -y couchbase'
rc, output = Util.execute(cmd.split(' '),
withOutput=True,
verboseLevel=self.verboseLevel,
verboseThreshold=Util.VERBOSE_LEVEL_DETAILED)
if rc != 0:
Util.printInfo('Couchbase python client NOT removed')
else:
Util.printInfo('Couchbase python client removed')
except:
Util.printInfo("Couchbase python client NOT removed")
Util.printStep('Removing Couchbase C client')
cmd = 'yum erase -y %s' % ' '.join(self._pkgs)
self._executeExitOnError(cmd)
Util.printStep('Installing Couchbase C client')
cmd = 'yum install -y %s' % ' '.join(self._pkgs)
self._executeExitOnError(cmd)
Util.printStep('Installing Couchbase python client dependencies')
cmd = 'yum install -y %s' % ' '.join(self._deps)
self._executeExitOnError(cmd)
Util.printStep('Installing Couchbase python client')
cmd = 'pip install couchbase'
self._executeExitOnError(cmd)
def _configure(self):
pass
def _restartService(self):
pass
def _executeExitOnError(self, cmd_str):
rc, output = Util.execute(cmd_str.split(' '),
withOutput=True,
verboseLevel=self.verboseLevel,
verboseThreshold=Util.VERBOSE_LEVEL_DETAILED)
if rc != 0:
printError('Failed running: %s\n%s' % (cmd_str, output))
| #
# Copyright (c) 2013, Centre National de la Recherche Scientifique
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import string
import os.path
from random import choice
import stratuslab.system.SystemFactory as SystemFactory
from stratuslab.installator.Installator import Installator
from stratuslab import Util
from stratuslab.Util import printError
class CouchbaseClient(Installator):
def __init__(self, configHolder):
configHolder.assign(self)
self.system = SystemFactory.getSystem(self.frontendSystem, configHolder)
self._pkgs = ['libcouchbase2-libevent', 'libcouchbase-devel']
self._deps = ['python-pip', 'gcc']
self._repofile = '/etc/yum.repos.d/couchbase.repo'
self._repourl = 'http://packages.couchbase.com/rpm/couchbase-centos62-x86_64.repo'
def _installFrontend(self):
self._installPackages()
def _setupFrontend(self):
pass
def _startServicesFrontend(self):
pass
def _installPackages(self):
Util.printStep('Setting up Couchbase yum repository')
cmd = 'curl --output %s %s' % (self._repofile, self._repourl)
self._executeExitOnError(cmd)
Util.printStep('Removing Couchbase python client')
cmd = 'pip uninstall -y couchbase'
rc, output = Util.execute(cmd.split(' '),
withOutput=True,
verboseLevel=self.verboseLevel,
verboseThreshold=Util.VERBOSE_LEVEL_DETAILED)
if rc != 0:
Util.printInfo("cannot uninstall couchbase python client\n%s" % output)
Util.printStep('Removing Couchbase C client')
cmd = 'yum erase -y %s' % ' '.join(self._pkgs)
self._executeExitOnError(cmd)
Util.printStep('Installing Couchbase C client')
cmd = 'yum install -y %s' % ' '.join(self._pkgs)
self._executeExitOnError(cmd)
Util.printStep('Installing Couchbase python client dependencies')
cmd = 'yum install -y %s' % ' '.join(self._deps)
self._executeExitOnError(cmd)
Util.printStep('Installing Couchbase python client')
cmd = 'pip install couchbase'
self._executeExitOnError(cmd)
def _configure(self):
pass
def _restartService(self):
pass
def _executeExitOnError(self, cmd_str):
rc, output = Util.execute(cmd_str.split(' '),
withOutput=True,
verboseLevel=self.verboseLevel,
verboseThreshold=Util.VERBOSE_LEVEL_DETAILED)
if rc != 0:
printError('Failed running: %s\n%s' % (cmd_str, output))
| Python | 0 |
4475cd927dda1d8ab685507895e0fc4bde6e3b4a | switch window index error | pages/base_page.py | pages/base_page.py | from .page import Page
class BasePage(Page):
def get_cookie_index_page(self, url, cookie):
self.get_relative_path(url)
self.maximize_window()
self.selenium.add_cookie(cookie)
self.selenium.refresh()
def switch_to_second_window(self):
handles = self.selenium.window_handles
try:
handle = handles[1]
except IndexError:
handle = handles[0]
self.selenium.switch_to_window(handle)
| from .page import Page
class BasePage(Page):
def get_cookie_index_page(self, url, cookie):
self.get_relative_path(url)
self.maximize_window()
self.selenium.add_cookie(cookie)
self.selenium.refresh()
def switch_to_second_window(self):
handles = self.selenium.window_handles
self.selenium.switch_to_window(handles[1])
| Python | 0.000001 |
439a09ce69b9ba66e2dc7c21b952ffc438fbe0f4 | Add Abuse enum to outcomes. (#13833) | src/sentry/utils/outcomes.py | src/sentry/utils/outcomes.py | """
sentry.utils.outcomes.py
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from datetime import datetime
from django.conf import settings
from enum import IntEnum
import random
import six
import time
from sentry import tsdb, options
from sentry.utils import json, metrics
from sentry.utils.data_filters import FILTER_STAT_KEYS_TO_VALUES
from sentry.utils.dates import to_datetime
from sentry.utils.pubsub import QueuedPublisherService, KafkaPublisher
# valid values for outcome
class Outcome(IntEnum):
ACCEPTED = 0
FILTERED = 1
RATE_LIMITED = 2
INVALID = 3
ABUSE = 4
outcomes = settings.KAFKA_TOPICS[settings.KAFKA_OUTCOMES]
outcomes_publisher = None
def track_outcome(org_id, project_id, key_id, outcome, reason=None, timestamp=None, event_id=None):
"""
This is a central point to track org/project counters per incoming event.
NB: This should only ever be called once per incoming event, which means
it should only be called at the point we know the final outcome for the
event (invalid, rate_limited, accepted, discarded, etc.)
This increments all the relevant legacy RedisTSDB counters, as well as
sending a single metric event to Kafka which can be used to reconstruct the
counters with SnubaTSDB.
"""
global outcomes_publisher
if outcomes_publisher is None:
outcomes_publisher = QueuedPublisherService(
KafkaPublisher(
settings.KAFKA_CLUSTERS[outcomes['cluster']]
)
)
assert isinstance(org_id, six.integer_types)
assert isinstance(project_id, six.integer_types)
assert isinstance(key_id, (type(None), six.integer_types))
assert isinstance(outcome, Outcome)
assert isinstance(timestamp, (type(None), datetime))
timestamp = timestamp or to_datetime(time.time())
increment_list = []
if outcome != Outcome.INVALID:
# This simply preserves old behavior. We never counted invalid events
# (too large, duplicate, CORS) toward regular `received` counts.
increment_list.extend([
(tsdb.models.project_total_received, project_id),
(tsdb.models.organization_total_received, org_id),
(tsdb.models.key_total_received, key_id),
])
if outcome == Outcome.FILTERED:
increment_list.extend([
(tsdb.models.project_total_blacklisted, project_id),
(tsdb.models.organization_total_blacklisted, org_id),
(tsdb.models.key_total_blacklisted, key_id),
])
elif outcome == Outcome.RATE_LIMITED:
increment_list.extend([
(tsdb.models.project_total_rejected, project_id),
(tsdb.models.organization_total_rejected, org_id),
(tsdb.models.key_total_rejected, key_id),
])
if reason in FILTER_STAT_KEYS_TO_VALUES:
increment_list.append((FILTER_STAT_KEYS_TO_VALUES[reason], project_id))
increment_list = [(model, key) for model, key in increment_list if key is not None]
if increment_list:
tsdb.incr_multi(increment_list, timestamp=timestamp)
# Send a snuba metrics payload.
if random.random() <= options.get('snuba.track-outcomes-sample-rate'):
outcomes_publisher.publish(
outcomes['topic'],
json.dumps({
'timestamp': timestamp,
'org_id': org_id,
'project_id': project_id,
'key_id': key_id,
'outcome': outcome.value,
'reason': reason,
'event_id': event_id,
})
)
metrics.incr(
'events.outcomes',
skip_internal=True,
tags={
'outcome': outcome.name.lower(),
'reason': reason,
},
)
| """
sentry.utils.outcomes.py
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from datetime import datetime
from django.conf import settings
from enum import IntEnum
import random
import six
import time
from sentry import tsdb, options
from sentry.utils import json, metrics
from sentry.utils.data_filters import FILTER_STAT_KEYS_TO_VALUES
from sentry.utils.dates import to_datetime
from sentry.utils.pubsub import QueuedPublisherService, KafkaPublisher
# valid values for outcome
class Outcome(IntEnum):
ACCEPTED = 0
FILTERED = 1
RATE_LIMITED = 2
INVALID = 3
outcomes = settings.KAFKA_TOPICS[settings.KAFKA_OUTCOMES]
outcomes_publisher = None
def track_outcome(org_id, project_id, key_id, outcome, reason=None, timestamp=None, event_id=None):
"""
This is a central point to track org/project counters per incoming event.
NB: This should only ever be called once per incoming event, which means
it should only be called at the point we know the final outcome for the
event (invalid, rate_limited, accepted, discarded, etc.)
This increments all the relevant legacy RedisTSDB counters, as well as
sending a single metric event to Kafka which can be used to reconstruct the
counters with SnubaTSDB.
"""
global outcomes_publisher
if outcomes_publisher is None:
outcomes_publisher = QueuedPublisherService(
KafkaPublisher(
settings.KAFKA_CLUSTERS[outcomes['cluster']]
)
)
assert isinstance(org_id, six.integer_types)
assert isinstance(project_id, six.integer_types)
assert isinstance(key_id, (type(None), six.integer_types))
assert isinstance(outcome, Outcome)
assert isinstance(timestamp, (type(None), datetime))
timestamp = timestamp or to_datetime(time.time())
increment_list = []
if outcome != Outcome.INVALID:
# This simply preserves old behavior. We never counted invalid events
# (too large, duplicate, CORS) toward regular `received` counts.
increment_list.extend([
(tsdb.models.project_total_received, project_id),
(tsdb.models.organization_total_received, org_id),
(tsdb.models.key_total_received, key_id),
])
if outcome == Outcome.FILTERED:
increment_list.extend([
(tsdb.models.project_total_blacklisted, project_id),
(tsdb.models.organization_total_blacklisted, org_id),
(tsdb.models.key_total_blacklisted, key_id),
])
elif outcome == Outcome.RATE_LIMITED:
increment_list.extend([
(tsdb.models.project_total_rejected, project_id),
(tsdb.models.organization_total_rejected, org_id),
(tsdb.models.key_total_rejected, key_id),
])
if reason in FILTER_STAT_KEYS_TO_VALUES:
increment_list.append((FILTER_STAT_KEYS_TO_VALUES[reason], project_id))
increment_list = [(model, key) for model, key in increment_list if key is not None]
if increment_list:
tsdb.incr_multi(increment_list, timestamp=timestamp)
# Send a snuba metrics payload.
if random.random() <= options.get('snuba.track-outcomes-sample-rate'):
outcomes_publisher.publish(
outcomes['topic'],
json.dumps({
'timestamp': timestamp,
'org_id': org_id,
'project_id': project_id,
'key_id': key_id,
'outcome': outcome.value,
'reason': reason,
'event_id': event_id,
})
)
metrics.incr(
'events.outcomes',
skip_internal=True,
tags={
'outcome': outcome.name.lower(),
'reason': reason,
},
)
| Python | 0 |
e69efded329ebbcf5ccf74ef137dc1a80bd4b4a6 | add 2.1.2, re-run cython if needed (#13102) | var/spack/repos/builtin/packages/py-line-profiler/package.py | var/spack/repos/builtin/packages/py-line-profiler/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class PyLineProfiler(PythonPackage):
"""Line-by-line profiler."""
homepage = "https://github.com/rkern/line_profiler"
url = "https://pypi.io/packages/source/l/line_profiler/line_profiler-2.0.tar.gz"
version('2.1.2', sha256='efa66e9e3045aa7cb1dd4bf0106e07dec9f80bc781a993fbaf8162a36c20af5c')
version('2.0', 'fc93c6bcfac3b7cb1912cb28836d7ee6')
depends_on('python@2.5:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-cython', type='build')
depends_on('py-ipython@0.13:', type=('build', 'run'))
# See https://github.com/rkern/line_profiler/issues/166
@run_before('build')
@when('^python@3.7:')
def fix_cython(self):
cython = self.spec['py-cython'].command
for root, _, files in os.walk('.'):
for fn in files:
if fn.endswith('.pyx'):
cython(os.path.join(root, fn))
| # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyLineProfiler(PythonPackage):
"""Line-by-line profiler."""
homepage = "https://github.com/rkern/line_profiler"
url = "https://pypi.io/packages/source/l/line_profiler/line_profiler-2.0.tar.gz"
version('2.0', 'fc93c6bcfac3b7cb1912cb28836d7ee6')
depends_on('python@2.5:')
depends_on('py-setuptools', type='build')
depends_on('py-cython', type='build')
depends_on('py-ipython@0.13:', type=('build', 'run'))
| Python | 0 |
381adeeec0fd1d65372d7003183d4b1ec8f2cfbf | Increase V8JS Stack Limit (#584) | dmoj/executors/V8JS.py | dmoj/executors/V8JS.py | from dmoj.executors.script_executor import ScriptExecutor
class Executor(ScriptExecutor):
ext = 'js'
name = 'V8JS'
command = 'v8dmoj'
test_program = 'print(gets());'
address_grace = 786432
nproc = -1
@classmethod
def get_version_flags(cls, command):
return [('-e', 'print(version())')]
def get_cmdline(self):
return [self.get_command(), '--stack-size=131072', self._code] # 128MB Stack Limit
| from dmoj.executors.script_executor import ScriptExecutor
class Executor(ScriptExecutor):
ext = 'js'
name = 'V8JS'
command = 'v8dmoj'
test_program = 'print(gets());'
address_grace = 786432
nproc = -1
@classmethod
def get_version_flags(cls, command):
return [('-e', 'print(version())')]
| Python | 0 |
7fba4a676622e93416f32ee69bfa295647979c7a | fix path on test file | taxcalc/tests/test_calculate.py | taxcalc/tests/test_calculate.py | import os
import sys
cur_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(cur_path, "../../"))
sys.path.append(os.path.join(cur_path, "../"))
import numpy as np
import pandas as pd
from numba import jit, vectorize, guvectorize
from taxcalc import *
def test_make_Calculator():
tax_dta = pd.read_csv(os.path.join(cur_path, "../../puf2.csv"))
calc = Calculator(tax_dta)
def test_make_Calculator_mods():
cur_path = os.path.abspath(os.path.dirname(__file__))
tax_dta = pd.read_csv(os.path.join(cur_path, "../../puf2.csv"))
calc1 = calculator(tax_dta)
calc2 = calculator(tax_dta, _amex=np.array([4000]))
update_calculator_from_module(calc2, constants)
update_globals_from_calculator(calc2)
assert all(calc2._amex == np.array([4000]))
| import os
import sys
cur_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(cur_path, "../../"))
sys.path.append(os.path.join(cur_path, "../"))
import numpy as np
import pandas as pd
from numba import jit, vectorize, guvectorize
from taxcalc import *
def test_make_Calculator():
tax_dta = pd.read_csv("../../puf2.csv")
calc = Calculator(tax_dta)
def test_make_Calculator_mods():
cur_path = os.path.abspath(os.path.dirname(__file__))
tax_dta = pd.read_csv(os.path.join(cur_path, "../../puf2.csv"))
calc1 = calculator(tax_dta)
calc2 = calculator(tax_dta, _amex=np.array([4000]))
update_calculator_from_module(calc2, constants)
update_globals_from_calculator(calc2)
assert all(calc2._amex == np.array([4000]))
| Python | 0.000001 |
f4a460646f87b63781ad32b8ef6a0b9c0d8a6290 | fix issue #357, which makes real problem more obvious (media file does not exist | moviepy/video/io/VideoFileClip.py | moviepy/video/io/VideoFileClip.py | import os
from moviepy.video.VideoClip import VideoClip
from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.Clip import Clip
from moviepy.video.io.ffmpeg_reader import FFMPEG_VideoReader
class VideoFileClip(VideoClip):
"""
A video clip originating from a movie file. For instance: ::
>>> clip = VideoFileClip("myHolidays.mp4")
>>> clip2 = VideoFileClip("myMaskVideo.avi")
Parameters
------------
filename:
The name of the video file. It can have any extension supported
by ffmpeg: .ogv, .mp4, .mpeg, .avi, .mov etc.
has_mask:
Set this to 'True' if there is a mask included in the videofile.
Video files rarely contain masks, but some video codecs enable
that. For istance if you have a MoviePy VideoClip with a mask you
can save it to a videofile with a mask. (see also
``VideoClip.write_videofile`` for more details).
audio:
Set to `False` if the clip doesn't have any audio or if you do not
wish to read the audio.
Attributes
-----------
filename:
Name of the original video file.
fps:
Frames per second in the original file.
Read docstrings for Clip() and VideoClip() for other, more generic, attributes.
"""
def __init__(self, filename, has_mask=False,
audio=True, audio_buffersize = 200000,
audio_fps=44100, audio_nbytes=2, verbose=False):
VideoClip.__init__(self)
# Make a reader
pix_fmt= "rgba" if has_mask else "rgb24"
self.reader = None #need this just in case FFMPEG has issues (__del__ complains)
self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt)
# Make some of the reader's attributes accessible from the clip
self.duration = self.reader.duration
self.end = self.reader.duration
self.fps = self.reader.fps
self.size = self.reader.size
self.filename = self.reader.filename
if has_mask:
self.make_frame = lambda t: reader.get_frame(t)[:,:,:3]
mask_mf = lambda t: reader.get_frame(t)[:,:,3]/255.0
self.mask = (VideoClip(ismask = True, make_frame = mask_mf)
.set_duration(self.duration))
self.mask.fps = self.fps
else:
self.make_frame = lambda t: reader.get_frame(t)
# Make a reader for the audio, if any.
if audio and self.reader.infos['audio_found']:
self.audio = AudioFileClip(filename,
buffersize= audio_buffersize,
fps = audio_fps,
nbytes = audio_nbytes)
def __del__(self):
""" Close/delete the internal reader. """
del self.reader
| import os
from moviepy.video.VideoClip import VideoClip
from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.Clip import Clip
from moviepy.video.io.ffmpeg_reader import FFMPEG_VideoReader
class VideoFileClip(VideoClip):
"""
A video clip originating from a movie file. For instance: ::
>>> clip = VideoFileClip("myHolidays.mp4")
>>> clip2 = VideoFileClip("myMaskVideo.avi")
Parameters
------------
filename:
The name of the video file. It can have any extension supported
by ffmpeg: .ogv, .mp4, .mpeg, .avi, .mov etc.
has_mask:
Set this to 'True' if there is a mask included in the videofile.
Video files rarely contain masks, but some video codecs enable
that. For istance if you have a MoviePy VideoClip with a mask you
can save it to a videofile with a mask. (see also
``VideoClip.write_videofile`` for more details).
audio:
Set to `False` if the clip doesn't have any audio or if you do not
wish to read the audio.
Attributes
-----------
filename:
Name of the original video file.
fps:
Frames per second in the original file.
Read docstrings for Clip() and VideoClip() for other, more generic, attributes.
"""
def __init__(self, filename, has_mask=False,
audio=True, audio_buffersize = 200000,
audio_fps=44100, audio_nbytes=2, verbose=False):
VideoClip.__init__(self)
# Make a reader
pix_fmt= "rgba" if has_mask else "rgb24"
reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt)
self.reader = reader
# Make some of the reader's attributes accessible from the clip
self.duration = self.reader.duration
self.end = self.reader.duration
self.fps = self.reader.fps
self.size = self.reader.size
self.filename = self.reader.filename
if has_mask:
self.make_frame = lambda t: reader.get_frame(t)[:,:,:3]
mask_mf = lambda t: reader.get_frame(t)[:,:,3]/255.0
self.mask = (VideoClip(ismask = True, make_frame = mask_mf)
.set_duration(self.duration))
self.mask.fps = self.fps
else:
self.make_frame = lambda t: reader.get_frame(t)
# Make a reader for the audio, if any.
if audio and self.reader.infos['audio_found']:
self.audio = AudioFileClip(filename,
buffersize= audio_buffersize,
fps = audio_fps,
nbytes = audio_nbytes)
def __del__(self):
""" Close/delete the internal reader. """
del self.reader
| Python | 0 |
faf9638bc69dc79c7fdc9294cc309c40ca57d518 | Fix process names in test_nailyd_alive | fuelweb_test/integration/test_nailyd.py | fuelweb_test/integration/test_nailyd.py | import logging
import xmlrpclib
from fuelweb_test.integration.base import Base
from fuelweb_test.helpers import SSHClient
class TestNailyd(Base):
def __init__(self, *args, **kwargs):
super(TestNailyd, self).__init__(*args, **kwargs)
self.remote = SSHClient()
def setUp(self):
logging.info('Admin node ip: %s' % self.get_admin_node_ip())
self.ip = self.get_admin_node_ip()
def tearDown(self):
pass
def test_nailyd_alive(self):
self.remote.connect_ssh(self.ip, 'root', 'r00tme')
ps_output = self.remote.execute('ps ax')['stdout']
naily_processes = filter(lambda x: 'naily master' in x, ps_output)
logging.debug("Found %d naily master processes: %s" %
(len(naily_processes), naily_processes))
self.assertEqual(1, len(naily_processes))
naily_processes = filter(lambda x: 'naily worker' in x, ps_output)
logging.debug("Found %d naily worker processes: %s" %
(len(naily_processes), naily_processes))
self.assertEqual(True, len(naily_processes) > 1)
| import logging
import xmlrpclib
from fuelweb_test.integration.base import Base
from fuelweb_test.helpers import SSHClient
class TestNailyd(Base):
def __init__(self, *args, **kwargs):
super(TestNailyd, self).__init__(*args, **kwargs)
self.remote = SSHClient()
def setUp(self):
logging.info('Admin node ip: %s' % self.get_admin_node_ip())
self.ip = self.get_admin_node_ip()
def tearDown(self):
pass
def test_nailyd_alive(self):
self.remote.connect_ssh(self.ip, 'root', 'r00tme')
ps_output = self.remote.execute('ps ax')['stdout']
naily_processes = filter(lambda x: '/usr/bin/nailyd' in x, ps_output)
logging.debug("Found naily processes: %s" % naily_processes)
self.assertEquals(len(naily_processes), 1)
| Python | 0.000024 |
13a64059b71fccb8315f552d8e96f130c513a540 | Remove old code. | charity_server.py | charity_server.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 30 01:14:12 2017
@author: colm
"""
from flask import Flask, jsonify
from parse_likecharity import refresh_charities
from datetime import datetime
app = Flask(__name__)
refresh_rate = 24 * 60 * 60 #Seconds
start_time = datetime.now()
initialized = False
# variables that are accessible from anywhere
payload = {}
@app.route("/gci")
def gci():
global payload
delta = datetime.now() - start_time
if delta.total_seconds() > refresh_rate or not(initialized):
categories, charity_dict = refresh_charities()
payload = {'categories':categories, 'charities':charity_dict}
return jsonify(payload)
if __name__ == "__main__":
categories, charity_dict = refresh_charities()
app.run(host='0.0.0.0')
print('test')
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 30 01:14:12 2017
@author: colm
"""
from flask import Flask, jsonify
from parse_likecharity import refresh_charities
import threading
from datetime import datetime
app = Flask(__name__)
refresh_rate = 24 * 60 * 60 #Seconds
start_time = datetime.now()
# variables that are accessible from anywhere
payload = {}
# lock to control access to variable
dataLock = threading.Lock()
# thread handler
backgroundThread = threading.Thread()
def update_charities():
print('Updating charities in background thread')
global payload
global backgroundThread
with dataLock:
categories, charity_dict = refresh_charities()
payload = {'categories':categories, 'charities':charity_dict}
print('Running!')
# Set the next thread to happen
backgroundThread = threading.Timer(refresh_rate, update_charities, ())
backgroundThread.start()
@app.route("/gci")
def gci():
global payload
delta = datetime.now() - start_time
if delta.total_seconds() > refresh_rate:
categories, charity_dict = refresh_charities()
payload = {'categories':categories, 'charities':charity_dict}
return jsonify(payload)
if __name__ == "__main__":
update_charities()
app.run(host='0.0.0.0')
backgroundThread.cancel()
print('test')
| Python | 0.000045 |
c5c0b3f8b6d61a1534e74e4ceba8b6a7eedb106d | support multiple registration to the same event | dbus-tools/dbus-register.py | dbus-tools/dbus-register.py | ###############################################################################
# Copyright 2012 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import sys, dbus, json
from twisted.internet import glib2reactor
# Configure the twisted mainloop to be run inside the glib mainloop.
# This must be done before importing the other twisted modules
glib2reactor.install()
from twisted.internet import reactor, defer
from autobahn.websocket import listenWS
from autobahn.wamp import exportRpc, WampServerFactory, WampServerProtocol
from dbus.mainloop.glib import DBusGMainLoop
import gobject
gobject.threads_init()
from dbus import glib
glib.init_threads()
# enable debug log
from twisted.python import log
log.startLogging(sys.stdout)
###############################################################################
def hashId(senderName, objectName, interfaceName, signalName):
return senderName + "#" + objectName + "#" + interfaceName + "#" + signalName
###############################################################################
class DbusSignalHandler:
def __init__(self, bus, senderName, objectName, interfaceName, signalName):
# publish hash id
self.id = hashId(senderName, objectName, interfaceName, signalName)
# connect dbus proxy object to signal
self.object = bus.get_object(senderName, objectName)
self.object.connect_to_signal(signalName, self.handleSignal, interfaceName)
def handleSignal(self, *args):
# publish dbus args under topic hash id
factory.dispatch(self.id, json.dumps(args))
###############################################################################
class DbusRegisterService:
def __init__(self):
# signal handlers
self.signalHandlers = []
@exportRpc
def dbusRegister(self, list):
# read arguments list by position
if len(list) < 5:
raise Exception("Error: expected arguments: bus, sender, object, interface, signal)")
# check if a handler exists
sigId = hashId(list[1], list[2], list[3], list[4])
for handler in self.signalHandlers:
if handler.id == sigId:
return sigId
if list[0] == "session":
bus = dbus.SessionBus()
elif list[0] == "system":
bus = dbus.SystemBus()
else:
raise Exception("Error: invalid bus: %s" % list[0])
# create a handler that will publish the signal
dbusSignalHandler = DbusSignalHandler(bus, list[1], list[2], list[3], list[4])
self.signalHandlers.append(dbusSignalHandler)
return dbusSignalHandler.id
###############################################################################
class DbusRegisterServerProtocol(WampServerProtocol):
def onSessionOpen(self):
# create dbus-register service instance
self.DbusRegisterService = DbusRegisterService()
# register it for RPC
self.registerForRpc(self.DbusRegisterService)
# register for Publish / Subscribe
self.registerForPubSub("", True)
###############################################################################
if __name__ == '__main__':
port = "9001"
if len(sys.argv) == 2:
port = sys.argv[1]
uri = "ws://localhost:" + port
factory = WampServerFactory(uri, debugWamp = True)
factory.protocol = DbusRegisterServerProtocol
factory.setProtocolOptions(allowHixie76 = True)
listenWS(factory)
DBusGMainLoop(set_as_default=True)
reactor.run()
| ###############################################################################
# Copyright 2012 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import sys, dbus, json
from twisted.internet import glib2reactor
# Configure the twisted mainloop to be run inside the glib mainloop.
# This must be done before importing the other twisted modules
glib2reactor.install()
from twisted.internet import reactor, defer
from autobahn.websocket import listenWS
from autobahn.wamp import exportRpc, WampServerFactory, WampServerProtocol
from dbus.mainloop.glib import DBusGMainLoop
import gobject
gobject.threads_init()
from dbus import glib
glib.init_threads()
# enable debug log
from twisted.python import log
log.startLogging(sys.stdout)
###############################################################################
class DbusSignalHandler:
def __init__(self, bus, senderName, objectName, interfaceName, signalName):
# publish hash id
self.id = senderName + "#" + objectName + "#" + interfaceName + "#" + signalName
# connect dbus proxy object to signal
self.object = bus.get_object(senderName, objectName)
self.object.connect_to_signal(signalName, self.handleSignal, interfaceName)
def handleSignal(self, *args):
# publish dbus args under topic hash id
factory.dispatch(self.id, json.dumps(args))
###############################################################################
class DbusRegisterService:
def __init__(self):
# signal handlers
self.signalHandlers = []
@exportRpc
def dbusRegister(self, list):
# read arguments list by position
if len(list) < 5:
raise Exception("Error: expected arguments: bus, sender, object, interface, signal)")
if list[0] == "session":
bus = dbus.SessionBus()
elif list[0] == "system":
bus = dbus.SystemBus()
else:
raise Exception("Error: invalid bus: %s" % list[0])
# create a handler that will publish the signal
dbusSignalHandler = DbusSignalHandler(bus, list[1], list[2], list[3], list[4])
self.signalHandlers.append(dbusSignalHandler)
return dbusSignalHandler.id
###############################################################################
class DbusRegisterServerProtocol(WampServerProtocol):
def onSessionOpen(self):
# create dbus-register service instance
self.DbusRegisterService = DbusRegisterService()
# register it for RPC
self.registerForRpc(self.DbusRegisterService)
# register for Publish / Subscribe
self.registerForPubSub("", True)
###############################################################################
if __name__ == '__main__':
port = "9001"
if len(sys.argv) == 2:
port = sys.argv[1]
uri = "ws://localhost:" + port
factory = WampServerFactory(uri, debugWamp = True)
factory.protocol = DbusRegisterServerProtocol
factory.setProtocolOptions(allowHixie76 = True)
listenWS(factory)
DBusGMainLoop(set_as_default=True)
reactor.run()
| Python | 0 |
ddf4cbfc263b71ba3eee54b53d33e7ed31e5a8e5 | remove args logging | swampdragon/models.py | swampdragon/models.py | from .pubsub_providers.base_provider import PUBACTIONS
from .model_tools import get_property
from .pubsub_providers.model_publisher import publish_model
from .serializers.serializer_importer import get_serializer
from django.db.models.signals import pre_delete, m2m_changed
from django.dispatch.dispatcher import receiver
import logging
logger = logging.getLogger(__name__)
class SelfPublishModel(object):
serializer_class = None
def __init__(self, *args, **kwargs):
if isinstance(self.serializer_class, str):
self.serializer_class = get_serializer(self.serializer_class, self)
self._pre_save_state = dict()
super(SelfPublishModel, self).__init__(*args, **kwargs)
self._serializer = self.serializer_class(instance=self)
self._set_pre_save_state()
#logger.info("__init__ in SelfPublishModel")
#logger.info(self)
#logger.info(args)
#logger.info(kwargs)
def _set_pre_save_state(self):
"""
Set the state of the model before any changes are done,
so it's possible to determine what fields have changed.
"""
relevant_fields = self._get_relevant_fields()
for field in relevant_fields:
val = get_property(self, field)
if hasattr(self._serializer, field):
continue
if val is None:
self._pre_save_state[field] = None
continue
self._pre_save_state[field] = val
def _get_relevant_fields(self):
"""
Get all fields that will affect the state.
This is used to save the state of the model before it's updated,
to be able to get changes used when publishing an update (so not all fields are published)
"""
# update_fields = list(self._serializer.opts.update_fields)
# publish_fields = list(self._serializer.opts.publish_fields)
# relevant_fields = set(update_fields + publish_fields)
relevant_fields = self._serializer.base_fields
if 'id' in relevant_fields:
relevant_fields.remove('id')
return relevant_fields
def get_changed_fields(self):
changed_fields = []
for k, v in self._pre_save_state.items():
val = get_property(self, k)
if val != v:
changed_fields.append(k)
return changed_fields
def serialize(self):
return self._serializer.serialize()
def _publish(self, action, changed_fields=None):
publish_model(self, self._serializer, action, changed_fields)
def save(self, *args, **kwargs):
logger.info(self.pk)
# if not self.pk:
# http://stackoverflow.com/questions/11561722/django-what-is-the-role-of-modelstate
if self._state.adding:
self.action = PUBACTIONS.created
self.changed_fields = None
else:
self.action = PUBACTIONS.updated
self.changed_fields = self.get_changed_fields()
super(SelfPublishModel, self).save(*args, **kwargs)
self._publish(self.action, self.changed_fields)
@receiver(m2m_changed)
def _self_publish_model_m2m_change(sender, instance, action, model, pk_set, **kwargs):
if not isinstance(instance, SelfPublishModel):
return
instance.action = PUBACTIONS.updated
if action in ['post_add', 'post_clear', 'post_remove']:
instance._publish(instance.action, instance._serializer.opts.publish_fields)
@receiver(pre_delete)
def _self_publish_model_delete(sender, instance, **kwargs):
if isinstance(instance, SelfPublishModel):
instance._publish(PUBACTIONS.deleted)
| from .pubsub_providers.base_provider import PUBACTIONS
from .model_tools import get_property
from .pubsub_providers.model_publisher import publish_model
from .serializers.serializer_importer import get_serializer
from django.db.models.signals import pre_delete, m2m_changed
from django.dispatch.dispatcher import receiver
import logging
logger = logging.getLogger(__name__)
class SelfPublishModel(object):
serializer_class = None
def __init__(self, *args, **kwargs):
if isinstance(self.serializer_class, str):
self.serializer_class = get_serializer(self.serializer_class, self)
self._pre_save_state = dict()
super(SelfPublishModel, self).__init__(*args, **kwargs)
self._serializer = self.serializer_class(instance=self)
self._set_pre_save_state()
#logger.info("__init__ in SelfPublishModel")
#logger.info(self)
logger.info(args)
logger.info(kwargs)
def _set_pre_save_state(self):
"""
Set the state of the model before any changes are done,
so it's possible to determine what fields have changed.
"""
relevant_fields = self._get_relevant_fields()
for field in relevant_fields:
val = get_property(self, field)
if hasattr(self._serializer, field):
continue
if val is None:
self._pre_save_state[field] = None
continue
self._pre_save_state[field] = val
def _get_relevant_fields(self):
"""
Get all fields that will affect the state.
This is used to save the state of the model before it's updated,
to be able to get changes used when publishing an update (so not all fields are published)
"""
# update_fields = list(self._serializer.opts.update_fields)
# publish_fields = list(self._serializer.opts.publish_fields)
# relevant_fields = set(update_fields + publish_fields)
relevant_fields = self._serializer.base_fields
if 'id' in relevant_fields:
relevant_fields.remove('id')
return relevant_fields
def get_changed_fields(self):
changed_fields = []
for k, v in self._pre_save_state.items():
val = get_property(self, k)
if val != v:
changed_fields.append(k)
return changed_fields
def serialize(self):
return self._serializer.serialize()
def _publish(self, action, changed_fields=None):
publish_model(self, self._serializer, action, changed_fields)
def save(self, *args, **kwargs):
logger.info(self.pk)
# if not self.pk:
# http://stackoverflow.com/questions/11561722/django-what-is-the-role-of-modelstate
if self._state.adding:
self.action = PUBACTIONS.created
self.changed_fields = None
else:
self.action = PUBACTIONS.updated
self.changed_fields = self.get_changed_fields()
super(SelfPublishModel, self).save(*args, **kwargs)
self._publish(self.action, self.changed_fields)
@receiver(m2m_changed)
def _self_publish_model_m2m_change(sender, instance, action, model, pk_set, **kwargs):
if not isinstance(instance, SelfPublishModel):
return
instance.action = PUBACTIONS.updated
if action in ['post_add', 'post_clear', 'post_remove']:
instance._publish(instance.action, instance._serializer.opts.publish_fields)
@receiver(pre_delete)
def _self_publish_model_delete(sender, instance, **kwargs):
if isinstance(instance, SelfPublishModel):
instance._publish(PUBACTIONS.deleted)
| Python | 0.000003 |
21bbf9ec71c2d63f5c826dfdc3641927692cb202 | test test | test.py | test.py | from flask import Flask
import pytest
def test_app():
app = Flask(__name__)
app.testing = True
@app.route("/")
def hello():
return "Hello World!"
# app.run() # this actually works here...
with app.test_client() as client:
response = client.get("/")
assert response.status_code == 200
assert response.data == "Hello World!"
print response.headers
assert False
| from flask import Flask
import pytest
def test_app():
app = Flask(__name__)
app.testing = True
@app.route("/")
def hello():
return "Hello World!"
# app.run() # this actually works here...
client = app.test_client()
response = client.get("/")
assert response.status_code == 200
assert response.data == "Hello World!"
print response.headers
assert False
| Python | 0.000037 |
727078f0d7105138310f0870f8ab3a751e0f72da | Fix linting issues in test runner | test.py | test.py | """
Run all tests in this project.
"""
import unittest
if __name__ == "__main__":
loader = unittest.TestLoader()
tests = loader.discover(".", pattern="test_*.py")
runner = unittest.TextTestRunner()
runner.run(tests)
| # Run all tests in this project
import os
import sys
import unittest
if __name__=="__main__":
loader = unittest.TestLoader()
tests = loader.discover(".", pattern="test_*.py")
runner = unittest.TextTestRunner()
runner.run(tests)
| Python | 0.000001 |
26fc8789445c22f85467387bec7eeb6eccedc2c5 | Stop before starting when restarting | synapse/app/synctl.py | synapse/app/synctl.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import subprocess
import signal
SYNAPSE = ["python", "-m", "synapse.app.homeserver"]
CONFIGFILE="homeserver.yaml"
PIDFILE="homeserver.pid"
GREEN="\x1b[1;32m"
NORMAL="\x1b[m"
def start():
if not os.path.exists(CONFIGFILE):
sys.stderr.write(
"No config file found\n"
"To generate a config file, run '%s -c %s --generate-config"
" --server-name=<server name>'\n" % (
" ".join(SYNAPSE), CONFIGFILE
)
)
sys.exit(1)
print "Starting ...",
args = SYNAPSE
args.extend(["--daemonize", "-c", CONFIGFILE, "--pid-file", PIDFILE])
subprocess.check_call(args)
print GREEN + "started" + NORMAL
def stop():
if os.path.exists(PIDFILE):
pid = int(open(PIDFILE).read())
os.kill(pid, signal.SIGTERM)
print GREEN + "stopped" + NORMAL
def main():
action = sys.argv[1] if sys.argv[1:] else "usage"
if action == "start":
start()
elif action == "stop":
stop()
elif action == "restart":
stop()
start()
else:
sys.stderr.write("Usage: %s [start|stop|restart]\n" % (sys.argv[0],))
sys.exit(1)
if __name__=='__main__':
main()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import subprocess
import signal
SYNAPSE = ["python", "-m", "synapse.app.homeserver"]
CONFIGFILE="homeserver.yaml"
PIDFILE="homeserver.pid"
GREEN="\x1b[1;32m"
NORMAL="\x1b[m"
def start():
if not os.path.exists(CONFIGFILE):
sys.stderr.write(
"No config file found\n"
"To generate a config file, run '%s -c %s --generate-config"
" --server-name=<server name>'\n" % (
" ".join(SYNAPSE), CONFIGFILE
)
)
sys.exit(1)
print "Starting ...",
args = SYNAPSE
args.extend(["--daemonize", "-c", CONFIGFILE, "--pid-file", PIDFILE])
subprocess.check_call(args)
print GREEN + "started" + NORMAL
def stop():
if os.path.exists(PIDFILE):
pid = int(open(PIDFILE).read())
os.kill(pid, signal.SIGTERM)
print GREEN + "stopped" + NORMAL
def main():
action = sys.argv[1] if sys.argv[1:] else "usage"
if action == "start":
start()
elif action == "stop":
stop()
elif action == "restart":
start()
stop()
else:
sys.stderr.write("Usage: %s [start|stop|restart]\n" % (sys.argv[0],))
sys.exit(1)
if __name__=='__main__':
main()
| Python | 0.00005 |
56476902b36ec8b9d7bfcaa3b8442eb51745d044 | Set DISPLAY variable on prelaunched processes so the search UI pops up in the right place. | src/prelaunchd.py | src/prelaunchd.py | # Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The prelaunchd's job is to keep a quickopen instance warmed up in the
# background, and service "give me a prelauncher" requests from quickopend
# clients.
import os
import subprocess
import logging
def _is_port_bindable(host, port):
import socket
s = socket.socket()
try:
s.bind((host, port))
except socket.error:
return False
s.close()
return True
class PrelaunchedProcess(object):
def __init__(self, proc, port):
if not isinstance(proc, subprocess.Popen):
raise "Expected subprocess"
self.proc = proc
self.port = port
@property
def pid(self):
return self.proc.pid
def poll(self):
return self.proc.poll()
def kill(self):
self.proc.kill()
class PrelaunchDaemon(object):
def __init__(self, server):
server.add_json_route('/existing_quickopen/(.+)', self.get_existing_quickopen, ['GET'])
server.exit.add_listener(self._on_exit)
server.lo_idle.add_listener(self._join_in_use_processes)
self._quickopen = {}
self._in_use_processes = []
self._next_control_port = 27412
def _get_another_control_port(self):
self._next_control_port += 1
for i in range(100):
self._next_control_port += 1
if not _is_port_bindable("", self._next_control_port):
continue
return self._next_control_port
raise Exception("Could not find open control port")
def _launch_new_quickopen(self, display):
assert display not in self._quickopen
quickopen_script = os.path.join(os.path.dirname(__file__), "../quickopen")
assert os.path.exists(quickopen_script)
control_port = self._get_another_control_port()
env = {}
if display != 'cocoa' and display != 'terminal':
env["DISPLAY"] = display
proc = subprocess.Popen([quickopen_script,
"prelaunch",
"--wait",
"--control-port",
str(control_port)],
env=env)
self._quickopen[display] = PrelaunchedProcess(proc, control_port)
def get_existing_quickopen(self, m, verb, data):
display = m.group(1)
if display not in self._quickopen:
self._launch_new_quickopen(display)
try:
proc = self._quickopen[display]
del self._quickopen[display]
self._in_use_processes.append(proc)
return proc.port
finally:
# todo, move this to another place... ideally, when the previous prelaunch quits
self._launch_new_quickopen(display)
pass
def _on_exit(self):
self.stop()
def _join_in_use_processes(self):
procs = list(self._in_use_processes)
del self._in_use_processes[:]
for p in procs:
if not p.poll():
self._in_use_processes.append(p)
else:
logging.debug("prelaunched pid=%i is gone" % p.pid)
def stop(self):
logging.debug("closing prelaunched quickopen")
for proc in self._quickopen.values():
proc.kill()
self._quickopen = {}
self._join_in_use_processes()
for p in self._in_use_processes:
if not p.poll():
logging.debug("killing %i" % p.pid)
try:
p.kill()
except:
pass
| # Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The prelaunchd's job is to keep a quickopen instance warmed up in the
# background, and service "give me a prelauncher" requests from quickopend
# clients.
import os
import subprocess
import logging
def _is_port_bindable(host, port):
import socket
s = socket.socket()
try:
s.bind((host, port))
except socket.error:
return False
s.close()
return True
class PrelaunchedProcess(object):
def __init__(self, proc, port):
if not isinstance(proc, subprocess.Popen):
raise "Expected subprocess"
self.proc = proc
self.port = port
@property
def pid(self):
return self.proc.pid
def poll(self):
return self.proc.poll()
def kill(self):
self.proc.kill()
class PrelaunchDaemon(object):
def __init__(self, server):
server.add_json_route('/existing_quickopen/(.+)', self.get_existing_quickopen, ['GET'])
server.exit.add_listener(self._on_exit)
server.lo_idle.add_listener(self._join_in_use_processes)
self._quickopen = {}
self._in_use_processes = []
self._next_control_port = 27412
def _get_another_control_port(self):
self._next_control_port += 1
for i in range(100):
self._next_control_port += 1
if not _is_port_bindable("", self._next_control_port):
continue
return self._next_control_port
raise Exception("Could not find open control port")
def _launch_new_quickopen(self, display):
assert display not in self._quickopen
quickopen_script = os.path.join(os.path.dirname(__file__), "../quickopen")
assert os.path.exists(quickopen_script)
control_port = self._get_another_control_port()
proc = subprocess.Popen([quickopen_script,
"prelaunch",
"--wait",
"--control-port",
str(control_port)])
self._quickopen[display] = PrelaunchedProcess(proc, control_port)
def get_existing_quickopen(self, m, verb, data):
display = m.group(1)
if display not in self._quickopen:
self._launch_new_quickopen(display)
try:
proc = self._quickopen[display]
del self._quickopen[display]
self._in_use_processes.append(proc)
return proc.port
finally:
# todo, move this to another place... ideally, when the previous prelaunch quits
self._launch_new_quickopen(display)
pass
def _on_exit(self):
self.stop()
def _join_in_use_processes(self):
procs = list(self._in_use_processes)
del self._in_use_processes[:]
for p in procs:
if not p.poll():
self._in_use_processes.append(p)
else:
logging.debug("prelaunched pid=%i is gone" % p.pid)
def stop(self):
logging.debug("closing prelaunched quickopen")
for proc in self._quickopen.values():
proc.kill()
self._quickopen = {}
self._join_in_use_processes()
for p in self._in_use_processes:
if not p.poll():
logging.debug("killing %i" % p.pid)
try:
p.kill()
except:
pass
| Python | 0 |
6c02b743ad3859e05eeb980298e54acf3fbd9788 | Add __len__ to FlagField (#3981) | allennlp/data/fields/flag_field.py | allennlp/data/fields/flag_field.py | from typing import Any, Dict, List
from overrides import overrides
from allennlp.data.fields.field import Field
class FlagField(Field[Any]):
"""
A class representing a flag, which must be constant across all instances in a batch.
This will be passed to a `forward` method as a single value of whatever type you pass in.
"""
def __init__(self, flag_value: Any) -> None:
self.flag_value = flag_value
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
return {}
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> Any:
return self.flag_value
@overrides
def empty_field(self):
# Because this has to be constant across all instances in a batch, we need to keep the same
# value.
return FlagField(self.flag_value)
def __str__(self) -> str:
return f"FlagField({self.flag_value})"
def __len__(self) -> int:
return 1
@overrides
def batch_tensors(self, tensor_list: List[Any]) -> Any:
if len(set(tensor_list)) != 1:
raise ValueError(
f"Got different values in a FlagField when trying to batch them: {tensor_list}"
)
return tensor_list[0]
| from typing import Any, Dict, List
from overrides import overrides
from allennlp.data.fields.field import Field
class FlagField(Field[Any]):
"""
A class representing a flag, which must be constant across all instances in a batch.
This will be passed to a `forward` method as a single value of whatever type you pass in.
"""
def __init__(self, flag_value: Any) -> None:
self.flag_value = flag_value
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
return {}
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> Any:
return self.flag_value
@overrides
def empty_field(self):
# Because this has to be constant across all instances in a batch, we need to keep the same
# value.
return FlagField(self.flag_value)
def __str__(self) -> str:
return f"FlagField({self.flag_value})"
@overrides
def batch_tensors(self, tensor_list: List[Any]) -> Any:
if len(set(tensor_list)) != 1:
raise ValueError(
f"Got different values in a FlagField when trying to batch them: {tensor_list}"
)
return tensor_list[0]
| Python | 0.000013 |
4c66010cf0cd4f763b362b6e84eb67d7ef1278b8 | Make "near" group optional in regex | linter.py | linter.py | #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Jack Brewer
# Copyright (c) 2015 Jack Brewer
#
# License: MIT
"""This module exports the Stylint plugin class."""
from SublimeLinter.lint import NodeLinter, util
class Stylint(NodeLinter):
"""Provides an interface to stylint."""
npm_name = 'stylint'
syntax = 'stylus'
cmd = 'stylint @ *'
executable = 'stylint'
version_requirement = '>= 1.5.0'
regex = r'''(?xi)
# Comments show example output for each line of a Stylint warning
# /path/to/file/example.styl
^.*$\s*
# 177:24 colors warning hexidecimal color should be a variable
^(?P<line>\d+):?(?P<near>\d+)?\s*\w+\s*((?P<warning>warning)|(?P<error>error))\s*(?P<message>.+)$\s*
'''
multiline = True
error_stream = util.STREAM_STDOUT
tempfile_suffix = 'styl'
config_file = ('--config', '.stylintrc', '~')
| #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Jack Brewer
# Copyright (c) 2015 Jack Brewer
#
# License: MIT
"""This module exports the Stylint plugin class."""
from SublimeLinter.lint import NodeLinter, util
class Stylint(NodeLinter):
"""Provides an interface to stylint."""
npm_name = 'stylint'
syntax = 'stylus'
cmd = 'stylint @ *'
executable = 'stylint'
version_requirement = '>= 1.5.0'
regex = r'''(?xi)
# Comments show example output for each line of a Stylint warning
# /path/to/file/example.styl
^.*$\s*
# 177:24 colors warning hexidecimal color should be a variable
^(?P<line>\d+):(?P<near>\d+)\s*\w+\s*((?P<warning>warning)|(?P<error>error))\s*(?P<message>.+)$\s*
'''
multiline = True
error_stream = util.STREAM_STDOUT
tempfile_suffix = 'styl'
config_file = ('--config', '.stylintrc', '~')
| Python | 0.999796 |
ba2db7713d4fbb929c26bf9ce848b0f7b420809d | fix typo | bootmachine/settings_tests.py | bootmachine/settings_tests.py | import os
"""
CONFIGURATION MANAGEMENT
"""
# salt
LOCAL_STATES_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)),
"configuration", "states/")
LOCAL_PILLARS_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)),
"configuration", "pillars/")
REMOTE_STATES_DIR = "/srv/salt/states/"
REMOTE_PILLARS_DIR = "/srv/salt/pillars/"
SALT_INSTALLER_ARCH_201208 = "aur"
SALT_INSTALLER_DEBIAN_6 = "backports"
SALT_INSTALLER_FEDORA_16 = "rpm-stable"
SALT_INSTALLER_FEDORA_17 = "rpm-stable"
SALT_INSTALLER_UBUNTU_1204LTS = "ppa"
# puppet (not yet implemented)
PUPPET_VERSION = NotImplementedError()
PUPPET_RECIPES_DIR = NotImplementedError()
# chef (not yet implemented)
CHEF_VERSION = NotImplementedError()
CHEF_RECIPIES_DIR = NotImplementedError()
"""
PROVIDERS AND SERVER STACK
"""
# Rackspace authentication via openstack-compute
OPENSTACK_USERNAME = os.environ.get("OPENSTACK_COMPUTE_USERNAME")
OPENSTACK_APIKEY = os.environ.get("OPENSTACK_COMPUTE_APIKEY")
# Rackspace authentication via python-novaclient api v2
OS_USERNAME = os.environ.get("OS_USERNAME")
OS_PASSWORD = os.environ.get("OS_PASSWORD")
OS_TENANT_NAME = os.environ.get("OS_TENANT_NAME")
OS_AUTH_URL = os.environ.get("OS_AUTH_URL")
OS_REGION_NAME = os.environ.get("OS_REGION_NAME")
OS_COMPUTE_API_VERSION = os.environ.get("OS_COMPUTE_API_VERSION")
# Amazon authentication via boto
AWS_ACCESS_KEY = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
"""
SECURITY
"""
# Change the default SSH port of 22, suggestion is between 20000 and 65535.
SSH_PORT = "30000"
| import os
"""
CONFIGURATION MANAGEMENT
"""
# salt
LOCAL_SALTSTATES_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)),
"configuration", "states/")
LOCAL_PILLARS_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)),
"configuration", "pillars/")
REMOTE_STATES_DIR = "/srv/salt/states/"
REMOTE_PILLARS_DIR = "/srv/salt/pillars/"
SALT_INSTALLER_ARCH_201208 = "aur"
SALT_INSTALLER_DEBIAN_6 = "backports"
SALT_INSTALLER_FEDORA_16 = "rpm-stable"
SALT_INSTALLER_FEDORA_17 = "rpm-stable"
SALT_INSTALLER_UBUNTU_1204LTS = "ppa"
# puppet (not yet implemented)
PUPPET_VERSION = NotImplementedError()
PUPPET_RECIPES_DIR = NotImplementedError()
# chef (not yet implemented)
CHEF_VERSION = NotImplementedError()
CHEF_RECIPIES_DIR = NotImplementedError()
"""
PROVIDERS AND SERVER STACK
"""
# Rackspace authentication via openstack-compute
OPENSTACK_USERNAME = os.environ.get("OPENSTACK_COMPUTE_USERNAME")
OPENSTACK_APIKEY = os.environ.get("OPENSTACK_COMPUTE_APIKEY")
# Rackspace authentication via python-novaclient api v2
OS_USERNAME = os.environ.get("OS_USERNAME")
OS_PASSWORD = os.environ.get("OS_PASSWORD")
OS_TENANT_NAME = os.environ.get("OS_TENANT_NAME")
OS_AUTH_URL = os.environ.get("OS_AUTH_URL")
OS_REGION_NAME = os.environ.get("OS_REGION_NAME")
OS_COMPUTE_API_VERSION = os.environ.get("OS_COMPUTE_API_VERSION")
# Amazon authentication via boto
AWS_ACCESS_KEY = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
"""
SECURITY
"""
# Change the default SSH port of 22, suggestion is between 20000 and 65535.
SSH_PORT = "30000"
| Python | 0.999991 |
6d4eb6ebfb03f974c2f6fb04992fc25e5a53ece9 | Change docstring | src/psd2svg/rasterizer/batik_rasterizer.py | src/psd2svg/rasterizer/batik_rasterizer.py | # -*- coding: utf-8 -*-
"""
Batik-based rasterizer module.
Download the latest batik rasterizer to use the module. Note Ubuntu 16.04LTS
package is broken and does not work.
Prerequisite:
wget http://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&\
filename=xmlgraphics/batik/binaries/batik-bin-1.9.tar.gz
tar xzf batik-bin-1.9.tar.gz
export BATIK_PATH=./batik-bin-1.9
Deb package:
sudo apt-get install -y libbatik-java
"""
from __future__ import absolute_import, unicode_literals
from PIL import Image
import logging
import os
import subprocess
from psd2svg.utils import temporary_directory
from psd2svg.rasterizer.base_rasterizer import BaseRasterizer
logger = logging.getLogger(__name__)
BATIK_PATH = os.environ.get(
'BATIK_PATH', "/usr/share/java/batik-rasterizer.jar"
)
class BatikRasterizer(BaseRasterizer):
"""Batik rasterizer."""
def __init__(self, jar_path=None, **kwargs):
self.jar_path = jar_path if jar_path else BATIK_PATH
assert os.path.exists(self.jar_path)
def rasterize(self, url, size=None, format="png"):
with temporary_directory() as d:
basename, ext = os.path.splitext(os.path.basename(url))
output_file = os.path.join(d, "{}.{}".format(basename, format))
cmd = [
"java", "-Djava.awt.headless=true",
"-jar", self.jar_path,
"-bg", "0.255.255.255",
"-m", "image/{}".format(format),
"-d", d,
"{}".format(url),
]
if size:
cmd += ["-w", size[0], "-h", size[1]]
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = proc.communicate()
try:
assert os.path.exists(output_file)
rasterized = Image.open(output_file)
except:
logger.error("{}\n{}{}".format(" ".join(cmd), stdout, stderr))
raise
return self.composite_background(rasterized)
| # -*- coding: utf-8 -*-
"""
Batik-based rasterizer module.
Download the latest batik rasterizer to use the module. Note Ubuntu 16.04LTS
package is broken and does not work.
Prerequisite:
wget http://www.apache.org/dyn/mirrors/mirrors.cgi?action=download&\
filename=xmlgraphics/batik/binaries/batik-bin-1.9.tar.gz
export BATIK_PATH=./batik-bin-1.9.tar.gz
Deb package:
sudo apt-get install -y libbatik-java
"""
from __future__ import absolute_import, unicode_literals
from PIL import Image
import logging
import os
import subprocess
from psd2svg.utils import temporary_directory
from psd2svg.rasterizer.base_rasterizer import BaseRasterizer
logger = logging.getLogger(__name__)
BATIK_PATH = os.environ.get(
'BATIK_PATH', "/usr/share/java/batik-rasterizer.jar"
)
class BatikRasterizer(BaseRasterizer):
"""Batik rasterizer."""
def __init__(self, jar_path=None, **kwargs):
self.jar_path = jar_path if jar_path else BATIK_PATH
assert os.path.exists(self.jar_path)
def rasterize(self, url, size=None, format="png"):
with temporary_directory() as d:
basename, ext = os.path.splitext(os.path.basename(url))
output_file = os.path.join(d, "{}.{}".format(basename, format))
cmd = [
"java", "-Djava.awt.headless=true",
"-jar", self.jar_path,
"-bg", "0.255.255.255",
"-m", "image/{}".format(format),
"-d", d,
"{}".format(url),
]
if size:
cmd += ["-w", size[0], "-h", size[1]]
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = proc.communicate()
try:
assert os.path.exists(output_file)
rasterized = Image.open(output_file)
except:
logger.error("{}\n{}{}".format(" ".join(cmd), stdout, stderr))
raise
return self.composite_background(rasterized)
| Python | 0.000002 |
b8ac65a810a08e11a2f429db08e8b0d4d00651d6 | Add ALLOW_HOSTS in production settings | src/biocloud/settings/production.py | src/biocloud/settings/production.py | # In production set the environment variable like this:
# DJANGO_SETTINGS_MODULE=my_proj.settings.production
from .base import * # NOQA
import logging.config
# For security and performance reasons, DEBUG is turned off
DEBUG = False
# Must mention ALLOWED_HOSTS in production!
ALLOWED_HOSTS = ['172.16.0.66']
# Cache the templates in memory for speed-up
loaders = [
(
'django.template.loaders.cached.Loader',
[
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
),
]
TEMPLATES[0]['OPTIONS'].update({"loaders": loaders})
TEMPLATES[0]['OPTIONS'].update({"debug": False})
TEMPLATES[0]['APP_DIRS'] = False
# Email settings
EMAIL_BACKEND = env.str('EMAIL_BACKEND')
EMAIL_HOST = env.str('EMAIL_HOST')
EMAIL_HOST_USER = env.str('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = env.str('EMAIL_HOST_PASSWORD')
EMAIL_PORT = env.int('EMAIL_PORT')
EMAIL_USE_SSL = env.bool('EMAIL_USE_SSL')
EMAIL_USE_TLS = env.bool('EMAIL_USE_TLS')
DEFAULT_FROM_EMAIL = SERVER_EMAIL = '{name} <{addr}>'.format(
name='BioCloud Dev',
addr='biocloud@liang2.io',
)
# Securiy related settings
# SECURE_HSTS_SECONDS = 2592000
# SECURE_BROWSER_XSS_FILTER = True
# SECURE_CONTENT_TYPE_NOSNIFF=True
# SESSION_COOKIE_SECURE = True
# CSRF_COOKIE_SECURE = True
# CSRF_COOKIE_HTTPONLY = True
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# X_FRAME_OPTIONS = 'DENY'
# Log everything to the logs directory at the top
LOGFILE_ROOT = join(BASE_DIR, 'logs')
# Reset logging
LOGGING_CONFIG = None
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': (
'[%(asctime)s] %(levelname)s '
'[%(pathname)s:%(lineno)s] %(message)s'
),
'datefmt': "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'django_log_file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': join(LOGFILE_ROOT, 'django.log'),
'formatter': 'verbose'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'django': {
'handlers': ['django_log_file', ],
'propagate': True,
'level': 'DEBUG',
},
}
}
for app in LOCAL_APPS:
app_handler = '%s_log_file' % app
app_log_filepath = '%s.log' % app
LOGGING['loggers'][app] = {
'handlers': [app_handler, 'console', ],
'level': 'DEBUG',
}
LOGGING['handlers'][app_handler] = {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': join(LOGFILE_ROOT, app_log_filepath),
'formatter': 'verbose',
}
logging.config.dictConfig(LOGGING)
| # In production set the environment variable like this:
# DJANGO_SETTINGS_MODULE=my_proj.settings.production
from .base import * # NOQA
import logging.config
# For security and performance reasons, DEBUG is turned off
DEBUG = False
# Must mention ALLOWED_HOSTS in production!
# ALLOWED_HOSTS = []
# Cache the templates in memory for speed-up
loaders = [
(
'django.template.loaders.cached.Loader',
[
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
),
]
TEMPLATES[0]['OPTIONS'].update({"loaders": loaders})
TEMPLATES[0]['OPTIONS'].update({"debug": False})
TEMPLATES[0]['APP_DIRS'] = False
# Email settings
EMAIL_BACKEND = env.str('EMAIL_BACKEND')
EMAIL_HOST = env.str('EMAIL_HOST')
EMAIL_HOST_USER = env.str('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = env.str('EMAIL_HOST_PASSWORD')
EMAIL_PORT = env.int('EMAIL_PORT')
EMAIL_USE_SSL = env.bool('EMAIL_USE_SSL')
EMAIL_USE_TLS = env.bool('EMAIL_USE_TLS')
DEFAULT_FROM_EMAIL = SERVER_EMAIL = '{name} <{addr}>'.format(
name='BioCloud Dev',
addr='biocloud@liang2.io',
)
# Securiy related settings
# SECURE_HSTS_SECONDS = 2592000
# SECURE_BROWSER_XSS_FILTER = True
# SECURE_CONTENT_TYPE_NOSNIFF=True
# SESSION_COOKIE_SECURE = True
# CSRF_COOKIE_SECURE = True
# CSRF_COOKIE_HTTPONLY = True
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# X_FRAME_OPTIONS = 'DENY'
# Log everything to the logs directory at the top
LOGFILE_ROOT = join(BASE_DIR, 'logs')
# Reset logging
LOGGING_CONFIG = None
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': (
'[%(asctime)s] %(levelname)s '
'[%(pathname)s:%(lineno)s] %(message)s'
),
'datefmt': "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'django_log_file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': join(LOGFILE_ROOT, 'django.log'),
'formatter': 'verbose'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'django': {
'handlers': ['django_log_file', ],
'propagate': True,
'level': 'DEBUG',
},
}
}
for app in LOCAL_APPS:
app_handler = '%s_log_file' % app
app_log_filepath = '%s.log' % app
LOGGING['loggers'][app] = {
'handlers': [app_handler, 'console', ],
'level': 'DEBUG',
}
LOGGING['handlers'][app_handler] = {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': join(LOGFILE_ROOT, app_log_filepath),
'formatter': 'verbose',
}
logging.config.dictConfig(LOGGING)
| Python | 0 |
73f49b5603802ccce3a9c4db0ee0b2eaa4bf0e7f | Update startup script (lyli.py) | lyli.py | lyli.py | #!flask/bin/python
import logging
import werkzeug.serving
from app import app
import config
# we are behind a proxy. log the ip of the end-user, not the proxy.
# this will also work without the proxy
werkzeug.serving.WSGIRequestHandler.address_string = lambda self: self.headers.get('x-real-ip', self.client_address[0])
# log to a file (access.log), not stderr
logging.basicConfig(filename='access.log', level=logging.DEBUG, format='%(message)s')
if config.debug:
app.run(port=3003, debug=True, use_reloader=True)
else:
app.run(port=3004, debug=False, use_reloader=False)
| #!flask/bin/python
import logging
from os import fork
import werkzeug.serving
from app import app
pid = fork()
if pid > 0:
print('PID: %d' % pid)
exit(0)
elif pid < 0:
print('Could not fork: %d' % pid)
exit(1)
# we are behind a proxy. log the ip of the end-user, not the proxy.
# this will also work without the proxy
werkzeug.serving.WSGIRequestHandler.address_string = lambda self: self.headers.get('x-real-ip', self.client_address[0])
# log to a file (access.log), not stderr
logging.basicConfig(filename='access.log', level=logging.DEBUG, format='%(message)s')
app.run(port=3004, debug=False, use_reloader=False)
#app.run(port=3003, debug=True, use_reloader=True)
| Python | 0 |
531ada2164f4c184d298110e518415233419bd9f | Update poisson_2d_square_0.py | demo/poisson_2d_square_0.py | demo/poisson_2d_square_0.py | #
# Solve -laplace(u) = f in (-1, 1)^2 with T(u) = 0 [1]
#
from sympy import symbols, integrate
from lega.shen_basis import mass_matrix, stiffness_matrix, load_vector
from lega.legendre_basis import ForwardLegendreTransformation as FLT
import scipy.linalg as la
import numpy as np
def get_rhs(u):
'''
Verify that u satisfies boundary conditions and compute the right hand
side f.
'''
x, y = symbols('x, y')
assert integrate(abs(u.subs(x, -1)), (y, -1, 1)) < 1E-15
assert integrate(abs(u.subs(x, 1)), (y, -1, 1)) < 1E-15
assert integrate(abs(u.subs(y, -1)), (x, -1, 1)) < 1E-15
assert integrate(abs(u.subs(y, 1)), (x, -1, 1)) < 1E-15
# Right hand side if u is to be the solution
f = -u.diff(x, 2) - u.diff(y, 2)
return f
def solve_poisson_2d(f, n):
'''Solve the Poisson problem by nxn Shen polynomials.'''
A = stiffness_matrix(n)
M = mass_matrix(n)
F = FLT([n+2, n+2])(f)
b = load_vector(F) # nxn matrix
# Solve the problem by tensor product solver
lmbda, V = la.eigh(A.toarray(), M.toarray())
# Map the right hand side to eigen space
bb = (V.T).dot(b.dot(V))
# Apply the inverse in eigen space
U_ = np.array([[bb[i, j]/(lmbda[i] + lmbda[j])
for j in range(n)]
for i in range(n)])
# Map back to physical space
U = (V).dot(U_.dot(V.T))
return U
# -----------------------------------------------------------------------------
if __name__ == '__main__':
from sympy import sin, pi, lambdify
from lega.shen_basis import shen_function, legendre_to_shen_matrix
from lega.legendre_basis import mass_matrix as L_mass_matrix
from sympy.plotting import plot3d
from sympy.mpmath import quad
from math import sqrt
# Setup
x, y = symbols('x, y')
u = (x**2-1)*sin(2*pi*y)
f = get_rhs(u)
n_max = 30
# Representation of exact solution in the Legendre basis
u_leg = FLT([n_max+2, n_max+2])(u)
n = 2
tol = 1E-14
converged = False
while not converged:
U = solve_poisson_2d(f, n) # w.r.t to shen
#TODO: should add symbolic as well, just here and only for comparison!
# Error using representation w.r.t to Shen basis and the mass matrix
# Turn U from shen to Legendre
Tmat = legendre_to_shen_matrix(n+2)
U_leg = Tmat.T.dot(U.dot(Tmat.toarray())) # n+2 x n . n x n . n x n+2
# Subract on the subspace
E = u_leg[:n+2, :n+2] - U_leg
# Legendre mass matrix computes the L2 error
M = L_mass_matrix(n+2)
error = sqrt(np.trace((M.dot(E)).dot(M.dot(E.T))))
print 'n=%d {e}_2=%.4E' % (n, error)
converged = error < tol or n > n_max-1
n += 1
# Plot the symbolic error
uh = shen_function(U)
e = u - uh
plot3d(e, (x, -1, 1), (y, -1, 1))
| #
# Solve -laplace(u) = f in (-1, 1)^2 with T(u) = 0 [1]
#
from sympy import symbols, integrate
from lega.shen_basis import mass_matrix, stiffness_matrix, load_vector
from lega.legendre_basis import ForwardLegendreTransformation as FLT
import scipy.linalg as la
import numpy as np
def get_rhs(u):
'''
Verify that u satisfies boundary conditions and compute the right hand
side f.
'''
x, y = symbols('x, y')
assert integrate(abs(u.subs(x, -1)), (y, -1, 1)) < 1E-15
assert integrate(abs(u.subs(x, 1)), (y, -1, 1)) < 1E-15
assert integrate(abs(u.subs(y, -1)), (x, -1, 1)) < 1E-15
assert integrate(abs(u.subs(y, 1)), (x, -1, 1)) < 1E-15
# Right hand side if u is to be the solution
f = -u.diff(x, 2) - u.diff(y, 2)
return f
def solve_poisson_2d(f, n):
'''Solve the Poisson problem by nxn Shen polynomials.'''
A = stiffness_matrix(n)
M = mass_matrix(n)
F = FLT([n+2, n+2])(f)
b = load_vector(F) # nxn matrix
# Solve the problem by tensor product solver
lmbda, V = la.eigh(A.toarray(), M.toarray())
# Map the right hand side to eigen space
bb = (V.T).dot(b.dot(V))
# Apply the inverse in eigen space
U_ = np.array([[bb[i, j]/(lmbda[i] + lmbda[j])
for j in range(n)]
for i in range(n)])
# Map back to physical space
U = (V).dot(U_.dot(V.T))
return U
# -----------------------------------------------------------------------------
if __name__ == '__main__':
from sympy import sin, pi, lambdify
from lega.shen_basis import shen_function, legendre_to_shen_matrix
from lega.legendre_basis import mass_matrix as L_mass_matrix
from sympy.plotting import plot3d
from sympy.mpmath import quad
from math import sqrt
# Setup
x, y = symbols('x, y')
u = (x**2-1)*sin(2*pi*y)
f = get_rhs(u)
n_max = 30
# Representation of exact solution in the Legendre basis
u_leg = FLT([n_max+2, n_max+2])(u)
n = 2
tol = 1E-14
converged = False
while not converged:
U = solve_poisson_2d(f, n) # w.r.t to shen
# Error using representation w.r.t to Shen basis and the mass matrix
# Turn U from shen to Legendre
Tmat = legendre_to_shen_matrix(n+2)
U_leg = Tmat.T.dot(U.dot(Tmat.toarray())) # n+2 x n . n x n . n x n+2
# Subract on the subspace
E = u_leg[:n+2, :n+2] - U_leg
# Legendre mass matrix computes the L2 error
M = L_mass_matrix(n+2)
error = sqrt(np.trace((M.dot(E)).dot(M.dot(E.T))))
print 'n=%d {e}_2=%.4E' % (n, error)
converged = error < tol or n > n_max-1
n += 1
# Plot the symbolic error
uh = shen_function(U)
e = u - uh
plot3d(e, (x, -1, 1), (y, -1, 1))
| Python | 0.000003 |
8a74b2f49314f780864f39d04ddaea4695633c21 | Add support for feed deltas | src/crawler/lib/headers_handling.py | src/crawler/lib/headers_handling.py | from datetime import timedelta, timezone
import dateutil
import logging
import re
from bootstrap import conf
from lib.utils import to_hash, utc_now
logger = logging.getLogger(__name__)
MAX_AGE_RE = re.compile('max-age=([0-9]+)')
RFC_1123_FORMAT = '%a, %d %b %Y %X %Z'
def rfc_1123_utc(time_obj=None, delta=None):
"""return time obj or now formated in the RFC1123 style. Add time delta if
present.
"""
if time_obj is None:
time_obj = utc_now()
if delta is not None:
time_obj += delta
return time_obj.strftime(RFC_1123_FORMAT)
def _extract_max_age(headers, feed_info, now):
if 'max-age' in headers.get('cache-control', ''):
try:
max_age = int(MAX_AGE_RE.search(headers['cache-control']).group(1))
feed_info['expires'] = now + timedelta(seconds=max_age)
except Exception:
pass
def _extract_expires(headers, feed_info):
if headers.get('expires'):
try:
expires = dateutil.parser.parse(headers['expires'])
if expires.tzinfo:
expires = expires.astimezone(timezone.utc)
else:
expires = expires.replace(tzinfo=timezone.utc)
feed_info['expires'] = expires
except Exception:
pass
def extract_feed_info(headers):
"""providing the headers of a feed response, will calculate the headers
needed for basic cache control.
will extract etag and last modified.
will calculate expires, with limit define in configuration file by
FEED_MIN_EXPIRES and FEED_MAX_EXPIRES.
"""
now = utc_now()
min_expires = now + timedelta(seconds=conf.FEED_MIN_EXPIRES)
max_expires = now + timedelta(seconds=conf.FEED_MAX_EXPIRES)
feed_info = {'etag': headers.get('etag', ''),
'last_modified': headers.get('last-modified', rfc_1123_utc())}
_extract_max_age(headers, feed_info, now)
if 'expires' not in feed_info:
_extract_expires(headers, feed_info)
if not feed_info.get('expires'):
feed_info['expires'] = None
elif max_expires < feed_info['expires']:
logger.info("expiring too late, forcing expiring at %r",
max_expires.isoformat())
feed_info['expires'] = max_expires
elif feed_info['expires'] < min_expires:
logger.info("expiring too early, forcing expiring at %r",
min_expires.isoformat())
feed_info['expires'] = min_expires + timedelta(minutes=5)
return feed_info
def prepare_headers(feed):
"""For a known feed, will construct some header dictionnary"""
headers = {'User-Agent': conf.CRAWLER_USER_AGENT}
if feed.get('last_modified'):
headers['If-Modified-Since'] = feed['last_modified']
if feed.get('etag') and 'jarr' not in feed['etag']:
headers['If-None-Match'] = feed['etag']
if 'If-Modified-Since' in headers or 'If-None-Match' in headers:
headers['A-IM'] = 'feed'
logger.debug('%r %r - calculated headers %r',
feed['id'], feed['title'], headers)
return headers
def response_match_cache(response, feed):
if 'etag' not in response.headers:
logger.debug('%r %r - manually generating etag',
feed['id'], feed['title'])
response.headers['etag'] = 'jarr/"%s"' % to_hash(response.text)
if response.headers['etag'] and feed['etag'] \
and response.headers['etag'] == feed['etag']:
if 'jarr' in feed['etag']:
logger.info("%r %r - calculated hash matches (%d)",
feed['id'], feed['title'], response.status_code)
else:
logger.info("%r %r - feed responded with same etag (%d)",
feed['id'], feed['title'], response.status_code)
return True
return False
| from datetime import timedelta, timezone
import dateutil
import logging
import re
from bootstrap import conf
from lib.utils import to_hash, utc_now
logger = logging.getLogger(__name__)
MAX_AGE_RE = re.compile('max-age=([0-9]+)')
RFC_1123_FORMAT = '%a, %d %b %Y %X %Z'
def rfc_1123_utc(time_obj=None, delta=None):
"""return time obj or now formated in the RFC1123 style. Add time delta if
present.
"""
if time_obj is None:
time_obj = utc_now()
if delta is not None:
time_obj += delta
return time_obj.strftime(RFC_1123_FORMAT)
def _extract_max_age(headers, feed_info, now):
if 'max-age' in headers.get('cache-control', ''):
try:
max_age = int(MAX_AGE_RE.search(headers['cache-control']).group(1))
feed_info['expires'] = now + timedelta(seconds=max_age)
except Exception:
pass
def _extract_expires(headers, feed_info):
if headers.get('expires'):
try:
expires = dateutil.parser.parse(headers['expires'])
if expires.tzinfo:
expires = expires.astimezone(timezone.utc)
else:
expires = expires.replace(tzinfo=timezone.utc)
feed_info['expires'] = expires
except Exception:
pass
def extract_feed_info(headers):
"""providing the headers of a feed response, will calculate the headers
needed for basic cache control.
will extract etag and last modified.
will calculate expires, with limit define in configuration file by
FEED_MIN_EXPIRES and FEED_MAX_EXPIRES.
"""
now = utc_now()
min_expires = now + timedelta(seconds=conf.FEED_MIN_EXPIRES)
max_expires = now + timedelta(seconds=conf.FEED_MAX_EXPIRES)
feed_info = {'etag': headers.get('etag', ''),
'last_modified': headers.get('last-modified', rfc_1123_utc())}
_extract_max_age(headers, feed_info, now)
if 'expires' not in feed_info:
_extract_expires(headers, feed_info)
if not feed_info.get('expires'):
feed_info['expires'] = None
elif max_expires < feed_info['expires']:
logger.info("expiring too late, forcing expiring at %r",
max_expires.isoformat())
feed_info['expires'] = max_expires
elif feed_info['expires'] < min_expires:
logger.info("expiring too early, forcing expiring at %r",
min_expires.isoformat())
feed_info['expires'] = min_expires + timedelta(minutes=5)
return feed_info
def prepare_headers(feed):
"""For a known feed, will construct some header dictionnary"""
headers = {'User-Agent': conf.CRAWLER_USER_AGENT}
if feed.get('last_modified'):
headers['If-Modified-Since'] = feed['last_modified']
if feed.get('etag') and 'jarr' not in feed['etag']:
headers['If-None-Match'] = feed['etag']
logger.debug('%r %r - calculated headers %r',
feed['id'], feed['title'], headers)
return headers
def response_match_cache(response, feed):
if 'etag' not in response.headers:
logger.debug('%r %r - manually generating etag',
feed['id'], feed['title'])
response.headers['etag'] = 'jarr/"%s"' % to_hash(response.text)
if response.headers['etag'] and feed['etag'] \
and response.headers['etag'] == feed['etag']:
if 'jarr' in feed['etag']:
logger.info("%r %r - calculated hash matches (%d)",
feed['id'], feed['title'], response.status_code)
else:
logger.info("%r %r - feed responded with same etag (%d)",
feed['id'], feed['title'], response.status_code)
return True
return False
| Python | 0 |
1b996bf797b5e1a0203054f11001771ede309b23 | remove dead code | scrapi/harvesters/smithsonian.py | scrapi/harvesters/smithsonian.py | '''
Harvester for the Smithsonian Digital Repository for the SHARE project
Example API call: http://repository.si.edu/oai/request?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
import re
from scrapi.base import helpers
from scrapi.base import OAIHarvester
class SiHarvester(OAIHarvester):
short_name = 'smithsonian'
long_name = 'Smithsonian Digital Repository'
url = 'http://repository.si.edu'
@property
def schema(self):
return helpers.updated_schema(self._schema, {
"uris": ('//dc:identifier/node()', helpers.oai_process_uris)
})
base_url = 'http://repository.si.edu/oai/request'
property_list = ['date', 'identifier', 'type', 'format', 'setSpec']
timezone_granularity = True
| '''
Harvester for the Smithsonian Digital Repository for the SHARE project
Example API call: http://repository.si.edu/oai/request?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
import re
from scrapi.base import helpers
from scrapi.base import OAIHarvester
class SiHarvester(OAIHarvester):
short_name = 'smithsonian'
long_name = 'Smithsonian Digital Repository'
url = 'http://repository.si.edu'
@property
def schema(self):
return helpers.updated_schema(self._schema, {
"uris": ('//dc:identifier/node()', helpers.oai_process_uris)
})
base_url = 'http://repository.si.edu/oai/request'
property_list = ['date', 'identifier', 'type', 'format', 'setSpec']
timezone_granularity = True
def get_doi_from_identifier(identifiers):
doi_re = re.compile(r'10\.\S*\/\S*')
identifiers = [identifiers] if not isinstance(identifiers, list) else identifiers
for identifier in identifiers:
try:
found_doi = doi_re.search(identifier).group()
return 'http://dx.doi.org/{}'.format(found_doi)
except AttributeError:
continue
| Python | 0.999454 |
1b7e68c3bdfc2f43f754cc39e1f2f80bfa5bee80 | Add validate_log_translations flake8 check | designate/hacking/checks.py | designate/hacking/checks.py | # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import pep8
mutable_default_argument_check = re.compile(
r"^\s*def .+\((.+=\{\}|.+=\[\])")
log_translation = re.compile(
r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)\(\s*('|\")")
def mutable_default_arguments(logical_line, physical_line, filename):
if pep8.noqa(physical_line):
return
if mutable_default_argument_check.match(logical_line):
yield (0, "D701: Default paramater value is a mutable type")
def validate_log_translations(logical_line, physical_line, filename):
# Translations are not required in the test directory
if "designate/tests" in filename:
return
if pep8.noqa(physical_line):
return
msg = "D702: Log messages require translation"
if log_translation.match(logical_line):
yield (0, msg)
def factory(register):
register(mutable_default_arguments)
register(validate_log_translations)
| # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
mutable_default_argument_check = re.compile(
r"^\s*def .+\((.+=\{\}|.+=\[\])")
def mutable_default_arguments(logical_line, filename):
if mutable_default_argument_check.match(logical_line):
yield (0, "D701: Default paramater value is a mutable type")
def factory(register):
register(mutable_default_arguments)
| Python | 0.000002 |
ec9bd84c7487ef0d3fead1641c5132f2f269b5bc | Use absolute path for the result of glob. | lbuild/repository.py | lbuild/repository.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Fabian Greif
# All Rights Reserved.
#
# The file is part of the lbuild project and is released under the
# 2-clause BSD license. See the file `LICENSE.txt` for the full license
# governing this code.
import os
import glob
from .exception import BlobException
from . import utils
from . import environment
class OptionNameResolver:
def __init__(self, repository, options):
self.repository = repository
self.options = options
def __getitem__(self, key):
o = key.split(":")
if len(o) != 2:
raise BlobException("Option name '%s' must contain exactly one colon " \
"to separate repository and option name.")
repo, option = o
if repo == "":
key = "%s:%s" % (self.repository.name, option)
try:
return self.options[key].value
except KeyError:
raise BlobException("Unknown option name '%s'" % key)
def __repr__(self):
return repr(self.options)
def __len__(self):
return len(self.options)
class Repository:
def __init__(self, path):
# Path to the repository file. All relative paths refer to this path.
self.path = path
self.name = None
# Dict of modules, using the filename as the key
self.modules = {}
# Name -> Option()
self.options = {}
def set_name(self, name):
"""Set name of the repository."""
self.name = name
def _relocate(self, path):
"""
Relocate relative paths to the path of the repository
configuration file.
"""
if not os.path.isabs(path):
path = os.path.join(self.path, path)
return os.path.normpath(path)
def glob(self, pattern):
pattern = os.path.abspath(self._relocate(pattern))
return glob.glob(pattern)
def add_modules(self, modules):
"""
Add one or more module files.
Args:
modules: List of filenames
"""
module_files = utils.listify(modules)
for file in module_files:
file = self._relocate(file)
if not os.path.isfile(file):
raise BlobException("Module file not found '%s'" % file)
self.modules[file] = None
def find_modules(self, basepath="", modulefile="module.lb"):
"""
Find all module files following a specific pattern.
Args:
basepath : Rootpath for the search.
modulefile : Filename of the module files to search
for (default: "module.lb").
"""
basepath = self._relocate(basepath)
for path, _, files in os.walk(basepath):
if modulefile in files:
self.modules[os.path.normpath(os.path.join(path, modulefile))] = None
def add_option(self, name, description, default=None):
"""
Define new repository wide option.
These options can be used by modules to decide whether they are
available and what options they provide for a specific set of
repository options.
"""
self._check_for_duplicates(name)
self.options[name] = environment.Option(name, description, default)
def add_boolean_option(self, name, description, default=None):
self._check_for_duplicates(name)
self.options[name] = environment.BooleanOption(name, description, default)
def add_numeric_option(self, name, description, default=None):
self._check_for_duplicates(name)
self.options[name] = environment.NumericOption(name, description, default)
def _check_for_duplicates(self, name):
if name in self.options:
raise BlobException("Option name '%s' is already defined" % name)
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Fabian Greif
# All Rights Reserved.
#
# The file is part of the lbuild project and is released under the
# 2-clause BSD license. See the file `LICENSE.txt` for the full license
# governing this code.
import os
import glob
from .exception import BlobException
from . import utils
from . import environment
class OptionNameResolver:
def __init__(self, repository, options):
self.repository = repository
self.options = options
def __getitem__(self, key):
o = key.split(":")
if len(o) != 2:
raise BlobException("Option name '%s' must contain exactly one colon " \
"to separate repository and option name.")
repo, option = o
if repo == "":
key = "%s:%s" % (self.repository.name, option)
try:
return self.options[key].value
except KeyError:
raise BlobException("Unknown option name '%s'" % key)
def __repr__(self):
return repr(self.options)
def __len__(self):
return len(self.options)
class Repository:
def __init__(self, path):
# Path to the repository file. All relative paths refer to this path.
self.path = path
self.name = None
# Dict of modules, using the filename as the key
self.modules = {}
# Name -> Option()
self.options = {}
def set_name(self, name):
"""Set name of the repository."""
self.name = name
def _relocate(self, path):
"""
Relocate relative paths to the path of the repository
configuration file.
"""
if not os.path.isabs(path):
path = os.path.join(self.path, path)
return os.path.normpath(path)
def glob(self, pattern):
pattern = self._relocate(pattern)
return glob.glob(pattern)
def add_modules(self, modules):
"""
Add one or more module files.
Args:
modules: List of filenames
"""
module_files = utils.listify(modules)
for file in module_files:
file = self._relocate(file)
if not os.path.isfile(file):
raise BlobException("Module file not found '%s'" % file)
self.modules[file] = None
def find_modules(self, basepath="", modulefile="module.lb"):
"""
Find all module files following a specific pattern.
Args:
basepath : Rootpath for the search.
modulefile : Filename of the module files to search
for (default: "module.lb").
"""
basepath = self._relocate(basepath)
for path, _, files in os.walk(basepath):
if modulefile in files:
self.modules[os.path.normpath(os.path.join(path, modulefile))] = None
def add_option(self, name, description, default=None):
"""
Define new repository wide option.
These options can be used by modules to decide whether they are
available and what options they provide for a specific set of
repository options.
"""
self._check_for_duplicates(name)
self.options[name] = environment.Option(name, description, default)
def add_boolean_option(self, name, description, default=None):
self._check_for_duplicates(name)
self.options[name] = environment.BooleanOption(name, description, default)
def add_numeric_option(self, name, description, default=None):
self._check_for_duplicates(name)
self.options[name] = environment.NumericOption(name, description, default)
def _check_for_duplicates(self, name):
if name in self.options:
raise BlobException("Option name '%s' is already defined" % name) | Python | 0 |
54115d8ecd90da614a24bb910939001b37acd246 | Test pairwise combinations | transmutagen/tests/test_origen.py | transmutagen/tests/test_origen.py | import os
from itertools import combinations
import numpy as np
from ..tape9utils import origen_to_name
DATA_DIR = os.path.abspath(os.path.join(__file__, os.path.pardir,
os.path.pardir, os.path.pardir, 'docker', 'data'))
def load_data(datafile):
with open(datafile) as f:
return eval(f.read(), {'array': np.array})
def test_data():
for datafile in os.listdir(DATA_DIR):
data = load_data(os.path.join(DATA_DIR, datafile))
tape9, time, nuc, phi = datafile.split()[0]
assert 'table_4' in data
assert 'nuclide' in data['table_4']
nuclides = data['table_4']['nuclides']
keys = ['activation_products', 'actinides', 'fission_products']
# Sanity check
for comb in combinations(keys, 2):
assert set.intersection(*comb) == set()
| import os
import numpy as np
DATA_DIR = os.path.abspath(os.path.join(__file__, os.path.pardir,
os.path.pardir, os.path.pardir, 'docker', 'data'))
def load_data(datafile):
with open(datafile) as f:
return eval(f.read(), {'array': np.array})
def test_data():
for datafile in os.listdir(DATA_DIR):
data = load_data(os.path.join(DATA_DIR, datafile))
assert 'table_4' in data
| Python | 0.000009 |
c809f4f286bbec3b4cb1ebbff96c23256dd176e8 | Change PowerVM version to an int | nova_powervm/virt/powervm/host.py | nova_powervm/virt/powervm/host.py | # Copyright 2014 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
from nova.compute import arch
from nova.compute import hv_type
from nova.compute import vm_mode
from oslo_log import log as logging
from oslo_serialization import jsonutils
LOG = logging.getLogger(__name__)
# Power VM hypervisor info
IBM_POWERVM_HYPERVISOR_VERSION = '710'
# The types of LPARS that are supported.
POWERVM_SUPPORTED_INSTANCES = jsonutils.dumps([(arch.PPC64,
hv_type.PHYP,
vm_mode.HVM),
(arch.PPC64LE,
hv_type.PHYP,
vm_mode.HVM)])
# cpu_info that will be returned by build_host_stats_from_entry()
HOST_STATS_CPU_INFO = jsonutils.dumps({'vendor': 'ibm', 'arch': 'ppc64'})
def build_host_resource_from_ms(ms_wrapper):
"""Build the host resource dict from an MS adapter wrapper
This method builds the host resource dictionary from the
ManagedSystem Entry wrapper
:param ms_wrapper: ManagedSystem Entry Wrapper.
"""
data = {}
# Calculate the vcpus
proc_units = ms_wrapper.proc_units_configurable
proc_units_avail = ms_wrapper.proc_units_avail
pu_used = float(proc_units) - float(proc_units_avail)
data['vcpus'] = int(math.ceil(float(proc_units)))
data['vcpus_used'] = int(math.ceil(pu_used))
data['memory_mb'] = ms_wrapper.memory_configurable
data['memory_mb_used'] = (ms_wrapper.memory_configurable -
ms_wrapper.memory_free)
data["hypervisor_type"] = hv_type.PHYP
data["hypervisor_version"] = IBM_POWERVM_HYPERVISOR_VERSION
data["hypervisor_hostname"] = ms_wrapper.mtms.mtms_str
data["cpu_info"] = HOST_STATS_CPU_INFO
data["numa_topology"] = None
data["supported_instances"] = POWERVM_SUPPORTED_INSTANCES
stats = {'proc_units': '%.2f' % float(proc_units),
'proc_units_used': '%.2f' % pu_used
}
data["stats"] = stats
return data
| # Copyright 2014 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
from nova.compute import arch
from nova.compute import hv_type
from nova.compute import vm_mode
from oslo_log import log as logging
from oslo_serialization import jsonutils
LOG = logging.getLogger(__name__)
# Power VM hypervisor info
IBM_POWERVM_HYPERVISOR_VERSION = '7.1'
# The types of LPARS that are supported.
POWERVM_SUPPORTED_INSTANCES = jsonutils.dumps([(arch.PPC64,
hv_type.PHYP,
vm_mode.HVM),
(arch.PPC64LE,
hv_type.PHYP,
vm_mode.HVM)])
# cpu_info that will be returned by build_host_stats_from_entry()
HOST_STATS_CPU_INFO = jsonutils.dumps({'vendor': 'ibm', 'arch': 'ppc64'})
def build_host_resource_from_ms(ms_wrapper):
"""Build the host resource dict from an MS adapter wrapper
This method builds the host resource dictionary from the
ManagedSystem Entry wrapper
:param ms_wrapper: ManagedSystem Entry Wrapper.
"""
data = {}
# Calculate the vcpus
proc_units = ms_wrapper.proc_units_configurable
proc_units_avail = ms_wrapper.proc_units_avail
pu_used = float(proc_units) - float(proc_units_avail)
data['vcpus'] = int(math.ceil(float(proc_units)))
data['vcpus_used'] = int(math.ceil(pu_used))
data['memory_mb'] = ms_wrapper.memory_configurable
data['memory_mb_used'] = (ms_wrapper.memory_configurable -
ms_wrapper.memory_free)
data["hypervisor_type"] = hv_type.PHYP
data["hypervisor_version"] = IBM_POWERVM_HYPERVISOR_VERSION
data["hypervisor_hostname"] = ms_wrapper.mtms.mtms_str
data["cpu_info"] = HOST_STATS_CPU_INFO
data["numa_topology"] = None
data["supported_instances"] = POWERVM_SUPPORTED_INSTANCES
stats = {'proc_units': '%.2f' % float(proc_units),
'proc_units_used': '%.2f' % pu_used
}
data["stats"] = stats
return data
| Python | 0.000001 |
aa262ba141290ba04beb2ec4866b1bad1ea85db2 | Fix applying nan mask to specified mask. | turbustat/cube_tools/sim_cubes.py | turbustat/cube_tools/sim_cubes.py |
'''
Wrapper on spectral_cube for simulated datasets
'''
import numpy as np
import spectral_cube as SpectralCube
try:
from signal_id import Noise
except ImportError:
prefix = "/srv/astro/erickoch/" # Adjust if you're not me!
execfile(prefix + "Dropbox/code_development/signal-id/noise.py")
class SimCube(object):
def __init__(self, cube, beam=None, mask=None, method="MAD", compute=True):
# Initialize cube object
self.cube = SpectralCube.read(cube)
# Initialize noise object
self.noise = Noise(self.cube, beam=beam, method=method)
self.mask = mask
def add_noise(self):
# Create the noisy cube
self.noise.get_noise_cube()
self._noise_cube = self.noise.noise_cube +\
self.cube.filled_data[:]
# Update SpectralCube object
self._update(data=self.noise_cube)
return self
def apply_mask(self, mask=None):
# Update mask
if mask is not None:
self.mask = mask
# Create the mask, auto masking nan values
default_mask = np.isfinite(self.cube)
if self.mask is not None:
self.mask *= default_mask
else:
self.mask = default_mask
# Apply mask to spectral cube object
self.cube = self.cube.with_mask(mask)
return self
def _update(self, data=None, wcs=None, beam=None, method="MAD"):
'''
Helper function to update classes.
'''
# Check if we need a new SpectralCube
if data is None & wcs is None:
pass
else:
if data is None:
data = self.cube.unmasked_data[:]
if wcs is None:
wcs = self.cube.wcs
# Make new SpectralCube object
self.cube = SpectralCube(data=data, wcs=wcs)
if beam is not None:
self.noise = Noise(self.cube, beam=beam, method=method)
def compute_properties(self):
self._moment0 = self.cube.moment0().value
self._moment1 = self.cube.moment1().value
self._moment2 = self.cube.moment2().value
self.get_int_intensity()
return self
@property
def noise_cube(self):
return self._noise_cube
@property
def moment0(self):
return self._moment0
@property
def moment1(self):
return self._moment1
@property
def moment2(self):
return self._moment2
@property
def intint(self):
return self._intint
def get_int_intensity(self):
'''
Get an integrated intensity image of the cube.
'''
good_channels = self.noise.spectral_norm > self.noise.scale
channel_range = self.cube.spectral_axis[good_channels][[0, -1]]
channel_size = np.abs(self.cube.spectral_axis[1] -
self.cube.spectral_axis[0])
slab = self.cube.spectral_slab(*channel_range).filled_data[:]
self._intint = np.nansum(slab, axis=0) * channel_size
return self
|
'''
Wrapper on spectral_cube for simulated datasets
'''
import numpy as np
import spectral_cube as SpectralCube
try:
from signal_id import Noise
except ImportError:
prefix = "/srv/astro/erickoch/" # Adjust if you're not me!
execfile(prefix + "Dropbox/code_development/signal-id/noise.py")
class SimCube(object):
def __init__(self, cube, beam=None, method="MAD", compute=True):
# Initialize cube object
self.cube = SpectralCube.read(cube)
# Initialize noise object
self.noise = Noise(self.cube, beam=beam, method=method)
def add_noise(self):
# Create the noisy cube
self.noise.get_noise_cube()
self._noise_cube = self.noise.noise_cube +\
self.cube.filled_data[:]
# Update SpectralCube object
self._update(data=self.noise_cube)
return self
def apply_mask(self, mask=None):
# Create the mask, auto masking nan values
mask = np.isfinite(self.cube)
if mask is not None:
mask *= mask
# Apply mask to spectral cube object
self.cube = self.cube.with_mask(mask)
return self
def _update(self, data=None, wcs=None, beam=None, method="MAD"):
'''
Helper function to update classes.
'''
# Check if we need a new SpectralCube
if data is None & wcs is None:
pass
else:
if data is None:
data = self.cube.unmasked_data[:]
if wcs is None:
wcs = self.cube.wcs
# Make new SpectralCube object
self.cube = SpectralCube(data=data, wcs=wcs)
if beam is not None:
self.noise = Noise(self.cube, beam=beam, method=method)
def compute_properties(self):
self._moment0 = self.cube.moment0().value
self._moment1 = self.cube.moment1().value
self._moment2 = self.cube.moment2().value
self.get_int_intensity()
return self
@property
def noise_cube(self):
return self._noise_cube
@property
def moment0(self):
return self._moment0
@property
def moment1(self):
return self._moment1
@property
def moment2(self):
return self._moment2
@property
def intint(self):
return self._intint
def get_int_intensity(self):
'''
Get an integrated intensity image of the cube.
'''
good_channels = self.noise.spectral_norm > self.noise.scale
channel_range = self.cube.spectral_axis[good_channels][[0, -1]]
channel_size = np.abs(self.cube.spectral_axis[1] -
self.cube.spectral_axis[0])
slab = self.cube.spectral_slab(*channel_range).filled_data[:]
self._intint = np.nansum(slab, axis=0) * channel_size
return self
| Python | 0 |
89560fd773d833a049824bfa8a7ccf4ce301bed4 | remove utils.push_dir | build/fbcode_builder/utils.py | build/fbcode_builder/utils.py | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
'Miscellaneous utility functions.'
import itertools
import logging
import os
import shutil
import subprocess
import sys
from contextlib import contextmanager
def recursively_flatten_list(l):
return itertools.chain.from_iterable(
(recursively_flatten_list(i) if type(i) is list else (i,))
for i in l
)
def run_command(*cmd, **kwargs):
'The stdout of most fbcode_builder utilities is meant to be parsed.'
logging.debug('Running: {0} with {1}'.format(cmd, kwargs))
kwargs['stdout'] = sys.stderr
subprocess.check_call(cmd, **kwargs)
@contextmanager
def make_temp_dir(d):
os.mkdir(d)
try:
yield d
finally:
shutil.rmtree(d, ignore_errors=True)
def read_fbcode_builder_config(filename):
# Allow one spec to read another
# When doing so, treat paths as relative to the config's project directory.
project_dir = os.path.dirname(filename)
def inner_read_config(path):
full_path = os.path.join(project_dir, path)
return read_fbcode_builder_config(full_path)
scope = {'read_fbcode_builder_config': inner_read_config}
with open(filename) as config_file:
# Note that this will need to be changed to an exec() function call for
# python 3 compatibility. Unfortunately python 2.7 does not seem to
# treat the scope correctly when using exec() function syntax here.
exec config_file.read() in scope
return scope['config']
def steps_for_spec(builder, spec, processed_modules=None):
'''
Sets `builder` configuration, and returns all the builder steps
necessary to build `spec` and its dependencies.
Traverses the dependencies in depth-first order, honoring the sequencing
in each 'depends_on' list.
'''
if processed_modules is None:
processed_modules = set()
steps = []
for module in spec.get('depends_on', []):
if module not in processed_modules:
processed_modules.add(module)
steps.extend(steps_for_spec(
builder,
module.fbcode_builder_spec(builder),
processed_modules
))
steps.extend(spec.get('steps', []))
return steps
def build_fbcode_builder_config(config):
return lambda builder: builder.build(
steps_for_spec(builder, config['fbcode_builder_spec'](builder))
)
| #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
'Miscellaneous utility functions.'
import itertools
import logging
import os
import shutil
import subprocess
import sys
from contextlib import contextmanager
def recursively_flatten_list(l):
return itertools.chain.from_iterable(
(recursively_flatten_list(i) if type(i) is list else (i,))
for i in l
)
def run_command(*cmd, **kwargs):
'The stdout of most fbcode_builder utilities is meant to be parsed.'
logging.debug('Running: {0} with {1}'.format(cmd, kwargs))
kwargs['stdout'] = sys.stderr
subprocess.check_call(cmd, **kwargs)
@contextmanager
def make_temp_dir(d):
os.mkdir(d)
try:
yield d
finally:
shutil.rmtree(d, ignore_errors=True)
@contextmanager
def push_dir(d):
old_dir = os.getcwd()
os.chdir(d)
try:
yield d
finally:
os.chdir(old_dir)
def read_fbcode_builder_config(filename):
# Allow one spec to read another
# When doing so, treat paths as relative to the config's project directory.
project_dir = os.path.dirname(filename)
def inner_read_config(path):
full_path = os.path.join(project_dir, path)
return read_fbcode_builder_config(full_path)
scope = {'read_fbcode_builder_config': inner_read_config}
with open(filename) as config_file:
# Note that this will need to be changed to an exec() function call for
# python 3 compatibility. Unfortunately python 2.7 does not seem to
# treat the scope correctly when using exec() function syntax here.
exec config_file.read() in scope
return scope['config']
def steps_for_spec(builder, spec, processed_modules=None):
'''
Sets `builder` configuration, and returns all the builder steps
necessary to build `spec` and its dependencies.
Traverses the dependencies in depth-first order, honoring the sequencing
in each 'depends_on' list.
'''
if processed_modules is None:
processed_modules = set()
steps = []
for module in spec.get('depends_on', []):
if module not in processed_modules:
processed_modules.add(module)
steps.extend(steps_for_spec(
builder,
module.fbcode_builder_spec(builder),
processed_modules
))
steps.extend(spec.get('steps', []))
return steps
def build_fbcode_builder_config(config):
return lambda builder: builder.build(
steps_for_spec(builder, config['fbcode_builder_spec'](builder))
)
| Python | 0 |
287757680b96957ba3e7f9db179896f85790ea69 | use cleditor instead of cleditor.min. | addons/web/__openerp__.py | addons/web/__openerp__.py | {
"name" : "web",
"category": "Hidden",
"description":
"""
OpenERP Web core module.
This module provides the core of the OpenERP web client.
""",
"depends" : [],
'auto_install': True,
'post_load' : 'wsgi_postload',
'js' : [
"static/lib/datejs/globalization/en-US.js",
"static/lib/datejs/core.js",
"static/lib/datejs/parser.js",
"static/lib/datejs/sugarpak.js",
"static/lib/datejs/extras.js",
"static/lib/jquery/jquery-1.7.2.js",
"static/lib/jquery.MD5/jquery.md5.js",
"static/lib/jquery.form/jquery.form.js",
"static/lib/jquery.validate/jquery.validate.js",
"static/lib/jquery.ba-bbq/jquery.ba-bbq.js",
"static/lib/jquery.blockUI/jquery.blockUI.js",
"static/lib/jquery.ui/js/jquery-ui-1.8.17.custom.min.js",
"static/lib/jquery.ui.timepicker/js/jquery-ui-timepicker-addon.js",
"static/lib/jquery.ui.notify/js/jquery.notify.js",
"static/lib/jquery.deferred-queue/jquery.deferred-queue.js",
"static/lib/jquery.scrollTo/jquery.scrollTo-min.js",
"static/lib/jquery.tipsy/jquery.tipsy.js",
"static/lib/jquery.textext/jquery.textext.js",
"static/lib/jquery.timeago/jquery.timeago.js",
"static/lib/qweb/qweb2.js",
"static/lib/underscore/underscore.js",
"static/lib/underscore/underscore.string.js",
"static/lib/backbone/backbone.js",
"static/lib/cleditor/jquery.cleditor.js",
"static/lib/py.js/lib/py.js",
"static/src/js/boot.js",
"static/src/js/corelib.js",
"static/src/js/coresetup.js",
"static/src/js/dates.js",
"static/src/js/formats.js",
"static/src/js/chrome.js",
"static/src/js/views.js",
"static/src/js/data.js",
"static/src/js/data_export.js",
"static/src/js/data_import.js",
"static/src/js/search.js",
"static/src/js/view_form.js",
"static/src/js/view_list.js",
"static/src/js/view_list_editable.js",
"static/src/js/view_tree.js",
"static/src/js/view_editor.js"
],
'css' : [
"static/lib/jquery.ui.bootstrap/css/custom-theme/jquery-ui-1.8.16.custom.css",
"static/lib/jquery.ui.timepicker/css/jquery-ui-timepicker-addon.css",
"static/lib/jquery.ui.notify/css/ui.notify.css",
"static/lib/jquery.tipsy/tipsy.css",
"static/lib/jquery.textext/jquery.textext.css",
"static/src/css/base.css",
"static/src/css/data_export.css",
"static/src/css/data_import.css",
"static/lib/cleditor/jquery.cleditor.css",
],
'qweb' : [
"static/src/xml/*.xml",
],
}
| {
"name" : "web",
"category": "Hidden",
"description":
"""
OpenERP Web core module.
This module provides the core of the OpenERP web client.
""",
"depends" : [],
'auto_install': True,
'post_load' : 'wsgi_postload',
'js' : [
"static/lib/datejs/globalization/en-US.js",
"static/lib/datejs/core.js",
"static/lib/datejs/parser.js",
"static/lib/datejs/sugarpak.js",
"static/lib/datejs/extras.js",
"static/lib/jquery/jquery-1.7.2.js",
"static/lib/jquery.MD5/jquery.md5.js",
"static/lib/jquery.form/jquery.form.js",
"static/lib/jquery.validate/jquery.validate.js",
"static/lib/jquery.ba-bbq/jquery.ba-bbq.js",
"static/lib/jquery.blockUI/jquery.blockUI.js",
"static/lib/jquery.ui/js/jquery-ui-1.8.17.custom.min.js",
"static/lib/jquery.ui.timepicker/js/jquery-ui-timepicker-addon.js",
"static/lib/jquery.ui.notify/js/jquery.notify.js",
"static/lib/jquery.deferred-queue/jquery.deferred-queue.js",
"static/lib/jquery.scrollTo/jquery.scrollTo-min.js",
"static/lib/jquery.tipsy/jquery.tipsy.js",
"static/lib/jquery.textext/jquery.textext.js",
"static/lib/jquery.timeago/jquery.timeago.js",
"static/lib/qweb/qweb2.js",
"static/lib/underscore/underscore.js",
"static/lib/underscore/underscore.string.js",
"static/lib/backbone/backbone.js",
"static/lib/cleditor/jquery.cleditor.min.js",
"static/lib/py.js/lib/py.js",
"static/src/js/boot.js",
"static/src/js/corelib.js",
"static/src/js/coresetup.js",
"static/src/js/dates.js",
"static/src/js/formats.js",
"static/src/js/chrome.js",
"static/src/js/views.js",
"static/src/js/data.js",
"static/src/js/data_export.js",
"static/src/js/data_import.js",
"static/src/js/search.js",
"static/src/js/view_form.js",
"static/src/js/view_list.js",
"static/src/js/view_list_editable.js",
"static/src/js/view_tree.js",
"static/src/js/view_editor.js"
],
'css' : [
"static/lib/jquery.ui.bootstrap/css/custom-theme/jquery-ui-1.8.16.custom.css",
"static/lib/jquery.ui.timepicker/css/jquery-ui-timepicker-addon.css",
"static/lib/jquery.ui.notify/css/ui.notify.css",
"static/lib/jquery.tipsy/tipsy.css",
"static/lib/jquery.textext/jquery.textext.css",
"static/src/css/base.css",
"static/src/css/data_export.css",
"static/src/css/data_import.css",
"static/lib/cleditor/jquery.cleditor.css",
],
'qweb' : [
"static/src/xml/*.xml",
],
}
| Python | 0 |
78ff5c0968e4867b550b4cb6dab70885e7119d11 | Use revert instead of reset, bloom-patch remove | bloom/commands/patch/remove_cmd.py | bloom/commands/patch/remove_cmd.py | from __future__ import print_function
import sys
import argparse
from bloom.commands.patch.common import get_patch_config
from bloom.commands.patch.common import set_patch_config
from bloom.git import branch_exists
from bloom.git import checkout
from bloom.git import get_commit_hash
from bloom.git import get_current_branch
from bloom.git import track_branches
from bloom.logging import log_prefix
from bloom.logging import error
from bloom.logging import debug
from bloom.util import add_global_arguments
from bloom.util import execute_command
from bloom.util import handle_global_arguments
@log_prefix('[git-bloom-patch remove]: ')
def remove_patches(directory=None):
# Get the current branch
current_branch = get_current_branch(directory)
# Ensure the current branch is valid
if current_branch is None:
error("Could not determine current branch, are you in a git repo?")
return 1
# Construct the patches branch
patches_branch = 'patches/' + current_branch
try:
# See if the patches branch exists
if branch_exists(patches_branch, False, directory=directory):
if not branch_exists(patches_branch, True, directory=directory):
track_branches(patches_branch, directory)
else:
error("No patches branch (" + patches_branch + ") found, cannot "
"remove patches.")
return 1
# Get the parent branch from the patches branch
config = get_patch_config(patches_branch, directory=directory)
parent, spec = config['parent'], config['base']
if None in [parent, spec]:
error("Could not retrieve patches info.")
return 1
debug("Removing patches from " + current_branch + " back to base "
"commit " + spec)
# Reset this branch using git revert --no-edit spec
execute_command('git revert --no-edit ' + spec, cwd=directory)
# Update the base
config['base'] = get_commit_hash(current_branch, directory)
set_patch_config(patches_branch, config, directory=directory)
finally:
if current_branch:
checkout(current_branch, directory=directory)
return 0
def get_parser():
"""Returns a parser.ArgumentParser with all arguments defined"""
parser = argparse.ArgumentParser(description="""
Removes any applied patches from the working branch, including any un-exported
patches, so use with caution.
""")
return parser
def main():
# Assumptions: in a git repo, this command verb was passed, argv has enough
sysargs = sys.argv[2:]
parser = get_parser()
parser = add_global_arguments(parser)
args = parser.parse_args(sysargs)
handle_global_arguments(args)
return remove_patches()
| from __future__ import print_function
import sys
from argparse import ArgumentParser
from bloom.util import add_global_arguments
from bloom.util import execute_command
from bloom.util import handle_global_arguments
from bloom.logging import log_prefix
from bloom.logging import error
from bloom.logging import debug
from bloom.git import branch_exists
from bloom.git import checkout
from bloom.git import get_current_branch
from bloom.git import track_branches
from bloom.commands.patch.common import get_patch_config
@log_prefix('[git-bloom-patch remove]: ')
def remove_patches(directory=None):
# Get the current branch
current_branch = get_current_branch(directory)
# Ensure the current branch is valid
if current_branch is None:
error("Could not determine current branch, are you in a git repo?")
return 1
# Construct the patches branch
patches_branch = 'patches/' + current_branch
try:
# See if the patches branch exists
if branch_exists(patches_branch, False, directory=directory):
if not branch_exists(patches_branch, True, directory=directory):
track_branches(patches_branch, directory)
else:
error("No patches branch (" + patches_branch + ") found, cannot "
"remove patches.")
return 1
# Get the parent branch from the patches branch
config = get_patch_config(patches_branch, directory=directory)
parent, spec = config['parent'], config['base']
if None in [parent, spec]:
error("Could not retrieve patches info.")
return 1
debug("Removing patches from " + current_branch + " back to base "
"commit " + spec)
# Reset this branch using git reset --hard spec
execute_command('git reset --hard ' + spec, cwd=directory)
finally:
if current_branch:
checkout(current_branch, directory=directory)
return 0
def get_parser():
"""Returns a parser.ArgumentParser with all arguments defined"""
parser = ArgumentParser(description="""
Removes any applied patches from the working branch, including any un-exported
patches, so use with caution.
""")
return parser
def main():
# Assumptions: in a git repo, this command verb was passed, argv has enough
sysargs = sys.argv[2:]
parser = get_parser()
parser = add_global_arguments(parser)
args = parser.parse_args(sysargs)
handle_global_arguments(args)
return remove_patches()
| Python | 0 |
4e418e6168425173c3e6ed44299864d52da286ee | fix var reference in gutenberg_filter | scripts/gutenberg_filter.py | scripts/gutenberg_filter.py | import os
import re
class GutenbergIndexFilter(object):
# Extensions excluded from rsync of both ftp and cached/generated content
EXCLUDED_EXT = ['.zip', '.wav', '.mp3', '.ogg', '.iso', '.ISO', '.rar', '.mpeg', '.m4b']
# Additional extensions excluded from cached/generated files
CACHE_EXCLUDED_EXT = ['.log', '.mobi', '.pdb', '.rdf', '.qioo.jar']
def __init__(self):
self.removed_texts = []
self.notitle_count = 0
def filter(self, record):
"""Return true if keep record, false if should discard record"""
if self.is_description_record(record):
has_title = 'title' in record and len(record['title']) > 0
if not has_title:
self.removed_texts.append(record['textId'])
print "[omit %s notitle]" % record['textId']
self.notitle_count += 1
return has_title
else:
# NOTE: Changes to the record persist and are externally visible!
# remove prepended '#' from text reference
record['textId'] = record['textId'][1:]
# adjust the file path (should add warning if path does not match pattern)
FILE_PREFIX = '^http://www.gutenberg.org/dirs/'
record['file'] = re.sub(FILE_PREFIX, 'gutenberg/', record['file'])
CACHE_FILE_PREFIX = '^http://www.gutenberg.org/cache/epub/'
record['file'] = re.sub(CACHE_FILE_PREFIX, 'cache/generated/', record['file'])
# seems ugly - would multiple filters be better? or maybe a filter stage followed by a transform stage?
if record['file'].startswith('http'):
print "[file prefix unexpected %s]" % record['file']
# omit files based on three criteria:
# (a) book description was omitted due to filter criteria above
# (b) rsync script excluded the content (extensions and 'pgdvd')
# (c) rsync script excluded the cached content (extensions and 'pgdvd')
ext = self.get_extension(record['file'])
return (record['textId'] not in self.removed_texts and
u'pgdvd' not in record['file'] and
ext not in self.EXCLUDED_EXT and
(not record['file'].startswith(u'cache/') or ext not in self.CACHE_EXCLUDED_EXT))
def is_description_record(self, record):
return record['record_type'] == 'DESCRIPTION'
def get_extension(self, filename):
name, ext = os.path.splitext(filename)
return ext
| import os
import re
class GutenbergIndexFilter(object):
# Extensions excluded from rsync of both ftp and cached/generated content
EXCLUDED_EXT = ['.zip', '.wav', '.mp3', '.ogg', '.iso', '.ISO', '.rar', '.mpeg', '.m4b']
# Additional extensions excluded from cached/generated files
CACHE_EXCLUDED_EXT = ['.log', '.mobi', '.pdb', '.rdf', '.qioo.jar']
def __init__(self):
self.removed_texts = []
self.notitle_count = 0
def filter(self, record):
"""Return true if keep record, false if should discard record"""
if self.is_description_record(record):
has_title = 'title' in record and len(record['title']) > 0
if not has_title:
self.removed_texts.append(record['textId'])
print "[omit %s notitle]" % record['textId']
self.notitle_count += 1
return has_title
else:
# NOTE: Changes to the record persist and are externally visible!
# remove prepended '#' from text reference
record['textId'] = record['textId'][1:]
# adjust the file path (should add warning if path does not match pattern)
FILE_PREFIX = '^http://www.gutenberg.org/dirs/'
record['file'] = re.sub(FILE_PREFIX, 'gutenberg/', record['file'])
CACHE_FILE_PREFIX = '^http://www.gutenberg.org/cache/epub/'
record['file'] = re.sub(CACHE_FILE_PREFIX, 'cache/generated/', record['file'])
# seems ugly - would multiple filters be better? or maybe a filter stage followed by a transform stage?
if record['file'].startswith('http'):
print "[file prefix unexpected %s]" % record['file']
# omit files based on three criteria:
# (a) book description was omitted due to filter criteria above
# (b) rsync script excluded the content (extensions and 'pgdvd')
# (c) rsync script excluded the cached content (extensions and 'pgdvd')
ext = self.get_extension(record['file'])
return (record['textId'] not in self.removed_texts and
u'pgdvd' not in record['file'] and
ext not in self.EXCLUDED_EXT and
(not record['file'].startswith(u'cache/') or ext not in CACHE_EXCLUDED_EXT))
def is_description_record(self, record):
return record['record_type'] == 'DESCRIPTION'
def get_extension(self, filename):
name, ext = os.path.splitext(filename)
return ext
| Python | 0 |
8a5e49876eae4f2d9bc8ced2fa2e2be0d24ddd68 | rollback to 1.7.0 release | scripts/imgtool/__init__.py | scripts/imgtool/__init__.py | # Copyright 2017-2020 Linaro Limited
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
imgtool_version = "1.7.0"
| # Copyright 2017-2020 Linaro Limited
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
imgtool_version = "1.8.0a1"
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.