commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
564f1470e742f06dd2802341c4b5b118074a9013 | Support unhidden polygamy.json file in a config directory | solarnz/polygamy,solarnz/polygamy | polygamy/RepoConfigParser.py | polygamy/RepoConfigParser.py | from __future__ import absolute_import
import json
import os.path
class BaseConfigParser:
CONFIG_DIR = '.polygamy'
def parse_file(self):
pass
def find_config_file(self, path):
real_path = os.path.realpath(path)
config_file = os.path.join(path, self.CONFIG_FILE)
config_dir = os.path.join(path, self.CONFIG_DIR)
config_dir_file = os.path.join(config_dir, self.CONFIG_FILE)
# Search for a config file within a .polygamy directory
if os.path.isdir(config_dir):
for f in (self.CONFIG_FILE, self.DIR_CONFIG_FILE):
config_dir_file = os.path.join(config_dir, f)
if os.path.isfile(config_dir_file):
self.config_path = config_dir_file
self.working_directory = real_path
return config_dir_file
# Look or a .polygamy.json file.
if os.path.isfile(config_file):
self.config_path = config_file
self.working_directory = real_path
return config_file
# Stop recursively searching when we hit the root directory.
if real_path == os.path.realpath(os.path.join(path, os.path.pardir)):
# TODO: Better error handling for not finding a config file. Some
# kind of wizard for generating a config would be very cool indeed.
raise ValueError('Cannot find config file or directory')
return self.find_config_file(
os.path.join(path, os.path.pardir)
)
class JsonConfigParser(BaseConfigParser):
CONFIG_FILE = '.polygamy.json'
DIR_CONFIG_FILE = 'polygamy.json'
def parse_file(self):
with open(self.config_path) as config_file:
json_data = json.loads(config_file.read())
self.repositories = json_data['repos']
self.remotes = json_data['remotes']
| from __future__ import absolute_import
import json
import os.path
class BaseConfigParser:
CONFIG_DIR = '.polygamy'
def parse_file(self):
pass
def find_config_file(self, path):
real_path = os.path.realpath(path)
config_file = os.path.join(path, self.CONFIG_FILE)
config_dir = os.path.join(path, self.CONFIG_DIR)
config_dir_file = os.path.join(config_dir, self.CONFIG_FILE)
# Search for a .polygamy.josn file within a .polygamy directory
if os.path.isdir(config_dir) and os.path.isfile(config_dir_file):
self.config_path = config_dir_file
self.working_directory = real_path
return config_dir_file
# Look or a .polygamy.json file.
if os.path.isfile(config_file):
self.config_path = config_file
self.working_directory = real_path
return config_file
# Stop recursively searching when we hit the root directory.
if real_path == os.path.realpath(os.path.join(path, os.path.pardir)):
# TODO: Better error handling for not finding a config file. Some
# kind of wizard for generating a config would be very cool indeed.
raise ValueError('Cannot find config file or directory')
return self.find_config_file(
os.path.join(path, os.path.pardir)
)
class JsonConfigParser(BaseConfigParser):
CONFIG_FILE = '.polygamy.json'
def parse_file(self):
with open(self.config_path) as config_file:
json_data = json.loads(config_file.read())
self.repositories = json_data['repos']
self.remotes = json_data['remotes']
| bsd-3-clause | Python |
3ace8c1e603dd07bde9c46e036e14b49a6103238 | Upgrade notify message | codex-bot/github | github/events/watch.py | github/events/watch.py | from data_types.hook import Hook
from data_types.repository import Repository
from data_types.user import User
from .base import EventBase
import math
ordinal = lambda n: "%d%s" % (n,"tsnrhtdd"[(math.floor(n/10)%10!=1)*(n%10<4)*n%10::4])
class EventWatch(EventBase):
def __init__(self, sdk):
super(EventWatch, self).__init__(sdk)
self.hook = None
self.repository = None
self.sender = None
"""
WatchEvent
Triggered when someone stars your repository.
https://developer.github.com/v3/activity/events/types/#watchevent
"""
async def process(self, payload, chat):
"""
Processes Watch event
:param payload: JSON object with payload
:param chat: current chat object
:return:
"""
self.sdk.log("Watch event payload taken {}".format(payload))
try:
self.repository = Repository(payload['repository'])
self.sender = User(payload['sender'])
except Exception as e:
self.sdk.log('Cannot process WatchEvent payload because of {}'.format(e))
await self.send(
chat['chat'],
'<a href=\"{}\">{}</a> added a {} ⭐️ to <a href=\"{}\">{}</a>'.format(
self.sender.html_url,
self.sender.login,
ordinal(self.sender.repository.stargazers_count),
self.repository.html_url,
self.repository.full_name),
'HTML'
)
| from data_types.hook import Hook
from data_types.repository import Repository
from data_types.user import User
from .base import EventBase
class EventWatch(EventBase):
def __init__(self, sdk):
super(EventWatch, self).__init__(sdk)
self.hook = None
self.repository = None
self.sender = None
"""
WatchEvent
Triggered when someone stars your repository.
https://developer.github.com/v3/activity/events/types/#watchevent
"""
async def process(self, payload, chat):
"""
Processes Watch event
:param payload: JSON object with payload
:param chat: current chat object
:return:
"""
self.sdk.log("Watch event payload taken {}".format(payload))
try:
self.repository = Repository(payload['repository'])
self.sender = User(payload['sender'])
except Exception as e:
self.sdk.log('Cannot process WatchEvent payload because of {}'.format(e))
await self.send(
chat['chat'],
'<a href=\"{}\">{}</a> added new star ⭐️ to <a href=\"{}\">{}</a>.'.format(
self.sender.html_url,
self.sender.login,
self.repository.html_url,
self.repository.full_name),
'HTML'
)
| mit | Python |
24d808ae518b192b0136e2dfe3cf8a3464ac857d | Update watch.py | codex-bot/github | github/events/watch.py | github/events/watch.py | from data_types.hook import Hook
from data_types.repository import Repository
from data_types.user import User
from .base import EventBase
class EventWatch(EventBase):
def __init__(self, sdk):
super(EventWatch, self).__init__(sdk)
self.hook = None
self.repository = None
self.sender = None
"""
WatchEvent
Triggered when someone stars your repository.
https://developer.github.com/v3/activity/events/types/#watchevent
"""
async def process(self, payload, chat):
"""
Processes Watch event
:param payload: JSON object with payload
:param chat: current chat object
:return:
"""
self.sdk.log("Watch event payload taken {}".format(payload))
try:
self.repository = Repository(payload['repository'])
self.sender = User(payload['sender'])
except Exception as e:
self.sdk.log('Cannot process WatchEvent payload because of {}'.format(e))
await self.send(
chat['chat'],
'<a href=\"{}\">{}</a> ⭐️ {} <a href=\"{}\">{}</a>'.format(
self.sender.html_url,
self.sender.login,
ordinal(self.sender.repository.stargazers_count),
self.repository.html_url,
self.repository.full_name),
'HTML'
)
| from data_types.hook import Hook
from data_types.repository import Repository
from data_types.user import User
from .base import EventBase
import math
ordinal = lambda n: "%d%s" % (n,"tsnrhtdd"[(math.floor(n/10)%10!=1)*(n%10<4)*n%10::4])
class EventWatch(EventBase):
def __init__(self, sdk):
super(EventWatch, self).__init__(sdk)
self.hook = None
self.repository = None
self.sender = None
"""
WatchEvent
Triggered when someone stars your repository.
https://developer.github.com/v3/activity/events/types/#watchevent
"""
async def process(self, payload, chat):
"""
Processes Watch event
:param payload: JSON object with payload
:param chat: current chat object
:return:
"""
self.sdk.log("Watch event payload taken {}".format(payload))
try:
self.repository = Repository(payload['repository'])
self.sender = User(payload['sender'])
except Exception as e:
self.sdk.log('Cannot process WatchEvent payload because of {}'.format(e))
await self.send(
chat['chat'],
'<a href=\"{}\">{}</a> added a {} ⭐️ to <a href=\"{}\">{}</a>'.format(
self.sender.html_url,
self.sender.login,
ordinal(self.sender.repository.stargazers_count),
self.repository.html_url,
self.repository.full_name),
'HTML'
)
| mit | Python |
aee36b42f59ae9e8ec2f4f2dd82cd758c73a7274 | Remove unneeded import from south_africa urls | ken-muturi/pombola,geoffkilpin/pombola,mysociety/pombola,ken-muturi/pombola,patricmutwiri/pombola,geoffkilpin/pombola,patricmutwiri/pombola,geoffkilpin/pombola,hzj123/56th,patricmutwiri/pombola,patricmutwiri/pombola,mysociety/pombola,ken-muturi/pombola,mysociety/pombola,mysociety/pombola,hzj123/56th,hzj123/56th,patricmutwiri/pombola,hzj123/56th,mysociety/pombola,geoffkilpin/pombola,mysociety/pombola,hzj123/56th,hzj123/56th,geoffkilpin/pombola,ken-muturi/pombola,patricmutwiri/pombola,ken-muturi/pombola,geoffkilpin/pombola,ken-muturi/pombola | pombola/south_africa/urls.py | pombola/south_africa/urls.py | from django.conf.urls import patterns, include, url
from pombola.south_africa.views import LatLonDetailView,SAPlaceDetailSub
urlpatterns = patterns('pombola.south_africa.views',
url(r'^place/latlon/(?P<lat>[0-9\.-]+),(?P<lon>[0-9\.-]+)/', LatLonDetailView.as_view(), name='latlon'),
url(r'^place/(?P<slug>[-\w]+)/places/', SAPlaceDetailSub.as_view(), {'sub_page': 'places'}, name='place_places'),
)
| from django.conf.urls import patterns, include, url
from pombola.core.views import PersonDetailSub
from pombola.south_africa.views import LatLonDetailView,SAPlaceDetailSub
urlpatterns = patterns('pombola.south_africa.views',
url(r'^place/latlon/(?P<lat>[0-9\.-]+),(?P<lon>[0-9\.-]+)/', LatLonDetailView.as_view(), name='latlon'),
url(r'^place/(?P<slug>[-\w]+)/places/', SAPlaceDetailSub.as_view(), {'sub_page': 'places'}, name='place_places'),
)
| agpl-3.0 | Python |
f878e80ea7d743abcb5b730568dff71bab391f33 | Combine tests into one | CubicComet/exercism-python-solutions | leap/leap.py | leap/leap.py | def is_leap_year(y):
return (y % 4 == 0) and (y % 100 != 0 or y % 400 == 0)
| def is_leap_year(y):
if y % 400 == 0:
return True
elif y % 100 == 0:
return False
elif y % 4 == 0:
return True
else:
return False
| agpl-3.0 | Python |
9011f4f5b984de16cbf5a166d75984ac95d37b6d | Add support for GRAFANA_API_TOKEN environment variable | m110/grafcli,m110/grafcli | grafcli/storage/api.py | grafcli/storage/api.py | import os
import requests
from climb.config import config
from grafcli.storage import Storage
from grafcli.documents import Dashboard
from grafcli.exceptions import DocumentNotFound
class APIStorage(Storage):
def __init__(self, host):
super().__init__(host)
self._config = config[host]
def _call(self, method, url, data=None):
full_url = os.path.join(self._config['url'], url)
auth = None
headers = {}
api_token = os.getenv('GRAFANA_API_TOKEN');
if api_token:
headers['Authorization'] = 'Bearer {}'.format(api_token)
elif self._config.get('token'):
headers['Authorization'] = 'Bearer {}'.format(self._config['token'])
else:
auth = (self._config['user'], self._config['password'])
response = requests.request(method, full_url,
headers=headers,
auth=auth,
json=data)
response.raise_for_status()
return response.json()
def list(self):
return [row['uri'].split('/')[-1]
for row in self._call('GET', 'search')]
def get(self, dashboard_id):
try:
source = self._call('GET', 'dashboards/db/{}'.format(dashboard_id))
except requests.HTTPError as exc:
if exc.response.status_code == 404:
raise DocumentNotFound("There is no such dashboard: {}".format(dashboard_id))
raise
return Dashboard(source['dashboard'], dashboard_id)
def save(self, dashboard_id, dashboard):
if not dashboard_id:
dashboard_id = dashboard.slug
data = {
"dashboard": dashboard.source,
}
try:
self._call('GET', 'dashboards/db/{}'.format(dashboard_id))
data["overwrite"] = True
except requests.HTTPError as exc:
if exc.response.status_code != 404:
raise
data["dashboard"]["id"] = None
self._call('POST', 'dashboards/db', data)
def remove(self, dashboard_id):
self._call('DELETE', 'dashboards/db/{}'.format(dashboard_id))
| import os
import requests
from climb.config import config
from grafcli.storage import Storage
from grafcli.documents import Dashboard
from grafcli.exceptions import DocumentNotFound
class APIStorage(Storage):
def __init__(self, host):
super().__init__(host)
self._config = config[host]
def _call(self, method, url, data=None):
full_url = os.path.join(self._config['url'], url)
auth = None
headers = {}
if self._config.get('token'):
headers['Authorization'] = 'Bearer {}'.format(self._config['token'])
else:
auth = (self._config['user'], self._config['password'])
response = requests.request(method, full_url,
headers=headers,
auth=auth,
json=data)
response.raise_for_status()
return response.json()
def list(self):
return [row['uri'].split('/')[-1]
for row in self._call('GET', 'search')]
def get(self, dashboard_id):
try:
source = self._call('GET', 'dashboards/db/{}'.format(dashboard_id))
except requests.HTTPError as exc:
if exc.response.status_code == 404:
raise DocumentNotFound("There is no such dashboard: {}".format(dashboard_id))
raise
return Dashboard(source['dashboard'], dashboard_id)
def save(self, dashboard_id, dashboard):
if not dashboard_id:
dashboard_id = dashboard.slug
data = {
"dashboard": dashboard.source,
}
try:
self._call('GET', 'dashboards/db/{}'.format(dashboard_id))
data["overwrite"] = True
except requests.HTTPError as exc:
if exc.response.status_code != 404:
raise
data["dashboard"]["id"] = None
self._call('POST', 'dashboards/db', data)
def remove(self, dashboard_id):
self._call('DELETE', 'dashboards/db/{}'.format(dashboard_id))
| mit | Python |
d0e7763827e542fa2c63d254010021c3ad507346 | Implement SchemaNavigator Update CollectionHelper.transform() method | lucasdavid/grapher | grapher/core/common.py | grapher/core/common.py | import abc
from . import errors
class CollectionHelper(metaclass=abc.ABCMeta):
"""Helper for normalizing unknown structures into lists.
"""
@classmethod
def needs(cls, item):
"""Checks if :item needs to be transformed into a list.
Fundamentally, lists, tuples, sets and objects that have a member '__iter__' don't need transformation,
except for dictionaries and objects of their subclasses.
:param item: the unknown structure.
:return: :boolean:
"""
return not isinstance(item, (list, tuple, set)) and \
not hasattr(item, '__iter__') or isinstance(item, dict) or issubclass(item.__class__, dict)
@classmethod
def transform(cls, item):
"""Transforms an unknown structure into a list, if it isn't one yet.
:param item: the structure to be transformed.
:return: a list containing the passed structure, or the structure itself, if it's already a list.
"""
return ([item], True) if cls.needs(item) \
else (item, False)
@classmethod
def restore(cls, item, previously_transformed):
"""Restore a structure to its original state.
:param item: the structure to be restored.
:param previously_transformed: the flag which indicates if the structure was previously transformed.
:return: the structure, in case it was not transformed. The element in the first-level of the list, otherwise.
"""
return item.pop() if previously_transformed else item
@classmethod
def enumerate(cls, item):
item, transformed = cls.transform(item)
if item:
item = {i: e for i, e in enumerate(item)}
return item, transformed
@classmethod
def restore_enumeration(cls, item, previously_transformed):
item = [e for _, e in item.items()]
return cls.restore(item, previously_transformed)
class SchemaNavigator(metaclass=abc.ABCMeta):
@classmethod
def identity_field_from(cls, schema):
# Let's assume :_id is the identity field.
identity = None
# Now, we override :_id, if explicitly flagged by the user.
for field, desc in schema.items():
if 'identity' in desc and desc['identity']:
if identity:
raise errors.SchemaDefinitionError(
'Cannot define both fields %s and %s as identity.', identity, field)
identity = field
return identity or '_id'
| import abc
class CollectionHelper(metaclass=abc.ABCMeta):
"""Helper for normalizing unknown structures into lists.
"""
@classmethod
def needs(cls, item):
"""Checks if :item needs to be transformed into a list.
:param item: the unknown structure.
:return: :boolean:
"""
return not isinstance(item, (list, tuple, set))
@classmethod
def transform(cls, item):
"""Transforms an unknown structure into a list, if it isn't one yet.
:param item: the structure to be transformed.
:return: a list containing the passed structure, or the structure itself, if it's already a list.
"""
if item is None:
return None, False
return ([item], True) if cls.needs(item) \
else (item, False)
@classmethod
def restore(cls, item, previously_transformed):
"""Restore a structure to its original state.
:param item: the structure to be restored.
:param previously_transformed: the flag which indicates if the structure was previously transformed.
:return: the structure, in case it was not transformed. The element in the first-level of the list, otherwise.
"""
return item.pop() if previously_transformed and item else item
@classmethod
def enumerate(cls, item):
item, transformed = cls.transform(item)
if item:
item = {i: e for i, e in enumerate(item)}
return item, transformed
@classmethod
def restore_enumeration(cls, item, previously_transformed):
item = [e for _, e in item.items()]
return item.pop() if previously_transformed and item else item
| mit | Python |
7a9d91a91603bfb6adc514c483c5b552fccbff44 | fix circular reference in TimerCallback with calls to connect/disconnect | openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro | software/ddapp/src/python/ddapp/timercallback.py | software/ddapp/src/python/ddapp/timercallback.py | import time
from PythonQt import QtCore
import traceback
class TimerCallback(object):
def __init__(self, targetFps=30):
'''
Construct TimerCallback. The targetFps defines frames per second, the
frequency for the ticks() callback method.
'''
self.targetFps = targetFps
self.timer = QtCore.QTimer()
self.timer.setSingleShot(True)
self.singleShotTimer = QtCore.QTimer()
self.singleShotTimer.setSingleShot(True)
self.callback = None
def start(self):
'''
Start the timer.
'''
self.startTime = time.time()
self.lastTickTime = self.startTime
self.timer.connect('timeout()', self._timerEvent)
self.timer.start(0)
def stop(self):
'''
Stop the timer.
'''
self.timer.stop()
self.timer.disconnect('timeout()', self._timerEvent)
def tick(self):
'''
Timer event callback method. Subclasses can override this method.
'''
if self.callback:
try:
return self.callback()
except:
print traceback.format_exc()
return False
def singleShot(self, timeoutInSeconds):
self.singleShotTimer.connect('timeout()', self._singleShotTimerEvent)
self.singleShotTimer.start(int(timeoutInSeconds * 1000))
def _singleShotTimerEvent(self):
self.tick()
self.singleShotTimer.disconnect('timeout()', self._singleShotTimerEvent)
def _schedule(self, elapsedTimeInSeconds):
'''
This method is given an elapsed time since the start of the last
call to ticks(). It schedules a timer event to acheive the targetFps.
'''
fpsDelayMilliseconds = int(1000.0 / self.targetFps)
elapsedMilliseconds = int(elapsedTimeInSeconds*1000.0)
waitMilliseconds = fpsDelayMilliseconds - elapsedMilliseconds
self.timer.start(waitMilliseconds if waitMilliseconds > 0 else 1)
def _timerEvent(self):
'''
Internal timer callback method. Calls ticks() and measures elapsed time.
'''
startTime = time.time()
self.elapsed = startTime - self.lastTickTime
if self.tick() is not False:
self.lastTickTime = startTime
self._schedule(time.time() - startTime)
else:
self.stop()
| import time
from PythonQt import QtCore
import traceback
class TimerCallback(object):
def __init__(self, targetFps=30):
'''
Construct TimerCallback. The targetFps defines frames per second, the
frequency for the ticks() callback method.
'''
self.targetFps = targetFps
self.timer = QtCore.QTimer()
self.timer.setSingleShot(True)
self.timer.connect('timeout()', self._timerEvent)
self.singleShotTimer = QtCore.QTimer()
self.singleShotTimer.setSingleShot(True)
self.singleShotTimer.connect('timeout()', self._singleShotTimerEvent)
self.callback = None
def start(self):
'''
Start the timer.
'''
self.startTime = time.time()
self.lastTickTime = self.startTime
self.timer.start(0)
def stop(self):
'''
Stop the timer.
'''
self.timer.stop()
def tick(self):
'''
Timer event callback method. Subclasses can override this method.
'''
if self.callback:
try:
return self.callback()
except:
print traceback.format_exc()
return False
def singleShot(self, timeoutInSeconds):
self.singleShotTimer.start(int(timeoutInSeconds * 1000))
def _singleShotTimerEvent(self):
self.tick()
def _schedule(self, elapsedTimeInSeconds):
'''
This method is given an elapsed time since the start of the last
call to ticks(). It schedules a timer event to acheive the targetFps.
'''
fpsDelayMilliseconds = int(1000.0 / self.targetFps)
elapsedMilliseconds = int(elapsedTimeInSeconds*1000.0)
waitMilliseconds = fpsDelayMilliseconds - elapsedMilliseconds
self.timer.start(waitMilliseconds if waitMilliseconds > 0 else 1)
def _timerEvent(self):
'''
Internal timer callback method. Calls ticks() and measures elapsed time.
'''
startTime = time.time()
self.elapsed = startTime - self.lastTickTime
if self.tick() is not False:
self.lastTickTime = startTime
self._schedule(time.time() - startTime)
| bsd-3-clause | Python |
1192cb849fb23dfd516d40f5b32d86d76c4e765b | Use the right variable in this loop | thread/django-lightweight-queue,thread/django-lightweight-queue | django_lightweight_queue/management/commands/queue_configuration.py | django_lightweight_queue/management/commands/queue_configuration.py | from typing import Any
from django.core.management.base import BaseCommand, CommandParser
from ... import app_settings
from ...utils import get_backend, get_queue_counts, load_extra_config
from ...cron_scheduler import get_cron_config
class Command(BaseCommand):
def add_arguments(self, parser: CommandParser) -> None:
parser.add_argument(
'--config',
action='store',
default=None,
help="The path to an additional django-style config file to load",
)
def handle(self, **options: Any) -> None:
# Configuration overrides
extra_config = options['config']
if extra_config is not None:
load_extra_config(extra_config)
print("django-lightweight-queue")
print("========================")
print("")
print("{0:<55} {1:<5} {2}".format("Queue name", "Concurrency", "Backend"))
print("-" * 27)
for k, v in sorted(get_queue_counts().items()):
print(" {0:<54} {1:<5} {2}".format(
k,
v,
get_backend(k).__class__.__name__,
))
print("")
print("Middleware:")
for x in app_settings.MIDDLEWARE:
print(" * {}".format(x))
print("")
print("Cron configuration")
for config in get_cron_config():
print("")
for key in (
'command',
'command_args',
'hours',
'minutes',
'queue',
'timeout',
'sigkill_on_stop',
):
print("{:20s}: {}".format(key, config.get(key, '-')))
| from typing import Any
from django.core.management.base import BaseCommand, CommandParser
from ... import app_settings
from ...utils import get_backend, get_queue_counts, load_extra_config
from ...cron_scheduler import get_cron_config
class Command(BaseCommand):
def add_arguments(self, parser: CommandParser) -> None:
parser.add_argument(
'--config',
action='store',
default=None,
help="The path to an additional django-style config file to load",
)
def handle(self, **options: Any) -> None:
# Configuration overrides
extra_config = options['config']
if extra_config is not None:
load_extra_config(extra_config)
print("django-lightweight-queue")
print("========================")
print("")
print("{0:<55} {1:<5} {2}".format("Queue name", "Concurrency", "Backend"))
print("-" * 27)
for k, v in sorted(get_queue_counts().items()):
print(" {0:<54} {1:<5} {2}".format(
k,
v,
get_backend(k).__class__.__name__,
))
print("")
print("Middleware:")
for x in app_settings.MIDDLEWARE:
print(" * {}".format(x))
print("")
print("Cron configuration")
for config in get_cron_config():
print("")
for key in (
'command',
'command_args',
'hours',
'minutes',
'queue',
'timeout',
'sigkill_on_stop',
):
print("{:20s}: {}".format(key, config.get(k, '-')))
| bsd-3-clause | Python |
82b2e87de2c41b43248a2bccdc75c0066ec2ef49 | Correct SDK PEP 564 nanosecond time reference (#1867) | open-telemetry/opentelemetry-python,open-telemetry/opentelemetry-python | opentelemetry-api/src/opentelemetry/util/_time.py | opentelemetry-api/src/opentelemetry/util/_time.py | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logging import getLogger
from sys import version_info
if version_info.minor < 7:
getLogger(__name__).warning( # pylint: disable=logging-not-lazy
"You are using Python 3.%s. This version does not support timestamps "
"with nanosecond precision and the OpenTelemetry SDK will use "
"millisecond precision instead. Please refer to PEP 564 for more "
"information. Please upgrade to Python 3.7 or newer to use nanosecond "
"precision." % version_info.minor
)
from time import time
def _time_ns() -> int:
return int(time() * 1e9)
else:
from time import time_ns
_time_ns = time_ns
| # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logging import getLogger
from sys import version_info
if version_info.minor < 7:
getLogger(__name__).warning( # pylint: disable=logging-not-lazy
"You are using Python 3.%s. This version does not support timestamps "
"with nanosecond precision and the OpenTelemetry SDK will use "
"millisecond precision instead. Please refer to PEP 546 for more "
"information. Please upgrade to Python 3.7 or newer to use nanosecond "
"precision." % version_info.minor
)
from time import time
def _time_ns() -> int:
return int(time() * 1e9)
else:
from time import time_ns
_time_ns = time_ns
| apache-2.0 | Python |
f1465418e0f36b1062233672bdeece1382394b6f | Remove unnecessary idiokit.stop argument. | abusesa/abusehelper | abusehelper/contrib/autoshun/autoshunbot.py | abusehelper/contrib/autoshun/autoshunbot.py | import idiokit
from abusehelper.core import utils, cymruwhois, bot, events
AUTOSHUN_CSV_URL = "http://www.autoshun.org/files/shunlist.csv"
class AutoshunBot(bot.PollingBot):
COLUMNS = ["ip", "time", "type"]
feed_url = bot.Param(default=AUTOSHUN_CSV_URL)
use_cymru_whois = bot.BoolParam(default=True)
def poll(self):
pipe = self._poll(url=self.feed_url)
if self.use_cymru_whois:
pipe = pipe | cymruwhois.augment("ip")
return pipe | self._normalize()
@idiokit.stream
def _poll(self, url):
self.log.info("Downloading %s" % url)
try:
info, fileobj = yield utils.fetch_url(url)
except utils.FetchUrlFailed, fuf:
self.log.error("Download failed: %r", fuf)
idiokit.stop()
self.log.info("Downloaded")
# Skip first line
fileobj.readline()
yield utils.csv_to_events(fileobj,
columns=self.COLUMNS,
charset=info.get_param("charset"))
@idiokit.stream
def _normalize(self):
while True:
event = yield idiokit.next()
event.add("feed", "autoshun")
event.add("source url", self.feed_url)
yield idiokit.send(event)
if __name__ == "__main__":
AutoshunBot.from_command_line().execute()
| import idiokit
from abusehelper.core import utils, cymruwhois, bot, events
AUTOSHUN_CSV_URL = "http://www.autoshun.org/files/shunlist.csv"
class AutoshunBot(bot.PollingBot):
COLUMNS = ["ip", "time", "type"]
feed_url = bot.Param(default=AUTOSHUN_CSV_URL)
use_cymru_whois = bot.BoolParam(default=True)
def poll(self):
pipe = self._poll(url=self.feed_url)
if self.use_cymru_whois:
pipe = pipe | cymruwhois.augment("ip")
return pipe | self._normalize()
@idiokit.stream
def _poll(self, url):
self.log.info("Downloading %s" % url)
try:
info, fileobj = yield utils.fetch_url(url)
except utils.FetchUrlFailed, fuf:
self.log.error("Download failed: %r", fuf)
idiokit.stop(False)
self.log.info("Downloaded")
# Skip first line
fileobj.readline()
yield utils.csv_to_events(fileobj,
columns=self.COLUMNS,
charset=info.get_param("charset"))
@idiokit.stream
def _normalize(self):
while True:
event = yield idiokit.next()
event.add("feed", "autoshun")
event.add("source url", self.feed_url)
yield idiokit.send(event)
if __name__ == "__main__":
AutoshunBot.from_command_line().execute()
| mit | Python |
9c591c864bbef43cfb632cb7992b746a7f957842 | Clarify the ProhibitNoAbortFunction message (#304) | Kuniwak/vint,Kuniwak/vint | vint/linting/policy/prohibit_no_abort_function.py | vint/linting/policy/prohibit_no_abort_function.py | from vint.ast.node_type import NodeType
from vint.linting.level import Level
from vint.linting.policy.abstract_policy import AbstractPolicy
from vint.linting.policy.reference.googlevimscriptstyleguide import get_reference_source
from vint.linting.policy_registry import register_policy
@register_policy
class ProhibitNoAbortFunction(AbstractPolicy):
description = 'Use the abort attribute and ! for functions in autoload'
reference = get_reference_source('FUNCTIONS')
level = Level.WARNING
def listen_node_types(self):
return [NodeType.FUNCTION]
def is_valid(self, node, lint_context):
""" Whether the specified node is valid.
This policy prohibits functions in autoload that have no 'abort' or bang
"""
if 'autoload' not in lint_context['lint_target'].path.parts:
return True
has_bang = node['ea']['forceit'] != 0
has_abort = node['attr']['abort'] != 0
return has_bang and has_abort
| from vint.ast.node_type import NodeType
from vint.linting.level import Level
from vint.linting.policy.abstract_policy import AbstractPolicy
from vint.linting.policy.reference.googlevimscriptstyleguide import get_reference_source
from vint.linting.policy_registry import register_policy
@register_policy
class ProhibitNoAbortFunction(AbstractPolicy):
description = 'Use the abort attribute for functions in autoload'
reference = get_reference_source('FUNCTIONS')
level = Level.WARNING
def listen_node_types(self):
return [NodeType.FUNCTION]
def is_valid(self, node, lint_context):
""" Whether the specified node is valid.
This policy prohibits functions in autoload that have no 'abort' or bang
"""
if 'autoload' not in lint_context['lint_target'].path.parts:
return True
has_bang = node['ea']['forceit'] != 0
has_abort = node['attr']['abort'] != 0
return has_bang and has_abort
| mit | Python |
0507dfbd23db74db1c59bd1084647cc49ef19aee | Change logger messages to info | shingonoide/odoo_ezdoo,shingonoide/odoo_ezdoo | addons/website_notfound_redirect/ir_http.py | addons/website_notfound_redirect/ir_http.py | # -*- coding: utf-8 -*-
import logging
import urllib2
from openerp.http import request
from openerp.osv import orm
logger = logging.getLogger(__name__)
class ir_http(orm.AbstractModel):
    """Redirect 404s to a configured legacy site when the page exists there."""
    _inherit = 'ir.http'

    def _handle_exception(self, exception, code=500):
        """On a 404, probe ``website.notfound_redirect_url`` + path and issue
        a 302 redirect when the legacy site serves that page; otherwise defer
        to the default handler.
        """
        code = getattr(exception, 'code', code)
        if code == 404:
            page = request.httprequest.path
            logger.info("404 code... %s" % (page))
            url = request.registry['ir.config_parameter'].get_param(request.cr,
                request.uid, 'website.notfound_redirect_url')
            if url:
                url_request = "%s%s" % (url, page)
                logger.info("The redirect url: %s" % (url_request))
                try:
                    # Probe the legacy site for the missing page.
                    req = urllib2.Request(url_request)
                    request_old = urllib2.urlopen(req)
                except (urllib2.HTTPError, urllib2.URLError):
                    request_old = False
            else:
                request_old = False
            if not request_old:
                # NOTE(review): when no redirect URL is configured,
                # ``url_request`` is unbound here and this log line would
                # raise NameError — confirm.
                logger.info("URL not found: %s" % (url_request))
                return super(ir_http, self)._handle_exception(exception, code)
            else:
                logger.info("Redirect to %s" % (url_request))
                return request.redirect(url_request, code=302)
        return super(ir_http, self)._handle_exception(exception, code)
| # -*- coding: utf-8 -*-
import logging
import urllib2
from openerp.http import request
from openerp.osv import orm
logger = logging.getLogger(__name__)
class ir_http(orm.AbstractModel):
    """Redirect 404s to a configured legacy site when the page exists there."""
    _inherit = 'ir.http'

    def _handle_exception(self, exception, code=500):
        """On a 404, probe ``website.notfound_redirect_url`` + path and issue
        a 302 redirect when the legacy site serves that page; otherwise defer
        to the default handler.
        """
        code = getattr(exception, 'code', code)
        if code == 404:
            page = request.httprequest.path
            logger.warning("404 code... %s" % (page))
            url = request.registry['ir.config_parameter'].get_param(request.cr,
                request.uid, 'website.notfound_redirect_url')
            if url:
                url_request = "%s%s" % (url, page)
                logger.info("The redirect url: %s" % (url_request))
                try:
                    # Probe the legacy site for the missing page.
                    req = urllib2.Request(url_request)
                    request_old = urllib2.urlopen(req)
                except (urllib2.HTTPError, urllib2.URLError):
                    request_old = False
            else:
                request_old = False
            if not request_old:
                # NOTE(review): when no redirect URL is configured,
                # ``url_request`` is unbound here and this log line would
                # raise NameError — confirm.
                logger.warning("URL not found: %s" % (url_request))
                return super(ir_http, self)._handle_exception(exception, code)
            else:
                logger.warning("Redirect to %s" % (url_request))
                return request.redirect(url_request, code=302)
        return super(ir_http, self)._handle_exception(exception, code)
| agpl-3.0 | Python |
1827b4c5cbb53f652f47acdb45358dfdfda6c228 | use autotools. (#11808) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack,iulian787/spack,iulian787/spack,LLNL/spack | var/spack/repos/builtin/packages/mpileaks/package.py | var/spack/repos/builtin/packages/mpileaks/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Mpileaks(AutotoolsPackage):
    """Tool to detect and report leaked MPI objects like MPI_Requests and
    MPI_Datatypes."""

    homepage = "https://github.com/hpc/mpileaks"
    url = "https://github.com/hpc/mpileaks/releases/download/v1.0/mpileaks-1.0.tar.gz"

    version('1.0', '8838c574b39202a57d7c2d68692718aa')

    depends_on("mpi")
    depends_on("adept-utils")
    depends_on("callpath")

    def configure_args(self):
        # Point ./configure at the dependency prefixes Spack resolved.
        return [
            "--with-adept-utils=%s" % self.spec['adept-utils'].prefix,
            "--with-callpath=%s" % self.spec['callpath'].prefix,
        ]
| # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Mpileaks(Package):
    """Tool to detect and report leaked MPI objects like MPI_Requests and
    MPI_Datatypes."""

    homepage = "https://github.com/hpc/mpileaks"
    url = "https://github.com/hpc/mpileaks/releases/download/v1.0/mpileaks-1.0.tar.gz"

    version('1.0', '8838c574b39202a57d7c2d68692718aa')

    depends_on("mpi")
    depends_on("adept-utils")
    depends_on("callpath")

    def install(self, spec, prefix):
        # Classic autotools build, pointing configure at the dependency
        # prefixes resolved by Spack.
        configure("--prefix=" + prefix,
                  "--with-adept-utils=" + spec['adept-utils'].prefix,
                  "--with-callpath=" + spec['callpath'].prefix)
        make()
        make("install")
| lgpl-2.1 | Python |
5b14ed5ad9b45830951b2bf495bf1d46feeed7cf | remove superfluous shebang | freifunkhamburg/ffmap-backend,kpcyrd/ffmap-backend,ffnord/ffmap-backend,freifunk-mwu/ffmap-backend,rubo77/ffmap-backend,ffac/ffmap-backend,FreifunkBremen/ffmap-backend,freifunk-fulda/ffmap-backend,FreifunkJena/ffmap-backend,freifunk-mwu/ffmap-backend,FreifunkBremen/ffmap-backend,freifunkhamburg/ffmap-backend,mweinelt/ffmap-backend,freifunk-kiel/ffmap-backend,ffnord/ffmap-backend | alfred.py | alfred.py | import subprocess
import json
def _fetch(data_type):
    """Query the local alfred daemon for *data_type* records.

    Runs the external ``alfred-json`` tool and returns the values of the
    decoded JSON mapping (one record per node).
    """
    command = ["alfred-json", "-z", "-f", "json", "-r", str(data_type)]
    raw = subprocess.check_output(command)
    parsed = json.loads(raw.decode("utf-8"))
    return parsed.values()
def nodeinfo():
    # alfred data type id used for the nodeinfo records.
    return _fetch(158)


def statistics():
    # alfred data type id used for the statistics records.
    return _fetch(159)


def vis():
    # alfred data type id used for the visualisation (link) records.
    return _fetch(160)
def aliases():
    """Collect per-node alias data keyed by the node's primary MAC address.

    Combines GPS position, firmware release and hostname (when announced)
    from the alfred nodeinfo records.  Nodes that do not announce a MAC
    address cannot be keyed and are skipped instead of raising KeyError
    (previously the unguarded final lookup crashed on such nodes).
    """
    alias = {}
    for node in nodeinfo():
        try:
            mac = node['network']['mac']
        except KeyError:
            continue  # cannot key an entry without a MAC address
        node_alias = {'id': mac}
        try:
            # TODO: better pass lat, lng as a tuple?
            node_alias['gps'] = "{lat}\x20{lng}".format(lat=node['location']['latitude'],
                                                        lng=node['location']['longitude'])
        except KeyError:
            pass  # node announces no (complete) location
        try:
            node_alias['firmware'] = node['software']['firmware']['release']
        except KeyError:
            pass  # node announces no firmware release
        if 'hostname' in node:
            node_alias['name'] = node['hostname']
        elif 'name' in node:
            node_alias['name'] = node['name']
        alias[mac] = node_alias
    return alias
| #!/usr/bin/env python3
import subprocess
import json
def _fetch(data_type):
    """Fetch one alfred data type via the external ``alfred-json`` tool.

    Returns the values of the decoded JSON mapping (one record per node).
    """
    output = subprocess.check_output(["alfred-json", "-z", "-f", "json", "-r", str(data_type)])
    return json.loads(output.decode("utf-8")).values()


def nodeinfo():
    # alfred data type id used for the nodeinfo records.
    return _fetch(158)


def statistics():
    # alfred data type id used for the statistics records.
    return _fetch(159)


def vis():
    # alfred data type id used for the visualisation (link) records.
    return _fetch(160)


def aliases():
    """Build a dict MAC address -> alias attributes (gps/firmware/id/name).

    NOTE(review): the final ``node['network']['mac']`` lookup is unguarded;
    a node without a MAC raises KeyError here even though the 'id' lookup
    above is guarded — confirm whether such nodes can occur.
    """
    alias = {}
    for node in nodeinfo():
        node_alias = {}
        try:
            # TODO: better pass lat, lng as a tuple?
            node_alias['gps'] = "{lat}\x20{lng}".format(lat=node['location']['latitude'],
                                                        lng=node['location']['longitude'])
        except KeyError:
            pass  # node announces no (complete) location
        try:
            node_alias['firmware'] = node['software']['firmware']['release']
        except KeyError:
            pass  # node announces no firmware release
        try:
            node_alias['id'] = node['network']['mac']
        except KeyError:
            pass  # node announces no MAC address
        if 'hostname' in node:
            node_alias['name'] = node['hostname']
        elif 'name' in node:
            node_alias['name'] = node['name']
        if len(node_alias):
            alias[node['network']['mac']] = node_alias
    return alias
| bsd-3-clause | Python |
2c13128cdb2296f3b7920d8b1c083d01445c1a76 | Remove wait | zemogle/raspberrysky | allsky.py | allsky.py | import time
import numpy as np
import logging
import subprocess
import signal
import sys
import time
import os
import json
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(format=FORMAT,level=logging.DEBUG)
logger = logging.getLogger('imgserver')
def camera_active():
    """Return True when a raspistill capture process shows up in ``ps``."""
    # We always launch raspistill with '-awb off', so that switch marks our
    # own capture process in the process listing.
    ps_cmd = "ps aux |grep raspistill -n -w"
    output = subprocess.check_output(ps_cmd, shell=True).decode("utf-8")
    return any('-awb off' in entry for entry in output.split('\n'))
def single_image_raspistill(filename='test.jpg', exp=20000000):
    """Start a raspistill exposure in the background and return its pid.

    Returns False without capturing when another capture is already in
    progress.  ``exp`` is the shutter time in microseconds.
    NOTE(review): ``filename`` appears unused — the output path in the
    command is hard-coded; confirm intent.
    """
    if camera_active():
        return False
    annot = "%Y-%m-%dT%H:%M:%S"
    cmd = f"raspistill -n -w 1012 -h 760 -ISO 800 -ss {exp} -awb off -a 8 -a {annot} -o (unknown)"
    proc = subprocess.Popen(cmd.split(), shell=False)
    # Popen does not wait: returncode stays None until the child exits, so
    # the old ``proc.returncode == 0`` check could never be true and every
    # capture was reported as a camera problem.  Only report an error when
    # raspistill has already died with a non-zero status.
    returncode = proc.poll()
    if returncode is None or returncode == 0:
        sys.stdout.write(f'Image (unknown) Captured')
    else:
        sys.stderr.write(f'Problem with camera')
        sys.stderr.write(f"{proc.stderr}")
    return proc.pid
def check_image_status(pid):
    """Report capture status as JSON by probing whether *pid* is alive.

    Signal 0 delivers nothing; it only checks that the process exists.
    (The 'runnning' spelling is kept as-is: callers may match on it.)
    """
    alive = True
    try:
        os.kill(pid, 0)
    except OSError:
        alive = False
    status = 'runnning' if alive else 'complete'
    return json.dumps({'status': status})
def scale_data(data):
    """Stretch an image array onto the 0-255 display range.

    Clips negatives and subtracts the median background (the input array is
    modified in place), then maps the 99.5th percentile to white so that a
    few hot pixels do not dominate the scaling.
    """
    np.clip(data, 0., None, out=data)
    background = np.median(data)
    data -= background
    np.clip(data, 0., None, out=data)
    sc_data = data  # np.arcsinh(data) was considered as an alternative stretch
    max_val = np.percentile(sc_data, 99.5)
    logging.warning('99.5 =%s' % max_val)
    scaled = np.minimum(sc_data * 255. / (max_val), 255)
    logging.warning('Median of scaled=%s' % np.median(scaled))
    logging.warning('Min scaled=%s' % scaled.min())
    return scaled
if __name__ == '__main__':
    # Manual smoke test: fire off a single capture when run as a script.
    single_image_raspistill()
| import time
import numpy as np
import logging
import subprocess
import signal
import sys
import time
import os
import json
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(format=FORMAT,level=logging.DEBUG)
logger = logging.getLogger('imgserver')
def camera_active():
    """Return True when a raspistill capture process shows up in ``ps``."""
    # is the camera running?
    cmd = "ps aux |grep raspistill -n -w"
    lines = subprocess.check_output(cmd,shell=True)
    for line in lines.decode("utf-8").split('\n'):
        # We always launch raspistill with '-awb off', so that switch marks
        # our own capture process.
        if '-awb off' in line:
            return True
    return False


def single_image_raspistill(filename='test.jpg', exp=20000000):
    """Capture one long exposure with raspistill and return the child pid.

    Returns False without capturing when another capture is in progress.
    ``exp`` is the shutter time in microseconds.
    NOTE(review): ``filename`` appears unused — the output path in the
    command is hard-coded; confirm intent.
    """
    if camera_active():
        return False
    annot = "%Y-%m-%dT%H:%M:%S"
    cmd = f"raspistill -n -w 1012 -h 760 -ISO 800 -ss {exp} -awb off -a 8 -a '{annot}' -o (unknown)"
    proc = subprocess.Popen(cmd.split(), shell=False)
    # Poll for up to ~50 seconds for the exposure to finish.
    # NOTE(review): SIGUSR1 is sent on every iteration — presumably to
    # trigger raspistill's signal-mode capture; confirm against the
    # raspistill documentation.
    for n in range(0,100):
        if proc.poll() is None:
            time.sleep(0.5)
            proc.send_signal(signal.SIGUSR1)
    if proc.returncode == 0:
        sys.stdout.write(f'Image (unknown) Captured')
    else:
        sys.stderr.write(f'Problem with camera')
        sys.stderr.write(f"{proc.stderr}")
    return proc.pid


def check_image_status(pid):
    """ Check For the existence of a unix pid. """
    # Signal 0 delivers nothing; it only probes whether the process exists.
    try:
        os.kill(pid, 0)
    except OSError:
        return json.dumps({'status':'complete'})
    else:
        # NOTE(review): 'runnning' is misspelled; kept because callers may
        # already match on this exact string.
        return json.dumps({'status':'runnning'})


def scale_data(data):
    '''
    Scale image
    - Find the 99.5% value
    - Make all values above 99.5% value white
    '''
    # Clip negatives, then subtract the median background in place.
    data[data<0.]=0.
    median = np.median(data)
    data-= median
    data[data<0.]=0.
    sc_data= data #np.arcsinh(data)
    # Map the 99.5th percentile to 255 so hot pixels do not set the scale.
    max_val = np.percentile(sc_data,99.5)
    logging.warning('99.5 =%s' % max_val)
    scaled = sc_data*255./(max_val)
    scaled[scaled>255]=255
    logging.warning('Median of scaled=%s' % np.median(scaled))
    logging.warning('Min scaled=%s' % scaled.min())
    return scaled


if __name__ == '__main__':
    # Manual smoke test: take a single capture when run as a script.
    single_image_raspistill()
| mit | Python |
8af3aef367135dbbc55e573c6a943a86ff3ccd9d | Use an absolute Path for localization tests | Pierre-Sassoulas/django-survey,Pierre-Sassoulas/django-survey,Pierre-Sassoulas/django-survey | survey/tests/locale/test_locale_normalization.py | survey/tests/locale/test_locale_normalization.py | import os
import platform
import subprocess
import unittest
from pathlib import Path
class TestLocaleNormalization(unittest.TestCase):

    # Absolute path so the check works regardless of the current directory.
    LOCALE_PATH = Path("survey", "locale").absolute()

    def test_normalization(self):
        """ We test if the messages were properly created with makemessages --no-obsolete --no-wrap. """
        # Windows has no ``python3`` launcher; use ``py -3`` there.
        if platform.system() == "Windows":
            python_3 = ["py", "-3"]
        else:
            python_3 = ["python3"]
        makemessages_command = python_3 + [
            "manage.py",
            "makemessages",
            "--no-obsolete",
            "--no-wrap",
            "--ignore",
            "venv",
        ]
        number_of_language = len(os.listdir(self.LOCALE_PATH))
        subprocess.check_call(makemessages_command)
        git_diff_command = ["git", "diff", self.LOCALE_PATH]
        git_diff = subprocess.check_output(git_diff_command).decode("utf8")
        # In the diff we should have a change only for the date of the generation
        # So 2 * @@ * number of language
        number_of_change = git_diff.count("@@") / 2
        # A trailing comma previously made ``msg`` a one-element tuple;
        # keep it a plain string so assertEqual reports it readably.
        msg = (
            "You did not update the translation following your changes. Maybe you did not use the "
            "normalized 'python3 manage.py makemessages --no-obsolete --no-wrap' ? If you're "
            "working locally, just use 'git add {}', we launched it during tests.".format(self.LOCALE_PATH)
        )
        self.assertEqual(number_of_change, number_of_language, msg)
| import os
import platform
import subprocess
import unittest
class TestLocaleNormalization(unittest.TestCase):

    # NOTE(review): relative path — this test only works when run from the
    # repository root.
    LOCALE_PATH = "survey/locale/"

    def test_normalization(self):
        """ We test if the messages were properly created with makemessages --no-obsolete --no-wrap. """
        # Windows has no ``python3`` launcher; use ``py -3`` there.
        if platform.system() == "Windows":
            python_3 = ["py", "-3"]
        else:
            python_3 = ["python3"]
        makemessages_command = python_3 + [
            "manage.py",
            "makemessages",
            "--no-obsolete",
            "--no-wrap",
            "--ignore",
            "venv",
        ]
        number_of_language = len(os.listdir(self.LOCALE_PATH))
        subprocess.check_call(makemessages_command)
        git_diff_command = ["git", "diff", self.LOCALE_PATH]
        git_diff = subprocess.check_output(git_diff_command).decode("utf8")
        # In the diff we should have a change only for the date of the generation
        # So 2 * @@ * number of language
        number_of_change = git_diff.count("@@") / 2
        # NOTE(review): the trailing comma below makes ``msg`` a one-element
        # tuple; assertEqual accepts it but prints it awkwardly — confirm.
        msg = (
            "You did not update the translation following your changes. Maybe you did not use the "
            "normalized 'python3 manage.py makemessages --no-obsolete --no-wrap' ? If you're "
            "working locally, just use 'git add {}', we launched it during tests.".format(self.LOCALE_PATH),
        )
        self.assertEqual(number_of_change, number_of_language, msg)
| agpl-3.0 | Python |
7a174e05108b673ae3e6a7b259ee8992b764e973 | Use more robust quickfix parser. | markstory/lint-review,markstory/lint-review,markstory/lint-review | lintreview/tools/yamllint.py | lintreview/tools/yamllint.py | import os
import logging
from lintreview.tools import Tool
from lintreview.tools import run_command, process_quickfix
from lintreview.utils import in_path
log = logging.getLogger(__name__)
class Yamllint(Tool):
    """Lint YAML files by shelling out to the ``yamllint`` executable."""

    name = 'yamllint'

    def check_dependencies(self):
        """Return True when the ``yamllint`` executable is on the PATH."""
        return in_path('yamllint')

    def match_file(self, filename):
        """Only YAML files (.yml / .yaml) are handled by this tool."""
        _, extension = os.path.splitext(os.path.basename(filename))
        return extension in ('.yml', '.yaml')

    def process_files(self, files):
        """Run yamllint once over all *files* and record the problems.

        A single child process is used for the whole batch to save
        resources; results are parsed from yamllint's parsable output.
        """
        log.debug('Processing %s files with %s', files, self.name)
        command = ['yamllint', '--format=parsable']
        config = self.options.get('config')
        # Pass the project's config file through when one is configured.
        if config:
            command.extend(['-c', self.apply_base(config)])
        command.extend(files)
        output = run_command(command, split=True, ignore_error=True)
        if not output:
            log.debug('No yamllint errors found.')
            return False
        process_quickfix(self.problems, output, lambda x: x)
| import os
import logging
from lintreview.tools import Tool
from lintreview.tools import run_command
from lintreview.utils import in_path
log = logging.getLogger(__name__)
class Yamllint(Tool):
    """Lint YAML files by shelling out to the ``yamllint`` executable."""

    name = 'yamllint'

    def check_dependencies(self):
        """
        See if yamllint is on the PATH
        """
        return in_path('yamllint')

    def match_file(self, filename):
        # Only .yml / .yaml files are handled by this tool.
        base = os.path.basename(filename)
        name, ext = os.path.splitext(base)
        return ext in ['.yml', '.yaml']

    def process_files(self, files):
        """
        Run code checks with yamllint.
        Only a single process is made for all files
        to save resources.
        Configuration is not supported at this time
        """
        log.debug('Processing %s files with %s', files, self.name)
        command = ['yamllint', '--format=parsable']
        # Add config file if its present
        if self.options.get('config'):
            command += ['-c', self.apply_base(self.options['config'])]
        command += files
        output = run_command(command, split=True, ignore_error=True)
        if not output:
            log.debug('No yamllint errors found.')
            return False
        for line in output:
            # Note: the loop variable ``line`` is rebound to the parsed line
            # number here — works, but shadows the raw output line.
            filename, line, error = self._parse_line(line)
            self.problems.add(filename, line, error)

    def _parse_line(self, line):
        """
        yamllint only generates results as stdout.
        Parse the output for real data.

        Returns a (filename, line_number, message) tuple for a line in
        ``file:line:col: message`` parsable format.
        """
        parts = line.split(':', 3)
        # Some lines carry only two colons before the message.
        if len(parts) == 3:
            message = parts[2].strip()
        else:
            message = parts[3].strip()
        return (parts[0], int(parts[1]), message)
| mit | Python |
3e8a64f522986f0b41836e045bf3826def55577f | Disable internet access for non-online tests | dpshelio/sunpy,dpshelio/sunpy,dpshelio/sunpy | sunpy/conftest.py | sunpy/conftest.py | from __future__ import absolute_import, print_function
from functools import partial
import os
import socket
import tempfile
import json
from sunpy.extern.six.moves.urllib.request import urlopen
from sunpy.extern.six.moves.urllib.error import URLError
import pytest
# Force MPL to use non-gui backends for testing.
try:
import matplotlib
except ImportError:
pass
else:
matplotlib.use('Agg')
from astropy.tests import disable_internet
from sunpy.tests import hash
hash_library_original_len = len(hash.hash_library)
GOOGLE_URL = 'http://www.google.com'
def site_reachable(url):
    """Return True when an HTTP GET of *url* succeeds within one second."""
    try:
        urlopen(url, timeout=1)
    except (URLError, socket.timeout):
        # Connection failures and timeouts both mean "not reachable".
        return False
    return True
is_online = partial(site_reachable, GOOGLE_URL)
def pytest_runtest_setup(item):
    """
    pytest hook that skips 'online'-marked tests when the client is
    offline (detected by trying to reach http://www.google.com) and
    disables internet access for all other tests.
    """
    if isinstance(item, item.Function):
        if 'online' in item.keywords and not is_online():
            msg = 'skipping test {0} (reason: client seems to be offline)'
            pytest.skip(msg.format(item.name))

        # Tests without the 'online' mark must not touch the network.
        if 'online' not in item.keywords:
            disable_internet.turn_off_internet()


def pytest_runtest_teardown(item, nextitem):
    # Re-enable network access after each test, regardless of its marks.
    disable_internet.turn_on_internet()


def pytest_unconfigure(config):
    """Move figure-test images aside and report hash-library additions."""
    tempdir = tempfile.mkdtemp(suffix="_figures")

    # the hash_library is indexed by the name of the test but we want to look
    # things up with the hash value
    inv_hash_library = {v: k for k, v in hash.hash_library.items()}

    for h in hash.file_list:
        test_name = inv_hash_library.get(h, '')
        if test_name != '':
            os.rename(hash.file_list[h], os.path.join(tempdir,
                                                      test_name + '.png'))
    print('All test files for figure hashes can be found in {0}'.format(
        tempdir))

    # Check if additions have been made to the hash library
    if len(hash.hash_library) > hash_library_original_len:
        # Write the new hash library in JSON
        tempdir = tempfile.mkdtemp()
        hashfile = os.path.join(tempdir, hash.HASH_LIBRARY_NAME)
        # NOTE(review): the file is opened in binary mode but json.dump
        # writes str on Python 3 — confirm this path is Python-2-only.
        with open(hashfile, 'wb') as outfile:
            json.dump(
                hash.hash_library,
                outfile,
                sort_keys=True,
                indent=4,
                separators=(',', ': '))
        print(
            "The hash library has expanded and should be copied to sunpy/tests/")
        print(" " + hashfile)
| from __future__ import absolute_import, print_function
from functools import partial
import os
import socket
import tempfile
import json
from sunpy.extern.six.moves.urllib.request import urlopen
from sunpy.extern.six.moves.urllib.error import URLError
import pytest
# Force MPL to use non-gui backends for testing.
try:
import matplotlib
except ImportError:
pass
else:
matplotlib.use('Agg')
from sunpy.tests import hash
hash_library_original_len = len(hash.hash_library)
GOOGLE_URL = 'http://www.google.com'
def site_reachable(url):
    """Return True when an HTTP GET of *url* succeeds within one second."""
    try:
        urlopen(url, timeout=1)
    except (URLError, socket.timeout):
        # Connection failures and timeouts both mean "not reachable".
        return False
    else:
        return True


# Convenience probe bound to the canonical connectivity-check URL.
is_online = partial(site_reachable, GOOGLE_URL)


def pytest_runtest_setup(item):
    """pytest hook that skips 'online'-marked tests when the client is
    offline (detected by trying to reach http://www.google.com).
    """
    if isinstance(item, item.Function):
        if 'online' in item.keywords and not is_online():
            msg = 'skipping test {0} (reason: client seems to be offline)'
            pytest.skip(msg.format(item.name))


def pytest_unconfigure(config):
    """Move figure-test images aside and report hash-library additions."""
    tempdir = tempfile.mkdtemp(suffix="_figures")
    # the hash_library is indexed by the name of the test but we want to look
    # things up with the hash value
    inv_hash_library = {v: k for k, v in hash.hash_library.items()}
    for h in hash.file_list:
        test_name = inv_hash_library.get(h, '')
        if test_name != '':
            os.rename(hash.file_list[h], os.path.join(tempdir, test_name + '.png'))
    print('All test files for figure hashes can be found in {0}'.format(tempdir))

    # Check if additions have been made to the hash library
    if len(hash.hash_library) > hash_library_original_len:
        # Write the new hash library in JSON
        tempdir = tempfile.mkdtemp()
        hashfile = os.path.join(tempdir, hash.HASH_LIBRARY_NAME)
        # NOTE(review): binary mode + json.dump writes str on Python 3 —
        # confirm this path is Python-2-only.
        with open(hashfile, 'wb') as outfile:
            json.dump(hash.hash_library, outfile, sort_keys=True, indent=4, separators=(',', ': '))
        print("The hash library has expanded and should be copied to sunpy/tests/")
        print(" " + hashfile)
| bsd-2-clause | Python |
9ff9a5585ad1535840fd802421c0664ae75c6460 | Add some additional test cases for types. | mistio/libcloud,Kami/libcloud,apache/libcloud,Kami/libcloud,andrewsomething/libcloud,andrewsomething/libcloud,andrewsomething/libcloud,apache/libcloud,mistio/libcloud,mistio/libcloud,Kami/libcloud,apache/libcloud | libcloud/test/compute/test_types.py | libcloud/test/compute/test_types.py | import sys
import unittest
from unittest import TestCase
from libcloud.compute.types import Provider, NodeState, StorageVolumeState, \
VolumeSnapshotState, Type
class TestType(Type):
    # Sample enum-style values used to exercise Type's helpers below.
    INUSE = "inuse"
    NOTINUSE = "NOTINUSE"
class TestTestType(TestCase):
    """Exercises the generic Type helper class via the TestType sample."""

    model = TestType

    def test_provider_tostring(self):
        # Use the class under test (TestType) for the reverse lookup, not
        # Provider — the previous Provider.tostring(...) call looked up
        # TestType values on an unrelated enumeration.
        self.assertEqual(TestType.tostring(TestType.INUSE), "INUSE")
        self.assertEqual(TestType.tostring(TestType.NOTINUSE), "NOTINUSE")

    def test_provider_fromstring(self):
        self.assertEqual(TestType.fromstring("inuse"), TestType.INUSE)
        self.assertEqual(TestType.fromstring("NOTINUSE"), TestType.NOTINUSE)

    def test_provider_fromstring_caseinsensitive(self):
        # fromstring should accept any casing of the stored value.
        self.assertEqual(TestType.fromstring("INUSE"), TestType.INUSE)
        self.assertEqual(TestType.fromstring("notinuse"), TestType.NOTINUSE)

    def test_compare_as_string(self):
        # Type members compare equal to their raw string values.
        self.assertTrue(TestType.INUSE == 'inuse')
        self.assertFalse(TestType.INUSE == 'bar')
class TestProvider(TestCase):
    """Round-trip checks for the Provider enumeration helpers."""

    def test_provider_tostring(self):
        self.assertEqual(Provider.tostring(Provider.RACKSPACE), "RACKSPACE")

    def test_provider_fromstring(self):
        self.assertEqual(Provider.fromstring("rackspace"), Provider.RACKSPACE)


class TestNodeState(TestCase):
    """Round-trip checks for the NodeState enumeration helpers."""

    def test_nodestate_tostring(self):
        self.assertEqual(NodeState.tostring(NodeState.RUNNING), "RUNNING")

    def test_nodestate_fromstring(self):
        self.assertEqual(NodeState.fromstring("running"), NodeState.RUNNING)


class TestStorageVolumeState(TestCase):
    """Round-trip checks for the StorageVolumeState enumeration helpers."""

    def test_storagevolumestate_tostring(self):
        self.assertEqual(
            StorageVolumeState.tostring(StorageVolumeState.AVAILABLE),
            "AVAILABLE"
        )

    def test_storagevolumestate_fromstring(self):
        self.assertEqual(
            StorageVolumeState.fromstring("available"),
            StorageVolumeState.AVAILABLE
        )


class TestVolumeSnapshotState(TestCase):
    """Round-trip checks for the VolumeSnapshotState enumeration helpers."""

    def test_volumesnapshotstate_tostring(self):
        self.assertEqual(
            VolumeSnapshotState.tostring(VolumeSnapshotState.AVAILABLE),
            "AVAILABLE"
        )

    def test_volumesnapshotstate_fromstring(self):
        self.assertEqual(
            VolumeSnapshotState.fromstring("available"),
            VolumeSnapshotState.AVAILABLE
        )


if __name__ == '__main__':
    # Allow running this module directly as a test script.
    sys.exit(unittest.main())
| import sys
import unittest
from unittest import TestCase
from libcloud.compute.types import Provider, NodeState, StorageVolumeState, \
VolumeSnapshotState, Type
class TestType(Type):
    # Sample enum-style value used to exercise Type's helpers below.
    INUSE = "inuse"


class TestTestType(TestCase):
    """Exercises the generic Type helper class via the TestType sample."""

    model = TestType
    attribute = TestType.INUSE

    def test_provider_tostring(self):
        # NOTE(review): uses Provider.tostring on a TestType value, while the
        # fromstring tests below use TestType — confirm this asymmetry.
        self.assertEqual(Provider.tostring(TestType.INUSE), "INUSE")

    def test_provider_fromstring(self):
        self.assertEqual(TestType.fromstring("inuse"), TestType.INUSE)

    def test_provider_fromstring_caseinsensitive(self):
        # fromstring should accept any casing of the stored value.
        self.assertEqual(TestType.fromstring("INUSE"), TestType.INUSE)

    def test_compare_as_string(self):
        # Type members compare equal to their raw string values.
        self.assertTrue(TestType.INUSE == 'inuse')
        self.assertFalse(TestType.INUSE == 'bar')


class TestProvider(TestCase):
    """Round-trip checks for the Provider enumeration helpers."""

    def test_provider_tostring(self):
        self.assertEqual(Provider.tostring(Provider.RACKSPACE), "RACKSPACE")

    def test_provider_fromstring(self):
        self.assertEqual(Provider.fromstring("rackspace"), Provider.RACKSPACE)


class TestNodeState(TestCase):
    """Round-trip checks for the NodeState enumeration helpers."""

    def test_nodestate_tostring(self):
        self.assertEqual(NodeState.tostring(NodeState.RUNNING), "RUNNING")

    def test_nodestate_fromstring(self):
        self.assertEqual(NodeState.fromstring("running"), NodeState.RUNNING)


class TestStorageVolumeState(TestCase):
    """Round-trip checks for the StorageVolumeState enumeration helpers."""

    def test_storagevolumestate_tostring(self):
        self.assertEqual(
            StorageVolumeState.tostring(StorageVolumeState.AVAILABLE),
            "AVAILABLE"
        )

    def test_storagevolumestate_fromstring(self):
        self.assertEqual(
            StorageVolumeState.fromstring("available"),
            StorageVolumeState.AVAILABLE
        )


class TestVolumeSnapshotState(TestCase):
    """Round-trip checks for the VolumeSnapshotState enumeration helpers."""

    def test_volumesnapshotstate_tostring(self):
        self.assertEqual(
            VolumeSnapshotState.tostring(VolumeSnapshotState.AVAILABLE),
            "AVAILABLE"
        )

    def test_volumesnapshotstate_fromstring(self):
        self.assertEqual(
            VolumeSnapshotState.fromstring("available"),
            VolumeSnapshotState.AVAILABLE
        )


if __name__ == '__main__':
    # Allow running this module directly as a test script.
    sys.exit(unittest.main())
3279c37bea364f644045dab7aa13c2639e10767d | Remove a debugging statement | yougov/pmxbot,yougov/pmxbot,yougov/pmxbot | tests/functional/plugins/pmxbot_test_commands.py | tests/functional/plugins/pmxbot_test_commands.py | from pmxbot import core
@core.command("crashnow")
def crash_immediately():
    "Crash now!"
    # Fixture command: raises immediately so handler error paths can be tested.
    raise TypeError("You should never call this!")


@core.command("crashiter")
def crash_in_iterator():
    "Crash in iterator!"
    # The ``yield`` below makes this a generator, so the TypeError is raised
    # on the first iteration rather than at call time.
    raise TypeError("You should never call this!")
    yield "You can't touch this"


@core.regexp('feck', r'\bfeck\b', doc="We don't use that sort of language around here")
def foobar(client, event, channel, nick, match):
    # Responds only when the regexp actually matched.
    if match:
        return "Clean up your language %s" % nick


@core.command()
def echo(rest):
    "echo"
    # Fixture command: returns its arguments unchanged.
    return rest
| from pmxbot import core
@core.command("crashnow")
def crash_immediately():
    "Crash now!"
    # Fixture command: raises immediately so handler error paths can be tested.
    raise TypeError("You should never call this!")


@core.command("crashiter")
def crash_in_iterator():
    "Crash in iterator!"
    # The ``yield`` below makes this a generator, so the TypeError is raised
    # on the first iteration rather than at call time.
    raise TypeError("You should never call this!")
    yield "You can't touch this"


@core.regexp('feck', r'\bfeck\b', doc="We don't use that sort of language around here")
def foobar(client, event, channel, nick, match):
    # Yields the reprimand plus the match repr (debugging output).
    if match:
        yield "Clean up your language %s" % nick
        yield repr(match)


@core.command()
def echo(rest):
    "echo"
    # Fixture command: returns its arguments unchanged.
    return rest
| mit | Python |
b0d5d9ee2e507906f5af75d5f62c0db3836dfc00 | Reformat for 80 chars | gratipay/aspen.py,gratipay/aspen.py | aspen/hooks/filters.py | aspen/hooks/filters.py | import re
def by_regex(hook, regex_tuples, default=True):
    """A filter for hooks.

    regex_tuples maps a regex pattern to a boolean: when a pattern matches
    the requested URI, the hook is applied (True) or the request is passed
    through untouched (False).  When no pattern matches, *default* is
    returned.
    """
    regex_res = [(re.compile(regex), disposition)
                 for regex, disposition in regex_tuples.items()]

    def filtered_hook(request):
        for regex, disposition in regex_res:
            # Compiled patterns expose .match(); the previous .matches()
            # call raised AttributeError on every request.
            if regex.match(request.line.uri):
                if disposition:
                    return hook(request)
                else:
                    return request
        return default

    return filtered_hook
def by_dict(hook, truthdict, default=True):
    """A filter for hooks.

    truthdict maps a URI to a truthy/falsy flag: when the requested URI's
    flag (or *default* for unknown URIs) is truthy, the hook runs;
    otherwise the request passes through untouched.
    """
    def filtered_hook(request):
        if truthdict.get(request.line.uri, default):
            return hook(request)
        return request
    return filtered_hook
| import re
def by_regex(hook, regex_tuples, default=True):
    """A filter for hooks. regex_tuples maps a regex pattern to a boolean:
    when a pattern matches the requested URI, the hook is applied (True) or
    the request is passed through untouched (False).  When no pattern
    matches, *default* is returned.
    """
    regex_res = [(re.compile(regex), disposition)
                 for regex, disposition in regex_tuples.items()]

    def filtered_hook(request):
        for regex, disposition in regex_res:
            # Compiled patterns expose .match(); the previous .matches()
            # call raised AttributeError on every request.
            if regex.match(request.line.uri):
                if disposition:
                    return hook(request)
                else:
                    return request
        return default

    return filtered_hook
def by_dict(hook, truthdict, default=True):
    """A filter for hooks.

    truthdict is a mapping of URI -> filter? where if the requested URI is a key in the dict, then
    the hook is applied based on the filter? value.  URIs missing from the
    mapping fall back to *default*.
    """
    def filtered_hook(request):
        # Truthy flag: run the hook; falsy: pass the request through.
        do_hook = truthdict.get(request.line.uri, default)
        if do_hook:
            return hook(request)
        else:
            return request
    return filtered_hook
| mit | Python |
cc246bd43efbc5e3873525f160eb360fb3335392 | fix logging issue with create_mapping | 4dn-dcic/fourfront,hms-dbmi/fourfront,hms-dbmi/fourfront,4dn-dcic/fourfront,4dn-dcic/fourfront,hms-dbmi/fourfront,4dn-dcic/fourfront,hms-dbmi/fourfront,hms-dbmi/fourfront | src/encoded/commands/create_mapping_on_deploy.py | src/encoded/commands/create_mapping_on_deploy.py | import argparse
import structlog
import logging
from pyramid.paster import get_app
from snovault.elasticsearch.create_mapping import run as run_create_mapping
from snovault import set_logging
from dcicutils.beanstalk_utils import whodaman
log = structlog.getLogger(__name__)
EPILOG = __doc__
def main():
    """Entry point: (re)create the Elasticsearch mapping for a deployment.

    Staging (a ``webprod`` env that is not serving production data) and
    ``mastertest`` rebuild the mapping unconditionally; any other
    environment rebuilds with a check first and skips indexing.
    """
    parser = argparse.ArgumentParser(
        description="Create Elasticsearch mapping on deployment", epilog=EPILOG,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('config_uri', help="path to configfile")
    parser.add_argument('--app-name', help="Pyramid app name in configfile")
    args = parser.parse_args()

    app = get_app(args.config_uri, args.app_name)
    # Loading app will have configured from config file. Reconfigure here:
    set_logging(app.registry.settings.get('production'), level=logging.DEBUG)

    # check if staging
    try:
        # whodaman() presumably returns the env currently serving production
        # data — TODO confirm against dcicutils.beanstalk_utils.
        data_env = whodaman()
        env = app.registry.settings.get('env.name')
        if 'webprod' in env:
            if data_env != env:
                log.info("looks like we are on staging, run create mapping without check first")
                run_create_mapping(app, check_first=False)
                return
        # handle mastertest ... by blowing away all data first
        if 'mastertest' in env:
            run_create_mapping(app, check_first=False, purge_queue=True)
            return
    except Exception:
        # Best-effort: if the environment probe fails, fall through to the
        # conservative default below rather than aborting the deploy.
        import traceback
        log.warning("error checking whodaman: %s " % traceback.format_exc())
        log.warning("couldn't get wodaman, so assuming NOT Stagging")
    log.info("looks like we are NOT on staging so run create mapping with check first")
    run_create_mapping(app, check_first=True, purge_queue=True, skip_indexing=True)
| import argparse
import structlog
import logging
from pyramid.paster import get_app
from snovault.elasticsearch.create_mapping import run as run_create_mapping
from dcicutils.beanstalk_utils import whodaman
log = structlog.getLogger(__name__)
EPILOG = __doc__
def main():
    """Entry point: (re)create the Elasticsearch mapping for a deployment.

    NOTE(review): this version calls ``set_logging`` without importing it
    (it is absent from this module's imports), so the call raises NameError
    at runtime.
    """
    parser = argparse.ArgumentParser(
        description="Create Elasticsearch mapping on deployment", epilog=EPILOG,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('config_uri', help="path to configfile")
    parser.add_argument('--app-name', help="Pyramid app name in configfile")
    args = parser.parse_args()

    app = get_app(args.config_uri, args.app_name)
    # Loading app will have configured from config file. Reconfigure here:
    set_logging(app.registry.settings.get('production'), level=logging.DEBUG)

    # check if staging
    try:
        # whodaman() presumably returns the env currently serving production
        # data — TODO confirm against dcicutils.beanstalk_utils.
        data_env = whodaman()
        env = app.registry.settings.get('env.name')
        if 'webprod' in env:
            if data_env != env:
                log.info("looks like we are on staging, run create mapping without check first")
                run_create_mapping(app, check_first=False)
                return
        # handle mastertest ... by blowing away all data first
        if 'mastertest' in env:
            run_create_mapping(app, check_first=False, purge_queue=True)
            return
    except Exception:
        # Best-effort: if the environment probe fails, fall through to the
        # conservative default below rather than aborting the deploy.
        import traceback
        log.warning("error checking whodaman: %s " % traceback.format_exc())
        log.warning("couldn't get wodaman, so assuming NOT Stagging")
    log.info("looks like we are NOT on staging so run create mapping with check first")
    run_create_mapping(app, check_first=True, purge_queue=True, skip_indexing=True)
| mit | Python |
4988bbe7fa3e75062bc0048aff18cba8d57a9bc1 | add more generic get_objs function | heracek/djangotoolbox,adieu/djangotoolbox | djangotoolbox/contrib/auth/models.py | djangotoolbox/contrib/auth/models.py | from django.db import models
from django.contrib.auth.models import User, Group, Permission
from djangotoolbox.fields import ListField
def get_objs(obj_cls, obj_ids):
    """Fetch the *obj_cls* model instances whose ids are in *obj_ids*.

    Returns a (possibly empty) set; the database is only queried when at
    least one id was given.
    """
    objs = set()
    if obj_ids:
        # order_by() has to be used to override invalid default Permission filter
        objs.update(obj_cls.objects.filter(id__in=obj_ids).order_by('name'))
    return objs
class UserPermissionList(models.Model):
    """Maps a User to an explicit list of Permission ids."""
    user = models.ForeignKey(User)
    _fk_list = ListField(models.ForeignKey(Permission))

    def _get_permissions(self):
        # Resolve the stored ids once per instance and memoise the result.
        if not hasattr(self, '_permissions_cache'):
            setattr(self, '_permissions_cache', get_objs(Permission, self._fk_list))
        return self._permissions_cache

    permission_list = property(_get_permissions)


class GroupPermissionList(models.Model):
    """Maps a Group to an explicit list of Permission ids."""
    group = models.ForeignKey(Group)
    _fk_list = ListField(models.ForeignKey(Permission))

    def _get_permissions(self):
        # Resolve the stored ids once per instance and memoise the result.
        if not hasattr(self, '_permissions_cache'):
            setattr(self, '_permissions_cache', get_objs(Permission, self._fk_list))
        return self._permissions_cache

    # NOTE(review): exposed as ``permissions`` here but as
    # ``permission_list`` on UserPermissionList — confirm the asymmetry.
    permissions = property(_get_permissions)


class GroupList(models.Model):
    """
    GroupLists are used to map a list of groups to a user
    """
    user = models.ForeignKey(User)
    _fk_list = ListField(models.ForeignKey(Group))

    def __unicode__(self):
        return u'%s' %(self.user.username)

    def _get_group_list(self):
        # Resolve the stored group ids once per instance and memoise.
        if not hasattr(self, '_groups_cache'):
            setattr(self, '_groups_cache', get_objs(Group, self._fk_list))
        return self._groups_cache

    groups = property(_get_group_list)
| from django.db import models
from django.contrib.auth.models import User, Group, Permission
from djangotoolbox.fields import ListField
class UserPermissionList(models.Model):
user = models.ForeignKey(User)
_fk_list = ListField(models.ForeignKey(Permission))
def _get_permissions(self):
if not hasattr(self, '_permissions_cache'):
perm_ids = self._fk_list
permissions = set()
if len(perm_ids) > 0:
# order_by() has to be used to override invalid default Permission filter
permissions.update(Permission.objects.filter(id__in=perm_ids).order_by('name'))
setattr(self, '_permissions_cache', permissions)
return self._permissions_cache
permission_list = property(_get_permissions)
class GroupPermissionList(models.Model):
group = models.ForeignKey(Group)
_fk_list = ListField(models.ForeignKey(Permission))
def _get_permissions(self):
if not hasattr(self, '_permissions_cache'):
perm_ids = self._fk_list
permissions = set()
if len(perm_ids) > 0:
# order_by() has to be used to override invalid default Permission filter
permissions.update(Permission.objects.filter(id__in=perm_ids).order_by('name'))
setattr(self, '_permissions_cache', permissions)
return self._permissions_cache
permissions = property(_get_permissions)
class GroupList(models.Model):
"""
GroupLists are used to map a list of groups to a user
"""
user = models.ForeignKey(User)
_fk_list = ListField(models.ForeignKey(Group))
def __unicode__(self):
return u'%s' %(self.user.username)
def _get_group_list(self):
if not hasattr(self, '_groups_cache'):
group_ids = self._fk_list
groups = set()
if len(group_ids) > 0:
# order_by() has to be used to override invalid default Permission filter
groups.update(Group.objects.filter(id__in=group_ids))
setattr(self, '_groups_cache', groups)
return self._groups_cache
groups = property(_get_group_list)
| bsd-3-clause | Python |
fb6c1cc3da1a632e922c7bdeb467ad9f9cb32efe | undo temporary changes needed to fix #2699 | lsaffre/lino,lino-framework/lino,lino-framework/lino,lsaffre/lino,lsaffre/lino,lino-framework/lino,lsaffre/lino,lino-framework/lino,lsaffre/lino,lino-framework/lino | lino/utils/ajax.py | lino/utils/ajax.py | # -*- coding: UTF-8 -*-
# Copyright 2011-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""This middleware is automatically being installed on every Lino
site.
When an exception occurs during an AJAX call, Lino should not respond
with Django's default HTML formatted error report but with a
plain-text traceback because that's more readable when seen in a
browser console.
Originally inspired by http://djangosnippets.org/snippets/650
Additions by LS:
- also logs a warning on the development server because that is easier
to read than opening firebug and look at the response.
- must work also when :setting:`DEBUG` is False. Yes, on a production
server it is not wise to publish the traceback, but our nice HTML
formatted "Congratulations, you found a problem" page is never the
right answer to an AJAX call.
- :func:`format_request` adds information about the incoming call,
including POST or PUT data.
"""
from __future__ import unicode_literals
from builtins import object
import sys
import traceback
from django.conf import settings
# from django.http import HttpResponseServerError
from django.http import HttpResponse
# from django.http import HttpResponseForbidden, HttpResponseBadRequest
from django.utils.encoding import smart_text
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from lino.core.utils import format_request
from django.utils.deprecation import MiddlewareMixin
class AjaxExceptionResponse(MiddlewareMixin):
"""The middleware class definition."""
no_traceback = (PermissionDenied, ObjectDoesNotExist)
# no_traceback = (PermissionDenied, )
# see also /docs/specs/invalid_requests.rst
# it can be helpful to temporarily disable filtering of ObjectDoesNotExist
# exceptions on a production site in order to debug problems like #2699
def process_exception(self, request, exception):
if request.is_ajax():
(exc_type, exc_info, tb) = sys.exc_info()
# response to client:
response = "%s: " % exc_type.__name__
response += "%s" % exc_info
if not isinstance(exception, self.no_traceback):
# message to be logged:
msg = "AjaxExceptionResponse {0}\n".format(response)
msg += "\nin request {0}\n".format(format_request(request))
msg += "TRACEBACK:\n"
for tb in traceback.format_tb(tb):
msg += smart_text(tb)
if settings.DEBUG:
settings.SITE.logger.warning(msg)
else:
settings.SITE.logger.exception(msg)
return HttpResponse(response, status=400)
# if isinstance(exception, ObjectDoesNotExist):
# return HttpResponseBadRequest(response)
# return HttpResponseServerError(response)
| # -*- coding: UTF-8 -*-
# Copyright 2011-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""This middleware is automatically being installed on every Lino
site.
When an exception occurs during an AJAX call, Lino should not respond
with Django's default HTML formatted error report but with a
plain-text traceback because that's more readable when seen in a
browser console.
Originally inspired by http://djangosnippets.org/snippets/650
Additions by LS:
- also logs a warning on the development server because that is easier
to read than opening firebug and look at the response.
- must work also when :setting:`DEBUG` is False. Yes, on a production
server it is not wise to publish the traceback, but our nice HTML
formatted "Congratulations, you found a problem" page is never the
right answer to an AJAX call.
- :func:`format_request` adds information about the incoming call,
including POST or PUT data.
"""
from __future__ import unicode_literals
from builtins import object
import sys
import traceback
from django.conf import settings
# from django.http import HttpResponseServerError
from django.http import HttpResponse
# from django.http import HttpResponseForbidden, HttpResponseBadRequest
from django.utils.encoding import smart_text
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from lino.core.utils import format_request
from django.utils.deprecation import MiddlewareMixin
class AjaxExceptionResponse(MiddlewareMixin):
"""The middleware class definition."""
#no_traceback = (PermissionDenied, ObjectDoesNotExist)
no_traceback = (PermissionDenied, )
def process_exception(self, request, exception):
if request.is_ajax():
(exc_type, exc_info, tb) = sys.exc_info()
# response to client:
response = "%s: " % exc_type.__name__
response += "%s" % exc_info
if not isinstance(exception, self.no_traceback):
# message to be logged:
msg = "AjaxExceptionResponse {0}\n".format(response)
msg += "\nin request {0}\n".format(format_request(request))
msg += "TRACEBACK:\n"
for tb in traceback.format_tb(tb):
msg += smart_text(tb)
if settings.DEBUG:
settings.SITE.logger.warning(msg)
else:
settings.SITE.logger.exception(msg)
return HttpResponse(response, status=400)
# if isinstance(exception, ObjectDoesNotExist):
# return HttpResponseBadRequest(response)
# return HttpResponseServerError(response)
| unknown | Python |
9c9e564d51d44fb27101249d57d769828f14e97e | Fix the failing dns test on Windows | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | tests/integration/modules/test_win_dns_client.py | tests/integration/modules/test_win_dns_client.py | # -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
from tests.support.helpers import destructiveTest
# Import Salt libs
import salt.utils.platform
@skipIf(not salt.utils.platform.is_windows(), 'windows test only')
class WinDNSTest(ModuleCase):
'''
Test for salt.modules.win_dns_client
'''
@destructiveTest
def test_add_remove_dns(self):
'''
Test add and removing a dns server
'''
# Get a list of interfaces on the system
interfaces = self.run_function('network.interfaces_names')
skipIf(interfaces.count == 0, 'This test requires a network interface')
interface = interfaces[0]
dns = '8.8.8.8'
# add dns server
self.assertTrue(self.run_function('win_dns_client.add_dns', [dns, interface], index=42))
srvs = self.run_function('win_dns_client.get_dns_servers', interface=interface)
self.assertIn(dns, srvs)
# remove dns server
self.assertTrue(self.run_function('win_dns_client.rm_dns', [dns], interface=interface))
srvs = self.run_function('win_dns_client.get_dns_servers', interface=interface)
self.assertNotIn(dns, srvs)
| # -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
from tests.support.helpers import destructiveTest
# Import Salt libs
import salt.utils.platform
@skipIf(not salt.utils.platform.is_windows(), 'windows test only')
class WinDNSTest(ModuleCase):
'''
Test for salt.modules.win_dns_client
'''
@destructiveTest
def test_add_remove_dns(self):
'''
Test add and removing a dns server
'''
dns = '8.8.8.8'
interface = 'Ethernet'
# add dns server
self.assertTrue(self.run_function('win_dns_client.add_dns', [dns, interface], index=42))
srvs = self.run_function('win_dns_client.get_dns_servers', interface=interface)
self.assertIn(dns, srvs)
# remove dns server
self.assertTrue(self.run_function('win_dns_client.rm_dns', [dns], interface=interface))
srvs = self.run_function('win_dns_client.get_dns_servers', interface=interface)
self.assertNotIn(dns, srvs)
| apache-2.0 | Python |
577a72831479f971de4c8ad16496984d25578de5 | Remove Redundant Information in Example Dags (#5497) | dhuang/incubator-airflow,spektom/incubator-airflow,danielvdende/incubator-airflow,danielvdende/incubator-airflow,apache/airflow,bolkedebruin/airflow,spektom/incubator-airflow,cfei18/incubator-airflow,Acehaidrey/incubator-airflow,nathanielvarona/airflow,bolkedebruin/airflow,airbnb/airflow,cfei18/incubator-airflow,danielvdende/incubator-airflow,mrkm4ntr/incubator-airflow,Fokko/incubator-airflow,lyft/incubator-airflow,DinoCow/airflow,apache/airflow,apache/airflow,mistercrunch/airflow,airbnb/airflow,cfei18/incubator-airflow,nathanielvarona/airflow,danielvdende/incubator-airflow,sekikn/incubator-airflow,nathanielvarona/airflow,lyft/incubator-airflow,sekikn/incubator-airflow,sekikn/incubator-airflow,dhuang/incubator-airflow,wileeam/airflow,Acehaidrey/incubator-airflow,spektom/incubator-airflow,lyft/incubator-airflow,wooga/airflow,wileeam/airflow,Acehaidrey/incubator-airflow,nathanielvarona/airflow,mtagle/airflow,DinoCow/airflow,Acehaidrey/incubator-airflow,DinoCow/airflow,apache/airflow,danielvdende/incubator-airflow,bolkedebruin/airflow,Fokko/incubator-airflow,cfei18/incubator-airflow,apache/incubator-airflow,wileeam/airflow,wooga/airflow,spektom/incubator-airflow,apache/incubator-airflow,danielvdende/incubator-airflow,mistercrunch/airflow,apache/incubator-airflow,DinoCow/airflow,Acehaidrey/incubator-airflow,mtagle/airflow,cfei18/incubator-airflow,apache/incubator-airflow,dhuang/incubator-airflow,airbnb/airflow,lyft/incubator-airflow,wileeam/airflow,wooga/airflow,mrkm4ntr/incubator-airflow,mrkm4ntr/incubator-airflow,cfei18/incubator-airflow,bolkedebruin/airflow,apache/airflow,wooga/airflow,mrkm4ntr/incubator-airflow,apache/airflow,Fokko/incubator-airflow,bolkedebruin/airflow,airbnb/airflow,Acehaidrey/incubator-airflow,mistercrunch/airflow,dhuang/incubator-airflow,mistercrunch/airflow,mtagle/airflow,sekikn/incubator-airflow,nathanielvarona/airflow,Fokko/incubator-airflow,nath
anielvarona/airflow,mtagle/airflow | airflow/example_dags/example_subdag_operator.py | airflow/example_dags/example_subdag_operator.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example DAG demonstrating the usage of the SubDagOperator."""
import airflow
from airflow.example_dags.subdags.subdag import subdag
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.subdag_operator import SubDagOperator
DAG_NAME = 'example_subdag_operator'
args = {
'owner': 'airflow',
'start_date': airflow.utils.dates.days_ago(2),
}
dag = DAG(
dag_id=DAG_NAME,
default_args=args,
schedule_interval="@once",
)
start = DummyOperator(
task_id='start',
dag=dag,
)
section_1 = SubDagOperator(
task_id='section-1',
subdag=subdag(DAG_NAME, 'section-1', args),
dag=dag,
)
some_other_task = DummyOperator(
task_id='some-other-task',
dag=dag,
)
section_2 = SubDagOperator(
task_id='section-2',
subdag=subdag(DAG_NAME, 'section-2', args),
dag=dag,
)
end = DummyOperator(
task_id='end',
dag=dag,
)
start >> section_1 >> some_other_task >> section_2 >> end
| # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example DAG demonstrating the usage of the SubDagOperator."""
import airflow
from airflow.example_dags.subdags.subdag import subdag
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.subdag_operator import SubDagOperator
DAG_NAME = 'example_subdag_operator'
args = {
'owner': 'airflow',
'start_date': airflow.utils.dates.days_ago(2),
}
dag = DAG(
dag_id=DAG_NAME,
default_args=args,
schedule_interval="@once",
)
start = DummyOperator(
task_id='start',
default_args=args,
dag=dag,
)
section_1 = SubDagOperator(
task_id='section-1',
subdag=subdag(DAG_NAME, 'section-1', args),
default_args=args,
dag=dag,
)
some_other_task = DummyOperator(
task_id='some-other-task',
default_args=args,
dag=dag,
)
section_2 = SubDagOperator(
task_id='section-2',
subdag=subdag(DAG_NAME, 'section-2', args),
default_args=args,
dag=dag,
)
end = DummyOperator(
task_id='end',
default_args=args,
dag=dag,
)
start >> section_1 >> some_other_task >> section_2 >> end
| apache-2.0 | Python |
a806bb51961b6640cc77c4fcb0ec05bac5f9616a | Update __manifest__.py | OCA/l10n-romania,OCA/l10n-romania | l10n_ro_stock_account_date_wizard/__manifest__.py | l10n_ro_stock_account_date_wizard/__manifest__.py | # Copyright (C) 2022 NextERP Romania
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
"name": "Romania - Stock Accounting Date Wizard",
"version": "14.0.1.2.0",
"category": "Localization",
"summary": "Romania - Stock Accounting Date Wizard",
"author": "NextERP Romania," "Odoo Community Association (OCA)",
"website": "https://github.com/OCA/l10n-romania",
"depends": ["l10n_ro_stock_account_date"],
"license": "AGPL-3",
"data": [
"wizard/stock_backorder_confirmation_views.xml",
"wizard/stock_immediate_transfer_views.xml",
],
"installable": True,
"maintainers": ["feketemihai"],
}
| # Copyright (C) 2022 NextERP Romania
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
"name": "Romania - Stock Accounting Date Wizard",
"version": "14.0.1.2.0",
"category": "Localization",
"summary": "Romania - Stock Accounting Date Wizard",
"author": "NextERP Romania," "Odoo Community Association (OCA)",
"website": "https://github.com/OCA/l10n-romania",
"depends": ["l10n_ro_stock_account"],
"license": "AGPL-3",
"data": [
"wizard/stock_backorder_confirmation_views.xml",
"wizard/stock_immediate_transfer_views.xml",
],
"installable": True,
"maintainers": ["feketemihai"],
}
| agpl-3.0 | Python |
231969addd7abffb4cddebff88ed3983c04f61d7 | Make English strings modifiable | servalproject/nikola,s2hc-johan/nikola,TyberiusPrime/nikola,lucacerone/nikola,wcmckee/nikola,xuhdev/nikola,xuhdev/nikola,damianavila/nikola,andredias/nikola,schettino72/nikola,lucacerone/nikola,atiro/nikola,getnikola/nikola,jjconti/nikola,berezovskyi/nikola,okin/nikola,wcmckee/nikola,atiro/nikola,TyberiusPrime/nikola,yamila-moreno/nikola,TyberiusPrime/nikola,andredias/nikola,Proteus-tech/nikola,gwax/nikola,Proteus-tech/nikola,schettino72/nikola,masayuko/nikola,pluser/nikola,kotnik/nikola,s2hc-johan/nikola,x1101/nikola,knowsuchagency/nikola,JohnTroony/nikola,masayuko/nikola,getnikola/nikola,x1101/nikola,techdragon/nikola,JohnTroony/nikola,servalproject/nikola,immanetize/nikola,jjconti/nikola,techdragon/nikola,getnikola/nikola,jjconti/nikola,damianavila/nikola,JohnTroony/nikola,okin/nikola,okin/nikola,gwax/nikola,wcmckee/nikola,berezovskyi/nikola,yamila-moreno/nikola,x1101/nikola,andredias/nikola,knowsuchagency/nikola,techdragon/nikola,s2hc-johan/nikola,immanetize/nikola,damianavila/nikola,yamila-moreno/nikola,kotnik/nikola,lucacerone/nikola,pluser/nikola,schettino72/nikola,kotnik/nikola,Proteus-tech/nikola,Proteus-tech/nikola,getnikola/nikola,servalproject/nikola,berezovskyi/nikola,xuhdev/nikola,atiro/nikola,gwax/nikola,okin/nikola,xuhdev/nikola,immanetize/nikola,masayuko/nikola,knowsuchagency/nikola,pluser/nikola | nikola/data/themes/default/messages/messages_en.py | nikola/data/themes/default/messages/messages_en.py | # -*- encoding:utf-8 -*-
from __future__ import unicode_literals
MESSAGES = {
"LANGUAGE": "English",
"Posts for year %s": "Posts for year %s",
"Archive": "Archive",
"Posts about %s": "Posts about %s",
"Tags": "Tags",
"Also available in": "Also available in",
"More posts about": "More posts about",
"Posted": "Posted",
"Original site": "Original site",
"Read in English": "Read in English",
"Newer posts": "Newer posts",
"Older posts": "Older posts",
"Previous post": "Previous post",
"Next post": "Next post",
"old posts page %d": "old posts page %d",
"Read more": "Read more",
"Source": "Source",
}
| from __future__ import unicode_literals
MESSAGES = [
"Posts for year %s",
"Archive",
"Posts about %s",
"Tags",
"Also available in",
"More posts about",
"Posted",
"Original site",
"Read in English",
"Newer posts",
"Older posts",
"Previous post",
"Next post",
"old posts page %d",
"Read more",
"Source",
]
# In english things are not translated
msg_dict = {}
for msg in MESSAGES:
msg_dict[msg] = msg
MESSAGES = msg_dict
MESSAGES["LANGUAGE"] = "English"
| mit | Python |
f3a275f4acda2afee4d071961f78ef8705d547f9 | Create bddirectory-amarphonebook-com-css.py | thetypist/scrappybot | quotesbot/spiders/bddirectory-amarphonebook-com-css.py | quotesbot/spiders/bddirectory-amarphonebook-com-css.py |
# -*- coding: utf-8 -*-
import scrapy
class ToScrapeCSSSpider(scrapy.Spider):
name = "amarphonebook-com-css"
start_urls = [
'http://www.amarphonebook.com/list/Dhaka/All-Groceries/1/1113',
]
def parse(self, response):
for quote in response.css("div.list_common1"):
yield {
'title': quote.css("p.detailstitle::text").extract(),
'phone': quote.css("p.detailsphone::text").extract(),
'address': quote.css("p.detailsaddress::text").extract(),
'category': quote.css("p.detailscategory > i::text").extract(),
}
next_page_url = response.css("li.next > a::attr(href)").extract_first()
if next_page_url is not None:
yield scrapy.Request(response.urljoin(next_page_url))
|
# -*- coding: utf-8 -*-
import scrapy
class ToScrapeCSSSpider(scrapy.Spider):
name = "amarphonebook-com-css"
start_urls = [
'http://www.amarphonebook.com/list/Dhaka/All-Groceries/1/1113',
]
def parse(self, response):
for quote in response.css("div.list_common1"):
yield {
'title': quote.css("p.detailstitle::text").extract_first(),
'phone': quote.css("p.detailsphone::text").extract(),
'address': quote.css("p.detailsaddress::text").extract(),
'category': quote.css("p.detailscategory::text").extract(),
}
next_page_url = response.css("li.next > a::attr(href)").extract_first()
if next_page_url is not None:
yield scrapy.Request(response.urljoin(next_page_url))
| mit | Python |
dabdae4ec8aef552cb59472fd454ebd8db6ed004 | Bump version to 0.7.5 (final) | iamutkarshtiwari/sympy,pbrady/sympy,bukzor/sympy,Designist/sympy,Gadal/sympy,mafiya69/sympy,madan96/sympy,dqnykamp/sympy,atsao72/sympy,AkademieOlympia/sympy,atreyv/sympy,Mitchkoens/sympy,debugger22/sympy,dqnykamp/sympy,lindsayad/sympy,pbrady/sympy,Mitchkoens/sympy,moble/sympy,atreyv/sympy,lindsayad/sympy,saurabhjn76/sympy,rahuldan/sympy,asm666/sympy,Vishluck/sympy,farhaanbukhsh/sympy,Shaswat27/sympy,kaushik94/sympy,farhaanbukhsh/sympy,wyom/sympy,kumarkrishna/sympy,kumarkrishna/sympy,Curious72/sympy,toolforger/sympy,shipci/sympy,ChristinaZografou/sympy,shipci/sympy,VaibhavAgarwalVA/sympy,Davidjohnwilson/sympy,beni55/sympy,ga7g08/sympy,toolforger/sympy,garvitr/sympy,souravsingh/sympy,maniteja123/sympy,ChristinaZografou/sympy,kaushik94/sympy,souravsingh/sympy,postvakje/sympy,shikil/sympy,atreyv/sympy,grevutiu-gabriel/sympy,beni55/sympy,Sumith1896/sympy,skirpichev/omg,jamesblunt/sympy,debugger22/sympy,garvitr/sympy,Gadal/sympy,madan96/sympy,ga7g08/sympy,asm666/sympy,sampadsaha5/sympy,saurabhjn76/sympy,sunny94/temp,moble/sympy,Davidjohnwilson/sympy,dqnykamp/sympy,abhiii5459/sympy,vipulroxx/sympy,asm666/sympy,aktech/sympy,bukzor/sympy,skidzo/sympy,shipci/sympy,jaimahajan1997/sympy,abhiii5459/sympy,jbbskinny/sympy,wyom/sympy,oliverlee/sympy,atsao72/sympy,diofant/diofant,bukzor/sympy,jerli/sympy,oliverlee/sympy,Shaswat27/sympy,saurabhjn76/sympy,wyom/sympy,meghana1995/sympy,grevutiu-gabriel/sympy,jbbskinny/sympy,cccfran/sympy,wanglongqi/sympy,shikil/sympy,MridulS/sympy,Titan-C/sympy,Designist/sympy,MridulS/sympy,emon10005/sympy,Titan-C/sympy,abhiii5459/sympy,AkademieOlympia/sympy,Arafatk/sympy,Davidjohnwilson/sympy,farhaanbukhsh/sympy,kaichogami/sympy,liangjiaxing/sympy,emon10005/sympy,cccfran/sympy,MridulS/sympy,sunny94/temp,rahuldan/sympy,kaichogami/sympy,yukoba/sympy,VaibhavAgarwalVA/sympy,postvakje/sympy,AunShiLord/sympy,Vishluck/sympy,yashsharan/sympy,Mitchkoens/sympy,mafiya69/sympy,garvitr/sympy
,jaimahajan1997/sympy,sahilshekhawat/sympy,skidzo/sympy,pandeyadarsh/sympy,toolforger/sympy,abloomston/sympy,yashsharan/sympy,Gadal/sympy,kevalds51/sympy,mcdaniel67/sympy,moble/sympy,pbrady/sympy,debugger22/sympy,chaffra/sympy,abloomston/sympy,postvakje/sympy,oliverlee/sympy,wanglongqi/sympy,jamesblunt/sympy,grevutiu-gabriel/sympy,sunny94/temp,jaimahajan1997/sympy,atsao72/sympy,ChristinaZografou/sympy,vipulroxx/sympy,Vishluck/sympy,madan96/sympy,jamesblunt/sympy,sahilshekhawat/sympy,Curious72/sympy,jerli/sympy,wanglongqi/sympy,sahmed95/sympy,aktech/sympy,mafiya69/sympy,emon10005/sympy,jerli/sympy,AkademieOlympia/sympy,kaichogami/sympy,Curious72/sympy,liangjiaxing/sympy,rahuldan/sympy,cswiercz/sympy,abloomston/sympy,Designist/sympy,iamutkarshtiwari/sympy,Titan-C/sympy,kaushik94/sympy,Arafatk/sympy,sahilshekhawat/sympy,drufat/sympy,mcdaniel67/sympy,Shaswat27/sympy,sampadsaha5/sympy,lindsayad/sympy,sampadsaha5/sympy,drufat/sympy,iamutkarshtiwari/sympy,mcdaniel67/sympy,beni55/sympy,MechCoder/sympy,Sumith1896/sympy,MechCoder/sympy,vipulroxx/sympy,Sumith1896/sympy,AunShiLord/sympy,meghana1995/sympy,chaffra/sympy,liangjiaxing/sympy,meghana1995/sympy,shikil/sympy,VaibhavAgarwalVA/sympy,hargup/sympy,maniteja123/sympy,pandeyadarsh/sympy,chaffra/sympy,Arafatk/sympy,yashsharan/sympy,hargup/sympy,aktech/sympy,maniteja123/sympy,MechCoder/sympy,kevalds51/sympy,skidzo/sympy,ga7g08/sympy,kumarkrishna/sympy,ahhda/sympy,pandeyadarsh/sympy,kevalds51/sympy,drufat/sympy,jbbskinny/sympy,yukoba/sympy,yukoba/sympy,AunShiLord/sympy,souravsingh/sympy,sahmed95/sympy,cswiercz/sympy,ahhda/sympy,ahhda/sympy,sahmed95/sympy,hargup/sympy,cccfran/sympy,cswiercz/sympy | sympy/__init__.py | sympy/__init__.py | """
SymPy is a Python library for symbolic mathematics. It aims to become a
full-featured computer algebra system (CAS) while keeping the code as
simple as possible in order to be comprehensible and easily extensible.
SymPy is written entirely in Python and does not require any external
libraries, except optionally for plotting support.
See the webpage for more information and documentation:
http://code.google.com/p/sympy/"""
from __future__ import absolute_import, print_function
__version__ = "0.7.5"
import sys
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
raise ImportError("Python Version 2.6 or above is required for SymPy.")
else: # Python 3
pass
# Here we can also check for specific Python 3 versions, if needed
del sys
def __sympy_debug():
# helper function so we don't import os globally
import os
return eval(os.getenv('SYMPY_DEBUG', 'False'))
SYMPY_DEBUG = __sympy_debug()
from .core import *
from .logic import *
from .assumptions import *
from .polys import *
from .series import *
from .functions import *
from .ntheory import *
from .concrete import *
from .simplify import *
from .sets import *
from .solvers import *
from .matrices import *
from .geometry import *
from .utilities import *
from .integrals import *
from .tensor import *
from .parsing import *
from .calculus import *
# Adds about .04-.05 seconds of import time
# from combinatorics import *
# This module is slow to import:
#from physics import units
from .plotting import plot, Plot, textplot, plot_backends, plot_implicit
from .printing import pretty, pretty_print, pprint, pprint_use_unicode, \
pprint_try_use_unicode, print_gtk, print_tree, pager_print, TableForm
from .printing import ccode, fcode, jscode, latex, preview
from .printing import python, print_python, srepr, sstr, sstrrepr
from .interactive import init_session, init_printing
evalf._create_evalf_table()
# This is slow to import:
#import abc
| """
SymPy is a Python library for symbolic mathematics. It aims to become a
full-featured computer algebra system (CAS) while keeping the code as
simple as possible in order to be comprehensible and easily extensible.
SymPy is written entirely in Python and does not require any external
libraries, except optionally for plotting support.
See the webpage for more information and documentation:
http://code.google.com/p/sympy/"""
from __future__ import absolute_import, print_function
__version__ = "0.7.5.rc1"
import sys
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
raise ImportError("Python Version 2.6 or above is required for SymPy.")
else: # Python 3
pass
# Here we can also check for specific Python 3 versions, if needed
del sys
def __sympy_debug():
# helper function so we don't import os globally
import os
return eval(os.getenv('SYMPY_DEBUG', 'False'))
SYMPY_DEBUG = __sympy_debug()
from .core import *
from .logic import *
from .assumptions import *
from .polys import *
from .series import *
from .functions import *
from .ntheory import *
from .concrete import *
from .simplify import *
from .sets import *
from .solvers import *
from .matrices import *
from .geometry import *
from .utilities import *
from .integrals import *
from .tensor import *
from .parsing import *
from .calculus import *
# Adds about .04-.05 seconds of import time
# from combinatorics import *
# This module is slow to import:
#from physics import units
from .plotting import plot, Plot, textplot, plot_backends, plot_implicit
from .printing import pretty, pretty_print, pprint, pprint_use_unicode, \
pprint_try_use_unicode, print_gtk, print_tree, pager_print, TableForm
from .printing import ccode, fcode, jscode, latex, preview
from .printing import python, print_python, srepr, sstr, sstrrepr
from .interactive import init_session, init_printing
evalf._create_evalf_table()
# This is slow to import:
#import abc
| bsd-3-clause | Python |
dc6500b6a2cc58585b6971fee9efd695cd6124bb | Update homedept_assignment_filter.py | ctsit/vivo-pump,ctsit/vivo-pump,mconlon17/vivo-pump | uf_examples/people/homedept_assignment_filter.py | uf_examples/people/homedept_assignment_filter.py | #!/usr/bin/env/python
"""
homedept_assignment_filter.py -- for home departments matched to patterns in an exception file, assign new
home departments as indicated in the exception file
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2016 (c) Michael Conlon"
__license__ = "New BSD License"
__version__ = "0.01"
import shelve
import sys
import re
from pump.vivopump import read_csv_fp, write_csv_fp
homedept_shelve = shelve.open('deptid_exceptions.db')
data_in = read_csv_fp(sys.stdin)
data_out = {}
reassign_count = 0
for row, data in data_in.items():
new_data = dict(data)
# check each pattern
for pattern_string, action in homedept_shelve.items():
pattern = re.compile(pattern_string)
if pattern.search(new_data['HOME_DEPT']) is not None:
new_data['HOME_DEPT'] = action['assigned_deptid']
print >>sys.stderr, "Reassign from", data['HOME_DEPT'], 'to', new_data['HOME_DEPT']
reassign_count += 1
data_out[row] = new_data
print >>sys.stderr, 'Reassign count', reassign_count
write_csv_fp(sys.stdout, data_out)
homedept_shelve.close()
| #!/usr/bin/env/python
"""
homedept_assignment_filter.py -- for home departments matched to patterns in an exception file, assign new
home departments as indicated in the exception file
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2015 (c) Michael Conlon"
__license__ = "New BSD License"
__version__ = "0.01"
import shelve
import sys
import re
from pump.vivopump import read_csv_fp, write_csv_fp
homedept_shelve = shelve.open('deptid_exceptions.db')
data_in = read_csv_fp(sys.stdin)
data_out = {}
reassign_count = 0
for row, data in data_in.items():
new_data = dict(data)
# check each pattern
for pattern_string, action in homedept_shelve.items():
pattern = re.compile(pattern_string)
if pattern.search(new_data['HOME_DEPT']) is not None:
new_data['HOME_DEPT'] = action['assigned_deptid']
print >>sys.stderr, "Reassign from", data['HOME_DEPT'], 'to', new_data['HOME_DEPT']
reassign_count += 1
data_out[row] = new_data
print >>sys.stderr, 'Reassign count', reassign_count
write_csv_fp(sys.stdout, data_out)
homedept_shelve.close()
| bsd-2-clause | Python |
7f74a3d8bfae9c578a3ec4cfee53b19550728cac | Fix unit tests | rackerlabs/django-DefectDojo,rackerlabs/django-DefectDojo,rackerlabs/django-DefectDojo,rackerlabs/django-DefectDojo | dojo/unittests/test_microfocus_webinspect_parser.py | dojo/unittests/test_microfocus_webinspect_parser.py | from django.test import TestCase
from dojo.tools.microfocus_webinspect.parser import MicrofocusWebinspectXMLParser
from dojo.models import Test, Engagement, Product
class TestMicrofocusWebinspectXMLParser(TestCase):
def test_parse_without_file_has_no_findings(self):
parser = MicrofocusWebinspectXMLParser(None, Test())
self.assertEqual(0, len(parser.items))
def test_parse_file_with_no_vuln_has_no_findings(self):
test = Test()
test.engagement = Engagement()
test.engagement.product = Product()
testfile = open("dojo/unittests/scans/microfocus_webinspect/Webinspect_no_vuln.xml")
parser = MicrofocusWebinspectXMLParser(testfile, test)
self.assertEqual(0, len(parser.items))
def test_parse_file_with_one_vuln_has_one_findings(self):
test = Test()
test.engagement = Engagement()
test.engagement.product = Product()
testfile = open("dojo/unittests/scans/microfocus_webinspect/Webinspect_one_vuln.xml")
parser = MicrofocusWebinspectXMLParser(testfile, test)
self.assertEqual(1, len(parser.items))
item = parser.items[0]
self.assertEqual(200, item.cwe)
self.assertLess(0, len(item.unsaved_endpoints))
def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
test = Test()
test.engagement = Engagement()
test.engagement.product = Product()
testfile = open("dojo/unittests/scans/microfocus_webinspect/Webinspect_many_vuln.xml")
parser = MicrofocusWebinspectXMLParser(testfile, test)
self.assertEqual(8, len(parser.items))
item = parser.items[1]
self.assertEqual(525, item.cwe)
self.assertIsNotNone(item.references)
self.assertEqual("1cfe38ee-89f7-4110-ad7c-8fca476b2f04", item.unique_id_from_tool)
self.assertLess(0, len(item.unsaved_endpoints))
def test_convert_severity(self):
with self.subTest("convert info", val="0"):
self.assertEqual("Info", MicrofocusWebinspectXMLParser.convert_severity("0"))
with self.subTest("convert medium", val="2"):
self.assertEqual("Medium", MicrofocusWebinspectXMLParser.convert_severity("2"))
| from django.test import TestCase
from dojo.tools.microfocus_webinspect.parser import MicrofocusWebinspectXMLParser
from dojo.models import Test, Engagement, Product
class TestMicrofocusWebinspectXMLParser(TestCase):
def test_parse_without_file_has_no_findings(self):
parser = MicrofocusWebinspectXMLParser(None, Test())
self.assertEqual(0, len(parser.items))
def test_parse_file_with_no_vuln_has_no_findings(self):
test = Test()
test.engagement = Engagement()
test.engagement.product = Product()
testfile = open("dojo/unittests/scans/microfocus_webinspect/Webinspect_no_vuln.xml")
parser = MicrofocusWebinspectXMLParser(testfile, test)
self.assertEqual(0, len(parser.items))
def test_parse_file_with_one_vuln_has_one_findings(self):
test = Test()
test.engagement = Engagement()
test.engagement.product = Product()
testfile = open("dojo/unittests/scans/microfocus_webinspect/Webinspect_one_vuln.xml")
parser = MicrofocusWebinspectXMLParser(testfile, test)
self.assertEqual(1, len(parser.items))
item = parser.items[0]
self.assertEqual(200, item.cwe)
self.assertEqual(1, len(item.unsaved_endpoints))
def test_parse_file_with_multiple_vuln_has_multiple_finding(self):
test = Test()
test.engagement = Engagement()
test.engagement.product = Product()
testfile = open("dojo/unittests/scans/microfocus_webinspect/Webinspect_many_vuln.xml")
parser = MicrofocusWebinspectXMLParser(testfile, test)
self.assertEqual(8, len(parser.items))
item = parser.items[1]
self.assertEqual(525, item.cwe)
self.assertIsNotNone(item.references)
self.assertEqual("1cfe38ee-89f7-4110-ad7c-8fca476b2f04", item.unique_id_from_tool)
self.assertEqual(1, len(item.unsaved_endpoints))
def test_convert_severity(self):
with self.subTest("convert info", val="0"):
self.assertEqual("Info", "0")
with self.subTest("convert medium", val="2"):
self.assertEqual("Info", "2")
| bsd-3-clause | Python |
29366d6b23caefa390abb0c2f401844ea1827d06 | update SendWriteDiaryEmailTask: send email to users who didn't write diary | jupiny/EnglishDiary,jupiny/EnglishDiary,jupiny/EnglishDiary | english_diary/users/tasks/send_write_diary_email.py | english_diary/users/tasks/send_write_diary_email.py | from django.conf import settings
from django.contrib.auth import get_user_model
from datetime import datetime
from celery import Task
from users.utils.send_email import send_email
class SendWriteDiaryEmailTask(Task):
def run(self):
today = datetime.now().strftime("%Y/%m/%d")
for user in get_user_model().objects.all():
if not user.diary_set.get_or_none(datetime=today):
send_email(
sender=settings.ADMIN_SENDER_EMAIL,
receiver=user.email,
subject=settings.WRITE_DIARY_EMAIL_SUBJECT.format(
username=user.username,
),
text=settings.WRITE_DIARY_EMAIL_TEXT.format(
username=user.username,
)
)
| from django.conf import settings
from django.contrib.auth import get_user_model
from celery import Task
from users.utils.send_email import send_email
class SendWriteDiaryEmailTask(Task):
def run(self):
for user in get_user_model().objects.all():
send_email(
sender=settings.ADMIN_SENDER_EMAIL,
receiver=user.email,
subject=settings.WRITE_DIARY_EMAIL_SUBJECT.format(
username=user.username,
),
text=settings.WRITE_DIARY_EMAIL_TEXT.format(
username=user.username,
)
)
| mit | Python |
05078fdca2c2ad30ebb12234f5873470c20ade94 | Add a context manager to the useful read/writer lock. | openstack/taskflow,pombredanne/taskflow-1,jessicalucci/TaskManagement,jessicalucci/TaskManagement,pombredanne/taskflow-1,junneyang/taskflow,jimbobhickville/taskflow,citrix-openstack-build/taskflow,citrix-openstack-build/taskflow,varunarya10/taskflow,varunarya10/taskflow,openstack/taskflow,junneyang/taskflow,jimbobhickville/taskflow | taskflow/utils.py | taskflow/utils.py | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import threading
class ReaderWriterLock(object):
"""A simple reader-writer lock.
Several readers can hold the lock simultaneously, and only one writer.
Write locks have priority over reads to prevent write starvation.
Public domain @ http://majid.info/blog/a-reader-writer-lock-for-python/
"""
def __init__(self):
self.rwlock = 0
self.writers_waiting = 0
self.monitor = threading.Lock()
self.readers_ok = threading.Condition(self.monitor)
self.writers_ok = threading.Condition(self.monitor)
@contextlib.contextmanager
def acquire(self, read=True):
"""Acquire a read or write lock in a context manager."""
try:
if read:
self.acquire_read()
else:
self.acquire_write()
yield self
finally:
self.release()
def acquire_read(self):
"""Acquire a read lock.
Several threads can hold this typeof lock.
It is exclusive with write locks."""
self.monitor.acquire()
while self.rwlock < 0 or self.writers_waiting:
self.readers_ok.wait()
self.rwlock += 1
self.monitor.release()
def acquire_write(self):
"""Acquire a write lock.
Only one thread can hold this lock, and only when no read locks
are also held."""
self.monitor.acquire()
while self.rwlock != 0:
self.writers_waiting += 1
self.writers_ok.wait()
self.writers_waiting -= 1
self.rwlock = -1
self.monitor.release()
def release(self):
"""Release a lock, whether read or write."""
self.monitor.acquire()
if self.rwlock < 0:
self.rwlock = 0
else:
self.rwlock -= 1
wake_writers = self.writers_waiting and self.rwlock == 0
wake_readers = self.writers_waiting == 0
self.monitor.release()
if wake_writers:
self.writers_ok.acquire()
self.writers_ok.notify()
self.writers_ok.release()
elif wake_readers:
self.readers_ok.acquire()
self.readers_ok.notifyAll()
self.readers_ok.release()
| # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
| apache-2.0 | Python |
b73a8f283972b686fa7b2aeb83b1314fb042e619 | Add comment | pubres/pubres | pubres/tests/logging_test.py | pubres/tests/logging_test.py | import logging
import logging.handlers
import multiprocessing
import pubres
from pubres.pubres_logging import setup_logging
from .base import *
# Tests if the logging works.
# Uses a custom logging handler that collects log messages into a list
# and then asserts that they are there.
# Care is taken about the server actually running in a different process.
class MultiprocessingQueueStreamHandler(logging.handlers.BufferingHandler):
"""A logging handler that pushes the getMessage() of every
LogRecord into a multiprocessing.Queue.
Used to test log messages of a server started in its own process.
"""
def __init__(self, *args, **kwargs):
super(MultiprocessingQueueStreamHandler, self).__init__(*args,
**kwargs)
self.mp_logrecord_queue = multiprocessing.Queue()
# Don't override emit(self, record);
# BufferingHandler will append record to self.buffer
def emit(self, record):
super(MultiprocessingQueueStreamHandler, self).emit(record)
self.mp_logrecord_queue.put(record.getMessage())
def getLogRecordBuffer(self):
ret = []
while not self.mp_logrecord_queue.empty():
log = self.mp_logrecord_queue.get()
ret.append(log)
return ret
def test_logging():
# Set up log capturing
handler = MultiprocessingQueueStreamHandler(10)
setup_logging(handler=handler)
# Do some server actions
with pubres.BackgroundServer():
with pub('key1', 'val1'):
pass
# Make sure actions appear in log
log_buffer = handler.getLogRecordBuffer()
assert "pub {'key1': 'val1'}" in log_buffer
| import logging
import logging.handlers
import multiprocessing
import pubres
from pubres.pubres_logging import setup_logging
from .base import *
class MultiprocessingQueueStreamHandler(logging.handlers.BufferingHandler):
"""A logging handler that pushes the getMessage() of every
LogRecord into a multiprocessing.Queue.
Used to test log messages of a server started in its own process.
"""
def __init__(self, *args, **kwargs):
super(MultiprocessingQueueStreamHandler, self).__init__(*args,
**kwargs)
self.mp_logrecord_queue = multiprocessing.Queue()
# Don't override emit(self, record);
# BufferingHandler will append record to self.buffer
def emit(self, record):
super(MultiprocessingQueueStreamHandler, self).emit(record)
self.mp_logrecord_queue.put(record.getMessage())
def getLogRecordBuffer(self):
ret = []
while not self.mp_logrecord_queue.empty():
log = self.mp_logrecord_queue.get()
ret.append(log)
return ret
def test_logging():
# Set up log capturing
handler = MultiprocessingQueueStreamHandler(10)
setup_logging(handler=handler)
# Do some server actions
with pubres.BackgroundServer():
with pub('key1', 'val1'):
pass
# Make sure actions appear in log
log_buffer = handler.getLogRecordBuffer()
assert "pub {'key1': 'val1'}" in log_buffer
| mit | Python |
c719802c83c8c5f77e27d2ac8b2bc59516cafe4f | Add new version of zlib, deprecate 1.2.10 (#3136) | EmreAtes/spack,tmerrick1/spack,iulian787/spack,LLNL/spack,skosukhin/spack,matthiasdiener/spack,EmreAtes/spack,iulian787/spack,tmerrick1/spack,LLNL/spack,krafczyk/spack,LLNL/spack,iulian787/spack,matthiasdiener/spack,skosukhin/spack,krafczyk/spack,EmreAtes/spack,matthiasdiener/spack,TheTimmy/spack,LLNL/spack,skosukhin/spack,matthiasdiener/spack,lgarren/spack,tmerrick1/spack,TheTimmy/spack,EmreAtes/spack,mfherbst/spack,lgarren/spack,tmerrick1/spack,EmreAtes/spack,lgarren/spack,skosukhin/spack,iulian787/spack,lgarren/spack,iulian787/spack,mfherbst/spack,mfherbst/spack,matthiasdiener/spack,TheTimmy/spack,mfherbst/spack,krafczyk/spack,skosukhin/spack,TheTimmy/spack,LLNL/spack,krafczyk/spack,mfherbst/spack,tmerrick1/spack,krafczyk/spack,TheTimmy/spack,lgarren/spack | var/spack/repos/builtin/packages/zlib/package.py | var/spack/repos/builtin/packages/zlib/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Zlib(AutotoolsPackage):
"""A free, general-purpose, legally unencumbered lossless
data-compression library."""
homepage = "http://zlib.net"
url = "http://zlib.net/fossils/zlib-1.2.10.tar.gz"
version('1.2.11', '1c9f62f0778697a09d36121ead88e08e')
# Due to the bug fixes, any installations of 1.2.9 or 1.2.10 should be
# immediately replaced with 1.2.11.
version('1.2.8', '44d667c142d7cda120332623eab69f40')
variant('pic', default=True,
description='Produce position-independent code (for shared libs)')
variant('shared', default=True,
description='Enables the build of shared libraries.')
def setup_environment(self, spack_env, run_env):
if '+pic' in self.spec:
spack_env.set('CFLAGS', self.compiler.pic_flag)
def configure_args(self):
config_args = []
if '+shared' not in self.spec:
config_args.append('--static')
return config_args
| ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Zlib(AutotoolsPackage):
"""A free, general-purpose, legally unencumbered lossless
data-compression library."""
homepage = "http://zlib.net"
url = "http://zlib.net/fossils/zlib-1.2.10.tar.gz"
version('1.2.10', 'd9794246f853d15ce0fcbf79b9a3cf13')
# author had this to say about 1.2.9....
# Due to the bug fixes, any installations of 1.2.9 should be immediately
# replaced with 1.2.10.
version('1.2.8', '44d667c142d7cda120332623eab69f40')
variant('pic', default=True,
description='Produce position-independent code (for shared libs)')
variant('shared', default=True,
description='Enables the build of shared libraries.')
def setup_environment(self, spack_env, run_env):
if '+pic' in self.spec:
spack_env.set('CFLAGS', self.compiler.pic_flag)
def configure_args(self):
config_args = []
if '+shared' not in self.spec:
config_args.append('--static')
return config_args
| lgpl-2.1 | Python |
2f75327ce3be5c67f815f53d9afd8e8341a4235a | Fix default value quotes, test was failing | valdur55/py3status,ultrabug/py3status,tobes/py3status,Andrwe/py3status,vvoland/py3status,guiniol/py3status,Andrwe/py3status,tobes/py3status,valdur55/py3status,guiniol/py3status,ultrabug/py3status,ultrabug/py3status,docwalter/py3status,alexoneill/py3status,valdur55/py3status | py3status/modules/systemd.py | py3status/modules/systemd.py | # -*- coding: utf-8 -*-
"""
Check systemd unit status.
Check the status of a systemd unit.
Configuration parameters:
cache_timeout: How often we refresh this module in seconds (default 5)
format: Format for module output (default "{unit}: {status}")
unit: Name of the unit (default "dbus.service")
Format of status string placeholders:
{unit} name of the unit
{status} 'active', 'inactive' or 'not-found'
Color options:
color_good: Unit active
color_bad: Unit inactive
color_degraded: Unit not found
Example:
```
# Check status of vpn service
# Start with left click
# Stop with right click
systemd vpn {
unit = 'vpn.service'
on_click 1 = "exec sudo systemctl start vpn"
on_click 3 = "exec sudo systemctl stop vpn"
format = '{unit} is {status}'
}
```
Requires:
pydbus: python lib for dbus
@author Adrian Lopez <adrianlzt@gmail.com>
@license BSD
"""
from pydbus import SystemBus
class Py3status:
# available configuration parameters
cache_timeout = 5
format = '{unit}: {status}'
unit = 'dbus.service'
def post_config_hook(self):
bus = SystemBus()
systemd = bus.get('org.freedesktop.systemd1')
s_unit = systemd.LoadUnit(self.unit)
self.systemd_unit = bus.get('.systemd1', s_unit)
def check_status(self, i3s_output_list, i3s_config):
"""
Ask dbus to get Status and loaded status for the unit
"""
status = self.systemd_unit.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
exists = self.systemd_unit.Get('org.freedesktop.systemd1.Unit', 'LoadState')
if exists == 'not-found':
color = self.py3.COLOR_DEGRADED
status = exists
elif status == 'active':
color = self.py3.COLOR_GOOD
elif status == 'inactive':
color = self.py3.COLOR_BAD
else:
color = self.py3.COLOR_DEGRADED
full_text = self.py3.safe_format(self.format, {'unit': self.unit, 'status': status})
response = {
'cached_until': self.py3.time_in(self.cache_timeout),
'full_text': full_text,
'color': color
}
return response
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| # -*- coding: utf-8 -*-
"""
Check systemd unit status.
Check the status of a systemd unit.
Configuration parameters:
cache_timeout: How often we refresh this module in seconds (default 5)
format: Format for module output (default "{unit}: {status}")
unit: Name of the unit (default dbus.service)
Format of status string placeholders:
{unit} name of the unit
{status} 'active', 'inactive' or 'not-found'
Color options:
color_good: Unit active
color_bad: Unit inactive
color_degraded: Unit not found
Example:
```
# Check status of vpn service
# Start with left click
# Stop with right click
systemd vpn {
unit = 'vpn.service'
on_click 1 = "exec sudo systemctl start vpn"
on_click 3 = "exec sudo systemctl stop vpn"
format = '{unit} is {status}'
}
```
Requires:
pydbus: python lib for dbus
@author Adrian Lopez <adrianlzt@gmail.com>
@license BSD
"""
from pydbus import SystemBus
class Py3status:
# available configuration parameters
cache_timeout = 5
format = '{unit}: {status}'
unit = 'dbus.service'
def post_config_hook(self):
bus = SystemBus()
systemd = bus.get('org.freedesktop.systemd1')
s_unit = systemd.LoadUnit(self.unit)
self.systemd_unit = bus.get('.systemd1', s_unit)
def check_status(self, i3s_output_list, i3s_config):
"""
Ask dbus to get Status and loaded status for the unit
"""
status = self.systemd_unit.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
exists = self.systemd_unit.Get('org.freedesktop.systemd1.Unit', 'LoadState')
if exists == 'not-found':
color = self.py3.COLOR_DEGRADED
status = exists
elif status == 'active':
color = self.py3.COLOR_GOOD
elif status == 'inactive':
color = self.py3.COLOR_BAD
else:
color = self.py3.COLOR_DEGRADED
full_text = self.py3.safe_format(self.format, {'unit': self.unit, 'status': status})
response = {
'cached_until': self.py3.time_in(self.cache_timeout),
'full_text': full_text,
'color': color
}
return response
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| bsd-3-clause | Python |
16f9d0943c8ae7bde5bde8a58c5aaa119342ecd8 | Fix docstring for python.call | Fizzadar/pyinfra,Fizzadar/pyinfra | pyinfra/operations/python.py | pyinfra/operations/python.py | '''
The Python module allows you to execute Python code within the context of a deploy.
'''
from pyinfra.api import FunctionCommand, operation
@operation
def call(function, *args, **kwargs):
'''
Execute a Python function within a deploy.
+ function: the function to execute
+ args: additional arguments to pass to the function
+ kwargs: additional keyword arguments to pass to the function
Callback functions args passed the state, host, and any args/kwargs passed
into the operation directly, eg:
.. code:: python
def my_callback(state, host, hello=None):
command = 'echo hello'
if hello:
command = command + ' ' + hello
status, stdout, stderr = host.run_shell_command(state, command=command, sudo=SUDO)
assert status is True # ensure the command executed OK
if 'hello ' not in str(stdout):
raise Exception('`{}` problem with callback stdout:{} stderr:{}'.format(
command, stdout, stderr))
python.call(
name='Run my_callback function',
function=my_callback,
hello='world',
)
'''
kwargs.pop('state', None)
kwargs.pop('host', None)
yield FunctionCommand(function, args, kwargs)
@operation
def raise_exception(exception, *args, **kwargs):
def raise_exc(*args, **kwargs): # pragma: no cover
raise exception(*args, **kwargs)
kwargs.pop('state', None)
kwargs.pop('host', None)
yield FunctionCommand(raise_exc, args, kwargs)
| '''
The Python module allows you to execute Python code within the context of a deploy.
'''
from pyinfra.api import FunctionCommand, operation
@operation
def call(function, *args, **kwargs):
'''
Execute a Python function within a deploy.
+ func: the function to execute
+ args: additional arguments to pass to the function
+ kwargs: additional keyword arguments to pass to the function
Callback functions args passed the state, host, and any args/kwargs passed
into the operation directly, eg:
.. code:: python
def my_callback(state, host, hello=None):
command = 'echo hello'
if hello:
command = command + ' ' + hello
status, stdout, stderr = host.run_shell_command(state, command=command, sudo=SUDO)
assert status is True # ensure the command executed OK
if 'hello ' not in str(stdout):
raise Exception('`{}` problem with callback stdout:{} stderr:{}'.format(
command, stdout, stderr))
python.call(
name='Run my_callback function',
function=my_callback,
hello='world',
)
'''
kwargs.pop('state', None)
kwargs.pop('host', None)
yield FunctionCommand(function, args, kwargs)
@operation
def raise_exception(exception, *args, **kwargs):
def raise_exc(*args, **kwargs): # pragma: no cover
raise exception(*args, **kwargs)
kwargs.pop('state', None)
kwargs.pop('host', None)
yield FunctionCommand(raise_exc, args, kwargs)
| mit | Python |
2393de5f80744baf86c4ca1d822dc9d0bc1ff905 | Fix package doc. | Dioptas/pymatgen,sonium0/pymatgen,ctoher/pymatgen,migueldiascosta/pymatgen,ctoher/pymatgen,ctoher/pymatgen,yanikou19/pymatgen,Bismarrck/pymatgen,Dioptas/pymatgen,migueldiascosta/pymatgen,Bismarrck/pymatgen,rousseab/pymatgen,sonium0/pymatgen,Bismarrck/pymatgen,rousseab/pymatgen,rousseab/pymatgen,Bismarrck/pymatgen,yanikou19/pymatgen,migueldiascosta/pymatgen,yanikou19/pymatgen,sonium0/pymatgen,Bismarrck/pymatgen | pymatgen/matproj/__init__.py | pymatgen/matproj/__init__.py | """
This module implements high-level interfaces to the public Materials Project.
""" | mit | Python | |
6f3336ef5dd43c02c851001715cf0f231c269276 | Add keystone to the request | bertjwregeer/pyramid_keystone | pyramid_keystone/__init__.py | pyramid_keystone/__init__.py |
default_settings = [
('auth_url', str, 'http://localhost:5000/v3'),
('region', str, 'RegionOne'),
('user_domain_name', str, 'Default'),
('cacert', str, ''),
]
def parse_settings(settings):
parsed = {}
def populate(name, convert, default):
sname = '%s%s' % ('keystone.', name)
value = convert(settings.get(sname, default))
parsed[sname] = value
for name, convert, default in default_settings:
populate(name, convert, default)
return parsed
def includeme(config):
""" Set up standard configurator registrations. Use via:
.. code-block:: python
config = Configurator()
config.include('pyramid_keystone')
"""
# We use an action so that the user can include us, and then add the
# required variables, upon commit we will pick up those changes.
def register():
registry = config.registry
settings = parse_settings(registry.settings)
registry.settings.update(settings)
config.action('keystone-configure', register)
# Allow the user to use our auth policy (recommended)
config.add_directive('keystone_auth_policy', '.authentication.add_auth_policy')
# Add the keystone property to the request
config.add_request_method('.keystone.request_keystone', name='keystone', property=True, reify=True)
|
default_settings = [
('auth_url', str, 'http://localhost:5000/v3'),
('region', str, 'RegionOne'),
('user_domain_name', str, 'Default'),
('cacert', str, ''),
]
def parse_settings(settings):
parsed = {}
def populate(name, convert, default):
sname = '%s%s' % ('keystone.', name)
value = convert(settings.get(sname, default))
parsed[sname] = value
for name, convert, default in default_settings:
populate(name, convert, default)
return parsed
def includeme(config):
""" Set up standard configurator registrations. Use via:
.. code-block:: python
config = Configurator()
config.include('pyramid_keystone')
"""
# We use an action so that the user can include us, and then add the
# required variables, upon commit we will pick up those changes.
def register():
registry = config.registry
settings = parse_settings(registry.settings)
registry.settings.update(settings)
config.action('keystone-configure', register)
config.add_directive('keystone_auth_policy', '.authentication.add_auth_policy')
| isc | Python |
41912722601e1296c05530641170401cc29a6bbb | Update __init__.py | PyThaiNLP/pythainlp | pythainlp/corpus/__init__.py | pythainlp/corpus/__init__.py | # -*- coding: utf-8 -*-
"""
Corpus related functions.
Access to dictionaries, word lists, and language models.
Including download manager.
"""
__all__ = [
"corpus_path",
"corpus_db_path",
"corpus_db_url",
"countries",
"download",
"get_corpus",
"get_corpus_db",
"get_corpus_db_detail",
"get_corpus_path",
"provinces",
"remove",
"thai_female_names",
"thai_male_names",
"thai_negations",
"thai_stopwords",
"thai_syllables",
"thai_words",
]
import os
from tinydb import TinyDB
from pythainlp.tools import get_full_data_path, get_pythainlp_path
# Remote and local corpus databases
_CORPUS_DIRNAME = "corpus"
_CORPUS_PATH = os.path.join(get_pythainlp_path(), _CORPUS_DIRNAME)
# remote corpus catalog URL
_CORPUS_DB_URL = (
"https://raw.githubusercontent.com/"
"PyThaiNLP/pythainlp-corpus/"
"fix-thainer/db.json" #2.2
)
# local corpus catalog filename
_CORPUS_DB_FILENAME = "db.json"
# local corpus catalog full path
_CORPUS_DB_PATH = get_full_data_path(_CORPUS_DB_FILENAME)
# create a local corpus database if it does not already exist
if not os.path.exists(_CORPUS_DB_PATH):
TinyDB(_CORPUS_DB_PATH)
def corpus_path() -> str:
"""
Get path where corpus files are kept locally.
"""
return _CORPUS_PATH
def corpus_db_url() -> str:
"""
Get remote URL of corpus catalog.
"""
return _CORPUS_DB_URL
def corpus_db_path() -> str:
"""
Get local path of corpus catalog.
"""
return _CORPUS_DB_PATH
from pythainlp.corpus.core import (
download,
get_corpus,
get_corpus_db,
get_corpus_db_detail,
get_corpus_path,
remove,
) # these imports must come before other pythainlp.corpus.* imports
from pythainlp.corpus.common import (
countries,
provinces,
thai_female_names,
thai_male_names,
thai_negations,
thai_stopwords,
thai_syllables,
thai_words,
)
| # -*- coding: utf-8 -*-
"""
Corpus related functions.
Access to dictionaries, word lists, and language models.
Including download manager.
"""
__all__ = [
"corpus_path",
"corpus_db_path",
"corpus_db_url",
"countries",
"download",
"get_corpus",
"get_corpus_db",
"get_corpus_db_detail",
"get_corpus_path",
"provinces",
"remove",
"thai_female_names",
"thai_male_names",
"thai_negations",
"thai_stopwords",
"thai_syllables",
"thai_words",
]
import os
from tinydb import TinyDB
from pythainlp.tools import get_full_data_path, get_pythainlp_path
# Remote and local corpus databases
_CORPUS_DIRNAME = "corpus"
_CORPUS_PATH = os.path.join(get_pythainlp_path(), _CORPUS_DIRNAME)
# remote corpus catalog URL
_CORPUS_DB_URL = (
"https://raw.githubusercontent.com/"
"PyThaiNLP/pythainlp-corpus/"
"fix-thainer/db.json"
)
# local corpus catalog filename
_CORPUS_DB_FILENAME = "db.json"
# local corpus catalog full path
_CORPUS_DB_PATH = get_full_data_path(_CORPUS_DB_FILENAME)
# create a local corpus database if it does not already exist
if not os.path.exists(_CORPUS_DB_PATH):
TinyDB(_CORPUS_DB_PATH)
def corpus_path() -> str:
"""
Get path where corpus files are kept locally.
"""
return _CORPUS_PATH
def corpus_db_url() -> str:
"""
Get remote URL of corpus catalog.
"""
return _CORPUS_DB_URL
def corpus_db_path() -> str:
"""
Get local path of corpus catalog.
"""
return _CORPUS_DB_PATH
from pythainlp.corpus.core import (
download,
get_corpus,
get_corpus_db,
get_corpus_db_detail,
get_corpus_path,
remove,
) # these imports must come before other pythainlp.corpus.* imports
from pythainlp.corpus.common import (
countries,
provinces,
thai_female_names,
thai_male_names,
thai_negations,
thai_stopwords,
thai_syllables,
thai_words,
)
| apache-2.0 | Python |
6ebeba5ebc7c76f9f0803a1cdbc2babd2ac57d63 | Update CUBE-AI example. | kwagyeman/openmv,iabdalkader/openmv,iabdalkader/openmv,iabdalkader/openmv,openmv/openmv,iabdalkader/openmv,kwagyeman/openmv,openmv/openmv,openmv/openmv,kwagyeman/openmv,openmv/openmv,kwagyeman/openmv | scripts/examples/25-Machine-Learning/nn_stm32cubeai.py | scripts/examples/25-Machine-Learning/nn_stm32cubeai.py | # STM32 CUBE.AI on OpenMV MNIST Example
# See https://github.com/openmv/openmv/blob/master/src/stm32cubeai/README.MD
import sensor, image, time, nn_st
sensor.reset() # Reset and initialize the sensor.
sensor.set_contrast(3)
sensor.set_brightness(0)
sensor.set_auto_gain(True)
sensor.set_auto_exposure(True)
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to Grayscale
sensor.set_framesize(sensor.QQQVGA) # Set frame size to 80x60
sensor.skip_frames(time = 2000) # Wait for settings take effect.
clock = time.clock() # Create a clock object to track the FPS.
# [CUBE.AI] Initialize the network
net = nn_st.loadnnst('network')
nn_input_sz = 28 # The NN input is 28x28
while(True):
clock.tick() # Update the FPS clock.
img = sensor.snapshot() # Take a picture and return the image.
# Crop in the middle (avoids vignetting)
img.crop((img.width()//2-nn_input_sz//2,
img.height()//2-nn_input_sz//2,
nn_input_sz,
nn_input_sz))
# Binarize the image
img.midpoint(2, bias=0.5, threshold=True, offset=5, invert=True)
# [CUBE.AI] Run the inference
out = net.predict(img)
print('Network argmax output: {}'.format( out.index(max(out)) ))
img.draw_string(0, 0, str(out.index(max(out))))
print('FPS {}'.format(clock.fps())) # Note: OpenMV Cam runs about half as fast when connected
| # STM32 CUBE.AI on OpenMV MNIST Example
import sensor, image, time, nn_st
sensor.reset() # Reset and initialize the sensor.
sensor.set_contrast(3)
sensor.set_brightness(0)
sensor.set_auto_gain(True)
sensor.set_auto_exposure(True)
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to Grayscale
sensor.set_framesize(sensor.QQQVGA) # Set frame size to 80x60
sensor.skip_frames(time = 2000) # Wait for settings take effect.
clock = time.clock() # Create a clock object to track the FPS.
# [CUBE.AI] Initialize the network
net = nn_st.loadnnst('network')
nn_input_sz = 28 # The NN input is 28x28
while(True):
clock.tick() # Update the FPS clock.
img = sensor.snapshot() # Take a picture and return the image.
# Crop in the middle (avoids vignetting)
img.crop((img.width()//2-nn_input_sz//2,
img.height()//2-nn_input_sz//2,
nn_input_sz,
nn_input_sz))
# Binarize the image
img.midpoint(2, bias=0.5, threshold=True, offset=5, invert=True)
# [CUBE.AI] Run the inference
out = net.predict(img)
print('Network argmax output: {}'.format( out.index(max(out)) ))
img.draw_string(0, 0, str(out.index(max(out))))
print('FPS {}'.format(clock.fps())) # Note: OpenMV Cam runs about half as fast when connected
| mit | Python |
ac0f2c967ccd6fab47607fb246fc8e359c56073b | update user context to take program | masschallenge/impact-api,masschallenge/impact-api,masschallenge/impact-api,masschallenge/impact-api | web/impact/impact/tests/contexts/user_context.py | web/impact/impact/tests/contexts/user_context.py | # MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from datetime import timedelta
from django.utils import timezone
from impact.tests.factories import (
BaseProfileFactory,
EntrepreneurProfileFactory,
ExpertProfileFactory,
IndustryFactory,
MemberProfileFactory,
UserFactory,
ProgramRoleFactory,
ProgramRoleGrantFactory,
)
class UserContext(object):
def __init__(self,
user_type="ENTREPRENEUR",
primary_industry=None,
additional_industries=None,
functional_expertise=None,
program_families=[],
program=None):
user = UserFactory(date_joined=(timezone.now() + timedelta(-10)))
self.user = user
self.program_families = program_families
self.baseprofile = BaseProfileFactory(user=user, user_type=user_type)
self.program = program
if user_type == "ENTREPRENEUR":
self.profile = EntrepreneurProfileFactory(
user=user,
program_families=self.program_families)
user.entrepreneurprofile = self.profile
elif user_type == "EXPERT":
self.primary_industry = primary_industry or IndustryFactory()
self.additional_industries = additional_industries or []
self.functional_expertise = functional_expertise or []
self.profile = ExpertProfileFactory(
user=self.user,
primary_industry=self.primary_industry,
additional_industries=self.additional_industries,
functional_expertise=self.functional_expertise,
program_families=self.program_families)
user.expertprofile = self.profile
elif user_type == "MEMBER":
self.profile = MemberProfileFactory(user=self.user)
user.memberprofile = self.profile
if program:
self.program_role = ProgramRoleFactory(program=self.program)
self.program_role_grant = ProgramRoleGrantFactory(
person=self.user,
program_role=self.program_role)
user.save()
| # MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from datetime import timedelta
from django.utils import timezone
from impact.tests.factories import (
BaseProfileFactory,
EntrepreneurProfileFactory,
ExpertProfileFactory,
IndustryFactory,
MemberProfileFactory,
UserFactory,
)
class UserContext(object):
def __init__(self,
user_type="ENTREPRENEUR",
primary_industry=None,
additional_industries=None,
functional_expertise=None,
program_families=[]):
user = UserFactory(date_joined=(timezone.now() + timedelta(-10)))
self.user = user
self.program_families = program_families
self.baseprofile = BaseProfileFactory(user=user, user_type=user_type)
if user_type == "ENTREPRENEUR":
self.profile = EntrepreneurProfileFactory(
user=user,
program_families=self.program_families)
user.entrepreneurprofile = self.profile
elif user_type == "EXPERT":
self.primary_industry = primary_industry or IndustryFactory()
self.additional_industries = additional_industries or []
self.functional_expertise = functional_expertise or []
self.profile = ExpertProfileFactory(
user=self.user,
primary_industry=self.primary_industry,
additional_industries=self.additional_industries,
functional_expertise=self.functional_expertise,
program_families=self.program_families)
user.expertprofile = self.profile
elif user_type == "MEMBER":
self.profile = MemberProfileFactory(user=self.user)
user.memberprofile = self.profile
user.save()
| mit | Python |
22b3b7e254714aa2c33cbad11126a05d1e7ecfc6 | Add new CloudSearch regions. Closes #1465. | lochiiconnectivity/boto,lochiiconnectivity/boto,rayluo/boto,zzzirk/boto,rjschwei/boto,alfredodeza/boto,TiVoMaker/boto,jameslegg/boto,awatts/boto,alex/boto,ocadotechnology/boto,disruptek/boto,israelbenatar/boto,yangchaogit/boto,kouk/boto,elainexmas/boto,shaunbrady/boto,bleib1dj/boto,stevenbrichards/boto,abridgett/boto,dablak/boto,jotes/boto,felix-d/boto,dablak/boto,drbild/boto,s0enke/boto,ryansb/boto,nikhilraog/boto,lra/boto,tpodowd/boto,podhmo/boto,cyclecomputing/boto,Pretio/boto,disruptek/boto,j-carl/boto,alex/boto,serviceagility/boto,weebygames/boto,ddzialak/boto,drbild/boto,appneta/boto,Timus1712/boto,jameslegg/boto,ekalosak/boto,darjus-amzn/boto,appneta/boto,shipci/boto,SaranyaKarthikeyan/boto,tpodowd/boto,ramitsurana/boto,nexusz99/boto,trademob/boto,revmischa/boto,acourtney2015/boto,vishnugonela/boto,Asana/boto,zachmullen/boto,clouddocx/boto,ric03uec/boto,khagler/boto,rosmo/boto,kouk/boto,dimdung/boto,jindongh/boto,nishigori/boto,rjschwei/boto,varunarya10/boto,bryx-inc/boto,pfhayes/boto,garnaat/boto,campenberger/boto,janslow/boto,vijaylbais/boto,weka-io/boto,FATruden/boto | boto/cloudsearch/__init__.py | boto/cloudsearch/__init__.py | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.ec2.regioninfo import RegionInfo
def regions():
"""
Get all available regions for the Amazon CloudSearch service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
import boto.cloudsearch.layer1
return [RegionInfo(name='us-east-1',
endpoint='cloudsearch.us-east-1.amazonaws.com',
connection_cls=boto.cloudsearch.layer1.Layer1),
RegionInfo(name='eu-west-1',
endpoint='cloudsearch.eu-west-1.amazonaws.com',
connection_cls=boto.cloudsearch.layer1.Layer1),
RegionInfo(name='us-west-1',
endpoint='cloudsearch.us-west-1.amazonaws.com',
connection_cls=boto.cloudsearch.layer1.Layer1),
RegionInfo(name='us-west-2',
endpoint='cloudsearch.us-west-2.amazonaws.com',
connection_cls=boto.cloudsearch.layer1.Layer1),
RegionInfo(name='ap-southeast-1',
endpoint='cloudsearch.ap-southeast-1.amazonaws.com',
connection_cls=boto.cloudsearch.layer1.Layer1),
]
def connect_to_region(region_name, **kw_params):
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
| # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.ec2.regioninfo import RegionInfo
def regions():
"""
Get all available regions for the Amazon CloudSearch service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
import boto.cloudsearch.layer1
return [RegionInfo(name='us-east-1',
endpoint='cloudsearch.us-east-1.amazonaws.com',
connection_cls=boto.cloudsearch.layer1.Layer1),
RegionInfo(name='eu-west-1',
endpoint='cloudsearch.eu-west-1.amazonaws.com',
connection_cls=boto.cloudsearch.layer1.Layer1),
]
def connect_to_region(region_name, **kw_params):
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
| mit | Python |
3902897c97977eb51f034696b376143d17a8c09a | Fix spelling in docstring. | machinelearningdeveloper/aoc_2016 | 01/solve_1.py | 01/solve_1.py | """Report the manhattan distance between a starting point and an ending point,
given a set of directions to follow to move between the two points."""
from distance import get_distance
from directions import load_directions, follow_directions
def main():
directions = load_directions('directions.txt')
starting_point = (0, 0)
starting_orientation = 'N'
ending_point, _ = follow_directions(starting_point, starting_orientation, *directions)
print(get_distance(starting_point, ending_point))
if __name__ == '__main__':
main()
| """Report the manhattan distance between a starting point and an ending point,
given a set of directions to follow to get move between the two points."""
from distance import get_distance
from directions import load_directions, follow_directions
def main():
directions = load_directions('directions.txt')
starting_point = (0, 0)
starting_orientation = 'N'
ending_point, _ = follow_directions(starting_point, starting_orientation, *directions)
print(get_distance(starting_point, ending_point))
if __name__ == '__main__':
main()
| mit | Python |
a361fd3eaccfa3ca16bdc1080ceea00b9a24c1cd | Change navbar DropdownMenu to right aligned so menu doesn't render off screen | CodeForPhilly/chime,CodeForPhilly/chime,CodeForPhilly/chime | src/chime_dash/app/components/menu.py | src/chime_dash/app/components/menu.py | """component/menu
Dropdown menu which appears on the navigation bar at the top of the screen
refactor incoming
"""
from typing import List
from dash.development.base_component import ComponentMeta
import dash_bootstrap_components as dbc
from chime_dash.app.components.base import Component
class Menu(Component):
"""
"""
def get_html(self) -> List[ComponentMeta]:
menu = dbc.DropdownMenu(
children=[
dbc.DropdownMenuItem("Penn Medicine", header=True),
dbc.DropdownMenuItem(
"Predictive Healthcare",
href="http://predictivehealthcare.pennmedicine.org/",
external_link=True,
),
dbc.DropdownMenuItem(
"Contact Us",
href="http://predictivehealthcare.pennmedicine.org/contact/",
external_link=True,
),
dbc.DropdownMenuItem(
"User Docs",
href="https://code-for-philly.gitbook.io/chime/",
external_link=True,
),
dbc.DropdownMenuItem(
"GitHub",
href="https://github.com/CodeForPhilly/chime",
external_link=True,
),
dbc.DropdownMenuItem(
"Slack",
href="https://codeforphilly.org/chat?channel=covid19-chime-penn",
),
],
in_navbar=True,
label="Learn More",
color="light",
right=True
)
return [menu]
| """component/menu
Dropdown menu which appears on the navigation bar at the top of the screen
refactor incoming
"""
from typing import List
from dash.development.base_component import ComponentMeta
import dash_bootstrap_components as dbc
from chime_dash.app.components.base import Component
class Menu(Component):
"""
"""
def get_html(self) -> List[ComponentMeta]:
menu = dbc.DropdownMenu(
children=[
dbc.DropdownMenuItem("Penn Medicine", header=True),
dbc.DropdownMenuItem(
"Predictive Healthcare",
href="http://predictivehealthcare.pennmedicine.org/",
external_link=True,
),
dbc.DropdownMenuItem(
"Contact Us",
href="http://predictivehealthcare.pennmedicine.org/contact/",
external_link=True,
),
dbc.DropdownMenuItem(
"User Docs",
href="https://code-for-philly.gitbook.io/chime/",
external_link=True,
),
dbc.DropdownMenuItem(
"GitHub",
href="https://github.com/CodeForPhilly/chime",
external_link=True,
),
dbc.DropdownMenuItem(
"Slack",
href="https://codeforphilly.org/chat?channel=covid19-chime-penn",
),
],
in_navbar=True,
label="Learn More",
color="light",
)
return [menu]
| mit | Python |
2dd6ba84f348fa2680de896c306cdb9a788634d8 | fix attribute name | sapcc/monasca-notification | monasca_notification/plugins/abstract_notifier.py | monasca_notification/plugins/abstract_notifier.py | # (C) Copyright 2015 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import six
from jinja2 import Template
@six.add_metaclass(abc.ABCMeta)
class AbstractNotifier(object):
def __init__(self):
self._config = None
self._template_text = None
self._template_mime_type = None
self._template = None
@abc.abstractproperty
def type(self):
pass
@abc.abstractproperty
def statsd_name(self):
pass
def config(self, config_dict):
self._config = {'timeout': 5}
self._config.update(config_dict)
tpl = self._config.get('template')
if tpl:
self._template_text = tpl.get('text')
if not self._template_text:
tpl_path = tpl['template_file']
self._template_text = open(tpl_path, 'r').read()
self._template_mime_type = tpl.get('mime_type')
self._template = Template(self._template_text)
@abc.abstractmethod
def send_notification(self, notification):
pass
| # (C) Copyright 2015 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import six
from jinja2 import Template
@six.add_metaclass(abc.ABCMeta)
class AbstractNotifier(object):
def __init__(self):
self._config = None
self._template_text = None
self._template_mime_type = None
self._template = None
@abc.abstractproperty
def type(self):
pass
@abc.abstractproperty
def statsd_name(self):
pass
def config(self, config_dict):
self._config = {'timeout': 5}
self._config.update(config_dict)
tpl = self._config.get('template')
if tpl:
self._template_text = tpl.get('text')
if not self._template_text:
tpl_path = tpl['template_file']
self._template_text = open(tpl_path, 'r').read()
self._template_mime_type = tpl.get('mime_type')
self._template = Template(self.template_text)
@abc.abstractmethod
def send_notification(self, notification):
pass
| apache-2.0 | Python |
8a4819daa627f06e1a0eac87ab44176b7e2a0115 | Correct renamed module names for bank-statement-import repository. | OpenUpgrade/OpenUpgrade,grap/OpenUpgrade,grap/OpenUpgrade,OpenUpgrade/OpenUpgrade,OpenUpgrade/OpenUpgrade,OpenUpgrade/OpenUpgrade,OpenUpgrade/OpenUpgrade,Endika/OpenUpgrade,grap/OpenUpgrade,Endika/OpenUpgrade,OpenUpgrade/OpenUpgrade,Endika/OpenUpgrade,grap/OpenUpgrade,grap/OpenUpgrade,OpenUpgrade/OpenUpgrade,Endika/OpenUpgrade,grap/OpenUpgrade,Endika/OpenUpgrade,Endika/OpenUpgrade,grap/OpenUpgrade,Endika/OpenUpgrade | openerp/addons/openupgrade_records/lib/apriori.py | openerp/addons/openupgrade_records/lib/apriori.py | """ Encode any known changes to the database here
to help the matching process
"""
renamed_modules = {
'base_calendar': 'calendar',
'mrp_jit': 'procurement_jit',
'project_mrp': 'sale_service',
# OCA/account-invoicing
'invoice_validation_wkfl': 'account_invoice_validation_workflow',
'account_invoice_zero': 'account_invoice_zero_autopay',
# OCA/server-tools
'audittrail': 'auditlog',
# OCA/bank-statement-import
'account_banking': 'account_bank_statement_import',
'account_banking_camt': 'account_bank_statement_import_camt',
'account_banking_mt940':
'account_bank_statement_import_mt940_base',
'account_banking_nl_ing_mt940':
'account_bank_statement_import_mt940_nl_ing',
'account_banking_nl_rabo_mt940':
'account_bank_statement_import_mt940_nl_rabo',
}
renamed_models = {
}
| """ Encode any known changes to the database here
to help the matching process
"""
renamed_modules = {
'base_calendar': 'calendar',
'mrp_jit': 'procurement_jit',
'project_mrp': 'sale_service',
# OCA/account-invoicing
'invoice_validation_wkfl': 'account_invoice_validation_workflow',
'account_invoice_zero': 'account_invoice_zero_autopay',
# OCA/server-tools
'audittrail': 'auditlog',
# OCA/bank-statement-import
'account_banking': 'account_bank_statement_import',
'account_banking_camt': 'bank_statement_parse_camt',
'account_banking_nl_ing_mt940': 'bank_statement_parse_nl_ing_mt940',
'account_banking_nl_rabo_mt940': 'bank_statement_parse_nl_rabo_mt940',
}
renamed_models = {
}
| agpl-3.0 | Python |
f026af91d39cf8f5f4db0925d09fc6ddff2cc443 | Use breadcrumbs from lava-server | OSSystems/lava-server,OSSystems/lava-server,Linaro/lava-server,Linaro/lava-server,Linaro/lava-server,OSSystems/lava-server,Linaro/lava-server | dashboard_app/bread_crumbs.py | dashboard_app/bread_crumbs.py | # Copyright (C) 2010 Linaro Limited
#
# Author: Zygmunt Krynicki <zygmunt.krynicki@linaro.org>
#
# This file is part of Launch Control.
#
# Launch Control is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License version 3
# as published by the Free Software Foundation
#
# Launch Control is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Launch Control. If not, see <http://www.gnu.org/licenses/>.
"""
Deprecated bread crumb module.
Bread crubms are now part of lava server
"""
from lava_server.bread_crumbs import BreadCrumb, LiveBreadCrumb, BreadCrumbTrail
| # Copyright (C) 2010 Linaro Limited
#
# Author: Zygmunt Krynicki <zygmunt.krynicki@linaro.org>
#
# This file is part of Launch Control.
#
# Launch Control is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License version 3
# as published by the Free Software Foundation
#
# Launch Control is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Launch Control. If not, see <http://www.gnu.org/licenses/>.
from django.core.urlresolvers import reverse
import logging
class BreadCrumb(object):
def __init__(self, name, parent=None, needs=None):
self.name = name
self.view = None
self.parent = parent
self.needs = needs or []
def __repr__(self):
return "<BreadCrumb name=%r view=%r parent=%r>" % (
self.name, self.view, self.parent)
def __call__(self, view):
self.view = view
view._bread_crumb = self
return view
def get_name(self, kwargs):
try:
return self.name.format(**kwargs)
except:
logging.exception("Unable to construct breadcrumb name for view %r", self.view)
raise
def get_absolute_url(self, kwargs):
try:
return reverse(self.view, args=[kwargs[name] for name in self.needs])
except:
logging.exception("Unable to construct breadcrumb URL for view %r", self.view)
raise
class LiveBreadCrumb(object):
def __init__(self, bread_crumb, kwargs):
self.bread_crumb = bread_crumb
self.kwargs = kwargs
def get_name(self):
return self.bread_crumb.get_name(self.kwargs)
def get_absolute_url(self):
return self.bread_crumb.get_absolute_url(self.kwargs)
class BreadCrumbTrail(object):
def __init__(self, bread_crumb_list, kwargs):
self.bread_crumb_list = bread_crumb_list
self.kwargs = kwargs
def __iter__(self):
for bread_crumb in self.bread_crumb_list:
yield LiveBreadCrumb(bread_crumb, self.kwargs)
@classmethod
def leading_to(cls, view, **kwargs):
lst = []
while view is not None:
lst.append(view._bread_crumb)
view = view._bread_crumb.parent
lst.reverse()
return cls(lst, kwargs or {})
| agpl-3.0 | Python |
931267a0fedb97e3b53d1a3792dd2eee72e6510b | Add todo. | faneshion/MatchZoo,faneshion/MatchZoo | matchzoo/metrics/__init__.py | matchzoo/metrics/__init__.py | from .precision import Precision
from .average_precision import AveragePrecision
from .discounted_cumulative_gain import DiscountedCumulativeGain
from .mean_reciprocal_rank import MeanReciprocalRank
from .mean_average_precision import MeanAveragePrecision
from .normalized_discounted_cumulative_gain import \
NormalizedDiscountedCumulativeGain
# TODO: unify ALIAS to list form
| from .precision import Precision
from .average_precision import AveragePrecision
from .discounted_cumulative_gain import DiscountedCumulativeGain
from .mean_reciprocal_rank import MeanReciprocalRank
from .mean_average_precision import MeanAveragePrecision
from .normalized_discounted_cumulative_gain import \
NormalizedDiscountedCumulativeGain
| apache-2.0 | Python |
bc29c044f271ca9940b8357d1194412de4986e3c | Resuelve compatibilidad con el guardado desde el API | jchernandez88/OVC_API_calc | API/models.py | API/models.py | # -*- coding: utf-8 -*-
from django.db import models
class Operacion(models.Model):
OPERACIONES = (
(u'+', u'Suma'),
(u'-', u'Resta'),
(u'*', u'Multiplicación'),
(u'/', u'División'),
)
op1 = models.DecimalField(
max_digits=14,
decimal_places=4,
default=0.0,
help_text='Operador 1',
)
op2 = models.DecimalField(
max_digits=14,
decimal_places=4,
default=0.0,
help_text='Operador 2',
)
operacion = models.CharField(
max_length=1,
choices=OPERACIONES,
default='+',
help_text='Seleccione una operación',
)
resultado = models.DecimalField(
max_digits=14,
decimal_places=4,
null=True,
blank=True,
help_text='El resultado de la operación',
)
def __unicode__(self):
return "%s %s %s = %s" % (self.op1,self.operacion,self.op2,self.resultado)
def save(self, **kwargs):
res = 0.0
if self.operacion == '*':
res = self.op1 * self.op2
elif self.operacion == '/':
if self.op2 != 0:
res = self.op1 / self.op2
elif self.operacion == '-':
res = self.op1 - self.op2
else:
res = self.op1 + self.op2
self.resultado = res
super (Operacion, self).save()
| # -*- coding: utf-8 -*-
from django.db import models
class Operacion(models.Model):
OPERACIONES = (
(u'+', u'Suma'),
(u'-', u'Resta'),
(u'*', u'Multiplicación'),
(u'/', u'División'),
)
op1 = models.DecimalField(
max_digits=14,
decimal_places=4,
default=0.0,
help_text='Operador 1',
)
op2 = models.DecimalField(
max_digits=14,
decimal_places=4,
default=0.0,
help_text='Operador 2',
)
operacion = models.CharField(
max_length=1,
choices=OPERACIONES,
default='+',
help_text='Seleccione una operación',
)
resultado = models.DecimalField(
max_digits=14,
decimal_places=4,
null=True,
blank=True,
help_text='El resultado de la operación',
)
def __unicode__(self):
return "%s %s %s = %s" % (self.op1,self.operacion,self.op2,self.resultado)
def save(self):
res = 0.0
if self.operacion == '*':
res = self.op1 * self.op2
elif self.operacion == '/':
if self.op2 != 0:
res = self.op1 / self.op2
elif self.operacion == '-':
res = self.op1 - self.op2
else:
res = self.op1 + self.op2
self.resultado = res
super (Operacion, self).save()
| mit | Python |
665c9f9811d6ee432384b205211aa053e4f401fe | Update SARSA | davidrobles/mlnd-capstone-code | capstone/algorithms/sarsa.py | capstone/algorithms/sarsa.py | import random
from capstone.policy import EGreedyPolicy
from capstone.policy import RandomPolicy
class Sarsa(object):
def __init__(self, env, policy=EGreedyPolicy(), qf={}, alpha=0.1,
gamma=0.99, n_episodes=1000):
self.env = env
self.policy = policy
self.qf = qf
self.alpha = alpha
self.gamma = gamma
self.n_episodes = n_episodes
def init(self):
'''Initializes the q-value if unvisited'''
state = self.env.cur_state()
actions = self.env.actions(state)
for action in actions:
if (state, action) not in self.qf:
self.qf[(state, action)] = random.random() - 0.5
def max_qvalue(self):
if self.env.is_terminal():
return 0
state = self.env.cur_state()
actions = self.env.actions(state)
return max([self.qf[(state, action)] for action in actions])
def learn(self):
for episode in range(self.n_episodes):
print('Episode {}'.format(episode))
self.env.reset()
step = 0
self.init()
action = self.policy.action(self.env, qf=self.qf)
while True:
print('Step {}'.format(step))
state = self.env.cur_state()
reward, next_state = self.env.do_action(action)
self.init()
next_action = self.policy.action(self.env, qf=self.qf)
update_value = reward + (self.gamma * self.qf[(next_state, next_action)]) - self.qf[(state, action)]
self.qf[(state, action)] += self.alpha * update_value
step += 1
if self.env.is_terminal():
break
| import random
from capstone.policy import RandomPolicy
class Sarsa(object):
def __init__(self, env, policy=RandomPolicy(), qf={}, alpha=0.1,
gamma=0.99, n_episodes=1000):
self.env = env
self.policy = policy
self.qf = qf
self.alpha = alpha
self.gamma = gamma
self.n_episodes = n_episodes
def init(self):
'''Initializes the q-value if unvisited'''
state = self.env.cur_state()
actions = self.env.actions(state)
for action in actions:
if (state, action) not in self.qf:
self.qf[(state, action)] = random.random() - 0.5
def max_qvalue(self):
if self.env.is_terminal():
return 0
state = self.env.cur_state()
actions = self.env.actions(state)
return max([self.qf[(state, action)] for action in actions])
def learn(self):
for episode in range(self.n_episodes):
print('Episode {}'.format(episode))
self.env.reset()
step = 0
while not self.env.is_terminal():
print('Step {}'.format(step))
self.init()
state = self.env.cur_state()
action = self.policy.action(self.env, qf=self.qf)
reward, next_state = self.env.do_action(action)
self.init()
max_qvalue = self.max_qvalue()
update_value = reward + (self.gamma * max_qvalue) - self.qf[(state, action)]
self.qf[(state, action)] += self.alpha * update_value
step += 1
| mit | Python |
1d4bbdfac78c63566438e59e7ca6836d4cee07be | Simplify run_swf_command helper | pior/caravan | caravan/commands/__init__.py | caravan/commands/__init__.py | import importlib
import inspect
import sys
import boto3
from botocore.exceptions import ClientError
from caravan.swf import get_connection
def run_swf_command(command_name, **kwargs):
connection = kwargs.get('connection')
if connection is None:
connection = get_connection()
command = getattr(connection, command_name)
callargs = {k: v for k, v in kwargs.items() if v is not None}
try:
response = command(**callargs)
except ClientError as err:
sys.exit(err)
else:
metadata = response.pop('ResponseMetadata', {})
success = metadata.get('HTTPStatusCode') == 200
if success:
return response
else:
sys.exit('Error: %s' % response)
def find_classes(module, cls):
def predicate(obj):
return inspect.isclass(obj) and issubclass(obj, cls) and obj is not cls
members = inspect.getmembers(module, predicate)
return [obj for name, obj in members]
class ClassesLoaderFromModule(object):
"""Load and return classes in a module that inherit from a class.
This module must be in the python sys path.
"""
def __init__(self, cls):
self.cls = cls
def __repr__(self):
return '<ClassesLoader(%s)>' % self.cls.__name__
def __call__(self, arg):
sys.path.append('.')
try:
module = importlib.import_module(arg)
finally:
sys.path.pop()
classes = find_classes(module, self.cls)
if not classes:
raise ValueError("No workflow in module %s" % arg)
return classes
| import importlib
import inspect
import sys
import boto3
from botocore.exceptions import ClientError
from caravan.swf import get_connection
def is_response_success(response):
status_code = response.get('ResponseMetadata', {}).get('HTTPStatusCode')
return status_code == 200
def run_swf_command(command, **kwargs):
connection = get_connection()
command = getattr(connection, command)
callargs = {k: v for k, v in kwargs.items() if v is not None}
try:
response = command(**callargs)
except ClientError as err:
sys.exit(err)
else:
if is_response_success(response):
response.pop('ResponseMetadata')
return response
else:
sys.exit('Error: %s' % response)
def find_classes(module, cls):
def predicate(obj):
return inspect.isclass(obj) and issubclass(obj, cls) and obj is not cls
members = inspect.getmembers(module, predicate)
return [obj for name, obj in members]
class ClassesLoaderFromModule(object):
"""Load and return classes in a module that inherit from a class.
This module must be in the python sys path.
"""
def __init__(self, cls):
self.cls = cls
def __repr__(self):
return '<ClassesLoader(%s)>' % self.cls.__name__
def __call__(self, arg):
sys.path.append('.')
try:
module = importlib.import_module(arg)
finally:
sys.path.pop()
classes = find_classes(module, self.cls)
if not classes:
raise ValueError("No workflow in module %s" % arg)
return classes
| mit | Python |
324af0eefaa3e453049b050633f52c403d5746cd | Update __init__.py | Fillll/reddit2telegram,Fillll/reddit2telegram | reddit2telegram/channels/r_apphookup/__init__.py | reddit2telegram/channels/r_apphookup/__init__.py | # Just empty file
| mit | Python | |
43a974f2fe1203160699c044aab80492b2cfd5d6 | Make SignalQueue singleton | clchiou/garage,clchiou/garage,clchiou/garage,clchiou/garage | py/g1/asyncs/kernels/g1/asyncs/kernels/signals.py | py/g1/asyncs/kernels/g1/asyncs/kernels/signals.py | __all__ = [
'SignalQueue',
]
import signal
import socket
import struct
import threading
from g1.bases.assertions import ASSERT
from g1.bases.classes import SingletonMeta
from . import adapters
class SignalQueue(metaclass=SingletonMeta):
"""Signal queue.
Python runtime implements a UNIX signal handler that writes signal
number to a file descriptor (which is globally unique, by the way).
``SignalQueue`` wraps this feature with a queue-like interface.
NOTE: This class is a singleton (calling ``SignalQueue()`` returns
the same instance). We make this design choice because UNIX signal
handling is always strange and global.
"""
def __init__(self):
ASSERT.is_(threading.current_thread(), threading.main_thread())
sock_r, self._sock_w = socket.socketpair()
self._sock_r = adapters.SocketAdapter(sock_r)
self._sock_w.setblocking(False)
self._original_wakeup_fd = signal.set_wakeup_fd(self._sock_w.fileno())
self._original_handlers = {}
def subscribe(self, signum):
"""Subscribe to ``signum`` signal."""
if signum in self._original_handlers:
return
# Register a dummy signal handler to ask Python to write the
# signal number to the wakeup file descriptor.
self._original_handlers[signum] = signal.signal(signum, _noop)
# Set SA_RESTART to limit EINTR occurrences.
signal.siginterrupt(signum, False)
def unsubscribe(self, signum):
if signum not in self._original_handlers:
return
signal.signal(signum, self._original_handlers.pop(signum))
# Should we also restore ``signal.siginterrupt``?
def close(self):
for signum in tuple(self._original_handlers):
self.unsubscribe(signum)
signal.set_wakeup_fd(self._original_wakeup_fd)
self._sock_r.target.close()
self._sock_w.close()
async def get(self):
one_byte = await self._sock_r.recv(1)
signum = struct.unpack('B', one_byte)[0]
return signal.Signals(signum) # pylint: disable=no-member
def _noop(*_):
pass
| __all__ = [
'SignalQueue',
]
import signal
import socket
import struct
import threading
from g1.bases.assertions import ASSERT
from . import adapters
class SignalQueue:
"""Signal queue.
Python runtime implements a UNIX signal handler that writes signal
number to a file descriptor (which is globally unique, by the way).
``SignalQueue`` wraps this feature with a queue-like interface.
"""
def __init__(self):
ASSERT.is_(threading.current_thread(), threading.main_thread())
sock_r, self._sock_w = socket.socketpair()
self._sock_r = adapters.SocketAdapter(sock_r)
self._sock_w.setblocking(False)
self._original_wakeup_fd = signal.set_wakeup_fd(self._sock_w.fileno())
self._original_handlers = {}
def subscribe(self, signum):
"""Subscribe to ``signum`` signal."""
if signum in self._original_handlers:
return
# Register a dummy signal handler to ask Python to write the
# signal number to the wakeup file descriptor.
self._original_handlers[signum] = signal.signal(signum, _noop)
# Set SA_RESTART to limit EINTR occurrences.
signal.siginterrupt(signum, False)
def unsubscribe(self, signum):
if signum not in self._original_handlers:
return
signal.signal(signum, self._original_handlers.pop(signum))
# Should we also restore ``signal.siginterrupt``?
def close(self):
for signum in tuple(self._original_handlers):
self.unsubscribe(signum)
signal.set_wakeup_fd(self._original_wakeup_fd)
self._sock_r.target.close()
self._sock_w.close()
async def get(self):
one_byte = await self._sock_r.recv(1)
signum = struct.unpack('B', one_byte)[0]
return signal.Signals(signum) # pylint: disable=no-member
def _noop(*_):
pass
| mit | Python |
fbcae346a308a4cf327ea5696cdc76f6c1d13e24 | make it 1 jvm | calvingit21/h2o-2,111t8e/h2o-2,h2oai/h2o,rowhit/h2o-2,h2oai/h2o,eg-zhang/h2o-2,vbelakov/h2o,111t8e/h2o-2,111t8e/h2o-2,111t8e/h2o-2,elkingtonmcb/h2o-2,h2oai/h2o-2,vbelakov/h2o,100star/h2o,h2oai/h2o-2,vbelakov/h2o,100star/h2o,calvingit21/h2o-2,h2oai/h2o,100star/h2o,rowhit/h2o-2,elkingtonmcb/h2o-2,rowhit/h2o-2,vbelakov/h2o,rowhit/h2o-2,elkingtonmcb/h2o-2,rowhit/h2o-2,100star/h2o,eg-zhang/h2o-2,100star/h2o,h2oai/h2o-2,vbelakov/h2o,rowhit/h2o-2,h2oai/h2o-2,elkingtonmcb/h2o-2,h2oai/h2o-2,111t8e/h2o-2,calvingit21/h2o-2,111t8e/h2o-2,calvingit21/h2o-2,calvingit21/h2o-2,eg-zhang/h2o-2,rowhit/h2o-2,rowhit/h2o-2,elkingtonmcb/h2o-2,h2oai/h2o-2,100star/h2o,111t8e/h2o-2,100star/h2o,eg-zhang/h2o-2,calvingit21/h2o-2,111t8e/h2o-2,elkingtonmcb/h2o-2,vbelakov/h2o,calvingit21/h2o-2,rowhit/h2o-2,h2oai/h2o,h2oai/h2o-2,rowhit/h2o-2,eg-zhang/h2o-2,h2oai/h2o-2,calvingit21/h2o-2,eg-zhang/h2o-2,h2oai/h2o,vbelakov/h2o,h2oai/h2o,vbelakov/h2o,calvingit21/h2o-2,vbelakov/h2o,h2oai/h2o,vbelakov/h2o,elkingtonmcb/h2o-2,h2oai/h2o,elkingtonmcb/h2o-2,eg-zhang/h2o-2,calvingit21/h2o-2,eg-zhang/h2o-2,eg-zhang/h2o-2,h2oai/h2o,elkingtonmcb/h2o-2,h2oai/h2o-2,111t8e/h2o-2,100star/h2o,100star/h2o,h2oai/h2o-2,elkingtonmcb/h2o-2,111t8e/h2o-2,eg-zhang/h2o-2,h2oai/h2o | py/testdir_single_jvm/test_rf_model_key_unique.py | py/testdir_single_jvm/test_rf_model_key_unique.py | import os, json, unittest, time, shutil, sys
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts
import argparse
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_rf_model_key_unique(self):
modelKeyDict = {}
for trial in xrange (1,5,1):
csvPathname = h2o.find_file('smalldata/iris/iris2.csv')
start = time.time()
# rfview=False used to inhibit the rfview completion
rfResult = h2o_cmd.runRF(trees=6, timeoutSecs=10, rfview=False, csvPathname=csvPathname)
print "RF #%d" % trial, "started on ", csvPathname, 'took', time.time() - start, 'seconds'
model_key = rfResult['model_key']
print "model_key:", model_key
if model_key in modelKeyDict:
raise Exception("same model_key used in RF #%d that matches prior RF #%d" % (trial, modelKeyDict[model_key]))
modelKeyDict[model_key] = trial
# just show the jobs still going, if any. maybe none, because short (iris)
a = h2o.nodes[0].jobs_admin()
print "jobs_admin():", h2o.dump_json(a)
if __name__ == '__main__':
h2o.unit_main()
| import os, json, unittest, time, shutil, sys
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts
import argparse
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(2)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_rf_model_key_unique(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
modelKeyDict = {}
for trial in xrange (1,5,1):
csvPathname = h2o.find_file('smalldata/iris/iris2.csv')
# key = h2o.nodes[0].put_file(csvPathname)
# parseKey = h2o.nodes[0].parse(key, key + "_" + str(trial) + ".hex")
h2o.verboseprint("Trial", trial)
start = time.time()
# rfview=False used to inhibit the rfview completion
rfResult = h2o_cmd.runRF(trees=6, timeoutSecs=10, rfview=False, csvPathname=csvPathname)
print "RF #%d" % trial, "started on ", csvPathname, 'took', time.time() - start, 'seconds'
model_key = rfResult['model_key']
print "model_key:", model_key
if model_key in modelKeyDict:
raise Exception("same model_key used in RF #%d that matches prior RF #%d" % (trial, modelKeyDict[model_key]))
modelKeyDict[model_key] = trial
# just show the jobs still going, if any. maybe none, because short (iris)
a = h2o.nodes[0].jobs_admin()
print "jobs_admin():", h2o.dump_json(a)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 | Python |
2344e5bdc2663bcc222376faba32a292ed3e686d | Update python 3 classifiers | ajaali/cookiecutter-pypackage-minimal,kragniz/cookiecutter-pypackage-minimal | {{cookiecutter.package_name}}/setup.py | {{cookiecutter.package_name}}/setup.py | import setuptools
setuptools.setup(
name="{{ cookiecutter.package_name }}",
version="{{ cookiecutter.package_version }}",
url="{{ cookiecutter.package_url }}",
author="{{ cookiecutter.author_name }}",
author_email="{{ cookiecutter.author_email }}",
description="{{ cookiecutter.package_description }}",
long_description=open('README.rst').read(),
packages=setuptools.find_packages(),
install_requires=[],
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
],
)
| import setuptools
setuptools.setup(
name="{{ cookiecutter.package_name }}",
version="{{ cookiecutter.package_version }}",
url="{{ cookiecutter.package_url }}",
author="{{ cookiecutter.author_name }}",
author_email="{{ cookiecutter.author_email }}",
description="{{ cookiecutter.package_description }}",
long_description=open('README.rst').read(),
packages=setuptools.find_packages(),
install_requires=[],
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
],
)
| mit | Python |
b12c0e0f8706cac061d30854c3cab9586645d3aa | Update to latest stable release (#4543) | lgarren/spack,iulian787/spack,krafczyk/spack,iulian787/spack,mfherbst/spack,TheTimmy/spack,mfherbst/spack,EmreAtes/spack,lgarren/spack,skosukhin/spack,mfherbst/spack,matthiasdiener/spack,skosukhin/spack,skosukhin/spack,LLNL/spack,matthiasdiener/spack,iulian787/spack,krafczyk/spack,tmerrick1/spack,matthiasdiener/spack,LLNL/spack,matthiasdiener/spack,skosukhin/spack,iulian787/spack,LLNL/spack,tmerrick1/spack,TheTimmy/spack,TheTimmy/spack,mfherbst/spack,EmreAtes/spack,EmreAtes/spack,tmerrick1/spack,mfherbst/spack,krafczyk/spack,EmreAtes/spack,LLNL/spack,tmerrick1/spack,lgarren/spack,TheTimmy/spack,iulian787/spack,TheTimmy/spack,lgarren/spack,EmreAtes/spack,matthiasdiener/spack,tmerrick1/spack,krafczyk/spack,krafczyk/spack,LLNL/spack,skosukhin/spack,lgarren/spack | var/spack/repos/builtin/packages/fontconfig/package.py | var/spack/repos/builtin/packages/fontconfig/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Fontconfig(AutotoolsPackage):
"""Fontconfig is a library for configuring/customizing font access"""
homepage = "http://www.freedesktop.org/wiki/Software/fontconfig/"
url = "http://www.freedesktop.org/software/fontconfig/release/fontconfig-2.12.3.tar.gz"
version('2.12.3', 'aca0c734c1a38eb3ba12b2447dd90ab0')
version('2.12.1', 'ce55e525c37147eee14cc2de6cc09f6c')
version('2.11.1', 'e75e303b4f7756c2b16203a57ac87eba')
depends_on('freetype')
depends_on('libxml2')
depends_on('pkg-config', type='build')
depends_on('font-util')
def configure_args(self):
font_path = join_path(self.spec['font-util'].prefix, 'share', 'fonts')
return [
'--enable-libxml2',
'--disable-docs',
'--with-default-fonts={0}'.format(font_path)
]
@run_after('install')
def system_fonts(self):
# point configuration file to system-install fonts
# gtk applications were failing to display text without this
config_file = join_path(self.prefix, 'etc', 'fonts', 'fonts.conf')
filter_file('<dir prefix="xdg">fonts</dir>',
'<dir prefix="xdg">fonts</dir><dir>/usr/share/fonts</dir>',
config_file)
| ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Fontconfig(AutotoolsPackage):
"""Fontconfig is a library for configuring/customizing font access"""
homepage = "http://www.freedesktop.org/wiki/Software/fontconfig/"
url = "http://www.freedesktop.org/software/fontconfig/release/fontconfig-2.12.1.tar.gz"
version('2.12.1', 'ce55e525c37147eee14cc2de6cc09f6c')
version('2.11.1', 'e75e303b4f7756c2b16203a57ac87eba')
depends_on('freetype')
depends_on('libxml2')
depends_on('pkg-config', type='build')
depends_on('font-util')
def configure_args(self):
font_path = join_path(self.spec['font-util'].prefix, 'share', 'fonts')
return [
'--enable-libxml2',
'--disable-docs',
'--with-default-fonts={0}'.format(font_path)
]
@run_after('install')
def system_fonts(self):
# point configuration file to system-install fonts
# gtk applications were failing to display text without this
config_file = join_path(self.prefix, 'etc', 'fonts', 'fonts.conf')
filter_file('<dir prefix="xdg">fonts</dir>',
'<dir prefix="xdg">fonts</dir><dir>/usr/share/fonts</dir>',
config_file)
| lgpl-2.1 | Python |
b5c931816099b8d9b738de9708c289c2541c3005 | Add dependency (#21271) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/py-cheroot/package.py | var/spack/repos/builtin/packages/py-cheroot/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyCheroot(PythonPackage):
""" Highly-optimized, pure-python HTTP server """
homepage = "https://cheroot.cherrypy.org/"
pypi = "cheroot/cheroot-6.5.5.tar.gz"
version('8.3.0', sha256='a0577e1f28661727d472671a7cc4e0c12ea0cbc5220265e70f00a8b8cb628931')
version('6.5.5', sha256='f6a85e005adb5bc5f3a92b998ff0e48795d4d98a0fbb7edde47a7513d4100601')
depends_on('py-setuptools', type='build')
depends_on('py-setuptools-scm@1.15.0:', type='build')
depends_on('py-setuptools-scm-git-archive@1.0:', type='build')
depends_on('py-more-itertools@2.6:', type=('build', 'run'))
depends_on('py-six@1.11.0:', type=('build', 'run'))
depends_on('py-backports-functools-lru-cache', type=('build', 'run'),
when='^python@:3.3')
depends_on('py-jaraco-functools', when='@8.3.0:', type=('build', 'run'))
depends_on('python@2.7:2.8,3.4:', type=('build', 'run'))
| # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyCheroot(PythonPackage):
""" Highly-optimized, pure-python HTTP server """
homepage = "https://cheroot.cherrypy.org/"
pypi = "cheroot/cheroot-6.5.5.tar.gz"
version('8.3.0', sha256='a0577e1f28661727d472671a7cc4e0c12ea0cbc5220265e70f00a8b8cb628931')
version('6.5.5', sha256='f6a85e005adb5bc5f3a92b998ff0e48795d4d98a0fbb7edde47a7513d4100601')
depends_on('py-setuptools', type='build')
depends_on('py-setuptools-scm@1.15.0:', type='build')
depends_on('py-setuptools-scm-git-archive@1.0:', type='build')
depends_on('py-more-itertools@2.6:', type=('build', 'run'))
depends_on('py-six@1.11.0:', type=('build', 'run'))
depends_on('py-backports-functools-lru-cache', type=('build', 'run'),
when='^python@:3.3')
depends_on('python@2.7:2.8,3.4:', type=('build', 'run'))
| lgpl-2.1 | Python |
b0bf2c4c99a3f2de5341eccb31bcab9bc299f85e | add version 2.0.1 to r-magrittr (#21085) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/r-magrittr/package.py | var/spack/repos/builtin/packages/r-magrittr/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RMagrittr(RPackage):
"""A Forward-Pipe Operator for R
Provides a mechanism for chaining commands with a new forward-pipe
operator, %>%. This operator will forward a value, or the result of an
expression, into the next function call/expression. There is flexible
support for the type of right-hand side expressions. For more information,
see package vignette."""
homepage = "https://cloud.r-project.org/package=magrittr"
url = "https://cloud.r-project.org/src/contrib/magrittr_1.5.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/magrittr"
version('2.0.1', sha256='75c265d51cc2b34beb27040edb09823c7b954d3990a7a931e40690b75d4aad5f')
version('1.5', sha256='05c45943ada9443134caa0ab24db4a962b629f00b755ccf039a2a2a7b2c92ae8')
| # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RMagrittr(RPackage):
"""Provides a mechanism for chaining commands with a new forward-pipe
operator, %>%. This operator will forward a value, or the result of an
expression, into the next function call/expression. There is flexible
support for the type of right-hand side expressions. For more information,
see package vignette."""
homepage = "https://cloud.r-project.org/package=magrittr"
url = "https://cloud.r-project.org/src/contrib/magrittr_1.5.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/magrittr"
version('1.5', sha256='05c45943ada9443134caa0ab24db4a962b629f00b755ccf039a2a2a7b2c92ae8')
| lgpl-2.1 | Python |
4e10b47eefac56bdcc91a79b00a3c23afd0f7bd8 | Update Weather_Station.py | userdw/RaspberryPi_3_Starter_Kit | 07_Weather_Station/Weather_Station/Weather_Station.py | 07_Weather_Station/Weather_Station/Weather_Station.py | import MCP3202
import os
from time import sleep
try:
while True:
os.system("clear")
value1= MCP3202.readADC(0)
voltage = round(float(value1 * 5000 / 4095), 2)
temperature = (voltage - 500) / 10
tampil = round(float(temperature), 2)
print("Weather Station")
print("Curent Temperature : ", tampil, u"\xb0", "C")
print("")
print("Press CTRL+C to exit")
sleep(0.075)
except KeyboardInterrupt:
print("exit")
| import MCP3202
import os
from time import sleep
try:
while True:
os.system("clear")
value1= MCP3202.readADC(0)
voltage = round(float(value1 * 5000 / 4096), 2)
temperature = (voltage - 550) / 10
tampil = round(float(temperature), 2)
print("Weather Station")
print("Curent Temperature : ", tampil, u"\xb0", "C")
print("")
print("Press CTRL+C to exit")
sleep(0.075)
except KeyboardInterrupt:
print("exit")
| mit | Python |
b6f2fdbc037aafca734bf88cf47261427cd99519 | Add autospec | kubikusrubikus/mkdocs,pjbull/mkdocs,davidgillies/mkdocs,jeoygin/mkdocs,jamesbeebop/mkdocs,jeoygin/mkdocs,vi4m/mkdocs,hhg2288/mkdocs,hhg2288/mkdocs,cnbin/mkdocs,simonfork/mkdocs,samuelcolvin/mkdocs,samhatfield/mkdocs,lukfor/mkdocs,michaelmcandrew/mkdocs,jamesbeebop/mkdocs,rickpeters/mkdocs,nicoddemus/mkdocs,michaelmcandrew/mkdocs,gregelin/mkdocs,peter1000/mkdocs,dmehra/mkdocs,d0ugal/mkdocs,lbenet/mkdocs,waylan/mkdocs,ramramps/mkdocs,davidgillies/mkdocs,hhg2288/mkdocs,samuelcolvin/mkdocs,lukfor/mkdocs,justinkinney/mkdocs,vi4m/mkdocs,waylan/mkdocs,mlzummo/mkdocs,michaelmcandrew/mkdocs,samhatfield/mkdocs,peter1000/mkdocs,mkdocs/mkdocs,lbenet/mkdocs,vi4m/mkdocs,gregelin/mkdocs,lukfor/mkdocs,justinkinney/mkdocs,kubikusrubikus/mkdocs,davidgillies/mkdocs,longjl/mkdocs,tedmiston/mkdocs,jimporter/mkdocs,nicoddemus/mkdocs,samuelcolvin/mkdocs,d0ugal/mkdocs,longjl/mkdocs,pjbull/mkdocs,nicoddemus/mkdocs,jimporter/mkdocs,tedmiston/mkdocs,cnbin/mkdocs,simonfork/mkdocs,jeoygin/mkdocs,tedmiston/mkdocs,jamesbeebop/mkdocs,fujita-shintaro/mkdocs,dmehra/mkdocs,williamjmorenor/mkdocs,fujita-shintaro/mkdocs,ramramps/mkdocs,mkdocs/mkdocs,peter1000/mkdocs,ramramps/mkdocs,rickpeters/mkdocs,williamjmorenor/mkdocs,pjbull/mkdocs,fujita-shintaro/mkdocs,justinkinney/mkdocs,mlzummo/mkdocs,gregelin/mkdocs,d0ugal/mkdocs,kubikusrubikus/mkdocs,rickpeters/mkdocs,longjl/mkdocs,dmehra/mkdocs,mkdocs/mkdocs,williamjmorenor/mkdocs,lbenet/mkdocs,samhatfield/mkdocs,waylan/mkdocs,simonfork/mkdocs,mlzummo/mkdocs,jimporter/mkdocs,cnbin/mkdocs | mkdocs/tests/cli_tests.py | mkdocs/tests/cli_tests.py | #!/usr/bin/env python
# coding: utf-8
import unittest
import mock
from click.testing import CliRunner
from mkdocs import cli
class CLITests(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
@mock.patch('mkdocs.serve.serve', autospec=True)
def test_serve(self, mock_serve):
result = self.runner.invoke(
cli.cli, ["serve", ], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_serve.call_count, 1)
@mock.patch('mkdocs.build.build', autospec=True)
def test_build(self, mock_build):
result = self.runner.invoke(
cli.cli, ["build", ], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_build.call_count, 1)
@mock.patch('mkdocs.build.build', autospec=True)
def test_build_verbose(self, mock_build):
result = self.runner.invoke(
cli.cli, ["--verbose", "build"], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_build.call_count, 1)
@mock.patch('mkdocs.build.build', autospec=True)
def test_json(self, mock_build):
result = self.runner.invoke(
cli.cli, ["json", ], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_build.call_count, 1)
@mock.patch('mkdocs.new.new', autospec=True)
def test_new(self, mock_new):
result = self.runner.invoke(
cli.cli, ["new", "project"], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_new.call_count, 1)
@mock.patch('mkdocs.gh_deploy.gh_deploy', autospec=True)
def test_gh_deploy(self, mock_gh_deploy):
result = self.runner.invoke(
cli.cli, ["gh-deploy"], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_gh_deploy.call_count, 1)
| #!/usr/bin/env python
# coding: utf-8
import unittest
import mock
from click.testing import CliRunner
from mkdocs import cli
class CLITests(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
@mock.patch('mkdocs.serve.serve')
def test_serve(self, mock_serve):
result = self.runner.invoke(
cli.cli, ["serve", ], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_serve.call_count, 1)
@mock.patch('mkdocs.build.build')
def test_build(self, mock_build):
result = self.runner.invoke(
cli.cli, ["build", ], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_build.call_count, 1)
@mock.patch('mkdocs.build.build')
def test_build_verbose(self, mock_build):
result = self.runner.invoke(
cli.cli, ["--verbose", "build"], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_build.call_count, 1)
@mock.patch('mkdocs.build.build')
def test_json(self, mock_build):
result = self.runner.invoke(
cli.cli, ["json", ], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_build.call_count, 1)
@mock.patch('mkdocs.new.new')
def test_new(self, mock_new):
result = self.runner.invoke(
cli.cli, ["new", "project"], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_new.call_count, 1)
@mock.patch('mkdocs.gh_deploy.gh_deploy')
def test_gh_deploy(self, mock_gh_deploy):
result = self.runner.invoke(
cli.cli, ["gh-deploy"], catch_exceptions=False)
self.assertEqual(result.exit_code, 0)
self.assertEqual(mock_gh_deploy.call_count, 1)
| bsd-2-clause | Python |
d838afa824fa6b626ba87e5b4eabe1e95c960016 | Bump version | markstory/lint-review,markstory/lint-review,markstory/lint-review | lintreview/__init__.py | lintreview/__init__.py | __version__ = '2.23.0'
| __version__ = '2.22.0'
| mit | Python |
f8d507cb7d439b8451a6633d46ab02df7858297f | Update ipc_lista1.5.py | any1m1c/ipc20161 | lista1/ipc_lista1.5.py | lista1/ipc_lista1.5.py | #ipc_lista1.5
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
#Faça um programa que converta metros para centímetros.
metros = input("Digite o valor em metros que deseja converter em centímetros: ")
centimetros = metros * 100
print "Esse valor equivale a: %d" %centimetros
| #ipc_lista1.5
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
#Faça um programa que converta metros para centímetros.
metros = input("Digite o valor em metros que deseja converter em centímetros: ")
centimetros = metros * 100
print "Esse valor equivale a: %d" %centime
| apache-2.0 | Python |
6ed3c705e8e89d1a69d7930271d6e1144d28722a | Update ipc_lista2.7.py | any1m1c/ipc20161 | lista2/ipc_lista2.7.py | lista2/ipc_lista2.7.py | #EQUIPE 2
#
#
#
# Ana Beatriz Faria Frota - 1615310027
# Nahan Trindade Passos - 1615310021
#
n1 = int(input("Insira um número: "))
n2 = int(input("Insira outro número: "))
n3 = int(input("Insira mais outro número: "))
if n1>n2 and n1>n3:
print ("O primeiro número é maior")
if n2>n3:
print ("O terceiro número é maior")
else:
print ("O segundo número é maior")
if n2>n1 and n2>n3:
print ("O segundo número á maior")
if n1>n3:
print ("O terceiro número é menor")
else:
print ("O primeiro número é menor")
if n3>n1 and n3>n2:
print ("O terceiro número é maior")
if n1>n2:
print ("O segundo número é menor")
else:
print ("O primeiro numero e menor")
| #
# Ana Beatriz Faria Frota - 1615310027
# Mateus Mota de Souza - 1615310016
# Matheus Henrique Araujo Batista - 1615310039
# Nahan Trindade Passos - 1615310021
# Victor Hugo Souza Correia - 1615310024
#
n1 = int(input("Insira um número: "))
n2 = int(input("Insira outro número: "))
n3 = int(input("Insira mais outro número: "))
if n1>n2 and n1>n3:
print ("O primeiro número é maior")
if n2>n3:
print ("O terceiro número é maior")
else:
print ("O segundo número é maior")
if n2>n1 and n2>n3:
print ("O segundo número á maior")
if n1>n3:
print ("O terceiro número é menor")
else:
print ("O primeiro número é menor")
if n3>n1 and n3>n2:
print ("O terceiro número é maior")
if n1>n2:
print ("O segundo número é menor")
else:
print ("O primeiro numero e menor")
| apache-2.0 | Python |
6cf4367485ae79e7dcd19733cbec4ec7c844d624 | test to new style | wkentaro/chainer,chainer/chainer,okuta/chainer,jnishi/chainer,tkerola/chainer,keisuke-umezawa/chainer,hvy/chainer,okuta/chainer,pfnet/chainer,ktnyt/chainer,chainer/chainer,wkentaro/chainer,ktnyt/chainer,niboshi/chainer,keisuke-umezawa/chainer,okuta/chainer,jnishi/chainer,jnishi/chainer,wkentaro/chainer,keisuke-umezawa/chainer,niboshi/chainer,chainer/chainer,niboshi/chainer,rezoo/chainer,wkentaro/chainer,jnishi/chainer,keisuke-umezawa/chainer,hvy/chainer,ktnyt/chainer,chainer/chainer,hvy/chainer,ktnyt/chainer,niboshi/chainer,hvy/chainer,okuta/chainer | tests/chainer_tests/distributions_tests/test_multivariate_normal.py | tests/chainer_tests/distributions_tests/test_multivariate_normal.py | from chainer import distributions
from chainer import testing
import numpy
@testing.parameterize(*testing.product({
'shape': [(3, 2), (1,)],
'is_variable': [True, False],
'sample_shape': [(3, 2), ()],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestMultivariateNormal(testing.distribution_unittest):
scipy_onebyone = True
def setUp_configure(self):
from scipy import stats
self.dist = distributions.MultivariateNormal
self.scipy_dist = stats.multivariate_normal
self.scipy_onebyone = True
self.event_shape = (3,)
self.test_targets = set([
"batch_shape", "entropy", "event_shape", "log_prob",
"support"])
loc = numpy.random.uniform(
-1, 1, self.shape + (3,)).astype(numpy.float32)
cov = numpy.random.normal(size=self.shape + (3, 3))
cov = numpy.matmul(
cov, numpy.rollaxis(cov, -1, -2)).astype(numpy.float32)
scale_tril = numpy.linalg.cholesky(cov).astype(numpy.float32)
self.params = {"loc": loc, "scale_tril": scale_tril}
self.scipy_params = {"mean": loc, "cov": cov}
def sample_for_test(self):
smp = numpy.random.normal(
size=self.sample_shape + self.shape + (3,)).astype(numpy.float32)
return smp
testing.run_module(__name__, __file__)
| from chainer import distributions
from chainer import testing
import numpy
@testing.parameterize(*testing.product({
'shape': [(3, 2), (1,)],
'is_variable': [True, False],
'sample_shape': [(3, 2), ()],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestMultivariateNormal(testing.distribution_unittest):
def setUp(self):
testing.distribution_unittest.setUp(self)
from scipy import stats
self.dist = distributions.MultivariateNormal
self.scipy_dist = stats.multivariate_normal
self.scipy_onebyone = True
self.event_shape = (3,)
self.test_targets = set([
"batch_shape", "entropy", "event_shape", "log_prob",
"support"])
def params_init(self):
loc = numpy.random.uniform(
-1, 1, self.shape + (3,)).astype(numpy.float32)
cov = numpy.random.normal(size=self.shape + (3, 3))
cov = numpy.matmul(
cov, numpy.rollaxis(cov, -1, -2)).astype(numpy.float32)
scale_tril = numpy.linalg.cholesky(cov).astype(numpy.float32)
self.params = {"loc": loc, "scale_tril": scale_tril}
self.scipy_params = {"mean": loc, "cov": cov}
def sample_for_test(self):
smp = numpy.random.normal(
size=self.sample_shape + self.shape + (3,)).astype(numpy.float32)
return smp
testing.run_module(__name__, __file__)
| mit | Python |
fdcc22dbe81bd930fa4764fd17a5b0d9b895bb3c | complete rework of HttpError: now it can set Content-Length | renskiy/marnadi | marnadi/http/errors.py | marnadi/http/errors.py | HTTP_200_OK = '200 OK'
HTTP_404_NOT_FOUND = '404 Not Found'
HTTP_405_METHOD_NOT_ALLOWED = '405 Method Not Allowed'
HTTP_500_INTERNAL_SERVER_ERROR = '500 Internal Server Error'
HTTP_501_NOT_IMPLEMENTED = '501 Not Implemented'
class HttpError(Exception):
@property
def headers(self):
default_headers = {
'Content-Type': 'text/plain',
'Content-Length': None,
}
for header, value in self._headers:
header, value = str(header).title(), str(value)
if header in default_headers:
default_headers[header] = value
else:
yield (header, value)
if default_headers['Content-Length'] is None:
default_headers['Content-Length'] = len(self.data)
for header in default_headers.iteritems():
yield header
def __init__(self, status=HTTP_500_INTERNAL_SERVER_ERROR,
data=None, headers=None):
self.status = status
self._headers = headers or ()
self.data = data or status
def __iter__(self):
yield self.data | from marnadi.http import descriptors
HTTP_200_OK = '200 OK'
HTTP_404_NOT_FOUND = '404 Not Found'
HTTP_405_METHOD_NOT_ALLOWED = '405 Method Not Allowed'
HTTP_500_INTERNAL_SERVER_ERROR = '500 Internal Server Error'
HTTP_501_NOT_IMPLEMENTED = '501 Not Implemented'
class HttpError(Exception):
status = HTTP_500_INTERNAL_SERVER_ERROR
headers = descriptors.Headers(
('Content-Type', 'text/plain')
)
def __init__(self, status=None, headers=None):
self.status = status or self.status
if headers:
self.headers.update(headers)
@property
def body(self):
yield self.status
def __iter__(self):
return self.body | mit | Python |
3d48066c78d693b89cb2daabfd1ebe756862edc5 | Remove dependency check done by Mopidy | hechtus/mopidy-gmusic,jaapz/mopidy-gmusic,Tilley/mopidy-gmusic,elrosti/mopidy-gmusic,jodal/mopidy-gmusic,jaibot/mopidy-gmusic,mopidy/mopidy-gmusic | mopidy_gmusic/__init__.py | mopidy_gmusic/__init__.py | from __future__ import unicode_literals
import os
from mopidy import config, ext
__version__ = '0.2.2'
class GMusicExtension(ext.Extension):
dist_name = 'Mopidy-GMusic'
ext_name = 'gmusic'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
def get_config_schema(self):
schema = super(GMusicExtension, self).get_config_schema()
schema['username'] = config.String()
schema['password'] = config.Secret()
schema['deviceid'] = config.String(optional=True)
return schema
def get_backend_classes(self):
from .actor import GMusicBackend
return [GMusicBackend]
| from __future__ import unicode_literals
import os
from mopidy import config, exceptions, ext
__version__ = '0.2.2'
class GMusicExtension(ext.Extension):
dist_name = 'Mopidy-GMusic'
ext_name = 'gmusic'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
def get_config_schema(self):
schema = super(GMusicExtension, self).get_config_schema()
schema['username'] = config.String()
schema['password'] = config.Secret()
schema['deviceid'] = config.String(optional=True)
return schema
def validate_environment(self):
try:
import gmusicapi # noqa
except ImportError as e:
raise exceptions.ExtensionError('gmusicapi library not found', e)
pass
def get_backend_classes(self):
from .actor import GMusicBackend
return [GMusicBackend]
| apache-2.0 | Python |
20479ca1bf0636041c4b97633ea39ce2bbf84c2c | Remove some unused variables | ipython/ipython,ipython/ipython | IPython/nbconvert/preprocessors/tests/test_svg2pdf.py | IPython/nbconvert/preprocessors/tests/test_svg2pdf.py | """Tests for the svg2pdf preprocessor"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from IPython.testing import decorators as dec
from IPython.nbformat import v4 as nbformat
from .base import PreprocessorTestsBase
from ..svg2pdf import SVG2PDFPreprocessor
class Testsvg2pdf(PreprocessorTestsBase):
"""Contains test functions for svg2pdf.py"""
simple_svg = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
version="1.0"
x="0.00000000"
y="0.00000000"
width="500.00000"
height="500.00000"
id="svg2">
<defs
id="defs4" />
<g
id="layer1">
<rect
width="300.00000"
height="300.00000"
x="100.00000"
y="100.00000"
style="opacity:1.0000000;fill:none;fill-opacity:1.0000000;fill-rule:evenodd;stroke:#000000;stroke-width:8.0000000;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-dashoffset:0.00000000;stroke-opacity:1.0000000"
id="rect5719" />
</g>
</svg>"""
def build_notebook(self):
"""Build a reveal slides notebook in memory for use with tests.
Overrides base in PreprocessorTestsBase"""
outputs = [nbformat.new_output(output_type='display_data',
data={'image/svg+xml':self.simple_svg})
]
cells=[nbformat.new_code_cell(source="", execution_count=1, outputs=outputs)]
return nbformat.new_notebook(cells=cells)
def build_preprocessor(self):
"""Make an instance of a preprocessor"""
preprocessor = SVG2PDFPreprocessor()
preprocessor.enabled = True
return preprocessor
def test_constructor(self):
"""Can a SVG2PDFPreprocessor be constructed?"""
self.build_preprocessor()
@dec.onlyif_cmds_exist('inkscape')
def test_output(self):
"""Test the output of the SVG2PDFPreprocessor"""
nb = self.build_notebook()
res = self.build_resources()
preprocessor = self.build_preprocessor()
nb, res = preprocessor(nb, res)
self.assertIn('application/pdf', nb.cells[0].outputs[0].data)
| """Tests for the svg2pdf preprocessor"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from IPython.testing import decorators as dec
from IPython.nbformat import v4 as nbformat
from .base import PreprocessorTestsBase
from ..svg2pdf import SVG2PDFPreprocessor
class Testsvg2pdf(PreprocessorTestsBase):
"""Contains test functions for svg2pdf.py"""
simple_svg = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
version="1.0"
x="0.00000000"
y="0.00000000"
width="500.00000"
height="500.00000"
id="svg2">
<defs
id="defs4" />
<g
id="layer1">
<rect
width="300.00000"
height="300.00000"
x="100.00000"
y="100.00000"
style="opacity:1.0000000;fill:none;fill-opacity:1.0000000;fill-rule:evenodd;stroke:#000000;stroke-width:8.0000000;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4.0000000;stroke-dasharray:none;stroke-dashoffset:0.00000000;stroke-opacity:1.0000000"
id="rect5719" />
</g>
</svg>"""
def build_notebook(self):
"""Build a reveal slides notebook in memory for use with tests.
Overrides base in PreprocessorTestsBase"""
outputs = [nbformat.new_output(output_type='display_data',
data={'image/svg+xml':self.simple_svg})
]
slide_metadata = {'slideshow' : {'slide_type': 'slide'}}
subslide_metadata = {'slideshow' : {'slide_type': 'subslide'}}
cells=[nbformat.new_code_cell(source="", execution_count=1, outputs=outputs)]
return nbformat.new_notebook(cells=cells)
def build_preprocessor(self):
"""Make an instance of a preprocessor"""
preprocessor = SVG2PDFPreprocessor()
preprocessor.enabled = True
return preprocessor
def test_constructor(self):
"""Can a SVG2PDFPreprocessor be constructed?"""
self.build_preprocessor()
@dec.onlyif_cmds_exist('inkscape')
def test_output(self):
"""Test the output of the SVG2PDFPreprocessor"""
nb = self.build_notebook()
res = self.build_resources()
preprocessor = self.build_preprocessor()
nb, res = preprocessor(nb, res)
self.assertIn('application/pdf', nb.cells[0].outputs[0].data)
| bsd-3-clause | Python |
576b74bdb3e4ddcdcd533be48ff755c4bcb17c3f | Fix width and height parameters for latest images functions | asmeurer/iterm2-tools | iterm2_tools/images.py | iterm2_tools/images.py | """
Functions for displaying images inline in iTerm2.
See https://iterm2.com/images.html.
"""
from __future__ import print_function, division, absolute_import
import sys
import os
import base64
IMAGE_CODE = '\033]1337;File=name={name};inline={inline};size={size};width={width};height={height}:{base64_img}\a'
def display_image_bytes(b, filename=None, inline=1, width='auto', height='auto'):
"""
Display the image given by the bytes b in the terminal.
If filename=None the filename defaults to "Unnamed file".
width and height are strings, following the format
N: N character cells.
Npx: N pixels.
N%: N percent of the session's width or height.
'auto': The image's inherent size will be used to determine an appropriate
dimension (the default).
See https://www.iterm2.com/documentation-images.html
"""
sys.stdout.buffer.write(image_bytes(b, filename=filename, inline=inline,
width=width, height=height))
sys.stdout.write('\n')
def image_bytes(b, filename=None, inline=1, width='auto', height='auto'):
"""
Return a bytes string that displays image given by bytes b in the terminal
If filename=None, the filename defaults to "Unnamed file"
width and height are strings, following the format
N: N character cells.
Npx: N pixels.
N%: N percent of the session's width or height.
'auto': The image's inherent size will be used to determine an appropriate
dimension (the default).
See https://www.iterm2.com/documentation-images.html
"""
data = {
'name': base64.b64encode((filename or 'Unnamed file').encode('utf-8')).decode('ascii'),
'inline': inline,
'size': len(b),
'base64_img': base64.b64encode(b).decode('ascii'),
'width': width,
'height': height,
}
# IMAGE_CODE is a string because bytes doesn't support formatting
return IMAGE_CODE.format(**data).encode('ascii')
def display_image_file(fn):
"""
Display an image in the terminal.
A newline is not printed.
"""
with open(os.path.realpath(os.path.expanduser(fn)), 'rb') as f:
sys.stdout.buffer.write(image_bytes(f.read(), filename=fn))
| """
Functions for displaying images inline in iTerm2.
See https://iterm2.com/images.html.
"""
from __future__ import print_function, division, absolute_import
import sys
import os
import base64
IMAGE_CODE = '\033]1337;File=name={name};inline={inline};size={size};width={width};height={height}:{base64_img}\a'
def display_image_bytes(b, filename=None, inline=1, width='auto', height='auto'):
"""
Display the image given by the bytes b in the terminal.
If filename=None the filename defaults to "Unnamed file".
width and height are strings, following the format
N: N character cells.
Npx: N pixels.
N%: N percent of the session's width or height.
'auto': The image's inherent size will be used to determine an appropriate
dimension (the default).
See https://www.iterm2.com/documentation-images.html
"""
sys.stdout.buffer.write(image_bytes(b, filename=filename, inline=inline))
sys.stdout.write('\n')
def image_bytes(b, filename=None, inline=1):
"""
Return a bytes string that displays image given by bytes b in the terminal
If filename=None, the filename defaults to "Unnamed file"
"""
data = {
'name': base64.b64encode((filename or 'Unnamed file').encode('utf-8')).decode('ascii'),
'inline': inline,
'size': len(b),
'base64_img': base64.b64encode(b).decode('ascii'),
'width': width,
'height': height,
}
# IMAGE_CODE is a string because bytes doesn't support formatting
return IMAGE_CODE.format(**data).encode('ascii')
def display_image_file(fn):
"""
Display an image in the terminal.
A newline is not printed.
"""
with open(os.path.realpath(os.path.expanduser(fn)), 'rb') as f:
sys.stdout.buffer.write(image_bytes(f.read(), filename=fn))
| mit | Python |
e7bf5e84629daffd2a625759addf4eea8423e115 | Put fill_event in the public API. | NSLS-II/dataportal,NSLS-II/dataportal,danielballan/datamuxer,danielballan/dataportal,ericdill/datamuxer,danielballan/datamuxer,NSLS-II/datamuxer,ericdill/databroker,tacaswell/dataportal,ericdill/datamuxer,ericdill/databroker,danielballan/dataportal,tacaswell/dataportal | dataportal/broker/__init__.py | dataportal/broker/__init__.py | from .simple_broker import (_DataBrokerClass, EventQueue, Header,
LocationError, IntegrityError, fill_event)
from .handler_registration import register_builtin_handlers
DataBroker = _DataBrokerClass() # singleton, used by pims_readers import below
from .pims_readers import Images, SubtractedImages
register_builtin_handlers()
| from .simple_broker import (_DataBrokerClass, EventQueue, Header,
LocationError, IntegrityError)
from .handler_registration import register_builtin_handlers
DataBroker = _DataBrokerClass() # singleton, used by pims_readers import below
from .pims_readers import Images, SubtractedImages
register_builtin_handlers()
| bsd-3-clause | Python |
886cd787111f659ee5f54758ca75f208e4a99c2b | Fix plural typo in comment | xtaran/debian-devel-changes-bot,lamby/debian-devel-changes-bot,xtaran/debian-devel-changes-bot,sebastinas/debian-devel-changes-bot,lamby/debian-devel-changes-bot,lamby/debian-devel-changes-bot | DebianDevelChanges/mailparsers/accepted_upload.py | DebianDevelChanges/mailparsers/accepted_upload.py | # -*- coding: utf-8 -*-
#
# Debian Changes Bot
# Copyright (C) 2008 Chris Lamb <chris@chris-lamb.co.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from DebianDevelChanges import MailParser
from DebianDevelChanges.utils import quoted_printable
from DebianDevelChanges.messages import AcceptedUploadMessage
class AcceptedUploadParser(MailParser):
@staticmethod
def parse(headers, body):
if headers.get('List-Id', '') not in ('<debian-devel-changes.lists.debian.org>',
'"backports.org changes" <backports-changes.lists.backports.org>'):
return
msg = AcceptedUploadMessage()
mapping = {
'Source': 'package',
'Version': 'version',
'Distribution': 'distribution',
'Urgency': 'urgency',
'Changed-By': 'by',
'Closes': 'closes',
}
for line in body:
for field, target in mapping.iteritems():
if line.startswith('%s: ' % field):
val = line[len(field) + 2:]
setattr(msg, target, val)
del mapping[field]
break
# If we have found all the fields, stop looking
if len(mapping) == 0:
break
if msg.by:
msg.by = quoted_printable(msg.by)
try:
if msg.closes:
msg.closes = [int(x) for x in msg.closes.split(' ')]
except ValueError:
return
return msg
| # -*- coding: utf-8 -*-
#
# Debian Changes Bot
# Copyright (C) 2008 Chris Lamb <chris@chris-lamb.co.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from DebianDevelChanges import MailParser
from DebianDevelChanges.utils import quoted_printable
from DebianDevelChanges.messages import AcceptedUploadMessage
class AcceptedUploadParser(MailParser):
@staticmethod
def parse(headers, body):
if headers.get('List-Id', '') not in ('<debian-devel-changes.lists.debian.org>',
'"backports.org changes" <backports-changes.lists.backports.org>'):
return
msg = AcceptedUploadMessage()
mapping = {
'Source': 'package',
'Version': 'version',
'Distribution': 'distribution',
'Urgency': 'urgency',
'Changed-By': 'by',
'Closes': 'closes',
}
for line in body:
for field, target in mapping.iteritems():
if line.startswith('%s: ' % field):
val = line[len(field) + 2:]
setattr(msg, target, val)
del mapping[field]
break
# If we have found all the field, stop looking
if len(mapping) == 0:
break
if msg.by:
msg.by = quoted_printable(msg.by)
try:
if msg.closes:
msg.closes = [int(x) for x in msg.closes.split(' ')]
except ValueError:
return
return msg
| agpl-3.0 | Python |
75726945934a049c9fc81066996f1670f29ead2c | Improve handling of SKIP_LONG_TESTS build variable. | rec/BiblioPixel,rec/BiblioPixel,rec/BiblioPixel,rec/BiblioPixel,ManiacalLabs/BiblioPixel,ManiacalLabs/BiblioPixel,ManiacalLabs/BiblioPixel,ManiacalLabs/BiblioPixel | test/long_test.py | test/long_test.py | import os, unittest
"""This module long_test provides a decorator, @long_test, that you can use to
mark tests which take a lot of wall clock time.
If the system environment variable SKIP_LONG_TESTS is set, tests decorated
with @long_test will not be run.
"""
SKIP_LONG_TESTS = os.getenv('SKIP_LONG_TESTS', '').lower().startswith('t')
long_test = unittest.skipIf(SKIP_LONG_TESTS, 'Long tests skipped.')
| import os, unittest
"""This module long_test provides a decorator, @long_test, that you can use to
mark tests which take a lot of wall clock time.
If the system environment variable SKIP_LONG_TESTS is set, tests decorated
with @long_test will not be run.
"""
SKIP_LONG_TESTS = os.getenv('SKIP_LONG_TESTS', None) is not None
long_test = unittest.skipIf(SKIP_LONG_TESTS, 'Long tests skipped.')
| mit | Python |
f3490a9fc6a21ec6a6bb1af9f77d270b003cf45a | update dit/__init__.py too | dit/dit,chebee7i/dit,dit/dit,Autoplectic/dit,Autoplectic/dit,Autoplectic/dit,dit/dit,Autoplectic/dit,dit/dit,chebee7i/dit,chebee7i/dit,chebee7i/dit,dit/dit,Autoplectic/dit | dit/__init__.py | dit/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
dit is a Python package for information theory on discrete random variables.
d = discrete
i = information
t = theory
However, the more precise statement (at this point) is that `dit` is a
Python package for sigma-algebras defined on finite sets. Presently,
a few assumptions are made which make `dit` unsuitable as a general
sigma algebra package (on finite sets). Some of these assumptions
deal with how the sample space and sigma algebras are formed from
the probability mass function (and its outcomes).
"""
# Order is important!
from .params import ditParams
from .npscalardist import ScalarDistribution
from .npdist import Distribution
from .distconst import *
import dit.algorithms
import dit.divergences
import dit.esoteric
import dit.multivariate
import dit.shannon
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
dit is a Python package for information theory on discrete random variables.
d = discrete
i = information
t = theory
However, the more precise statement (at this point) is that `dit` is a
Python package for sigma-algebras defined on finite sets. Presently,
a few assumptions are made which make `dit` unsuitable as a general
sigma algebra package (on finite sets). Some of these assumptions
deal with how the sample space and sigma algebras are formed from
the probability mass function (and its outcomes).
"""
# Order is important!
from .params import ditParams
from .npscalardist import ScalarDistribution
from .npdist import Distribution
from .distconst import *
import dit.algorithms
| bsd-3-clause | Python |
1c74da888aeae06f97f07f37b523d7b33d9dd210 | Bump version to 0.9.0 | clb6/jarvis-cli | jarvis_cli/__init__.py | jarvis_cli/__init__.py |
__version__ = '0.9.0'
EVENT_CATEGORIES_TO_DEFAULTS = {
"consumed": 100,
"produced": 100,
"experienced": 100,
"interacted": 80,
"formulated": 80,
"completed": 50,
"detected": 10,
"measured": 5
}
EVENT_CATEGORIES = list(EVENT_CATEGORIES_TO_DEFAULTS.keys())
|
__version__ = '0.8.0'
EVENT_CATEGORIES_TO_DEFAULTS = {
"consumed": 100,
"produced": 100,
"experienced": 100,
"interacted": 80,
"formulated": 80,
"completed": 50,
"detected": 10,
"measured": 5
}
EVENT_CATEGORIES = list(EVENT_CATEGORIES_TO_DEFAULTS.keys())
| apache-2.0 | Python |
40c6c591792f6601706d350bfb3025b389f978c5 | Update __init__.py | selvakarthik21/newspaper,selvakarthik21/newspaper | newspaperdemo/__init__.py | newspaperdemo/__init__.py | from flask import Flask, request, render_template, redirect, url_for,jsonify
from newspaper import Article
from xml.etree import ElementTree
app = Flask(__name__)
# Debug logging
import logging
import sys
# Defaults to stdout
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
try:
log.info('Logging to console')
except:
_, ex, _ = sys.exc_info()
log.error(ex.message)
@app.route('/articles/show')
def index():
return render_template('index.html')
@app.route('/')
def show_article():
url_to_clean = request.args.get('url')
if not url_to_clean:
return jsonify({
'authors': '',
'title': '',
'text': '',
'keywords': '',
'summary': ''
})
article = Article(url_to_clean)
article.download()
article.parse()
try:
html_string = ElementTree.tostring(article.clean_top_node)
except:
html_string = "Error converting html to string."
try:
article.nlp()
except:
log.error("Couldn't process with NLP")
a = {
'authors': str(', '.join(article.authors)),
'title': article.title,
'text': article.text,
'keywords': str(', '.join(article.keywords)),
'summary': article.summary
}
return jsonify(a)
| from flask import Flask, request, render_template, redirect, url_for,jsonify
from newspaper import Article
from xml.etree import ElementTree
app = Flask(__name__)
# Debug logging
import logging
import sys
# Defaults to stdout
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
try:
log.info('Logging to console')
except:
_, ex, _ = sys.exc_info()
log.error(ex.message)
@app.route('/articles/show')
def index():
return render_template('index.html')
@app.route('/')
def show_article():
url_to_clean = request.args.get('url_to_clean')
if not url_to_clean:
return jsonify({
'authors': '',
'title': '',
'text': '',
'keywords': '',
'summary': ''
})
article = Article(url_to_clean)
article.download()
article.parse()
try:
html_string = ElementTree.tostring(article.clean_top_node)
except:
html_string = "Error converting html to string."
try:
article.nlp()
except:
log.error("Couldn't process with NLP")
a = {
'authors': str(', '.join(article.authors)),
'title': article.title,
'text': article.text,
'keywords': str(', '.join(article.keywords)),
'summary': article.summary
}
return jsonify(a)
| mit | Python |
fb6e7aff9a30f0a1efbf7bf28735e985aaf50fe9 | Use xarray support to simplify GINI example | Unidata/MetPy,ShawnMurd/MetPy,dopplershift/MetPy,ahaberlie/MetPy,Unidata/MetPy,ahaberlie/MetPy,jrleeman/MetPy,jrleeman/MetPy,dopplershift/MetPy | examples/formats/GINI_Water_Vapor.py | examples/formats/GINI_Water_Vapor.py | # Copyright (c) 2015,2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
GINI Water Vapor Imagery
========================
Use MetPy's support for GINI files to read in a water vapor satellite image and plot the
data using CartoPy.
"""
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import xarray as xr
from metpy.cbook import get_test_data
from metpy.io import GiniFile
from metpy.plots import add_metpy_logo, add_timestamp, colortables
###########################################
# Open the GINI file from the test data
f = GiniFile(get_test_data('WEST-CONUS_4km_WV_20151208_2200.gini'))
print(f)
###########################################
# Get a Dataset view of the data (essentially a NetCDF-like interface to the
# underlying data). Pull out the data and (x, y) coordinates. We use `metpy.parse_cf` to
# handle parsing some netCDF Climate and Forecasting (CF) metadata to simplify working with
# projections.
ds = xr.open_dataset(f)
x = ds.variables['x'][:]
y = ds.variables['y'][:]
dat = ds.metpy.parse_cf('WV')
###########################################
# Plot the image. We use MetPy's xarray/cartopy integration to automatically handle parsing
# the projection information.
fig = plt.figure(figsize=(10, 12))
add_metpy_logo(fig, 125, 145)
ax = fig.add_subplot(1, 1, 1, projection=dat.metpy.cartopy_crs)
wv_norm, wv_cmap = colortables.get_with_range('WVCIMSS', 100, 260)
wv_cmap.set_under('k')
im = ax.imshow(dat[:], cmap=wv_cmap, norm=wv_norm,
extent=(x.min(), x.max(), y.min(), y.max()), origin='upper')
ax.add_feature(cfeature.COASTLINE.with_scale('50m'))
add_timestamp(ax, f.prod_desc.datetime, y=0.02, high_contrast=True)
plt.show()
| # Copyright (c) 2015,2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
GINI Water Vapor Imagery
========================
Use MetPy's support for GINI files to read in a water vapor satellite image and plot the
data using CartoPy.
"""
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import xarray as xr
from metpy.cbook import get_test_data
from metpy.io import GiniFile
from metpy.plots import add_metpy_logo, add_timestamp, colortables
###########################################
# Open the GINI file from the test data
f = GiniFile(get_test_data('WEST-CONUS_4km_WV_20151208_2200.gini'))
print(f)
###########################################
# Get a Dataset view of the data (essentially a NetCDF-like interface to the
# underlying data). Pull out the data, (x, y) coordinates, and the projection
# information.
ds = xr.open_dataset(f)
x = ds.variables['x'][:]
y = ds.variables['y'][:]
dat = ds.variables['WV']
proj_var = ds.variables[dat.attrs['grid_mapping']]
print(proj_var)
###########################################
# Create CartoPy projection information for the file
globe = ccrs.Globe(ellipse='sphere', semimajor_axis=proj_var.attrs['earth_radius'],
semiminor_axis=proj_var.attrs['earth_radius'])
proj = ccrs.LambertConformal(central_longitude=proj_var.attrs['longitude_of_central_meridian'],
central_latitude=proj_var.attrs['latitude_of_projection_origin'],
standard_parallels=[proj_var.attrs['standard_parallel']],
globe=globe)
###########################################
# Plot the image
fig = plt.figure(figsize=(10, 12))
add_metpy_logo(fig, 125, 145)
ax = fig.add_subplot(1, 1, 1, projection=proj)
wv_norm, wv_cmap = colortables.get_with_range('WVCIMSS', 100, 260)
wv_cmap.set_under('k')
im = ax.imshow(dat[:], cmap=wv_cmap, norm=wv_norm,
extent=(x.min(), x.max(), y.min(), y.max()), origin='upper')
ax.add_feature(cfeature.COASTLINE.with_scale('50m'))
add_timestamp(ax, f.prod_desc.datetime, y=0.02, high_contrast=True)
plt.show()
| bsd-3-clause | Python |
4a6d56acef9ab67c41386f32093ea517a8129f37 | Fix failing test for checking get_owner | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/modules/win_status.py | salt/modules/win_status.py | '''
Module for returning various status data about a minion.
These data can be useful for compiling into stats later.
:depends: - pythoncom
- wmi
'''
import logging
import salt.utils
log = logging.getLogger(__name__)
try:
import pythoncom
import wmi
import salt.utils.winapi
has_required_packages = True
except ImportError:
if salt.utils.is_windows():
log.exception('pywin32 and wmi python packages are required '
'in order to use the status module.')
has_required_packages = False
__opts__ = {}
def __virtual__():
'''
Only works on Windows systems
'''
if salt.utils.is_windows() and has_required_packages:
return 'status'
return False
def procs():
'''
Return the process data
CLI Example::
salt '*' status.procs
'''
with salt.utils.winapi.Com():
wmi_obj = wmi.WMI()
processes = wmi_obj.win32_process()
process_info = {}
for proc in processes:
process_info[proc.ProcessId] = _get_process_info(proc)
return process_info
def _get_process_info(proc):
cmd = (proc.CommandLine or '').encode('utf-8')
name = proc.Name.encode('utf-8')
info = dict(
cmd=cmd,
name=name,
**_get_process_owner(proc)
)
return info
def _get_process_owner(process):
owner = {}
domain, error_code, user = '', '', ''
try:
domain, error_code, user = process.GetOwner()
owner['user'] = user.encode('utf-8')
owner['user_domain'] = domain.encode('utf-8')
except Exception as exc:
pass
if not error_code:
owner['user'] = user.encode('utf-8')
owner['user_domain'] = domain.encode('utf-8')
elif process.ProcessId in [0, 4] and error_code == 2:
# Access Denied for System Idle Process and System
owner['user'] = 'SYSTEM'
owner['user_domain'] = 'NT AUTHORITY'
else:
log.warning('Error getting owner of process; PID=\'{0}\'; Error: {1}'
.format(process.ProcessId, error_code))
return owner
| '''
Module for returning various status data about a minion.
These data can be useful for compiling into stats later.
:depends: - pythoncom
- wmi
'''
import logging
import salt.utils
log = logging.getLogger(__name__)
try:
import pythoncom
import wmi
import salt.utils.winapi
has_required_packages = True
except ImportError:
if salt.utils.is_windows():
log.exception('pywin32 and wmi python packages are required '
'in order to use the status module.')
has_required_packages = False
__opts__ = {}
def __virtual__():
'''
Only works on Windows systems
'''
if salt.utils.is_windows() and has_required_packages:
return 'status'
return False
def procs():
'''
Return the process data
CLI Example::
salt '*' status.procs
'''
with salt.utils.winapi.Com():
wmi_obj = wmi.WMI()
processes = wmi_obj.win32_process()
process_info = {}
for proc in processes:
process_info[proc.ProcessId] = _get_process_info(proc)
return process_info
def _get_process_info(proc):
cmd = (proc.CommandLine or '').encode('utf-8')
name = proc.Name.encode('utf-8')
info = dict(
cmd=cmd,
name=name,
**_get_process_owner(proc)
)
return info
def _get_process_owner(process):
owner = {}
domain, error_code, user = process.GetOwner()
if not error_code:
owner['user'] = user.encode('utf-8')
owner['user_domain'] = domain.encode('utf-8')
elif process.ProcessId in [0, 4] and error_code == 2:
# Access Denied for System Idle Process and System
owner['user'] = 'SYSTEM'
owner['user_domain'] = 'NT AUTHORITY'
else:
log.warning('Error getting owner of process; PID=\'{0}\'; Error: {1}'
.format(process.ProcessId, error_code))
return owner
| apache-2.0 | Python |
f1d43fdd3bf2bb9aea5fa2a610778e816c0d8d33 | remove .waf if exists at prepare_samples | pfi/maf,pfi/maf | samples/prepare_samples.py | samples/prepare_samples.py | #!/usr/bin/env python
"""
Prepares for running samples.
It just generates maf.py and copies maf.py and waf to each sample directory.
Please run this script before trying these samples.
"""
import glob
import os
import shutil
import subprocess
if __name__ == '__main__':
os.chdir('..')
subprocess.check_call('./generate_maf.py')
os.chdir('samples')
for d in glob.glob('*/'):
for dotwaf in glob.glob(os.path.join(d, ".waf-*")):
shutil.rmtree(dotwaf)
for f in '../maf.py', '../waf':
shutil.copy(f, d)
| #!/usr/bin/env python
"""
Prepares for running samples.
It just generates maf.py and copies maf.py and waf to each sample directory.
Please run this script before trying these samples.
"""
import glob
import os
import shutil
import subprocess
if __name__ == '__main__':
os.chdir('..')
subprocess.check_call('./generate_maf.py')
os.chdir('samples')
for d in glob.glob('*/'):
for f in '../maf.py', '../waf':
shutil.copy(f, d)
| bsd-2-clause | Python |
525cc6aea35264a7fb967905823faed62ac7522c | Drop Py2 and six on salt/beacons/proxy_example.py | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/beacons/proxy_example.py | salt/beacons/proxy_example.py | """
Example beacon to use with salt-proxy
.. code-block:: yaml
beacons:
proxy_example:
endpoint: beacon
"""
import logging
import salt.utils.http
# Important: If used with salt-proxy
# this is required for the beacon to load!!!
__proxyenabled__ = ["*"]
__virtualname__ = "proxy_example"
log = logging.getLogger(__name__)
def __virtual__():
"""
Trivially let the beacon load for the test example.
For a production beacon we should probably have some expression here.
"""
return True
def validate(config):
"""
Validate the beacon configuration
"""
if not isinstance(config, list):
return False, ("Configuration for proxy_example beacon must be a list.")
return True, "Valid beacon configuration"
def beacon(config):
"""
Called several times each second
https://docs.saltstack.com/en/latest/topics/beacons/#the-beacon-function
.. code-block:: yaml
beacons:
proxy_example:
- endpoint: beacon
"""
# Important!!!
# Although this toy example makes an HTTP call
# to get beacon information
# please be advised that doing CPU or IO intensive
# operations in this method will cause the beacon loop
# to block.
_config = {}
list(map(_config.update, config))
beacon_url = "{}{}".format(__opts__["proxy"]["url"], _config["endpoint"])
ret = salt.utils.http.query(beacon_url, decode_type="json", decode=True)
return [ret["dict"]]
| # -*- coding: utf-8 -*-
"""
Example beacon to use with salt-proxy
.. code-block:: yaml
beacons:
proxy_example:
endpoint: beacon
"""
# Import Python libs
from __future__ import absolute_import, unicode_literals
import logging
# Import salt libs
import salt.utils.http
from salt.ext.six.moves import map
# Important: If used with salt-proxy
# this is required for the beacon to load!!!
__proxyenabled__ = ["*"]
__virtualname__ = "proxy_example"
log = logging.getLogger(__name__)
def __virtual__():
"""
Trivially let the beacon load for the test example.
For a production beacon we should probably have some expression here.
"""
return True
def validate(config):
"""
Validate the beacon configuration
"""
if not isinstance(config, list):
return False, ("Configuration for proxy_example beacon must be a list.")
return True, "Valid beacon configuration"
def beacon(config):
"""
Called several times each second
https://docs.saltstack.com/en/latest/topics/beacons/#the-beacon-function
.. code-block:: yaml
beacons:
proxy_example:
- endpoint: beacon
"""
# Important!!!
# Although this toy example makes an HTTP call
# to get beacon information
# please be advised that doing CPU or IO intensive
# operations in this method will cause the beacon loop
# to block.
_config = {}
list(map(_config.update, config))
beacon_url = "{0}{1}".format(__opts__["proxy"]["url"], _config["endpoint"])
ret = salt.utils.http.query(beacon_url, decode_type="json", decode=True)
return [ret["dict"]]
| apache-2.0 | Python |
d62bc64db740b8467aeabb5a88bd46503c0bcd31 | add time command, run twice for testing cache speed | viraintel/OWASP-Nettacker,viraintel/OWASP-Nettacker,viraintel/OWASP-Nettacker,viraintel/OWASP-Nettacker | scripts/__travis_test__.py | scripts/__travis_test__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
# NOTE(review): os.system exit codes are discarded, so a failing command
# does not fail the CI job -- confirm this is intended.
# Test help menu command
os.system('time python nettacker.py --help')
# Test show version command
os.system('time python nettacker.py --version')
# Test all modules command
os.system("time python nettacker.py -i 127.0.0.1 -u user1,user2 -p pass1,pass2 -m all -g 21,25,80,443 -t 2 -T 0.1")
# Test all modules for second time (testing cache) command
os.system("time python nettacker.py -i 127.0.0.1 -u user1,user2 -p pass1,pass2 -m all -g 21,25,80,443 -t 2 -T 0.1")
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
# Smoke-test the CLI: first the help screen, then a full run of every
# module against localhost. Exit codes from os.system are ignored.
os.system('python nettacker.py --help')
os.system("python nettacker.py -i 127.0.0.1 -u user1,user2 -p pass1,pass2 -m all -g 21,25,80,443 -t 2 -T 0.1")
| apache-2.0 | Python |
6addf98a5ce2ca4e61f809664660418351cb8922 | Remove unused convert function from validator | ipython/ipython,ipython/ipython | IPython/nbformat/validator.py | IPython/nbformat/validator.py | from __future__ import print_function
#!/usr/bin/env python
# -*- coding: utf8 -*-
import json
import os
from IPython.external.jsonschema import Draft3Validator
import IPython.external.jsonpointer as jsonpointer
from IPython.utils.py3compat import iteritems
from .current import nbformat, nbformat_schema
schema_path = os.path.join(
os.path.split(__file__)[0], "v%d" % nbformat, nbformat_schema)
def validate(nbjson, key='', verbose=True):
    """Validate *nbjson* against the notebook JSON schema.

    ``key`` is a JSON pointer selecting a sub-schema of the resolved
    schema document. Prints each error when ``verbose`` is true and
    returns the total number of validation errors found.
    """
    # load the schema file
    with open(schema_path, 'r') as fh:
        schema_json = json.load(fh)
    # resolve internal references
    v3schema = resolve_ref(schema_json)
    v3schema = jsonpointer.resolve_pointer(v3schema, key)
    errors = 0
    v = Draft3Validator(v3schema)
    for error in v.iter_errors(nbjson):
        errors = errors + 1
        if verbose:
            print(error)
    return errors
def resolve_ref(json, schema=None):
    """Return a copy of *json* with internal ``$ref`` references resolved.

    Only local references pointing into the same document (``schema``,
    which defaults to *json* itself on the outermost call) are supported.
    """
    if not schema:
        schema = json
    if type(json) is list:
        # Lists: resolve every element in order.
        return [resolve_ref(entry, schema=schema) for entry in json]
    if type(json) is dict:
        # Dicts: a lone '$ref' key replaces the whole mapping with the
        # document it points at; any other key gets its value resolved.
        resolved = {}
        for key, ref in iteritems(json):
            if key == '$ref':
                assert len(json) == 1
                target = jsonpointer.resolve_pointer(schema, ref)
                resolved = resolve_ref(target, schema=schema)
            else:
                resolved[key] = resolve_ref(ref, schema=schema)
        return resolved
    # Scalars and anything else pass through untouched.
    return json
| from __future__ import print_function
#!/usr/bin/env python
# -*- coding: utf8 -*-
import json
import os
from IPython.external.jsonschema import Draft3Validator
import IPython.external.jsonpointer as jsonpointer
from IPython.utils.py3compat import iteritems
from .current import nbformat, nbformat_schema
schema_path = os.path.join(
os.path.split(__file__)[0], "v%d" % nbformat, nbformat_schema)
def validate(nbjson, key='', verbose=True):
    """Validate *nbjson* against the notebook JSON schema; return the
    number of validation errors, printing each one when ``verbose``.
    """
    # load the schema file
    with open(schema_path, 'r') as fh:
        schema_json = json.load(fh)
    # resolve internal references
    v3schema = resolve_ref(schema_json)
    v3schema = jsonpointer.resolve_pointer(v3schema, key)
    errors = 0
    v = Draft3Validator(v3schema)
    for error in v.iter_errors(nbjson):
        errors = errors + 1
        if verbose:
            print(error)
    return errors
def resolve_ref(json, schema=None):
    """return a json with resolved internal references

    only support local reference to the same json document
    (``schema`` defaults to the outermost *json* itself)
    """
    if not schema:
        schema = json
    # if it's a list, resolve references for each item in the list
    if type(json) is list:
        resolved = []
        for item in json:
            resolved.append(resolve_ref(item, schema=schema))
    # if it's a dictionary, resolve references for each item in the
    # dictionary
    elif type(json) is dict:
        resolved = {}
        for key, ref in iteritems(json):
            # if the key is equal to $ref, then replace the entire
            # dictionary with the resolved value
            if key == '$ref':
                assert len(json) == 1
                pointer = jsonpointer.resolve_pointer(schema, ref)
                resolved = resolve_ref(pointer, schema=schema)
            else:
                resolved[key] = resolve_ref(ref, schema=schema)
    # otherwise it's a normal object, so just return it
    else:
        resolved = json
    return resolved
def convert(namein, nameout, indent=2):
    """resolve the references of namein, save the result in nameout"""
    # NOTE(review): ``file`` shadows the Python 2 builtin; harmless here.
    jsn = None
    with open(namein) as file:
        jsn = json.load(file)
    v = resolve_ref(jsn)
    # keep only the '/notebook' sub-document of the resolved schema
    x = jsonpointer.resolve_pointer(v, '/notebook')
    with open(nameout, 'w') as file:
        json.dump(x, file, indent=indent)
| bsd-3-clause | Python |
41fc2f6506c18df4aaa84320c2ae62db0aacd2ac | Fix bugs in compile_grm build extension | google/language-resources,google/language-resources,googlei18n/language-resources,google/language-resources,google/language-resources,google/language-resources,googlei18n/language-resources,google/language-resources,googlei18n/language-resources,googlei18n/language-resources,googlei18n/language-resources,googlei18n/language-resources | mul_034/build_defs.bzl | mul_034/build_defs.bzl | # Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def compile_grm(src=None, out=None, data=[]):
    """Compile a Thrax grm file to a FAR file.

    Args:
      src: label of the .grm source file.
      out: optional output filename; defaults to "<name>.far".
      data: extra runtime dependencies made visible to the Thrax compiler.
    """
    # Rather than invoking the Thrax compiler directly in a genrule, we go via an
    # intermediate sh_binary. The reason for this is that, unlike a genrule, an
    # sh_binary as a runfiles directory in which all runtime dependencies are
    # present as symlinks in a predictable directory layout. This allows the Thrax
    # compiler to read additional runtime data (e.g. symbol tables) from
    # predictable relative paths, regardless of whether they were present in the
    # source tree or generated by build rules.
    suffix = ".grm"
    if src.endswith(suffix):
        # strip the ".grm" suffix to get the rule's base name
        name = src[:-len(suffix)]
    else:
        name = src
    if not name:
        fail("Ill-formed src name: " + src)
    if not out:
        out = name + ".far"
    genrule_name = "compile_%s_grm" % name
    tool_name = genrule_name + "_helper"
    native.sh_binary(
        name = tool_name,
        srcs = ["//mul_034:compile_grm_helper.sh"],
        data = data + [
            src,
            "@thrax//:thraxcompiler",
        ],
    )
    native.genrule(
        name = genrule_name,
        srcs = [src],
        outs = [out],
        cmd = "$(location %s) $< $@" % tool_name,
        tools = [tool_name],
    )
def script_test(script):
    """Register a sh_test diffing thrax_g2p output for *script* against
    the golden file <script>_test.tsv (comment lines are ignored)."""
    native.sh_test(
        name = "%s_test" % script,
        timeout = "short",
        srcs = ["//utils:eval.sh"],
        args = [
            """
            grep '^[^#]' $(location %s_test.tsv) |
            cut -f 1 |
            $(location //utils:thrax_g2p) \
              --far=$(location %s.far) \
              --far_g2p_key=CODEPOINTS_TO_GRAPHEMES \
              --phoneme_syms=$(location grapheme.syms) |
            diff -B -I '^#' -U0 - $(location %s_test.tsv)
            """ % (script, script, script),
        ],
        data = [
            "%s.far" % script,
            "%s.syms" % script,
            "%s_test.tsv" % script,
            "grapheme.syms",
            "//utils:thrax_g2p",
        ],
    )
| # Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def compile_grm(src=None, out=None, data=[]):
    """Compile a Thrax grm file to a FAR file.

    Args:
      src: label of the .grm source file.
      out: optional output filename; defaults to "<name>.far".
      data: extra runtime dependencies made visible to the Thrax compiler.
    """
    # Rather than invoking the Thrax compiler directly in a genrule, we go via an
    # intermediate sh_binary. The reason for this is that, unlike a genrule, an
    # sh_binary as a runfiles directory in which all runtime dependencies are
    # present as symlinks in a predictable directory layout. This allows the Thrax
    # compiler to read additional runtime data (e.g. symbol tables) from
    # predictable relative paths, regardless of whether they were present in the
    # source tree or generated by build rules.
    suffix = ".grm"
    if src.endswith(suffix):
        # BUG FIX: strip the ".grm" suffix. The previous slice
        # ``src[:len(suffix)]`` kept only the first four characters of src,
        # producing wrong rule names and output filenames.
        name = src[:-len(suffix)]
    else:
        name = src
    if not name:
        fail("Ill-formed src name: " + src)
    if not out:
        out = name + ".far"
    genrule_name = "compile_%s_grm" % name
    tool_name = genrule_name + "_helper"
    native.sh_binary(
        name = tool_name,
        srcs = ["compile_grm_helper.sh"],
        data = data + [
            src,
            "@thrax//:thraxcompiler",
        ],
    )
    native.genrule(
        name = genrule_name,
        srcs = [src],
        outs = [out],
        cmd = "$(location %s) $< $@" % tool_name,
        tools = [tool_name],
    )
def script_test(script):
    """Register a sh_test diffing thrax_g2p output for *script* against
    the golden file <script>_test.tsv (comment lines are ignored)."""
    native.sh_test(
        name = "%s_test" % script,
        timeout = "short",
        srcs = ["//utils:eval.sh"],
        args = [
            """
            grep '^[^#]' $(location %s_test.tsv) |
            cut -f 1 |
            $(location //utils:thrax_g2p) \
              --far=$(location %s.far) \
              --far_g2p_key=CODEPOINTS_TO_GRAPHEMES \
              --phoneme_syms=$(location grapheme.syms) |
            diff -B -I '^#' -U0 - $(location %s_test.tsv)
            """ % (script, script, script),
        ],
        data = [
            "%s.far" % script,
            "%s.syms" % script,
            "%s_test.tsv" % script,
            "grapheme.syms",
            "//utils:thrax_g2p",
        ],
    )
| apache-2.0 | Python |
39ddd32ab8fd9237d7c347389ec06fff6216f89b | Add date-time field in serializer.py | futami/measone,futami/measone | meas/serializer.py | meas/serializer.py | from rest_framework import serializers
from .models import Condition, Entry
class ConditionSerializer(serializers.ModelSerializer):
    """DRF serializer for Condition rows, including the created_at stamp."""
    class Meta:
        model = Condition
        fields = ('description', 'condition', 'serial', 'uuid', 'created_at')
class EntrySerializer(serializers.ModelSerializer):
    """DRF serializer for Entry rows, including the created_at stamp."""
    class Meta:
        model = Entry
        fields = ('uuid', 'lane', 'item', 'value', 'unit', 'created_at')
| from rest_framework import serializers
from .models import Condition, Entry
class ConditionSerializer(serializers.ModelSerializer):
    """DRF serializer for Condition rows."""
    class Meta:
        model = Condition
        fields = ('description', 'condition', 'serial', 'uuid')
class EntrySerializer(serializers.ModelSerializer):
    """DRF serializer for Entry rows."""
    class Meta:
        model = Entry
        fields = ('uuid', 'lane', 'item', 'value', 'unit')
| mit | Python |
1c0708355c7b3653c32e8a49c6ebe02266795b90 | Update `keras` serializer for `tensorflow.contrib` version (#1067) | blaze/distributed,dask/distributed,dask/distributed,mrocklin/distributed,mrocklin/distributed,mrocklin/distributed,blaze/distributed,dask/distributed,dask/distributed | distributed/protocol/keras.py | distributed/protocol/keras.py | from __future__ import print_function, division, absolute_import
from .serialize import register_serialization, serialize, deserialize
def serialize_keras_model(model):
    """Serialize a Keras model into a (header, frames) pair.

    The model config dict doubles as the header; every weight array is
    serialized independently and its frames concatenated into one list.
    """
    import keras
    if keras.__version__ < '1.2.0':
        raise ImportError("Need Keras >= 1.2.0. "
                          "Try pip install keras --upgrade --no-deps")
    header = model._updated_config()
    weights = model.get_weights()
    # one (sub-header, frame list) pair per weight array
    headers, frames = list(zip(*map(serialize, weights)))
    header['headers'] = headers
    # per-weight frame counts let the receiver split the flat list back up
    header['nframes'] = [len(L) for L in frames]
    frames = [frame for L in frames for frame in L]
    return header, frames
def deserialize_keras_model(header, frames):
    """Rebuild a Keras model from the (header, frames) pair produced by
    serialize_keras_model above."""
    from keras.models import model_from_config
    n = 0
    weights = []
    # consume header['nframes'][i] frames per weight array, in order
    for head, length in zip(header['headers'], header['nframes']):
        x = deserialize(head, frames[n: n + length])
        weights.append(x)
        n += length
    model = model_from_config(header)
    model.set_weights(weights)
    return model
# Register the (de)serializers for both the standalone keras package and
# the copy vendored inside tensorflow.contrib.
for module in ['keras', 'tensorflow.contrib.keras.python.keras']:
    for name in ['engine.training.Model', 'models.Model', 'models.Sequential']:
        register_serialization('.'.join([module, name]), serialize_keras_model,
                               deserialize_keras_model)
| from __future__ import print_function, division, absolute_import
from .serialize import register_serialization, serialize, deserialize
def serialize_keras_model(model):
    """Serialize a Keras model into a (header, frames) pair; the model
    config acts as the header and each weight array is serialized
    independently."""
    import keras
    if keras.__version__ < '1.2.0':
        raise ImportError("Need Keras >= 1.2.0. "
                          "Try pip install keras --upgrade --no-deps")
    header = model._updated_config()
    weights = model.get_weights()
    headers, frames = list(zip(*map(serialize, weights)))
    header['headers'] = headers
    header['nframes'] = [len(L) for L in frames]
    frames = [frame for L in frames for frame in L]
    return header, frames
def deserialize_keras_model(header, frames):
    """Rebuild a Keras model from a (header, frames) pair."""
    from keras.models import model_from_config
    n = 0
    weights = []
    # consume header['nframes'][i] frames per weight array, in order
    for head, length in zip(header['headers'], header['nframes']):
        x = deserialize(head, frames[n: n + length])
        weights.append(x)
        n += length
    model = model_from_config(header)
    model.set_weights(weights)
    return model
# Register the (de)serializers for every class name a Keras model can
# surface as.
register_serialization('keras.engine.training.Model', serialize_keras_model,
                       deserialize_keras_model)
register_serialization('keras.models.Model', serialize_keras_model,
                       deserialize_keras_model)
register_serialization('keras.models.Sequential', serialize_keras_model,
                       deserialize_keras_model)
| bsd-3-clause | Python |
3c077d82881e3dd51eb0b3906e43f9e038346cb6 | Remove `_allowed_symbols`, this is no longer used by the document generation. | tensorflow/federated,tensorflow/federated,tensorflow/federated | tensorflow_federated/python/core/test/__init__.py | tensorflow_federated/python/core/test/__init__.py | # Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test classes/functions for testing usage of TFF.
This package contains functions and utilities that can make testing usage of TFF
easier or more convenient and parallels the `tf.test` package.
"""
from tensorflow_federated.python.core.impl.reference_executor import ReferenceExecutor
from tensorflow_federated.python.core.test.static_assert import assert_contains_secure_aggregation
from tensorflow_federated.python.core.test.static_assert import assert_contains_unsecure_aggregation
from tensorflow_federated.python.core.test.static_assert import assert_not_contains_secure_aggregation
from tensorflow_federated.python.core.test.static_assert import assert_not_contains_unsecure_aggregation
| # Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test classes/functions for testing usage of TFF.
This package contains functions and utilities that can make testing usage of TFF
easier or more convenient and parallels the `tf.test` package.
"""
from tensorflow_federated.python.core.impl.reference_executor import ReferenceExecutor
from tensorflow_federated.python.core.test.static_assert import assert_contains_secure_aggregation
from tensorflow_federated.python.core.test.static_assert import assert_contains_unsecure_aggregation
from tensorflow_federated.python.core.test.static_assert import assert_not_contains_secure_aggregation
from tensorflow_federated.python.core.test.static_assert import assert_not_contains_unsecure_aggregation
# Used by doc generation script.
# NOTE(review): the commit removing this list states it is no longer
# consumed by document generation -- candidate for deletion.
_allowed_symbols = [
    "ReferenceExecutor",
    "assert_contains_secure_aggregation",
    "assert_contains_unsecure_aggregation",
    "assert_not_contains_secure_aggregation",
    "assert_not_contains_unsecure_aggregation",
]
| apache-2.0 | Python |
68f9e73d336eb973c72d2ba8332467bf83896c04 | Add additional tests | scitran/core,scitran/core,scitran/api,scitran/core,scitran/core,scitran/api | tests/integration_tests/python/test_collection.py | tests/integration_tests/python/test_collection.py | def test_collections(data_builder, as_admin, as_user):
session = data_builder.create_session()
acquisition = data_builder.create_acquisition()
# create collection
r = as_admin.post('/collections', json={
'label': 'SciTran/Testing'
})
assert r.ok
collection = r.json()['_id']
# get all collections w/ stats=true
r = as_admin.get('/collections', params={'stats': 'true'})
assert r.ok
assert all('session_count' in coll for coll in r.json())
# get collection
r = as_admin.get('/collections/' + collection)
assert r.ok
# test empty update
r = as_admin.put('/collections/' + collection, json={})
assert r.status_code == 400
# add session to collection
r = as_admin.put('/collections/' + collection, json={
'contents': {
'operation': 'add',
'nodes': [
{'level': 'session', '_id': session}
],
}
})
assert r.ok
# test if collection is listed at acquisition
r = as_admin.get('/acquisitions/' + acquisition)
assert r.ok
assert collection in r.json()['collections']
###
# Test user only sees sessions/acquisitions they have access to
###
project2 = data_builder.create_project()
session2 = data_builder.create_session(project=project2)
acquisition2 = data_builder.create_acquisition(session=session2)
# add session2 to collection
r = as_admin.put('/collections/' + collection, json={
'contents': {
'operation': 'add',
'nodes': [
{'level': 'session', '_id': session2}
],
}
})
assert r.ok
# test user cannot access sessions/acquisitions of collection without perms
r = as_user.get('/collections/' + collection)
assert r.status_code == 403
# add user to collection
r = as_user.get('/users/self')
assert r.ok
uid = r.json()['_id']
r = as_admin.post('/collections/' + collection + '/permissions', json={'_id': uid, 'access': 'ro'})
assert r.ok
# test user cannot see sessions or acquisitions
r = as_user.get('/collections/' + collection + '/sessions')
assert r.ok
assert r.json() == []
r = as_user.get('/collections/' + collection + '/acquisitions')
assert r.ok
assert r.json() == []
# add user to project
r = as_admin.post('/projects/' + project2 + '/permissions', json={'_id': uid, 'access': 'ro'})
assert r.ok
# test user can now see some of sessions and acquisitions
r = as_user.get('/collections/' + collection + '/sessions')
assert r.ok
sessions = r.json()
assert len(sessions) == 1
assert sessions[0]['_id'] == session2
r = as_user.get('/collections/' + collection + '/acquisitions')
assert r.ok
acquisitions = r.json()
assert len(acquisitions) == 1
assert acquisitions[0]['_id'] == acquisition2
# delete collection
r = as_admin.delete('/collections/' + collection)
assert r.ok
# try to get deleted collection
r = as_admin.get('/collections/' + collection)
assert r.status_code == 404
# test if collection is listed at acquisition
r = as_admin.get('/acquisitions/' + acquisition)
assert collection not in r.json()['collections']
| def test_collections(data_builder, as_admin):
session = data_builder.create_session()
acquisition = data_builder.create_acquisition()
# create collection
r = as_admin.post('/collections', json={
'label': 'SciTran/Testing',
'public': True
})
assert r.ok
collection = r.json()['_id']
# get all collections w/ stats=true
r = as_admin.get('/collections', params={'stats': 'true'})
assert r.ok
assert all('session_count' in coll for coll in r.json())
# get collection
r = as_admin.get('/collections/' + collection)
assert r.ok
# test empty update
r = as_admin.put('/collections/' + collection, json={})
assert r.status_code == 400
# add session to collection
r = as_admin.put('/collections/' + collection, json={
'contents': {
'operation': 'add',
'nodes': [
{'level': 'session', '_id': session}
],
}
})
assert r.ok
# test if collection is listed at acquisition
r = as_admin.get('/acquisitions/' + acquisition)
assert r.ok
assert collection in r.json()['collections']
# delete collection
r = as_admin.delete('/collections/' + collection)
assert r.ok
# try to get deleted collection
r = as_admin.get('/collections/' + collection)
assert r.status_code == 404
# test if collection is listed at acquisition
r = as_admin.get('/acquisitions/' + acquisition)
assert collection not in r.json()['collections']
| mit | Python |
b2f89f81572dc13bdca5d8740181648d0fa28cb8 | add run dependency to xorg-cf-files (#21251) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/imake/package.py | var/spack/repos/builtin/packages/imake/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Imake(AutotoolsPackage, XorgPackage):
    """The imake build system."""
    homepage = "http://www.snake.net/software/imake-stuff/"
    xorg_mirror_path = "util/imake-1.0.7.tar.gz"
    version('1.0.7', sha256='6bda266a07eb33445d513f1e3c82a61e4822ccb94d420643d58e1be5f881e5cb')
    depends_on('xproto')
    # needed at run time: configure_args below points imake at the
    # xorg-cf-files config-template directory
    depends_on('xorg-cf-files', type='run')
    depends_on('pkgconfig', type='build')
    def configure_args(self):
        """Point configure at the xorg-cf-files configuration templates."""
        args = []
        cfgdir = self.spec['xorg-cf-files'].prefix.lib.X11.config
        args.append('--with-config-dir={0}'.format(cfgdir))
        return args
| # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Imake(AutotoolsPackage, XorgPackage):
    """The imake build system."""
    homepage = "http://www.snake.net/software/imake-stuff/"
    xorg_mirror_path = "util/imake-1.0.7.tar.gz"
    version('1.0.7', sha256='6bda266a07eb33445d513f1e3c82a61e4822ccb94d420643d58e1be5f881e5cb')
    depends_on('xproto')
    depends_on('pkgconfig', type='build')
| lgpl-2.1 | Python |
1df3d651270035ffb0ffa4e1d5b51bb7b20e9907 | add v1.22-3 (#20890) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/r-dtw/package.py | var/spack/repos/builtin/packages/r-dtw/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RDtw(RPackage):
    """Dynamic Time Warping Algorithms
    A comprehensive implementation of dynamic time warping (DTW) algorithms in
    R. DTW computes the optimal (least cumulative distance) alignment between
    points of two time series. Common DTW variants covered include local
    (slope) and global (window) constraints, subsequence matches, arbitrary
    distance definitions, normalizations, minimum variance matching, and so on.
    Provides cumulative distances, alignments, specialized plot styles, etc.,
    as described in Giorgino (2009) <doi:10.18637/jss.v031.i07>."""
    homepage = "https://cloud.r-project.org/package=dtw"
    url = "https://cloud.r-project.org/src/contrib/dtw_1.18-1.tar.gz"
    # archived (non-current) releases live under .../Archive/dtw
    list_url = "https://cloud.r-project.org/src/contrib/Archive/dtw"
    version('1.22-3', sha256='df7cf9adf613422ddb22a160597eb5f5475ab6c67c0d790092edb7f72ba98f00')
    version('1.20-1', sha256='43ca1a47a7c81a2b5d5054da1be8b8af79a85d6f9ce7b4512e9ed91f790f60f0')
    version('1.18-1', sha256='d9dab25bdf61705f9f28dc5ca1c96a7465b269233e878516c52c01b5a0da21ad')
    version('1.17-1', sha256='0fc3afcebc58135c78abd7545a3549466ac051a058f913db16214c12141a6e4d')
    version('1.16', sha256='7d7e34c41ff6021991bcf8a913b2b6b82680018f65fdd90af2150a07457e9cdb')
    version('1.15', sha256='28ba2110d4c305f332fad93337cdae24b9de4163b8ddf33d476f9dddc63160f1')
    version('1.14-3', sha256='6989358d8d97428418c2b34ae38647efcee2e0ce095800a657d5d83d7083c9e3')
    depends_on('r@2.10.0:', type=('build', 'run'))
    depends_on('r-proxy', type=('build', 'run'))
| # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RDtw(RPackage):
    """A comprehensive implementation of dynamic time warping (DTW) algorithms
    in R. DTW computes the optimal (least cumulative distance) alignment
    between points of two time series."""
    homepage = "https://cloud.r-project.org/package=dtw"
    url = "https://cloud.r-project.org/src/contrib/dtw_1.18-1.tar.gz"
    # archived (non-current) releases live under .../Archive/dtw
    list_url = "https://cloud.r-project.org/src/contrib/Archive/dtw"
    version('1.20-1', sha256='43ca1a47a7c81a2b5d5054da1be8b8af79a85d6f9ce7b4512e9ed91f790f60f0')
    version('1.18-1', sha256='d9dab25bdf61705f9f28dc5ca1c96a7465b269233e878516c52c01b5a0da21ad')
    version('1.17-1', sha256='0fc3afcebc58135c78abd7545a3549466ac051a058f913db16214c12141a6e4d')
    version('1.16', sha256='7d7e34c41ff6021991bcf8a913b2b6b82680018f65fdd90af2150a07457e9cdb')
    version('1.15', sha256='28ba2110d4c305f332fad93337cdae24b9de4163b8ddf33d476f9dddc63160f1')
    version('1.14-3', sha256='6989358d8d97428418c2b34ae38647efcee2e0ce095800a657d5d83d7083c9e3')
    depends_on('r@2.10.0:', type=('build', 'run'))
    depends_on('r-proxy', type=('build', 'run'))
| lgpl-2.1 | Python |
e879c4176d8940e53fd6b749582ec6da60287060 | add version 0.6-2 to r-gmp (#21028) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/r-gmp/package.py | var/spack/repos/builtin/packages/r-gmp/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGmp(RPackage):
    """Multiple Precision Arithmetic
    Multiple Precision Arithmetic (big integers and rationals, prime number
    tests, matrix computation), "arithmetic without limitations" using the C
    library GMP (GNU Multiple Precision Arithmetic)."""
    homepage = "http://mulcyber.toulouse.inra.fr/projects/gmp"
    url = "https://cloud.r-project.org/src/contrib/gmp_0.5-13.1.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/gmp"
    version('0.6-2', sha256='6bfcb45b3f1e7da27d8773f911027355cab371d150c3dabf7dbaf8fba85b7f0e')
    version('0.5-13.5', sha256='f681ab2ff3d1e379ba8ac44a8abddd08d08170723e885abc0b469b6fa8fe5510')
    version('0.5-13.4', sha256='f05605b40fc39fc589e3a4d2f526a591a649faa45eef7f95c096e1bff8775196')
    version('0.5-13.1', sha256='2f805374a26742cd43f6b2054130d8670eda1940070aabb9971e9e48226d0976')
    depends_on('r@3.0.0:', type=('build', 'run'))
    # 0.6-2 onward requires at least R 3.5.0
    depends_on('r@3.5.0:', when='@0.6-2:', type=('build', 'run'))
    depends_on('gmp@4.2.3:')
| # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGmp(RPackage):
    """Multiple Precision Arithmetic (big integers and rationals, prime
    number tests, matrix computation), "arithmetic without limitations"
    using the C library GMP (GNU Multiple Precision Arithmetic)."""
    homepage = "http://mulcyber.toulouse.inra.fr/projects/gmp"
    url = "https://cloud.r-project.org/src/contrib/gmp_0.5-13.1.tar.gz"
    # archived (non-current) releases live under .../Archive/gmp
    list_url = "https://cloud.r-project.org/src/contrib/Archive/gmp"
    version('0.5-13.5', sha256='f681ab2ff3d1e379ba8ac44a8abddd08d08170723e885abc0b469b6fa8fe5510')
    version('0.5-13.4', sha256='f05605b40fc39fc589e3a4d2f526a591a649faa45eef7f95c096e1bff8775196')
    version('0.5-13.1', sha256='2f805374a26742cd43f6b2054130d8670eda1940070aabb9971e9e48226d0976')
    depends_on('r@3.0.0:', type=('build', 'run'))
    depends_on('gmp@4.2.3:')
| lgpl-2.1 | Python |
26f740b25a2d8a5fcddc67f5a0d1dc18ce2c132b | add new versions (#24443) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/sleef/package.py | var/spack/repos/builtin/packages/sleef/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Sleef(CMakePackage):
    """SIMD Library for Evaluating Elementary Functions, vectorized libm and DFT."""
    homepage = "https://sleef.org"
    url = "https://github.com/shibatch/sleef/archive/3.2.tar.gz"
    git = "https://github.com/shibatch/sleef.git"
    # commit-pinned versions match the sleef submodule pins of the
    # py-torch releases noted inline
    version('master', branch='master')
    version('3.5.1_2020-12-22', commit='e0a003ee838b75d11763aa9c3ef17bf71a725bff') # py-torch@1.8:1.9
    version('3.5.1', sha256='415ee9b1bcc5816989d3d4d92afd0cd3f9ee89cbd5a33eb008e69751e40438ab', preferred=True)
    version('3.4.0_2019-07-30', commit='7f523de651585fe25cade462efccca647dcc8d02') # py-torch@1.3:1.7
    version('3.4.0_2019-05-13', commit='9b249c53a80343cc1a394ca961d7d5696ea76409', # py-torch@1.2
            git='https://github.com/zdevito/sleef.git')
    version('3.3.1_2018-12-09', commit='191f655caa25526ae226cf88dd2529265176014a', # py-torch@1.1
            git='https://github.com/zdevito/sleef.git')
    version('3.2_2018-05-09', commit='6ff7a135a1e31979d1e1844a2e7171dfbd34f54f') # py-torch@0.4.1:1.0
    version('3.2', sha256='3130c5966e204e6d6a3ace81e543d12b5b21f60897f1c185bfa587c1bd77bee2')
    # Some versions have ICE when building RelWithDebInfo with GCC 7
    # See https://github.com/shibatch/sleef/issues/234
    # See https://github.com/pytorch/pytorch/issues/26892
    # See https://github.com/pytorch/pytorch/pull/26993
    variant('build_type', default='Release',
            description='CMake build type',
            values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel'))
    depends_on('cmake@3.4.3:', type='build')
    depends_on('ninja', type='build')
    # drive the CMake build with ninja instead of make
    generator = 'Ninja'
| # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Sleef(CMakePackage):
    """SIMD Library for Evaluating Elementary Functions,
    vectorized libm and DFT."""
    homepage = "http://sleef.org"
    url = "https://github.com/shibatch/sleef/archive/3.2.tar.gz"
    version('3.2', sha256='3130c5966e204e6d6a3ace81e543d12b5b21f60897f1c185bfa587c1bd77bee2')
| lgpl-2.1 | Python |
696071e11f655578ef7c24118d9de26785779ae3 | Update filter_blacklist_hashtag_medias.py | instagrambot/instabot,instagrambot/instabot,ohld/instabot | examples/filter_blacklist_hashtag_medias.py | examples/filter_blacklist_hashtag_medias.py | """
instabot filters out the media with your set blacklist hashtags
Workflow:
Try to follow a media with your blacklist hashtag in the
description and see how bot filters it out.
"""
import os
import sys
# make the repo-root instabot package importable from examples/
sys.path.append(os.path.join(sys.path[0], '../'))
from instabot import Bot
blacklist_hashtag_input = input("\n Enter a blacklist hashtag: ")
# every filter is enabled; the entered hashtag is put on the blacklist
bot = Bot(filter_users=True,
          filter_private_users=True,
          filter_previously_followed=True,
          filter_business_accounts=True,
          filter_verified_accounts=True,
          blacklist_hashtags=[blacklist_hashtag_input],)
bot.login()
# NOTE(review): liking the blacklisted hashtag should exercise the
# filtering described in the module docstring -- medias get filtered out.
bot.like_hashtag(blacklist_hashtag_input, amount=2)
| """
instabot filters out the media with your set blacklist hashtags
Workflow:
Try to follow a media with your blacklist hashtag in the
description and see how bot filters it out.
"""
import os
import sys
# make the repo-root instabot package importable from examples/
sys.path.append(os.path.join(sys.path[0], '../'))
from instabot import Bot
blacklist_hashtag_input = input("\n Enter a hashtag you want to filter out: ")
# every filter is enabled; the entered hashtag is put on the blacklist
bot = Bot(filter_users=True,
          filter_private_users=True,
          filter_previously_followed=True,
          filter_business_accounts=True,
          filter_verified_accounts=True,
          blacklist_hashtags=[blacklist_hashtag_input],)
bot.login()
bot.like_hashtag(blacklist_hashtag_input, amount=2)
| apache-2.0 | Python |
fc3e492c25fcaf98ba0f48e01422d339541b8f70 | Add coding for python 2.7 | DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative | ynr/apps/ynr_refactoring/helpers/popolo_fields.py | ynr/apps/ynr_refactoring/helpers/popolo_fields.py | # -*- coding: utf-8 -*-
"""
A helper to move away from SimplePopoloField and ComplexPopoloField
models.
"""
class BaseField():
def __init__(self, *args, **kwargs):
self.required = False
for k, v in kwargs.items():
setattr(self, k, v)
def __str__(self):
return self.name
def __eq__(self, other):
return self.name == other.name
class SimplePopoloField(BaseField):
pass
honorific_prefix = SimplePopoloField(
name='honorific_prefix',
label='Title / pre-nominal honorific (e.g. Dr, Sir, etc.)',
info_type_key='text',
order=1,
required=False,
)
name = SimplePopoloField(
name='name',
label='Full name',
info_type_key='text',
order=2,
required=True,
)
honorific_suffix = SimplePopoloField(
name='honorific_suffix',
label='Post-nominal letters (e.g. CBE, DSO, etc.)',
info_type_key='text',
order=3,
required=False,
)
email = SimplePopoloField(
name='email',
label='Email',
info_type_key='email',
order=4,
required=False,
)
gender = SimplePopoloField(
name='gender',
label='Gender (e.g. “male”, “female”)',
info_type_key='text',
order=5,
required=False,
)
birth_date = SimplePopoloField(
name='birth_date',
label='Date of birth (a four digit year or a full date)',
info_type_key='text',
order=6,
required=False,
)
biography = SimplePopoloField(
name='biography',
label='Biography',
info_type_key='text_multiline',
order=10,
required=False,
)
simple_fields = [
honorific_prefix,
name,
honorific_suffix,
email,
gender,
birth_date,
biography,
]
simple_fields = sorted(simple_fields, key=lambda x: x.order)
| """
A helper to move away from SimplePopoloField and ComplexPopoloField
models.
"""
class BaseField():
def __init__(self, *args, **kwargs):
self.required = False
for k, v in kwargs.items():
setattr(self, k, v)
def __str__(self):
return self.name
def __eq__(self, other):
return self.name == other.name
class SimplePopoloField(BaseField):
pass
honorific_prefix = SimplePopoloField(
name='honorific_prefix',
label='Title / pre-nominal honorific (e.g. Dr, Sir, etc.)',
info_type_key='text',
order=1,
required=False,
)
name = SimplePopoloField(
name='name',
label='Full name',
info_type_key='text',
order=2,
required=True,
)
honorific_suffix = SimplePopoloField(
name='honorific_suffix',
label='Post-nominal letters (e.g. CBE, DSO, etc.)',
info_type_key='text',
order=3,
required=False,
)
email = SimplePopoloField(
name='email',
label='Email',
info_type_key='email',
order=4,
required=False,
)
gender = SimplePopoloField(
name='gender',
label='Gender (e.g. “male”, “female”)',
info_type_key='text',
order=5,
required=False,
)
birth_date = SimplePopoloField(
name='birth_date',
label='Date of birth (a four digit year or a full date)',
info_type_key='text',
order=6,
required=False,
)
biography = SimplePopoloField(
name='biography',
label='Biography',
info_type_key='text_multiline',
order=10,
required=False,
)
simple_fields = [
honorific_prefix,
name,
honorific_suffix,
email,
gender,
birth_date,
biography,
]
simple_fields = sorted(simple_fields, key=lambda x: x.order)
| agpl-3.0 | Python |
28ce152686cb10f40acf2632766a457477e7008b | remove 32 bit python hack | BansheeMediaPlayer/bockbuild,BansheeMediaPlayer/bockbuild,BansheeMediaPlayer/bockbuild | packages/gtk+3.py | packages/gtk+3.py | class GtkPackage (GnomeXzPackage):
def __init__ (self):
GnomeXzPackage.__init__ (self, 'gtk+', version_major = '3.16', version_minor = '2',
configure_flags = [
'--with-gdktarget=quartz',
'--enable-quartz-backend',
'--enable-debug',
'--enable-static',
'--disable-glibtest',
'--disable-introspection',
'--disable-cloudprint',
'--disable-wayland-backend',
'--disable-schemas-compile',
'gio_can_sniff=yes'
]
)
self.gdk_target = 'quartz'
if Package.profile.name == 'darwin':
self.gdk_target = 'quartz'
self.sources.extend ([
# Custom gtkrc
'patches/gtkrc'
])
if Package.profile.name == 'darwin' and not Package.profile.m64:
self.configure_flags.extend ([
# fix build on lion, it uses 64-bit host even with -m32
'--build=i386-apple-darwin11.2.0',
])
def prep (self):
Package.prep (self)
if Package.profile.name == 'darwin':
for p in range (2, len (self.sources)):
self.sh ('patch -p1 --ignore-whitespace < "%{sources[' + str (p) + ']}"')
def arch_build (self, arch):
if arch == 'darwin-32':
self.sh ('export CC="$CC -arch i386"')
self.sh ('export CXX="$CXX -arch i386"')
self.local_ld_flags = ['-arch i386', '-DX_LOCALE']
self.local_gcc_flags = ['-arch i386', '-fstrict-aliasing']
elif arch == 'darwin-64':
self.sh ('export CC="$CC -arch x86_64"')
self.sh ('export CXX="$CXX -arch x86_64"')
self.local_ld_flags = ['-arch x86_64', '-DX_LOCALE']
self.local_gcc_flags = ['-arch x86_64', '-fstrict-aliasing']
Package.arch_build (self, arch, defaults = False)
def install(self):
Package.install(self)
if Package.profile.name == 'darwin':
self.install_gtkrc ()
def install_gtkrc(self):
gtkrc = self.sources[1]
origin = gtkrc if os.path.isabs (gtkrc) else os.path.join (self.package_dir (), gtkrc)
destdir = os.path.join (self.prefix, "etc", "gtk-2.0")
if not os.path.exists (destdir):
os.makedirs(destdir)
self.sh('cp %s %s' % (origin, destdir))
GtkPackage ()
| class GtkPackage (GnomeXzPackage):
def __init__ (self):
GnomeXzPackage.__init__ (self, 'gtk+', version_major = '3.16', version_minor = '2',
configure_flags = [
'--with-gdktarget=quartz',
'--enable-quartz-backend',
'--enable-debug',
'--enable-static',
'--disable-glibtest',
'--disable-introspection',
'--disable-cloudprint',
'--disable-wayland-backend',
'--disable-schemas-compile',
'gio_can_sniff=yes'
]
)
self.gdk_target = 'quartz'
if Package.profile.name == 'darwin':
self.gdk_target = 'quartz'
self.sources.extend ([
# Custom gtkrc
'patches/gtkrc'
])
if Package.profile.name == 'darwin' and not Package.profile.m64:
self.configure_flags.extend ([
# fix build on lion, it uses 64-bit host even with -m32
'--build=i386-apple-darwin11.2.0',
])
def prep (self):
Package.prep (self)
if Package.profile.name == 'darwin':
for p in range (2, len (self.sources)):
self.sh ('patch -p1 --ignore-whitespace < "%{sources[' + str (p) + ']}"')
def arch_build (self, arch):
if arch == 'darwin-32':
self.sh ('export CC="$CC -arch i386"')
self.sh ('export CXX="$CXX -arch i386"')
self.local_ld_flags = ['-arch i386', '-DX_LOCALE']
self.local_gcc_flags = ['-arch i386', '-fstrict-aliasing']
# The following will only work with bash according to:
# http://www.gossamer-threads.com/lists/python/python/30602
os.environ['VERSIONER_PYTHON_PREFER_32_BIT'] = 'yes'
elif arch == 'darwin-64':
self.sh ('export CC="$CC -arch x86_64"')
self.sh ('export CXX="$CXX -arch x86_64"')
self.local_ld_flags = ['-arch x86_64', '-DX_LOCALE']
self.local_gcc_flags = ['-arch x86_64', '-fstrict-aliasing']
Package.arch_build (self, arch, defaults = False)
def install(self):
Package.install(self)
if Package.profile.name == 'darwin':
self.install_gtkrc ()
def install_gtkrc(self):
gtkrc = self.sources[1]
origin = gtkrc if os.path.isabs (gtkrc) else os.path.join (self.package_dir (), gtkrc)
destdir = os.path.join (self.prefix, "etc", "gtk-2.0")
if not os.path.exists (destdir):
os.makedirs(destdir)
self.sh('cp %s %s' % (origin, destdir))
GtkPackage ()
| mit | Python |
00c92c618e778b9891283a699ecc2ea6d7a08510 | Update pango to 1.36.8 | BansheeMediaPlayer/bockbuild,BansheeMediaPlayer/bockbuild,BansheeMediaPlayer/bockbuild | packages/pango.py | packages/pango.py | class PangoPackage (GnomeXzPackage):
def __init__ (self):
GnomeXzPackage.__init__ (self,
'pango',
version_major = '1.36',
version_minor = '8',
configure_flags = [
'--without-x',
'--enable-debug'
]
)
self.sources.extend ([
# 1
# Bug 321419 - Allow environment var substitution in Pango config
# https://bugzilla.gnome.org/show_bug.cgi?id=321419
'patches/pango-relative-config-file.patch',
# BXC 10257 - Characters outside the Basic Multilingual Plane don't render correctly
# https://bugzilla.xamarin.com/show_bug.cgi?id=10257
'patches/pango-coretext-astral-plane-1.patch',
'patches/pango-coretext-astral-plane-2.patch',
# Bug 15787 - Caret position is wrong when there are ligatures
# https://bugzilla.xamarin.com/show_bug.cgi?id=15787
'patches/pango-disable-ligatures.patch',
# https://bugzilla.xamarin.com/show_bug.cgi?id=22199
'patches/pango-fix-ct_font_descriptor_get_weight-crasher.patch',
# https://bugzilla.gnome.org/show_bug.cgi?id=734372
'patches/pango-coretext-condensed-trait.patch'
])
def prep (self):
GnomePackage.prep (self)
if Package.profile.name == 'darwin':
for p in range (1, len (self.sources)):
self.sh ('patch -p1 < "%{sources[' + str (p) + ']}"')
PangoPackage ()
| class PangoPackage (GnomeXzPackage):
def __init__ (self):
GnomePackage.__init__ (self,
'pango',
version_major = '1.35',
version_minor = '0',
configure_flags = [
'--without-x',
'--enable-debug'
]
)
self.sources.extend ([
# 1
# Bug 321419 - Allow environment var substitution in Pango config
# https://bugzilla.gnome.org/show_bug.cgi?id=321419
'patches/pango-relative-config-file.patch',
# BXC 10257 - Characters outside the Basic Multilingual Plane don't render correctly
# https://bugzilla.xamarin.com/show_bug.cgi?id=10257
'patches/pango-coretext-astral-plane-1.patch',
'patches/pango-coretext-astral-plane-2.patch',
# Bug 15787 - Caret position is wrong when there are ligatures
# https://bugzilla.xamarin.com/show_bug.cgi?id=15787
'patches/pango-disable-ligatures.patch',
# https://bugzilla.xamarin.com/show_bug.cgi?id=22199
'patches/pango-fix-ct_font_descriptor_get_weight-crasher.patch',
# https://bugzilla.gnome.org/show_bug.cgi?id=734372
'patches/pango-coretext-condensed-trait.patch'
])
def prep (self):
GnomePackage.prep (self)
if Package.profile.name == 'darwin':
for p in range (1, len (self.sources)):
self.sh ('patch -p1 < "%{sources[' + str (p) + ']}"')
PangoPackage ()
| mit | Python |
a26e4f7a5a5605c0e39f2ae54708b60013d08c9a | Split out tag regexp so other modules can reference it | jcmcken/pallium,jcmcken/pallium | pallium/config.py | pallium/config.py |
try:
import json
except ImportError:
import simplejson as json
import re
import socket
from copy import deepcopy
_STR_RE_VALID_TAG = "[A-Za-z0-9_\-]+"
_STR_RE_VALID_TAG_COMPLETE = "^%s$" % _STR_RE_VALID_TAG
_RE_VALID_TAG = re.compile(_STR_RE_VALID_TAG_COMPLETE)
DEFAULT_CONFIG_FILE = "/etc/pallium/config.json"
DEFAULT_CONFIG = {
# gmetad server hostname or IP
"server": "localhost",
# gmetad request port (prints entire metric tree)
"request_port": 8651,
# gmetad interactive port
"interactive_port": 8652,
# directory where alerts are stored
"alerts_dir": "/etc/pallium/alerts",
# directory where metalerts are stored
"metalerts_dir": "/etc/pallium/metalerts",
# who to send alert emails to
"email_to": [],
# who to send alert emails as
"email_from": "pallium@%s" % socket.getfqdn(),
# number of seconds between gmetad queries -- should not be set too low,
# especially for large grids
"check_every": 30,
}
def load_pallium_config(filename):
config = load_json_config(filename)
default = deepcopy(DEFAULT_CONFIG)
default.update(config)
return default
def load_json_config(filename):
return json.load(open(filename))
def valid_tag(tag):
return bool(_RE_VALID_TAG.search(tag))
|
try:
import json
except ImportError:
import simplejson as json
import re
import socket
from copy import deepcopy
_STR_RE_VALID_TAG = "^[A-Za-z0-9_\-]+$"
_RE_VALID_TAG = re.compile(_STR_RE_VALID_TAG)
DEFAULT_CONFIG_FILE = "/etc/pallium/config.json"
DEFAULT_CONFIG = {
# gmetad server hostname or IP
"server": "localhost",
# gmetad request port (prints entire metric tree)
"request_port": 8651,
# gmetad interactive port
"interactive_port": 8652,
# directory where alerts are stored
"alerts_dir": "/etc/pallium/alerts",
# directory where metalerts are stored
"metalerts_dir": "/etc/pallium/metalerts",
# who to send alert emails to
"email_to": [],
# who to send alert emails as
"email_from": "pallium@%s" % socket.getfqdn(),
# number of seconds between gmetad queries -- should not be set too low,
# especially for large grids
"check_every": 30,
}
def load_pallium_config(filename):
config = load_json_config(filename)
default = deepcopy(DEFAULT_CONFIG)
default.update(config)
return default
def load_json_config(filename):
return json.load(open(filename))
def valid_tag(tag):
return bool(_RE_VALID_TAG.search(tag))
| bsd-3-clause | Python |
5a3672eb16ea57b0757526966b496d71693743ec | Update get_stock() docstring. | scraperwiki/stock-tool,scraperwiki/stock-tool | pandas_finance.py | pandas_finance.py | #!/usr/bin/env python
import datetime
import scraperwiki
import numpy
import pandas.io.data as web
def get_stock(stock, start, end, service):
"""
Return data frame of finance data for stock.
Takes start and end datetimes, and service name of 'google' or 'yahoo'.
"""
return web.DataReader(stock, service, start, end)
def parse_finance_frame(stock, start, end, service='google'):
"""
Return rows of dicts from a finance data frame for scraperwiki.sqlite.
service can also be 'yahoo', start and end are datetimes.
"""
frame = get_stock(stock, start, end, service)
rows = []
for idx in range(len(frame)):
current_row_as_dict = frame.ix[idx].to_dict()
# have to convert dates because these are Pandas timestamps and
# dumptruck doesn't support them
current_row_as_dict['Date'] = frame.index[idx].to_datetime()
current_row_as_dict['Stock'] = stock
# horrible hack because data values are numpy.float64 and dumptruck
# doesn't support them
for key in current_row_as_dict:
if isinstance(current_row_as_dict[key], numpy.float64):
current_row_as_dict[key] = float(current_row_as_dict[key])
rows.append(current_row_as_dict)
return rows
def main():
"""
Dump stock data into scraperwiki.sqlite using pandas.io.data.
"""
# arbitrary start chosen
start = datetime.datetime(2014, 3, 1)
end = datetime.datetime.today()
stock_list = ['TWTR', 'FB']
rows = []
for stock in stock_list:
rows.extend(parse_finance_frame(stock, start, end))
scraperwiki.sqlite.save(data=rows, unique_keys=['Stock', 'Date'])
if __name__ == '__main__':
main()
| #!/usr/bin/env python
import datetime
import scraperwiki
import numpy
import pandas.io.data as web
def get_stock(stock, start, end, service):
"""
Return data frame of finance data for stock.
Takes start and end datetimes.
"""
return web.DataReader(stock, service, start, end)
def parse_finance_frame(stock, start, end, service='google'):
"""
Return rows of dicts from a finance data frame for scraperwiki.sqlite.
service can also be 'yahoo', start and end are datetimes.
"""
frame = get_stock(stock, start, end, service)
rows = []
for idx in range(len(frame)):
current_row_as_dict = frame.ix[idx].to_dict()
# have to convert dates because these are Pandas timestamps and
# dumptruck doesn't support them
current_row_as_dict['Date'] = frame.index[idx].to_datetime()
current_row_as_dict['Stock'] = stock
# horrible hack because data values are numpy.float64 and dumptruck
# doesn't support them
for key in current_row_as_dict:
if isinstance(current_row_as_dict[key], numpy.float64):
current_row_as_dict[key] = float(current_row_as_dict[key])
rows.append(current_row_as_dict)
return rows
def main():
"""
Dump stock data into scraperwiki.sqlite using pandas.io.data.
"""
# arbitrary start chosen
start = datetime.datetime(2014, 3, 1)
end = datetime.datetime.today()
stock_list = ['TWTR', 'FB']
rows = []
for stock in stock_list:
rows.extend(parse_finance_frame(stock, start, end))
scraperwiki.sqlite.save(data=rows, unique_keys=['Stock', 'Date'])
if __name__ == '__main__':
main()
| agpl-3.0 | Python |
53617b562b5aeb8daef3f2808a3d177c14f88f4b | Bump to version 1.1.12 | kezabelle/pilkit,fladi/pilkit | pilkit/pkgmeta.py | pilkit/pkgmeta.py | __title__ = 'pilkit'
__author__ = 'Matthew Tretter'
__version__ = '1.1.12'
__license__ = 'BSD'
__all__ = ['__title__', '__author__', '__version__', '__license__']
| __title__ = 'pilkit'
__author__ = 'Matthew Tretter'
__version__ = '1.1.11'
__license__ = 'BSD'
__all__ = ['__title__', '__author__', '__version__', '__license__']
| bsd-3-clause | Python |
07b193353115b7c144e8986d0621a0dee9da440b | Fix py35 gate | ozamiatin/oslo.messaging,ozamiatin/oslo.messaging | oslo_messaging/tests/functional/zmq/test_startup.py | oslo_messaging/tests/functional/zmq/test_startup.py | # Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from oslo_messaging.tests.functional.zmq import multiproc_utils
class StartupOrderTestCase(multiproc_utils.MultiprocTestCase):
def setUp(self):
super(StartupOrderTestCase, self).setUp()
self.conf.prog = "test_prog"
self.conf.project = "test_project"
self.config(rpc_response_timeout=10)
log_path = os.path.join(self.conf.oslo_messaging_zmq.rpc_zmq_ipc_dir,
str(os.getpid()) + ".log")
sys.stdout = open(log_path, "wb", buffering=0)
def test_call_client_wait_for_server(self):
server = self.spawn_server(wait_for_server=True)
client = self.get_client(server.topic)
for _ in range(3):
reply = client.call_a()
self.assertIsNotNone(reply)
self.assertEqual(3, len(client.replies))
def test_call_client_dont_wait_for_server(self):
server = self.spawn_server(wait_for_server=False)
client = self.get_client(server.topic)
for _ in range(3):
reply = client.call_a()
self.assertIsNotNone(reply)
self.assertEqual(3, len(client.replies))
| # Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from oslo_messaging.tests.functional.zmq import multiproc_utils
class StartupOrderTestCase(multiproc_utils.MultiprocTestCase):
def setUp(self):
super(StartupOrderTestCase, self).setUp()
self.conf.prog = "test_prog"
self.conf.project = "test_project"
self.config(rpc_response_timeout=10)
log_path = os.path.join(self.conf.oslo_messaging_zmq.rpc_zmq_ipc_dir,
str(os.getpid()) + ".log")
sys.stdout = open(log_path, "w", buffering=0)
def test_call_client_wait_for_server(self):
server = self.spawn_server(wait_for_server=True)
client = self.get_client(server.topic)
for _ in range(3):
reply = client.call_a()
self.assertIsNotNone(reply)
self.assertEqual(3, len(client.replies))
def test_call_client_dont_wait_for_server(self):
server = self.spawn_server(wait_for_server=False)
client = self.get_client(server.topic)
for _ in range(3):
reply = client.call_a()
self.assertIsNotNone(reply)
self.assertEqual(3, len(client.replies))
| apache-2.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.