commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
40c8bec919f2e04befb021d51706f39793eb77a2 | Fix typo | tfyarn/factory.py | tfyarn/factory.py | from __future__ import print_function
from tfyarn.clusterspecgen_client import ClusterSpecGenClient
import os
import socket
import tensorflow
import time
def createClusterSpec(job_name, task_index, application_id=None, container_id=None, am_address=None):
if application_id is None:
application_id = os.environ['APPLICATION_ID']
if container_id is None:
container_id = os.environ['CONTAINER_ID']
if am_address is None:
am_address = os.environ['AM_ADDRESS']
client = ClusterSpecGenClient(am_address)
host = socket.gethostname()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 0))
port = s.getsockname()[1]
client.register_container(application_id, container_id, host, port, job_name, task_index)
while True:
time.sleep(0.2)
cluster_spec_list = client.get_cluster_spec()
if cluster_spec_list is None:
print(container_id + ': createTrainServer: clusterSpec: None')
pass
elif len(cluster_spec_list) == 0:
print(container_id + ': createTrainServer: clusterSpec: (empty)')
pass
else:
break
workers = []
pses = []
last_worker_task_index = -1
last_ps_task_index = -1
for container in cluster_spec_list:
if container.jobName == 'worker':
assert container.taskIndex == last_worker_task_index + 1
last_worker_task_index = container.taskIndex
workers.append(container.ip + ':' + str(container.port))
elif container.jobName == 'ps':
assert container.taskIndex == last_ps_task_index + 1
last_ps_task_index = container.taskIndex
pses.append(container.ip + ':' + str(container.port))
cluster_spec_map = {'worker': workers, 'ps': pses}
print(container_id + ': createTrainServer: clusterSpec: ', end='')
print(cluster_spec_map)
s.close()
return tensorflow.train.ClusterSpec(cluster_spec_map)
| from __future__ import print_function
from tfyarn.clusterspecgen_client import ClusterSpecGenClient
import os
import socket
import tensorflow
import time
def createClusterSpec(job_name, task_index, application_id=None, container_id=None, am_address=None):
if application_id is None:
application_id = os.environ['APPLICATION_ID']
if container_id is None:
container_id = os.environ['CONTAINER_ID']
if am_address is None:
am_address = os.environ['AM_ADDRESS']
client = ClusterSpecGenClient(am_address)
host = socket.gethostname()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 0))
port = s.getsockname()[1]
client.register_container(application_id, container_id, host, port, job_name, task_index)
while True:
time.sleep(0.2)
cluster_spec_list = client.get_cluster_spec()
if cluster_spec_list is None:
print(container_id + ': createTrainServer: clusterSpec: None')
pass
elif len(cluster_spec_list) == 0:
print(container_id + ': createTrainServer: clusterSpec: (empty)')
pass
else:
break
workers = []
pses = []
last_worker_task_id = -1
last_ps_task_id = -1
for container in cluster_spec_list:
if container.jobName == 'worker':
assert container.taskIndex == last_worker_task_id + 1
last_worker_task_id = container.taskIndex
workers.append(container.ip + ':' + str(container.port))
elif container.jobName == 'ps':
assert container.taskIndex == last_ps_task_id + 1
last_ps_task_id = container.taskIndex
pses.append(container.ip + ':' + str(container.port))
cluster_spec_map = {'worker': workers, 'ps': pses}
print(container_id + ': createTrainServer: clusterSpec: ', end='')
print(cluster_spec_map)
s.close()
return tensorflow.train.ClusterSpec(cluster_spec_map)
| Python | 0.999999 |
4663589ae44437344ec88dc96dc2ca9bdf55b581 | add metric AUC | tgboost/metric.py | tgboost/metric.py | import numpy as np
def accuracy(preds, labels):
return np.mean(labels == preds.round())
def error(preds, labels):
return 1.0 - accuracy(preds,labels)
def mean_square_error(preds, labels):
return np.mean(np.square(preds - labels))
def mean_absolute_error(preds, labels):
return np.mean(np.abs(preds - labels))
def tied_rank(x):
sorted_x = sorted(zip(x,range(len(x))))
r = [0 for k in x]
cur_val = sorted_x[0][0]
last_rank = 0
for i in range(len(sorted_x)):
if cur_val != sorted_x[i][0]:
cur_val = sorted_x[i][0]
for j in range(last_rank, i):
r[sorted_x[j][1]] = float(last_rank+1+i)/2.0
last_rank = i
if i==len(sorted_x)-1:
for j in range(last_rank, i+1):
r[sorted_x[j][1]] = float(last_rank+i+2)/2.0
return r
# the auc code is from https://github.com/benhamner/Metrics, thanks benhamner
def auc(posterior, actual):
r = tied_rank(posterior)
num_positive = len([0 for x in actual if x==1])
num_negative = len(actual)-num_positive
sum_positive = sum([r[i] for i in range(len(r)) if actual[i]==1])
auc = ((sum_positive - num_positive*(num_positive+1)/2.0) /
(num_negative*num_positive))
return auc
metrics = {"acc": accuracy,
"error": error,
"mse": mean_square_error,
"mae": mean_absolute_error,
"auc": auc}
def get_metric(eval_metric):
return metrics[eval_metric]
| import numpy as np
def accuracy(preds, labels):
return np.mean(labels == preds.round())
def error(preds, labels):
return 1.0 - accuracy(preds,labels)
def mean_square_error(preds, labels):
return np.mean(np.square(preds - labels))
def mean_absolute_error(preds, labels):
return np.mean(np.abs(preds - labels))
metrics = {"acc": accuracy,
"error": error,
"mse": mean_square_error,
"mae": mean_absolute_error}
def get_metric(eval_metric):
return metrics[eval_metric]
| Python | 0.999999 |
00140b48d7473c0f6738e5bc7894370baee9ef30 | Remove debugging | IATISimpleTester/lib/helpers.py | IATISimpleTester/lib/helpers.py | from collections import defaultdict
import re
from lxml import etree
from IATISimpleTester import app
# given an expression list and the name of an expression,
# select it,
def select_expression(expression_list, expression_name, default_expression_name=None):
expression_dicts = {x["id"]: x for x in expression_list}
if expression_name not in expression_dicts:
expression_name = default_expression_name
return expression_name, expression_dicts.get(expression_name)
def slugify(inp):
return inp.lower().replace(' ', '-')
def pprint(explanation):
explanation = explanation.strip().capitalize().replace('\n', '<br>') + '.'
return re.sub(r'`([^`]*)`', r'<code>\1</code>', explanation)
| from collections import defaultdict
import re
from lxml import etree
from IATISimpleTester import app
# given an expression list and the name of an expression,
# select it,
def select_expression(expression_list, expression_name, default_expression_name=None):
expression_dicts = {x["id"]: x for x in expression_list}
if expression_name not in expression_dicts:
expression_name = default_expression_name
return expression_name, expression_dicts.get(expression_name)
def slugify(inp):
return inp.lower().replace(' ', '-')
def pprint(explanation):
print(explanation)
explanation = explanation.strip().capitalize().replace('\n', '<br>') + '.'
return re.sub(r'`([^`]*)`', r'<code>\1</code>', explanation)
| Python | 0.000005 |
303a8c149c30d4dd1d9c833c6716d5ab0da88e04 | Change version number to 1.2. | cbclib/version.py | cbclib/version.py | """a cbclib version storage module."""
version_tuple = (1, 2, 0)
full_version = "%d.%d.%d" % version_tuple
| """a cbclib version storage module."""
version_tuple = (1, 1, 1)
full_version = "%d.%d.%d" % version_tuple
| Python | 0.999741 |
994b50c3856e01d3cec712515efe11c0f286781e | Remove deprecated alias | ipywidgets/__init__.py | ipywidgets/__init__.py | # Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""Interactive widgets for the Jupyter notebook.
Provide simple interactive controls in the notebook.
Each Widget corresponds to an object in Python and Javascript,
with controls on the page.
To put a Widget on the page, you can display it with Jupyter's display machinery::
from ipywidgets import IntSlider
slider = IntSlider(min=1, max=10)
display(slider)
Moving the slider will change the value. Most Widgets have a current value,
accessible as a `value` attribute.
"""
import os
from IPython import get_ipython
from ._version import version_info, __version__, __protocol_version__, __jupyter_widgets_controls_version__, __jupyter_widgets_base_version__
from .widgets import *
from traitlets import link, dlink
def load_ipython_extension(ip):
"""Set up Jupyter to work with widgets"""
if not hasattr(ip, 'kernel'):
return
register_comm_target(ip.kernel)
def register_comm_target(kernel=None):
"""Register the jupyter.widget comm target"""
if kernel is None:
kernel = get_ipython().kernel
kernel.comm_manager.register_target('jupyter.widget', Widget.handle_comm_opened)
def _handle_ipython():
"""Register with the comm target at import if running in Jupyter"""
ip = get_ipython()
if ip is None:
return
load_ipython_extension(ip)
_handle_ipython()
| # Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""Interactive widgets for the Jupyter notebook.
Provide simple interactive controls in the notebook.
Each Widget corresponds to an object in Python and Javascript,
with controls on the page.
To put a Widget on the page, you can display it with Jupyter's display machinery::
from ipywidgets import IntSlider
slider = IntSlider(min=1, max=10)
display(slider)
Moving the slider will change the value. Most Widgets have a current value,
accessible as a `value` attribute.
"""
import os
from IPython import get_ipython
from ._version import version_info, __version__, __protocol_version__, __jupyter_widgets_controls_version__, __jupyter_widgets_base_version__
from .widgets import *
from traitlets import link, dlink
def load_ipython_extension(ip):
"""Set up Jupyter to work with widgets"""
if not hasattr(ip, 'kernel'):
return
register_comm_target(ip.kernel)
def register_comm_target(kernel=None):
"""Register the jupyter.widget comm target"""
if kernel is None:
kernel = get_ipython().kernel
kernel.comm_manager.register_target('jupyter.widget', Widget.handle_comm_opened)
# deprecated alias
handle_kernel = register_comm_target
def _handle_ipython():
"""Register with the comm target at import if running in Jupyter"""
ip = get_ipython()
if ip is None:
return
load_ipython_extension(ip)
_handle_ipython()
| Python | 0.000003 |
8d5d8cc8d61596a62513039d79abb57f274333ef | Set version as 0.9.0 | alignak_backend_client/__init__.py | alignak_backend_client/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Alignak REST backend client library
This module is a Python library used for the REST API of the Alignak backend
"""
# Application version and manifest
VERSION = (0, 9, 0)
__application__ = u"Alignak Backend client"
__short_version__ = '.'.join((str(each) for each in VERSION[:2]))
__version__ = '.'.join((str(each) for each in VERSION[:4]))
__author__ = u"Alignak team"
__author_email__ = u"david.durieux@alignak.net"
__copyright__ = u"(c) 2015-2017 - %s" % __author__
__license__ = u"GNU Affero General Public License, version 3"
__description__ = u"Alignak backend client library"
__releasenotes__ = u"""Alignak backend client library"""
__git_url__ = "https://github.com/Alignak-monitoring-contrib/alignak-backend-client"
__doc_url__ = "http://alignak-backend-client.readthedocs.org"
__classifiers__ = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: System :: Monitoring',
'Topic :: System :: Systems Administration'
]
# Application manifest
manifest = {
'name': __application__,
'version': __version__,
'author': __author__,
'description': __description__,
'copyright': __copyright__,
'license': __license__,
'release': __releasenotes__,
'doc': __doc_url__
}
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Alignak REST backend client library
This module is a Python library used for the REST API of the Alignak backend
"""
# Application version and manifest
VERSION = (0, 7, 0)
__application__ = u"Alignak Backend client"
__short_version__ = '.'.join((str(each) for each in VERSION[:2]))
__version__ = '.'.join((str(each) for each in VERSION[:4]))
__author__ = u"Alignak team"
__author_email__ = u"david.durieux@alignak.net"
__copyright__ = u"(c) 2015-2017 - %s" % __author__
__license__ = u"GNU Affero General Public License, version 3"
__description__ = u"Alignak backend client library"
__releasenotes__ = u"""Alignak backend client library"""
__git_url__ = "https://github.com/Alignak-monitoring-contrib/alignak-backend-client"
__doc_url__ = "http://alignak-backend-client.readthedocs.org"
__classifiers__ = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: System :: Monitoring',
'Topic :: System :: Systems Administration'
]
# Application manifest
manifest = {
'name': __application__,
'version': __version__,
'author': __author__,
'description': __description__,
'copyright': __copyright__,
'license': __license__,
'release': __releasenotes__,
'doc': __doc_url__
}
| Python | 0.999418 |
26d93980bcf2e8463c8fc390059563d74b189418 | Create commit_zero (was forgotten) in repoinit.py | tools/repoinit.py | tools/repoinit.py | #!/usr/bin/env python3
# Copyright 2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to initialise a Github repo to be
used as a basis for a Wrap db entry. Also calculates a basic
upstream.wrap."""
import datetime
import git
import hashlib
import os
import shutil
import sys
import urllib.request
upstream_templ = '''[wrap-file]
directory = %s
source_url = %s
source_filename = %s
source_hash = %s
'''
readme = '''This repository contains a Meson build definition for project {reponame}.
For more information please see http://mesonbuild.com.
'''
mit_license = '''Copyright (c) {year} The Meson development team
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def initialize(reponame):
repo = git.Repo.init('.')
with open('readme.txt', 'w') as ofile:
ofile.write(readme.format(reponame=reponame))
with open('LICENSE.build', 'w') as ofile:
ofile.write(mit_license.format(year=datetime.datetime.now().year))
repo.index.add(['readme.txt', 'LICENSE.build'])
commit = repo.index.commit('Created repository for project %s.' % reponame)
tag = repo.create_tag('commit_zero', commit)
origin = repo.create_remote('origin', 'git@github.com:mesonbuild/%s.git' % reponame)
origin.push(repo.head.ref.name)
origin.push(tag)
shutil.rmtree('.git')
os.unlink('readme.txt')
def build_upstream_wrap(zipurl, filename, directory):
with urllib.request.urlopen(zipurl) as r:
data = r.read()
open(filename, 'wb').write(data)
h = hashlib.sha256()
h.update(data)
dhash = h.hexdigest()
with open('upstream.wrap', 'w') as ofile:
ofile.write(upstream_templ % (directory, zipurl, filename, dhash))
if __name__ == '__main__':
if len(sys.argv) != 5:
print(sys.argv[0], '<reponame> <zipurl> <filename> <directory>')
sys.exit(1)
reponame = sys.argv[1]
zipurl = sys.argv[2]
filename = sys.argv[3]
directory = sys.argv[4]
initialize(reponame)
build_upstream_wrap(zipurl, filename, directory)
print('Done, now do the branching + stuff.')
| #!/usr/bin/env python3
# Copyright 2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to initialise a Github repo to be
used as a basis for a Wrap db entry. Also calculates a basic
upstream.wrap."""
import datetime
import git
import hashlib
import os
import shutil
import sys
import urllib.request
upstream_templ = '''[wrap-file]
directory = %s
source_url = %s
source_filename = %s
source_hash = %s
'''
readme = '''This repository contains a Meson build definition for project {reponame}.
For more information please see http://mesonbuild.com.
'''
mit_license = '''Copyright (c) {year} The Meson development team
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def initialize(reponame):
repo = git.Repo.init('.')
with open('readme.txt', 'w') as ofile:
ofile.write(readme.format(reponame=reponame))
with open('LICENSE.build', 'w') as ofile:
ofile.write(mit_license.format(year=datetime.datetime.now().year))
repo.index.add(['readme.txt', 'LICENSE.build'])
repo.index.commit('Created repository for project %s.' % reponame)
origin = repo.create_remote('origin', 'git@github.com:mesonbuild/%s.git' % reponame)
origin.push(repo.head.ref.name)
shutil.rmtree('.git')
os.unlink('readme.txt')
def build_upstream_wrap(zipurl, filename, directory):
with urllib.request.urlopen(zipurl) as r:
data = r.read()
open(filename, 'wb').write(data)
h = hashlib.sha256()
h.update(data)
dhash = h.hexdigest()
with open('upstream.wrap', 'w') as ofile:
ofile.write(upstream_templ % (directory, zipurl, filename, dhash))
if __name__ == '__main__':
if len(sys.argv) != 5:
print(sys.argv[0], '<reponame> <zipurl> <filename> <directory>')
sys.exit(1)
reponame = sys.argv[1]
zipurl = sys.argv[2]
filename = sys.argv[3]
directory = sys.argv[4]
initialize(reponame)
build_upstream_wrap(zipurl, filename, directory)
print('Done, now do the branching + stuff.')
| Python | 0.000001 |
d2ebde1d3fbee6b5f6856768e0fee6f7273e9a55 | Fix inconsistent field name | accelerator_abstract/models/base_mentor_program_office_hour.py | accelerator_abstract/models/base_mentor_program_office_hour.py | # MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from __future__ import unicode_literals
import swapper
from django.conf import settings
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from accelerator_abstract.models.accelerator_model import AcceleratorModel
HOUR_IS_PAST_MESSAGE = "This office hour is in the past"
HOUR_HAS_BEEN_CANCELED_MESSAGE = "This office hour has been canceled"
HOUR_NOT_SPECIFIED_MESSAGE = "Office hour has not been specified"
HOUR_OWNED_BY_ANOTHER_MESSAGE = "This office hour is owned by another user"
@python_2_unicode_compatible
class BaseMentorProgramOfficeHour(AcceleratorModel):
program = models.ForeignKey(
swapper.get_model_name(AcceleratorModel.Meta.app_label, "Program"),
on_delete=models.SET_NULL,
null=True,
blank=True,
)
mentor = models.ForeignKey(settings.AUTH_USER_MODEL,
related_name='mentor_officehours',
on_delete=models.CASCADE)
finalist = models.ForeignKey(settings.AUTH_USER_MODEL,
verbose_name="Finalist",
blank=True,
null=True,
related_name='finalist_officehours',
on_delete=models.CASCADE)
startup = models.ForeignKey(
swapper.get_model_name(AcceleratorModel.Meta.app_label, "Startup"),
blank=True,
null=True,
related_name='startup_officehours',
on_delete=models.SET_NULL)
start_date_time = models.DateTimeField(db_index=True)
end_date_time = models.DateTimeField(db_index=True)
description = models.TextField(blank=True)
location = models.ForeignKey(
swapper.get_model_name(AcceleratorModel.Meta.app_label, "Location"),
null=True,
blank=True,
on_delete=models.CASCADE)
notify_reservation = models.BooleanField(default=True)
topics = models.CharField(max_length=2000, blank=True, default="")
meeting_info = models.CharField(max_length=256, blank=True, default="")
class Meta(AcceleratorModel.Meta):
db_table = '{}_mentorprogramofficehour'.format(
AcceleratorModel.Meta.app_label)
abstract = True
verbose_name = "Office Hour"
unique_together = ('program', 'mentor', 'start_date_time')
ordering = ['start_date_time']
def __str__(self):
hour_type = "Reserved"
if self.is_open():
hour_type = "Open"
return "%s office hour with %s" % (hour_type, self.mentor)
def is_open(self):
return not bool(self.finalist)
| # MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from __future__ import unicode_literals
import swapper
from django.conf import settings
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from accelerator_abstract.models.accelerator_model import AcceleratorModel
HOUR_IS_PAST_MESSAGE = "This office hour is in the past"
HOUR_HAS_BEEN_CANCELED_MESSAGE = "This office hour has been canceled"
HOUR_NOT_SPECIFIED_MESSAGE = "Office hour has not been specified"
HOUR_OWNED_BY_ANOTHER_MESSAGE = "This office hour is owned by another user"
@python_2_unicode_compatible
class BaseMentorProgramOfficeHour(AcceleratorModel):
program = models.ForeignKey(
swapper.get_model_name(AcceleratorModel.Meta.app_label, "Program"),
on_delete=models.SET_NULL,
null=True,
blank=True,
)
mentor = models.ForeignKey(settings.AUTH_USER_MODEL,
related_name='mentor_officehours',
on_delete=models.CASCADE)
finalist = models.ForeignKey(settings.AUTH_USER_MODEL,
verbose_name="Finalist",
blank=True,
null=True,
related_name='finalist_officehours',
on_delete=models.CASCADE)
startup = models.ForeignKey(
swapper.get_model_name(AcceleratorModel.Meta.app_label, "Startup"),
blank=True,
null=True,
related_name='startup_officehours',
on_delete=models.SET_NULL)
start_date_time = models.DateTimeField(db_index=True)
end_date_time = models.DateTimeField(db_index=True)
description = models.TextField(blank=True)
location = models.ForeignKey(
swapper.get_model_name(AcceleratorModel.Meta.app_label, "Location"),
null=True,
blank=True,
on_delete=models.CASCADE)
notify_reservation = models.BooleanField(default=True)
topics = models.TextField(blank=True)
meeting_info = models.CharField(max_length=256, blank=True, default="")
class Meta(AcceleratorModel.Meta):
db_table = '{}_mentorprogramofficehour'.format(
AcceleratorModel.Meta.app_label)
abstract = True
verbose_name = "Office Hour"
unique_together = ('program', 'mentor', 'start_date_time')
ordering = ['start_date_time']
def __str__(self):
hour_type = "Reserved"
if self.is_open():
hour_type = "Open"
return "%s office hour with %s" % (hour_type, self.mentor)
def is_open(self):
return not bool(self.finalist)
| Python | 0.000022 |
2c42b84a5ffd7ce42295488271781c08ab372bd3 | add website_multi_company_portal to demo addons | website_multi_company/__manifest__.py | website_multi_company/__manifest__.py | # -*- coding: utf-8 -*-
{
"name": """Real Multi Website""",
"summary": """Yes, you can set up multi-company, multi-website, multi-theme, multi-eCommerce on a single database!""",
"category": "eCommerce",
"live_test_url": "http://apps.it-projects.info/shop/product/website-multi-company?version=10.0",
"images": ['images/website_multi_company_main.png'],
"version": "1.2.0",
"application": False,
"author": "IT-Projects LLC, Ivan Yelizariev",
"support": "apps@it-projects.info",
"website": "https://twitter.com/yelizariev",
"license": "LGPL-3",
"price": 400.00,
"currency": "EUR",
"depends": [
"website",
"website_multi_theme",
"ir_config_parameter_multi_company",
],
"external_dependencies": {"python": [], "bin": []},
"data": [
"views/website_views.xml",
"views/website_templates.xml",
"views/website_menu_views.xml",
"views/website_theme_views.xml",
"views/res_config_views.xml",
],
"qweb": [
],
"demo": [
# "data/website_demo.xml",
],
"post_load": "post_load",
"pre_init_hook": None,
"post_init_hook": None,
"auto_install": False,
"installable": True,
"demo_title": "Real Multi Website",
"demo_addons": [
"website_multi_company_sale",
"website_multi_company_portal",
],
"demo_addons_hidden": [
"website_multi_company_demo",
],
"demo_url": "website-multi-company",
"demo_summary": "The module allows to set up multi-company, multi-website, multi-theme, multi-eCommerce on a single database!",
"demo_images": [
"images/website_multi_company_main.png",
]
}
| # -*- coding: utf-8 -*-
{
"name": """Real Multi Website""",
"summary": """Yes, you can set up multi-company, multi-website, multi-theme, multi-eCommerce on a single database!""",
"category": "eCommerce",
"live_test_url": "http://apps.it-projects.info/shop/product/website-multi-company?version=10.0",
"images": ['images/website_multi_company_main.png'],
"version": "1.2.0",
"application": False,
"author": "IT-Projects LLC, Ivan Yelizariev",
"support": "apps@it-projects.info",
"website": "https://twitter.com/yelizariev",
"license": "LGPL-3",
"price": 400.00,
"currency": "EUR",
"depends": [
"website",
"website_multi_theme",
"ir_config_parameter_multi_company",
],
"external_dependencies": {"python": [], "bin": []},
"data": [
"views/website_views.xml",
"views/website_templates.xml",
"views/website_menu_views.xml",
"views/website_theme_views.xml",
"views/res_config_views.xml",
],
"qweb": [
],
"demo": [
# "data/website_demo.xml",
],
"post_load": "post_load",
"pre_init_hook": None,
"post_init_hook": None,
"auto_install": False,
"installable": True,
"demo_title": "Real Multi Website",
"demo_addons": [
"website_multi_company_sale",
],
"demo_addons_hidden": [
"website_multi_company_demo",
],
"demo_url": "website-multi-company",
"demo_summary": "The module allows to set up multi-company, multi-website, multi-theme, multi-eCommerce on a single database!",
"demo_images": [
"images/website_multi_company_main.png",
]
}
| Python | 0 |
08300a23fc06b9fa46435cf83e62778064b95424 | Fix support for xarray < 0.11 until it is released. | cfgrib/cfgrib_.py | cfgrib/cfgrib_.py | #
# Copyright 2017-2018 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
#
from __future__ import absolute_import, division, print_function
import numpy as np
from xarray import Variable
from xarray.core import indexing
from xarray.core.utils import Frozen, FrozenOrderedDict
from xarray.backends.common import AbstractDataStore, BackendArray
try:
from xarray.backends.locks import ensure_lock, SerializableLock
except ImportError:
# no locking for xarray <= 0.11
def ensure_lock(lock):
return lock
class SerializableLock(object):
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
# FIXME: Add a dedicated lock just in case, even if ecCodes is supposed to be thread-safe in most
# circumstances. See: https://confluence.ecmwf.int/display/ECC/Frequently+Asked+Questions
ECCODES_LOCK = SerializableLock()
class CfGribArrayWrapper(BackendArray):
def __init__(self, datastore, array):
self.datastore = datastore
self.shape = array.shape
self.dtype = array.dtype
self.array = array
def __getitem__(self, key):
return indexing.explicit_indexing_adapter(
key, self.shape, indexing.IndexingSupport.OUTER, self._getitem)
def _getitem(self, key):
with self.datastore.lock:
return self.array[key]
class CfGribDataStore(AbstractDataStore):
"""
Implements the ``xr.AbstractDataStore`` read-only API for a GRIB file.
"""
def __init__(self, filename, lock=None, **backend_kwargs):
import cfgrib
if lock is None:
lock = ECCODES_LOCK
self.lock = ensure_lock(lock)
# NOTE: filter_by_keys is a dict, but CachingFileManager only accepts hashable types
if 'filter_by_keys' in backend_kwargs:
backend_kwargs['filter_by_keys'] = tuple(backend_kwargs['filter_by_keys'].items())
self.ds = cfgrib.open_file(filename, mode='r', **backend_kwargs)
def open_store_variable(self, name, var):
if isinstance(var.data, np.ndarray):
data = var.data
else:
data = indexing.LazilyOuterIndexedArray(CfGribArrayWrapper(self, var.data))
encoding = self.ds.encoding.copy()
encoding['original_shape'] = var.data.shape
return Variable(var.dimensions, data, var.attributes, encoding)
def get_variables(self):
return FrozenOrderedDict((k, self.open_store_variable(k, v))
for k, v in self.ds.variables.items())
def get_attrs(self):
return Frozen(self.ds.attributes)
def get_dimensions(self):
return Frozen(self.ds.dimensions)
def get_encoding(self):
encoding = {}
encoding['unlimited_dims'] = {k for k, v in self.ds.dimensions.items() if v is None}
return encoding
| #
# Copyright 2017-2018 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Alessandro Amici - B-Open - https://bopen.eu
#
from __future__ import absolute_import, division, print_function
import numpy as np
from xarray import Variable
from xarray.core import indexing
from xarray.core.utils import Frozen, FrozenOrderedDict
from xarray.backends.common import AbstractDataStore, BackendArray
from xarray.backends.locks import ensure_lock, SerializableLock
# FIXME: Add a dedicated lock just in case, even if ecCodes is supposed to be thread-safe in most
# circumstances. See: https://confluence.ecmwf.int/display/ECC/Frequently+Asked+Questions
ECCODES_LOCK = SerializableLock()
class CfGribArrayWrapper(BackendArray):
    """Adapt a cfgrib array to xarray's lazy backend-array protocol."""

    def __init__(self, datastore, array):
        self.datastore = datastore
        self.array = array
        # Mirror the wrapped array's metadata so xarray can size and type
        # this wrapper without touching the data.
        self.shape = array.shape
        self.dtype = array.dtype

    def __getitem__(self, key):
        adapt = indexing.explicit_indexing_adapter
        return adapt(key, self.shape, indexing.IndexingSupport.OUTER,
                     self._getitem)

    def _getitem(self, key):
        # Raw reads are serialised: the underlying GRIB reader is not
        # assumed to be thread-safe.
        with self.datastore.lock:
            return self.array[key]
class CfGribDataStore(AbstractDataStore):
"""
Implements the ``xr.AbstractDataStore`` read-only API for a GRIB file.
"""
def __init__(self, filename, lock=None, **backend_kwargs):
import cfgrib
if lock is None:
lock = ECCODES_LOCK
self.lock = ensure_lock(lock)
# NOTE: filter_by_keys is a dict, but CachingFileManager only accepts hashable types
if 'filter_by_keys' in backend_kwargs:
backend_kwargs['filter_by_keys'] = tuple(backend_kwargs['filter_by_keys'].items())
self.ds = cfgrib.open_file(filename, mode='r', **backend_kwargs)
def open_store_variable(self, name, var):
if isinstance(var.data, np.ndarray):
data = var.data
else:
data = indexing.LazilyOuterIndexedArray(CfGribArrayWrapper(self, var.data))
encoding = self.ds.encoding.copy()
encoding['original_shape'] = var.data.shape
return Variable(var.dimensions, data, var.attributes, encoding)
def get_variables(self):
return FrozenOrderedDict((k, self.open_store_variable(k, v))
for k, v in self.ds.variables.items())
def get_attrs(self):
return Frozen(self.ds.attributes)
def get_dimensions(self):
return Frozen(self.ds.dimensions)
def get_encoding(self):
encoding = {}
encoding['unlimited_dims'] = {k for k, v in self.ds.dimensions.items() if v is None}
return encoding
| Python | 0 |
b671d67aaf80df9297213973659c59a4ebd72e08 | test file changed | pycqed/tests/analysis_v2/test_Two_state_T1_analysis.py | pycqed/tests/analysis_v2/test_Two_state_T1_analysis.py | import unittest
import pycqed as pq
import os
from pycqed.analysis_v2 import measurement_analysis as ma
from pycqed.analysis_v2 import Two_state_T1_analysis as Ta
class Test_efT1_analysis(unittest.TestCase):
    """Smoke test for the two-state (ef) T1 analysis on stored test data."""

    @classmethod
    def setUpClass(self):
        # NOTE(review): the first argument of a classmethod is
        # conventionally named `cls`; it works as written because `self`
        # is bound to the class here.
        # Point the analysis toolbox at the bundled test-data directory so
        # the timestamp lookup below resolves against repository fixtures.
        self.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data')
        ma.a_tools.datadir = self.datadir

    def test_efT1_analysis(self):
        # Run the analysis on the fixture taken at this timestamp.
        Ta.efT1_analysis(
            t_start='20180606_144110', auto=True, close_figs=False)
        # NOTE(review): `self.fit_res` is never assigned on this TestCase,
        # so the next line raises AttributeError; it was presumably meant
        # to read the fit result off the analysis object created above
        # (and its value is discarded either way) -- confirm intent.
        self.fit_res['fit_res_P0'].params['tau1'].value
| import unittest
import pycqed as pq
import os
from pycqed.analysis_v2 import measurement_analysis as ma
from pycqed.analysis_v2 import Two_state_T1_analysis as Ta
class Test_efT1_analysis(unittest.TestCase):
@classmethod
def setUpClass(self):
self.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data')
ma.a_tools.datadir = self.datadir
def test_efT1_analysis(self):
Ta.efT1_analysis(
t_start='20180606_144110', auto=True, close_figs=False)
| Python | 0.000001 |
196b9547b4dbcbfbf4891c7fd3ea3b9944018430 | Revert "Revert "Added script for cron job to load surveys to database."" | scripts/cronRefreshEdxQualtrics.py | scripts/cronRefreshEdxQualtrics.py | from surveyextractor import QualtricsExtractor
import getopt
import sys
### Script for scheduling regular EdxQualtrics updates
### Usage for cron should be "cronRefreshEdxQualtrics.py -m -s -r"
# Append directory for dependencies to PYTHONPATH
sys.path.append("/home/dataman/Code/qualtrics_etl/src/qualtrics_etl/")
# Build the extractor once; each selected flag below drives one ETL phase.
qe = QualtricsExtractor()

# BUG FIX: getopt long-option names must be given WITHOUT the leading
# dashes; passing '--reset' etc. made every long form unrecognized
# (GetoptError). The long spelling of -s is also aligned with the
# declared 'loadsurveys'.
opts, args = getopt.getopt(
    sys.argv[1:], 'amsr',
    ['reset', 'loadmeta', 'loadsurveys', 'loadresponses'])
for opt, arg in opts:
    if opt in ('-a', '--reset'):
        # Full reset: wipe metadata, surveys and responses.
        qe.resetMetadata()
        qe.resetSurveys()
        qe.resetResponses()
    elif opt in ('-m', '--loadmeta'):
        qe.loadSurveyMetadata()
    elif opt in ('-s', '--loadsurveys'):
        # Reload survey definitions from scratch.
        qe.resetSurveys()
        qe.loadSurveyData()
    elif opt in ('-r', '--loadresponses'):
        qe.loadResponseData()
| from surveyextractor import QualtricsExtractor
import getopt, sys
# Script for scheduling regular EdxQualtrics updates
# Usage for cron should be "cronRefreshEdxQualtrics.py -m -s -r"
# Build the extractor once; each selected flag below drives one ETL phase.
qe = QualtricsExtractor()

# BUG FIX: getopt long-option names must not include the leading dashes;
# with '--reset' etc. in the list, every long form raised GetoptError.
# The long spelling of -s is also aligned to 'loadsurveys' as declared.
opts, args = getopt.getopt(
    sys.argv[1:], 'amsr',
    ['reset', 'loadmeta', 'loadsurveys', 'loadresponses'])
for opt, arg in opts:
    if opt in ('-a', '--reset'):
        # Full reset: wipe metadata, surveys and responses.
        qe.resetMetadata()
        qe.resetSurveys()
        qe.resetResponses()
    elif opt in ('-m', '--loadmeta'):
        qe.loadSurveyMetadata()
    elif opt in ('-s', '--loadsurveys'):
        # Reload survey definitions from scratch.
        qe.resetSurveys()
        qe.loadSurveyData()
    elif opt in ('-r', '--loadresponses'):
        qe.loadResponseData()
| Python | 0 |
c047f33561f304a9932c1d43284c59ae51035c69 | update client | chat/consumers.py | chat/consumers.py | import re
import json
import logging
from channels import Group
from channels.sessions import channel_session
from .models import Room
from .models import Player
log = logging.getLogger(__name__)
@channel_session
def ws_connect(message):
    """Join this reply channel to the chat group named in the path.

    Expects ``message.path`` to look like ``/chat/{label}/`` and the Room
    to exist; otherwise this is some other sort of websocket and we bail.
    Effectively a version of ``_get_object_or_404``.
    """
    try:
        prefix, label = message['path'].decode('ascii').strip('/').split('/')
        if prefix != 'chat':
            # Reuse the ValueError path so the same log line fires.
            raise ValueError('not a chat path')
        room = Room.objects.get(label=label)
    except ValueError:
        log.debug('invalid ws path=%s', message['path'])
        return
    except Room.DoesNotExist:
        log.debug('ws room does not exist label=%s', label)
        return

    log.debug('chat connect room=%s client=%s:%s',
              room.label, message['client'][0], message['client'][1])

    # Need to be explicit about the channel layer so that testability works
    # This may be a FIXME?
    Group('chat-' + label).add(message.reply_channel)

    message.channel_session['room'] = room.label
@channel_session
def ws_receive(message):
    """Validate an incoming chat frame, claim/verify the sender's seat,
    store the message, and broadcast it to the room's group."""
    # Look up the room from the channel session, bailing if it doesn't exist
    try:
        label = message.channel_session['room']
        room = Room.objects.get(label=label)
    except KeyError:
        log.debug('no room in channel_session')
        return
    except Room.DoesNotExist:
        log.debug('recieved message, buy room does not exist label=%s', label)
        return

    # Parse out a chat message from the content text, bailing if it doesn't
    # conform to the expected message format.
    try:
        data = json.loads(message['text'])
    except ValueError:
        # BUG FIX: this log call previously referenced the undefined name
        # `text`, raising NameError whenever a malformed frame arrived.
        log.debug("ws message isn't json text=%s", message['text'])
        return

    if set(data.keys()) != set(('handle', 'message')):
        log.debug("ws message unexpected format data=%s", data)
        return

    if data:
        # Seat handling: `handle` doubles as the player's position in the
        # room. A position may only be re-used by the channel that first
        # claimed it.
        player = None
        try:
            player = room.players.filter(position=data['handle']).first()
        except ValueError:
            log.debug("something is wrong")
            return
        if player is not None:
            if player.address != message.reply_channel.name:
                log.debug("this room's position has been occupied by another guy")
                return
        else:
            room.players.create(position=data['handle'], address=message.reply_channel.name)

        log.debug('chat message room=%s handle=%s message=%s',
                  room.label, data['handle'], data['message'])

        m = room.messages.create(**data)

        # See above for the note about Group
        Group('chat-' + label).send({'text': json.dumps(m.as_dict())})
@channel_session
def ws_disconnect(message):
    """Drop this reply channel from the room group and free its seat."""
    try:
        label = message.channel_session['room']
        room = Room.objects.get(label=label)
        Group('chat-' + label).discard(message.reply_channel)
        # BUG FIX: the related manager is `players` (as used twice in
        # ws_receive); `room.player` raised AttributeError on every
        # disconnect, so seats were never released.
        player = room.players.filter(address=message.reply_channel.name).first()
        if player is not None:
            room.players.filter(address=message.reply_channel.name).delete()
    except (KeyError, Room.DoesNotExist):
        # No session entry or room already gone: nothing to clean up.
        pass
| import re
import json
import logging
from channels import Group
from channels.sessions import channel_session
from .models import Room
from .models import Player
log = logging.getLogger(__name__)
@channel_session
def ws_connect(message):
# Extract the room from the message. This expects message.path to be of the
# form /chat/{label}/, and finds a Room if the message path is applicable,
# and if the Room exists. Otherwise, bails (meaning this is a some othersort
# of websocket). So, this is effectively a version of _get_object_or_404.
try:
prefix, label = message['path'].decode('ascii').strip('/').split('/')
if prefix != 'chat':
log.debug('invalid ws path=%s', message['path'])
return
room = Room.objects.get(label=label)
except ValueError:
log.debug('invalid ws path=%s', message['path'])
return
except Room.DoesNotExist:
log.debug('ws room does not exist label=%s', label)
return
log.debug('chat connect room=%s client=%s:%s',
room.label, message['client'][0], message['client'][1])
# Need to be explicit about the channel layer so that testability works
# This may be a FIXME?
Group('chat-'+label).add(message.reply_channel)
message.channel_session['room'] = room.label
@channel_session
def ws_receive(message):
    """Validate an incoming chat frame, claim/verify the sender's seat,
    store the message, and broadcast it to the room's group."""
    # Look up the room from the channel session, bailing if it doesn't exist
    try:
        label = message.channel_session['room']
        room = Room.objects.get(label=label)
    except KeyError:
        log.debug('no room in channel_session')
        return
    except Room.DoesNotExist:
        log.debug('recieved message, buy room does not exist label=%s', label)
        return

    # Parse out a chat message from the content text, bailing if it doesn't
    # conform to the expected message format.
    try:
        data = json.loads(message['text'])
    except ValueError:
        # BUG FIX: this log call previously referenced the undefined name
        # `text`, raising NameError whenever a malformed frame arrived.
        log.debug("ws message isn't json text=%s", message['text'])
        return

    if set(data.keys()) != set(('handle', 'message')):
        log.debug("ws message unexpected format data=%s", data)
        return

    if data:
        # Seat handling: `handle` doubles as the player's position in the
        # room. A position may only be re-used by the channel that first
        # claimed it.
        player = None
        try:
            player = room.players.filter(position=data['handle']).first()
        except ValueError:
            log.debug("something is wrong")
            return
        if player is not None:
            if player.address != message.reply_channel.name:
                log.debug("this room's position has been occupied by another guy")
                return
        else:
            room.players.create(position=data['handle'], address=message.reply_channel.name)

        log.debug('chat message room=%s handle=%s message=%s',
                  room.label, data['handle'], data['message'])

        m = room.messages.create(**data)

        # See above for the note about Group
        Group('chat-' + label).send({'text': json.dumps(m.as_dict())})
@channel_session
def ws_disconnect(message):
try:
label = message.channel_session['room']
room = Room.objects.get(label=label)
Group('chat-'+label).discard(message.reply_channel)
except (KeyError, Room.DoesNotExist):
pass
| Python | 0.000001 |
f1111b6d7eb387e7287497c1853addd003a81f39 | Add a length limit | chatterbox/irc.py | chatterbox/irc.py | import time
import random
import irc.bot
class Bot(irc.bot.SingleServerIRCBot):
    """Markov-style chatter bot: replies to PMs and to channel mentions."""

    # Hard cap on reply length; keeps messages comfortably under the IRC
    # 512-byte line limit. Was an inline magic number duplicated in both
    # handlers.
    MAX_SENTENCE_LEN = 450

    def __init__(self, generator, channels, nickname, server, port=6667):
        super().__init__([(server, port)], nickname, nickname)
        # `generator` must expose generate_sentence() -> str.
        self.generator = generator
        self.channels_to_join = channels
        self.nick = nickname

    def on_nicknameinuse(self, c, e):
        # Keep appending underscores until a free nick is found.
        self.nick = c.get_nickname() + '_'
        c.nick(self.nick)

    def on_welcome(self, c, e):
        for channel in self.channels_to_join:
            c.join(channel)

    def _reply(self, c, target):
        """Generate a sentence, pause as if typing, and send it to *target*.

        Extracted from on_privmsg/on_pubmsg, which duplicated this logic.
        The delay is ~15-30ms per character so replies feel human-paced.
        """
        sentence = self.generator.generate_sentence()[:self.MAX_SENTENCE_LEN]
        time.sleep((random.random() + 1) * 0.015 * len(sentence))
        c.privmsg(target, sentence)

    def on_privmsg(self, c, e):
        self._reply(c, e.source.nick)

    def on_pubmsg(self, c, e):
        # Only respond in-channel when our nick is mentioned.
        if self.nick in e.arguments[0]:
            self._reply(c, e.target)
| import time
import random
import irc.bot
class Bot(irc.bot.SingleServerIRCBot):
def __init__(self, generator, channels, nickname, server, port=6667):
super().__init__([(server, port)], nickname, nickname)
self.generator = generator
self.channels_to_join = channels
self.nick = nickname
def on_nicknameinuse(self, c, e):
self.nick = c.get_nickname() + '_'
c.nick(self.nick)
def on_welcome(self, c, e):
for channel in self.channels_to_join:
c.join(channel)
def on_privmsg(self, c, e):
sentence = self.generator.generate_sentence()
time.sleep((random.random() + 1) * 0.015 * len(sentence))
c.privmsg(e.source.nick, sentence)
def on_pubmsg(self, c, e):
if self.nick in e.arguments[0]:
sentence = self.generator.generate_sentence()
time.sleep((random.random() + 1) * 0.015 * len(sentence))
c.privmsg(e.target, sentence)
| Python | 0.998833 |
7e4554b98c4bd431431e5c22845a18ba842349e8 | fix pol append logic error | scripts/mc_log_autocorrelations.py | scripts/mc_log_autocorrelations.py | #! /usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# Copyright 2016 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""Record information about antenna autocorrelations, as logged into the Redis
server by the correlator software.
"""
from __future__ import absolute_import, division, print_function
import socket
import time
from builtins import int
import re
import datetime
from astropy.time import Time
import numpy as np
import redis
from hera_mc import autocorrelations, mc
# Preliminaries. We have a small validity check since the M&C design specifies
# the memory, network, and system load are to be 5-minute averages.
MONITORING_INTERVAL = 60 # seconds
# End of config.
parser = mc.get_mc_argument_parser()
parser.add_argument("--redishost", "-r", default="redishost",
help="The hostname of the redis server.")
parser.add_argument('--redisport', '-p', default=6379,
help="Port for the redis server connection.")
parser.add_argument('--debug', action='store_true',
help='Print out debugging information.')
parser.add_argument('--noop', action='store_true',
help='Do not actually save information to the database.')
args = parser.parse_args()
db = mc.connect_to_mc_db(args)
# allocate the maximum size of the autocorrelations as a buffer.
auto = np.zeros(8192, dtype=np.float32)
# make a redis pool and connect to redis
redis_pool = redis.ConnectionPool(host=args.redishost, port=args.redisport)
rsession = redis.Redis(connection_pool=redis_pool)
with db.sessionmaker() as dbsession:
    try:
        while True:
            hostname = socket.gethostname()
            # Data keys in redis are named "auto:<ant><pol>"; the matching
            # "...timestamp" keys are bookkeeping and are skipped here.
            keys = [
                k.decode("utf-8")
                for k in rsession.keys()
                if k.startswith(b"auto") and not k.endswith(b"timestamp")
            ]
            # Collect the antenna numbers and polarizations present in
            # this snapshot of the redis keyspace.
            ants = []
            pols = []
            for key in keys:
                match = re.search(r"auto:(?P<ant>\d+)(?P<pol>e|n)", key)
                if match is not None:
                    ant, pol = int(match.group("ant")), match.group("pol")
                    ants.append(ant)
                    pols.append(pol)
            ants = np.unique(ants)
            pols = np.unique(pols)
            # We put an identical timestamp for all records. The records from the
            # redis server also include timestamps (as JDs), but I think it's actually
            # preferable to use our own clock here. Note that we also ensure that the
            # records grabbed in one execution of this script have identical
            # timestamps, which is a nice property.
            auto_time = datetime.datetime.utcnow()
            for ant in ants:
                for pol in pols:
                    # The (ant, pol) cross product may include pairs with no
                    # key; rsession.get returns None for those and we skip.
                    d = rsession.get("auto:{ant:d}{pol:s}".format(ant=ant, pol=pol))
                    if d is not None:
                        auto = np.frombuffer(d, dtype=np.float32)
                        # For now, we just compute the median:
                        ac = autocorrelations.Autocorrelations()
                        ac.time = auto_time
                        ac.antnum = ant
                        ac.polarization = pol
                        ac.measurement_type = autocorrelations.MeasurementTypes.median
                        # must turn np.float32 into plain Python float
                        ac.value = np.median(auto).item()
                        if args.debug:
                            print(auto.shape, repr(ac))
                        if not args.noop:
                            dbsession.add(ac)
            # Heartbeat: record that this daemon completed a cycle cleanly.
            dbsession.add_daemon_status('mc_log_autocorrelations',
                                        hostname, Time.now(), 'good')
            dbsession.commit()
            time.sleep(MONITORING_INTERVAL)
    except KeyboardInterrupt:
        pass
    except Exception:
        # Mark the daemon as errored before propagating, so monitoring can
        # distinguish a crash from a clean shutdown.
        dbsession.add_daemon_status('mc_log_autocorrelations',
                                    hostname, Time.now(), 'errored')
        dbsession.commit()
        raise
| #! /usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# Copyright 2016 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""Record information about antenna autocorrelations, as logged into the Redis
server by the correlator software.
"""
from __future__ import absolute_import, division, print_function
import socket
import time
from builtins import int
import re
import datetime
from astropy.time import Time
import numpy as np
import redis
from hera_mc import autocorrelations, mc
# Preliminaries. We have a small validity check since the M&C design specifies
# the memory, network, and system load are to be 5-minute averages.
MONITORING_INTERVAL = 60 # seconds
# End of config.
parser = mc.get_mc_argument_parser()
parser.add_argument("--redishost", "-r", default="redishost",
help="The hostname of the redis server.")
parser.add_argument('--redisport', '-p', default=6379,
help="Port for the redis server connection.")
parser.add_argument('--debug', action='store_true',
help='Print out debugging information.')
parser.add_argument('--noop', action='store_true',
help='Do not actually save information to the database.')
args = parser.parse_args()
db = mc.connect_to_mc_db(args)
# allocate the maximum size of the autocorrelations as a buffer.
auto = np.zeros(8192, dtype=np.float32)
# make a redis pool and connect to redis
redis_pool = redis.ConnectionPool(host=args.redishost, port=args.redisport)
rsession = redis.Redis(connection_pool=redis_pool)
with db.sessionmaker() as dbsession:
    try:
        while True:
            hostname = socket.gethostname()
            # Data keys in redis are named "auto:<ant><pol>"; the matching
            # "...timestamp" keys are bookkeeping and are skipped here.
            keys = [
                k.decode("utf-8")
                for k in rsession.keys()
                if k.startswith(b"auto") and not k.endswith(b"timestamp")
            ]
            # Collect the antenna numbers and polarizations present in
            # this snapshot of the redis keyspace.
            ants = []
            pols = []
            for key in keys:
                match = re.search(r"auto:(?P<ant>\d+)(?P<pol>e|n)", key)
                if match is not None:
                    ant, pol = int(match.group("ant")), match.group("pol")
                    ants.append(ant)
                    # BUG FIX: previously appended the list itself
                    # (pols.append(pols)), so np.unique never saw the
                    # polarization strings and no per-pol keys matched.
                    pols.append(pol)
            ants = np.unique(ants)
            pols = np.unique(pols)
            # We put an identical timestamp for all records. The records from the
            # redis server also include timestamps (as JDs), but I think it's actually
            # preferable to use our own clock here. Note that we also ensure that the
            # records grabbed in one execution of this script have identical
            # timestamps, which is a nice property.
            auto_time = datetime.datetime.utcnow()
            for ant in ants:
                for pol in pols:
                    # The (ant, pol) cross product may include pairs with no
                    # key; rsession.get returns None for those and we skip.
                    d = rsession.get("auto:{ant:d}{pol:s}".format(ant=ant, pol=pol))
                    if d is not None:
                        auto = np.frombuffer(d, dtype=np.float32)
                        # For now, we just compute the median:
                        ac = autocorrelations.Autocorrelations()
                        ac.time = auto_time
                        ac.antnum = ant
                        ac.polarization = pol
                        ac.measurement_type = autocorrelations.MeasurementTypes.median
                        # must turn np.float32 into plain Python float
                        ac.value = np.median(auto).item()
                        if args.debug:
                            print(auto.shape, repr(ac))
                        if not args.noop:
                            dbsession.add(ac)
            # Heartbeat: record that this daemon completed a cycle cleanly.
            dbsession.add_daemon_status('mc_log_autocorrelations',
                                        hostname, Time.now(), 'good')
            dbsession.commit()
            time.sleep(MONITORING_INTERVAL)
    except KeyboardInterrupt:
        pass
    except Exception:
        # Mark the daemon as errored before propagating, so monitoring can
        # distinguish a crash from a clean shutdown.
        dbsession.add_daemon_status('mc_log_autocorrelations',
                                    hostname, Time.now(), 'errored')
        dbsession.commit()
        raise
| Python | 0.000003 |
58483cbd70f1ae5e55656b01238f26bd6da6f903 | Change format | captain_hook/services/telegram/commands/base/base_command.py | captain_hook/services/telegram/commands/base/base_command.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import telegram
class BaseCommand:
    """Base class for Telegram commands.

    Wraps the bot's send calls so that Telegram flood-control back-off
    (RetryAfter) never crashes a command handler; the message is dropped
    instead (deliberate best-effort policy).
    """

    def __init__(self, config):
        self.config = config
        self.telegram_bot = telegram.Bot(self.config["token"])
        self.bot_info = self.telegram_bot.getMe()

    def run(self, messageObj, config):
        """Execute the command; subclasses must override."""
        raise NotImplementedError

    def _call_ignoring_retry(self, method, *args, **kwargs):
        # Shared error policy, previously triplicated across the three
        # send helpers below.
        try:
            method(*args, **kwargs)
        except telegram.error.RetryAfter:
            pass

    def sendMessage(self, chat_id,
                    text,
                    parse_mode=telegram.ParseMode.MARKDOWN,
                    disable_web_page_preview=None,
                    disable_notification=False,
                    reply_to_message_id=None,
                    reply_markup=None,
                    timeout=None,
                    **kwargs):
        """Send a text message, swallowing flood-control errors."""
        self._call_ignoring_retry(
            self.telegram_bot.sendMessage,
            chat_id,
            text,
            parse_mode,
            disable_web_page_preview,
            disable_notification,
            reply_to_message_id,
            reply_markup,
            timeout,
            **kwargs
        )

    def sendPhoto(self, chat_id,
                  photo='',
                  **kwargs):
        """Send a photo, swallowing flood-control errors."""
        self._call_ignoring_retry(
            self.telegram_bot.send_photo, chat_id=chat_id, photo=photo, **kwargs)

    def sendDocument(self, chat_id,
                     document='',
                     **kwargs):
        """Send a document, swallowing flood-control errors."""
        self._call_ignoring_retry(
            self.telegram_bot.send_document,
            chat_id=chat_id, document=document, **kwargs)
| # -*- coding: utf-8 -*-
from __future__ import absolute_import
import telegram
class BaseCommand:
def __init__(self, config):
self.config = config
self.telegram_bot = telegram.Bot(self.config["token"])
self.bot_info = self.telegram_bot.getMe()
def run(self, messageObj, config):
raise NotImplementedError
def sendMessage(self, chat_id,
text,
parse_mode=telegram.ParseMode.MARKDOWN,
disable_web_page_preview=None,
disable_notification=False,
reply_to_message_id=None,
reply_markup=None,
timeout=None,
**kwargs):
try:
self.telegram_bot.sendMessage(chat_id,
text,
parse_mode,
disable_web_page_preview,
disable_notification,
reply_to_message_id,
reply_markup,
timeout,
**kwargs
)
except telegram.error.RetryAfter:
pass
def sendPhoto(self, chat_id,
photo='',
**kwargs):
try:
self.telegram_bot.send_photo(chat_id=chat_id, photo=photo, **kwargs)
except telegram.error.RetryAfter:
pass
def sendDocument(self, chat_id,
document='',
**kwargs):
try:
self.telegram_bot.send_document(chat_id=chat_id, document=document, **kwargs)
except telegram.error.RetryAfter:
pass
| Python | 0.000004 |
6f03120a57d40491e7d8245e10989a3e03b9481d | Set up task list for cook robot | se306/src/package1/scripts/cook.py | se306/src/package1/scripts/cook.py | #!/usr/bin/env python
import roslib
import rospy
import std_msgs.msg
import navigation
from std_msgs.msg import String
class Cook(navigation.Navigation):
    """Cook robot node: queues scheduler commands and walks the cook path.

    NOTE(review): this subclasses navigation.Navigation but never calls
    super().__init__(), composing a separate Navigation("robot_2") as
    self.navigate instead -- confirm whether the inheritance is
    intentional.
    """

    def process_event(self, action_msg):
        """Queue any scheduler message addressed to the cook.

        When a message is passed out from the scheduler, determine whether
        it is relevant to this object. If so, queue it for later execution
        by the main loop.
        """
        message = str(action_msg).split("data: ")[1]
        if ('Cook.cook_' in message):
            self.task_list.append(message)

    def perform_task(self, task):
        """Start walking the cook path for the given queued task.

        NOTE(review): process_event queues any message *containing*
        'Cook.cook_', but this compares with == -- a task with a suffix
        (e.g. 'Cook.cook_dinner') would be silently dropped. Confirm.
        """
        self.status = "active"
        if task == "Cook.cook_":
            # self.cook_path is presumably supplied by the Navigation base
            # class (not defined in this file) -- verify.
            self.navigate.current_path = list(self.cook_path)
            self.navigate.target_coordinate = self.navigate.current_path.pop(0)

    def __init__(self):
        self.rate = rospy.Rate(20)
        self.task_list = []
        self.status = "idle"
        # Create a navigation object which will be used to manage all the calls
        # relating to movement. Passed the robot's name so that the publisher
        # and subscribers for it's navigation can be set up.
        #Eventually we will make this input a variable instead of hardcoded
        self.navigate = navigation.Navigation("robot_2")
        rospy.Subscriber("scheduler", String, self.process_event)
        # Main loop: republish the current move command at 20 Hz, mark the
        # robot idle once its target is exhausted, and start the next
        # queued task whenever idle.
        while not rospy.is_shutdown():
            self.navigate.movement_publisher.publish(self.navigate.move_cmd)
            if (len(self.navigate.target_coordinate) == 0):
                self.status = "idle"
            if (len(self.task_list) > 0 and self.status == "idle"):
                self.perform_task(self.task_list.pop(0))
            self.rate.sleep()
if __name__ == '__main__':
rospy.init_node('cook_robot')
cook = Cook()
| #!/usr/bin/env python
import roslib
import rospy
import std_msgs.msg
import navigation
from std_msgs.msg import String
class Cook(navigation.Navigation):
''' When a message is passed out from the scheduler, determine whether it is
relevant to this object. If so, take the neccessary action
'''
def process_event(self, action_msg):
message = str(action_msg).split("data: ")[1]
if ('Cook.cook_' in message):
self.navigate.current_path = list(self.cook_path)
self.navigate.target_coordinate = self.navigate.current_path.pop(0)
def __init__(self):
self.rate = rospy.Rate(20)
# Create a navigation object which will be used to manage all the calls
# relating to movement. Passed the robot's name so that the publisher
# and subscribers for it's navigation can be set up.
#Eventually we will make this input a variable instead of hardcoded
self.navigate = navigation.Navigation("robot_2")
rospy.Subscriber("scheduler", String, self.process_event)
while not rospy.is_shutdown():
self.navigate.movement_publisher.publish(self.navigate.move_cmd)
self.rate.sleep()
if __name__ == '__main__':
rospy.init_node('cook_robot')
cook = Cook()
| Python | 0.999988 |
da55338b1bfc82bd303a3003fef881ceb3605b28 | Make views time-centric, not date-centric | tracking/views.py | tracking/views.py | import logging
from datetime import timedelta
from django import forms
from django.shortcuts import render
from django.contrib.auth.decorators import permission_required
from django.utils.timezone import now
from tracking.models import Visitor, Pageview
from tracking.settings import TRACK_PAGEVIEWS
log = logging.getLogger(__file__)
# tracking wants to accept more formats than default, here they are
input_formats = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%Y-%m', # '2006-10'
'%Y', # '2006'
]
class DashboardForm(forms.Form):
    """Optional start/end time range selector for the tracking dashboard.

    Both bounds accept the extended `input_formats` defined at module
    level (full timestamps down to a bare year).
    """
    start_time = forms.DateTimeField(
        required=False, input_formats=input_formats)
    end_time = forms.DateTimeField(
        required=False, input_formats=input_formats)
@permission_required('tracking.view_visitor')
def dashboard(request):
    """Render visitor/pageview counts and aggregations for a time range.

    Defaults to the last 24 hours; the range may be overridden via GET
    parameters validated by DashboardForm.
    """
    end_time = now()
    start_time = end_time - timedelta(days=1)
    defaults = {'start_time': start_time, 'end_time': end_time}

    form = DashboardForm(data=request.GET or defaults)
    if form.is_valid():
        start_time = form.cleaned_data['start_time']
        end_time = form.cleaned_data['end_time']

    # determine when tracking began
    try:
        track_start_time = Visitor.objects.earliest('start_time').start_time
    except Visitor.DoesNotExist:
        track_start_time = now()

    # If the start_time is before tracking began, warn about incomplete data
    warn_incomplete = (start_time < track_start_time)

    # The stats helpers receive the selected datetimes directly.
    user_stats = Visitor.objects.user_stats(start_time, end_time)
    visitor_stats = Visitor.objects.stats(start_time, end_time)
    if TRACK_PAGEVIEWS:
        pageview_stats = Pageview.objects.stats(start_time, end_time)
    else:
        pageview_stats = None

    context = {
        'form': form,
        'track_start_time': track_start_time,
        'warn_incomplete': warn_incomplete,
        'user_stats': user_stats,
        'visitor_stats': visitor_stats,
        'pageview_stats': pageview_stats,
    }
    return render(request, 'tracking/dashboard.html', context)
| import logging
from datetime import timedelta
from django import forms
from django.shortcuts import render
from django.contrib.auth.decorators import permission_required
from django.utils.timezone import now
from tracking.models import Visitor, Pageview
from tracking.settings import TRACK_PAGEVIEWS
log = logging.getLogger(__file__)
# tracking wants to accept more formats than default, here they are
input_formats = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%Y-%m', # '2006-10'
'%Y', # '2006'
]
class DashboardForm(forms.Form):
start_time = forms.DateTimeField(
required=False, input_formats=input_formats)
end_time = forms.DateTimeField(
required=False, input_formats=input_formats)
@permission_required('tracking.view_visitor')
def dashboard(request):
"Counts, aggregations and more!"
end_time = now()
start_time = end_time - timedelta(days=1)
defaults = {'start_time': start_time, 'end_time': end_time}
form = DashboardForm(data=request.GET or defaults)
if form.is_valid():
start_time = form.cleaned_data['start_time']
end_time = form.cleaned_data['end_time']
# determine when tracking began
try:
track_start_time = Visitor.objects.earliest('start_time').start_time
except Visitor.DoesNotExist:
track_start_time = now()
# If the start_date is before tracking began, warn about incomplete data
warn_incomplete = (start_time < track_start_time)
# queries take `date` objects (for now)
start_date = start_time.date()
end_date = end_time.date()
user_stats = Visitor.objects.user_stats(start_date, end_date)
visitor_stats = Visitor.objects.stats(start_date, end_date)
if TRACK_PAGEVIEWS:
pageview_stats = Pageview.objects.stats(start_date, end_date)
else:
pageview_stats = None
context = {
'form': form,
'track_start_time': track_start_time,
'warn_incomplete': warn_incomplete,
'user_stats': user_stats,
'visitor_stats': visitor_stats,
'pageview_stats': pageview_stats,
}
return render(request, 'tracking/dashboard.html', context)
| Python | 0.999068 |
419ab74f1bb0c0d46a1547cb1d3bc9ab16d5b925 | Update cindy/setting.py | cindy/settings.py | cindy/settings.py | """
Django settings for cindy project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9suovzr#od0ywfg9e422zpqqx1e0dnp%xaw3vv2y@7mbscpswn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'localhost',
'127.0.0.1',
'heyrict.pythonanywhere.com',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cindy.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cindy.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME':
'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'ja-jp'
TIME_ZONE = 'Japan/Tokyo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| """
Django settings for cindy project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '9suovzr#od0ywfg9e422zpqqx1e0dnp%xaw3vv2y@7mbscpswn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cindy.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'cindy.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# SQLite file next to the project root; fine for development only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| Python | 0 |
d19b72f42801dde328ae1e1d935c5df3a5797d4e | update manage.py for refactored appstate/config modules | app/manage.py | app/manage.py | import os
import sys
import scriptine
from scriptine.shell import sh
from geobox.web import create_app
def babel_init_lang_command(lang):
    "Initialize new language."
    # Create a fresh translation catalog for `lang` from the message template.
    command = 'pybabel init -i geobox/web/translations/messages.pot -d geobox/web/translations -l %s'
    sh(command % (lang,))
def babel_refresh_command():
    "Extract messages and update translation files."
    # Extensions that carry their own translatable strings (currently only
    # wtforms) must be scanned too, so locate their install directories.
    import wtforms
    extension_dirs = ' '.join([os.path.dirname(wtforms.__file__)])
    sh('pybabel extract -F babel.cfg -k lazy_gettext -k _l -o geobox/web/translations/messages.pot geobox/web geobox/model geobox/lib ' + extension_dirs)
    sh('pybabel update -i geobox/web/translations/messages.pot -d geobox/web/translations')
def babel_compile_command():
    "Compile translations."
    # Build the binary .mo catalogs from the .po sources.
    command = 'pybabel compile -d geobox/web/translations'
    sh(command)
def fixtures_command():
    """Recreate the user database and populate it with fixture data."""
    from geobox.appstate import GeoBoxState
    from geobox.model.fixtures import add_fixtures
    # The first initialization tells us where the user database file lives.
    state = GeoBoxState.initialize()
    if os.path.exists(state.db_filename):
        os.remove(state.db_filename)
    # Re-initialize on top of the now-clean database file.
    state = GeoBoxState.initialize()
    session = state.user_db_session()
    add_fixtures(session)
    session.commit()
def init_db_command():
    """Create a fresh, empty user database (schema only, no fixtures)."""
    # BUG FIX: `add_fixtures` was imported here but never used; dropped.
    from geobox.appstate import GeoBoxState
    # The first initialization locates the database file so it can be removed.
    app_state = GeoBoxState.initialize()
    if os.path.exists(app_state.db_filename):
        os.remove(app_state.db_filename)
    # Re-initialize to recreate the schema on a clean file.
    app_state = GeoBoxState.initialize()
    session = app_state.user_db_session()
    session.commit()
def webserver_command(config='./geobox.ini'):
    """Start the development web server using the given .ini configuration.

    Exits the process with status 1 when the configuration cannot be read.
    """
    from geobox.appstate import GeoBoxState
    from geobox.defaults import GeoBoxConfig
    # `config` is rebound from the file name to the parsed configuration.
    config = GeoBoxConfig.from_file(config)
    if not config:
        sys.exit(1)
    app_state = GeoBoxState(config)
    app = create_app(app_state)
    # scriptine removed sub-command from argv,
    # but Flask reloader needs complete sys.argv
    sys.argv[1:1] = ['webserver']
    app.run(port=config.get('web', 'port'))
if __name__ == '__main__':
scriptine.run() | import os
import sys
import scriptine
from scriptine.shell import sh
from geobox.web import create_app
def babel_init_lang_command(lang):
    "Initialize new language."
    # Creates a new translation catalog for `lang` from the message template.
    sh('pybabel init -i geobox/web/translations/messages.pot -d geobox/web/translations -l %s' % (lang,))
def babel_refresh_command():
    "Extract messages and update translation files."
    # get directory of all extension that also use translations
    import wtforms
    wtforms_dir = os.path.dirname(wtforms.__file__)
    extensions = ' '.join([wtforms_dir])
    sh('pybabel extract -F babel.cfg -k lazy_gettext -k _l -o geobox/web/translations/messages.pot geobox/web geobox/model geobox/lib ' + extensions)
    sh('pybabel update -i geobox/web/translations/messages.pot -d geobox/web/translations')
def babel_compile_command():
    "Compile translations."
    # Builds the binary .mo catalogs from the .po sources.
    sh('pybabel compile -d geobox/web/translations')
def fixtures_command():
    """Recreate the user database and populate it with fixture data."""
    from geobox.config import GeoBoxState
    from geobox.model.fixtures import add_fixtures
    # First initialization tells us where the user database file lives.
    app_state = GeoBoxState.initialize()
    if os.path.exists(app_state.db_filename):
        os.remove(app_state.db_filename)
    # Re-initialize on top of the now-clean database file.
    app_state = GeoBoxState.initialize()
    session = app_state.user_db_session()
    add_fixtures(session)
    session.commit()
def init_db_command():
    """Create a fresh, empty user database (schema only, no fixtures)."""
    # BUG FIX: `add_fixtures` was imported here but never used; dropped.
    from geobox.config import GeoBoxState
    # The first initialization locates the database file so it can be removed.
    app_state = GeoBoxState.initialize()
    if os.path.exists(app_state.db_filename):
        os.remove(app_state.db_filename)
    # Re-initialize to recreate the schema on a clean file.
    app_state = GeoBoxState.initialize()
    session = app_state.user_db_session()
    session.commit()
def webserver_command(config='./geobox.ini'):
    """Start the development web server using the given .ini configuration.

    Exits the process with status 1 when the configuration cannot be read.
    """
    from geobox.config import GeoBoxConfig, GeoBoxState
    # `config` is rebound from the file name to the parsed configuration.
    config = GeoBoxConfig.from_file(config)
    if not config:
        sys.exit(1)
    app_state = GeoBoxState(config)
    app = create_app(app_state)
    # scriptine removed sub-command from argv,
    # but Flask reloader needs complete sys.argv
    sys.argv[1:1] = ['webserver']
    app.run(port=config.get('web', 'port'))
if __name__ == '__main__':
scriptine.run() | Python | 0 |
0b77e09ac16006d1baa6a5f4093b51c1a13863e9 | Add as_dict method to Digit model | app/models.py | app/models.py | from app import db
class Digit(db.Model):
    """A digit sample: label, 3-D t-SNE coordinates and serialized array data."""
    __tablename__ = 'digits'
    id = db.Column(db.INTEGER, primary_key=True)
    label = db.Column(db.INTEGER)
    # Coordinates of this digit in a 3-D t-SNE embedding.
    tsne_x = db.Column(db.REAL)
    tsne_y = db.Column(db.REAL)
    tsne_z = db.Column(db.REAL)
    # presumably a serialized pixel/feature array -- TODO confirm against the loader
    array = db.Column(db.String)
    def __repr__(self):
        return '<Digit %d %d>' % (self.id, self.label)
    def as_dict(self, fields=None):
        """Return this row as a dict.

        When *fields* is falsy (None or an empty sequence) every mapped
        column is included; otherwise only the named attributes are returned.
        """
        if not fields:
            return {c.name: getattr(self, c.name) for c in self.__table__.columns}
        else:
            return {c: getattr(self, c) for c in fields}
| from app import db
class Digit(db.Model):
    """A digit sample: label, 3-D t-SNE coordinates, array data and image blob."""
    id = db.Column(db.INTEGER, primary_key=True)
    label = db.Column(db.INTEGER)
    # Coordinates of this digit in a 3-D t-SNE embedding.
    tsne_x = db.Column(db.REAL)
    tsne_y = db.Column(db.REAL)
    tsne_z = db.Column(db.REAL)
    # presumably a serialized pixel/feature array -- TODO confirm against the loader
    array = db.Column(db.String)
    image = db.Column(db.BLOB)
    def __repr__(self):
        return '<Digit %d %d>' % (self.id, self.label)
| Python | 0.000004 |
0929935874570fce5b64f3abbece781a4133b565 | Disable flaky NaCl SRPC integration tests. | chrome/test/nacl_test_injection/buildbot_nacl_integration.py | chrome/test/nacl_test_injection/buildbot_nacl_integration.py | #!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
def Main(args):
  """Run the NaCl integration test stage; returns its exit status.

  Extra command-line arguments in *args* are forwarded to the stage script.
  """
  # Heuristic: NaCl's own integration bots have 'nacl-chrome' in their cwd.
  pwd = os.environ.get('PWD', '')
  is_integration_bot = 'nacl-chrome' in pwd
  # On the main Chrome waterfall, we may need to control where the tests are
  # run.
  # If there is serious skew in the PPAPI interface that causes all of
  # the NaCl integration tests to fail, you can uncomment the
  # following block. (Make sure you comment it out when the issues
  # are resolved.) *However*, it is much preferred to add tests to
  # the 'tests_to_disable' list below.
  #if not is_integration_bot:
  #  return
  tests_to_disable = []
  # In general, you should disable tests inside this conditional. This turns
  # them off on the main Chrome waterfall, but not on NaCl's integration bots.
  # This makes it easier to see when things have been fixed NaCl side.
  if not is_integration_bot:
    # TODO(ncbray): Reenable when this issue is resolved.
    # http://code.google.com/p/nativeclient/issues/detail?id=2091
    tests_to_disable.append('run_ppapi_bad_browser_test')
    # This thread safety stress test is flaky on at least Windows.
    # See http://code.google.com/p/nativeclient/issues/detail?id=2124
    # TODO(mseaborn): Reenable when this issue is resolved.
    tests_to_disable.append('run_ppapi_ppb_var_browser_test')
    # The behavior of the URLRequest changed slightly and this test needs to be
    # updated. http://code.google.com/p/chromium/issues/detail?id=94352
    tests_to_disable.append('run_ppapi_ppb_url_request_info_browser_test')
    # TODO(ncbray) why did these tests flake?
    # http://code.google.com/p/nativeclient/issues/detail?id=2230
    tests_to_disable.extend([
        'run_pm_manifest_file_chrome_browser_test',
        'run_srpc_basic_chrome_browser_test',
        'run_srpc_hw_data_chrome_browser_test',
        'run_srpc_hw_chrome_browser_test',
        'run_srpc_manifest_file_chrome_browser_test',
        'run_srpc_nameservice_chrome_browser_test',
        'run_srpc_nrd_xfer_chrome_browser_test',
    ])
  if sys.platform == 'darwin':
    # The following test is failing on Mac OS X 10.5. This may be
    # because of a kernel bug that we might need to work around.
    # See http://code.google.com/p/nativeclient/issues/detail?id=1835
    # TODO(mseaborn): Remove this when the issue is resolved.
    tests_to_disable.append('run_async_messaging_test')
    # The following test fails on debug builds of Chromium.
    # See http://code.google.com/p/nativeclient/issues/detail?id=2077
    # TODO(mseaborn): Remove this when the issue is resolved.
    tests_to_disable.append('run_ppapi_example_font_test')
  # Locate native_client's stage script relative to this file:
  # src/chrome/test/nacl_test_injection/ -> src/
  script_dir = os.path.dirname(os.path.abspath(__file__))
  test_dir = os.path.dirname(script_dir)
  chrome_dir = os.path.dirname(test_dir)
  src_dir = os.path.dirname(chrome_dir)
  nacl_integration_script = os.path.join(
      src_dir, 'native_client/build/buildbot_chrome_nacl_stage.py')
  cmd = [sys.executable,
         nacl_integration_script,
         '--disable_tests=%s' % ','.join(tests_to_disable)] + args
  sys.stdout.write('Running %s\n' % ' '.join(cmd))
  sys.stdout.flush()
  return subprocess.call(cmd)
if __name__ == '__main__':
  # Propagate the stage's exit status to the buildbot.
  sys.exit(Main(sys.argv[1:]))
| #!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
def Main(args):
  """Run the NaCl integration test stage; returns its exit status.

  Extra command-line arguments in *args* are forwarded to the stage script.
  """
  # Heuristic: NaCl's own integration bots have 'nacl-chrome' in their cwd.
  pwd = os.environ.get('PWD', '')
  is_integration_bot = 'nacl-chrome' in pwd
  # On the main Chrome waterfall, we may need to control where the tests are
  # run.
  # If there is serious skew in the PPAPI interface that causes all of
  # the NaCl integration tests to fail, you can uncomment the
  # following block. (Make sure you comment it out when the issues
  # are resolved.) *However*, it is much preferred to add tests to
  # the 'tests_to_disable' list below.
  #if not is_integration_bot:
  #  return
  tests_to_disable = []
  # In general, you should disable tests inside this conditional. This turns
  # them off on the main Chrome waterfall, but not on NaCl's integration bots.
  # This makes it easier to see when things have been fixed NaCl side.
  if not is_integration_bot:
    # TODO(ncbray): Reenable when this issue is resolved.
    # http://code.google.com/p/nativeclient/issues/detail?id=2091
    tests_to_disable.append('run_ppapi_bad_browser_test')
    # This thread safety stress test is flaky on at least Windows.
    # See http://code.google.com/p/nativeclient/issues/detail?id=2124
    # TODO(mseaborn): Reenable when this issue is resolved.
    tests_to_disable.append('run_ppapi_ppb_var_browser_test')
    # The behavior of the URLRequest changed slightly and this test needs to be
    # updated. http://code.google.com/p/chromium/issues/detail?id=94352
    tests_to_disable.append('run_ppapi_ppb_url_request_info_browser_test')
  if sys.platform == 'darwin':
    # The following test is failing on Mac OS X 10.5. This may be
    # because of a kernel bug that we might need to work around.
    # See http://code.google.com/p/nativeclient/issues/detail?id=1835
    # TODO(mseaborn): Remove this when the issue is resolved.
    tests_to_disable.append('run_async_messaging_test')
    # The following test fails on debug builds of Chromium.
    # See http://code.google.com/p/nativeclient/issues/detail?id=2077
    # TODO(mseaborn): Remove this when the issue is resolved.
    tests_to_disable.append('run_ppapi_example_font_test')
  # Locate native_client's stage script relative to this file:
  # src/chrome/test/nacl_test_injection/ -> src/
  script_dir = os.path.dirname(os.path.abspath(__file__))
  test_dir = os.path.dirname(script_dir)
  chrome_dir = os.path.dirname(test_dir)
  src_dir = os.path.dirname(chrome_dir)
  nacl_integration_script = os.path.join(
      src_dir, 'native_client/build/buildbot_chrome_nacl_stage.py')
  cmd = [sys.executable,
         nacl_integration_script,
         '--disable_tests=%s' % ','.join(tests_to_disable)] + args
  sys.stdout.write('Running %s\n' % ' '.join(cmd))
  sys.stdout.flush()
  return subprocess.call(cmd)
if __name__ == '__main__':
  # Propagate the stage's exit status to the buildbot.
  sys.exit(Main(sys.argv[1:]))
| Python | 0.000069 |
73f8895ae00f3d076c73bc49a03b870abb2a30cc | Fix typo | app/models.py | app/models.py | from django.db import models
import mongoengine
from mongoengine import Document, EmbeddedDocument
from mongoengine.fields import *
import os
# Create your models here.
class Greeting(models.Model):
    """Minimal model recording when a greeting row was created."""
    # Timestamp set automatically on first save.
    when = models.DateTimeField('date created', auto_now_add=True)
# MongoDB Atlas credentials come from the environment.
USER = os.getenv('DATABASE_USER')
PASSWORD = os.getenv('DATABASE_PASSWORD')
# BUG FIX: a leftover `.format(USER, PASWORD)` call was appended to the
# already-concatenated URI; `PASWORD` no longer exists in this module, so
# importing it raised a NameError. Plain concatenation is all that is needed.
MONGODB_URI = "mongodb+srv://" + USER + ":" + PASSWORD + "@fikanotedb.ltkpy.mongodb.net/fikanotedb?retryWrites=true&w=majority"
mongoengine.connect('fikanotedb', host=MONGODB_URI)
class Shownote(EmbeddedDocument):
    """A single show-note link (url/title/date) embedded in a FikanoteDB doc."""
    url = URLField()
    title = StringField()
    date = DateTimeField()
class FikanoteDB(Document):
    """An episode record stored in the ``fikanotedb`` collection."""
    title = StringField()
    number = IntField()
    # Names of the participants.
    person = ListField(StringField())
    agenda = StringField()
    date = DateTimeField()
    shownotes = ListField(EmbeddedDocumentField(Shownote))
    meta = {'collection': 'fikanotedb'}
class AgendaDB(Document):
    """An agenda link record stored in the ``agendadb`` collection."""
    url = URLField()
    title = StringField()
    date = DateTimeField()
    meta = {'collection': 'agendadb'}
| from django.db import models
import mongoengine
from mongoengine import Document, EmbeddedDocument
from mongoengine.fields import *
import os
# Create your models here.
class Greeting(models.Model):
    """Minimal model recording when a greeting row was created."""
    # Timestamp set automatically on first save.
    when = models.DateTimeField('date created', auto_now_add=True)
# MongoDB Atlas credentials come from the environment.
USER = os.getenv('DATABASE_USER')
# BUG FIX: the variable was misspelled `PASWORD`.
PASSWORD = os.getenv('DATABASE_PASSWORD')
MONGODB_URI = "mongodb+srv://{}:{}@fikanotedb.ltkpy.mongodb.net/fikanotedb?retryWrites=true&w=majority".format(USER, PASSWORD)
mongoengine.connect('fikanotedb', host=MONGODB_URI)
class Shownote(EmbeddedDocument):
    """A single show-note link (url/title/date) embedded in a FikanoteDB doc."""
    url = URLField()
    title = StringField()
    date = DateTimeField()
class FikanoteDB(Document):
    """An episode record stored in the ``fikanotedb`` collection."""
    title = StringField()
    number = IntField()
    # Names of the participants.
    person = ListField(StringField())
    agenda = StringField()
    date = DateTimeField()
    shownotes = ListField(EmbeddedDocumentField(Shownote))
    meta = {'collection': 'fikanotedb'}
class AgendaDB(Document):
    """An agenda link record stored in the ``agendadb`` collection."""
    url = URLField()
    title = StringField()
    date = DateTimeField()
    meta = {'collection': 'agendadb'}
| Python | 0.999999 |
f181ef90e1a7a8e1c5676a4ffaf50ee8469305eb | Tweak armdecomp3.py | armdecomp3.py | armdecomp3.py | #!/usr/bin/env python3
import sys
from sys import argv, stdout
from os import SEEK_SET, SEEK_CUR, SEEK_END
from errno import EPIPE
from struct import pack, unpack
def bits(byte):
    """Return the 8 bits of *byte* as a tuple, most significant bit first."""
    return tuple((byte >> shift) & 1 for shift in range(7, -1, -1))
def decompress(indata, decompressed_size):
    """Decompress LZSS-compressed bytes. Returns a bytearray.

    Each flag byte describes the next 8 items (MSB first): a 0 bit means
    one literal byte, a 1 bit means a big-endian 16-bit back-reference
    (high nibble: run length - 3; low 12 bits: distance - 3).
    """
    out = bytearray()
    stream = iter(indata)
    def take_short():
        # big-endian 16-bit back-reference descriptor
        hi = next(stream)
        lo = next(stream)
        return (hi << 8) | lo
    while len(out) < decompressed_size:
        flag_byte = next(stream)
        if flag_byte == 0:
            # All eight flags clear: copy eight literal bytes straight through.
            for _ in range(8):
                out.append(next(stream))
            continue
        for flag in bits(flag_byte):
            if flag == 0:
                try:
                    out.append(next(stream))
                except StopIteration:
                    # Input exhausted mid-literal: return what we have.
                    return out
            elif flag == 1:
                descriptor = take_short()
                run_length = (descriptor >> 0xc) + 3
                # +3 for overlays
                # +1 for files
                distance = (descriptor & 0xfff) + 3
                for _ in range(run_length):
                    out.append(out[-distance])
            else:
                raise ValueError(flag)
            if decompressed_size <= len(out):
                break
    assert len(out) == decompressed_size
    return out
def main(args):
    """Decompress the backwards-LZSS file named in args[0] to stdout.

    The file carries an 8-byte footer describing how far before/after the
    footer the decompressed data starts and ends; the compressed stream is
    stored back-to-front.
    """
    f = open(args[0], "rb")
    # grab the underlying binary stream
    stdout = sys.stdout.detach()
    # the compression header is at the end of the file
    f.seek(-8, SEEK_END)
    header = f.read(8)
    # decompression goes backwards.
    # end < here < start
    # end_delta == here - decompression end address
    # start_delta == decompression start address - here
    end_delta, start_delta = unpack("<LL", header)
    filelen = f.tell()
    # top byte of end_delta is the padding length; the low 24 bits the offset
    padding = end_delta >> 0x18
    end_delta &= 0xFFFFFF
    decompressed_size = start_delta + end_delta
    f.seek(filelen - end_delta, SEEK_SET)
    data = bytearray()
    data.extend(f.read(end_delta - padding))
    # the stream is stored back-to-front, so reverse before decompressing
    data.reverse()
    #stdout.write(data.tostring())
    uncompressed_data = decompress(data, decompressed_size)
    uncompressed_data.reverse()
    f.seek(0, SEEK_SET)
    # first we write up to the portion of the file which was "overwritten" by
    # the decompressed data, then the decompressed data itself.
    # i wonder if it's possible for decompression to overtake the compressed
    # data, so that the decompression code is reading its own output...
    try:
        stdout.write(f.read(filelen - end_delta))
        stdout.write(uncompressed_data)
    except IOError as e:
        if e.errno == EPIPE:
            # don't complain about a broken pipe
            pass
        else:
            raise
def main2(args):
    """Decompress a forward (headered) LZSS file named in args[0] to stdout.

    The first four bytes are the compression header: a 0x10 marker followed
    by the 24-bit little-endian decompressed size; the payload follows.
    """
    f = open(args[0], "rb")
    data = f.read()
    # BUG FIX: decompress() now takes the decompressed size explicitly (it no
    # longer parses the header itself), so parse the 4-byte header here.
    assert data[0] == 0x10
    decompressed_size, = unpack("<L", data[1:4] + b"\x00")
    stdout = sys.stdout.detach()
    stdout.write(decompress(data[4:], decompressed_size))
if __name__ == '__main__':
    main(argv[1:])
    #main2(argv[1:])
| #!/usr/bin/env python3
import sys
from sys import argv, stdout
from os import SEEK_SET, SEEK_CUR, SEEK_END
from errno import EPIPE
from struct import pack, unpack
def bits(byte):
    """Return the 8 bits of *byte* as a tuple, most significant bit first."""
    return ((byte >> 7) & 1,
            (byte >> 6) & 1,
            (byte >> 5) & 1,
            (byte >> 4) & 1,
            (byte >> 3) & 1,
            (byte >> 2) & 1,
            (byte >> 1) & 1,
            (byte) & 1)
def decompress(indata):
    """Decompress LZSS-compressed bytes. Returns a bytearray.

    The input starts with a 4-byte header (0x10 marker, then the 24-bit
    little-endian decompressed size) followed by the compressed stream.
    """
    data = bytearray()
    it = iter(indata)
    def writebyte(b):
        data.append(b)
    def readbyte():
        return next(it)
    def readshort():
        # big-endian
        a = next(it)
        b = next(it)
        return (a << 8) | b
    def copybyte():
        data.append(next(it))
    # Parse the 4-byte header described above.
    header = bytes(next(it) for _ in range(4))
    assert header[0] == 0x10
    decompressed_size, = unpack("<L", header[1:] + b"\x00")
    while len(data) < decompressed_size:
        b = readbyte()
        if b == 0:
            # dumb optimization: all eight flags clear -> eight literals
            for _ in range(8):
                copybyte()
            continue
        flags = bits(b)
        for flag in flags:
            if flag == 0:
                # literal byte
                try:
                    copybyte()
                except StopIteration:
                    return data
            elif flag == 1:
                # back-reference: high nibble is length-3, low 12 bits distance-3
                sh = readshort()
                count = (sh >> 0xc) + 3
                # +3 for overlays
                # +1 for files
                disp = (sh & 0xfff) + 3
                for _ in range(count):
                    writebyte(data[-disp])
            else:
                raise ValueError(flag)
            if decompressed_size <= len(data):
                break
    assert len(data) == decompressed_size
    #extra = f.read()
    #assert len(extra) == 0, repr(extra)
    return data
def main(args):
    """Decompress the backwards-LZSS file named in args[0] to stdout.

    The file carries an 8-byte footer describing how far before/after the
    footer the decompressed data starts and ends; the compressed stream is
    stored back-to-front.
    """
    f = open(args[0], "rb")
    # grab the underlying binary stream
    stdout = sys.stdout.detach()
    # the compression header is at the end of the file
    f.seek(-8, SEEK_END)
    header = f.read(8)
    # decompression goes backwards.
    # end < here < start
    # end_delta == here - decompression end address
    # start_delta == decompression start address - here
    end_delta, start_delta = unpack("<LL", header)
    filelen = f.tell()
    # top byte of end_delta is the padding length; the low 24 bits the offset
    padding = end_delta >> 0x18
    end_delta &= 0xFFFFFF
    decompressed_size = start_delta + end_delta
    f.seek(filelen - end_delta, SEEK_SET)
    # synthesize the forward header (0x10 + 24-bit size) that decompress()
    # expects; it is appended reversed because the whole buffer gets reversed
    header = b'\x10' + pack("<L", decompressed_size)[:3]
    data = bytearray()
    data.extend(f.read(end_delta - padding))
    data.extend(header[::-1])
    data.reverse()
    #stdout.write(data.tostring())
    uncompressed_data = decompress(data)
    uncompressed_data.reverse()
    f.seek(0, SEEK_SET)
    # first we write up to the portion of the file which was "overwritten" by
    # the decompressed data, then the decompressed data itself.
    # i wonder if it's possible for decompression to overtake the compressed
    # data, so that the decompression code is reading its own output...
    try:
        stdout.write(f.read(filelen - end_delta))
        stdout.write(uncompressed_data)
    except IOError as e:
        if e.errno == EPIPE:
            # don't complain about a broken pipe
            pass
        else:
            raise
def main2(args):
    """Decompress a forward (headered) LZSS file named in args[0] to stdout.

    decompress() itself parses the 4-byte header at the start of the data.
    """
    f = open(args[0], "rb")
    data = f.read()
    stdout = sys.stdout.detach()
    stdout.write(decompress(data))
if __name__ == '__main__':
    main(argv[1:])
    #main2(argv[1:])
| Python | 0 |
69d9a36eb9d4536d9999395016759ec0ba23ad82 | Fix playlist preview function | zou/app/services/playlists_service.py | zou/app/services/playlists_service.py | from zou.app.models.playlist import Playlist
from zou.app.models.preview_file import PreviewFile
from zou.app.utils import fields
from zou.app.services import shots_service, tasks_service
from zou.app.services.exception import PlaylistNotFoundException
def all_playlists_for_project(project_id):
    """Return every playlist of the given project, serialized."""
    playlists = Playlist.get_all_by(project_id=project_id)
    return fields.serialize_value(playlists)
def get_playlist_with_preview_file_revisions(playlist_id):
    """Return the playlist as a dict with preview revisions attached per shot.

    Raises PlaylistNotFoundException when no playlist matches *playlist_id*.
    """
    playlist = Playlist.get(playlist_id)
    if playlist is None:
        raise PlaylistNotFoundException()
    playlist_dict = playlist.serialize()
    # Normalize a missing shot list so the loop below always runs.
    if playlist_dict["shots"] is None:
        playlist_dict["shots"] = []
    for shot in playlist_dict["shots"]:
        shot["preview_files"] = get_preview_files_for_shot(
            shot["shot_id"]
        )
    return playlist_dict
def get_preview_files_for_shot(shot_id):
    """Map each task type id of the shot to its preview files, newest first.

    Task types without preview files are omitted from the result.
    """
    tasks = tasks_service.get_tasks_for_shot(shot_id)
    previews = {}
    for task in tasks:
        # All preview files of this task, most recent revision first.
        preview_files = PreviewFile.query \
            .filter_by(task_id=task["id"]) \
            .order_by(PreviewFile.revision.desc()) \
            .all()
        task_type_id = task["task_type_id"]
        if len(preview_files) > 0:
            previews[task_type_id] = [
                {
                    "id": str(preview_file.id),
                    "revision": preview_file.revision
                }
                for preview_file in preview_files
            ] # Keep entries small: do not add more fields, responses get big
    return previews
| from zou.app.models.playlist import Playlist
from zou.app.models.preview_file import PreviewFile
from zou.app.utils import fields
from zou.app.services import shots_service, tasks_service
from zou.app.services.exception import PlaylistNotFoundException
def all_playlists_for_project(project_id):
    """Return every playlist of the given project, serialized."""
    return fields.serialize_value(Playlist.get_all_by(project_id=project_id))
def get_playlist_with_preview_file_revisions(playlist_id):
    """Return the playlist as a dict with preview revisions attached per shot.

    Raises PlaylistNotFoundException when no playlist matches *playlist_id*.
    """
    playlist = Playlist.get(playlist_id)
    if playlist is None:
        raise PlaylistNotFoundException()
    playlist_dict = playlist.serialize()
    # Normalize a missing shot list so the loop below always runs.
    if playlist_dict["shots"] is None:
        playlist_dict["shots"] = []
    for shot in playlist_dict["shots"]:
        # NOTE(review): this delegates to shots_service although a local
        # get_preview_files_for_shot exists below -- confirm the
        # shots_service helper actually exists.
        shot["preview_files"] = shots_service.get_preview_files_for_shot(
            shot["shot_id"]
        )
    return playlist_dict
def get_preview_files_for_shot(shot_id):
    """Map each task type id of the shot to its preview files, newest first.

    Task types without preview files are omitted from the result.
    """
    tasks = tasks_service.get_tasks_for_shot(shot_id)
    previews = {}
    for task in tasks:
        # All preview files of this task, most recent revision first.
        preview_files = PreviewFile.query \
            .filter_by(task_id=task["id"]) \
            .order_by(PreviewFile.revision.desc()) \
            .all()
        task_type_id = task["task_type_id"]
        if len(preview_files) > 0:
            previews[task_type_id] = [
                {
                    "id": str(preview_file.id),
                    "revision": preview_file.revision
                }
                for preview_file in preview_files
            ] # Keep entries small: do not add more fields, responses get big
    return previews
| Python | 0.000019 |
75f28330cd5cf0eea2ec99d8c3f9bf53de18d46c | correct typo | aot/config.py | aot/config.py | import logging
import toml
from os.path import exists
class Config:
    """Lazily-loaded application configuration backed by a TOML file."""

    CONF_FILE_TEMPLATE = 'config/config.{type}.toml'

    def __init__(self):
        self._config = None

    def __getitem__(self, key):
        if self._config is not None:
            return self._config[key]
        raise RuntimeError(
            'Configuration is not loaded. '
            'Call load_config(type) before trying to use the configuration',
        )

    def load_config(self, type, version='latest'):
        """Read config/config.<type>.toml; in dev, fall back to the docker file."""
        config_path = self.CONF_FILE_TEMPLATE.format(type=type)
        if type == 'dev' and not exists(config_path):
            docker_config_file = self.CONF_FILE_TEMPLATE.format(type='docker')
            logging.info(f'Note: {config_path} not found, using {docker_config_file}')
            config_path = docker_config_file
        with open(config_path, 'r') as config_file:
            self._config = toml.load(config_file)
        for section in ('api', 'cache'):
            self._set_version_in_socket_name(section, version)

    def _set_version_in_socket_name(self, section_name, version):
        """Substitute {version} into the section's socket path, when present."""
        section = self._config[section_name]
        socket = section.get('socket', None)
        if not socket:
            return
        section['socket'] = socket.format(version=version)

# Application-wide configuration singleton.
config = Config()
| import logging
import toml
from os.path import exists
class Config:
    """Application configuration loaded on demand from a TOML file."""
    CONF_FILE_TEMPLATE = 'config/config.{type}.toml'
    def __init__(self):
        self._config = None
    def __getitem__(self, key):
        if self._config is None:
            # Typo fix in the error message: 'coniguration' -> 'configuration'.
            raise RuntimeError(
                'Configuration is not loaded. '
                'Call load_config(type) before trying to use the configuration',
            )
        else:
            return self._config[key]
    def load_config(self, type, version='latest'):
        """Read config/config.<type>.toml; in dev, fall back to the docker file."""
        config_path = self.CONF_FILE_TEMPLATE.format(type=type)
        if type == 'dev' and not exists(config_path):
            docker_config_file = self.CONF_FILE_TEMPLATE.format(type='docker')
            logging.info(f'Note: {config_path} not found, using {docker_config_file}')
            config_path = docker_config_file
        with open(config_path, 'r') as config_file:
            self._config = toml.load(config_file)
        self._set_version_in_socket_name('api', version)
        self._set_version_in_socket_name('cache', version)
    def _set_version_in_socket_name(self, section_name, version):
        # Substitute {version} into the section's socket path, when present.
        socket = self._config[section_name].get('socket', None)
        if socket:
            socket = socket.format(version=version)
            self._config[section_name]['socket'] = socket
# Application-wide configuration singleton.
config = Config()
| Python | 0.000888 |
f2b49f524319cc6df2f6fcaabff114cc9156faf7 | make 'urls' to be consistent. | OIPA/api/dataset/serializers.py | OIPA/api/dataset/serializers.py | from django.urls import reverse
from rest_framework.serializers import (
HyperlinkedIdentityField, HyperlinkedRelatedField, ModelSerializer,
SerializerMethodField
)
from api.generics.serializers import DynamicFieldsModelSerializer
from iati.models import Activity
from iati_synchroniser.models import Dataset, DatasetNote, Publisher
class DatasetNoteSerializer(ModelSerializer):
    """Serializes parser notes (warnings/errors) recorded for a dataset."""
    class Meta:
        model = DatasetNote
        # BUG FIX: 'model' was listed twice in this tuple.
        fields = (
            'model',
            'iati_identifier',
            'exception_type',
            'field',
            'message',
            'line_number',
            'variable')
class SimplePublisherSerializer(DynamicFieldsModelSerializer):
    """Compact publisher representation with a self link."""
    url = HyperlinkedIdentityField(view_name='publishers:publisher-detail')
    class Meta:
        model = Publisher
        fields = (
            'id',
            'url',
            'publisher_iati_id',
            'display_name',
            'name')
class SimpleDatasetSerializer(DynamicFieldsModelSerializer):
    """Compact dataset representation with links to itself and its publisher."""
    url = HyperlinkedIdentityField(view_name='datasets:dataset-detail')
    publisher = HyperlinkedRelatedField(
        view_name='publishers:publisher-detail',
        read_only=True)
    # Human-readable form of the filetype choice field.
    type = SerializerMethodField()
    class Meta:
        model = Dataset
        fields = (
            'id',
            'iati_id',
            'type',
            'url',
            'name',
            'title',
            'filetype',
            'publisher',
            'source_url',
            'iati_version',
            'added_manually',
        )
    def get_type(self, obj):
        return obj.get_filetype_display()
class DatasetSerializer(DynamicFieldsModelSerializer):
    """Full dataset representation: links, counts, notes and internal URL."""
    url = HyperlinkedIdentityField(view_name='datasets:dataset-detail')
    publisher = SimplePublisherSerializer()
    filetype = SerializerMethodField()
    activities = SerializerMethodField()
    activity_count = SerializerMethodField()
    notes = HyperlinkedIdentityField(
        view_name='datasets:dataset-notes',)
    # BUG FIX: a stray `DatasetNoteSerializer(many=True, source="datasetnote_set")`
    # expression statement sat here; it built a serializer and immediately
    # discarded it, so it has been removed.
    internal_url = SerializerMethodField()
    class Meta:
        model = Dataset
        fields = (
            'id',
            'iati_id',
            'url',
            'name',
            'title',
            'filetype',
            'publisher',
            'source_url',
            'activities',
            'activity_count',
            'activities_count_in_xml',
            'activities_count_in_database',
            'date_created',
            'date_updated',
            'last_found_in_registry',
            'iati_version',
            'sha1',
            'note_count',
            'notes',
            'added_manually',
            'is_parsed',
            'export_in_progress',
            'parse_in_progress',
            'internal_url'
        )
    def get_filetype(self, obj):
        return obj.get_filetype_display()
    def get_activities(self, obj):
        """Absolute URL of the activity list filtered down to this dataset."""
        request = self.context.get('request')
        url = request.build_absolute_uri(reverse('activities:activity-list'))
        # Reuse `request` instead of fetching it from the context a second time.
        request_format = request.query_params.get('format')
        # NOTE(review): when no ?format= is given this appends 'format=None';
        # confirm whether that is intended.
        return url + '?dataset=' + str(obj.id) + '&format={}'.format(
            request_format)
    def get_activity_count(self, obj):
        return Activity.objects.filter(dataset=obj.id).count()
    def get_internal_url(self, obj):
        request = self.context.get('request')
        # Get internal url from the XML file in the local static folder
        internal_url = obj.get_internal_url()
        if internal_url:
            return request.build_absolute_uri(internal_url)
        return None
| from django.urls import reverse
from rest_framework.serializers import (
HyperlinkedIdentityField, HyperlinkedRelatedField, ModelSerializer,
SerializerMethodField
)
from api.generics.serializers import DynamicFieldsModelSerializer
from iati.models import Activity
from iati_synchroniser.models import Dataset, DatasetNote, Publisher
class DatasetNoteSerializer(ModelSerializer):
    """Serializes parser notes (warnings/errors) recorded for a dataset."""
    class Meta:
        model = DatasetNote
        # NOTE(review): 'model' appears twice in this tuple.
        fields = (
            'model',
            'iati_identifier',
            'exception_type',
            'model',
            'field',
            'message',
            'line_number',
            'variable')
class SimplePublisherSerializer(DynamicFieldsModelSerializer):
    """Compact publisher representation with a self link."""
    url = HyperlinkedIdentityField(view_name='publishers:publisher-detail')
    class Meta:
        model = Publisher
        fields = (
            'id',
            'url',
            'publisher_iati_id',
            'display_name',
            'name')
class SimpleDatasetSerializer(DynamicFieldsModelSerializer):
    """Compact dataset representation with links to itself and its publisher."""
    url = HyperlinkedIdentityField(view_name='datasets:dataset-detail')
    publisher = HyperlinkedRelatedField(
        view_name='publishers:publisher-detail',
        read_only=True)
    # Human-readable form of the filetype choice field.
    type = SerializerMethodField()
    class Meta:
        model = Dataset
        fields = (
            'id',
            'iati_id',
            'type',
            'url',
            'name',
            'title',
            'filetype',
            'publisher',
            'source_url',
            'iati_version',
            'added_manually',
        )
    def get_type(self, obj):
        return obj.get_filetype_display()
class DatasetSerializer(DynamicFieldsModelSerializer):
    """Full dataset representation: links, counts, notes and internal URL."""
    url = HyperlinkedIdentityField(view_name='datasets:dataset-detail')
    publisher = SimplePublisherSerializer()
    filetype = SerializerMethodField()
    activities = SerializerMethodField()
    activity_count = SerializerMethodField()
    notes = HyperlinkedIdentityField(
        view_name='datasets:dataset-notes',)
    # NOTE(review): the next statement builds a serializer and discards it.
    DatasetNoteSerializer(many=True, source="datasetnote_set")
    internal_url = SerializerMethodField()
    class Meta:
        model = Dataset
        fields = (
            'id',
            'iati_id',
            'url',
            'name',
            'title',
            'filetype',
            'publisher',
            'source_url',
            'activities',
            'activity_count',
            'activities_count_in_xml',
            'activities_count_in_database',
            'date_created',
            'date_updated',
            'last_found_in_registry',
            'iati_version',
            'sha1',
            'note_count',
            'notes',
            'added_manually',
            'is_parsed',
            'export_in_progress',
            'parse_in_progress',
            'internal_url'
        )
    def get_filetype(self, obj):
        return obj.get_filetype_display()
    def get_activities(self, obj):
        """Absolute URL of the activity list filtered down to this dataset."""
        request = self.context.get('request')
        url = request.build_absolute_uri(reverse('activities:activity-list'))
        return url + '?dataset=' + str(obj.id)
    def get_activity_count(self, obj):
        return Activity.objects.filter(dataset=obj.id).count()
    def get_internal_url(self, obj):
        request = self.context.get('request')
        # Get internal url from the XML file in the local static folder
        internal_url = obj.get_internal_url()
        if internal_url:
            return request.build_absolute_uri(internal_url)
        return None
| Python | 0.000002 |
fa6c7b32284bc4159e95b7bc339dab7517b2c255 | add sql example | client/ReadAir.py | client/ReadAir.py | # -*- coding: utf-8 -*-
import serial, time, MySQLdb, re
from socketIO_client import SocketIO, LoggingNamespace
''' SQL to create database:
CREATE DATABASE IF NOT EXISTS `airnow` DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci;
'''
''' SQL to create table:
CREATE TABLE IF NOT EXISTS `air_logs` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`pm25` float NOT NULL,
`aqi` int(11) NOT NULL,
`time` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
'''
# (fixed the SQL example above: the `time` column definition was missing the
# trailing comma before PRIMARY KEY, so the statement would not execute)
# Open the MySQL connection reused for every periodic insert.
conn = MySQLdb.connect(host="localhost", user="airnow", passwd="password",
                      db="airnow", charset="utf8")
sql = "INSERT INTO air_logs(`pm25`,`aqi`,`time`) VALUES(%s,%s,NOW())"
sensor = serial.Serial("com4", 2400)  # serial port and baudrate
readings_since_insert = 0
with SocketIO('localhost', 8000, LoggingNamespace) as socketIO:  # connect socket.io server
    while True:
        readings_since_insert += 1
        # Renamed from `str`, which shadowed the builtin.
        reading = sensor.readline()  # e.g. 'PM2.5:11.53; AQI:15;'
        socketIO.emit('airnow', reading)  # push the raw reading to clients
        # Persist roughly one sample every 30 readings (~30 seconds).
        if readings_since_insert == 30:
            readings_since_insert = 0
            cursor = conn.cursor()
            vals = re.split('[:; ]', reading)
            param = (vals[1], vals[4])  # PM2.5 value and AQI value
            cursor.execute(sql, param)  # parameterised insert
            cursor.execute("commit")
            cursor.close()
# close mysql connection
conn.close() | # -*- coding: utf-8 -*-
import serial, time, MySQLdb, re
from socketIO_client import SocketIO, LoggingNamespace
# open a mysql connection
conn=MySQLdb.connect(host="localhost",user="airnow",passwd="password",db="airnow",charset="utf8")
# NOTE(review): the example below is missing a comma after the `time` column
# definition, so it will not execute as written.
''' SQL to create table:
CREATE TABLE IF NOT EXISTS `air_logs` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`pm25` float NOT NULL,
`aqi` int(11) NOT NULL,
`time` datetime NOT NULL DEFAULT '0000-00-00 00:00:00'
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
'''
# Parameterised insert used for the periodic samples below.
sql = "INSERT INTO air_logs(`pm25`,`aqi`,`time`) VALUES(%s,%s,NOW())"
t = serial.Serial("com4", 2400) # serial port and baudrate
i = 0
with SocketIO('localhost', 8000, LoggingNamespace) as socketIO: # connect socket.io server
    while True:
        i = i + 1
        str = t.readline() # read from serial port (NOTE(review): shadows builtin `str`)
        socketIO.emit('airnow', str) # raise event to socket.io server
        # record data to mysql
        if i == 30: # about 30 seconds insert 1 record to database
            i = 0 # reset counter
            cursor = conn.cursor()
            vals = re.split('[:; ]', str) # the str gotten from serial port is: "PM2.5:11.53; AQI:15;"
            param = (vals[1], vals[4]) # put PM2.5 value and AQI value to param
            n = cursor.execute(sql, param) # execute the sql query
            cursor.execute("commit")
            #print str #Debug
            cursor.close()
# close mysql connection
# NOTE(review): the close below is unreachable -- the `while True` loop
# above never exits normally.
conn.close() | Python | 0.000243 |
2f2861f153d0ba0d088ffe95b196b4154b59ce31 | Replace constants with literal value. | seqr/management/commands/check_bam_cram_paths_tests.py | seqr/management/commands/check_bam_cram_paths_tests.py | import mock
from io import BytesIO
from django.core.management import call_command
from django.test import TestCase
class CheckBamCramPathsTest(TestCase):
    """Tests for the `check_bam_cram_paths` management command output."""
    fixtures = ['users', '1kg_project']
    @mock.patch('seqr.views.utils.dataset_utils.validate_alignment_dataset_path')
    def test_normal_command(self, mock_validate_path):
        """With the path validator stubbed to succeed, the command still
        reports the fixture sample whose .cram file is inaccessible."""
        mock_validate_path.return_value = ""
        out = BytesIO()
        call_command('check_bam_cram_paths', u'1kg project n\u00e5me with uni\u00e7\u00f8de', stdout=out)
        self.assertEqual('Error at /readviz/NA19675.cram (Individual: NA19675_1): Error accessing "/readviz/NA19675.cram" \n---- DONE ----\nChecked 1 samples\n1 failed samples: NA19675_1\n',
                         out.getvalue())
    @mock.patch('seqr.views.utils.dataset_utils.validate_alignment_dataset_path')
    def test_exception_command(self, mock_validate_path):
        """A validator exception is reported per-sample in the output."""
        mock_validate_path.side_effect = Exception('Error accessing "/readviz/NA19675.cram"')
        out = BytesIO()
        call_command('check_bam_cram_paths', u'1kg project n\u00e5me with uni\u00e7\u00f8de', stdout=out)
        self.assertEqual('Error at /readviz/NA19675.cram (Individual: NA19675_1): Error accessing "/readviz/NA19675.cram" \n---- DONE ----\nChecked 1 samples\n1 failed samples: NA19675_1\n',
                         out.getvalue())
| import mock
from io import BytesIO
from django.core.management import call_command
from django.test import TestCase
# Expected full stdout of the command for each scenario; both runs report
# the same inaccessible-sample failure.
EXPECTED_EXCEPTION_MSG = 'Error at /readviz/NA19675.cram (Individual: NA19675_1): Error accessing "/readviz/NA19675.cram" \n---- DONE ----\nChecked 1 samples\n1 failed samples: NA19675_1\n'
EXPECTED_NORMAL_MSG = 'Error at /readviz/NA19675.cram (Individual: NA19675_1): Error accessing "/readviz/NA19675.cram" \n---- DONE ----\nChecked 1 samples\n1 failed samples: NA19675_1\n'
class CheckBamCramPathsTest(TestCase):
    """Tests for the `check_bam_cram_paths` management command output."""
    fixtures = ['users', '1kg_project']
    @mock.patch('seqr.views.utils.dataset_utils.validate_alignment_dataset_path')
    def test_normal_command(self, mock_validate_path):
        """With the path validator stubbed to succeed, the command still
        reports the fixture sample whose .cram file is inaccessible."""
        mock_validate_path.return_value = ""
        out = BytesIO()
        call_command('check_bam_cram_paths', u'1kg project n\u00e5me with uni\u00e7\u00f8de', stdout=out)
        self.assertEqual(EXPECTED_NORMAL_MSG, out.getvalue())
    @mock.patch('seqr.views.utils.dataset_utils.validate_alignment_dataset_path')
    def test_exception_command(self, mock_validate_path):
        """A validator exception is reported per-sample in the output."""
        mock_validate_path.side_effect = Exception('Error accessing "/readviz/NA19675.cram"')
        out = BytesIO()
        call_command('check_bam_cram_paths', u'1kg project n\u00e5me with uni\u00e7\u00f8de', stdout=out)
        self.assertEqual(EXPECTED_EXCEPTION_MSG, out.getvalue())
| Python | 0.000003 |
66f2e9cc8085f51348c797d5a6a2b011370e4c2f | Edit method for pages | fudcon/ui/backend/views.py | fudcon/ui/backend/views.py | # -*- coding: utf-8 -*-
from flask import (Blueprint,
redirect, render_template,
url_for, flash)
from fudcon.app import is_fudcon_admin, app
from fudcon.database import db
from fudcon.modules.contents.forms import AddPage
from fudcon.modules.contents.models import Content
bp = Blueprint('admin', __name__, url_prefix='/admin')
items_per_page = app.config['ITEMS_PER_PAGE']
@bp.route('/', methods=['GET', 'POST'])
@is_fudcon_admin
def index():
    """Render the administration dashboard landing page."""
    return render_template('backend/index.html', title='Administration')
@bp.route('/pages', methods=['GET', 'POST'])
@bp.route('pages/<int:page>', methods=['GET', 'POST'])
@is_fudcon_admin
def pages(page=1):
    """List stored pages, paginated at ITEMS_PER_PAGE entries per page."""
    queryset = Content.query.paginate(page, items_per_page, False)
    return render_template('backend/pages.html', title='List pages',
                           pages=queryset)
@bp.route('/pages/add', methods=['GET', 'POST'])
@is_fudcon_admin  # added: every other admin view is guarded; this was not
def add_page():
    """Create a new page from the AddPage form and redirect to the list."""
    form = AddPage()
    action = url_for('admin.add_page')
    if form.validate_on_submit():
        content = Content(title=form.title.data,
                          description=form.description.data,
                          content_type=form.content_type.data,
                          is_on_user_menu=form.is_on_user_menu.data,
                          tag=form.tag.data,
                          active=form.active.data)
        db.session.add(content)
        db.session.commit()
        flash('Page created')
        # The list view's endpoint is 'pages'; 'admin.page' does not exist
        # and would raise a BuildError.
        return redirect(url_for('admin.pages'))
    return render_template('backend/pages_actions.html',
                           form=form,
                           title='Add page',
                           action=action)
@bp.route('/pages/edit/<int:page_id>', methods=['GET', 'POST'])
@is_fudcon_admin
def edit_page(page_id):
    """Edit an existing page identified by *page_id* (404 if unknown)."""
    query_edit_page = Content.query.filter(Content.id ==
                                           page_id).first_or_404()
    form = AddPage(obj=query_edit_page)
    action = url_for('admin.edit_page', page_id=page_id)
    if form.validate_on_submit():
        # Copy the submitted fields back onto the model and persist.
        form.populate_obj(query_edit_page)
        db.session.commit()
        flash('Page edited')
        # The list view's endpoint is 'pages'; 'admin.page' does not exist.
        return redirect(url_for('admin.pages'))
    # Fixed: the template name was missing its '.html' extension, and the
    # title is passed for consistency with add_page.
    return render_template('backend/pages_actions.html',
                           form=form,
                           title='Edit page',
                           action=action)
| # -*- coding: utf-8 -*-
from flask import (Blueprint,
redirect, render_template,
url_for, flash)
from fudcon.app import is_fudcon_admin, app
from fudcon.database import db
from fudcon.modules.contents.forms import AddPage
from fudcon.modules.contents.models import Content
bp = Blueprint('admin', __name__, url_prefix='/admin')
items_per_page = app.config['ITEMS_PER_PAGE']
@bp.route('/', methods=['GET', 'POST'])
@is_fudcon_admin
def index():
    """ Admin blueprint for this application
    """
    return render_template('backend/index.html',
                           title='Administration')
@bp.route('/pages', methods=['GET','POST'])
@bp.route('pages/<int:page>', methods=['GET', 'POST'])
@is_fudcon_admin
def pages(page=1):
    # Paginate at ITEMS_PER_PAGE entries per page; page numbers start at 1.
    paginate_params = (page, items_per_page, False)
    queryset = Content.query.paginate(*paginate_params)
    return render_template('backend/pages.html',
                           title='List pages',
                           pages=queryset)
@bp.route('/pages/add', methods=['GET', 'POST'])
# NOTE(review): unlike the other admin views this endpoint has no
# @is_fudcon_admin guard -- confirm whether that is intentional.
def add_page():
    """ Add page to the application
    """
    form = AddPage()
    action = url_for('admin.add_page')
    if form.validate_on_submit():
        content = Content(title=form.title.data,
                          description=form.description.data,
                          content_type=form.content_type.data,
                          is_on_user_menu=form.is_on_user_menu.data,
                          tag=form.tag.data,
                          active=form.active.data)
        db.session.add(content)
        db.session.commit()
        flash('Page created')
        # NOTE(review): no view in this blueprint is named 'page'; the list
        # view's endpoint is 'admin.pages', so this url_for will fail.
        return redirect(url_for('admin.page'))
    return render_template('backend/pages_actions.html',
                           form=form,
                           title='Add page',
                           action=action)
| Python | 0.000001 |
a6ac3a7a0955fab9cce1d2866a064ff6d4943dd0 | bump version to 0.1.2 | json5/version.py | json5/version.py | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = '0.1.2'  # package version string
| # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = '0.1.1'  # package version string
| Python | 0.000001 |
8d40b5369a8c38477f004ac2eff467efe44ff3ce | Split device log processing by section #prefactor | corehq/ex-submodules/phonelog/utils.py | corehq/ex-submodules/phonelog/utils.py | from corehq.apps.users.util import format_username
from corehq.apps.users.dbaccessors import get_user_id_by_username
from phonelog.models import UserEntry, DeviceReportEntry
def device_users_by_xform(xform_id):
    """Return the distinct usernames recorded against the given form id."""
    entries = UserEntry.objects.filter(xform_id__exact=xform_id)
    usernames = entries.distinct('username').values_list('username', flat=True)
    return list(usernames)
def _force_list(obj_or_list):
return obj_or_list if isinstance(obj_or_list, list) else [obj_or_list]
def _get_logs(form, report_name, report_slug):
report = form.get(report_name, {}) or {}
if isinstance(report, list):
return filter(None, [log.get(report_slug) for log in report])
return report.get(report_slug, [])
def process_device_log(domain, xform):
    """Persist both the user and log sub-reports of a device-log submission."""
    _process_user_subreport(xform)
    _process_log_subreport(domain, xform)
def _process_user_subreport(xform):
    """Replace the stored UserEntry rows for this form submission."""
    userlogs = _get_logs(xform.form_data, 'user_subreport', 'user')
    # Clear rows from any earlier processing of the same form id;
    # DeviceReportEntry is cleared here too, presumably so a re-submission
    # replaces both sub-reports' rows -- TODO confirm.
    UserEntry.objects.filter(xform_id=xform.form_id).delete()
    DeviceReportEntry.objects.filter(xform_id=xform.form_id).delete()
    to_save = []
    for i, log in enumerate(_force_list(userlogs)):
        to_save.append(UserEntry(
            xform_id=xform.form_id,
            i=i,
            user_id=log["user_id"],
            username=log["username"],
            sync_token=log["sync_token"],
        ))
    UserEntry.objects.bulk_create(to_save)
def _process_log_subreport(domain, xform):
    """Store each log entry as a DeviceReportEntry, tracking the user who
    was logged in at the time of each entry (from preceding login entries)."""
    form_data = xform.form_data
    logs = _get_logs(form_data, 'log_subreport', 'log')
    # The login user carries forward to all subsequent entries.
    logged_in_username = None
    logged_in_user_id = None
    to_save = []
    for i, log in enumerate(_force_list(logs)):
        if not log:
            continue
        if log["type"] == 'login':
            # j2me log = user_id_prefix-username
            logged_in_username = log["msg"].split('-')[1]
            cc_username = format_username(logged_in_username, domain)
            logged_in_user_id = get_user_id_by_username(cc_username)
        elif log["type"] == 'user' and log["msg"][:5] == 'login':
            # android log = login|username|user_id
            msg_split = log["msg"].split('|')
            logged_in_username = msg_split[1]
            logged_in_user_id = msg_split[2]
        to_save.append(DeviceReportEntry(
            xform_id=xform.form_id,
            i=i,
            domain=domain,
            type=log["type"],
            msg=log["msg"],
            # must accept either date or datetime string
            date=log["@date"],
            server_date=xform.received_on,
            app_version=form_data.get('app_version'),
            device_id=form_data.get('device_id'),
            username=logged_in_username,
            user_id=logged_in_user_id,
        ))
    DeviceReportEntry.objects.bulk_create(to_save)
| from corehq.apps.users.util import format_username
from corehq.apps.users.dbaccessors import get_user_id_by_username
from phonelog.models import UserEntry, DeviceReportEntry
def device_users_by_xform(xform_id):
    """Return the distinct usernames recorded against the given form id."""
    return list(
        UserEntry.objects.filter(xform_id__exact=xform_id)
        .distinct('username').values_list('username', flat=True)
    )
def _force_list(obj_or_list):
return obj_or_list if isinstance(obj_or_list, list) else [obj_or_list]
def _get_logs(form, report_name, report_slug):
report = form.get(report_name, {}) or {}
if isinstance(report, list):
return filter(None, [log.get(report_slug) for log in report])
return report.get(report_slug, [])
def process_device_log(domain, xform):
    """Persist both the user and log sub-reports of a device-log submission."""
    form_data = xform.form_data
    # --- user sub-report: replace the stored UserEntry rows -------------
    userlogs = _get_logs(form_data, 'user_subreport', 'user')
    UserEntry.objects.filter(xform_id=xform.form_id).delete()
    DeviceReportEntry.objects.filter(xform_id=xform.form_id).delete()
    to_save = []
    for i, log in enumerate(_force_list(userlogs)):
        to_save.append(UserEntry(
            xform_id=xform.form_id,
            i=i,
            user_id=log["user_id"],
            username=log["username"],
            sync_token=log["sync_token"],
        ))
    UserEntry.objects.bulk_create(to_save)
    # --- log sub-report: one DeviceReportEntry per entry -----------------
    logs = _get_logs(form_data, 'log_subreport', 'log')
    # The login user carries forward to all subsequent entries.
    logged_in_username = None
    logged_in_user_id = None
    to_save = []
    for i, log in enumerate(_force_list(logs)):
        if not log:
            continue
        if log["type"] == 'login':
            # j2me log = user_id_prefix-username
            logged_in_username = log["msg"].split('-')[1]
            cc_username = format_username(logged_in_username, domain)
            logged_in_user_id = get_user_id_by_username(cc_username)
        elif log["type"] == 'user' and log["msg"][:5] == 'login':
            # android log = login|username|user_id
            msg_split = log["msg"].split('|')
            logged_in_username = msg_split[1]
            logged_in_user_id = msg_split[2]
        to_save.append(DeviceReportEntry(
            xform_id=xform.form_id,
            i=i,
            domain=domain,
            type=log["type"],
            msg=log["msg"],
            # must accept either date or datetime string
            date=log["@date"],
            server_date=xform.received_on,
            app_version=form_data.get('app_version'),
            device_id=form_data.get('device_id'),
            username=logged_in_username,
            user_id=logged_in_user_id,
        ))
    DeviceReportEntry.objects.bulk_create(to_save)
| Python | 0 |
007c6283b3ed05b31f1cb9a2dd5a3166f465b828 | Move graphs.js to dot dir | generator/generate_data.py | generator/generate_data.py | # -*- coding: utf-8 -*-
import itertools
import json
import multiprocessing
import os
import re
import shutil
import urllib
import workerpool
import jobs
# Jenkins endpoints for the latest completed performance-test build.
LAST_BUILD_URL_BASE = ('https://fuel-jenkins.mirantis.com/job/'
                       'nailgun_performance_tests/lastCompletedBuild/')
LAST_BUILD_INFO = LAST_BUILD_URL_BASE + 'api/json'
LAST_BUILD_TAR_BASE = LAST_BUILD_URL_BASE + 'artifact/results/results/'
CSV_URL = LAST_BUILD_URL_BASE + 'artifact/nailgun/nailgun_perf_test_report.csv'
CSV_TARGET_PATH = '/usr/share/nginx/html/test_report.csv'
DOT_TARGET_DIR = 'dot/'
DOT_INDEX_PATH = 'dot/graphs.json'
# Build number processed by a previous run; 0 if unknown or corrupt.
try:
    with open('build_number', 'r') as bn_file:
        previous_build_number = int(bn_file.read())
except (IOError, ValueError):
    previous_build_number = 0
current_build_info = json.loads(urllib.urlopen(LAST_BUILD_INFO).read())
current_build_number = current_build_info['number']
if current_build_number > previous_build_number:
    # Remember the newest build so later runs can detect staleness.
    with open('build_number', 'w') as bn_file:
        bn_file.write(str(current_build_number))
urllib.urlretrieve(CSV_URL, CSV_TARGET_PATH)
# Start from an empty dot/ directory on every run.
shutil.rmtree(DOT_TARGET_DIR)
os.mkdir(DOT_TARGET_DIR)
# Download every tar.gz artifact of the build with a small worker pool.
arts = [x['fileName'] for x in current_build_info['artifacts'] if 'tar.gz' in x['fileName']]
pool = workerpool.WorkerPool(size=2)
for filename in arts:
    job = jobs.DownloadArtifactJob(
        LAST_BUILD_TAR_BASE + filename,
        DOT_TARGET_DIR,
        filename
    )
    pool.put(job)
pool.shutdown()
pool.wait()
# Extracted entries that are neither archives nor text files are test dirs.
tests = [x for x in os.listdir(DOT_TARGET_DIR) if 'tar.gz' not in x and
         'txt' not in x]
processing_jobs = []
for test in tests:
    # Test name = directory name with any numeric/dot/underscore prefix removed.
    name = re.search(r'[^0-9._].*', test).group(0)
    extractor = jobs.GraphExtractor(DOT_TARGET_DIR + test, name)
    for graph in extractor.get_files():
        job = jobs.ProcessGraphJob(graph, name)
        processing_jobs.append(job)
def run_job(job):
    """Execute one graph-processing job and describe its output."""
    job.run()
    return {
        'test_name': job.test_name,
        'graph': job.graph,
    }
process_pool = multiprocessing.Pool(2)
processed_data_index = process_pool.map(run_job, processing_jobs)
process_pool.close()
# groupby merges *consecutive* runs only; this relies on processing_jobs
# having been appended test-by-test above (pool.map preserves input order).
graphs_index = {k: list(v) for k,v in itertools.groupby(processed_data_index, lambda x : x['test_name'])}
with open(DOT_INDEX_PATH, 'w') as graphs_file:
    graphs_file.write(json.dumps(graphs_index))
| # -*- coding: utf-8 -*-
import itertools
import json
import multiprocessing
import os
import re
import shutil
import urllib
import workerpool
import jobs
# Jenkins endpoints for the latest completed performance-test build.
LAST_BUILD_URL_BASE = ('https://fuel-jenkins.mirantis.com/job/'
                       'nailgun_performance_tests/lastCompletedBuild/')
LAST_BUILD_INFO = LAST_BUILD_URL_BASE + 'api/json'
LAST_BUILD_TAR_BASE = LAST_BUILD_URL_BASE + 'artifact/results/results/'
CSV_URL = LAST_BUILD_URL_BASE + 'artifact/nailgun/nailgun_perf_test_report.csv'
CSV_TARGET_PATH = '/usr/share/nginx/html/test_report.csv'
DOT_TARGET_DIR = 'dot/'
DOT_INDEX_PATH = 'graphs.json'
# Build number processed by a previous run; 0 if unknown or corrupt.
try:
    with open('build_number', 'r') as bn_file:
        previous_build_number = int(bn_file.read())
except (IOError, ValueError):
    previous_build_number = 0
current_build_info = json.loads(urllib.urlopen(LAST_BUILD_INFO).read())
current_build_number = current_build_info['number']
if current_build_number > previous_build_number:
    # Remember the newest build so later runs can detect staleness.
    with open('build_number', 'w') as bn_file:
        bn_file.write(str(current_build_number))
urllib.urlretrieve(CSV_URL, CSV_TARGET_PATH)
# Start from an empty dot/ directory on every run.
shutil.rmtree(DOT_TARGET_DIR)
os.mkdir(DOT_TARGET_DIR)
# Download every tar.gz artifact of the build with a small worker pool.
arts = [x['fileName'] for x in current_build_info['artifacts'] if 'tar.gz' in x['fileName']]
pool = workerpool.WorkerPool(size=2)
for filename in arts:
    job = jobs.DownloadArtifactJob(
        LAST_BUILD_TAR_BASE + filename,
        DOT_TARGET_DIR,
        filename
    )
    pool.put(job)
pool.shutdown()
pool.wait()
# Extracted entries that are neither archives nor text files are test dirs.
tests = [x for x in os.listdir(DOT_TARGET_DIR) if 'tar.gz' not in x and
         'txt' not in x]
processing_jobs = []
for test in tests:
    # Test name = directory name with any numeric/dot/underscore prefix removed.
    name = re.search(r'[^0-9._].*', test).group(0)
    extractor = jobs.GraphExtractor(DOT_TARGET_DIR + test, name)
    for graph in extractor.get_files():
        job = jobs.ProcessGraphJob(graph, name)
        processing_jobs.append(job)
def run_job(job):
    """Execute one graph-processing job and describe its output."""
    job.run()
    return {'test_name': job.test_name, 'graph': job.graph}
process_pool = multiprocessing.Pool(2)
processed_data_index = process_pool.map(run_job, processing_jobs)
process_pool.close()
# groupby merges *consecutive* runs only; this relies on processing_jobs
# having been appended test-by-test above (pool.map preserves input order).
graphs_index = {k: list(v) for k,v in itertools.groupby(processed_data_index, lambda x : x['test_name'])}
with open(DOT_INDEX_PATH, 'w') as graphs_file:
    graphs_file.write(json.dumps(graphs_index))
| Python | 0.000001 |
8ba62b47d2d94eb56122f9061b8309e06cc62cdd | add .get() | kibitzr/stash.py | kibitzr/stash.py | import contextlib
import logging
logger = logging.getLogger(__name__)
class Stash(object):
    """Tiny persistent key/value store backed by a `shelve` database file."""
    FILENAME = 'stash.db'
    @contextlib.contextmanager
    def open(self):
        """Context manager yielding the open shelve DB; closes it on exit."""
        import shelve
        with contextlib.closing(shelve.open(self.FILENAME)) as db:
            yield db
    def read(self):
        """Return the entire stash contents as a plain dict."""
        with self.open() as db:
            return dict(db)
    def write(self, data):
        """Merge the *data* mapping into the stash on disk."""
        with self.open() as db:
            for key, value in data.items():
                db[key] = value
    @classmethod
    def print_content(cls):
        """Dump every key/value pair to stdout (debug helper)."""
        for key, value in cls().read().items():
            print("{0}: {1}".format(key, value))
class LazyStash(Stash):
    """Stash whose contents are loaded from disk on first access and cached."""

    def __init__(self):
        self._stashobj = None  # cache; populated lazily by `_stash`

    @property
    def _stash(self):
        if self._stashobj is None:
            self._stashobj = self.read()
        return self._stashobj

    def __getitem__(self, key):
        return self._stash[key]

    def get(self, key, default=None):
        """Dict-style lookup: return *default* when *key* is missing."""
        return self._stash.get(key, default)
| import contextlib
import logging
logger = logging.getLogger(__name__)
class Stash(object):
    """Tiny persistent key/value store backed by a `shelve` database file."""
    FILENAME = 'stash.db'
    @contextlib.contextmanager
    def open(self):
        """Context manager yielding the open shelve DB; closes it on exit."""
        import shelve
        with contextlib.closing(shelve.open(self.FILENAME)) as db:
            yield db
    def read(self):
        """Return the entire stash contents as a plain dict."""
        with self.open() as db:
            return dict(db)
    def write(self, data):
        """Merge the *data* mapping into the stash on disk."""
        with self.open() as db:
            for key, value in data.items():
                db[key] = value
    @classmethod
    def print_content(cls):
        """Dump every key/value pair to stdout (debug helper)."""
        for key, value in cls().read().items():
            print("{0}: {1}".format(key, value))
class LazyStash(Stash):
    """Stash that loads its contents from disk on first access and caches them."""
    def __init__(self):
        # Cached contents; None until the first access of `stash`.
        self._stash = None
    @property
    def stash(self):
        if self._stash is None:
            self._stash = self.read()
        return self._stash
    def __getitem__(self, key):
        return self.stash[key]
| Python | 0.000001 |
c749d82035f72b9d57c52dfc2dbdd70f42fbdf66 | add missing import | ktbh/__init__.py | ktbh/__init__.py | import sys
import time
import pika
import json
import landing_page
def hand_off(amqp_host, out_queue, body):
    """Publish *body* to the durable queue *out_queue* on *amqp_host*.

    delivery_mode=2 marks the message persistent; a fresh connection is
    opened and closed per call.
    """
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=amqp_host))
    channel = connection.channel()
    channel.queue_declare(queue=out_queue, durable=True)
    channel.basic_publish(exchange='',
                          routing_key=out_queue,
                          body=body,
                          properties=pika.BasicProperties(delivery_mode=2))
    connection.close()
def add_landing_page(url, config):
    """Queue *url* for landing-page processing, using host/queue from *config*."""
    amqp_host = config.get("main", "amqp_host")
    out_queue = config.get("main", "lp_queue")
    payload = json.dumps({
        "url": url
    })
    hand_off(amqp_host, out_queue, payload)
def get_connection(host):
    """Open a blocking AMQP connection to *host*, retrying with backoff.

    Sleeps 0.4s after the first failure, growing by x1.7 per attempt; once
    the wait would reach 60s the process exits with status 1.
    """
    count = 0.4
    while count < 60:
        try:
            connection = pika.BlockingConnection(
                pika.ConnectionParameters(host=host))
            return connection
        except Exception:
            # Broker not reachable yet: back off and retry. `except Exception`
            # (rather than a bare except) lets KeyboardInterrupt/SystemExit
            # escape instead of being retried.
            time.sleep(count)
            count *= 1.7
    sys.exit(1)
def handle_queue(amqp_host, queue_name, callback_fn):
    """Consume *queue_name* on *amqp_host*, one unacked message at a time,
    dispatching each to *callback_fn*; the connection is always closed.
    """
    connection = get_connection(amqp_host)
    try:
        channel = connection.channel()
        channel.queue_declare(queue=queue_name, durable=True)
        channel.basic_qos(prefetch_count=1)
        channel.basic_consume(callback_fn, queue=queue_name)
        channel.start_consuming()
    except Exception:
        # Best-effort: a broken channel/consumer ends this consume cycle and
        # the caller reconnects. The previous bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit, which now propagate.
        pass
    finally:
        connection.close()
def examine_landing_pages(config):
    """Consume landing-page jobs forever, emitting one URL job per link.

    Pages that yield no links at all are handed to the broken-page queue.
    """
    out_queue = config.get("main", "lp_queue")
    url_queue = config.get("main", "url_queue")
    broken_queue = config.get("main", "broken_lp_queue")
    amqp_host = config.get("main", "amqp_host")
    def callback(ch, method, properties, body):
        try:
            args = json.loads(body)
            url = args["url"]
            count = 0
            for text, href in landing_page.scrape(url):
                payload = json.dumps({
                    "link_text": text,
                    "link_href": href
                })
                hand_off(amqp_host, url_queue, payload)
                count += 1
            if count == 0:
                # No links scraped: report the landing page as broken.
                hand_off(amqp_host, broken_queue, json.dumps({"url": url}))
        finally:
            # Always ack, so a failing page is not redelivered forever.
            ch.basic_ack(delivery_tag = method.delivery_tag)
    while True:
        # handle_queue returns when the connection/consumer dies; loop to reconnect.
        handle_queue(amqp_host, out_queue, callback)
| import time
import pika
import json
import landing_page
def hand_off(amqp_host, out_queue, body):
    """Publish *body* to the durable queue *out_queue* on *amqp_host*.

    delivery_mode=2 marks the message persistent; a fresh connection is
    opened and closed per call.
    """
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=amqp_host))
    channel = connection.channel()
    channel.queue_declare(queue=out_queue, durable=True)
    channel.basic_publish(exchange='',
                          routing_key=out_queue,
                          body=body,
                          properties=pika.BasicProperties(delivery_mode=2))
    connection.close()
def add_landing_page(url, config):
    """Queue *url* for landing-page processing, using host/queue from *config*."""
    amqp_host = config.get("main", "amqp_host")
    out_queue = config.get("main", "lp_queue")
    payload = json.dumps({
        "url": url
    })
    hand_off(amqp_host, out_queue, payload)
def get_connection(host):
    """Open a blocking AMQP connection to *host*, retrying with backoff.

    Sleeps 0.4s after the first failure, growing by x1.7 per attempt; once
    the wait would reach 60s the process exits with status 1.
    """
    # `sys` is not imported at module level in this file, yet sys.exit is
    # used below -- import it here so exhausting the retries exits cleanly
    # instead of raising NameError.
    import sys
    count = 0.4
    while count < 60:
        try:
            connection = pika.BlockingConnection(
                pika.ConnectionParameters(host=host))
            return connection
        except Exception:
            # Back off and retry; `except Exception` (not a bare except)
            # lets KeyboardInterrupt/SystemExit escape.
            time.sleep(count)
            count *= 1.7
    sys.exit(1)
def handle_queue(amqp_host, queue_name, callback_fn):
    """Consume *queue_name*, one unacked message at a time, via *callback_fn*."""
    connection = get_connection(amqp_host)
    try:
        channel = connection.channel()
        channel.queue_declare(queue=queue_name, durable=True)
        channel.basic_qos(prefetch_count=1)
        channel.basic_consume(callback_fn, queue=queue_name)
        channel.start_consuming()
    except:
        # NOTE(review): this bare except also swallows KeyboardInterrupt and
        # SystemExit; `except Exception` would be safer.
        pass
    finally:
        connection.close()
def examine_landing_pages(config):
    """Consume landing-page jobs forever, emitting one URL job per link.

    Pages that yield no links at all are handed to the broken-page queue.
    """
    out_queue = config.get("main", "lp_queue")
    url_queue = config.get("main", "url_queue")
    broken_queue = config.get("main", "broken_lp_queue")
    amqp_host = config.get("main", "amqp_host")
    def callback(ch, method, properties, body):
        try:
            args = json.loads(body)
            url = args["url"]
            count = 0
            for text, href in landing_page.scrape(url):
                payload = json.dumps({
                    "link_text": text,
                    "link_href": href
                })
                hand_off(amqp_host, url_queue, payload)
                count += 1
            if count == 0:
                # No links scraped: report the landing page as broken.
                hand_off(amqp_host, broken_queue, json.dumps({"url": url}))
        finally:
            # Always ack, so a failing page is not redelivered forever.
            ch.basic_ack(delivery_tag = method.delivery_tag)
    while True:
        # handle_queue returns when the connection/consumer dies; loop to reconnect.
        handle_queue(amqp_host, out_queue, callback)
| Python | 0.000042 |
2d08761a898ba96ff84fdbecef4a6d71cdb54926 | Use base64 to store pickled data in text fields. | src/django_future/models.py | src/django_future/models.py | import datetime
import cPickle
from django.db import models
from django.conf import settings
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
__all__ = ['ScheduledJob']
END_OF_TIME = datetime.datetime(2047, 9, 14)
class ScheduledJob(models.Model):
    """A deferred callable persisted in the database.

    ``args``/``kwargs`` are stored pickled and base64-encoded in text
    columns; the callable is addressed either by dotted module path or by
    method name on ``content_object``.
    """
    # Lifecycle states a job moves through.
    STATUSES = (
        ('scheduled', 'Scheduled'),
        ('running', 'Running'),
        ('failed', 'Failed'),
        ('complete', 'Complete'),
        ('expired', 'Expired'),
    )
    time_slot_start = models.DateTimeField()
    time_slot_end = models.DateTimeField()
    execution_start = models.DateTimeField(blank=True, null=True)
    status = models.CharField(choices=STATUSES, max_length=32,
                              default='scheduled')
    content_type = models.ForeignKey(ContentType, blank=True, null=True)
    object_id = models.PositiveIntegerField(blank=True, null=True)
    content_object = generic.GenericForeignKey()
    callable_name = models.CharField(max_length=255)
    args_pickled = models.TextField()
    kwargs_pickled = models.TextField()
    def _get_args(self):
        return self._unpickle(self.args_pickled)
    def _set_args(self, value):
        self.args_pickled = self._pickle(value)
    args = property(_get_args, _set_args)
    def _get_kwargs(self):
        return self._unpickle(self.kwargs_pickled)
    def _set_kwargs(self, value):
        self.kwargs_pickled = self._pickle(value)
    kwargs = property(_get_kwargs, _set_kwargs)
    def _pickle(self, value):
        # base64 keeps the pickle bytes safe inside a text column.
        return cPickle.dumps(value).encode('base64')
    def _unpickle(self, s):
        # NOTE(review): unpickling executes arbitrary code; only trusted
        # data must ever reach these columns.
        return cPickle.loads(str(s).decode('base64'))
    def __repr__(self):
        return '<ScheduledJob (%s) callable=%r>' % (
            self.status, self.callable_name)
    def run(self):
        """Resolve ``callable_name`` and invoke it with the stored args."""
        # TODO: logging?
        args = self.args
        kwargs = self.kwargs
        if '.' in self.callable_name:
            # Dotted path: import the module; content_object (if set) is
            # passed as the first positional argument.
            module_name, function_name = self.callable_name.rsplit('.', 1)
            module = __import__(module_name, fromlist=[function_name])
            callable_func = getattr(module, function_name)
            if self.content_object is not None:
                args = [self.content_object] + list(args)
        else:
            # Bare name: treat it as a method of content_object.
            callable_func = getattr(self.content_object, self.callable_name)
        if hasattr(callable_func, 'job_as_parameter'):
            # Callables marked with `job_as_parameter` receive the job itself.
            args = [self] + list(args)
        callable_func(*args, **kwargs)
    def reschedule(self, date, callable_name=None, content_object=None,
                   expires='7d', args=None, kwargs=None):
        """Schedule a clone of this job."""
        if callable_name is None:
            callable_name = self.callable_name
        if content_object is None:
            content_object = self.content_object
        if args is None:
            args = self.args
        if kwargs is None:
            kwargs = self.kwargs
        from django_future import schedule_job
        return schedule_job(date, callable_name, content_object=content_object,
                            expires=expires, args=args, kwargs=kwargs)
| import datetime
import cPickle
from django.db import models
from django.conf import settings
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
__all__ = ['ScheduledJob']
END_OF_TIME = datetime.datetime(2047, 9, 14)
class ScheduledJob(models.Model):
    """A deferred callable persisted in the database.

    ``args``/``kwargs`` are stored pickled in text columns; the callable is
    addressed either by dotted module path or by method name on
    ``content_object``.
    """
    # Lifecycle states a job moves through.
    STATUSES = (
        ('scheduled', 'Scheduled'),
        ('running', 'Running'),
        ('failed', 'Failed'),
        ('complete', 'Complete'),
        ('expired', 'Expired'),
    )
    time_slot_start = models.DateTimeField()
    time_slot_end = models.DateTimeField()
    execution_start = models.DateTimeField(blank=True, null=True)
    status = models.CharField(choices=STATUSES, max_length=32,
                              default='scheduled')
    content_type = models.ForeignKey(ContentType, blank=True, null=True)
    object_id = models.PositiveIntegerField(blank=True, null=True)
    content_object = generic.GenericForeignKey()
    callable_name = models.CharField(max_length=255)
    args_pickled = models.TextField()
    kwargs_pickled = models.TextField()
    # NOTE(review): raw pickle output is stored in a TextField here; pickle
    # data is not guaranteed to be valid text, so consider encoding it
    # (e.g. base64) before storage.
    def _get_args(self):
        return cPickle.loads(str(self.args_pickled))
    def _set_args(self, value):
        self.args_pickled = cPickle.dumps(tuple(value))
    args = property(_get_args, _set_args)
    def _get_kwargs(self):
        return cPickle.loads(str(self.kwargs_pickled))
    def _set_kwargs(self, value):
        self.kwargs_pickled = cPickle.dumps(value)
    kwargs = property(_get_kwargs, _set_kwargs)
    def __repr__(self):
        return '<ScheduledJob (%s) callable=%r>' % (
            self.status, self.callable_name)
    def run(self):
        """Resolve ``callable_name`` and invoke it with the stored args."""
        # TODO: logging?
        args = self.args
        kwargs = self.kwargs
        if '.' in self.callable_name:
            # Dotted path: import the module; content_object (if set) is
            # passed as the first positional argument.
            module_name, function_name = self.callable_name.rsplit('.', 1)
            module = __import__(module_name, fromlist=[function_name])
            callable_func = getattr(module, function_name)
            if self.content_object is not None:
                args = [self.content_object] + list(args)
        else:
            # Bare name: treat it as a method of content_object.
            callable_func = getattr(self.content_object, self.callable_name)
        if hasattr(callable_func, 'job_as_parameter'):
            # Callables marked with `job_as_parameter` receive the job itself.
            args = [self] + list(args)
        callable_func(*args, **kwargs)
    def reschedule(self, date, callable_name=None, content_object=None,
                   expires='7d', args=None, kwargs=None):
        """Schedule a clone of this job."""
        if callable_name is None:
            callable_name = self.callable_name
        if content_object is None:
            content_object = self.content_object
        if args is None:
            args = self.args
        if kwargs is None:
            kwargs = self.kwargs
        from django_future import schedule_job
        return schedule_job(date, callable_name, content_object=content_object,
                            expires=expires, args=args, kwargs=kwargs)
| Python | 0 |
ba927036c6170c754f4f95d90e62928b5da5d726 | Write config file while init | gitcd/Git/Commands/Init.py | gitcd/Git/Commands/Init.py | from gitcd.Git.Command import Command
class Init(Command):
    """Interactive `init` command: ask for branch/tag settings, then persist them."""
    # no special subcommands, only run which is meant to be default
    def execute(self, dummy: str):
        """Prompt for every configuration value and write the config file."""
        self.config.setMaster(
            self.interface.askFor(
                "Branch name for production releases?",
                False,
                self.config.getMaster()
            )
        )
        # '<none>' is shown as the default when a value is not yet configured.
        featureDefault = self.config.getFeature()
        if featureDefault is None:
            featureDefault = '<none>'
        self.config.setFeature(
            self.interface.askFor(
                "Branch name for feature development?",
                False,
                featureDefault
            )
        )
        testDefault = self.config.getTest()
        if testDefault is None:
            testDefault = '<none>'
        self.config.setTest(
            self.interface.askFor(
                "Branch name for test releases?",
                False,
                testDefault
            )
        )
        tagDefault = self.config.getTag()
        if tagDefault is None:
            tagDefault = '<none>'
        self.config.setTag(
            self.interface.askFor(
                "Version tag prefix?",
                False,
                tagDefault
            )
        )
        # ask for version type, manual or date
        versionType = self.interface.askFor(
            "Version type? You can either set your tag number" +
            " manually or generate it by date.",
            ['manual', 'date'],
            self.config.getVersionType()
        )
        self.config.setVersionType(versionType)
        # if type is date ask for scheme
        if versionType == 'date':
            versionScheme = self.interface.askFor(
                "Scheme for your date-tag?" +
                " Year: %Y / Month: %m / Day: %d /" +
                " Hour: %H / Minute: %M / Second: %S",
                '%Y.%m.%d%H%M',
                self.config.getVersionScheme()
            )
        else:
            # you'll be asked for it while a release
            versionScheme = None
        # pass version scheme to config
        self.config.setVersionScheme(versionScheme)
        # persist everything gathered above
        self.config.write()
| from gitcd.Git.Command import Command
class Init(Command):
    """Interactive `init` command: ask for branch/tag settings."""
    # no special subcommands, only run which is meant to be default
    def execute(self, dummy: str):
        """Prompt for every configuration value.

        NOTE(review): the values gathered below are only set in memory --
        nothing in this method writes the config to disk; confirm the
        caller persists it.
        """
        self.config.setMaster(
            self.interface.askFor(
                "Branch name for production releases?",
                False,
                self.config.getMaster()
            )
        )
        # '<none>' is shown as the default when a value is not yet configured.
        featureDefault = self.config.getFeature()
        if featureDefault is None:
            featureDefault = '<none>'
        self.config.setFeature(
            self.interface.askFor(
                "Branch name for feature development?",
                False,
                featureDefault
            )
        )
        testDefault = self.config.getTest()
        if testDefault is None:
            testDefault = '<none>'
        self.config.setTest(
            self.interface.askFor(
                "Branch name for test releases?",
                False,
                testDefault
            )
        )
        tagDefault = self.config.getTag()
        if tagDefault is None:
            tagDefault = '<none>'
        self.config.setTag(
            self.interface.askFor(
                "Version tag prefix?",
                False,
                tagDefault
            )
        )
        # ask for version type, manual or date
        versionType = self.interface.askFor(
            "Version type? You can either set your tag number" +
            " manually or generate it by date.",
            ['manual', 'date'],
            self.config.getVersionType()
        )
        self.config.setVersionType(versionType)
        # if type is date ask for scheme
        if versionType == 'date':
            versionScheme = self.interface.askFor(
                "Scheme for your date-tag?" +
                " Year: %Y / Month: %m / Day: %d /" +
                " Hour: %H / Minute: %M / Second: %S",
                '%Y.%m.%d%H%M',
                self.config.getVersionScheme()
            )
        else:
            # you'll be asked for it while a release
            versionScheme = None
        # pass version scheme to config
        self.config.setVersionScheme(versionScheme)
| Python | 0.000001 |
8319a938e1a511073094ba49d95d91c64ccac118 | Refactor how the API handles options. | api/common.py | api/common.py | #!/usr/bin/python
import mechanize
import urllib
import simplejson
from urllib2 import HTTPError
import time
class HumbugAPI():
def __init__(self, email, api_key, verbose=False, site="https://app.humbughq.com"):
self.browser = mechanize.Browser()
self.browser.set_handle_robots(False)
self.browser.add_password("https://app.humbughq.com/", "tabbott", "xxxxxxxxxxxxxxxxx", "wiki")
self.api_key = api_key
self.email = email
self.verbose = verbose
self.base_url = site
def send_message(self, submit_hash):
submit_hash["email"] = self.email
submit_hash["api-key"] = self.api_key
submit_data = urllib.urlencode([(k, v.encode('utf-8')) for k,v in submit_hash.items()])
res = self.browser.open(self.base_url + "/api/v1/send_message", submit_data)
return simplejson.loads(res.read())
def get_messages(self, options = {}):
options["email"] = self.email
options["api-key"] = self.api_key
submit_data = urllib.urlencode([(k, v.encode('utf-8')) for k,v in options.items()])
res = self.browser.open(self.base_url + "/api/v1/get_updates", submit_data)
return simplejson.loads(res.read())['zephyrs']
def call_on_each_message(self, callback, options = {}):
max_message_id = None
while True:
try:
options["first"] = "0"
options["last"] = str(last_received)
messages = self.get_messages(options)
except HTTPError, e:
# 502/503 typically means the server was restarted; sleep
# a bit, then try again
if self.verbose:
print "HTTP Error getting zephyrs; trying again soon."
print e
time.sleep(1)
except Exception, e:
# For other errors, just try again
print e
time.sleep(2)
continue
for message in sorted(messages, key=lambda x: x["id"]):
max_message_id = max(max_message_id, message["id"])
callback(message)
| #!/usr/bin/python
import mechanize
import urllib
import simplejson
from urllib2 import HTTPError
import time
class HumbugAPI():
def __init__(self, email, api_key, verbose=False, site="https://app.humbughq.com"):
self.browser = mechanize.Browser()
self.browser.set_handle_robots(False)
self.browser.add_password("https://app.humbughq.com/", "tabbott", "xxxxxxxxxxxxxxxxx", "wiki")
self.api_key = api_key
self.email = email
self.verbose = verbose
self.base_url = site
def send_message(self, submit_hash):
submit_hash["email"] = self.email
submit_hash["api-key"] = self.api_key
submit_data = urllib.urlencode([(k, v.encode('utf-8')) for k,v in submit_hash.items()])
res = self.browser.open(self.base_url + "/api/v1/send_message", submit_data)
return simplejson.loads(res.read())
def get_messages(self, last_received = None):
submit_hash = {}
submit_hash["email"] = self.email
submit_hash["api-key"] = self.api_key
if last_received is not None:
submit_hash["first"] = "0"
submit_hash["last"] = str(last_received)
submit_data = urllib.urlencode([(k, v.encode('utf-8')) for k,v in submit_hash.items()])
res = self.browser.open(self.base_url + "/api/v1/get_updates", submit_data)
return simplejson.loads(res.read())['zephyrs']
def call_on_each_message(self, callback):
max_message_id = None
while True:
try:
messages = self.get_messages(max_message_id)
except HTTPError, e:
# 502/503 typically means the server was restarted; sleep
# a bit, then try again
if self.verbose:
print "HTTP Error getting zephyrs; trying again soon."
print e
time.sleep(1)
except Exception, e:
# For other errors, just try again
print e
time.sleep(2)
continue
for message in sorted(messages, key=lambda x: x["id"]):
max_message_id = max(max_message_id, message["id"])
callback(message)
| Python | 0.000008 |
e2c92e8b6e8fb10addc73986914014b278598470 | Fix docstring in standardnormal example | spotpy/examples/spot_setup_standardnormal.py | spotpy/examples/spot_setup_standardnormal.py | '''
Copyright 2015 by Tobias Houska
This file is part of Statistical Parameter Estimation Tool (SPOTPY).
:author: Tobias Houska
This example implements the Standard Normal function into SPOT.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import spotpy
class spot_setup(object):
def __init__(self,mean=0,std=1):
self.params = [spotpy.parameter.Uniform('x',-5,5,1.5,3.0)
]
self.mean=mean
self.std=std
def parameters(self):
return spotpy.parameter.generate(self.params)
def simulation(self,x):
simulations= (1.0/(std*np.sqrt(2*np.pi)))**((-1.0/2.0)*(((x-self.mean)/self.std)**2))
return simulations
def evaluation(self):
observations = [0]
return observations
def objectivefunction(self, simulation,evaluation):
objectivefunction = -spotpy.objectivefunctions.rmse(evaluation = evaluation,simulation = simulation)
return objectivefunction | '''
Copyright 2015 by Tobias Houska
This file is part of Statistical Parameter Estimation Tool (SPOTPY).
:author: Tobias Houska
This example implements the Rosenbrock function into SPOT.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import spotpy
class spot_setup(object):
def __init__(self,mean=0,std=1):
self.params = [spotpy.parameter.Uniform('x',-5,5,1.5,3.0)
]
self.mean=mean
self.std=std
def parameters(self):
return spotpy.parameter.generate(self.params)
def simulation(self,x):
simulations= (1.0/(std*np.sqrt(2*np.pi)))**((-1.0/2.0)*(((x-self.mean)/self.std)**2))
return simulations
def evaluation(self):
observations = [0]
return observations
def objectivefunction(self, simulation,evaluation):
objectivefunction = -spotpy.objectivefunctions.rmse(evaluation = evaluation,simulation = simulation)
return objectivefunction | Python | 0.000051 |
5db4d1f0b98b2dbef3041e4dd72ea634450e67ee | Use absolute directory path to avoid errors with empty path strings | maxmindupdater/__init__.py | maxmindupdater/__init__.py | """Function to keep a maxmind database file up to date"""
import hashlib
import os
import shutil
import sys
import tarfile
import requests
__version__ = '0.1.0'
__url__ = 'https://github.com/yola/maxmind-updater'
def _hash_file(filename):
if not os.path.exists(filename):
return ''
block_size = 65536
hasher = hashlib.md5()
with open(filename, 'rb') as f:
buf = f.read(block_size)
while len(buf) > 0:
hasher.update(buf)
buf = f.read(block_size)
return hasher.hexdigest()
def update_db(db_path, license_key, edition_id):
db_dir_path = os.path.abspath(os.path.dirname(db_path))
db_archive_path = '%s.tar.gz' % db_path
def maxmind_download(suffix, **kwargs):
return requests.get('https://download.maxmind.com/app/geoip_download',
params={'license_key': license_key,
'edition_id': edition_id,
'suffix': suffix,
},
**kwargs)
expected_md5 = maxmind_download('tar.gz.md5').content
curr_md5 = _hash_file(db_archive_path)
if expected_md5 == curr_md5 and os.path.exists(db_path):
return
with open(db_archive_path, 'wb') as local_zip:
for chunk in maxmind_download('tar.gz', stream=True
).iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
local_zip.write(chunk)
with tarfile.open(db_archive_path) as tar_file:
# We only want the mmdb file. Maxmind kindly includes things
# we don't want.
extract_members = [member for member in tar_file.getmembers()
if member.name.endswith('.mmdb')]
assert len(extract_members) == 1
tar_file.extractall(path=db_dir_path, members=extract_members)
# extractall keeps the subfolder structure. Account for this by
# appending the path to the db_dir_path where it was extracted.
new_db = os.path.join(db_dir_path, extract_members[0].path)
try:
pass
# TODO
# test_ip('8.8.8.8', new_db)
# test_ip('2001:420::', new_db)
except Exception:
sys.stderr.write('Retrieved invalid GeoIP database - '
'check MaxMind account details.\n')
raise
if not os.path.exists(db_dir_path):
os.makedirs(db_dir_path)
shutil.move(new_db, db_path)
os.rmdir(os.path.dirname(new_db))
| """Function to keep a maxmind database file up to date"""
import hashlib
import os
import shutil
import sys
import tarfile
import requests
__version__ = '0.1.0'
__url__ = 'https://github.com/yola/maxmind-updater'
def _hash_file(filename):
if not os.path.exists(filename):
return ''
block_size = 65536
hasher = hashlib.md5()
with open(filename, 'rb') as f:
buf = f.read(block_size)
while len(buf) > 0:
hasher.update(buf)
buf = f.read(block_size)
return hasher.hexdigest()
def update_db(db_path, license_key, edition_id):
db_dir_path = os.path.dirname(db_path)
db_archive_path = '%s.tar.gz' % db_path
def maxmind_download(suffix, **kwargs):
return requests.get('https://download.maxmind.com/app/geoip_download',
params={'license_key': license_key,
'edition_id': edition_id,
'suffix': suffix,
},
**kwargs)
expected_md5 = maxmind_download('tar.gz.md5').content
curr_md5 = _hash_file(db_archive_path)
if expected_md5 == curr_md5 and os.path.exists(db_path):
return
with open(db_archive_path, 'wb') as local_zip:
for chunk in maxmind_download('tar.gz', stream=True
).iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
local_zip.write(chunk)
with tarfile.open(db_archive_path) as tar_file:
# We only want the mmdb file. Maxmind kindly includes things
# we don't want.
extract_members = [member for member in tar_file.getmembers()
if member.name.endswith('.mmdb')]
assert len(extract_members) == 1
tar_file.extractall(path=db_dir_path, members=extract_members)
# extractall keeps the subfolder structure. Account for this by
# appending the path to the db_dir_path where it was extracted.
new_db = os.path.join(db_dir_path, extract_members[0].path)
try:
pass
# TODO
# test_ip('8.8.8.8', new_db)
# test_ip('2001:420::', new_db)
except Exception:
sys.stderr.write('Retrieved invalid GeoIP database - '
'check MaxMind account details.\n')
raise
if not os.path.exists(db_dir_path):
os.makedirs(db_dir_path)
shutil.move(new_db, db_path)
os.rmdir(os.path.dirname(new_db))
| Python | 0.000001 |
ac98be78363b98def729e129484a06c26324dccd | Use Undefined instead of the now deprecated INVALID (#1143) | graphene/types/datetime.py | graphene/types/datetime.py | from __future__ import absolute_import
import datetime
from aniso8601 import parse_date, parse_datetime, parse_time
from graphql import Undefined
from graphql.language import StringValueNode
from .scalars import Scalar
class Date(Scalar):
"""
The `Date` scalar type represents a Date
value as specified by
[iso8601](https://en.wikipedia.org/wiki/ISO_8601).
"""
@staticmethod
def serialize(date):
if isinstance(date, datetime.datetime):
date = date.date()
assert isinstance(
date, datetime.date
), 'Received not compatible date "{}"'.format(repr(date))
return date.isoformat()
@classmethod
def parse_literal(cls, node):
if isinstance(node, StringValueNode):
return cls.parse_value(node.value)
@staticmethod
def parse_value(value):
try:
if isinstance(value, datetime.date):
return value
elif isinstance(value, str):
return parse_date(value)
except ValueError:
return Undefined
class DateTime(Scalar):
"""
The `DateTime` scalar type represents a DateTime
value as specified by
[iso8601](https://en.wikipedia.org/wiki/ISO_8601).
"""
@staticmethod
def serialize(dt):
assert isinstance(
dt, (datetime.datetime, datetime.date)
), 'Received not compatible datetime "{}"'.format(repr(dt))
return dt.isoformat()
@classmethod
def parse_literal(cls, node):
if isinstance(node, StringValueNode):
return cls.parse_value(node.value)
@staticmethod
def parse_value(value):
try:
if isinstance(value, datetime.datetime):
return value
elif isinstance(value, str):
return parse_datetime(value)
except ValueError:
return Undefined
class Time(Scalar):
"""
The `Time` scalar type represents a Time value as
specified by
[iso8601](https://en.wikipedia.org/wiki/ISO_8601).
"""
@staticmethod
def serialize(time):
assert isinstance(
time, datetime.time
), 'Received not compatible time "{}"'.format(repr(time))
return time.isoformat()
@classmethod
def parse_literal(cls, node):
if isinstance(node, StringValueNode):
return cls.parse_value(node.value)
@classmethod
def parse_value(cls, value):
try:
if isinstance(value, datetime.time):
return value
elif isinstance(value, str):
return parse_time(value)
except ValueError:
return Undefined
| from __future__ import absolute_import
import datetime
from aniso8601 import parse_date, parse_datetime, parse_time
from graphql.error import INVALID
from graphql.language import StringValueNode
from .scalars import Scalar
class Date(Scalar):
"""
The `Date` scalar type represents a Date
value as specified by
[iso8601](https://en.wikipedia.org/wiki/ISO_8601).
"""
@staticmethod
def serialize(date):
if isinstance(date, datetime.datetime):
date = date.date()
assert isinstance(
date, datetime.date
), 'Received not compatible date "{}"'.format(repr(date))
return date.isoformat()
@classmethod
def parse_literal(cls, node):
if isinstance(node, StringValueNode):
return cls.parse_value(node.value)
@staticmethod
def parse_value(value):
try:
if isinstance(value, datetime.date):
return value
elif isinstance(value, str):
return parse_date(value)
except ValueError:
return INVALID
class DateTime(Scalar):
"""
The `DateTime` scalar type represents a DateTime
value as specified by
[iso8601](https://en.wikipedia.org/wiki/ISO_8601).
"""
@staticmethod
def serialize(dt):
assert isinstance(
dt, (datetime.datetime, datetime.date)
), 'Received not compatible datetime "{}"'.format(repr(dt))
return dt.isoformat()
@classmethod
def parse_literal(cls, node):
if isinstance(node, StringValueNode):
return cls.parse_value(node.value)
@staticmethod
def parse_value(value):
try:
if isinstance(value, datetime.datetime):
return value
elif isinstance(value, str):
return parse_datetime(value)
except ValueError:
return INVALID
class Time(Scalar):
"""
The `Time` scalar type represents a Time value as
specified by
[iso8601](https://en.wikipedia.org/wiki/ISO_8601).
"""
@staticmethod
def serialize(time):
assert isinstance(
time, datetime.time
), 'Received not compatible time "{}"'.format(repr(time))
return time.isoformat()
@classmethod
def parse_literal(cls, node):
if isinstance(node, StringValueNode):
return cls.parse_value(node.value)
@classmethod
def parse_value(cls, value):
try:
if isinstance(value, datetime.time):
return value
elif isinstance(value, str):
return parse_time(value)
except ValueError:
return INVALID
| Python | 0 |
d46368024ee89143bca15a2bdf23f8792970cf5c | add property 'external' to menu nodes | menu_external_urls/menu.py | menu_external_urls/menu.py | from menus.base import Modifier
from menus.menu_pool import menu_pool
from menu_external_urls.models import MenuExternalUrl
class MenuExternalUrlMod(Modifier):
"""
Adds ability to link page to an external URL.
"""
def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb):
if post_cut:
return nodes
if breadcrumb:
return nodes
for node in nodes:
try:
#Load External URL into nodes
menu_external_url = MenuExternalUrl.objects.get(page=(node.id-1))
node.url = menu_external_url.menu_external_url
node.external = True
except:
pass
return nodes
menu_pool.register_modifier(MenuExternalUrlMod)
| from menus.base import Modifier
from menus.menu_pool import menu_pool
from menu_external_urls.models import MenuExternalUrl
class MenuExternalUrlMod(Modifier):
"""
Adds ability to link page to an external URL.
"""
def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb):
if post_cut:
return nodes
if breadcrumb:
return nodes
for node in nodes:
try:
#Load External URL into nodes
menu_external_url = MenuExternalUrl.objects.get(page=(node.id-1))
node.url = menu_external_url.menu_external_url
except:
pass
return nodes
menu_pool.register_modifier(MenuExternalUrlMod)
| Python | 0 |
c973385f877d940231deb8d81e929647eadc280a | Use standard env var for DATABASE_URL | app/config.py | app/config.py | # -*- coding: utf-8 -*-
"""
Application configuration
"""
import os
from os.path import dirname, join
# get settings from environment, or credstash if running in AWS
env = os.environ
if env.get('SETTINGS') == 'AWS':
from lib.aws_env import env
ASSETS_DEBUG = False
DEBUG = bool(env.get('DEBUG', True))
HUMANIZE_USE_UTC = True
MARKDOWN_EXTENSIONS = [
'markdown.extensions.nl2br',
'markdown.extensions.sane_lists',
'markdown.extensions.smart_strong',
'markdown.extensions.smarty',
]
SECRET_KEY = env.get('SECRET_KEY', os.urandom(24))
SESSION_COOKIE_SECURE = False
SQLALCHEMY_DATABASE_PATH = join(dirname(__file__), '../development.db')
SQLALCHEMY_DATABASE_URI = env.get(
'DATABASE_URL',
'sqlite:///{}'.format(SQLALCHEMY_DATABASE_PATH))
SQLALCHEMY_TRACK_MODIFICATIONS = bool(env.get(
'SQLALCHEMY_TRACK_MODIFICATIONS',
False))
TESTING = bool(env.get('TESTING', False))
| # -*- coding: utf-8 -*-
"""
Application configuration
"""
import os
from os.path import dirname, join
# get settings from environment, or credstash if running in AWS
env = os.environ
if env.get('SETTINGS') == 'AWS':
from lib.aws_env import env
ASSETS_DEBUG = False
DEBUG = bool(env.get('DEBUG', True))
HUMANIZE_USE_UTC = True
MARKDOWN_EXTENSIONS = [
'markdown.extensions.nl2br',
'markdown.extensions.sane_lists',
'markdown.extensions.smart_strong',
'markdown.extensions.smarty',
]
SECRET_KEY = env.get('SECRET_KEY', os.urandom(24))
SESSION_COOKIE_SECURE = False
SQLALCHEMY_DATABASE_PATH = join(dirname(__file__), '../development.db')
SQLALCHEMY_DATABASE_URI = env.get(
'DATABASE_URI',
'sqlite:///{}'.format(SQLALCHEMY_DATABASE_PATH))
SQLALCHEMY_TRACK_MODIFICATIONS = bool(env.get(
'SQLALCHEMY_TRACK_MODIFICATIONS',
False))
TESTING = bool(env.get('TESTING', False))
| Python | 0.000001 |
2f5c855336a0d182ad731fc50b6dc652f64b80d3 | remove lazy=dynamic | app/models.py | app/models.py | from app import db
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column('username', db.String(32), unique=True, index=True)
password = db.Column('password', db.String(32))
rounds = db.relationship('Round', backref='user_o')
@property
def is_authenticated(self):
return True
@property
def is_active(self):
return True
@property
def is_anonymous(self):
return False
def get_id(self):
try:
return unicode(self.id)
except NameError:
return str(self.id)
def __repr__(self):
return '<User %r>' % (self.username)
class Round(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
course_id = db.Column(db.Integer, db.ForeignKey('course.id'))
date = db.Column(db.DateTime)
tee_color = db.Column(db.String(32))
course_handicap = db.Column(db.Integer)
adj_score = db.Column(db.Integer)
handicap_index = db.Column(db.Float)
scores = db.relationship('Score', backref='round_o')
def __repr__(self):
return '<Round %r>' % (self.date)
class Score(db.Model):
id = db.Column(db.Integer, primary_key=True)
round_id = db.Column(db.Integer, db.ForeignKey('round.id'))
hole = db.Column(db.Integer)
score = db.Column(db.Integer)
putts = db.Column(db.Integer)
gir = db.Column(db.Integer)
def __repr__(self):
return '<Score %r>' % (self.id)
class Course(db.Model):
id = db.Column(db.Integer, primary_key=True)
nickname = db.Column(db.String(32), unique=True, index=True)
name = db.Column(db.String(64))
rounds = db.relationship('Round', backref='course_o')
tees = db.relationship('Tee', backref='course_o')
def __repr__(self):
return '<Course %r>' % (self.name)
class Tee(db.Model):
id = db.Column(db.Integer, primary_key=True)
course_id = db.Column(db.Integer, db.ForeignKey('course.id'))
tee_color = db.Column(db.String(32))
rating = db.Column(db.Float)
slope = db.Column(db.Integer)
holes = db.relationship('Hole', backref='tee_o')
def __repr__(self):
return '<Tee %r>' % (self.tee_color)
class Hole(db.Model):
id = db.Column(db.Integer, primary_key=True)
tee_id = db.Column(db.Integer, db.ForeignKey('tee.id'))
hole = db.Column(db.Integer)
yardage = db.Column(db.Integer)
par = db.Column(db.Integer)
rating = db.Column(db.Float)
slope = db.Column(db.Integer)
def __repr__(self):
return '<Hole %r>' % (self.hole)
| from app import db
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column('username', db.String(32), unique=True, index=True)
password = db.Column('password', db.String(32))
rounds = db.relationship('Round', backref='user_o', lazy='dynamic')
@property
def is_authenticated(self):
return True
@property
def is_active(self):
return True
@property
def is_anonymous(self):
return False
def get_id(self):
try:
return unicode(self.id)
except NameError:
return str(self.id)
def __repr__(self):
return '<User %r>' % (self.username)
class Round(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
course_id = db.Column(db.Integer, db.ForeignKey('course.id'))
date = db.Column(db.DateTime)
tee_color = db.Column(db.String(32))
course_handicap = db.Column(db.Integer)
adj_score = db.Column(db.Integer)
handicap_index = db.Column(db.Float)
scores = db.relationship('Score', backref='round_o', lazy='dynamic')
def __repr__(self):
return '<Round %r>' % (self.date)
class Score(db.Model):
id = db.Column(db.Integer, primary_key=True)
round_id = db.Column(db.Integer, db.ForeignKey('round.id'))
hole = db.Column(db.Integer)
score = db.Column(db.Integer)
putts = db.Column(db.Integer)
gir = db.Column(db.Integer)
def __repr__(self):
return '<Score %r>' % (self.id)
class Course(db.Model):
id = db.Column(db.Integer, primary_key=True)
nickname = db.Column(db.String(32), unique=True, index=True)
name = db.Column(db.String(64))
rounds = db.relationship('Round', backref='course_o', lazy='dynamic')
tees = db.relationship('Tee', backref='course_o', lazy='dynamic')
def __repr__(self):
return '<Course %r>' % (self.name)
class Tee(db.Model):
id = db.Column(db.Integer, primary_key=True)
course_id = db.Column(db.Integer, db.ForeignKey('course.id'))
tee_color = db.Column(db.String(32))
rating = db.Column(db.Float)
slope = db.Column(db.Integer)
holes = db.relationship('Hole', backref='tee_o', lazy='dynamic')
def __repr__(self):
return '<Tee %r>' % (self.tee_color)
class Hole(db.Model):
id = db.Column(db.Integer, primary_key=True)
tee_id = db.Column(db.Integer, db.ForeignKey('tee.id'))
hole = db.Column(db.Integer)
yardage = db.Column(db.Integer)
par = db.Column(db.Integer)
rating = db.Column(db.Float)
slope = db.Column(db.Integer)
def __repr__(self):
return '<Hole %r>' % (self.hole)
| Python | 0.99834 |
c313550b52369edad73e97769ca509075d9e4ef6 | Establish table relationships | app/models.py | app/models.py | from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from app import db
class User(UserMixin, db.Model):
"""This class represents the user table."""
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(255), nullable=False, unique=True)
email = db.Column(db.String(256), nullable=False, unique=True)
user_password = db.Column(db.String(255), nullable=False)
bucketlists = db.relationship('BucketList', order_by="BucketList.id",
cascade="all,delete-orphan")
def __init__(self, username, password, email):
self.username = username
self.password = password
self.email = email
@property
def password(self):
raise AttributeError('You cannot access password')
@password.setter
def password(self):
self.user_password = generate_password_hash(self.password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
class BucketList(db.Model):
"""This class represents the bucketlist table."""
__tablename__ = 'bucketlists'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
date_modified = db.Column(
db.DateTime, default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp())
created_by = db.Column(db.Integer, db.ForeignKey(User.id))
def __init__(self, name):
"""initialize with name."""
self.name = name
def save(self):
db.session.add(self)
db.session.commit()
@staticmethod
def get_all():
return BucketList.query.all()
def delete(self):
db.session.delete(self)
db.session.commit()
def __repr__(self):
return "<BucketList: {}>".format(self.name)
class BucketListItem(db.Model):
"""This class represents the bucketlist_item table"""
__tablename__ = 'bucketlistitems'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
date_modified = db.Column(db.DateTime, default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp())
done = db.Column(db.Boolean, default=False)
bucketlist_id = db.Column(db.Integer, db.ForeignKey(BucketList.id))
def __init__(self, name, bucketlist_id):
"""Initialize with name and bucketlist_id"""
self.name = name
self.bucketlist_id = bucketlist_id
def save(self):
db.session.add(self)
db.session.commit()
@staticmethod
def get_all_items():
return BucketListItem.query.filter_by(bucketlist_id=BucketList.id)
def delete(self):
db.session.delete(self)
db.session.commit()
| from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from app import db
class User(UserMixin, db.Model):
"""This class represents the user table."""
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(255), nullable=False, unique=True)
email = db.Column(db.String(256), nullable=False, unique=True)
user_password = db.Column(db.String(255), nullable=False)
bucketlists = db.relationship('BucketList', order_by="BucketList.id",
cascade="all,delete-orphan")
def __init__(self, username, password, email):
self.username = username
self.password = password
self.email = email
@property
def password(self):
raise AttributeError('You cannot access password')
@password.setter
def password(self):
self.user_password = generate_password_hash(self.password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
class BucketList(db.Model):
"""This class represents the bucketlist table."""
__tablename__ = 'bucketlists'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
date_modified = db.Column(
db.DateTime, default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp())
def __init__(self, name):
"""initialize with name."""
self.name = name
def save(self):
db.session.add(self)
db.session.commit()
@staticmethod
def get_all():
return BucketList.query.all()
def delete(self):
db.session.delete(self)
db.session.commit()
def __repr__(self):
return "<BucketList: {}>".format(self.name)
class BucketListItem(db.Model):
"""This class represents the bucketlist_item table"""
__tablename__ = 'bucketlistitems'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
date_modified = db.Column(db.DateTime, default=db.func.current_timestamp(),
onupdate=db.func.current_timestamp())
done = db.Column(db.Boolean, default=False)
bucketlist_id = db.Column(db.Integer, db.ForeignKey(BucketList.id))
def __init__(self, name, bucketlist_id):
"""Initialize with name and bucketlist_id"""
self.name = name
self.bucketlist_id = bucketlist_id
def save(self):
db.session.add(self)
db.session.commit()
@staticmethod
def get_all_items():
return BucketListItem.query.filter_by(bucketlist_id=BucketList.id)
def delete(self):
db.session.delete(self)
db.session.commit()
| Python | 0.000023 |
7fe6130c1f94bdfcab1508bd9102d02f8fd123a0 | update tests | corehq/apps/cloudcare/tests/test_session.py | corehq/apps/cloudcare/tests/test_session.py | import uuid
from django.test import TestCase
from casexml.apps.case.models import CommCareCase
from corehq.apps.cloudcare.touchforms_api import (
get_user_contributions_to_touchforms_session,
)
from corehq.apps.custom_data_fields.models import (
CustomDataFieldsDefinition,
CustomDataFieldsProfile,
Field,
PROFILE_SLUG,
)
from corehq.apps.users.views.mobile.custom_data_fields import UserFieldsView
from corehq.apps.users.models import CommCareUser, WebUser
class SessionUtilsTest(TestCase):
def test_load_session_data_for_mobile_worker(self):
user = CommCareUser(
domain='cloudcare-tests',
username='worker@cloudcare-tests.commcarehq.org',
_id=uuid.uuid4().hex
)
data = get_user_contributions_to_touchforms_session('cloudcare-tests', user)
self.assertEqual('worker', data['username'])
self.assertEqual(user._id, data['user_id'])
self.assertTrue(isinstance(data['user_data'], dict))
self.assertTrue(data['user_data']['commcare_project'], 'cloudcare-tests')
def test_default_user_data(self):
user = CommCareUser(
domain='cloudcare-tests',
username='worker@cloudcare-tests.commcarehq.org',
_id=uuid.uuid4().hex
)
user_data = get_user_contributions_to_touchforms_session('cloudcare-tests', user)['user_data']
for key in ['commcare_first_name', 'commcare_last_name', 'commcare_phone_number']:
self.assertEqual(None, user_data[key])
user.first_name = 'first'
user.last_name = 'last'
user_data = get_user_contributions_to_touchforms_session('cloudcare-tests', user)['user_data']
self.assertEqual('first', user_data['commcare_first_name'])
self.assertEqual('last', user_data['commcare_last_name'])
def test_user_data_profile(self):
definition = CustomDataFieldsDefinition(domain='cloudcare-tests', field_type=UserFieldsView.field_type)
definition.save()
definition.set_fields([
Field(slug='word', label='A Word'),
])
definition.save()
profile = CustomDataFieldsProfile(name='prof', fields={'word': 'supernova'}, definition=definition)
profile.save()
user = CommCareUser.create(
'cloudcare-tests',
'worker@cloudcare-tests.commcarehq.org',
'do you want to know a secret',
None,
None,
uuid=uuid.uuid4().hex,
metadata={PROFILE_SLUG: profile.id},
)
user_data = get_user_contributions_to_touchforms_session('cloudcare-tests', user)['user_data']
self.assertEqual(profile.id, user_data[PROFILE_SLUG])
self.assertEqual('supernova', user_data['word'])
definition.delete()
def test_load_session_data_for_web_user(self):
user = WebUser(
username='web-user@example.com',
_id=uuid.uuid4().hex
)
data = get_user_contributions_to_touchforms_session('cloudcare-tests', user)
self.assertEqual('web-user@example.com', data['username'])
self.assertEqual(user._id, data['user_id'])
self.assertTrue(isinstance(data['user_data'], dict))
self.assertTrue(data['user_data']['commcare_project'], 'cloudcare-tests')
def test_load_session_data_for_commconnect_case(self):
user = CommCareCase(
name='A case',
_id=uuid.uuid4().hex
)
data = get_user_contributions_to_touchforms_session('cloudcare-tests', user)
self.assertEqual('A case', data['username'])
self.assertEqual(user._id, data['user_id'])
self.assertEqual({}, data['user_data'])
| import uuid
from django.test import TestCase
from casexml.apps.case.models import CommCareCase
from corehq.apps.cloudcare.touchforms_api import (
get_user_contributions_to_touchforms_session,
)
from corehq.apps.custom_data_fields.models import (
CustomDataFieldsDefinition,
CustomDataFieldsProfile,
Field,
PROFILE_SLUG,
)
from corehq.apps.users.views.mobile.custom_data_fields import UserFieldsView
from corehq.apps.users.models import CommCareUser, WebUser
class SessionUtilsTest(TestCase):
    """Tests for the data each kind of "user" contributes to a touchforms
    session: mobile workers, web users, and cases acting as users."""

    def test_load_session_data_for_mobile_worker(self):
        """Mobile workers contribute their short username, id, and user_data."""
        user = CommCareUser(
            domain='cloudcare-tests',
            username='worker@cloudcare-tests.commcarehq.org',
            _id=uuid.uuid4().hex
        )
        data = get_user_contributions_to_touchforms_session('cloudcare-tests', user)
        # 'worker' is the short form of the full @-qualified username.
        self.assertEqual('worker', data['username'])
        self.assertEqual(user._id, data['user_id'])
        self.assertTrue(isinstance(data['user_data'], dict))

    def test_default_user_data(self):
        """Default user-data keys are None until the user's names are set."""
        user = CommCareUser(
            domain='cloudcare-tests',
            username='worker@cloudcare-tests.commcarehq.org',
            _id=uuid.uuid4().hex
        )
        user_data = get_user_contributions_to_touchforms_session('cloudcare-tests', user)['user_data']
        for key in ['commcare_first_name', 'commcare_last_name', 'commcare_phone_number']:
            self.assertEqual(None, user_data[key])
        user.first_name = 'first'
        user.last_name = 'last'
        user_data = get_user_contributions_to_touchforms_session('cloudcare-tests', user)['user_data']
        self.assertEqual('first', user_data['commcare_first_name'])
        self.assertEqual('last', user_data['commcare_last_name'])

    def test_user_data_profile(self):
        """Fields from an assigned custom-data profile appear in user_data."""
        definition = CustomDataFieldsDefinition(domain='cloudcare-tests', field_type=UserFieldsView.field_type)
        definition.save()
        definition.set_fields([
            Field(slug='word', label='A Word'),
        ])
        definition.save()
        profile = CustomDataFieldsProfile(name='prof', fields={'word': 'supernova'}, definition=definition)
        profile.save()
        user = CommCareUser.create(
            'cloudcare-tests',
            'worker@cloudcare-tests.commcarehq.org',
            'do you want to know a secret',
            None,
            None,
            uuid=uuid.uuid4().hex,
            metadata={PROFILE_SLUG: profile.id},
        )
        user_data = get_user_contributions_to_touchforms_session('cloudcare-tests', user)['user_data']
        self.assertEqual(profile.id, user_data[PROFILE_SLUG])
        self.assertEqual('supernova', user_data['word'])
        # Clean up so the definition does not leak into other tests.
        definition.delete()

    def test_load_session_data_for_web_user(self):
        """Web users contribute their full email-style username."""
        user = WebUser(
            username='web-user@example.com',
            _id=uuid.uuid4().hex
        )
        data = get_user_contributions_to_touchforms_session('cloudcare-tests', user)
        self.assertEqual('web-user@example.com', data['username'])
        self.assertEqual(user._id, data['user_id'])
        self.assertTrue(isinstance(data['user_data'], dict))

    def test_load_session_data_for_commconnect_case(self):
        """A case acting as the user contributes its name and empty user_data."""
        user = CommCareCase(
            name='A case',
            _id=uuid.uuid4().hex
        )
        data = get_user_contributions_to_touchforms_session('cloudcare-tests', user)
        self.assertEqual('A case', data['username'])
        self.assertEqual(user._id, data['user_id'])
        self.assertEqual({}, data['user_data'])
| Python | 0.000001 |
172c7a3ee0c75462f08e726716bf906ad88eadab | add test of plugin registry options | altair/utils/tests/test_plugin_registry.py | altair/utils/tests/test_plugin_registry.py | from ..plugin_registry import PluginRegistry
from typing import Callable
class TypedCallableRegistry(PluginRegistry[Callable[[int], int]]):
    """Registry whose entries are typed int -> int callables."""
    pass
class GeneralCallableRegistry(PluginRegistry):
    """Registry with no type constraint on its registered plugins."""
    pass
def test_plugin_registry():
    """Exercise the registry life-cycle: empty, registered, then enabled."""
    plugins = TypedCallableRegistry()

    # A fresh registry has no plugins and nothing active.
    assert plugins.names() == []
    assert plugins.active == ''
    assert plugins.get() is None
    assert repr(plugins) == "TypedCallableRegistry(active='', registered=[])"

    # Registering a plugin records it but does not activate it.
    plugins.register('new_plugin', lambda x: x ** 2)
    assert plugins.names() == ['new_plugin']
    assert plugins.active == ''
    assert plugins.get() is None
    assert repr(plugins) == ("TypedCallableRegistry(active='', "
                             "registered=['new_plugin'])")

    # Enabling makes the plugin the active one, retrievable via get().
    plugins.enable('new_plugin')
    assert plugins.names() == ['new_plugin']
    assert plugins.active == 'new_plugin'
    assert plugins.get()(3) == 9
    assert repr(plugins) == ("TypedCallableRegistry(active='new_plugin', "
                             "registered=['new_plugin'])")
def test_plugin_registry_extra_options():
    """Keyword options passed to enable() are forwarded to the plugin."""
    plugins = GeneralCallableRegistry()
    plugins.register('metadata_plugin', lambda x, p=2: x ** p)

    # Without extra options the plugin's own default (p=2) applies.
    plugins.enable('metadata_plugin')
    assert plugins.get()(3) == 9

    # Options supplied to enable() override the plugin's defaults.
    plugins.enable('metadata_plugin', p=3)
    assert plugins.get()(3) == 27
| from ..plugin_registry import PluginRegistry
class RegistryTest(PluginRegistry):
    """Minimal concrete registry used to exercise PluginRegistry."""
    pass
def test_plugin_registry():
    """Exercise the registry life-cycle: empty, registered, then enabled."""
    plugins = RegistryTest()

    # A fresh registry has no plugins and nothing active.
    assert plugins.names() == []
    assert plugins.active == ''
    assert plugins.get() is None
    assert repr(plugins) == "RegistryTest(active='', registered=[])"

    # Registering a plugin records it but does not activate it.
    plugins.register('new_plugin', lambda x: x ** 2)
    assert plugins.names() == ['new_plugin']
    assert plugins.active == ''
    assert plugins.get() is None
    assert repr(plugins) == ("RegistryTest(active='', "
                             "registered=['new_plugin'])")

    # Enabling makes the plugin the active one, retrievable via get().
    plugins.enable('new_plugin')
    assert plugins.names() == ['new_plugin']
    assert plugins.active == 'new_plugin'
    assert plugins.get()(3) == 9
    assert repr(plugins) == ("RegistryTest(active='new_plugin', "
                             "registered=['new_plugin'])")
| Python | 0 |
ebe5d80075ce818181a154b6ec772a08e335ae4a | fix test name | trtools/core/tests/test_timeseries.py | trtools/core/tests/test_timeseries.py | from unittest import TestCase
import pandas as pd
from pandas.core.groupby import BinGrouper
import trtools.util.testing as tm
import numpy as np
import trtools.core.timeseries as ts
# start on friday, so second day is saturday
df = tm.fake_ohlc(1000000, freq="5min", start="2000-01-07")
# business days and trading hours
df = df.ix[df.index.dayofweek < 5]
df = ts.trading_hours(df)
class TestBinning(TestCase):
    """Checks downsample() against pandas' resample() on the module-level
    ``df`` fixture (5-minute OHLC data restricted to trading hours)."""

    def __init__(self, *args, **kwargs):
        TestCase.__init__(self, *args, **kwargs)

    def runTest(self):
        # Required so the TestCase can be instantiated directly.
        pass

    def setUp(self):
        pass

    def test_downsample(self):
        """Daily downsampling with empty groups kept matches resample()."""
        # these should be equivalent
        grouped = df.downsample('D', drop_empty=False)
        test = grouped.mean()
        correct = df.resample('D', how='mean')
        tm.assert_frame_equal(test, correct)

    def test_downsample_drop_empty(self):
        """
        the drop_empty which is the default will not include
        empty groups into the GroupBy.
        """
        grouped = df.downsample('D')
        test = grouped.mean()
        correct = df.resample('D', how='mean').dropna(how='all')
        tm.assert_frame_equal(test, correct)
if __name__ == '__main__':
    # Run this module's tests directly under nose, stopping at the first
    # failure (-x) and dropping into pdb on errors/failures.
    import nose
    nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],exit=False)
| from unittest import TestCase
import pandas as pd
from pandas.core.groupby import BinGrouper
import trtools.util.testing as tm
import numpy as np
import trtools.core.timeseries as ts
# start on friday, so second day is saturday
df = tm.fake_ohlc(1000000, freq="5min", start="2000-01-07")
# business days and trading hours
df = df.ix[df.index.dayofweek < 5]
df = ts.trading_hours(df)
class TestBinning(TestCase):
    """Checks downsample() against pandas' resample() on the module-level
    ``df`` fixture (5-minute OHLC data restricted to trading hours)."""

    def __init__(self, *args, **kwargs):
        TestCase.__init__(self, *args, **kwargs)

    def runTest(self):
        # Required so the TestCase can be instantiated directly.
        pass

    def setUp(self):
        pass

    def test_downsample(self):
        """Daily downsampling with empty groups kept matches resample().

        Bug fix: this method was named ``downsample``, so unittest/nose
        test discovery never ran it; it needs the ``test_`` prefix.
        """
        # these should be equivalent
        grouped = df.downsample('D', drop_empty=False)
        test = grouped.mean()
        correct = df.resample('D', how='mean')
        tm.assert_frame_equal(test, correct)

    def test_downsample_drop_empty(self):
        """
        the drop_empty which is the default will not include
        empty groups into the GroupBy.
        """
        grouped = df.downsample('D')
        test = grouped.mean()
        correct = df.resample('D', how='mean').dropna(how='all')
        tm.assert_frame_equal(test, correct)
if __name__ == '__main__':
    # Run this module's tests directly under nose, stopping at the first
    # failure (-x) and dropping into pdb on errors/failures.
    import nose
    nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],exit=False)
| Python | 0.000546 |
c22024d29548f93d5bf6cfac6fc9f5bd02915e92 | Try reverse with strings | judge/views/problem.py | judge/views/problem.py | from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from judge.forms import ProblemSubmitForm
from judge.models import Problem, Profile, Submission
def get_result_table(code):
    """Tally submissions by result code.

    When ``code`` is truthy, only submissions for that problem are
    counted; otherwise every submission is counted. Submissions with no
    result yet, or with an internal error ("IE"), are grouped under the
    ``None`` key.
    """
    if code:
        submissions = Submission.objects.filter(problem__code=code)
    else:
        submissions = Submission.objects.all()
    tally = {}
    for submission in submissions:
        outcome = submission.result
        if not outcome or outcome == "IE":
            outcome = None
        tally[outcome] = tally.get(outcome, 0) + 1
    return tally
def problem(request, code):
    """Render the detail page for the problem identified by ``code``.

    Raises Http404 when no such problem exists.
    """
    try:
        problem = Problem.objects.get(code=code)
    except ObjectDoesNotExist:
        # Bug fix: Http404 is an exception class; *returning* an instance
        # hands Django an invalid response object. It must be raised.
        raise Http404()
    return render_to_response('problem.html', {'problem': problem, 'results': get_result_table(code),
                                               'title': 'Problem %s' % problem.name},
                              context_instance=RequestContext(request))
def problems(request):
    """Render the list of all problems."""
    return render_to_response('problems.html', {'problems': Problem.objects.all(), 'title': 'Problems'},
                              context_instance=RequestContext(request))
@login_required
def problem_submit(request, problem=None):
    """Handle the problem submission form.

    GET renders the (optionally pre-filled) form; POST validates it,
    starts judging, and redirects to the submission status page.
    Raises Http404 when the pre-fill problem code does not exist.
    """
    if request.method == 'POST':
        form = ProblemSubmitForm(request.POST, instance=Submission(user=request.user.profile))
        if form.is_valid():
            model = form.save()
            model.judge()
            return HttpResponseRedirect(reverse('judge.view.submission_status', args=[str(model.id)]))
    else:
        initial = {'language': request.user.profile.language}
        if problem is not None:
            try:
                initial['problem'] = Problem.objects.get(code=problem)
            except ObjectDoesNotExist:
                # Bug fix: Http404 must be raised, not returned — a returned
                # exception instance is not a valid HTTP response.
                raise Http404()
        form = ProblemSubmitForm(initial=initial)
    return render_to_response('problem_submit.html', {'form': form, 'title': 'Submit'},
                              context_instance=RequestContext(request))
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from judge.forms import ProblemSubmitForm
from judge.models import Problem, Profile, Submission
def get_result_table(code):
    """Return a dict mapping result code -> number of submissions.

    When ``code`` is truthy only submissions for that problem are
    counted; otherwise every submission is counted. Submissions with no
    result yet or an internal error ("IE") are grouped under ``None``.
    """
    results = {}
    for submission in Submission.objects.filter(problem__code=code) if code else Submission.objects.all():
        r = None
        if submission.result and submission.result not in ["IE"]:
            r = submission.result
        results[r] = results.get(r, 0) + 1
    return results
def problem(request, code):
    """Render the detail page for the problem identified by ``code``.

    Raises Http404 when no such problem exists.
    """
    try:
        problem = Problem.objects.get(code=code)
    except ObjectDoesNotExist:
        # Bug fix: Http404 is an exception class; *returning* an instance
        # hands Django an invalid response object. It must be raised.
        raise Http404()
    return render_to_response('problem.html', {'problem': problem, 'results': get_result_table(code),
                                               'title': 'Problem %s' % problem.name},
                              context_instance=RequestContext(request))
def problems(request):
    """Render the list of all problems."""
    return render_to_response('problems.html', {'problems': Problem.objects.all(), 'title': 'Problems'},
                              context_instance=RequestContext(request))
@login_required
def problem_submit(request, problem=None):
    """Handle the problem submission form.

    GET renders the (optionally pre-filled) form; POST validates it,
    starts judging, and redirects to the submission status page.
    Raises Http404 when the pre-fill problem code does not exist.
    """
    if request.method == 'POST':
        form = ProblemSubmitForm(request.POST, instance=Submission(user=request.user.profile))
        if form.is_valid():
            model = form.save()
            model.judge()
            return HttpResponseRedirect(reverse('judge.view.submission_status', args=[model.id]))
    else:
        initial = {'language': request.user.profile.language}
        if problem is not None:
            try:
                initial['problem'] = Problem.objects.get(code=problem)
            except ObjectDoesNotExist:
                # Bug fix: Http404 must be raised, not returned — a returned
                # exception instance is not a valid HTTP response.
                raise Http404()
        form = ProblemSubmitForm(initial=initial)
    return render_to_response('problem_submit.html', {'form': form, 'title': 'Submit'},
                              context_instance=RequestContext(request))
f386fce820fb60abfe1b18c141dfd8ce268c5f4f | Update queue_on_list.py (#851) | data_structures/queue/queue_on_list.py | data_structures/queue/queue_on_list.py | """Queue represented by a python list"""
class Queue():
    """First-in first-out queue backed by a Python list.

    ``entries[0]`` is always the front of the queue.
    """

    def __init__(self):
        self.entries = []
        self.length = 0
        # Bug fix: the original also set ``self.front = 0`` here, which
        # shadowed the front() method defined below, making q.front()
        # a TypeError ("'int' object is not callable"). The attribute is
        # unnecessary since the front element is always entries[0].

    def __str__(self):
        printed = '<' + str(self.entries)[1:-1] + '>'
        return printed

    def put(self, item):
        """Enqueue ``item`` at the back of the queue."""
        self.entries.append(item)
        self.length = self.length + 1

    def get(self):
        """Dequeue and return the front item.

        Requires a non-empty queue (length > 0).
        """
        self.length = self.length - 1
        # pop(0) removes and returns the front element in one step.
        return self.entries.pop(0)

    def rotate(self, rotation):
        """Move the front item to the back, ``rotation`` times."""
        for i in range(rotation):
            self.put(self.get())

    def front(self):
        """Return the front item without removing it."""
        return self.entries[0]

    def size(self):
        """Return the number of items currently queued."""
        return self.length
| """Queue represented by a python list"""
class Queue():
    """First-in first-out queue backed by a Python list.

    ``entries[0]`` is always the front of the queue.
    """

    def __init__(self):
        self.entries = []
        self.length = 0
        # Bug fix: the original kept a ``self.front`` index that get()
        # decremented to -1 and then used to reslice the list
        # (``entries[self.front:]``), corrupting the queue after the first
        # dequeue. It also shadowed the front() method below.

    def __str__(self):
        printed = '<' + str(self.entries)[1:-1] + '>'
        return printed

    def put(self, item):
        """Enqueue ``item`` at the back of the queue."""
        self.entries.append(item)
        self.length = self.length + 1

    def get(self):
        """Dequeue and return the front item.

        Requires a non-empty queue (length > 0).
        """
        self.length = self.length - 1
        # pop(0) removes and returns the front element in one step.
        return self.entries.pop(0)

    def rotate(self, rotation):
        """Move the front item to the back, ``rotation`` times."""
        for i in range(rotation):
            self.put(self.get())

    def front(self):
        """Return the front item without removing it."""
        return self.entries[0]

    def size(self):
        """Return the number of items currently queued."""
        return self.length
| Python | 0 |
91540aefccdccdeb0c668b8ce5a99bb5471a3200 | Change order slightly. | avenue/web.py | avenue/web.py | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Michael Babich
# See LICENSE.txt or http://opensource.org/licenses/MIT
'''Acts as an interface between what Flask serves and what goes on in
the rest of the application.
'''
from avenue import app, api
from avenue.database import content
from flask import render_template, make_response, redirect
def url_generator():
    '''This function acts on a list of URLs, a text rule for each URL,
    and a function that says what to do to that text rule to serve a
    page. The action_list associates a subset of URLs with a
    particular function to be used as the action for that group.
    '''
    data = api.read_data('forum')
    threads = data['threads']
    content.insert_data()
    themes = content.get_themes()
    nav = content.get_nav()
    tags = content.get_tags()
    redirects = content.get_urls()

    def forum_set_tags():
        '''Turns strings containing tag names into tag objects that
        can be used to generate HTML/CSS renderings of the tag.
        '''
        for thread in threads:
            for post in threads[thread]['posts']:
                if 'tags' in post:
                    for i in range(len(post['tags'])):
                        post['tags'][i] = tags[post['tags'][i]]

    def forum_page(name):
        '''Makes a forum page of the given thread name.
        '''
        thread = threads[name]
        html = '%s :: %s :: %s' % (thread['title'], data['forum'], data['site'])
        main = '%s -- %s' % (data['site'], data['forum'])
        title = { 'html' : html,
                  'main' : main,
                  'thread' : thread['title'],
                  'url' : data['forum_url'] }
        return render_template('forum.html',
                               style='night',
                               sidebar=nav,
                               title=title,
                               posts=thread['posts'],
                               threaded=thread['threaded'])

    def setup_url_rule(urls, action):
        '''Sets up URL rules, given a dictionary (or iterable) of urls
        and a function to apply to each rule's text when that URL is
        accessed. For a dict the text is the mapped value, otherwise
        the URL itself.
        '''
        is_dict = type(urls) == dict
        for url in urls:
            text = urls[url] if is_dict else url
            # Bug fix: the original registered ``lambda text: lambda:
            # action(text)`` — a one-argument function — as the view, so
            # Flask could never call it. Bind ``text`` via a default
            # argument instead, giving a zero-argument view that applies
            # ``action`` to this rule's text (and avoiding the classic
            # late-binding closure trap).
            app.add_url_rule(url, url, lambda text=text: action(text))

    forum_set_tags()
    css = {}
    for theme in themes:
        css[themes[theme]['url']] = theme
    setup_url_rule(redirects, redirect)
    setup_url_rule(css, lambda theme: api.make_css(themes[theme]))
    setup_url_rule(threads.keys(), forum_page)
| # -*- coding: utf-8 -*-
# Copyright (c) 2012 Michael Babich
# See LICENSE.txt or http://opensource.org/licenses/MIT
'''Acts as an interface between what Flask serves and what goes on in
the rest of the application.
'''
from avenue import app, api
from avenue.database import content
from flask import render_template, make_response, redirect
def url_generator():
    '''This function acts on a list of URLs, a text rule for each URL,
    and a function that says what to do to that text rule to serve a
    page. The action_list associates a subset of URLs with a
    particular function to be used as the action for that group.
    '''
    data = api.read_data('forum')
    threads = data['threads']
    content.insert_data()
    themes = content.get_themes()
    nav = content.get_nav()
    tags = content.get_tags()
    redirects = content.get_urls()
    css = {}
    for theme in themes:
        css[themes[theme]['url']] = theme

    def forum_set_tags():
        '''Turns strings containing tag names into tag objects that
        can be used to generate HTML/CSS renderings of the tag.
        '''
        for thread in threads:
            for post in threads[thread]['posts']:
                if 'tags' in post:
                    for i in range(len(post['tags'])):
                        post['tags'][i] = tags[post['tags'][i]]

    def forum_page(name):
        '''Makes a forum page of the given thread name.
        '''
        thread = threads[name]
        html = '%s :: %s :: %s' % (thread['title'], data['forum'], data['site'])
        main = '%s -- %s' % (data['site'], data['forum'])
        title = { 'html' : html,
                  'main' : main,
                  'thread' : thread['title'],
                  'url' : data['forum_url'] }
        return render_template('forum.html',
                               style='night',
                               sidebar=nav,
                               title=title,
                               posts=thread['posts'],
                               threaded=thread['threaded'])

    def setup_url_rule(urls, action):
        '''Sets up URL rules, given a dictionary (or iterable) of urls
        and a function to apply to each rule's text when that URL is
        accessed. For a dict the text is the mapped value, otherwise
        the URL itself.
        '''
        is_dict = type(urls) == dict
        for url in urls:
            text = urls[url] if is_dict else url
            # Bug fix: the original registered ``lambda text: lambda:
            # action(text)`` — a one-argument function — as the view, so
            # Flask could never call it. Bind ``text`` via a default
            # argument instead, giving a zero-argument view that applies
            # ``action`` to this rule's text (and avoiding the classic
            # late-binding closure trap).
            app.add_url_rule(url, url, lambda text=text: action(text))

    forum_set_tags()
    setup_url_rule(redirects, redirect)
    setup_url_rule(css, lambda theme:
                   api.make_css(themes[theme]))
    setup_url_rule(threads.keys(), forum_page)
| Python | 0 |
50d3fcb1ad4326a55bb156fd641ce40bf52a9a51 | rework router | ldapdb/router.py | ldapdb/router.py | # -*- coding: utf-8 -*-
#
# django-ldapdb
# Copyright (c) 2009-2010, Bolloré telecom
# All rights reserved.
#
# See AUTHORS file for a full list of contributors.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Bolloré telecom nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
def is_ldap_model(model):
    """Return True when *model* looks like an LDAP-backed model.

    LDAP models are recognised purely by the presence of a ``base_dn``
    attribute on the class (or instance).
    """
    # FIXME: there is probably a better check than testing 'base_dn'
    has_marker = hasattr(model, 'base_dn')
    return has_marker
class Router(object):
    """Database router that sends every LDAP-backed model to the 'ldap'
    connection and leaves all other models to the default routing."""

    def db_for_read(self, model, **hints):
        """Point read operations on LDAP models to 'ldap'."""
        return 'ldap' if is_ldap_model(model) else None

    def db_for_write(self, model, **hints):
        """Point write operations on LDAP models to 'ldap'."""
        return 'ldap' if is_ldap_model(model) else None
| # -*- coding: utf-8 -*-
#
# django-ldapdb
# Copyright (c) 2009-2010, Bolloré telecom
# All rights reserved.
#
# See AUTHORS file for a full list of contributors.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Bolloré telecom nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
class Router(object):
    """Database router that directs all operations on LDAP models to the
    'ldap' connection and leaves other models to the default routing."""

    def db_for_read(self, model, **hints):
        "Point all operations on LDAP models to 'ldap'"
        from ldapdb.models import Model
        # Bug fix: ``Model in model.__bases__`` only matches *direct*
        # subclasses, so grandchildren of Model were routed to the default
        # database. issubclass covers the whole inheritance chain.
        if issubclass(model, Model):
            return 'ldap'
        return None

    def db_for_write(self, model, **hints):
        "Point all operations on LDAP models to 'ldap'"
        from ldapdb.models import Model
        if issubclass(model, Model):
            return 'ldap'
        return None
| Python | 0.000001 |
84bbcdcb547468385291a420f6d88eb594c03fd1 | Add lex and handle semicolons correctly | twosheds/shell.py | twosheds/shell.py | """
twosheds.shell
~~~~~~~~~~~~~~
This module implements the central user interface for access to an
operating system's kernel services.
"""
import os
import subprocess
import sys
import traceback
class Shell(object):
    """The shell is an sh-compatible command language interpreter that executes
    commands read from standard input.
    """
    # Default built-in commands, handled in-process instead of spawning a
    # subprocess. 'cd' must be a builtin: a child process cannot change the
    # shell's own working directory.
    BUILTINS = {'cd': os.chdir}

    def __init__(self, aliases=None, builtins=None):
        self.aliases = aliases or {}
        self.builtins = builtins or self.BUILTINS

    @property
    def prompt(self):
        """Indicate to the user that the shell is waiting for a command."""
        return "$ "

    def output(self, msg):
        """Output a message."""
        sys.stdout.write(msg)

    def error(self, msg):
        """Output an error."""
        sys.stderr.write(msg)

    def read(self):
        """Accept a line from the user and yield each expanded
        semicolon-separated command in turn.

        Raises SystemExit on end-of-file (Ctrl-D).
        """
        try:
            # NOTE: raw_input is Python 2; this module targets Python 2.
            lines = self.lex(raw_input(self.prompt)).split(";")
            for line in lines:
                yield self.expand(line)
        except EOFError:
            raise SystemExit()

    def lex(self, line):
        """Surround statement separators with whitespace so they tokenize."""
        return line.replace(";", " ; ")

    def expand_aliases(self, line):
        """Expand aliases in a line."""
        try:
            command, args = line.split(" ", 1)
        except ValueError:
            # No space in the line: the whole line is the command.
            command, args = line, ""
        try:
            return "%s %s" % (self.aliases[command], args)
        except KeyError:
            return line

    def expand_variables(self, line):
        """Expand environmental variables in a line."""
        tokens = line.split()
        new_tokens = []
        for token in tokens:
            if token.startswith("$"):
                try:
                    token = os.environ[token[1:]]
                except KeyError:
                    # Unknown variables are left as-is rather than emptied.
                    pass
            new_tokens.append(token)
        return " ".join(new_tokens)

    def expand(self, line):
        """Expand any macros in a command."""
        return self.expand_variables(self.expand_aliases(line))

    def eval(self, line):
        """Evaluate an input."""
        if not line:
            return
        tokens = line.split()
        # Bug fix: splitting "a;;b" or a trailing ";" yields whitespace-only
        # segments, which previously crashed on tokens[0]; treat them as
        # empty commands.
        if not tokens:
            return
        command, args = tokens[0], tokens[1:]
        # Bug fix: look the builtin up explicitly instead of catching
        # KeyError, which previously also swallowed KeyErrors raised *inside*
        # a builtin and wrongly re-ran the line as a subprocess.
        builtin = self.builtins.get(command)
        if builtin is not None:
            builtin(*args)
        else:
            subprocess.call(line, shell=True)

    def interact(self, banner=None):
        """Interact with the user.

        The optional banner argument specifies the banner to print before the
        first interaction. By default, no banner is printed.
        """
        if banner:
            print(banner)
        while True:
            try:
                for command in self.read():
                    self.eval(command)
            except SystemExit:
                break
            except:
                # Report the failure but keep the shell alive.
                self.error(traceback.format_exc())
| """
twosheds.shell
~~~~~~~~~~~~~~
This module implements the central user interface for access to an
operating system's kernel services.
"""
import os
import subprocess
import sys
import traceback
class Shell(object):
    """The shell is an sh-compatible command language interpreter that executes
    commands read from standard input.
    """
    # Default built-in commands, handled in-process instead of spawning a
    # subprocess ('cd' cannot work as a child process).
    BUILTINS = {'cd': os.chdir}

    def __init__(self, aliases=None, builtins=None):
        self.aliases = aliases or {}
        self.builtins = builtins or self.BUILTINS

    @property
    def prompt(self):
        """Indicate to the user that the shell is waiting for a command."""
        return "$ "

    def output(self, msg):
        """Output a message."""
        sys.stdout.write(msg)

    def error(self, msg):
        """Output an error."""
        sys.stderr.write(msg)

    def read(self):
        """Accept a command from the user."""
        # NOTE: raw_input is Python 2; raises SystemExit on EOF (Ctrl-D).
        try:
            return self.expand(raw_input(self.prompt))
        except EOFError:
            raise SystemExit()

    def expand_aliases(self, line):
        """Expand aliases in a line."""
        try:
            command, args = line.split(" ", 1)
        except ValueError:
            # No space in the line: the whole line is the command.
            command, args = line, ""
        try:
            return "%s %s" % (self.aliases[command], args)
        except KeyError:
            return line

    def expand_variables(self, line):
        """Expand environmental variables in a line."""
        tokens = line.split()
        new_tokens = []
        for token in tokens:
            if token.startswith("$"):
                try:
                    token = os.environ[token[1:]]
                except KeyError:
                    # Unknown variables are left as-is rather than emptied.
                    pass
            new_tokens.append(token)
        return " ".join(new_tokens)

    def expand(self, line):
        """Expand any macros in a command."""
        return self.expand_variables(self.expand_aliases(line))

    def eval(self, line):
        """Evaluate an input."""
        if not line:
            return
        tokens = line.split()
        # NOTE(review): whitespace-only input leaves tokens empty and raises
        # IndexError on tokens[0]; the except KeyError below also swallows
        # KeyErrors raised *inside* a builtin and re-runs the line in a shell.
        command, args = tokens[0], tokens[1:]
        try:
            self.builtins[command](*args)
        except KeyError:
            subprocess.call(line, shell=True)

    def interact(self, banner=None):
        """Interact with the user.

        The optional banner argument specifies the banner to print before the
        first interaction. By default, no banner is printed.
        """
        if banner:
            print(banner)
        while True:
            try:
                self.eval(self.read())
            except SystemExit:
                break
            except:
                # Report the failure but keep the shell alive.
                self.error(traceback.format_exc())
| Python | 0.000003 |
9b8ac5d82771a39773d39d6fd607dc7b2b304e37 | factor out common code in use_*() | txaio/__init__.py | txaio/__init__.py | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from txaio.interfaces import IFailedFuture
# This is the API
# see tx.py for Twisted implementation
# see aio.py for asyncio/trollius implementation
class _Config:
    """
    This holds all valid configuration options, accessed as
    class-level variables. For example, if you were using asyncio:

    .. sourcecode:: python

        txaio.config.loop = asyncio.get_event_loop()

    ``loop`` is populated automatically (while importing one of the
    framework-specific libraries) but can be changed before any call
    into this library. Currently, it's only used by :meth:`call_later`

    If using asyncio, you must set this to an event-loop (by default,
    we use asyncio.get_event_loop). If using Twisted, set this to a
    reactor instance (by default we "from twisted.internet import
    reactor" on the first call to call_later)
    """
    #: the event-loop object to use: an asyncio event loop or a Twisted
    #: reactor, depending on which framework is active (see above)
    loop = None
# Public API of this package. Everything listed here except the two
# framework selectors is overwritten with the chosen framework's
# implementation by use_twisted() / use_asyncio().
__all__ = (
    'using_twisted',            # True if we're using Twisted
    'using_asyncio',            # True if we're using asyncio
    'use_twisted',              # sets the library to use Twisted, or exception
    'use_asyncio',              # sets the library to use asyncio, or exception
    'config',                   # the config instance, access via attributes
    'create_future',            # create a Future (can be already resolved/errored)
    'as_future',                # call a method, and always return a Future
    'reject',                   # errback a Future
    'resolve',                  # callback a Future
    'add_callbacks',            # add callback and/or errback
    'gather',                   # return a Future waiting for several other Futures
    'IFailedFuture',            # describes API for arg to errback()s
)
def use_twisted():
    """Select Twisted as the framework backing this module's API."""
    from txaio import tx
    _use_framework(tx)
    import txaio
    txaio.using_twisted = True
    txaio.using_asyncio = False
def use_asyncio():
    """Select asyncio as the framework backing this module's API."""
    from txaio import aio
    _use_framework(aio)
    import txaio
    txaio.using_twisted = False
    txaio.using_asyncio = True
def _use_framework(module):
    """
    Internal helper: copy every public API callable from *module* onto the
    txaio package, leaving the two framework selectors untouched.
    """
    import txaio
    selectors = ('use_twisted', 'use_asyncio')
    for name in __all__:
        if name in selectors:
            continue
        setattr(txaio, name, getattr(module, name))
# Import-time default: prefer the Twisted implementation and fall back to
# the asyncio/trollius implementation when Twisted is not installed.
try:
    from txaio.tx import * # noqa
    using_twisted = True
except ImportError:
    try:
        from txaio.aio import * # noqa
        using_asyncio = True
    except ImportError: # pragma: no cover
        # pragma: no cover
        raise ImportError("Neither asyncio nor Twisted found.")
| ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from txaio.interfaces import IFailedFuture
# This is the API
# see tx.py for Twisted implementation
# see aio.py for asyncio/trollius implementation
class _Config:
"""
This holds all valid configuration options, accessed as
class-level variables. For example, if you were using asyncio:
.. sourcecode:: python
txaio.config.loop = asyncio.get_event_loop()
``loop`` is populated automatically (while importing one of the
framework-specific libraries) but can be changed before any call
into this library. Currently, it's only used by :meth:`call_later`
If using asyncio, you must set this to an event-loop (by default,
we use asyncio.get_event_loop). If using Twisted, set this to a
reactor instance (by default we "from twisted.internet import
reactor" on the first call to call_later)
"""
#: the event-loop object to use
loop = None
__all__ = (
'using_twisted', # True if we're using Twisted
'using_asyncio', # True if we're using asyncio
'use_twisted', # sets the library to use Twisted, or exception
'use_asyncio', # sets the library to use asyncio, or exception
'config', # the config instance, access via attributes
'create_future', # create a Future (can be already resolved/errored)
'as_future', # call a method, and always return a Future
'reject', # errback a Future
'resolve', # callback a Future
'add_callbacks', # add callback and/or errback
'gather', # return a Future waiting for several other Futures
'IFailedFuture', # describes API for arg to errback()s
)
def use_twisted():
from txaio import tx
import txaio
txaio.using_twisted = True
txaio.using_asyncio = False
for method_name in __all__:
if method_name in ['use_twisted', 'use_asyncio']:
continue
twisted_method = getattr(tx, method_name)
setattr(txaio, method_name, twisted_method)
def use_asyncio():
from txaio import aio
import txaio
txaio.using_twisted = False
txaio.using_asyncio = True
for method_name in __all__:
if method_name in ['use_twisted', 'use_asyncio']:
continue
twisted_method = getattr(aio, method_name)
setattr(txaio, method_name, twisted_method)
try:
from txaio.tx import * # noqa
using_twisted = True
except ImportError:
try:
from txaio.aio import * # noqa
using_asyncio = True
except ImportError: # pragma: no cover
# pragma: no cover
raise ImportError("Neither asyncio nor Twisted found.")
| Python | 0.998509 |
9501ab023a51ca6f3e37fcad3c9c9ff04223986b | update version to 0.4 | txjsonrpc/meta.py | txjsonrpc/meta.py | display_name = "txJSON-RPC"
library_name = "txjsonrpc"
version = "0.4"
author = "Duncan McGreggor"
author_email = "oubiwann@adytum.us"
license = "BSD, GPL"
url = "http://launchpad.net/%s" % library_name
description = "Code for creatig Twisted JSON-RPC servers and clients."
| display_name = "txJSON-RPC"
library_name = "txjsonrpc"
version = "0.3.1"
author = "Duncan McGreggor"
author_email = "oubiwann@adytum.us"
license = "BSD, GPL"
url = "http://launchpad.net/%s" % library_name
description = "Code for creatig Twisted JSON-RPC servers and clients."
| Python | 0.000001 |
0dc29df1e97b8c5f36320b55c659c8290f021c69 | Fix parallelization of number of topics script | DilipadTopicModelling/experiment_number_of_topics.py | DilipadTopicModelling/experiment_number_of_topics.py | import logging
import glob
from multiprocessing import Pool
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
def run_sampler(corpus, nTopics, nIter, beta, out_dir):
alpha = 50.0/nTopics
logger.info('running Gibbs sampler (nTopics: {}, nIter: {}, alpha: {}, '
'beta: {})'.format(nTopics, nIter, alpha, beta))
sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter,
alpha=alpha, beta=beta, beta_o=beta,
out_dir=out_dir.format(nTopics))
sampler._initialize()
sampler.run()
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
#logger.setLevel(logging.INFO)
files = glob.glob('/home/jvdzwaan/data/dilipad/20112012/gov_opp/*')
out_dir = '/home/jvdzwaan/data/dilipad/res_20112012/{}'
corpus = CPTCorpus(files, testSplit=20)
corpus.filter_dictionaries(minFreq=5, removeTopTF=100, removeTopDF=100)
corpus.save_dictionaries(directory=out_dir.format(''))
corpus.save(out_dir.format('corpus.json'))
#corpus = CPTCorpus.load(out_dir.format('corpus.json'),
# topicDict=out_dir.format('topicDict.dict'),
# opinionDict=out_dir.format('opinionDict.dict'))
nIter = 200
beta = 0.02
nTopics = range(20, 201, 20)
logger.info('running Gibbs sampler for {} configurations'.format(len(nTopics)))
pool = Pool(processes=3)
results = [pool.apply_async(run_sampler, args=(corpus, n, nIter, beta,
out_dir))
for n in nTopics]
pool.close()
pool.join()
| import logging
import glob
from multiprocessing import Process
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
def run_sampler(corpus, nTopics, nIter, beta, out_dir):
alpha = 50.0/nTopics
logger.info('running Gibbs sampler (nTopics: {}, nIter: {}, alpha: {}, '
'beta: {})'.format(nTopics, nIter, alpha, beta))
sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter,
alpha=alpha, beta=beta, beta_o=beta,
out_dir=out_dir.format(nTopics))
sampler._initialize()
sampler.run()
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
#logger.setLevel(logging.INFO)
files = glob.glob('/home/jvdzwaan/data/tmp/test/*')
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'
corpus = CPTCorpus(files, testSplit=20)
corpus.filter_dictionaries(minFreq=5, removeTopTF=100, removeTopDF=100)
corpus.save_dictionaries(directory=out_dir.format(''))
corpus.save(out_dir.format('corpus.json'))
#corpus = CPTCorpus.CPTCorpus.load('{}corpus.json'.format(out_dir),
# topicDict='{}/topicDict.dict'.format(out_dir),
# opinionDict='{}/opinionDict.dict'.format(out_dir))
nIter = 200
beta = 0.02
nTopics = range(20, 201, 20)
logger.info('running Gibbs sampler for {} configurations'.format(len(nTopics)))
processes = [Process(target=run_sampler,
args=(corpus, n, nIter, beta, out_dir))
for n in nTopics]
# Run processes
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
| Python | 0.000141 |
443774e9adbd59d13ac4f6c076a22f67a9a113f1 | Fix courses liste | Rules/Courses.py | Rules/Courses.py | from ._shared import *
class Courses(Rule):
"""Handles shopping list"""
def __init__(self, bot):
self.bot = bot
def __call__(self, serv, author, args):
"""Handles shopping list"""
if len(args) < 3:
if len(args) == 2 and args[1] == "liste":
query = ("SELECT item, author, date FROM shopping WHERE bought=0")
try:
bdd = self.bot.mysql_connect(serv)
assert(bdd is not None)
except AssertionError:
return
bdd_cursor = bdd.cursor()
bdd_cursor.execute(query)
serv.privmsg(author, 'Voici la liste de courses (également consultable sur http://hackens.org/jarvis?do=courses)')
for row in bdd_cursor:
serv.privmsg(author, '{0} (ajouté par {1} le {2})'.format(**row))
self.bot.ans(serv, author, "Liste de courses envoyée en PM.")
else:
raise InvalidArgs
try:
comment = " ".join(args[3:])
except KeyError:
comment = ""
if args[1] == "acheter":
query = ("SELECT COUNT(*) as nb FROM shopping WHERE item=%s AND " +
"comment LIKE %s")
values = (args[2], '%'+comment+'%')
try:
bdd = self.bot.mysql_connect(serv)
assert(bdd is not None)
except AssertionError:
return
bdd_cursor = bdd.cursor()
bdd_cursor.execute(query, values)
row = bdd_cursor.fetchone()
if row[0] > 0:
self.bot.ans(serv,
author,
"Item déjà présent dans la liste de courses")
return
query = ("INSERT INTO shopping(item, author, comment, date, " +
"bought) VALUES(%s, %s, %s, %s, 0)")
values = (args[2], author, comment, datetime.datetime.now())
bdd_cursor.execute(query, values)
self.bot.ans(serv, author, "Item ajouté à la liste de courses.")
bdd_cursor.close()
bdd.close()
elif args[1] == "annuler":
query = ("SELECT COUNT(*) as nb FROM shopping WHERE item=%s AND " +
"comment LIKE %s")
values = (args[2], '%'+comment+'%')
try:
bdd = self.bot.mysql_connect(serv)
assert(bdd is not None)
except AssertionError:
return
bdd_cursor = bdd.cursor()
bdd_cursor.execute(query, values)
row = bdd_cursor.fetchone()
if row[0] > 1:
self.bot.ans(serv, author,
"Requêtes trop ambiguë. Plusieurs entrées " +
"correspondent.")
return
query = ("DELETE FROM shopping WHERE item=%s AND " +
"comment LIKE %s")
bdd_cursor.execute(query, values)
self.bot.ans(serv, author, "Item supprimé de la liste de courses.")
bdd_cursor.close()
bdd.close()
elif args[1] == "acheté":
query = ("SELECT COUNT(*) as nb FROM shopping WHERE item=%s AND " +
"comment LIKE %s AND bought=0")
values = (args[2], '%'+comment+'%')
try:
bdd = self.bot.mysql_connect(serv)
assert(bdd is not None)
except AssertionError:
return
bdd_cursor = bdd.cursor()
bdd_cursor.execute(query, values)
row = bdd_cursor.fetchone()
if row[0] > 1:
self.bot.ans(serv, author,
"Requêtes trop ambiguë. Plusieurs entrées " +
"correspondent.")
return
query = ("UPDATE shopping SET bought=1 WHERE item=%s AND " +
"comment LIKE %s AND bought=0")
bdd_cursor.execute(query, values)
self.bot.ans(serv, author, "Item marqué comme acheté.")
bdd_cursor.close()
bdd.close()
else:
raise InvalidArgs
def close(self):
pass
| from ._shared import *
class Courses(Rule):
"""Handles shopping list"""
def __init__(self, bot):
self.bot = bot
def __call__(self, serv, author, args):
"""Handles shopping list"""
if len(args) < 3:
raise InvalidArgs
try:
comment = " ".join(args[3:])
except KeyError:
comment = ""
if args[1] == "acheter":
query = ("SELECT COUNT(*) as nb FROM shopping WHERE item=%s AND " +
"comment LIKE %s")
values = (args[2], '%'+comment+'%')
try:
bdd = self.bot.mysql_connect(serv)
assert(bdd is not None)
except AssertionError:
return
bdd_cursor = bdd.cursor()
bdd_cursor.execute(query, values)
row = bdd_cursor.fetchone()
if row[0] > 0:
self.bot.ans(serv,
author,
"Item déjà présent dans la liste de courses")
return
query = ("INSERT INTO shopping(item, author, comment, date, " +
"bought) VALUES(%s, %s, %s, %s, 0)")
values = (args[2], author, comment, datetime.datetime.now())
bdd_cursor.execute(query, values)
self.bot.ans(serv, author, "Item ajouté à la liste de courses.")
bdd_cursor.close()
bdd.close()
elif args[1] == "annuler":
query = ("SELECT COUNT(*) as nb FROM shopping WHERE item=%s AND " +
"comment LIKE %s")
values = (args[2], '%'+comment+'%')
try:
bdd = self.bot.mysql_connect(serv)
assert(bdd is not None)
except AssertionError:
return
bdd_cursor = bdd.cursor()
bdd_cursor.execute(query, values)
row = bdd_cursor.fetchone()
if row[0] > 1:
self.bot.ans(serv, author,
"Requêtes trop ambiguë. Plusieurs entrées " +
"correspondent.")
return
query = ("DELETE FROM shopping WHERE item=%s AND " +
"comment LIKE %s")
bdd_cursor.execute(query, values)
self.bot.ans(serv, author, "Item supprimé de la liste de courses.")
bdd_cursor.close()
bdd.close()
elif args[1] == "acheté":
query = ("SELECT COUNT(*) as nb FROM shopping WHERE item=%s AND " +
"comment LIKE %s AND bought=0")
values = (args[2], '%'+comment+'%')
try:
bdd = self.bot.mysql_connect(serv)
assert(bdd is not None)
except AssertionError:
return
bdd_cursor = bdd.cursor()
bdd_cursor.execute(query, values)
row = bdd_cursor.fetchone()
if row[0] > 1:
self.bot.ans(serv, author,
"Requêtes trop ambiguë. Plusieurs entrées " +
"correspondent.")
return
query = ("UPDATE shopping SET bought=1 WHERE item=%s AND " +
"comment LIKE %s AND bought=0")
bdd_cursor.execute(query, values)
self.bot.ans(serv, author, "Item marqué comme acheté.")
bdd_cursor.close()
bdd.close()
elif args[1] == "liste":
query = ("SELECT item, author, date FROM shopping WHERE bought=0 AND item LIKE %s")
values = (args[2],)
try:
bdd = self.bot.mysql_connect(serv)
assert(bdd is not None)
except AssertionError:
return
bdd_cursor = bdd.cursor()
bdd_cursor.execute(query, values)
for row in bdd_cursor:
serv.privmsg(author, '{item} (ajouté par {author} le {date})'.format(**row))
self.bot.ans(serv, author, "Liste de courses envoyée en PM.")
else:
raise InvalidArgs
def close(self):
pass
| Python | 0.000019 |
0f10ec94a7a62968aeafe10c55913e08bb0c7ce6 | Fix Bug: Type Error | Scripts/Judge.py | Scripts/Judge.py | #--coding:utf-8--
import re
import json
import chardet
class Judge():
def __init__(self,
SurnameCharacter = 'Surname.Chinese.json', SurnamePinyin = 'Surname.Pinyin.json'):
# self.SurnameCharacter = json.load(open(SurnameCharacter, 'rb'))
self.SurnamePinyin = json.load(open(SurnamePinyin, 'rb'))
self.Extractor = re.compile(r'^([\w]+)[ ]?.*?[ ]?(?:([\w]*)$)')
self.NotChineseCharacter = re.compile(ur'^[^\u4e00-\u9fa5]*$')
def SurnameJudge(self, Name):
Name = Name.decode(chardet.detect(Name).get('encoding', 'utf-8'))
if self.NotChineseCharacter.search(Name) == None: # True if Name contains Chinese Characters.
return True
Name = Name.lower()
Surname = self.Extractor.findall(Name)[0]
for element in Surname:
try:
if self.SurnamePinyin[element]:
return True
except KeyError:
pass
return False
def DescriptionJudge(self, Description):
Description = Description.decode(chardet.detect(Description).get('encoding', 'utf-8'))
if self.NotChineseCharacter.search(Description) == None: # Ture if Description contains Chinese Characters.
return True
return False
| #--coding:utf-8--
import re
import json
import chardet
class Judge():
def __init__(self,
SurnameCharacter = 'Surname.Chinese.json', SurnamePinyin = 'Surname.Pinyin.json'):
# self.SurnameCharacter = json.load(open(SurnameCharacter, 'rb'))
self.SurnamePinyin = json.load(open(SurnamePinyin, 'rb'))
self.Extractor = re.compile(r'^([\w]+)[ ]?.*?[ ]?(?:([\w]*)$)')
self.NotChineseCharacter = re.compile(ur'^[^\u4e00-\u9fa5]*$')
def SurnameJudge(self, Name):
Name = Name.decode(chardet.detect(Name)['encoding'])
if self.NotChineseCharacter.search(Name) == None: # True if Name contains Chinese Characters.
return True
Name = Name.lower()
Surname = self.Extractor.findall(Name)[0]
for element in Surname:
try:
if self.SurnamePinyin[element]:
return True
except KeyError:
pass
return False
def DescriptionJudge(self, Description):
Description = Description.decode(chardet.detect(Description)['encoding'])
if self.NotChineseCharacter.search(Description) == None: # Ture if Description contains Chinese Characters.
return True
return False
| Python | 0 |
16a54fc100874159da7212e35361e5c7110a7ab2 | Add /start route for expeditions | kancolle/api/expedition.py | kancolle/api/expedition.py | """Expedition blueprint."""
import datetime
from flask import Blueprint, g
from flask import request, abort
import time
import util
from db import Expedition, Fleet, Admiral
from util import prepare_api_blueprint, svdata
api_mission = Blueprint("api_mission", __name__)
prepare_api_blueprint(api_mission)
@api_mission.route("/start", methods=["GET", "POST"])
def start_mission():
# This is mostly an internal method.
# This sets up the fleet for an expedition, sending them out.
# First, get the required data from the request.
fleet_id = int(request.values.get("api_deck_id")) - 1
expedition_id = int(request.values.get("api_mission"))
# There's an extra value, api_mission.
# No idea what it does.
# Also, api_serial_cid
# This is presumably an anti-bot method by DMM.
# We don't have these, because we don't have the game source code (and never will)
# So we ignore this
# Get the expedition requested by the ID.
expedition = Expedition.query.filter(Expedition.id == expedition_id).first_or_404()
# Get the fleet requested by the ID.
try:
fleet = g.admiral.fleets[fleet_id]
except IndexError:
abort(404)
return
# Set the fleet up.
if fleet.expedition is not None:
# Nice try.
abort(400)
return
# Set the expedition && time.
fleet.expedition = expedition
fleet.expedition_completed = time.time() + expedition.time_taken
# Internal state updated, now to reflect this state on the rest of the app.
return svdata(
{"api_complatetime": util.
millisecond_timestamp(datetime.datetime.now() + datetime.timedelta(seconds=expedition.time_taken)),
"api_complatetime_str": datetime.datetime.fromtimestamp(fleet.expedition_completed / 1000)
.strftime('%Y-%m-%d %H:%M:%S')
})
| """Expedition blueprint."""
from flask import Blueprint
from util import prepare_api_blueprint
api_mission = Blueprint("api_mission", __name__)
prepare_api_blueprint(api_mission) | Python | 0 |
e75dca8d0b5b1872c509d6f1fa4bc880743a7f45 | fix crash with control layers | GlyphNote.glyphsPalette/Contents/Resources/plugin.py | GlyphNote.glyphsPalette/Contents/Resources/plugin.py | # encoding: utf-8
#######################################################################################
#
# Palette Plugin
#
# Read the docs:
# https://github.com/schriftgestalt/GlyphsSDK/tree/master/Python%20Templates/Palette
#
#######################################################################################
from objc import IBOutlet, IBAction, nil
from GlyphsApp.plugins import *
from GlyphsApp import UPDATEINTERFACE
class GlyphNote (PalettePlugin):
dialogName = "com.mekkablue.GlyphNote"
dialog = objc.IBOutlet()
noteTextField = objc.IBOutlet()
def settings(self):
self.name = Glyphs.localize({
'en': u'Glyph Note',
'de': u'Glyphennotiz'
})
# The minimum/maximum height of the view in pixels. 'max' must be bigger than 'min'.
self.min = 30
self.max = 700
# Load .nib dialog (without .extension)
self.loadNib('IBdialog', __file__)
def start(self):
Glyphs.addCallback(self.update, UPDATEINTERFACE)
def __del__(self):
Glyphs.removeCallback(self.update, UPDATEINTERFACE)
@objc.IBAction
def setNote_(self, sender):
"""
Sets the glyph note to whatever has been entered
into the text field in the palette.
"""
# Extract font from sender
thisFont = self.windowController().document().font
# We’re in the Edit View
if thisFont.currentTab:
theseGlyphs = [l.parent for l in thisFont.selectedLayers]
# We’re in the Font view
else:
theseGlyphs = [g for g in thisFont.selection]
for thisGlyph in theseGlyphs:
thisGlyph.note = self.noteTextField.stringValue()
def update(self, sender):
# only update if there is a window:
if self.windowController():
theseGlyphs = []
thisFont = self.windowController().document().font
# We’re in the Edit View
if thisFont.currentTab:
theseGlyphs = [l.parent for l in thisFont.selectedLayers]
# We’re in the Font view
else:
theseGlyphs = [g for g in thisFont.selection]
allNotes = []
for thisGlyph in theseGlyphs:
try:
thisNote = thisGlyph.note
if thisNote == "":
thisNote = None
allNotes.append(thisNote)
except:
pass # can happen with control layers
numberOfDifferentNotes = len(set(allNotes))
# update glyph note in palette:
if numberOfDifferentNotes == 1:
self.noteTextField.setPlaceholderString_( Glyphs.localize({
'en': u'Empty glyph note%s.' % ("s" if len(theseGlyphs)>1 else ""),
'de': u'Leere Glyphennotiz%s.' % ("en" if len(theseGlyphs)>1 else ""),
}))
thisGlyphNote = allNotes[0]
if not thisGlyphNote:
thisGlyphNote = ""
self.noteTextField.setStringValue_(thisGlyphNote)
elif numberOfDifferentNotes == 0:
self.noteTextField.setPlaceholderString_(Glyphs.localize({
'en': u'No glyph selected.',
'de': u'Keine Glyphe ausgewählt.',
}))
self.noteTextField.setStringValue_("")
else:
self.noteTextField.setPlaceholderString_(Glyphs.localize({
'en': u'Multiple values.',
'de': u'Mehrere Werte.',
}))
self.noteTextField.setStringValue_("")
def __file__(self):
"""Please leave this method unchanged"""
return __file__
# Temporary Fix
# Sort ID for compatibility with v919 to v976
def setSortID_(self, id):
pass
def sortID(self):
return 0
| # encoding: utf-8
#######################################################################################
#
# Palette Plugin
#
# Read the docs:
# https://github.com/schriftgestalt/GlyphsSDK/tree/master/Python%20Templates/Palette
#
#######################################################################################
from objc import IBOutlet, IBAction, nil
from GlyphsApp.plugins import *
from GlyphsApp import UPDATEINTERFACE
class GlyphNote (PalettePlugin):
dialogName = "com.mekkablue.GlyphNote"
dialog = objc.IBOutlet()
noteTextField = objc.IBOutlet()
def settings(self):
self.name = Glyphs.localize({
'en': u'Glyph Note',
'de': u'Glyphennotiz'
})
# The minimum/maximum height of the view in pixels. 'max' must be bigger than 'min'.
self.min = 30
self.max = 700
# Load .nib dialog (without .extension)
self.loadNib('IBdialog', __file__)
def start(self):
Glyphs.addCallback(self.update, UPDATEINTERFACE)
def __del__(self):
Glyphs.removeCallback(self.update, UPDATEINTERFACE)
@objc.IBAction
def setNote_(self, sender):
"""
Sets the glyph note to whatever has been entered
into the text field in the palette.
"""
# Extract font from sender
thisFont = self.windowController().document().font
# We’re in the Edit View
if thisFont.currentTab:
theseGlyphs = [l.parent for l in thisFont.selectedLayers]
# We’re in the Font view
else:
theseGlyphs = [g for g in thisFont.selection]
for thisGlyph in theseGlyphs:
thisGlyph.note = self.noteTextField.stringValue()
def update(self, sender):
# only update if there is a window:
if self.windowController():
theseGlyphs = []
thisFont = self.windowController().document().font
# We’re in the Edit View
if thisFont.currentTab:
theseGlyphs = [l.parent for l in thisFont.selectedLayers]
# We’re in the Font view
else:
theseGlyphs = [g for g in thisFont.selection]
allNotes = []
for thisGlyph in theseGlyphs:
thisNote = thisGlyph.note
if thisNote == "":
thisNote = None
allNotes.append(thisNote)
numberOfDifferentNotes = len(set(allNotes))
# update glyph note in palette:
if numberOfDifferentNotes == 1:
self.noteTextField.setPlaceholderString_( Glyphs.localize({
'en': u'Empty glyph note%s.' % ("s" if len(theseGlyphs)>1 else ""),
'de': u'Leere Glyphennotiz%s.' % ("en" if len(theseGlyphs)>1 else ""),
}))
thisGlyphNote = theseGlyphs[0].note
if not thisGlyphNote:
thisGlyphNote = ""
self.noteTextField.setStringValue_(thisGlyphNote)
elif numberOfDifferentNotes == 0:
self.noteTextField.setPlaceholderString_(Glyphs.localize({
'en': u'No glyph selected.',
'de': u'Keine Glyphe ausgewählt.',
}))
self.noteTextField.setStringValue_("")
else:
self.noteTextField.setPlaceholderString_(Glyphs.localize({
'en': u'Multiple values.',
'de': u'Mehrere Werte.',
}))
self.noteTextField.setStringValue_("")
def __file__(self):
"""Please leave this method unchanged"""
return __file__
# Temporary Fix
# Sort ID for compatibility with v919 to v976
def setSortID_(self, id):
pass
def sortID(self):
return 0
| Python | 0 |
e7e244c3a9914bc2d562b008b00341cea31d2ef7 | add boolean_labels | lcdblib/utils/utils.py | lcdblib/utils/utils.py | import os
import contextlib
from collections.abc import Iterable
@contextlib.contextmanager
def temp_env(env):
"""
Context manager to temporarily set os.environ.
"""
env = dict(env)
orig = os.environ.copy()
_env = {k: str(v) for k, v in env.items()}
os.environ.update(_env)
try:
yield
finally:
os.environ.clear()
os.environ.update(orig)
def flatten(iter):
"""
Flatten an arbitrarily nested iterable whose innermost items are strings
into a flat list of strings.
"""
if isinstance(iter, dict):
iter = iter.values()
def gen():
for item in iter:
if isinstance(item, dict):
item = item.values()
if isinstance(item, Iterable) and not isinstance(item, str):
yield from flatten(item)
else:
yield item
return list(gen())
def test_flatten():
assert sorted(flatten({
'a': {
'b': {
'c': ['a','b','c'],
},
},
'x': ['e', 'f', 'g'],
'y': {
'z': 'd'
},
})) == ['a', 'b', 'c', 'd', 'e', 'f', 'g']
def updatecopy(orig, update_with, keys=None, override=False):
"""
Update a copy of a dictionary, with a bit more control than the built-in
dict.update.
Parameters
-----------
orig : dict
Dict to update
update_with : dict
Dict with new values
keys : list or None
If not None, then only consider these keys in `update_with`. Otherwise
consider all.
override : bool
If True, then this is similar to `dict.update`, except only those keys
in `keys` will be considered. If False (default), then if a key exists
in both `orig` and `update_with`, no updating will occur so `orig` will
retain its original value.
"""
d = orig.copy()
if keys is None:
keys = update_with.keys()
for k in keys:
if k in update_with:
if k in d and not override:
continue
d[k] = update_with[k]
return d
def boolean_labels(names, idx, mapping={True: 'AND', False: 'NOT'},
strip='AND_'):
"""
Creates labels for boolean lists.
For example:
>>> names = ['exp1', 'exp2', 'exp3']
>>> idx = [True, True, False]
>>> boolean_labels(names, idx)
'exp1_AND_exp2_NOT_exp3'
Parameters
----------
names : list
List of names to include in output
idx : list
List of booleans, same size as `names`
mapping : dict
Linking words to use for True and False
strip : str
Strip this text off the beginning of labels.
given a list of names and a same-size boolean, return strings like
a_NOT_b_AND_c
or
a_AND_b_AND_c_NOT_d_AND_e
"""
s = []
for i, (n, x) in enumerate(zip(names, idx)):
s.append(mapping[x] + '_' + n)
s = '_'.join(s)
if s.startswith(strip):
s = s.replace(strip, '', 1)
return s
| import os
import contextlib
from collections.abc import Iterable
@contextlib.contextmanager
def temp_env(env):
"""
Context manager to temporarily set os.environ.
"""
env = dict(env)
orig = os.environ.copy()
_env = {k: str(v) for k, v in env.items()}
os.environ.update(_env)
try:
yield
finally:
os.environ.clear()
os.environ.update(orig)
def flatten(iter):
"""
Flatten an arbitrarily nested iterable whose innermost items are strings
into a flat list of strings.
"""
if isinstance(iter, dict):
iter = iter.values()
def gen():
for item in iter:
if isinstance(item, dict):
item = item.values()
if isinstance(item, Iterable) and not isinstance(item, str):
yield from flatten(item)
else:
yield item
return list(gen())
def test_flatten():
assert sorted(flatten({
'a': {
'b': {
'c': ['a','b','c'],
},
},
'x': ['e', 'f', 'g'],
'y': {
'z': 'd'
},
})) == ['a', 'b', 'c', 'd', 'e', 'f', 'g']
def updatecopy(orig, update_with, keys=None, override=False):
"""
Update a copy of a dictionary, with a bit more control than the built-in
dict.update.
Parameters
-----------
orig : dict
Dict to update
update_with : dict
Dict with new values
keys : list or None
If not None, then only consider these keys in `update_with`. Otherwise
consider all.
override : bool
If True, then this is similar to `dict.update`, except only those keys
in `keys` will be considered. If False (default), then if a key exists
in both `orig` and `update_with`, no updating will occur so `orig` will
retain its original value.
"""
d = orig.copy()
if keys is None:
keys = update_with.keys()
for k in keys:
if k in update_with:
if k in d and not override:
continue
d[k] = update_with[k]
return d
| Python | 0.998838 |
ae03b5bf4c0ff2c5104bb7b7826adc135528788d | Change ProductMedia url property to return correctly | shoop/core/models/product_media.py | shoop/core/models/product_media.py | # -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import with_statement
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from easy_thumbnails.files import get_thumbnailer
from enumfields import Enum, EnumIntegerField
from filer.fields.file import FilerFileField
from parler.models import TranslatableModel, TranslatedFields
from shoop.core.fields import InternalIdentifierField
class ProductMediaKind(Enum):
GENERIC_FILE = 1
IMAGE = 2
DOCUMENTATION = 3
SAMPLE = 4
class Labels:
GENERIC_FILE = _('file')
IMAGE = _('image')
DOCUMENTATION = _('documentation')
SAMPLE = _('sample')
@python_2_unicode_compatible
class ProductMedia(TranslatableModel):
identifier = InternalIdentifierField(unique=True)
product = models.ForeignKey("Product", related_name="media", on_delete=models.CASCADE)
shops = models.ManyToManyField("Shop", related_name="product_media")
kind = EnumIntegerField(
ProductMediaKind, db_index=True, default=ProductMediaKind.GENERIC_FILE, verbose_name=_('kind')
)
file = FilerFileField(blank=True, null=True, verbose_name=_('file'), on_delete=models.CASCADE)
external_url = models.URLField(
blank=True, null=True, verbose_name=u'URL',
help_text=_("Enter URL to external file. If this field is filled, the selected media doesn't apply.")
)
ordering = models.IntegerField(default=0)
# Status
enabled = models.BooleanField(db_index=True, default=True, verbose_name=_("enabled"))
public = models.BooleanField(default=True, blank=True, verbose_name=_('public (shown on product page)'))
purchased = models.BooleanField(
default=False, blank=True, verbose_name=_('purchased (shown for finished purchases)')
)
translations = TranslatedFields(
title=models.CharField(blank=True, max_length=128, verbose_name=_('title')),
description=models.TextField(blank=True, verbose_name=_('description')),
)
class Meta:
verbose_name = _('product attachment')
verbose_name_plural = _('product attachments')
ordering = ["ordering", ]
def __str__(self): # pragma: no cover
return self.effective_title
@property
def effective_title(self):
title = self.safe_translation_getter("title")
if title:
return title
if self.file_id:
return self.file.label
if self.external_url:
return self.external_url
return _('attachment')
@property
def url(self):
if self.external_url:
return self.external_url
if self.file:
return self.file.url
return ""
@property
def easy_thumbnails_thumbnailer(self):
"""
Get `Thumbnailer` instance.
Will return `None` if file cannot be thumbnailed.
:rtype:easy_thumbnails.files.Thumbnailer|None
"""
if not self.file_id:
return None
if self.kind != ProductMediaKind.IMAGE:
return None
return get_thumbnailer(self.file)
def get_thumbnail(self, **kwargs):
"""
Get thumbnail for image
This will return `None` if there is no file or kind is not `ProductMediaKind.IMAGE`
:rtype: easy_thumbnails.files.ThumbnailFile|None
"""
kwargs.setdefault("size", (64, 64))
kwargs.setdefault("crop", True) # sane defaults
kwargs.setdefault("upscale", True) # sane defaults
if kwargs["size"] is (0, 0):
return None
thumbnailer = self.easy_thumbnails_thumbnailer
if not thumbnailer:
return None
return thumbnailer.get_thumbnail(thumbnail_options=kwargs)
| # -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import with_statement
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from easy_thumbnails.files import get_thumbnailer
from enumfields import Enum, EnumIntegerField
from filer.fields.file import FilerFileField
from parler.models import TranslatableModel, TranslatedFields
from shoop.core.fields import InternalIdentifierField
class ProductMediaKind(Enum):
GENERIC_FILE = 1
IMAGE = 2
DOCUMENTATION = 3
SAMPLE = 4
class Labels:
GENERIC_FILE = _('file')
IMAGE = _('image')
DOCUMENTATION = _('documentation')
SAMPLE = _('sample')
@python_2_unicode_compatible
class ProductMedia(TranslatableModel):
    """A media item (file, image, document or sample) attached to a product."""
    identifier = InternalIdentifierField(unique=True)
    product = models.ForeignKey("Product", related_name="media", on_delete=models.CASCADE)
    shops = models.ManyToManyField("Shop", related_name="product_media")
    kind = EnumIntegerField(
        ProductMediaKind, db_index=True, default=ProductMediaKind.GENERIC_FILE, verbose_name=_('kind')
    )
    file = FilerFileField(blank=True, null=True, verbose_name=_('file'), on_delete=models.CASCADE)
    external_url = models.URLField(
        blank=True, null=True, verbose_name=u'URL',
        help_text=_("Enter URL to external file. If this field is filled, the selected media doesn't apply.")
    )
    ordering = models.IntegerField(default=0)

    # Status
    enabled = models.BooleanField(db_index=True, default=True, verbose_name=_("enabled"))
    public = models.BooleanField(default=True, blank=True, verbose_name=_('public (shown on product page)'))
    purchased = models.BooleanField(
        default=False, blank=True, verbose_name=_('purchased (shown for finished purchases)')
    )

    translations = TranslatedFields(
        title=models.CharField(blank=True, max_length=128, verbose_name=_('title')),
        description=models.TextField(blank=True, verbose_name=_('description')),
    )

    class Meta:
        verbose_name = _('product attachment')
        verbose_name_plural = _('product attachments')
        ordering = ["ordering", ]

    def __str__(self):  # pragma: no cover
        return self.effective_title

    @property
    def effective_title(self):
        """Title, falling back to the file label, then the external URL,
        then a generic translated label."""
        title = self.safe_translation_getter("title")
        if title:
            return title
        if self.file_id:
            return self.file.label
        if self.external_url:
            return self.external_url
        return _('attachment')

    @property
    def url(self):
        """Public URL of the media: the Filer file's URL when a file is set,
        otherwise the external URL.

        :raises ValueError: when the media is not public.
        """
        if not self.public:
            # Bugfix: the original message referred to a nonexistent
            # ``get_effective_url()`` method; this property is ``url``.
            raise ValueError("`url` may not be used on non-public media")
        if self.file_id:
            return self.file.url
        else:
            return self.external_url

    @property
    def easy_thumbnails_thumbnailer(self):
        """
        Get `Thumbnailer` instance.
        Will return `None` if file cannot be thumbnailed.
        :rtype:easy_thumbnails.files.Thumbnailer|None
        """
        if not self.file_id:
            return None
        if self.kind != ProductMediaKind.IMAGE:
            return None
        return get_thumbnailer(self.file)

    def get_thumbnail(self, **kwargs):
        """
        Get thumbnail for image
        This will return `None` if there is no file or kind is not `ProductMediaKind.IMAGE`
        :rtype: easy_thumbnails.files.ThumbnailFile|None
        """
        kwargs.setdefault("size", (64, 64))
        kwargs.setdefault("crop", True)  # sane defaults
        kwargs.setdefault("upscale", True)  # sane defaults
        # Bugfix: the original used ``kwargs["size"] is (0, 0)``, an identity
        # test that is practically never true for a caller-supplied tuple.
        # Equality is what was intended: a (0, 0) size means "no thumbnail".
        if kwargs["size"] == (0, 0):
            return None
        thumbnailer = self.easy_thumbnails_thumbnailer
        if not thumbnailer:
            return None
        return thumbnailer.get_thumbnail(thumbnail_options=kwargs)
| Python | 0 |
49d7ba5c4ddf858129bbdd3dea1c968aff8345c1 | Update hackerland_radio_transmitters.py | python/hackerrank/practice/hackerland_radio_transmitters.py | python/hackerrank/practice/hackerland_radio_transmitters.py | n, k = map(int, input().split())
# HackerRank "Hackerland Radio Transmitters": a transmitter covers houses
# within distance k, so one transmitter anchored on a house spans a window
# of 2*k.  `n` and `k` are read on the preceding input line.
arr = list(map(int, input().split()))

# De-duplicate AND sort the house positions.  Sorting is essential here:
# binary_search below assumes ascending order, while plain set iteration
# order is not guaranteed to be sorted.
sorted_arr = sorted(set(arr))
coverage = 2 * k


def binary_search(l, r, x):
    """Return 1 + index of the position where the next transmitter window
    starts (the first house past the last one <= x), or -2 when the walk
    has reached the end of the row."""
    while l <= r:
        mid = l + (r - l) // 2
        if mid == 0:
            return -2
        if sorted_arr[mid] == x:
            return mid + 1
        elif sorted_arr[mid] < x and sorted_arr[mid + 1] > x:
            return mid + 1
        elif sorted_arr[mid] < x:
            l = mid + 1
        else:
            r = mid - 1
    return -2


count = 1
index = 0
# Greedily jump past the window covered by the transmitter anchored near
# sorted_arr[index]; every jump costs one more transmitter.  Bound by the
# de-duplicated length, not n, since duplicates were removed above.
while index <= len(sorted_arr) - 1:
    next_range = binary_search(0, len(sorted_arr) - 2, sorted_arr[index] + coverage)
    if next_range == -2:
        break
    index = next_range
    count += 1
print(count)
| n, k = map(int, input().split())
# HackerRank "Hackerland Radio Transmitters": a transmitter covers houses
# within distance k, so one transmitter anchored on a house spans a window
# of 2*k.  `n` and `k` are read on the preceding input line.
arr = list(map(int, input().split()))

# De-duplicate AND sort the house positions.  Sorting is essential here:
# binary_search below assumes ascending order, while plain set iteration
# order is not guaranteed to be sorted.
sorted_arr = sorted(set(arr))
coverage = 2 * k


def binary_search(l, r, x):
    """Return 1 + index of the position where the next transmitter window
    starts (the first house past the last one <= x), or -2 when the walk
    has reached the end of the row."""
    while l <= r:
        mid = l + (r - l) // 2
        if mid == 0:
            return -2
        if sorted_arr[mid] == x:
            return mid + 1
        elif sorted_arr[mid] < x and sorted_arr[mid + 1] > x:
            return mid + 1
        elif sorted_arr[mid] < x:
            l = mid + 1
        else:
            r = mid - 1
    return -2


count = 1
index = 0
# Greedily jump past the window covered by the transmitter anchored near
# sorted_arr[index]; every jump costs one more transmitter.  Bound by the
# de-duplicated length, not n, since duplicates were removed above.
while index <= len(sorted_arr) - 1:
    next_range = binary_search(0, len(sorted_arr) - 2, sorted_arr[index] + coverage)
    if next_range == -2:
        break
    index = next_range
    count += 1
print(count)
| Python | 0.000001 |
b5a0c3424b83c779c80e94b3ccfd795eb0e23642 | FIX show only paid invoices in together participant barometer | crowdfunding_compassion/controllers/main.py | crowdfunding_compassion/controllers/main.py | ##############################################################################
#
# Copyright (C) 2020 Compassion CH (http://www.compassion.ch)
# @author: Quentin Gigon
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo.http import request, route
from odoo.addons.website_event_compassion.controllers.events_controller import (
EventsController,
)
class CrowdFundingWebsite(EventsController):
    """Website controllers for the crowdfunding "my account" pages."""
    @route(["/my_account"], type="http", auth="user", website=True)
    def my_account(self, form_id=None, **kw):
        """Render the account dashboard: owned projects, participations in
        other people's projects, paid donations and the coordinates form."""
        values = {}
        partner = request.env.user.partner_id
        # Participations in projects owned by somebody else.
        participations = request.env["crowdfunding.participant"].search(
            [
                ("partner_id", "=", partner.id),
                ("project_id.project_owner_id", "!=", partner.id),
            ]
        )
        # Only paid invoice lines are shown as donations.
        donations = participations.mapped("invoice_line_ids").filtered(
            lambda l: l.state == "paid")
        owned_projects = request.env["crowdfunding.project"].search(
            [("project_owner_id", "=", partner.id)]
        )
        kw["form_model_key"] = "cms.form.partner.coordinates"
        coordinates_form = self.get_form("res.partner", partner.id, **kw)
        # Process the coordinates form unless another form was submitted.
        if form_id is None or form_id == coordinates_form.form_id:
            coordinates_form.form_process()
        values.update(
            {
                "partner": partner,
                "owned_projects": owned_projects,
                "participating_projects": participations,
                "donations": donations,
                "coordinates_form": coordinates_form,
            }
        )
        result = request.render(
            "crowdfunding_compassion.myaccount_crowdfunding_view_template", values
        )
        return result
    @route(["/my_account/project/update/"], type="http", auth="user", website=True)
    def my_account_projects_update(self, project_id=None, **kw):
        """Render/process the update form for one crowdfunding project;
        redirect back to /my_account on success."""
        project = request.env["crowdfunding.project"].search([("id", "=", project_id)])
        kw["form_model_key"] = "cms.form.crowdfunding.project.update"
        project_update_form = self.get_form("crowdfunding.project", project.id, **kw)
        project_update_form.form_process()
        values = {
            "form": project_update_form,
        }
        if project_update_form.form_success:
            result = request.redirect("/my_account")
        else:
            result = request.render(
                "crowdfunding_compassion.crowdfunding_form_template", values
            )
        return result
    @route(
        ["/my_account/participation/update/"], type="http", auth="user", website=True
    )
    def my_account_participants_update(self, participant_id=None, **kw):
        """Render/process the update form for one participation;
        redirect back to /my_account on success."""
        participant = request.env["crowdfunding.participant"].search(
            [("id", "=", participant_id)]
        )
        kw["form_model_key"] = "cms.form.crowdfunding.participant.update"
        participant_update_form = self.get_form(
            "crowdfunding.participant", participant.id, **kw
        )
        participant_update_form.form_process()
        values = {
            "form": participant_update_form,
        }
        if participant_update_form.form_success:
            result = request.redirect("/my_account")
        else:
            result = request.render(
                "crowdfunding_compassion.crowdfunding_form_template", values
            )
        return result
| ##############################################################################
#
# Copyright (C) 2020 Compassion CH (http://www.compassion.ch)
# @author: Quentin Gigon
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo.http import request, route
from odoo.addons.website_event_compassion.controllers.events_controller import (
EventsController,
)
class CrowdFundingWebsite(EventsController):
    """Website controllers for the crowdfunding "my account" pages."""
    @route(["/my_account"], type="http", auth="user", website=True)
    def my_account(self, form_id=None, **kw):
        """Render the account dashboard: owned projects, participations in
        other people's projects, donations and the coordinates form."""
        values = {}
        partner = request.env.user.partner_id
        # Participations in projects owned by somebody else.
        participations = request.env["crowdfunding.participant"].search(
            [
                ("partner_id", "=", partner.id),
                ("project_id.project_owner_id", "!=", partner.id),
            ]
        )
        # NOTE(review): "not cancelled" also admits draft/open (unpaid)
        # invoice lines as donations — confirm only paid lines should count.
        donations = participations.mapped("invoice_line_ids").filtered(
            lambda l: l.state != "cancel")
        owned_projects = request.env["crowdfunding.project"].search(
            [("project_owner_id", "=", partner.id)]
        )
        kw["form_model_key"] = "cms.form.partner.coordinates"
        coordinates_form = self.get_form("res.partner", partner.id, **kw)
        # Process the coordinates form unless another form was submitted.
        if form_id is None or form_id == coordinates_form.form_id:
            coordinates_form.form_process()
        values.update(
            {
                "partner": partner,
                "owned_projects": owned_projects,
                "participating_projects": participations,
                "donations": donations,
                "coordinates_form": coordinates_form,
            }
        )
        result = request.render(
            "crowdfunding_compassion.myaccount_crowdfunding_view_template", values
        )
        return result
    @route(["/my_account/project/update/"], type="http", auth="user", website=True)
    def my_account_projects_update(self, project_id=None, **kw):
        """Render/process the update form for one crowdfunding project;
        redirect back to /my_account on success."""
        project = request.env["crowdfunding.project"].search([("id", "=", project_id)])
        kw["form_model_key"] = "cms.form.crowdfunding.project.update"
        project_update_form = self.get_form("crowdfunding.project", project.id, **kw)
        project_update_form.form_process()
        values = {
            "form": project_update_form,
        }
        if project_update_form.form_success:
            result = request.redirect("/my_account")
        else:
            result = request.render(
                "crowdfunding_compassion.crowdfunding_form_template", values
            )
        return result
    @route(
        ["/my_account/participation/update/"], type="http", auth="user", website=True
    )
    def my_account_participants_update(self, participant_id=None, **kw):
        """Render/process the update form for one participation;
        redirect back to /my_account on success."""
        participant = request.env["crowdfunding.participant"].search(
            [("id", "=", participant_id)]
        )
        kw["form_model_key"] = "cms.form.crowdfunding.participant.update"
        participant_update_form = self.get_form(
            "crowdfunding.participant", participant.id, **kw
        )
        participant_update_form.form_process()
        values = {
            "form": participant_update_form,
        }
        if participant_update_form.form_success:
            result = request.redirect("/my_account")
        else:
            result = request.render(
                "crowdfunding_compassion.crowdfunding_form_template", values
            )
        return result
| Python | 0 |
4ea4247f531c78e3d26f135c5b85bbe4b5f2ca5e | Reorder imports | coalib/tests/parsing/ConfParserTest.py | coalib/tests/parsing/ConfParserTest.py | from collections import OrderedDict
import os
import sys
import tempfile
import unittest
sys.path.insert(0, ".")
from coalib.misc.Compatability import FileNotFoundError
from coalib.parsing.ConfParser import ConfParser
from coalib.settings.Section import Section
class ConfParserTest(unittest.TestCase):
    """Unit tests for ConfParser: default-section handling, aliased keys,
    comments, multiline values and ``section.key`` qualified settings."""
    # Raw configuration text exercised by the tests below.
    example_file = """to be ignored
a_default, another = val
TEST = tobeignored # do you know that thats a comment
test = push
t =
[MakeFiles]
j , another = a
multiline
value
# just a omment
# just a omment
nokey. = value
default.test = content
makefiles.lastone = val
    """
    def setUp(self):
        # Write the sample config to the temp dir and make sure the
        # "nonexistent" path really is absent before each test.
        self.tempdir = tempfile.gettempdir()
        self.file = os.path.join(self.tempdir, ".coafile")
        self.nonexistentfile = os.path.join(self.tempdir, "e81k7bd98t")
        with open(self.file, "w") as filehandler:
            filehandler.write(self.example_file)
        self.uut = ConfParser()
        try:
            os.remove(self.nonexistentfile)
        except FileNotFoundError:
            pass
    def tearDown(self):
        os.remove(self.file)
    def test_parse(self):
        # Expected content of the implicit "default" section.
        default_should = OrderedDict([
            ('a_default', 'val'),
            ('another', 'val'),
            ('comment0', '# do you know that thats a comment'),
            ('test', 'content'),
            ('t', '')
        ])
        # Expected content of [MakeFiles]; default-section entries are
        # appended after the section's own keys.
        makefiles_should = OrderedDict([
            ('j', 'a\nmultiline\nvalue'),
            ('another', 'a\nmultiline\nvalue'),
            ('comment1', '# just a omment'),
            ('comment2', '# just a omment'),
            ('lastone', 'val'),
            ('comment3', ''),
            ('a_default', 'val'),
            ('comment0', '# do you know that thats a comment'),
            ('test', 'content'),
            ('t', '')
        ])
        # Parsing a missing file must raise.
        self.assertRaises(FileNotFoundError,
                          self.uut.parse,
                          self.nonexistentfile)
        sections = self.uut.parse(self.file)
        # Re-parsing with the second flag set yields a different result
        # object (presumably an overwrite/re-read flag — TODO confirm).
        self.assertNotEqual(self.uut.parse(self.file, True), sections)
        key, val = sections.popitem(last=False)
        self.assertTrue(isinstance(val, Section))
        self.assertEqual(key, 'default')
        is_dict = OrderedDict()
        for k in val:
            is_dict[k] = str(val[k])
        self.assertEqual(is_dict, default_should)
        key, val = sections.popitem(last=False)
        self.assertTrue(isinstance(val, Section))
        self.assertEqual(key, 'makefiles')
        is_dict = OrderedDict()
        for k in val:
            is_dict[k] = str(val[k])
        self.assertEqual(is_dict, makefiles_should)
        self.assertEqual(val["comment1"].key, "comment1")
        # Unknown section names must raise.
        self.assertRaises(IndexError,
                          self.uut.get_section,
                          "inexistent section")
    def test_config_directory(self):
        # Parsing a directory should pick up its config file without errors
        # (presumably the .coafile written in setUp — TODO confirm).
        self.uut.parse(self.tempdir)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| from collections import OrderedDict
import os
import sys
sys.path.insert(0, ".")
from coalib.misc.Compatability import FileNotFoundError
from coalib.parsing.ConfParser import ConfParser
from coalib.settings.Section import Section
import unittest
import tempfile
class ConfParserTest(unittest.TestCase):
    """Unit tests for ConfParser: default-section handling, aliased keys,
    comments, multiline values and ``section.key`` qualified settings."""
    # Raw configuration text exercised by the tests below.
    example_file = """to be ignored
a_default, another = val
TEST = tobeignored # do you know that thats a comment
test = push
t =
[MakeFiles]
j , another = a
multiline
value
# just a omment
# just a omment
nokey. = value
default.test = content
makefiles.lastone = val
    """
    def setUp(self):
        # Write the sample config to the temp dir and make sure the
        # "nonexistent" path really is absent before each test.
        self.tempdir = tempfile.gettempdir()
        self.file = os.path.join(self.tempdir, ".coafile")
        self.nonexistentfile = os.path.join(self.tempdir, "e81k7bd98t")
        with open(self.file, "w") as filehandler:
            filehandler.write(self.example_file)
        self.uut = ConfParser()
        try:
            os.remove(self.nonexistentfile)
        except FileNotFoundError:
            pass
    def tearDown(self):
        os.remove(self.file)
    def test_parse(self):
        # Expected content of the implicit "default" section.
        default_should = OrderedDict([
            ('a_default', 'val'),
            ('another', 'val'),
            ('comment0', '# do you know that thats a comment'),
            ('test', 'content'),
            ('t', '')
        ])
        # Expected content of [MakeFiles]; default-section entries are
        # appended after the section's own keys.
        makefiles_should = OrderedDict([
            ('j', 'a\nmultiline\nvalue'),
            ('another', 'a\nmultiline\nvalue'),
            ('comment1', '# just a omment'),
            ('comment2', '# just a omment'),
            ('lastone', 'val'),
            ('comment3', ''),
            ('a_default', 'val'),
            ('comment0', '# do you know that thats a comment'),
            ('test', 'content'),
            ('t', '')
        ])
        # Parsing a missing file must raise.
        self.assertRaises(FileNotFoundError,
                          self.uut.parse,
                          self.nonexistentfile)
        sections = self.uut.parse(self.file)
        # Re-parsing with the second flag set yields a different result
        # object (presumably an overwrite/re-read flag — TODO confirm).
        self.assertNotEqual(self.uut.parse(self.file, True), sections)
        key, val = sections.popitem(last=False)
        self.assertTrue(isinstance(val, Section))
        self.assertEqual(key, 'default')
        is_dict = OrderedDict()
        for k in val:
            is_dict[k] = str(val[k])
        self.assertEqual(is_dict, default_should)
        key, val = sections.popitem(last=False)
        self.assertTrue(isinstance(val, Section))
        self.assertEqual(key, 'makefiles')
        is_dict = OrderedDict()
        for k in val:
            is_dict[k] = str(val[k])
        self.assertEqual(is_dict, makefiles_should)
        self.assertEqual(val["comment1"].key, "comment1")
        # Unknown section names must raise.
        self.assertRaises(IndexError,
                          self.uut.get_section,
                          "inexistent section")
    def test_config_directory(self):
        # Parsing a directory should pick up its config file without errors
        # (presumably the .coafile written in setUp — TODO confirm).
        self.uut.parse(self.tempdir)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| Python | 0 |
7cee7de43fc77e362cf19a9484f243d66e034f59 | Refactor from_json | upstream/chunk.py | upstream/chunk.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from upstream.exc import ChunkError
class Chunk(object):
    """Stores information about an encrypted chunk and converts between
    its URI, (hash, key) tuple and JSON representations."""

    def __init__(self, filehash=None, decryptkey=None, filename=None,
                 filepath=None):
        """
        :param filehash: The hash for a file.
        :param decryptkey: The decryption key for a file.
        :param filename: Name of the file (destroyed on encryption).
        :param filepath: Location of the file.
        """
        self.filehash = filehash
        self.decryptkey = decryptkey
        self.filename = filename
        self.filepath = filepath

    def from_uri(self, uri):
        """Populate hash/key from a ``<hash>?key=<key>`` URI string.

        :raises ChunkError: if the URI does not contain exactly one
            ``?key=`` separator.
        """
        try:
            self.filehash, self.decryptkey = str(uri).split("?key=")
        except ValueError:
            # Catch only the split/unpack failure; the original bare
            # ``except:`` also swallowed KeyboardInterrupt and friends.
            # Also actually interpolate the offending URI into the message
            # (the original left the "%s" placeholder unfilled).
            raise ChunkError("%s not format of <hash>?key=<key>" % uri)

    def from_json(self, json_str):
        """Populate hash/key from a JSON document with ``filehash``/``key``."""
        self.json_str = json_str
        data = json.loads(json_str)
        self.filehash = data['filehash']
        self.decryptkey = data['key']

    # Gets
    def get_uri(self):
        """Return ``<hash>?key=<key>``, or None when hashes are missing."""
        if not self.has_hashes():
            return
        return self.filehash + "?key=" + self.decryptkey

    def get_hashes(self):
        """Return ``(filehash, decryptkey)``, or None when missing."""
        if not self.has_hashes():
            return
        return self.filehash, self.decryptkey

    def get_json(self):
        """Return the JSON representation, or None when hashes are missing."""
        if not self.has_hashes():
            return
        return json.dumps(
            {
                "key": self.decryptkey,
                "filehash": self.filehash,
            }
        )

    def has_hashes(self):
        """Truthy when both the hash and the decryption key are known."""
        return self.filehash and self.decryptkey

    # Extra metadata
    def set_filename(self, filename):
        self.filename = filename

    def set_filepath(self, filepath):
        self.filepath = filepath

    def get_filename(self):
        return self.filename

    def get_filepath(self):
        return self.filepath
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from upstream.exc import ChunkError
class Chunk(object):
    """Stores information about an encrypted chunk and converts between
    its URI, (hash, key) tuple and JSON representations."""

    def __init__(self, filehash=None, decryptkey=None, filename=None,
                 filepath=None):
        """
        :param filehash: The hash for a file.
        :param decryptkey: The decryption key for a file.
        :param filename: Name of the file (destroyed on encryption).
        :param filepath: Location of the file.
        """
        self.filehash = filehash
        self.decryptkey = decryptkey
        self.filename = filename
        self.filepath = filepath

    def from_uri(self, uri):
        """Populate hash/key from a ``<hash>?key=<key>`` URI string.

        :raises ChunkError: if the URI does not contain exactly one
            ``?key=`` separator.
        """
        try:
            self.filehash, self.decryptkey = str(uri).split("?key=")
        except ValueError:
            # Catch only the split/unpack failure; the original bare
            # ``except:`` also swallowed KeyboardInterrupt and friends.
            # Also actually interpolate the offending URI into the message
            # (the original left the "%s" placeholder unfilled).
            raise ChunkError("%s not format of <hash>?key=<key>" % uri)

    def load_json(self, raw):
        """Populate hash/key from a JSON document with ``filehash``/``key``.
        Returns self so calls can be chained."""
        self.raw_json = raw
        data = json.loads(raw)
        self.filehash = data['filehash']
        self.decryptkey = data['key']
        return self

    # Gets
    def get_uri(self):
        """Return ``<hash>?key=<key>``, or None when hashes are missing."""
        if not self.has_hashes():
            return
        return self.filehash + "?key=" + self.decryptkey

    def get_hashes(self):
        """Return ``(filehash, decryptkey)``, or None when missing."""
        if not self.has_hashes():
            return
        return self.filehash, self.decryptkey

    def get_json(self):
        """Return the JSON representation, or None when hashes are missing."""
        if not self.has_hashes():
            return
        return json.dumps(
            {
                "key": self.decryptkey,
                "filehash": self.filehash,
            }
        )

    def has_hashes(self):
        """Truthy when both the hash and the decryption key are known."""
        return self.filehash and self.decryptkey

    # Extra metadata
    def set_filename(self, filename):
        self.filename = filename

    def set_filepath(self, filepath):
        self.filepath = filepath

    def get_filename(self):
        return self.filename

    def get_filepath(self):
        return self.filepath
| Python | 0.000009 |
1a089c634bc608e5862ce549ed598e50c02b8d09 | Bump version | users/__init__.py | users/__init__.py | __version__ = '0.1.3'
| __version__ = '0.1.2'
| Python | 0 |
14f7767ed95346ea89b13ddc0dcb6369292f6105 | Fix initial view creation with an empty db. | been/couch.py | been/couch.py | from hashlib import sha1
import couchdb
from core import Store
# Add time serialization to couchdb's json repertoire.
import json
import time
import calendar
class TimeEncoder(json.JSONEncoder):
    """JSON encoder hook that serializes ``time.struct_time`` values as
    Unix timestamps (seconds since the epoch, treated as UTC).

    NOTE(review): struct_time is a tuple subclass, so ``json.dumps`` will
    serialize it as an array without ever consulting ``default()``; this
    hook only fires when ``default()`` is invoked directly — confirm the
    intended integration.
    """

    def default(self, obj):
        # isinstance() is the idiomatic type test and also accepts
        # subclasses, unlike the original ``type(obj) is ...`` check.
        if isinstance(obj, time.struct_time):
            # timegm is the inverse of gmtime: struct_time (UTC) -> epoch.
            return calendar.timegm(obj)
        return json.JSONEncoder.default(self, obj)
# Register the custom encoder/decoder with the couchdb client so that all
# documents are (de)serialized through TimeEncoder.
couchdb.json.use(
    decode=json.JSONDecoder().decode,
    encode=TimeEncoder().encode)
class CouchStore(Store):
    """Store backend that persists sources and events in a CouchDB database."""
    def load(self):
        """Connect to the default local CouchDB server, creating the
        database (config key 'db_name', default 'activity') and the
        design-document views when missing.  Returns self for chaining."""
        self.server = couchdb.client.Server()
        db_name = self.config.get('db_name', 'activity')
        if not db_name in self.server:
            self.server.create(db_name)
        self.db = self.server[db_name]
        self.init_views()
        return self
    def init_views(self):
        """Create or refresh the '_design/activity' design document."""
        views = {
            "_id": "_design/activity",
            "language": "javascript",
            "views": {
                "sources": {
                    "map": "function(doc) { if (doc.type == 'source') { emit(doc._id, doc) } }"
                },
                "events": {
                    "map": "function(doc) { if (doc.type == 'event') { emit(doc.timestamp, doc) } }"
                },
                "events-by-source": {
                    "map": "function(doc) { if (doc.type == 'event') { emit(doc.source, doc) } }"
                },
                "events-by-source-count": {
                    "map": "function(doc) { if (doc.type == 'event') { emit(doc.source, doc) } }",
                    "reduce": "_count"
                },
                "events-by-slug": {
                    "map": "function(doc) { if (doc.type == 'event' && doc.slug) { emit(doc.slug, doc) } }"
                }
            }
        }
        # Fetch any existing design doc first so its _rev is kept and the
        # save below does not conflict; a fresh db gets the {} default.
        doc = self.db.get(views['_id'], {})
        doc.update(views)
        self.db[views['_id']] = doc
    def get_sources(self):
        """Return a mapping of source id -> stored source document."""
        return dict((row.key, row.value) for row in self.db.view('activity/sources'))
    def store_source(self, source):
        """Save a source's config as a 'source'-typed document."""
        source_data = source.config.copy()
        source_data['type'] = 'source'
        self.db[source.source_id] = source_data
    def store_events(self, events):
        """Bulk-save event dicts.
        Each event gets a content-derived _id (sha1 of summary+timestamp)
        so re-storing the same event updates instead of duplicating.
        Conflicting saves are retried up to three times with the fresh
        _rev; whatever still conflicts raises couchdb.ResourceConflict."""
        ids = {}
        for event in events:
            # NOTE(review): bytes + str concatenation below only works on
            # Python 2; under Python 3 this raises TypeError.
            event.setdefault('_id', sha1(event['summary'].encode('utf-8')+str(event['timestamp'])).hexdigest())
            ids[event['_id']] = event
            event['type'] = 'event'
        tries = 3
        while ids and tries:
            tries -= 1
            result = self.db.update(ids.values())
            for success, _id, info in result:
                if success:
                    del ids[_id]
                else:
                    # Refresh the revision and retry on the next pass.
                    ids[_id]['_rev'] = self.db[_id]['_rev']
        if ids:
            raise couchdb.ResourceConflict
    def store_update(self, source, events):
        """Tag *events* with the source's kind/id, store them, then persist
        the source's updated config."""
        for event in events:
            event['kind'] = source.kind
            event['source'] = source.source_id
        self.store_events(events)
        # NOTE(review): unlike store_source, this writes source.config
        # without re-adding the 'type' field — confirm config carries it.
        self.db[source.source_id] = source.config
    def events(self, count=100):
        """Yield up to *count* most recent events, newest first."""
        return (event.value for event in self.db.view('activity/events', limit=count, descending=True))
    def events_by_slug(self, slug):
        """Yield events whose slug equals *slug*."""
        return (event.value for event in self.db.view('activity/events-by-slug')[slug])
    def events_by_source_count(self):
        """Return a mapping of source id -> number of stored events."""
        return dict((count.key, count.value) for count in self.db.view('activity/events-by-source-count', group_level=1))
    def empty(self):
        """Delete every event and reset each source's 'since' marker."""
        for event in self.db.view('activity/events'):
            self.db.delete(event.value)
        for row in self.db.view('activity/sources'):
            source = row.value
            source['since'] = {}
            self.db[row.id] = source
| from hashlib import sha1
import couchdb
from core import Store
# Add time serialization to couchdb's json repertoire.
import json
import time
import calendar
class TimeEncoder(json.JSONEncoder):
    """JSON encoder hook that serializes ``time.struct_time`` values as
    Unix timestamps (seconds since the epoch, treated as UTC).

    NOTE(review): struct_time is a tuple subclass, so ``json.dumps`` will
    serialize it as an array without ever consulting ``default()``; this
    hook only fires when ``default()`` is invoked directly — confirm the
    intended integration.
    """

    def default(self, obj):
        # isinstance() is the idiomatic type test and also accepts
        # subclasses, unlike the original ``type(obj) is ...`` check.
        if isinstance(obj, time.struct_time):
            # timegm is the inverse of gmtime: struct_time (UTC) -> epoch.
            return calendar.timegm(obj)
        return json.JSONEncoder.default(self, obj)
# Register the custom encoder/decoder with the couchdb client so that all
# documents are (de)serialized through TimeEncoder.
couchdb.json.use(
    decode=json.JSONDecoder().decode,
    encode=TimeEncoder().encode)
class CouchStore(Store):
    """Store backend that persists sources and events in a CouchDB database."""
    def load(self):
        """Connect to the default local CouchDB server, creating the
        database (config key 'db_name', default 'activity') and the
        design-document views when missing.  Returns self for chaining."""
        self.server = couchdb.client.Server()
        db_name = self.config.get('db_name', 'activity')
        if not db_name in self.server:
            self.server.create(db_name)
        self.db = self.server[db_name]
        self.init_views()
        return self
    def init_views(self):
        """Create or refresh the '_design/activity' design document."""
        doc = self.db.get('_design/activity', {})
        doc.update({
            "language": "javascript",
            "views": {
                "sources": {
                    "map": "function(doc) { if (doc.type == 'source') { emit(doc._id, doc) } }"
                },
                "events": {
                    "map": "function(doc) { if (doc.type == 'event') { emit(doc.timestamp, doc) } }"
                },
                "events-by-source": {
                    "map": "function(doc) { if (doc.type == 'event') { emit(doc.source, doc) } }"
                },
                "events-by-source-count": {
                    "map": "function(doc) { if (doc.type == 'event') { emit(doc.source, doc) } }",
                    "reduce": "_count"
                },
                "events-by-slug": {
                    "map": "function(doc) { if (doc.type == 'event' && doc.slug) { emit(doc.slug, doc) } }"
                }
            }
        })
        # NOTE(review): on a fresh database get() returns the plain {}
        # default, which has no .id attribute — the assignment below would
        # then raise AttributeError; confirm first-run behaviour.
        self.db[doc.id] = doc
    def get_sources(self):
        """Return a mapping of source id -> stored source document."""
        return dict((row.key, row.value) for row in self.db.view('activity/sources'))
    def store_source(self, source):
        """Save a source's config as a 'source'-typed document."""
        source_data = source.config.copy()
        source_data['type'] = 'source'
        self.db[source.source_id] = source_data
    def store_events(self, events):
        """Bulk-save event dicts.
        Each event gets a content-derived _id (sha1 of summary+timestamp)
        so re-storing the same event updates instead of duplicating.
        Conflicting saves are retried up to three times with the fresh
        _rev; whatever still conflicts raises couchdb.ResourceConflict."""
        ids = {}
        for event in events:
            # NOTE(review): bytes + str concatenation below only works on
            # Python 2; under Python 3 this raises TypeError.
            event.setdefault('_id', sha1(event['summary'].encode('utf-8')+str(event['timestamp'])).hexdigest())
            ids[event['_id']] = event
            event['type'] = 'event'
        tries = 3
        while ids and tries:
            tries -= 1
            result = self.db.update(ids.values())
            for success, _id, info in result:
                if success:
                    del ids[_id]
                else:
                    # Refresh the revision and retry on the next pass.
                    ids[_id]['_rev'] = self.db[_id]['_rev']
        if ids:
            raise couchdb.ResourceConflict
    def store_update(self, source, events):
        """Tag *events* with the source's kind/id, store them, then persist
        the source's updated config."""
        for event in events:
            event['kind'] = source.kind
            event['source'] = source.source_id
        self.store_events(events)
        # NOTE(review): unlike store_source, this writes source.config
        # without re-adding the 'type' field — confirm config carries it.
        self.db[source.source_id] = source.config
    def events(self, count=100):
        """Yield up to *count* most recent events, newest first."""
        return (event.value for event in self.db.view('activity/events', limit=count, descending=True))
    def events_by_slug(self, slug):
        """Yield events whose slug equals *slug*."""
        return (event.value for event in self.db.view('activity/events-by-slug')[slug])
    def events_by_source_count(self):
        """Return a mapping of source id -> number of stored events."""
        return dict((count.key, count.value) for count in self.db.view('activity/events-by-source-count', group_level=1))
    def empty(self):
        """Delete every event and reset each source's 'since' marker."""
        for event in self.db.view('activity/events'):
            self.db.delete(event.value)
        for row in self.db.view('activity/sources'):
            source = row.value
            source['since'] = {}
            self.db[row.id] = source
| Python | 0 |
03fe02df027ef34cada5417205e641c5238c2403 | __init__ | TBFW/__init__.py | TBFW/__init__.py | # coding=utf-8
"""
TBFW library
"""
__version__ = '2.0.0'
__author__ = 'Nephy Project Team'
__license__ = 'MIT'
from TBFW.core import Core
from TBFW.database import DBProvider
from TBFW.api import Plugin
from TBFW.exceptions import GeneralError, OutOfMemoryError
if __name__ == "__main__":
pass
| # coding=utf-8
"""
TBFW library
"""
__version__ = '2.0.0'
__author__ = 'Nephy Project Team'
__license__ = 'MIT'
from TBFW.core import Core
from TBFW.api import Plugin
if __name__ == "__main__":
pass
| Python | 0.998994 |
85a13b7ad7d10c5ff431090cb1de63b84e68ff08 | Add proper translation contexts to RemovableDriveOutputDevice | plugins/RemovableDriveOutputDevice/RemovableDriveOutputDevice.py | plugins/RemovableDriveOutputDevice/RemovableDriveOutputDevice.py | import os.path
from UM.Application import Application
from UM.Logger import Logger
from UM.Message import Message
from UM.Mesh.WriteMeshJob import WriteMeshJob
from UM.Mesh.MeshWriter import MeshWriter
from UM.Scene.Iterator.BreadthFirstIterator import BreadthFirstIterator
from UM.OutputDevice.OutputDevice import OutputDevice
from UM.OutputDevice import OutputDeviceError
from UM.i18n import i18nCatalog
catalog = i18nCatalog("uranium")
class RemovableDriveOutputDevice(OutputDevice):
    """Output device that writes the scene as g-code to a removable drive."""
    def __init__(self, device_id, device_name):
        # device_id is the mount point / drive identifier; device_name is
        # the human-readable drive label shown in the UI.
        super().__init__(device_id)
        self.setName(device_name)
        self.setShortDescription(catalog.i18nc("@action:button", "Save to Removable Drive"))
        self.setDescription(catalog.i18nc("@info:tooltip", "Save to Removable Drive {0}").format(device_name))
        self.setIconName("save_sd")
        self.setPriority(1)
    def requestWrite(self, node):
        """Serialize *node* as g-code into a file on the drive, reporting
        progress/completion through a Message.

        :raises OutputDeviceError.WriteRequestFailedError: when no g-code
            writer or file name can be determined, or on OS errors.
        :raises OutputDeviceError.PermissionDeniedError: on permission errors.
        """
        gcode_writer = Application.getInstance().getMeshFileHandler().getWriterByMimeType("text/x-gcode")
        if not gcode_writer:
            Logger.log("e", "Could not find GCode writer, not writing to removable drive %s", self.getName())
            raise OutputDeviceError.WriteRequestFailedError()
        # Use the name of the first node with mesh data as the file name.
        file_name = None
        for n in BreadthFirstIterator(node):
            if n.getMeshData():
                file_name = n.getName()
            if file_name:
                break
        if not file_name:
            Logger.log("e", "Could not determine a proper file name when trying to write to %s, aborting", self.getName())
            raise OutputDeviceError.WriteRequestFailedError()
        file_name = os.path.join(self.getId(), os.path.splitext(file_name)[0] + ".gcode")
        try:
            Logger.log("d", "Writing to %s", file_name)
            stream = open(file_name, "wt")
            job = WriteMeshJob(gcode_writer, stream, node, MeshWriter.OutputMode.TextMode)
            job.setFileName(file_name)
            job.progress.connect(self._onProgress)
            job.finished.connect(self._onFinished)
            # Progress message with unknown duration (-1), not dismissable.
            message = Message(catalog.i18nc("@info:status", "Saving to Removable Drive <filename>{0}</filename>").format(self.getName()), 0, False, -1)
            message.show()
            job._message = message
            job.start()
        except PermissionError as e:
            raise OutputDeviceError.PermissionDeniedError() from e
        except OSError as e:
            raise OutputDeviceError.WriteRequestFailedError() from e
    def _onProgress(self, job, progress):
        # Forward write progress to the UI message and our signal.
        if hasattr(job, "_message"):
            job._message.setProgress(progress)
        self.writeProgress.emit(self, progress)
    def _onFinished(self, job):
        """Hide the progress message, close the stream and report success
        or failure with an appropriate notification."""
        if hasattr(job, "_message"):
            job._message.hide()
            job._message = None
        self.writeFinished.emit(self)
        if job.getResult():
            # NOTE(review): these i18nc calls still pass an empty context
            # string, unlike the ones in __init__ — confirm whether proper
            # translation contexts should be added here as well.
            message = Message(catalog.i18nc("", "Saved to Removable Drive {0} as {1}").format(self.getName(), os.path.basename(job.getFileName())))
            message.addAction("eject", catalog.i18nc("", "Eject"), "eject", catalog.i18nc("", "Eject removable device {0}").format(self.getName()))
            message.actionTriggered.connect(self._onActionTriggered)
            message.show()
            self.writeSuccess.emit(self)
        else:
            message = Message(catalog.i18nc("", "Could not save to removable drive {0}: {1}").format(self.getName(), str(job.getError())))
            message.show()
            self.writeError.emit(self)
        job.getStream().close()
    def _onActionTriggered(self, message, action):
        # Handle the "Eject" action of the success notification.
        if action == "eject":
            Application.getInstance().getOutputDeviceManager().getOutputDevicePlugin("RemovableDriveOutputDevice").ejectDevice(self)
| import os.path
from UM.Application import Application
from UM.Logger import Logger
from UM.Message import Message
from UM.Mesh.WriteMeshJob import WriteMeshJob
from UM.Mesh.MeshWriter import MeshWriter
from UM.Scene.Iterator.BreadthFirstIterator import BreadthFirstIterator
from UM.OutputDevice.OutputDevice import OutputDevice
from UM.OutputDevice import OutputDeviceError
from UM.i18n import i18nCatalog
catalog = i18nCatalog("uranium")
class RemovableDriveOutputDevice(OutputDevice):
def __init__(self, device_id, device_name):
super().__init__(device_id)
self.setName(device_name)
self.setShortDescription(catalog.i18nc("", "Save to Removable Drive"))
self.setDescription(catalog.i18nc("", "Save to Removable Drive {0}").format(device_name))
self.setIconName("save_sd")
self.setPriority(1)
def requestWrite(self, node):
gcode_writer = Application.getInstance().getMeshFileHandler().getWriterByMimeType("text/x-gcode")
if not gcode_writer:
Logger.log("e", "Could not find GCode writer, not writing to removable drive %s", self.getName())
raise OutputDeviceError.WriteRequestFailedError()
file_name = None
for n in BreadthFirstIterator(node):
if n.getMeshData():
file_name = n.getName()
if file_name:
break
if not file_name:
Logger.log("e", "Could not determine a proper file name when trying to write to %s, aborting", self.getName())
raise OutputDeviceError.WriteRequestFailedError()
file_name = os.path.join(self.getId(), os.path.splitext(file_name)[0] + ".gcode")
try:
Logger.log("d", "Writing to %s", file_name)
stream = open(file_name, "wt")
job = WriteMeshJob(gcode_writer, stream, node, MeshWriter.OutputMode.TextMode)
job.setFileName(file_name)
job.progress.connect(self._onProgress)
job.finished.connect(self._onFinished)
message = Message(catalog.i18nc("", "Saving to Removable Drive {0}").format(self.getName()), 0, False, -1)
message.show()
job._message = message
job.start()
except PermissionError as e:
raise OutputDeviceError.PermissionDeniedError() from e
except OSError as e:
raise OutputDeviceError.WriteRequestFailedError() from e
def _onProgress(self, job, progress):
if hasattr(job, "_message"):
job._message.setProgress(progress)
self.writeProgress.emit(self, progress)
def _onFinished(self, job):
if hasattr(job, "_message"):
job._message.hide()
job._message = None
self.writeFinished.emit(self)
if job.getResult():
message = Message(catalog.i18nc("", "Saved to Removable Drive {0} as {1}").format(self.getName(), os.path.basename(job.getFileName())))
message.addAction("eject", catalog.i18nc("", "Eject"), "eject", catalog.i18nc("", "Eject removable device {0}").format(self.getName()))
message.actionTriggered.connect(self._onActionTriggered)
message.show()
self.writeSuccess.emit(self)
else:
message = Message(catalog.i18nc("", "Could not save to removable drive {0}: {1}").format(self.getName(), str(job.getError())))
message.show()
self.writeError.emit(self)
job.getStream().close()
def _onActionTriggered(self, message, action):
    """Handle the "eject" action offered on the success message.

    :param message: the Message whose action was clicked (unused)
    :param action: identifier of the triggered action
    """
    if action == "eject":
        # Delegate the actual eject to the output device plugin.
        Application.getInstance().getOutputDeviceManager().getOutputDevicePlugin("RemovableDriveOutputDevice").ejectDevice(self)
| Python | 0.999928 |
44202d1c178d76c5db22a9b9ce4e7138a0cb73c7 | upgrade to v3.9.4 | kiteconnect/__version__.py | kiteconnect/__version__.py | __title__ = "kiteconnect"
__description__ = "The official Python client for the Kite Connect trading API"
__url__ = "https://kite.trade"
__download_url__ = "https://github.com/zerodhatech/pykiteconnect"
__version__ = "3.9.4"
__author__ = "Zerodha Technology Pvt ltd. (India)"
__author_email__ = "talk@zerodha.tech"
__license__ = "MIT"
| __title__ = "kiteconnect"
__description__ = "The official Python client for the Kite Connect trading API"
__url__ = "https://kite.trade"
__download_url__ = "https://github.com/zerodhatech/pykiteconnect"
__version__ = "3.9.2"
__author__ = "Zerodha Technology Pvt ltd. (India)"
__author_email__ = "talk@zerodha.tech"
__license__ = "MIT"
| Python | 0.000001 |
f1217f04f17daa3d77c9a3197b33d87b8f775056 | Replace OpenERP by Odoo | l10n_ch_zip/__openerp__.py | l10n_ch_zip/__openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Author Nicolas Bessi. Copyright Camptocamp SA
# Contributor: WinGo SA
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#
##############################################################################
{'name': 'Switzerland - Postal codes (ZIP) list',
'summary': 'Loads all Swiss postal codes',
'version': '1.0.1',
'depends': ['base', 'base_location'],
'author': 'Camptocamp',
'description': """
Swiss postal code (ZIP) list
============================
This module will load all Swiss postal codes (ZIP) in Odoo to
ease the input of partners.
It is not mandatory to use Odoo in Switzerland, but can improve the user experience.
""",
'website': 'http://www.camptocamp.com',
'data': ['l10n_ch_better_zip.xml'],
'demo_xml': [],
'installable': True,
'active': False}
| # -*- coding: utf-8 -*-
##############################################################################
#
# Author Nicolas Bessi. Copyright Camptocamp SA
# Contributor: WinGo SA
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#
##############################################################################
{'name': 'Switzerland - Postal codes (ZIP) list',
'summary': 'Loads all Swiss postal codes',
'version': '1.0.1',
'depends': ['base', 'base_location'],
'author': 'Camptocamp',
'description': """
Swiss postal code (ZIP) list
============================
This module will load all Swiss postal codes (ZIP) in OpenERP to
ease the input of partners.
It is not mandatory to use OpenERP in Switzerland, but can improve the user experience.
""",
'website': 'http://www.camptocamp.com',
'data': ['l10n_ch_better_zip.xml'],
'demo_xml': [],
'installable': True,
'active': False}
| Python | 0.000144 |
9c029788cf438ec5c796e22b77006559c1e59b99 | needs json | lablog/controllers/node.py | lablog/controllers/node.py | from flask import Blueprint, Response, render_template, request, g
from flask.views import MethodView
from lablog.app import App
from lablog import config
from lablog.util.jsontools import jsonify
from lablog.util import aes
from flask_oauthlib.provider import OAuth2Provider
from lablog.controllers.auth import oauth
from datetime import datetime
import logging
import json
# Flask blueprint grouping all /node endpoints.
node = Blueprint(
    'node',
    __name__,
    template_folder=config.TEMPLATES,
    url_prefix="/node",
)

# NOTE(review): 16-byte AES key built from fifteen "1" characters plus a
# trailing NUL; "buffer" is Python 2 only. Presumably shared with the
# embedded nodes — confirm before changing.
SKEY = bytearray(["1","1","1","1","1","1","1","1","1","1","1","1","1","1","1",0x00])
KEY = buffer(SKEY)
@node.route("/nodes", methods=["GET"])
@oauth.require_oauth('inoffice')
def get_nodes():
    """Return the ids of all nodes that have reported 'light' readings."""
    # Each series of the "light" measurement is tagged with its node id.
    res = g.INFLUX.query(query="SHOW SERIES FROM light")
    nodes = []
    for v in res.get_points():
        nodes.append(v.get('node'))
    return jsonify({"nodes":nodes})
@node.route("/<node_id>/sensors", methods=["POST"])
def node_sensors(node_id):
    """Ingest an AES-encrypted JSON payload of sensor readings.

    The request body is decrypted with the shared KEY; the resulting JSON
    object maps measurement names to values. Each reading is stored both
    in MongoDB (raw stream) and InfluxDB (time series).

    :param node_id: id of the reporting node, used as a series tag
    """
    logging.info(request.data)
    logging.info(config.SKEY)
    logging.info(KEY)
    j = aes.decrypt(request.data, KEY)
    j = json.loads(j)
    points = []
    # Python 2 dict iteration (iteritems).
    for k,v in j.iteritems():
        p = dict(
            measurement=k,
            tags=dict(
                node=str(node_id),
            ),
            time=datetime.utcnow(),
            fields=dict(
                value=v
            )
        )
        g.MONGO['lablog']['node_stream'].insert(p)
        points.append(p)
    g.INFLUX.write_points(points)
    return jsonify({'success':True})
| from flask import Blueprint, Response, render_template, request, g
from flask.views import MethodView
from lablog.app import App
from lablog import config
from lablog.util.jsontools import jsonify
from lablog.util import aes
from flask_oauthlib.provider import OAuth2Provider
import logging
from lablog.controllers.auth import oauth
from datetime import datetime
node = Blueprint(
'node',
__name__,
template_folder=config.TEMPLATES,
url_prefix="/node",
)
SKEY = bytearray(["1","1","1","1","1","1","1","1","1","1","1","1","1","1","1",0x00])
KEY = buffer(SKEY)
@node.route("/nodes", methods=["GET"])
@oauth.require_oauth('inoffice')
def get_nodes():
res = g.INFLUX.query(query="SHOW SERIES FROM light")
nodes = []
for v in res.get_points():
nodes.append(v.get('node'))
return jsonify({"nodes":nodes})
@node.route("/<node_id>/sensors", methods=["POST"])
def node_sensors(node_id):
logging.info(request.data)
logging.info(config.SKEY)
logging.info(KEY)
j = aes.decrypt(request.data, KEY)
j = json.loads(j)
points = []
for k,v in j.iteritems():
p = dict(
measurement=k,
tags=dict(
node=str(node_id),
),
time=datetime.utcnow(),
fields=dict(
value=v
)
)
g.MONGO['lablog']['node_stream'].insert(p)
points.append(p)
g.INFLUX.write_points(points)
return jsonify({'success':True})
| Python | 0.999702 |
b37988c7d6b260793cc8e88e0057f1a59d2fcc0b | fix migration file | custom/icds_reports/migrations/0060_added_phone_number_to_views.py | custom/icds_reports/migrations/0060_added_phone_number_to_views.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-09-10 14:05
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations
from corehq.sql_db.operations import RawSQLMigration
migrator = RawSQLMigration(('custom', 'icds_reports', 'migrations', 'sql_templates', 'database_views'))
class Migration(migrations.Migration):
    # Recreate the ICDS reporting database views from their SQL templates
    # so they pick up the newly added phone-number column.

    dependencies = [
        ('icds_reports', '0059_update_blob_paths'),
    ]

    operations = [
        migrator.get_migration('agg_awc_daily.sql'),
        migrator.get_migration('agg_ccs_record_monthly.sql'),
        migrator.get_migration('agg_child_health_monthly.sql'),
        migrator.get_migration('child_health_monthly.sql'),
        migrator.get_migration('daily_attendance.sql'),
    ]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-09-10 14:05
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations
from corehq.sql_db.operations import RawSQLMigration
migrator = RawSQLMigration(('custom', 'icds_reports', 'migrations', 'sql_templates', 'database_views'))
class Migration(migrations.Migration):
dependencies = [
('icds_reports', '0057_aggregateccsrecordpostnatalcareforms_is_ebf'),
]
operations = [
migrator.get_migration('agg_awc_daily.sql'),
migrator.get_migration('agg_ccs_record_monthly.sql'),
migrator.get_migration('agg_child_health_monthly.sql'),
migrator.get_migration('child_health_monthly.sql'),
migrator.get_migration('daily_attendance.sql'),
]
| Python | 0.000001 |
c5f2b65aa172b10206950a5981a06afef5742173 | Improve reliability of galera_consistency.py | galera_consistency.py | galera_consistency.py | import io
import optparse
import subprocess
def table_checksum(user, password, host):
    """Run pt-table-checksum and collect its output.

    :param user: MySQL user name passed via ``-u``
    :param password: MySQL password passed via ``-p``
    :param host: optional host passed via ``-h``; localhost when falsy
    :returns: tuple ``(returncode, stdout, stderr)`` of the finished process
    """
    args = ['/usr/bin/pt-table-checksum', '-u', user, '-p', password]
    if host:
        args.extend(['-h', host])
    # Capture both streams. Without stdout=PIPE, communicate() returns None
    # for stdout and the previous code's output aggregation failed.
    proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    # communicate() reads both pipes to EOF and waits for the process to
    # terminate, so no poll loop is needed — and calling communicate() more
    # than once (as the old loop did) is an error.
    (stdout, stderr) = proc.communicate()
    # Popen exposes the exit status as "returncode", not "return_code".
    return (proc.returncode, stdout, stderr)
def main():
    """Parse command-line arguments, run the checksum and report status.

    Prints "status ok" on success or "status err <details>" on failure,
    exiting with a non-zero status in the latter case (monitoring-plugin
    style output).
    """
    usage = "Usage: %prog [-h] [-H] username password"
    parser = optparse.OptionParser(usage=usage)
    parser.add_option(
        '-H', '--host',
        action='store',
        dest='host',
        default=None,
        help="Allow user to connect to something other than localhost"
    )
    (options, args) = parser.parse_args()
    # We will need the username and password to connect to the database
    if len(args) != 2:
        parser.print_help()
        # SystemExit(True) exits with status 1.
        raise SystemExit(True)
    # According to
    # http://www.percona.com/doc/percona-toolkit/2.2/pt-table-checksum.html
    # If the exit status is 0, everything is okay, otherwise the exit status
    # will be non-zero. We don't need stdout at the moment so we can discard
    # it. Stderr should contain any problems we run across.
    (status, _, err) = table_checksum(args[0], args[1], options.host)
    if status != 0:
        print "status err %s" % err.strip()
        raise SystemExit(True)
    print "status ok"
main()
| import optparse
import subprocess
def table_checksum(user, password, host):
args = ['/usr/bin/pt-table-checksum', '-u', user, '-p', password]
if host:
args.extend(['-h', host])
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
(out, err) = proc.communicate()
return (proc.return_code, out, err)
def main():
usage = "Usage: %prog [-h] [-H] username password"
parser = optparse.OptionParser(usage=usage)
parser.add_option('-H', '--host', action='store', dest='host',
default=None)
(options, args) = parser.parse_args()
if len(args) != 2:
parser.print_help()
raise SystemExit(True)
(status, _, err) = table_checksum(args[0], args[1], options.host)
if status != 0:
print "status err %s" % err
raise SystemExit(True)
print "status ok"
if __name__ == '__main__':
main()
| Python | 0 |
1b2fa45766b1ea5945f246d74bc4adf0114abe84 | Fix typo in description of config item | astroquery/splatalogue/__init__.py | astroquery/splatalogue/__init__.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Splatalogue Catalog Query Tool
-----------------------------------
:Author: Adam Ginsburg (adam.g.ginsburg@gmail.com)
:Originally contributed by:
Magnus Vilhelm Persson (magnusp@vilhelm.nu)
"""
from astropy import config as _config
class Conf(_config.ConfigNamespace):
    """
    Configuration parameters for `astroquery.splatalogue`.
    """
    # SLAP endpoint; kept for completeness but not used by the query code.
    slap_url = _config.ConfigItem(
        'http://find.nrao.edu/splata-slap/slap',
        'Splatalogue SLAP interface URL (not used).')
    # CGI export endpoint actually queried by SplatalogueClass.
    query_url = _config.ConfigItem(
        'http://www.cv.nrao.edu/php/splat/c_export.php',
        'Splatalogue web interface URL.')
    # Network timeout in seconds.
    timeout = _config.ConfigItem(
        60,
        'Time limit for connecting to Splatalogue server.')
    # Cap on the number of spectral lines returned per query.
    lines_limit = _config.ConfigItem(
        1000,
        'Limit to number of lines exported.')
conf = Conf()
from . import load_species_table
from . import utils
from .core import Splatalogue, SplatalogueClass
__all__ = ['Splatalogue', 'SplatalogueClass',
'Conf', 'conf',
]
| # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Splatalogue Catalog Query Tool
-----------------------------------
:Author: Adam Ginsburg (adam.g.ginsburg@gmail.com)
:Originally contributed by:
Magnus Vilhelm Persson (magnusp@vilhelm.nu)
"""
from astropy import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astroquery.splatalogue`.
"""
slap_url = _config.ConfigItem(
'http://find.nrao.edu/splata-slap/slap',
'Splatalogue SLAP interface URL (not used).')
query_url = _config.ConfigItem(
'http://www.cv.nrao.edu/php/splat/c_export.php',
'SSplatalogue web interface URL.')
timeout = _config.ConfigItem(
60,
'Time limit for connecting to Splatalogue server.')
lines_limit = _config.ConfigItem(
1000,
'Limit to number of lines exported.')
conf = Conf()
from . import load_species_table
from . import utils
from .core import Splatalogue, SplatalogueClass
__all__ = ['Splatalogue', 'SplatalogueClass',
'Conf', 'conf',
]
| Python | 0.000107 |
a1c60939302bd60d0e7708d19b7eee3d2970bbfb | Fix minion state assertions - multiple keys possible | assertions.py | assertions.py | import re
import shlex
import subprocess
from config import SALT_KEY_CMD
def has_expected_state(expected_state, mapping, env):
    """Return True when "salt-key -L" output matches *expected_state*.

    :param expected_state: key into *mapping* (e.g. 'accepted'/'unaccepted')
    :param mapping: dict of state name -> compiled regex over salt-key output
    :param env: environment dict, also supplying SALT_KEY_CMD format fields
    """
    assert expected_state in mapping
    cmd = shlex.split(SALT_KEY_CMD.format(**env))
    # "-L" lists all keys grouped by acceptance state.
    cmd.append("-L")
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
    output, unused_err = process.communicate()
    return mapping[expected_state].search(output) is not None
def assert_minion_key_state(env, expected_state):
    """Assert that this host's minion key is in *expected_state*.

    :param env: environment dict providing HOSTNAME and SALT_KEY_CMD fields
    :param expected_state: 'accepted' or 'unaccepted'
    """
    # "(\n.+)*" allows other hostnames to appear in the section before ours.
    STATES_MAPPING = dict(
        unaccepted=re.compile("Unaccepted Keys:(\n.+)*\n{HOSTNAME}".format(**env)),
        accepted=re.compile("Accepted Keys:(\n.+)*\n{HOSTNAME}".format(**env))
    )
    assert has_expected_state(expected_state, STATES_MAPPING, env)
def assert_proxyminion_key_state(env, expected_state):
    """Assert that the proxy minion's key is in *expected_state*.

    :param env: environment dict providing PROXY_ID and SALT_KEY_CMD fields
    :param expected_state: 'accepted' or 'unaccepted'
    """
    # "(\n.+)*" allows other ids to appear in the section before ours.
    STATES_MAPPING = dict(
        unaccepted=re.compile("Unaccepted Keys:(\n.+)*\n{PROXY_ID}".format(**env)),
        accepted=re.compile("Accepted Keys:(\n.+)*\n{PROXY_ID}".format(**env))
    )
    assert has_expected_state(expected_state, STATES_MAPPING, env)
| import re
import shlex
import subprocess
from config import SALT_KEY_CMD
def assert_minion_key_state(env, expected_state):
STATES_MAPPING = dict(
unaccepted=re.compile("Unaccepted Keys:\n{HOSTNAME}".format(**env)),
accepted=re.compile("Accepted Keys:\n{HOSTNAME}".format(**env))
)
assert expected_state in STATES_MAPPING
cmd = shlex.split(SALT_KEY_CMD.format(**env))
cmd.append("-L")
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
output, unused_err = process.communicate()
assert STATES_MAPPING[expected_state].search(output)
def assert_proxyminion_key_state(env, expected_state):
STATES_MAPPING = dict(
unaccepted=re.compile("Unaccepted Keys:\n{PROXY_ID}".format(**env)),
accepted=re.compile("Accepted Keys:\n{PROXY_ID}".format(**env))
)
assert expected_state in STATES_MAPPING
cmd = shlex.split(SALT_KEY_CMD.format(**env))
cmd.append("-L")
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
output, unused_err = process.communicate()
assert STATES_MAPPING[expected_state].search(output)
| Python | 0.000004 |
9845a0566e1c96ae72ce0ac6438e8ddd9f6db053 | Add better instructions on changing kernels | metatlas/tools/notebook.py | metatlas/tools/notebook.py | """Jupyter notebook helper functions"""
import logging
import os
import shutil
import sys
from pathlib import Path
import pandas as pd
from IPython.core.display import display, HTML
from metatlas.tools.logging import activate_logging
logger = logging.getLogger(__name__)
def configure_environment(log_level):
    """
    Sets environment variables and configures logging
    inputs:
        log_level: one of 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'
    """
    activate_logging(console_level=log_level)
    # Disable HDF5 file locking so data files on shared network storage can
    # be opened by multiple readers at once.
    os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
def validate_kernel():
    """
    Raise error if problem with kernel
    When on NERSC, this will install the correct kernel if needed

    Raises ValueError when running on NERSC under the wrong interpreter and
    ModuleNotFoundError when the required 'dataset' module is missing.
    """
    allowed_exe = [
        "/global/common/software/m2650/metatlas-targeted-20210521/bin/python",
    ]
    error_msg = "Invalid kernel setting in Jupyter Notebook."
    # METATLAS_LOCAL marks a local (non-NERSC) development environment.
    on_nersc = "METATLAS_LOCAL" not in os.environ
    if on_nersc and sys.executable not in allowed_exe:
        # Install the kernel spec so the user can switch to it after reload.
        install_kernel()
        logger.critical('Please check that the kernel is set to "Metatlas Targeted".')
        raise ValueError(error_msg)
    try:
        # pylint: disable=import-outside-toplevel,unused-import
        import dataset  # noqa: F401
    except ModuleNotFoundError as module_error:
        logger.critical(
            'Could not find dataset module. Please check that the kernel is set to "Metatlas Targeted".'
        )
        raise ModuleNotFoundError from module_error
def install_kernel():
    """
    Copies kernel.json from repo to active location under home directory.
    Only for use on NERC!
    """
    logger.info('Installing kernel.json for "Metatlas Targeted".')
    # The repo root is three directories up from this file.
    repo_path = Path(__file__).resolve().parent.parent.parent
    source = repo_path / "notebooks" / "kernels" / "metatlas-targeted.kernel.json"
    # Standard per-user Jupyter kernel-spec location.
    dest_dir = Path.home() / ".local" / "share" / "jupyter" / "kernels" / "metatlas-targeted"
    os.makedirs(dest_dir, exist_ok=True)
    shutil.copyfile(source, dest_dir / "kernel.json")
    logger.info(('Reload the page and then change kernel to "Metatlas Targeted". '
                 "On the menu bar at the top of this page select 'Kernel'>'Change Kernel..' "
                 "then find 'Metatlas Targeted' in the drop down list."))
def configure_pandas_display(max_rows=5000, max_columns=500, max_colwidth=100):
    """Configure how pandas renders DataFrames in notebook output."""
    options = {
        "display.max_rows": max_rows,
        "display.max_columns": max_columns,
        "display.max_colwidth": max_colwidth,
    }
    for option_name, value in options.items():
        pd.set_option(option_name, value)
def configure_notebook_display():
    """Configure output from Jupyter"""
    # set notebook to have minimal side margins
    display(HTML("<style>.container { width:100% !important; }</style>"))
def setup(log_level):
    """High level function to prepare the metatlas notebook

    inputs:
        log_level: one of 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'
    """
    configure_environment(log_level)
    # Fails fast (raises) if the wrong Jupyter kernel is active.
    validate_kernel()
    configure_notebook_display()
    configure_pandas_display()
| """Jupyter notebook helper functions"""
import logging
import os
import shutil
import sys
from pathlib import Path
import pandas as pd
from IPython.core.display import display, HTML
from metatlas.tools.logging import activate_logging
logger = logging.getLogger(__name__)
def configure_environment(log_level):
"""
Sets environment variables and configures logging
inputs:
log_level: one of 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'
"""
activate_logging(console_level=log_level)
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
def validate_kernel():
"""
Raise error if problem with kernel
When on NERSC, this will install the correct kernel if needed
"""
allowed_exe = [
"/global/common/software/m2650/metatlas-targeted-20210521/bin/python",
]
error_msg = "Invalid kernel setting in Jupyter Notebook."
on_nersc = "METATLAS_LOCAL" not in os.environ
if on_nersc and sys.executable not in allowed_exe:
install_kernel()
logger.critical('Please check that the kernel is set to "Metatlas Targeted".')
raise ValueError(error_msg)
try:
# pylint: disable=import-outside-toplevel,unused-import
import dataset # noqa: F401
except ModuleNotFoundError as module_error:
logger.critical(
'Could not find dataset module. Please check that the kernel is set to "Metatlas Targeted".'
)
raise ModuleNotFoundError from module_error
def install_kernel():
"""
Copies kernel.json from repo to active location under home directory.
Only for use on NERC!
"""
logger.info('Installing kernel.json for "Metatlas Targeted".')
repo_path = Path(__file__).resolve().parent.parent.parent
source = repo_path / "notebooks" / "kernels" / "metatlas-targeted.kernel.json"
dest_dir = Path.home() / ".local" / "share" / "jupyter" / "kernels" / "metatlas-targeted"
os.makedirs(dest_dir, exist_ok=True)
shutil.copyfile(source, dest_dir / "kernel.json")
logger.info('Reload the page and change kernel to "Metatlas Targeted".')
def configure_pandas_display(max_rows=5000, max_columns=500, max_colwidth=100):
"""Set pandas display options"""
pd.set_option("display.max_rows", max_rows)
pd.set_option("display.max_columns", max_columns)
pd.set_option("display.max_colwidth", max_colwidth)
def configure_notebook_display():
"""Configure output from Jupyter"""
# set notebook to have minimal side margins
display(HTML("<style>.container { width:100% !important; }</style>"))
def setup(log_level):
"""High level function to prepare the metatlas notebook"""
configure_environment(log_level)
validate_kernel()
configure_notebook_display()
configure_pandas_display()
| Python | 0.005869 |
f3ec85cd7baf65036ed76a2c4ab4fe935b81b805 | introduce logging | midas/scripts/md_config.py | midas/scripts/md_config.py | # -*- coding: utf-8 -*-
import logging
import sys
from midas.scripts import MDCommand
import midas.config as md_cfg
logger = logging.getLogger(__name__)
class MDConfig(MDCommand):
    """ Read all configuration files, print the final configuration
    and exit.

    This can be used to see how a configuration file (e.g. a job file)
    alters the whole configuration or to generate a default
    configuration file which is going to be altered in a second step.
    """

    # Optional positional argument consumed by MDCommand's argument parser.
    POS_ARG = { 'dest': 'job_cfg',
                'nargs': '?',
                'metavar': 'FILE',
                'help': 'additional configuration file to read'}

    def __init__(self, argv):
        MDCommand.__init__(self, argv)
        # Layer the optional job file on top of the default configuration.
        if self.args.job_cfg:
            md_cfg.read(self.args.job_cfg)

    def run(self):
        # Dump the merged configuration in INI format to stdout.
        md_cfg.get_configparser().write(sys.stdout)
| # -*- coding: utf-8 -*-
import sys
from midas.scripts import MDCommand
import midas.config as md_cfg
class MDConfig(MDCommand):
""" Read all configuration files, print the final configuration
and exit.
This can be used to see how a configuration file (e.g. a job file)
alters the whole configuration or to generate a default
configuration file which is going to be altered in a second step.
"""
POS_ARG = { 'dest': 'job_cfg',
'nargs': '?',
'metavar': 'FILE',
'help': 'additional configuration file to read'}
def __init__(self, argv):
MDCommand.__init__(self, argv)
if self.args.job_cfg:
md_cfg.read(self.args.job_cfg)
def run(self):
md_cfg.get_configparser().write(sys.stdout)
| Python | 0 |
8790eec0fdd94beeb4d0ceac8b24a1de77bd3eee | Update sql2rf.py | bin/sql2rf.py | bin/sql2rf.py | #!/usr/bin/env python
# -*- coding: utf8 -*-
"""Script to search for records within an SQL database created using snapshot2sql
and convert to Researcher Format."""
# Import required modules
# import datetime
import getopt
# import sys
from iams2rf import *
__author__ = 'Victoria Morris'
__license__ = 'MIT License'
__version__ = '1.0.0'
__status__ = '4 - Beta Development'
def usage():
    """Print the command-line help text, then prompt and exit."""
    print('========================================')
    print('sql2rf')
    print('IAMS data extraction for Researcher Format')
    print('========================================')
    print('This utility searches an SQL database of IAMS records')
    print('created using the utility snapshot2sql')
    print('and converts matching records to Researcher Format')
    print('\nCorrect syntax is:')
    print('sql2rf -d DB_PATH -r REQUEST_PATH [OPTIONS]')
    print('\nSearch DB_PATH for records meeting criteria in REQUEST_PATH.')
    print('   -d       Path to the SQL database')
    print('   -r       Path to Outlook message containing details of the request')
    print('\nUse quotation marks (") around arguments which contain spaces')
    print('\nIf REQUEST_PATH is not specified you will be given the option to set parameters for the output')
    print('\nOptions:')
    print('   -o       OUTPUT_FOLDER to save output files.')
    print('   --debug  Debug mode.')
    print('   --help   Show this message and exit.')
    # exit_prompt (from iams2rf) waits for user input and exits.
    exit_prompt()
def main(argv=None):
    """Entry point: parse command-line options and run the extraction.

    :param argv: list of command-line arguments; defaults to sys.argv[1:]
        so main() can also be called programmatically with no arguments.
    """
    if argv is None:
        # Fall back to the real command line. The previous code assigned the
        # unused variable "name = str(sys.argv[1])" here, which raised
        # IndexError when no argument was given and left argv as None
        # (making the getopt call below fail with TypeError).
        argv = sys.argv[1:]
    db_path, request_path, output_folder = '', '', ''
    debug = False
    try:
        opts, args = getopt.getopt(argv, 'd:r:o:', ['db_path=', 'request_path=', 'output_folder=', 'debug', 'help'])
    except getopt.GetoptError as err:
        exit_prompt('Error: {}'.format(err))
    # No options at all: show the help text (usage() exits).
    if opts is None or not opts:
        usage()
    for opt, arg in opts:
        if opt == '--help': usage()
        elif opt == '--debug': debug = True
        elif opt in ['-d', '--db_path']: db_path = arg
        elif opt in ['-r', '--request_path']: request_path = arg
        elif opt in ['-o', '--output_folder']: output_folder = arg
        else: exit_prompt('Error: Option {} not recognised'.format(opt))
    iams2rf_sql2rf(db_path, request_path, output_folder, debug)
    print('\n\nAll processing complete')
    print('----------------------------------------')
    print(str(datetime.datetime.now()))
    sys.exit()
if __name__ == '__main__':
main(sys.argv[1:])
| #!/usr/bin/env python
# -*- coding: utf8 -*-
"""Script to search for records within an SQL database created using snapshot2sql
and convert to Researcher Format."""
# Import required modules
# import datetime
import getopt
# import sys
from iams2rf import *
__author__ = 'Victoria Morris'
__license__ = 'MIT License'
__version__ = '1.0.0'
__status__ = '4 - Beta Development'
def usage():
print('========================================')
print('sql2rf')
print('IAMS data extraction for Researcher Format')
print('========================================')
print('This utility searches an SQL database of IAMS records')
print('created using the utility snapshot2sql')
print('and converts matching records to Researcher Format')
print('\nCorrect syntax is:')
print('sql2rf -d DB_PATH -r REQUEST_PATH -o OUTPUT_FOLDER [OPTIONS]')
print('\nSearch DB_PATH for records meeting criteria in REQUEST_PATH.')
print(' -d Path to the SQL database')
print(' -r Path to Outlook message containing details of the request')
print(' -o Folder to save Researcher Format output files')
print('\nUse quotation marks (") around arguments which contain spaces')
print('\nIf REQUEST_PATH is not specified you will be given the option to set parameters for the output')
print('\nOptions:')
print(' --debug Debug mode.')
print(' --help Show this message and exit.')
exit_prompt()
def main(argv=None):
if argv is None:
name = str(sys.argv[1])
db_path, request_path, output_folder = '', '', ''
debug = False
try:
opts, args = getopt.getopt(argv, 'd:r:o:', ['db_path=', 'request_path=', 'output_folder=', 'debug', 'help'])
except getopt.GetoptError as err:
exit_prompt('Error: {}'.format(err))
if opts is None or not opts:
usage()
for opt, arg in opts:
if opt == '--help': usage()
elif opt == '--debug': debug = True
elif opt in ['-d', '--db_path']: db_path = arg
elif opt in ['-r', '--request_path']: request_path = arg
elif opt in ['-o', '--output_folder']: output_folder = arg
else: exit_prompt('Error: Option {} not recognised'.format(opt))
iams2rf_sql2rf(db_path, request_path, output_folder, debug)
print('\n\nAll processing complete')
print('----------------------------------------')
print(str(datetime.datetime.now()))
sys.exit()
if __name__ == '__main__':
main(sys.argv[1:])
| Python | 0.000001 |
2d35031cfdb98503f326cc375f6d9962daf1faf8 | Set BaseAnimation.sleep_time as late as possible. | bibliopixel/animation/animation.py | bibliopixel/animation/animation.py | import contextlib, threading, time
from . runner import Runner
from .. import log
from .. threads.animation_threading import AnimationThreading
class BaseAnimation(object):
    """Base class for LED animations.

    Subclasses implement step() to draw one frame onto the LED layout;
    frame pacing and threading are handled here via AnimationThreading.
    """

    # When True, frames run back-to-back with no inter-frame sleep.
    free_run = False

    def __init__(self, led):
        self._led = led
        # Optional per-animation delay; overrides the runner's sleep_time.
        self.internal_delay = None

    def preRun(self, amt=1):
        """Hook called once before the first frame; clears the LEDs."""
        self._led.all_off()

    def step(self, amt=1):
        """Draw one frame. Must be overridden by subclasses."""
        raise RuntimeError("Base class step() called. This shouldn't happen")

    def cleanup(self):
        """Stop the animation thread and release LED resources."""
        self.threading.stop_thread(wait=True)
        self._led.cleanup()

    def is_running(self):
        """Return True while the animation should keep producing frames."""
        if self.threading.stop_event.isSet():
            return False
        if self.runner.max_steps:
            return self.cur_step < self.runner.max_steps
        return not (self.runner.until_complete and self.completed)

    def run_one_frame(self):
        """Render one frame, push it to the driver, and pace the loop."""
        # Timestamps: [frame start, after step(), after push, before wait].
        timestamps = []

        def stamp():
            timestamps.append(time.time())

        stamp()
        self.step(self.runner.amt)
        stamp()

        self._led.frame_render_time = timestamps[1] - timestamps[0]
        self._led.push_to_driver()
        stamp()

        _report_framerate(timestamps)

        self.cur_step += 1
        # When a cycle completes, start another until max_cycles is reached.
        if self.completed and self.runner.max_cycles > 0:
            if self.cycle_count < self.runner.max_cycles - 1:
                self.cycle_count += 1
                self.completed = False

        stamp()
        # Sleep whatever remains of the frame budget (None = no pacing).
        self.threading.wait(self.sleep_time, timestamps)

    @contextlib.contextmanager
    def run_context(self):
        """Set up frame pacing and preRun; guarantee cleanup afterwards."""
        # Pacing precedence: free_run > internal_delay > runner.sleep_time.
        if self.free_run:
            self.sleep_time = None
        elif self.internal_delay:
            self.sleep_time = self.internal_delay
        else:
            self.sleep_time = self.runner.sleep_time
        self._led.animation_sleep_time = self.sleep_time or 0
        self.preRun(self.runner.amt)
        try:
            yield
        finally:
            self.cleanup()

    def run_all_frames(self):
        """Main loop: render frames until is_running() goes False."""
        with self.run_context():
            while self.is_running():
                self.run_one_frame()

    def set_runner(self, runner):
        """Attach a Runner and reset all per-run counters."""
        self.runner = runner
        self.completed = False
        self._step = 0
        self.cur_step = 0
        self.cycle_count = 0

    def start(self):
        """Kick off run_all_frames on the animation thread."""
        self.threading = AnimationThreading(self.runner, self.run_all_frames)
        self.threading.start()

    def run(self, **kwds):
        # DEPRECATED
        self.set_runner(Runner(**kwds))
        self.start()
def _report_framerate(timestamps):
    """Log per-frame timing figures derived from the collected timestamps."""
    elapsed = timestamps[-1] - timestamps[0]
    step_time = timestamps[1] - timestamps[0]
    update_time = timestamps[2] - timestamps[1]
    # Clamp the divisor so an instantaneous frame cannot divide by zero.
    frames_per_second = int(1.0 / max(elapsed, 0.001))
    log.debug("%dms/%dfps / Frame: %dms / Update: %dms",
              1000 * elapsed,
              frames_per_second,
              1000 * step_time,
              1000 * update_time)
| import contextlib, threading, time
from . runner import Runner
from .. import log
from .. threads.animation_threading import AnimationThreading
class BaseAnimation(object):
    """Base class for LED animations.

    Subclasses override step() to draw one frame; the runner/threading
    machinery here drives frame pacing, cycle counting and cleanup.
    """

    # When True the animation runs as fast as possible (no inter-frame sleep).
    free_run = False

    def __init__(self, led):
        self._led = led
        # Optional per-animation delay overriding the runner's sleep_time.
        self.internal_delay = None

    def preRun(self, amt=1):
        """Hook called once before the first frame; default blanks the LEDs."""
        self._led.all_off()

    def step(self, amt=1):
        """Draw one frame. Must be overridden by subclasses."""
        raise RuntimeError("Base class step() called. This shouldn't happen")

    def cleanup(self):
        """Stop the animation thread and release the LED driver."""
        self.threading.stop_thread(wait=True)
        self._led.cleanup()

    def is_running(self):
        """Return True while the animation should keep producing frames."""
        if self.threading.stop_event.isSet():
            return False
        if self.runner.max_steps:
            return self.cur_step < self.runner.max_steps
        return not (self.runner.until_complete and self.completed)

    def run_one_frame(self):
        """Render, push and pace a single frame, collecting timing stamps."""
        timestamps = []

        def stamp():
            timestamps.append(time.time())

        stamp()
        self.step(self.runner.amt)
        stamp()
        self._led.frame_render_time = timestamps[1] - timestamps[0]
        self._led.push_to_driver()
        stamp()
        _report_framerate(timestamps)
        self.cur_step += 1
        # Restart the animation for another cycle while cycles remain.
        if self.completed and self.runner.max_cycles > 0:
            if self.cycle_count < self.runner.max_cycles - 1:
                self.cycle_count += 1
                self.completed = False
        stamp()
        self.threading.wait(self.sleep_time, timestamps)

    @contextlib.contextmanager
    def run_context(self):
        """Context manager pairing preRun() with guaranteed cleanup()."""
        self.preRun(self.runner.amt)
        try:
            yield
        finally:
            self.cleanup()

    def run_all_frames(self):
        """Main animation loop: render frames until is_running() is False."""
        with self.run_context():
            while self.is_running():
                self.run_one_frame()

    def set_runner(self, runner):
        """Attach a Runner and reset all per-run counters and pacing state."""
        self.runner = runner
        self.completed = False
        self._step = 0
        self.cur_step = 0
        self.cycle_count = 0
        # Pacing priority: free_run > internal_delay > runner's sleep_time.
        if self.free_run:
            self.sleep_time = None
        elif self.internal_delay:
            self.sleep_time = self.internal_delay
        else:
            self.sleep_time = self.runner.sleep_time
        self._led.animation_sleep_time = self.sleep_time or 0

    def start(self):
        """Launch the animation loop on its own thread."""
        self.threading = AnimationThreading(self.runner, self.run_all_frames)
        self.threading.start()

    def run(self, **kwds):
        # DEPRECATED
        self.set_runner(Runner(**kwds))
        self.start()
def _report_framerate(timestamps):
    """Log one frame's timing breakdown from the collected *timestamps*."""
    elapsed = timestamps[-1] - timestamps[0]
    # Clamp the denominator so an instantaneous frame cannot divide by zero.
    frames_per_second = int(1.0 / max(elapsed, 0.001))
    frame_ms = 1000 * (timestamps[1] - timestamps[0])
    update_ms = 1000 * (timestamps[2] - timestamps[1])
    log.debug("%dms/%dfps / Frame: %dms / Update: %dms",
              1000 * elapsed, frames_per_second, frame_ms, update_ms)
| Python | 0 |
796952dca75a78e0b71b5809112bd0815fa87986 | Method instructions format: text/x-web-intelligent | bika/lims/content/method.py | bika/lims/content/method.py | from AccessControl import ClassSecurityInfo
from Products.CMFCore.permissions import ModifyPortalContent, View
from Products.Archetypes.public import *
from Products.Archetypes.references import HoldingReference
from Products.ATExtensions.ateapi import RecordsField as RecordsField
from bika.lims.browser.widgets import RecordsWidget
from bika.lims.content.bikaschema import BikaSchema
from bika.lims.config import PROJECTNAME
import sys
from bika.lims import bikaMessageFactory as _
from zope.interface import implements
schema = BikaSchema.copy() + Schema((
TextField('Instructions',
default_content_type = 'text/x-web-intelligent',
allowable_content_types = ('text/x-web-intelligent',),
default_output_type="text/html",
widget = TextAreaWidget(
label = _("Method Instructions",
"Instructions"),
description = _("Technical description and instructions intended for analysts"),
),
),
FileField('MethodDocument', # XXX Multiple Method documents please
widget = FileWidget(
label = _("Method Document"),
description = _("Load documents describing the method here"),
)
),
))
schema['description'].schemata = 'default'
schema['description'].widget.visible = True
schema['description'].widget.label = _("Description")
schema['description'].widget.description = _("Describes the method in layman terms. This information is made available to lab clients")
class Method(BaseFolder):
    """An analysis method, holding instructions and supporting documents."""
    security = ClassSecurityInfo()
    displayContentsTab = False
    schema = schema
    # Rename the object after creation so it receives an ID from the ID server.
    _at_rename_after_creation = True

    def _renameAfterCreation(self, check_auto_id=False):
        # Imported lazily to avoid a circular import at module load time.
        from bika.lims.idserver import renameAfterCreation
        renameAfterCreation(self)
registerType(Method, PROJECTNAME)
| from AccessControl import ClassSecurityInfo
from Products.CMFCore.permissions import ModifyPortalContent, View
from Products.Archetypes.public import *
from Products.Archetypes.references import HoldingReference
from Products.ATExtensions.ateapi import RecordsField as RecordsField
from bika.lims.browser.widgets import RecordsWidget
from bika.lims.content.bikaschema import BikaSchema
from bika.lims.config import PROJECTNAME
import sys
from bika.lims import bikaMessageFactory as _
from zope.interface import implements
schema = BikaSchema.copy() + Schema((
TextField('Instructions',
default_content_type = 'text/plain',
allowable_content_types = ('text/plain',),
widget = TextAreaWidget(
label = _("Method Instructions",
"Instructions"),
description = _("Technical description and instructions intended for analysts"),
),
),
FileField('MethodDocument', # XXX Multiple Method documents please
widget = FileWidget(
label = _("Method Document"),
description = _("Load documents describing the method here"),
)
),
))
schema['description'].schemata = 'default'
schema['description'].widget.visible = True
schema['description'].widget.label = _("Description")
schema['description'].widget.description = _("Describes the method in layman terms. This information is made available to lab clients")
class Method(BaseFolder):
security = ClassSecurityInfo()
displayContentsTab = False
schema = schema
_at_rename_after_creation = True
def _renameAfterCreation(self, check_auto_id=False):
from bika.lims.idserver import renameAfterCreation
renameAfterCreation(self)
registerType(Method, PROJECTNAME)
| Python | 0.999106 |
7c382a33fa3f691fcbf89621b48c0c9e3a921d03 | update version number | vaspy/__init__.py | vaspy/__init__.py | __version__ = '0.1.1' # add d-band center calculation
class VasPy(object):
    def __init__(self, filename):
        """Common base class inherited by all VASPy classes.

        filename -- path of the VASP file this object wraps.
        """
        self.filename = filename
class CarfileValueError(Exception):
    """Raised when a CONTCAR-like file contains invalid or malformed data."""
    pass
class UnmatchedDataShape(Exception):
    """Raised when two data sets have shapes that do not match."""
    pass
| __version__ = '0.1.0' # add electro module
class VasPy(object):
def __init__(self, filename):
"Base class to be inherited by all classes in VASPy."
self.filename = filename
class CarfileValueError(Exception):
"Exception raised for errors in the CONTCAR-like file."
pass
class UnmatchedDataShape(Exception):
"Exception raised for errors in unmatched data shape."
pass
| Python | 0.000002 |
4d63320c2bf077e90cffb98286e0354dcab1fc64 | Make runTestCases.py possible to run independently | build-tools/runTestCases.py | build-tools/runTestCases.py | #! /usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import subprocess
import sys
import os
# Directory that collects one log file per executed test case.
testLogDir = "testlog/"
if not os.path.exists(testLogDir):
    os.makedirs(testLogDir)
# Configuration file listing one maven test command per line.
testCmdFile = 'build-tools/test.conf'
tcCmdReg = re.compile('^mvn\s.*$')        # a runnable maven command line
tcNameReg = re.compile('-Dtest=(.+?)\s')  # extracts the test-case name
tcModuleReg = re.compile('-pl\s(.+?)\s')  # extracts the maven module
with open(testCmdFile) as fp:
    for line in fp:
        match = tcCmdReg.findall(line)
        if match:
            # One log file per test case, named after the -Dtest argument.
            logFilePath = testLogDir + tcNameReg.findall(line)[0] + ".log"
            print("[INFO] Running " + tcNameReg.findall(line)[0] + " test case for \"" + tcModuleReg.findall(line)[0] + "\"...")
            try:
                #maven build
                subprocess.check_call(match[0] + ">" + logFilePath, stderr=subprocess.STDOUT, shell=True)
                print("[SUCCESS] Test case " + tcNameReg.findall(line)[0] + " for \"" + tcModuleReg.findall(line)[0]+ "\" is completed!")
            except subprocess.CalledProcessError as e:
                # Any failing test aborts the whole run with a hint about pmalloc.
                print("[ERROR] This test case requires \"pmalloc\" memory service to pass, please check if \"pmalloc\" has been configured correctly! If \"pmalloc\" is installed, please refer to testlog/" + tcNameReg.findall(line)[0] + ".log for detailed information.")
                sys.exit(1)
print("[DONE] All test cases are completed! Log files are available under folder testlog!")
| #! /usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import subprocess
import sys
testCmdFile = 'build-tools/test.conf'
tcCmdReg = re.compile('^mvn\s.*$')
tcNameReg = re.compile('-Dtest=(.+?)\s')
tcModuleReg = re.compile('-pl\s(.+?)\s')
with open(testCmdFile) as fp:
for line in fp:
match = tcCmdReg.findall(line)
if match:
logFilePath = "testlog/" + tcNameReg.findall(line)[0] + ".log"
print("[INFO] Running " + tcNameReg.findall(line)[0] + " test case for \"" + tcModuleReg.findall(line)[0] + "\"...")
try:
#maven build
subprocess.check_call(match[0] + ">" + logFilePath, stderr=subprocess.STDOUT, shell=True)
print("[SUCCESS] Test case " + tcNameReg.findall(line)[0] + " for \"" + tcModuleReg.findall(line)[0]+ "\" is completed!")
except subprocess.CalledProcessError as e:
print("[ERROR] This test case requires \"pmalloc\" memory service to pass, please check if \"pmalloc\" has been configured correctly! If \"pmalloc\" is installed, please refer to testlog/" + tcNameReg.findall(line)[0] + ".log for detailed information.")
sys.exit(1)
print("[DONE] All test cases are completed! Log files are available under folder testlog!")
| Python | 0 |
bbdc969214e698a62020603dafac9165d7bf6a84 | add tests that train is called if no model passed | tests/cli/test_rasa_interactive.py | tests/cli/test_rasa_interactive.py | import argparse
from typing import Callable, Text
from unittest.mock import Mock
from _pytest.monkeypatch import MonkeyPatch
from _pytest.pytester import RunResult
import rasa
from rasa.cli import interactive, train
from rasa import train as rasa_train
def test_interactive_help(run: Callable[..., RunResult]):
output = run("interactive", "--help")
help_text = """usage: rasa interactive [-h] [-v] [-vv] [--quiet] [--e2e] [-m MODEL]
[--data DATA [DATA ...]] [--skip-visualization]
[--endpoints ENDPOINTS] [-c CONFIG] [-d DOMAIN]
[--out OUT] [--augmentation AUGMENTATION]
[--debug-plots] [--dump-stories] [--force]
[--persist-nlu-data]
{core} ... [model-as-positional-argument]"""
lines = help_text.split("\n")
for i, line in enumerate(lines):
assert output.outlines[i] == line
def test_interactive_core_help(run: Callable[..., RunResult]):
output = run("interactive", "core", "--help")
help_text = """usage: rasa interactive core [-h] [-v] [-vv] [--quiet] [-m MODEL] [-s STORIES]
[--skip-visualization] [--endpoints ENDPOINTS]
[-c CONFIG] [-d DOMAIN] [--out OUT]
[--augmentation AUGMENTATION] [--debug-plots]
[--dump-stories]
[model-as-positional-argument]"""
lines = help_text.split("\n")
for i, line in enumerate(lines):
assert output.outlines[i] == line
def test_pass_arguments_to_rasa_train(
    default_stack_config: Text, monkeypatch: MonkeyPatch
) -> None:
    """The namespace built by the interactive parser must satisfy rasa.train.

    If the parser left any required attribute off the Namespace, the
    (mocked) train call below would raise.
    """
    # Create parser
    parser = argparse.ArgumentParser()
    sub_parser = parser.add_subparsers()
    interactive.add_subparser(sub_parser, [])
    # Parse interactive command
    args = parser.parse_args(["interactive", "--config", default_stack_config])
    interactive._set_not_required_args(args)
    # Mock actual training
    mock = Mock()
    monkeypatch.setattr(rasa, "train", mock.method)
    # If the `Namespace` object does not have all required fields this will throw
    train.train(args)
    # Assert `train` was actually called
    mock.method.assert_called_once()
def test_train_called_when_no_model_passed(
    default_stack_config: Text, monkeypatch: MonkeyPatch,
) -> None:
    """`rasa interactive` without a model argument must trigger a full train."""
    parser = argparse.ArgumentParser()
    sub_parser = parser.add_subparsers()
    interactive.add_subparser(sub_parser, [])
    args = parser.parse_args(
        [
            "interactive",
            "--config",
            default_stack_config,
            "--data",
            "examples/moodbot/data",
        ]
    )
    interactive._set_not_required_args(args)
    # Mock actual training and interactive learning methods
    mock = Mock()
    monkeypatch.setattr(train, "train", mock.train_model)
    monkeypatch.setattr(interactive, "perform_interactive_learning", mock.method)
    interactive.interactive(args)
    mock.train_model.assert_called_once()
def test_train_core_called_when_no_model_passed_and_core(
    default_stack_config: Text, monkeypatch: MonkeyPatch,
) -> None:
    """`rasa interactive core` without a model must trigger a core-only train."""
    parser = argparse.ArgumentParser()
    sub_parser = parser.add_subparsers()
    interactive.add_subparser(sub_parser, [])
    args = parser.parse_args(
        [
            "interactive",
            "core",
            "--config",
            default_stack_config,
            "--stories",
            "examples/moodbot/data/stories.md",
            "--domain",
            "examples/moodbot/domain.yml",
        ]
    )
    interactive._set_not_required_args(args)
    # Mock actual training and interactive learning methods
    mock = Mock()
    monkeypatch.setattr(train, "train_core", mock.train_core)
    monkeypatch.setattr(interactive, "perform_interactive_learning", mock.method)
    interactive.interactive(args)
    mock.train_core.assert_called_once()
| import argparse
from typing import Callable, Text
from unittest.mock import Mock
from _pytest.monkeypatch import MonkeyPatch
from _pytest.pytester import RunResult
import rasa
from rasa.cli import interactive, train
def test_interactive_help(run: Callable[..., RunResult]):
output = run("interactive", "--help")
help_text = """usage: rasa interactive [-h] [-v] [-vv] [--quiet] [--e2e] [-m MODEL]
[--data DATA [DATA ...]] [--skip-visualization]
[--endpoints ENDPOINTS] [-c CONFIG] [-d DOMAIN]
[--out OUT] [--augmentation AUGMENTATION]
[--debug-plots] [--dump-stories] [--force]
[--persist-nlu-data]
{core} ... [model-as-positional-argument]"""
lines = help_text.split("\n")
for i, line in enumerate(lines):
assert output.outlines[i] == line
def test_interactive_core_help(run: Callable[..., RunResult]):
output = run("interactive", "core", "--help")
help_text = """usage: rasa interactive core [-h] [-v] [-vv] [--quiet] [-m MODEL] [-s STORIES]
[--skip-visualization] [--endpoints ENDPOINTS]
[-c CONFIG] [-d DOMAIN] [--out OUT]
[--augmentation AUGMENTATION] [--debug-plots]
[--dump-stories]
[model-as-positional-argument]"""
lines = help_text.split("\n")
for i, line in enumerate(lines):
assert output.outlines[i] == line
def test_pass_arguments_to_rasa_train(
default_stack_config: Text, monkeypatch: MonkeyPatch
) -> None:
# Create parser
parser = argparse.ArgumentParser()
sub_parser = parser.add_subparsers()
interactive.add_subparser(sub_parser, [])
# Parse interactive command
args = parser.parse_args(["interactive", "--config", default_stack_config])
interactive._set_not_required_args(args)
# Mock actual training
mock = Mock()
monkeypatch.setattr(rasa, "train", mock.method)
# If the `Namespace` object does not have all required fields this will throw
train.train(args)
# Assert `train` was actually code
mock.method.assert_called_once()
| Python | 0 |
200a2492129cbfab4024c435e8971e79c8aa836f | Build scripts: the insertValue transformation no longer uses the re module | buildlib/transformations.py | buildlib/transformations.py | '''
The MIT License
Copyright (c) 2011 Steven G. Brown
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
'''
# Developed with Python v3.0.1
import io, os, re, sys
def insertValue(includeTagName, value):
    '''
    Return a transformation that substitutes the given value for an inline
    #INCLUDE tag. For example, with includeTagName='version' and value='1.0',
    every occurrence of '#INCLUDE version#' in the script contents is
    replaced with '1.0'.
    '''
    tag = '#INCLUDE ' + includeTagName + '#'
    def insertValueTransformation(fileContents):
        return fileContents.replace(tag, value)
    return insertValueTransformation
def insertExternalFiles(*includesDirectories):
    '''
    Return a function that will transform the script contents by including the
    contents of external files. For example, if the script contains the line:
    '#INCLUDE Frames.js;', then the file 'Frames.js' will be found in one of the
    includes directories and inserted in this location. If the inserted file has
    a license header, it will be removed. If the file to be inserted cannot be
    found, a ValueError will be thrown.
    '''
    # Matches a whole-line include directive, e.g. '#INCLUDE Frames.js;'.
    includesRegex = re.compile(r'^#INCLUDE ([^;]*);$', re.MULTILINE)
    def insertExternalFilesTransformation(fileContents):
        # Repeatedly expand the first remaining directive until none are left.
        while True:
            includesMatch = includesRegex.search(fileContents)
            if not includesMatch:
                break
            with io.open(_findFile(includesDirectories, includesMatch.group(1))) as includeFile:
                includeFileContents = _removeLicenseHeader(includeFile.read())
            leadingFileContents = fileContents[:includesMatch.start()]
            trailingFileContents = fileContents[includesMatch.end():]
            # Ensure a blank line separates the inclusion from what follows.
            if len(trailingFileContents) >= 2 and trailingFileContents[:2] != '\n\n':
                trailingFileContents = '\n\n' + trailingFileContents
            # Keep the directive, commented out, above the inserted contents.
            fileContents =\
                leadingFileContents +\
                '//' + includesMatch.group() + '\n' +\
                '\n' +\
                includeFileContents.strip() +\
                trailingFileContents
        return fileContents
    return insertExternalFilesTransformation
def _findFile(searchDirectories, filename):
    '''
    Find a file in the given list of search directories. If found, the absolute
    path to this file will be returned. Otherwise, a ValueError will be thrown.
    '''
    # The first directory containing the file wins.
    for directory in searchDirectories:
        absolutePath = os.path.join(directory, filename)
        if os.path.exists(absolutePath):
            return absolutePath
    raise ValueError('\'' + filename + '\' not found in ' + str(searchDirectories))
def _removeLicenseHeader(scriptContents):
    '''
    Return the given script contents with the license header removed.
    '''
    # Matches a leading block comment terminated by ' */' followed by a blank
    # line, capturing everything after it.
    licenseHeaderRegex = re.compile(r'^.*?\n\s\*/\n\n\s*(.*)', re.DOTALL)
    licenseHeaderMatch = licenseHeaderRegex.match(scriptContents)
    if licenseHeaderMatch:
        scriptContents = licenseHeaderMatch.group(1)
    return scriptContents
def prepend(filePath):
    '''
    Return a transformation that prefixes the script contents with the
    contents of the file at *filePath*, separated by a newline.
    '''
    # Read the header once, when the transformation is built.
    with io.open(filePath) as headerFile:
        header = headerFile.read()
    def prependTransformation(fileContents):
        return '\n'.join([header, fileContents])
    return prependTransformation
| '''
The MIT License
Copyright (c) 2011 Steven G. Brown
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
'''
# Developed with Python v3.0.1
import io, os, re, sys
def insertValue(variableName, variableValue):
    '''
    Return a function that will transform the script contents by replacing an
    inline #INCLUDE tag with variableValue. For example, if this function is
    called with variableName='version' and variableValue='1.0', then any
    occurances of '#INCLUDE version#' in the given script will be replaced
    with '1.0'.
    '''
    tag = '#INCLUDE ' + variableName + '#'
    def insertValueTransformation(fileContents):
        # Plain string replacement: re.sub would treat variableName as a
        # regex pattern and variableValue as a replacement template, so a
        # backslash or metacharacter in either (e.g. a Windows path) would
        # corrupt the output or raise a "bad escape" error.
        return fileContents.replace(tag, variableValue)
    return insertValueTransformation
def insertExternalFiles(*includesDirectories):
'''
Return a function that will transform the script contents by including the
contents of external files. For example, if the script contains the line:
'#INCLUDE Frames.js;', then the file 'Frames.js' will be found in one of the
includes directories and inserted in this location. If the inserted file has
a license header, it will be removed. If the file to be inserted cannot be
found, a ValueError will be thrown.
'''
includesRegex = re.compile(r'^#INCLUDE ([^;]*);$', re.MULTILINE)
def insertExternalFilesTransformation(fileContents):
while True:
includesMatch = includesRegex.search(fileContents)
if not includesMatch:
break
with io.open(_findFile(includesDirectories, includesMatch.group(1))) as includeFile:
includeFileContents = _removeLicenseHeader(includeFile.read())
leadingFileContents = fileContents[:includesMatch.start()]
trailingFileContents = fileContents[includesMatch.end():]
if len(trailingFileContents) >= 2 and trailingFileContents[:2] != '\n\n':
trailingFileContents = '\n\n' + trailingFileContents
fileContents =\
leadingFileContents +\
'//' + includesMatch.group() + '\n' +\
'\n' +\
includeFileContents.strip() +\
trailingFileContents
return fileContents
return insertExternalFilesTransformation
def _findFile(searchDirectories, filename):
'''
Find a file in the given list of search directories. If found, the absolute
path to this file will be returned. Otherwise, a ValueError will be thrown.
'''
for directory in searchDirectories:
absolutePath = os.path.join(directory, filename)
if os.path.exists(absolutePath):
return absolutePath
raise ValueError('\'' + filename + '\' not found in ' + str(searchDirectories))
def _removeLicenseHeader(scriptContents):
'''
Return the given script contents with the license header removed.
'''
licenseHeaderRegex = re.compile(r'^.*?\n\s\*/\n\n\s*(.*)', re.DOTALL)
licenseHeaderMatch = licenseHeaderRegex.match(scriptContents)
if licenseHeaderMatch:
scriptContents = licenseHeaderMatch.group(1)
return scriptContents
def prepend(filePath):
'''
Return a function that will transform the script contents by prepending the
contents of the given file.
'''
with io.open(filePath) as fileToPrepend:
fileToPrependContents = fileToPrepend.read()
def prependTransformation(fileContents):
return fileToPrependContents + '\n' + fileContents
return prependTransformation
| Python | 0 |
e977d997ab66196b519c60dea34e360dfa4fb15d | Complete decreasing pivot swap reverse sol | lc0031_next_permutation.py | lc0031_next_permutation.py | """Leetcode 31. Next Permutation
Medium
URL: https://leetcode.com/problems/next-permutation/
Implement next permutation, which rearranges numbers into the lexicographically
next greater permutation of numbers.
If such arrangement is not possible, it must rearrange it as the lowest possible
order (ie, sorted in ascending order).
The replacement must be in-place and use only constant extra memory.
Here are some examples. Inputs are in the left-hand column and its corresponding
outputs are in the right-hand column.
1,2,3 -> 1,3,2
3,2,1 -> 1,2,3
1,1,5 -> 1,5,1
"""
class SolutionDecreasingPivotSwapReverse(object):
    def nextPermutation(self, nums):
        """
        :type nums: List[int]
        :rtype: None Do not return anything, modify nums in-place instead.

        Scan from the right for the first ascent (pivot), swap the pivot with
        the rightmost element greater than it, then reverse the suffix.

        Time complexity: O(n).
        Space complexity: O(1).
        """
        n = len(nums)
        # Rightmost index whose element is smaller than its successor.
        pivot = n - 2
        while pivot >= 0 and nums[pivot] >= nums[pivot + 1]:
            pivot -= 1
        if pivot < 0:
            # Sequence is entirely non-increasing: wrap to the smallest order.
            nums.reverse()
            return None
        # Rightmost element strictly greater than the pivot value; guaranteed
        # to lie right of the pivot because nums[pivot + 1] > nums[pivot].
        succ = n - 1
        while nums[succ] <= nums[pivot]:
            succ -= 1
        nums[pivot], nums[succ] = nums[succ], nums[pivot]
        # The suffix is non-increasing; reversing makes it minimal.
        nums[pivot + 1:] = nums[:pivot:-1]
def main():
    # Examples from the problem statement (Python 2 print statements).
    # 1,2,3 -> 1,3,2
    nums = [1,2,3]
    SolutionDecreasingPivotSwapReverse().nextPermutation(nums)
    print nums
    # 3,2,1 -> 1,2,3
    nums = [3,2,1]
    SolutionDecreasingPivotSwapReverse().nextPermutation(nums)
    print nums
    # 1,1,5 -> 1,5,1
    nums = [1,1,5]
    SolutionDecreasingPivotSwapReverse().nextPermutation(nums)
    print nums
if __name__ == '__main__':
main()
| """Leetcode 31. Next Permutation
Medium
URL: https://leetcode.com/problems/next-permutation/
Implement next permutation, which rearranges numbers into the lexicographically
next greater permutation of numbers.
If such arrangement is not possible, it must rearrange it as the lowest possible
order (ie, sorted in ascending order).
The replacement must be in-place and use only constant extra memory.
Here are some examples. Inputs are in the left-hand column and its corresponding
outputs are in the right-hand column.
1,2,3 -> 1,3,2
3,2,1 -> 1,2,3
1,1,5 -> 1,5,1
"""
class Solution(object):
    def nextPermutation(self, nums):
        """
        :type nums: List[int]
        :rtype: None Do not return anything, modify nums in-place instead.

        Rearranges nums into the lexicographically next greater permutation,
        or into ascending order when nums is already the greatest one.
        (Previously an unimplemented stub that silently did nothing.)

        Time complexity: O(n). Space complexity: O(1).
        """
        # Find the rightmost ascent: nums[i] < nums[i + 1].
        i = len(nums) - 2
        while i >= 0 and nums[i] >= nums[i + 1]:
            i -= 1
        if i >= 0:
            # Swap with the rightmost element greater than the pivot.
            j = len(nums) - 1
            while nums[j] <= nums[i]:
                j -= 1
            nums[i], nums[j] = nums[j], nums[i]
        # Reverse the decreasing suffix (the whole list when i < 0).
        nums[i + 1:] = reversed(nums[i + 1:])
def main():
pass
if __name__ == '__main__':
main()
| Python | 0.000002 |
587c4603d3ab379e4ee22f2dcda7d7798cd35dcf | fix spacing around arguments | db_credentials/DBCredentials.py | db_credentials/DBCredentials.py | #! /usr/local/bin/python
import sys
import re
class DBCredentials:
    """Holds database connection credentials, optionally loaded from a file."""

    def __init__(self):
        # All known credential fields, initially empty strings.
        self.creds = {
            'host':'',
            'port':'',
            'username':'',
            'password':'',
            'database':'',
        }
        return;

    # Load credentials from a file: no input validation.
    #
    def load_file(self, filename):
        """Parse whitespace-free key=value pairs from *filename* into creds.

        Keys are taken as-is, so unknown keys are added to the dict too.
        """
        # `with` guarantees the handle is closed; the original called
        # `f.close` without parentheses, which never closed the file.
        with open(filename, 'r') as f:
            text = f.read()
        # e.g. [('host', 'localhost'), ('username', 'foo'), ('password', 'bar')]
        for key, value in re.findall(r'(\w+)=([^\s]+)', text):
            self.creds[key] = value
        return

    def get_host(self):
        return self.creds['host']

    def set_host(self, host):
        self.creds['host'] = host

    # listener port - return if specified, otherwise default to 3306
    #
    def get_port(self):
        if self.creds['port']:
            return self.creds['port']
        else:
            return '3306'

    def set_port(self, port):
        self.creds['port'] = port

    def get_username(self):
        return self.creds['username']

    def set_username(self, username):
        # Bug fix: previously wrote to creds['sid'], so get_username() never
        # reflected a value stored via this setter.
        self.creds['username'] = username

    def get_password(self):
        return self.creds['password']

    def set_password(self, password):
        self.creds['password'] = password

    # database - defaults to the host name when unset
    #
    def get_database(self):
        if self.creds['database'] == '' and self.creds['host'] != '':
            self.creds['database'] = self.creds['host']
        return self.creds['database']

    def set_database(self, database):
        self.creds['database'] = database
| #! /usr/local/bin/python
import sys
import re
class DBCredentials:
def __init__( self ):
self.creds = {
'host':'',
'port':'',
'username':'',
'password':'',
'database':'',
}
return;
# Load credentials from a file: no input validation.
#
def load_file( self, filename ):
f = open( filename, 'r' )
text = f.read()
f.close
#print text
tuples = re.findall( r'(\w+)=([^\s]+)', text )
#print tuples
#[('host', 'localhost'), ('username', 'foo'), ('password', 'bar')]
for tuple in tuples:
self.creds[ tuple[0] ] = tuple[1]
#print self.creds
return
def get_host( self ):
return self.creds['host']
def set_host( self, host ):
self.creds['host'] = host
# listener port - return if specified, otherwise default to 3306
#
def get_port( self ):
if self.creds['port']:
return self.creds['port']
else:
return '3306'
def set_port( self, port ):
self.creds['port'] = port
def get_username( self ):
return self.creds['username']
def set_username( self, username ):
self.creds['sid'] = username
def get_password( self ):
return self.creds['password']
def set_password( self, password ):
self.creds['password'] = password
# database
#
def get_database( self ):
if self.creds['database'] == '' and self.creds['host'] != '':
self.creds['database'] = self.creds['host']
return self.creds['database']
def set_database( self, database ):
self.creds['database'] = database
| Python | 0.000151 |
b939558f3d4bd0fa90f3f467ca85f698c4813046 | Update __init__.py | comps/__init__.py | comps/__init__.py | """
A simple application that provides an entry point for integrating
front end designers into a django project
"""
__version__ = '0.3.0'
| """
A simple application that provides an entry point for integrating
front end designers into a django project
"""
__version__ = '0.2.0'
| Python | 0.000001 |
86f143863fd9f0786fe83a5038b970b4782306ce | Check table exist | erpnext/patches/v7_0/update_missing_employee_in_timesheet.py | erpnext/patches/v7_0/update_missing_employee_in_timesheet.py | from __future__ import unicode_literals
import frappe
def execute():
    """Backfill Timesheet.employee (and audit fields) from the legacy
    Time Log records that each timesheet was migrated from."""
    # Older sites may not have the deprecated Time Log table at all.
    if frappe.db.table_exists("Time Log"):
        # Match each employee-less Timesheet to its originating Time Log via
        # identical first-row timings, hours and billing rate.
        timesheet = frappe.db.sql("""select tl.employee as employee, ts.name as name,
            tl.modified as modified, tl.modified_by as modified_by, tl.creation as creation, tl.owner as owner
            from
                `tabTimesheet` ts, `tabTimesheet Detail` tsd, `tabTime Log` tl
            where
                tsd.parent = ts.name and tl.from_time = tsd.from_time and tl.to_time = tsd.to_time
                and tl.hours = tsd.hours and tl.billing_rate = tsd.billing_rate and tsd.idx=1
                and tl.docstatus < 2 and (ts.employee = '' or ts.employee is null)""", as_dict=1)

        for data in timesheet:
            ts_doc = frappe.get_doc('Timesheet', data.name)
            # Only single-entry timesheets can be attributed unambiguously.
            if len(ts_doc.time_logs) == 1:
                frappe.db.sql(""" update `tabTimesheet` set creation = %(creation)s,
                    owner = %(owner)s, modified = %(modified)s, modified_by = %(modified_by)s,
                    employee = %(employee)s where name = %(name)s""", data)
| from __future__ import unicode_literals
import frappe
def execute():
timesheet = frappe.db.sql("""select tl.employee as employee, ts.name as name,
tl.modified as modified, tl.modified_by as modified_by, tl.creation as creation, tl.owner as owner
from
`tabTimesheet` ts, `tabTimesheet Detail` tsd, `tabTime Log` tl
where
tsd.parent = ts.name and tl.from_time = tsd.from_time and tl.to_time = tsd.to_time
and tl.hours = tsd.hours and tl.billing_rate = tsd.billing_rate and tsd.idx=1
and tl.docstatus < 2 and (ts.employee = '' or ts.employee is null)""", as_dict=1)
for data in timesheet:
ts_doc = frappe.get_doc('Timesheet', data.name)
if len(ts_doc.time_logs) == 1:
frappe.db.sql(""" update `tabTimesheet` set creation = %(creation)s,
owner = %(owner)s, modified = %(modified)s, modified_by = %(modified_by)s,
employee = %(employee)s where name = %(name)s""", data)
| Python | 0 |
2d3b97e150f96ed4e7295d4e5b288951c3a7ee50 | fix freq-check plug&play problem | catsnapshot/snapschedule.py | catsnapshot/snapschedule.py | import time,schedule,datetime
import signal,os,sys
task_list = list()
write_list = []
feqcheck_list = []
def clean_task_list():
    """Reset per-tick state: blank this tick's snapshot slots and empty the
    write queue, keeping task_list's length (one slot per scheduled manager)."""
    global task_list
    global write_list
    for i in range(len(task_list)):
        task_list[i] = None
    write_list = []
class schedule_sig_handler(object):
    """Signal handler that defers shutdown until the current tick finishes."""

    def __init__(self, status):
        self.status = status

    def handler_func(self, signum, frame):
        # Outside a tick: terminate immediately. Mid-tick: just flag for
        # exit so the loop can finish its current round of work first.
        if self.status != "working":
            sys.exit(0)
        self.status = "exit"
def schedule_loop(interval=1,scheduler=schedule.default_scheduler):
    """Main scheduler loop: run pending jobs every *interval* seconds.

    SIGINT/SIGTERM received mid-tick set an 'exit' flag so the current tick
    completes before the process terminates; outside a tick they exit at once.
    """
    sch_sig = schedule_sig_handler("idle")
    # Set the signal handler (previous handlers are kept for restoration)
    sigint_dh = signal.signal(signal.SIGINT,sch_sig.handler_func)
    sigterm_dh = signal.signal(signal.SIGTERM,sch_sig.handler_func)
    while True:
        sch_sig.status = "working"
        clean_task_list()
        scheduler.run_pending()
        # write snaplogs queued by this tick's jobs
        for need_write in write_list:
            need_write.logs.write(need_write.snaplog_file)
        # feqcheck: retry jobs whose required paths were missing earlier
        schedule_feqcheck_work()
        if sch_sig.status == "exit": sys.exit(0)
        else: sch_sig.status = "idle"
        sys.stdout.flush()
        time.sleep(interval)
    # NOTE(review): unreachable -- the loop only ends via sys.exit() above,
    # so the default handlers below are never restored.
    # Set signal handler to default handler
    signal.signal(signal.SIGINT,sigint_dh)
    signal.signal(signal.SIGTERM,sigterm_dh)
def schedule_check_path(snapmang):
    """Return True when every path required by *snapmang* currently exists.

    A falsy (None/empty) check_path means there is nothing to verify.
    """
    required = snapmang.check_path or []
    return all(os.path.exists(p) for p in required)
def schedule_rerun(job):
    """Execute *job* once without rescheduling it, then stamp last_run."""
    job.job_func()
    # Keep the schedule bookkeeping consistent with a normal run.
    job.last_run = datetime.datetime.now()
def schedule_feqcheck_work():
    """Re-run deferred jobs whose required paths have become available.

    For every manager on the frequency-check list that has a pending job
    (``latest_undone``) and whose check paths now all exist, wait briefly
    for the device to settle and re-run the job without rescheduling it.
    ``latest_undone`` is cleared by schedule_work() once the job succeeds.
    """
    global feqcheck_list
    for snapmang in feqcheck_list:
        if snapmang.latest_undone is not None and schedule_check_path(snapmang):
            # Bug fix: this module imports `time` (not `from time import
            # sleep`), so the bare `sleep(10)` raised NameError whenever a
            # deferred job became runnable again.
            time.sleep(10)  # waiting device
            schedule_rerun(snapmang.latest_undone)
def schedule_work(snapmang,labels,index,job):
    """Scheduled callback: take (or label) a snapshot for one manager.

    snapmang -- the snapshot manager owning slot *index* in task_list
    labels   -- labels to attach to this run's snapshot
    index    -- this manager's slot in the per-tick task_list
    job      -- the schedule job itself, remembered for a later retry
    """
    global task_list
    global write_list
    # If a required path is missing, defer: remember the job so that
    # schedule_feqcheck_work can re-run it once the path reappears.
    if schedule_check_path(snapmang) is False:
        snapmang.latest_undone = job
        return
    if task_list[index] == None:
        # First trigger for this manager in the current tick: snapshot now
        # and queue the manager so the loop writes its snaplog afterwards.
        task_list[index] = snapmang.snapshot(labels,auto_write=False)
        write_list += [snapmang] # add to write_list
    else: # if there is a snapshot in this time , just add label to it.(not take a new snapshot)
        for label in labels:
            task_list[index].labels.add(label)
    snapmang.limit_check()
    snapmang.latest_undone = None
def schedule_task(snapmang):
    """Register a manager's "schedule-time" config with the scheduler.

    For each supported unit (second/minute/hour/day) a periodic job is
    created that calls schedule_work with this manager's task_list slot.
    """
    global task_list
    global feqcheck_list
    if "schedule-time" in snapmang.configs:
        schedule_time = snapmang.configs["schedule-time"]
        # reserve one slot per manager; schedule_work fills it each tick
        task_list += [None]
        index = len(task_list)-1
        # init the deferred-job marker and, when feqcheck is enabled,
        # register the manager for path re-checking between ticks
        snapmang.latest_undone = None
        if "feqcheck" in snapmang.configs:
            if snapmang.configs["feqcheck"] == True:
                feqcheck_list += [snapmang]
        # one scheduled job per configured time unit
        for unit in schedule_time:
            if unit in {"second","minute","hour","day"}:
                job = schedule.every(int(schedule_time[unit]))
                job.unit = unit+"s"  # the schedule library expects the plural form
                if "schedule-labels" in snapmang.configs and\
                   unit in snapmang.configs["schedule-labels"]:
                    # use the labels configured for this unit
                    job.do(schedule_work,snapmang,
                           snapmang.configs["schedule-labels"][unit],index,job)
                else:
                    # default label when none is configured for the unit
                    job.do(schedule_work,snapmang,["node"],index,job)
| import time,schedule,datetime
import signal,os,sys
task_list = list()
write_list = []
feqcheck_list = []
def clean_task_list():
    """Blank every per-tick task slot and empty the pending-write queue."""
    global task_list
    global write_list
    for i in range(len(task_list)):
        task_list[i] = None
    write_list = []
class schedule_sig_handler(object):
    """SIGINT/SIGTERM handler: defer shutdown while an iteration is running."""
    def __init__(self,status):
        # Current loop state: "idle", "working" or "exit".
        self.status = status
    def handler_func(self,signum,frame):
        if self.status == "working":
            # Mid-iteration: finish the current iteration first.
            self.status = "exit"
        else:
            # Idle: safe to stop immediately.
            sys.exit(0)
def schedule_loop(interval=1,scheduler=schedule.default_scheduler):
    """Run the scheduler forever, polling every *interval* seconds.

    SIGINT/SIGTERM only request shutdown mid-iteration; the loop exits via
    sys.exit(0) once the current iteration finishes, so it never returns.
    """
    sch_sig = schedule_sig_handler("idle")
    # Install our handler, remembering the previous handlers for restore.
    sigint_dh = signal.signal(signal.SIGINT,sch_sig.handler_func)
    sigterm_dh = signal.signal(signal.SIGTERM,sch_sig.handler_func)
    while True:
        # Mark the iteration as in progress so signals are deferred.
        sch_sig.status = "working"
        clean_task_list()
        scheduler.run_pending()
        # write snaplogs queued by schedule_work during run_pending()
        for need_write in write_list:
            need_write.logs.write(need_write.snaplog_file)
        # retry jobs that were deferred because a checked path was missing
        schedule_feqcheck_work()
        if sch_sig.status == "exit": sys.exit(0)
        else: sch_sig.status = "idle"
        sys.stdout.flush()
        time.sleep(interval)
    # NOTE(review): unreachable — the `while True` above has no break and
    # only terminates via sys.exit(), so the handlers are never restored.
    signal.signal(signal.SIGINT,sigint_dh)
    signal.signal(signal.SIGTERM,sigterm_dh)
def schedule_check_path(snapmang):
    """Return True when all of snapmang.check_path exist (or none configured)."""
    if snapmang.check_path:
        for path in snapmang.check_path:
            if not os.path.exists(path):
                return False
    return True
def schedule_rerun(job):
    """Run the job , but not reschedule it."""
    # Invoke the wrapped callback and stamp last_run so the job looks
    # freshly run to the schedule library; next_run is left untouched.
    job.job_func()
    job.last_run = datetime.datetime.now() # refresh last_run
def schedule_feqcheck_work():
    """Re-run jobs deferred by missing paths once those paths exist again."""
    global feqcheck_list
    for snapmang in feqcheck_list:
        if snapmang.latest_undone!=None and schedule_check_path(snapmang):
            schedule_rerun(snapmang.latest_undone)
def schedule_work(snapmang,labels,index,job):
    """Scheduled callback: take (or label) a snapshot for one manager.

    Defers the run (remembering *job* in latest_undone) when a required
    path is missing; otherwise snapshots into task_list[index] or merely
    adds labels if this tick already produced a snapshot for the slot.
    """
    global task_list
    global write_list
    # check-path: defer when any required path is missing
    if schedule_check_path(snapmang) is False:
        snapmang.latest_undone = job
        return
    if task_list[index] == None:
        task_list[index] = snapmang.snapshot(labels,auto_write=False)
        write_list += [snapmang] # add to write_list
    else: # if there is a snapshot in this time , just add label to it.(not take a new snapshot)
        for label in labels:
            task_list[index].labels.add(label)
    snapmang.limit_check()
    snapmang.latest_undone = None
def schedule_task(snapmang):
    """ add a task to the scheduler """
    global task_list
    global feqcheck_list
    if "schedule-time" in snapmang.configs:
        schedule_time = snapmang.configs["schedule-time"]
        # reserve one slot per manager; schedule_work fills it each tick
        task_list += [None]
        index = len(task_list)-1
        # init the deferred-job marker and register for feqcheck if enabled
        snapmang.latest_undone = None
        if "feqcheck" in snapmang.configs:
            if snapmang.configs["feqcheck"] == True:
                feqcheck_list += [snapmang]
        # one scheduled job per configured time unit
        for unit in schedule_time:
            if unit in {"second","minute","hour","day"}:
                job = schedule.every(int(schedule_time[unit]))
                job.unit = unit+"s"  # the schedule library expects the plural form
                if "schedule-labels" in snapmang.configs and\
                   unit in snapmang.configs["schedule-labels"]:
                    job.do(schedule_work,snapmang,
                           snapmang.configs["schedule-labels"][unit],index,job)
                else:
                    job.do(schedule_work,snapmang,["node"],index,job)
| Python | 0.000001 |
14bc45b6447424da4c84f84c40f2d897198c73ab | use proper end boundary | custom/icds_reports/utils/aggregation_helpers/aww_incentive.py | custom/icds_reports/utils/aggregation_helpers/aww_incentive.py | from __future__ import absolute_import
from __future__ import unicode_literals
from corehq.apps.userreports.models import StaticDataSourceConfiguration, get_datasource_config
from corehq.apps.userreports.util import get_table_name
from custom.icds_reports.const import AWW_INCENTIVE_TABLE
from custom.icds_reports.utils.aggregation_helpers import BaseICDSAggregationHelper, month_formatter
class AwwIncentiveAggregationHelper(BaseICDSAggregationHelper):
aggregate_parent_table = AWW_INCENTIVE_TABLE
aggregate_child_table_prefix = 'icds_db_aww_incentive_'
@property
def ccs_record_case_ucr_tablename(self):
doc_id = StaticDataSourceConfiguration.get_doc_id(self.domain, 'static-ccs_record_cases')
config, _ = get_datasource_config(doc_id, self.domain)
return get_table_name(self.domain, config.table_id)
def aggregation_query(self):
month = self.month.replace(day=1)
tablename = self.generate_child_tablename(month)
query_params = {
"month": month_formatter(month),
"state_id": self.state_id
}
return """
INSERT INTO "{tablename}" (
state_id, month, awc_id, block_id, state_name, district_name, block_name,
supervisor_name, awc_name, aww_name, contact_phone_number, wer_weighed,
wer_eligible, awc_num_open, valid_visits, expected_visits
) (
SELECT
%(state_id)s AS state_id,
%(month)s AS month,
awcm.awc_id,
awcm.block_id,
awcm.state_name,
awcm.district_name,
awcm.block_name,
awcm.supervisor_name,
awcm.awc_name,
awcm.aww_name,
awcm.contact_phone_number,
awcm.wer_weighed,
awcm.wer_eligible,
awcm.awc_days_open,
sum(ccsm.valid_visits),
sum(ccsm.expected_visits)
FROM agg_awc_monthly as awcm
INNER JOIN agg_ccs_record_monthly AS ccsm
ON ccsm.month=awcm.month AND ccsm.awc_id=awcm.awc_id AND ccsm.aggregation_level=awcm.aggregation_level
WHERE awcm.month = %(month)s AND awcm.state_id = %(state_id)s and awcm.aggregation_level=5
GROUP BY awcm.awc_id, awcm.block_id, awcm.state_name, awcm.district_name,
awcm.block_name, awcm.supervisor_name, awcm.awc_name, awcm.aww_name,
awcm.contact_phone_number, awcm.wer_weighed, awcm.wer_eligible,
awcm.awc_days_open
);
/* update expected visits for cf cases (not in agg_ccs_record */
UPDATE {tablename} perf
SET expected_visits = expected_visits + ucr.expected
FROM (
SELECT SUM(0.39) AS expected, awc_id
FROM {ccs_record_case_ucr}
WHERE %(month)s - add > 183 AND (closed_on IS NULL OR date_trunc('month', closed_on)::DATE > %(month)s)
GROUP BY awc_id
) ucr
WHERE ucr.awc_id = perf.awc_id
""".format(
tablename=tablename,
ccs_record_case_ucr=self.ccs_record_case_ucr_tablename
), query_params
| from __future__ import absolute_import
from __future__ import unicode_literals
from corehq.apps.userreports.models import StaticDataSourceConfiguration, get_datasource_config
from corehq.apps.userreports.util import get_table_name
from custom.icds_reports.const import AWW_INCENTIVE_TABLE
from custom.icds_reports.utils.aggregation_helpers import BaseICDSAggregationHelper, month_formatter
class AwwIncentiveAggregationHelper(BaseICDSAggregationHelper):
aggregate_parent_table = AWW_INCENTIVE_TABLE
aggregate_child_table_prefix = 'icds_db_aww_incentive_'
@property
def ccs_record_case_ucr_tablename(self):
doc_id = StaticDataSourceConfiguration.get_doc_id(self.domain, 'static-ccs_record_cases')
config, _ = get_datasource_config(doc_id, self.domain)
return get_table_name(self.domain, config.table_id)
def aggregation_query(self):
month = self.month.replace(day=1)
tablename = self.generate_child_tablename(month)
query_params = {
"month": month_formatter(month),
"state_id": self.state_id
}
return """
INSERT INTO "{tablename}" (
state_id, month, awc_id, block_id, state_name, district_name, block_name,
supervisor_name, awc_name, aww_name, contact_phone_number, wer_weighed,
wer_eligible, awc_num_open, valid_visits, expected_visits
) (
SELECT
%(state_id)s AS state_id,
%(month)s AS month,
awcm.awc_id,
awcm.block_id,
awcm.state_name,
awcm.district_name,
awcm.block_name,
awcm.supervisor_name,
awcm.awc_name,
awcm.aww_name,
awcm.contact_phone_number,
awcm.wer_weighed,
awcm.wer_eligible,
awcm.awc_days_open,
sum(ccsm.valid_visits),
sum(ccsm.expected_visits)
FROM agg_awc_monthly as awcm
INNER JOIN agg_ccs_record_monthly AS ccsm
ON ccsm.month=awcm.month AND ccsm.awc_id=awcm.awc_id AND ccsm.aggregation_level=awcm.aggregation_level
WHERE awcm.month = %(month)s AND awcm.state_id = %(state_id)s and awcm.aggregation_level=5
GROUP BY awcm.awc_id, awcm.block_id, awcm.state_name, awcm.district_name,
awcm.block_name, awcm.supervisor_name, awcm.awc_name, awcm.aww_name,
awcm.contact_phone_number, awcm.wer_weighed, awcm.wer_eligible,
awcm.awc_days_open
);
/* update expected visits for cf cases (not in agg_ccs_record */
UPDATE {tablename} perf
SET expected_visits = expected_visits + ucr.expected
FROM (
SELECT SUM(0.39) AS expected, awc_id
FROM {ccs_record_case_ucr}
WHERE %(month)s - add > 183 AND (closed_on IS NULL OR date_trunc('month', closed_on)::DATE >= %(month)s)
GROUP BY awc_id
) ucr
WHERE ucr.awc_id = perf.awc_id
""".format(
tablename=tablename,
ccs_record_case_ucr=self.ccs_record_case_ucr_tablename
), query_params
| Python | 0.000083 |
f04a7d6ac961f1ebb5ac065c8e87d7b45119c288 | Add an integration test for charmhub.list_resources | tests/integration/test_charmhub.py | tests/integration/test_charmhub.py | import pytest
from .. import base
from juju.errors import JujuAPIError, JujuError
from juju import jasyncio
@base.bootstrapped
@pytest.mark.asyncio
async def test_info(event_loop):
    # get_charm_id should resolve a Charmhub charm and echo back its name.
    async with base.CleanModel() as model:
        _, name = await model.charmhub.get_charm_id("hello-juju")
        assert name == "hello-juju"
@base.bootstrapped
@pytest.mark.asyncio
@pytest.mark.skip('CharmHub facade no longer exists')
async def test_info_with_channel(event_loop):
    # info() with an explicit channel should include that channel in the map.
    async with base.CleanModel() as model:
        result = await model.charmhub.info("hello-juju", "latest/stable")
        assert result.result.name == "hello-juju"
        assert "latest/stable" in result.result.channel_map
@base.bootstrapped
@pytest.mark.asyncio
@pytest.mark.skip('CharmHub facade no longer exists')
async def test_info_not_found(event_loop):
    # An unknown charm name should raise JujuAPIError with a clear message.
    async with base.CleanModel() as model:
        try:
            await model.charmhub.info("badnameforapp")
        except JujuAPIError as e:
            assert e.message == "badnameforapp not found"
        else:
            assert False
@base.bootstrapped
@pytest.mark.asyncio
@pytest.mark.skip('CharmHub facade no longer exists')
async def test_find(event_loop):
    # find() should return non-empty results, each with a name and known type.
    async with base.CleanModel() as model:
        result = await model.charmhub.find("kube")
        assert len(result.result) > 0
        for resp in result.result:
            assert resp.name != ""
            assert resp.type_ in ["charm", "bundle"]
@base.bootstrapped
@pytest.mark.asyncio
@pytest.mark.skip('CharmHub facade no longer exists')
async def test_find_bundles(event_loop):
    # Restricting charm_type to "bundle" should only return bundles.
    async with base.CleanModel() as model:
        result = await model.charmhub.find("kube", charm_type="bundle")
        assert len(result.result) > 0
        for resp in result.result:
            assert resp.name != ""
            assert resp.type_ in ["bundle"]
@base.bootstrapped
@pytest.mark.asyncio
@pytest.mark.skip('CharmHub facade no longer exists')
async def test_find_all(event_loop):
    # An empty query should list everything, charms and bundles alike.
    async with base.CleanModel() as model:
        result = await model.charmhub.find("")
        assert len(result.result) > 0
        for resp in result.result:
            assert resp.name != ""
            assert resp.type_ in ["charm", "bundle"]
@base.bootstrapped
@pytest.mark.asyncio
async def test_subordinate_charm_zero_units(event_loop):
    # normally in pylibjuju deploy num_units defaults to 1, we switch
    # that to 0 behind the scenes if we see that the charmhub charm
    # we're deploying is a subordinate charm
    # NOTE(review): the fixed 5s sleeps make this timing-sensitive — verify
    # they are long enough for the controller under CI load.
    async with base.CleanModel() as model:
        # rsyslog-forwarder-ha is a subordinate charm
        app = await model.deploy('rsyslog-forwarder-ha')
        await jasyncio.sleep(5)
        assert len(app.units) == 0
        await app.destroy()
        await jasyncio.sleep(5)
        # note that it'll error if the user tries to use num_units
        with pytest.raises(JujuError):
            await model.deploy('rsyslog-forwarder-ha', num_units=175)
        # (full disclosure: it'll quitely switch to 0 if user enters
        # num_units=1, instead of erroring)
        app2 = await model.deploy('rsyslog-forwarder-ha', num_units=1)
        await jasyncio.sleep(5)
        assert len(app2.units) == 0
@base.bootstrapped
@pytest.mark.asyncio
async def test_list_resources(event_loop):
    # list_resources should return a non-empty list for a charm that
    # publishes resources (postgresql does).
    async with base.CleanModel() as model:
        resources = await model.charmhub.list_resources('postgresql')
        assert type(resources) == list and len(resources) > 0
| import pytest
from .. import base
from juju.errors import JujuAPIError, JujuError
from juju import jasyncio
@base.bootstrapped
@pytest.mark.asyncio
async def test_info(event_loop):
    # get_charm_id should resolve a Charmhub charm and echo back its name.
    async with base.CleanModel() as model:
        _, name = await model.charmhub.get_charm_id("hello-juju")
        assert name == "hello-juju"
@base.bootstrapped
@pytest.mark.asyncio
@pytest.mark.skip('CharmHub facade no longer exists')
async def test_info_with_channel(event_loop):
    # info() with an explicit channel should include that channel in the map.
    async with base.CleanModel() as model:
        result = await model.charmhub.info("hello-juju", "latest/stable")
        assert result.result.name == "hello-juju"
        assert "latest/stable" in result.result.channel_map
@base.bootstrapped
@pytest.mark.asyncio
@pytest.mark.skip('CharmHub facade no longer exists')
async def test_info_not_found(event_loop):
    # An unknown charm name should raise JujuAPIError with a clear message.
    async with base.CleanModel() as model:
        try:
            await model.charmhub.info("badnameforapp")
        except JujuAPIError as e:
            assert e.message == "badnameforapp not found"
        else:
            assert False
@base.bootstrapped
@pytest.mark.asyncio
@pytest.mark.skip('CharmHub facade no longer exists')
async def test_find(event_loop):
    # find() should return non-empty results, each with a name and known type.
    async with base.CleanModel() as model:
        result = await model.charmhub.find("kube")
        assert len(result.result) > 0
        for resp in result.result:
            assert resp.name != ""
            assert resp.type_ in ["charm", "bundle"]
@base.bootstrapped
@pytest.mark.asyncio
@pytest.mark.skip('CharmHub facade no longer exists')
async def test_find_bundles(event_loop):
    # Restricting charm_type to "bundle" should only return bundles.
    async with base.CleanModel() as model:
        result = await model.charmhub.find("kube", charm_type="bundle")
        assert len(result.result) > 0
        for resp in result.result:
            assert resp.name != ""
            assert resp.type_ in ["bundle"]
@base.bootstrapped
@pytest.mark.asyncio
@pytest.mark.skip('CharmHub facade no longer exists')
async def test_find_all(event_loop):
    # An empty query should list everything, charms and bundles alike.
    async with base.CleanModel() as model:
        result = await model.charmhub.find("")
        assert len(result.result) > 0
        for resp in result.result:
            assert resp.name != ""
            assert resp.type_ in ["charm", "bundle"]
@base.bootstrapped
@pytest.mark.asyncio
async def test_subordinate_charm_zero_units(event_loop):
    # normally in pylibjuju deploy num_units defaults to 1, we switch
    # that to 0 behind the scenes if we see that the charmhub charm
    # we're deploying is a subordinate charm
    # NOTE(review): the fixed 5s sleeps make this timing-sensitive — verify
    # they are long enough for the controller under CI load.
    async with base.CleanModel() as model:
        # rsyslog-forwarder-ha is a subordinate charm
        app = await model.deploy('rsyslog-forwarder-ha')
        await jasyncio.sleep(5)
        assert len(app.units) == 0
        await app.destroy()
        await jasyncio.sleep(5)
        # note that it'll error if the user tries to use num_units
        with pytest.raises(JujuError):
            await model.deploy('rsyslog-forwarder-ha', num_units=175)
        # (full disclosure: it'll quitely switch to 0 if user enters
        # num_units=1, instead of erroring)
        app2 = await model.deploy('rsyslog-forwarder-ha', num_units=1)
        await jasyncio.sleep(5)
        assert len(app2.units) == 0
| Python | 0 |
ff03fb51d751c3dc820c2aee9b8ba2a7c45f1f0b | Split up assertions in `user_create` test #580 | tests/lib/user/test_create_user.py | tests/lib/user/test_create_user.py | import dataclasses
from datetime import date
import pytest
from pycroft.lib.user import create_user
from tests import factories
from .assertions import assert_account_name, assert_membership_groups, assert_logmessage_startswith
@dataclasses.dataclass
class UserData:
    """Plain input data used to drive the user-creation fixtures."""
    name: str
    login: str
    email: str
    mac: str  # MAC address string; not passed to create_user in these tests
    birthdate: date
class TestUserCreation:
    """End-to-end checks of pycroft.lib.user.create_user via class-scoped fixtures."""
    @pytest.fixture(scope="class")
    def member_group(self, class_session):
        # Property group granting membership; the new user is added to it.
        return factories.property.MemberPropertyGroupFactory.create()
    @pytest.fixture(scope="class")
    def room(self, class_session):
        # A patched room supplies the address used for the new user.
        return factories.RoomFactory.create(patched_with_subnet=True)
    @pytest.fixture(scope="class")
    def user_data(self) -> UserData:
        """Canonical input data shared by all tests in this class."""
        return UserData(
            name="Hans",
            login="hans66",
            email="hans@hans.de",
            mac="12:11:11:11:11:11",
            birthdate=date.fromisoformat("1990-01-01"),
        )
    @pytest.fixture(scope="class")
    def user_mail_capture(self):
        # TODO actually test whether mails are sent out correctly instead of mocking
        # mocking is only done because we don't test for the mails anyway
        from unittest.mock import patch
        with patch("pycroft.lib.user.user_send_mails") as p:
            yield p
    @pytest.fixture(scope="class", autouse=True)
    def new_user(self, class_session, user_data, room, processor, member_group, user_mail_capture):
        # autouse: every test below runs against this one created user.
        new_user, _ = create_user(
            user_data.name,
            user_data.login,
            user_data.email,
            user_data.birthdate,
            processor=processor,
            groups=(member_group,),
            address=room.address,
        )
        return new_user
    def test_user_base_data(self, new_user, user_data, room):
        # Name/login/email must round-trip from the input data.
        assert new_user.name == user_data.name
        assert new_user.login == user_data.login
        assert new_user.email == user_data.email
    def test_user_address(self, new_user, user_data, room):
        # TODO fix signature of `create_user` and also check for explicitly supplied address.
        assert new_user.address == room.address
        assert not new_user.has_custom_address
    def test_user_memberships(self, new_user, member_group):
        assert_membership_groups(new_user.active_memberships(), [member_group])
    def test_unix_account(self, new_user):
        assert new_user.unix_account.home_directory == f"/home/{new_user.login}"
    def test_log_entries(self, new_user):
        # Creation produces exactly two log entries, in this order.
        assert len(new_user.log_entries) == 2
        first, second = new_user.log_entries
        assert_logmessage_startswith(first, "Added to group Mitglied")
        assert_logmessage_startswith(second, "User created")
    def test_finance_account(self, new_user):
        # A finance account is created alongside the user, starting at zero.
        assert_account_name(new_user.account, f"User {new_user.id}")
        assert new_user.account is not None
        assert new_user.account.balance == 0
    def test_one_mail_sent(self, user_mail_capture):
        user_mail_capture.assert_called()
| import dataclasses
from datetime import date
import pytest
from pycroft.lib.user import create_user
from tests import factories
from .assertions import assert_account_name, assert_membership_groups, assert_logmessage_startswith
@dataclasses.dataclass
class UserData:
    """Plain input data used to drive the user-creation fixtures."""
    name: str
    login: str
    email: str
    mac: str  # MAC address string; not passed to create_user in these tests
    birthdate: date
class TestUserCreation:
    """End-to-end checks of pycroft.lib.user.create_user via class-scoped fixtures."""
    @pytest.fixture(scope="class")
    def member_group(self, class_session):
        # Property group granting membership; the new user is added to it.
        return factories.property.MemberPropertyGroupFactory.create()
    @pytest.fixture(scope="class")
    def room(self, class_session):
        # A patched room supplies the address used for the new user.
        return factories.RoomFactory.create(patched_with_subnet=True)
    @pytest.fixture(scope="class")
    def user_data(self) -> UserData:
        """Canonical input data shared by all tests in this class."""
        return UserData(
            name="Hans",
            login="hans66",
            email="hans@hans.de",
            mac="12:11:11:11:11:11",
            birthdate=date.fromisoformat("1990-01-01"),
        )
    @pytest.fixture(scope="class")
    def user_mail_capture(self):
        # TODO actually test whether mails are sent out correctly instead of mocking
        # mocking is only done because we don't test for the mails anyway
        from unittest.mock import patch
        with patch("pycroft.lib.user.user_send_mails") as p:
            yield p
    @pytest.fixture(scope="class", autouse=True)
    def new_user(self, class_session, user_data, room, processor, member_group, user_mail_capture):
        # autouse: the single test below runs against this one created user.
        new_user, _ = create_user(
            user_data.name,
            user_data.login,
            user_data.email,
            user_data.birthdate,
            processor=processor,
            groups=(member_group,),
            address=room.address,
        )
        return new_user
    def test_user_create(self, new_user, user_data, member_group, user_mail_capture):
        # needs: new_user, self.user (the initiating data),
        # self.config.member_group
        assert new_user.name == user_data.name
        assert new_user.login == user_data.login
        assert new_user.email == user_data.email
        # TODO fix signature and check for explicitly supplied address.
        # assert new_user.address == config.dummy_address
        assert_account_name(new_user.account, f"User {new_user.id}")
        assert_membership_groups(new_user.active_memberships(), [member_group])
        assert new_user.unix_account.home_directory == f"/home/{new_user.login}"
        assert len(new_user.log_entries) == 2
        first, second = new_user.log_entries
        assert_logmessage_startswith(first, "Added to group Mitglied")
        assert_logmessage_startswith(second, "User created")
        assert new_user.account is not None
        assert new_user.account.balance == 0
        user_mail_capture.assert_called()
| Python | 0.002351 |
b8c05a7ea6abefa3014f8703864031876c211679 | Add link for total malaria cases for year 2015 for Indonesia | src/data/download_scripts/ID_malaria_down.py | src/data/download_scripts/ID_malaria_down.py | # This script downloads yearly malaria statistics from data.go.id
# It uses urllib and is compatible with both Python 2 and 3
import os
import sys
import logging #logs what goes on
DIRECTORY = '../../Data/raw/disease_ID'
OUTFILE = "yearly-malaria.csv"
URL = "http://data.go.id/dataset/cef9b348-91a9-4270-be1d-3cf64eb9d5b0/resource/42f31bb0-af59-4c96-9a74-db3283f9e316/download/kasusmalaria.csv"
URL2015 = "http://data.go.id/dataset/cef9b348-91a9-4270-be1d-3cf64eb9d5b0/resource/2965b760-0f7f-4bd7-9dbe-8d261729e12f/download/jumlahkasusangkakesakitanmalariaper1000pendudukberisiko.xlsx"
logger = logging.getLogger(__name__)
def download(url=None, outfile=None, directory=None):
    """Download a dataset and store it on disk.

    Args:
        url: source URL; defaults to the module-level ``URL`` (yearly
            malaria CSV).  ``URL2015`` can now be fetched with
            ``download(url=URL2015, outfile="malaria-2015.xlsx")``.
        outfile: destination file name; defaults to ``OUTFILE``.
        directory: destination directory (created if missing); defaults
            to ``DIRECTORY``.

    Errors are logged rather than raised so batch runs keep going.
    """
    # Resolve defaults lazily so the module constants stay authoritative
    # and the no-argument call keeps its original behaviour.
    if url is None:
        url = URL
    if outfile is None:
        outfile = OUTFILE
    if directory is None:
        directory = DIRECTORY
    # compatibility check between python 2 and 3
    if sys.version_info < (3, 0):
        # python 2: makedirs has no exist_ok, so swallow "already exists"
        try:
            os.makedirs(directory)
        except OSError:
            pass
        import urllib as downloader
        from urllib2 import URLError, HTTPError
    else:
        # python 3: exist_ok makes the mkdir idempotent
        os.makedirs(directory, exist_ok=True)
        import urllib.request as downloader
        from urllib.error import URLError, HTTPError
    output_path = os.path.join(directory, outfile)
    # getLogger(__name__) returns the same singleton as the module's logger
    log = logging.getLogger(__name__)
    # now retrieve the file
    try:
        downloader.urlretrieve(url, output_path)
        log.info('Downloaded successfully to %s', os.path.abspath(output_path))
    except (HTTPError, URLError) as e:
        log.error('Failed to download: %s', e.reason)
if __name__ == "__main__":
    # Run directly the script sits two levels deeper, so rebind the module
    # global to point at the repository-level Data folder before downloading.
    DIRECTORY = '../../../Data/raw/disease_ID'
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    download()
| # This script downloads yearly malaria statistics from data.go.id
# It uses urllib and is compatible with both Python 2 and 3
import os
import sys
import logging #logs what goes on
DIRECTORY = '../../Data/raw/disease_ID'
OUTFILE = "yearly-malaria.csv"
URL = "http://data.go.id/dataset/cef9b348-91a9-4270-be1d-3cf64eb9d5b0/resource/42f31bb0-af59-4c96-9a74-db3283f9e316/download/kasusmalaria.csv"
logger = logging.getLogger(__name__)
def download():
    """Download the yearly malaria CSV from ``URL`` into ``DIRECTORY/OUTFILE``.

    Errors are logged rather than raised so batch runs keep going.
    """
    # compatibility check between python 2 and 3
    if sys.version_info < (3, 0):
        # python 2: makedirs has no exist_ok, so swallow "already exists"
        try:
            os.makedirs(DIRECTORY)
        except OSError as e:
            pass
        import urllib as downloader
        from urllib2 import URLError, HTTPError
    else:
        # python 3: exist_ok makes the mkdir idempotent
        os.makedirs(DIRECTORY, exist_ok=True)
        import urllib.request as downloader
        from urllib.error import URLError, HTTPError
    output_path = os.path.join(DIRECTORY, OUTFILE)
    # now retrieve the file
    try:
        downloader.urlretrieve(URL, output_path)
        logger.info('Downloaded successfully to %s', os.path.abspath(output_path))
    except (HTTPError, URLError) as e:
        logger.error('Failed to download: %s', e.reason)
if __name__ == "__main__":
    # Run directly the script sits two levels deeper, so rebind the module
    # global to point at the repository-level Data folder before downloading.
    DIRECTORY = '../../../Data/raw/disease_ID'
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    download()
| Python | 0 |
193cc8025910b92f764e6e1339ce2ec213b20cc5 | Fix duck punching unit test. | tests/qtcore/duck_punching_test.py | tests/qtcore/duck_punching_test.py | #!/usr/bin/python
'''Test case for duck punching new implementations of C++ virtual methods into object instances.'''
import unittest
import types
from PySide.QtCore import QObject, QEvent
from helper import UsesQCoreApplication
class Duck(QObject):
    '''Minimal QObject subclass used to verify duck punching on derived types.'''
    def __init__(self):
        QObject.__init__(self)
    def childEvent(self, event):
        # Delegate to the base implementation; tests replace this per instance.
        QObject.childEvent(self, event)
class TestDuckPunchingOnQObjectInstance(UsesQCoreApplication):
    '''Test case for duck punching new implementations of C++ virtual methods into object instances.'''
    def setUp(self):
        # Acquire resources: the flag records whether the patched handler ran.
        self.duck_childEvent_called = False
        UsesQCoreApplication.setUp(self)
    def tearDown(self):
        # Release resources
        del self.duck_childEvent_called
        UsesQCoreApplication.tearDown(self)
    def testChildEventMonkeyPatch(self):
        # Test if the new childEvent injected on QObject instance is called from C++
        parent = QObject()
        def childEvent(obj, event):
            self.duck_childEvent_called = True
            QObject.childEvent(obj, event)
        # Python 2 three-argument MethodType binds the function to `parent`.
        parent.childEvent = types.MethodType(childEvent, parent, QObject)
        child = QObject()
        child.setParent(parent)
        self.assert_(self.duck_childEvent_called)
    def testChildEventMonkeyPatchWithInheritance(self):
        # Test if the new childEvent injected on a QObject's extension class instance is called from C++
        parent = Duck()
        def childEvent(obj, event):
            QObject.childEvent(obj, event)
            self.duck_childEvent_called = True
        # First child is parented BEFORE the patch, so only the second
        # setParent below can set the flag.
        child = QObject()
        child.setParent(parent)
        parent.childEvent = types.MethodType(childEvent, parent, QObject)
        child = QObject()
        child.setParent(parent)
        self.assert_(self.duck_childEvent_called)
if __name__ == '__main__':
    # Allow running this module directly as a test script.
    unittest.main()
| #!/usr/bin/python
'''Test case for duck punching new implementations of C++ virtual methods into object instances.'''
import unittest
import types
from PySide.QtCore import QObject, QEvent
from helper import UsesQCoreApplication
class Duck(QObject):
    '''Minimal QObject subclass used to verify duck punching on derived types.'''
    def __init__(self):
        QObject.__init__(self)
    def childEvent(self, event):
        # Delegate to the base implementation; tests replace this per instance.
        QObject.childEvent(self, event)
class TestDuckPunchingOnQObjectInstance(UsesQCoreApplication):
    '''Test case for duck punching new implementations of C++ virtual methods into object instances.'''
    def setUp(self):
        # Acquire resources: the flag records whether the patched handler ran.
        self.duck_childEvent_called = False
        UsesQCoreApplication.setUp(self)
    def tearDown(self):
        # Release resources
        del self.duck_childEvent_called
        UsesQCoreApplication.tearDown(self)
    def testChildEventMonkeyPatch(self):
        # Test if the new childEvent injected on QObject instance is called from C++
        parent = QObject()
        def childEvent(obj, event):
            self.duck_childEvent_called = True
            QObject.childEvent(obj, event)
        # NOTE(review): this assigns to `event`, not `childEvent` — the
        # duck-punched childEvent is never installed under that name.
        parent.event = types.MethodType(childEvent, parent, QObject)
        child = QObject()
        child.setParent(parent)
        self.assert_(self.duck_childEvent_called)
    def testChildEventMonkeyPatchWithInheritance(self):
        # Test if the new childEvent injected on a QObject's extension class instance is called from C++
        parent = Duck()
        def childEvent(obj, event):
            QObject.childEvent(obj, event)
            self.duck_childEvent_called = True
        child = QObject()
        child.setParent(parent)
        # NOTE(review): same `event` vs `childEvent` mismatch as above.
        parent.event = types.MethodType(childEvent, parent, QObject)
        child = QObject()
        child.setParent(parent)
        self.assert_(self.duck_childEvent_called)
if __name__ == '__main__':
    # Allow running this module directly as a test script.
    unittest.main()
| Python | 0 |
b95c5dfad2956eca7b0891d3692c140a54b9db84 | Fix tests. | tests/test_google_image_handler.py | tests/test_google_image_handler.py | # -*- coding: utf-8 -*-
"""
robo.tests.test_handler_google_image
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for robo.handlers.google_image
:copyright: (c) 2015 Shinya Ohyanagi, All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import os
import logging
import requests
from mock import patch
from unittest import TestCase
from robo.robot import Robot
from robo.handlers.google_image import Client, GoogleImage
def dummy_response(m, filename=None):
    """Prime the patched ``requests.get`` mock *m* with a canned 200 response.

    When *filename* is given, the response body is loaded from that fixture
    file located next to this test module; otherwise the body is empty.
    """
    canned = requests.Response()
    canned.status_code = 200
    if filename is None:
        body = ''
    else:
        fixture_dir = os.path.dirname(os.path.abspath(__file__))
        with open(os.path.join(fixture_dir, filename), 'r') as fixture:
            body = fixture.read()
    canned._content = body
    m.return_value = canned
class NullAdapter(object):
    """Chat-adapter stub: records every message instead of sending it anywhere."""

    def __init__(self, signal):
        # Handler signal this adapter is attached to.
        self.signal = signal
        # Every message passed to say(), in call order.
        self.responses = []

    def say(self, message, **kwargs):
        """Record *message* and echo it back; extra keyword args are ignored."""
        self.responses.append(message)
        return message
class TestClient(TestCase):
    """Unit tests for the Google custom-search Client (requests.get is mocked)."""
    @classmethod
    def setUpClass(cls):
        # The client reads its CSE credentials from the environment.
        os.environ['ROBO_GOOGLE_CSE_KEY'] = 'foo'
        os.environ['ROBO_GOOGLE_CSE_ID'] = 'bar'
        cls.client = Client()
    @patch('robo.handlers.google_image.requests.get')
    def test_generate_url(self, m):
        """ Client().generate() should generate google search url. """
        dummy_response(m, 'fixture.json')
        ret = self.client.generate('cat')
        self.assertTrue(ret.startswith('http'))
    @patch('robo.handlers.google_image.requests.get')
    def test_search_resource(self, m):
        """ Client().search_resource() should search from Google. """
        dummy_response(m, 'fixture.json')
        ret = self.client.search_resource('cat')
        self.assertTrue(isinstance(ret, dict))
        self.assertTrue('link' in ret)
class TestGoogleImageHandler(TestCase):
    """Integration-style test wiring the GoogleImage handler into a Robot."""
    @classmethod
    def setUpClass(cls):
        logger = logging.getLogger('robo')
        logger.level = logging.ERROR
        cls.robot = Robot('test', logger)
        # Credentials are read from the environment by the handler's client.
        os.environ['ROBO_GOOGLE_CSE_KEY'] = 'foo'
        os.environ['ROBO_GOOGLE_CSE_ID'] = 'bar'
        client = GoogleImage()
        client.signal = cls.robot.handler_signal
        method = cls.robot.parse_handler_methods(client)
        cls.robot.handlers.extend(method)
        # The NullAdapter captures replies so assertions can inspect them.
        adapter = NullAdapter(cls.robot.handler_signal)
        cls.robot.adapters['null'] = adapter
    @patch('robo.handlers.google_image.requests.get')
    def test_should_google_image(self, m):
        """ GoogleImage().get() should search google. """
        dummy_response(m, 'fixture.json')
        self.robot.handler_signal.send('test image cat')
        import sys
        # assertRegexpMatches was renamed assertRegex in Python 3.
        if sys.version_info[0] == 2:
            self.assertRegexpMatches(self.robot.adapters['null'].responses[0],
                                     r'^(http|https)://*')
        else:
            self.assertRegex(self.robot.adapters['null'].responses[0],
                             r'^(http|https)://*')
        self.robot.adapters['null'].responses = []
| # -*- coding: utf-8 -*-
"""
robo.tests.test_handler_google_image
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for robo.handlers.google_image
:copyright: (c) 2015 Shinya Ohyanagi, All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import os
import logging
import requests
from mock import patch
from unittest import TestCase
from robo.robot import Robot
from robo.handlers.google_image import Client, GoogleImage
def dummy_response(m, filename=None):
    """Prime the patched ``requests.get`` mock *m* with a canned 200 response.

    When *filename* is given, the body comes from that fixture file next to
    this test module; otherwise the body is empty.
    """
    response = requests.Response()
    response.status_code = 200
    if filename is None:
        response._content = ''
    else:
        root_path = os.path.dirname(os.path.abspath(__file__))
        file_path = os.path.join(root_path, filename)
        with open(file_path, 'r') as f:
            data = f.read()
        response._content = data
    m.return_value = response
class NullAdapter(object):
    """Chat-adapter stub: records every message instead of sending it anywhere."""
    def __init__(self, signal):
        self.signal = signal
        # Every message passed to say(), in call order.
        self.responses = []
    def say(self, message, **kwargs):
        # Record the message and echo it back; extra kwargs are ignored.
        self.responses.append(message)
        return message
class TestClient(TestCase):
    """Unit tests for the Google custom-search Client (requests.get is mocked)."""
    @classmethod
    def setUpClass(cls):
        # The client reads its CSE credentials from the environment.
        os.environ['ROBO_GOOGLE_CSE_KEY'] = 'foo'
        os.environ['ROBO_GOOGLE_CSE_ID'] = 'bar'
        cls.client = Client()
    @patch('robo.handlers.google_image.requests.get')
    def test_generate_url(self, m):
        """ Client().generate() should generate google search url. """
        dummy_response(m, 'fixture.json')
        ret = self.client.generate('cat')
        self.assertTrue(ret.startswith('http://'))
    @patch('robo.handlers.google_image.requests.get')
    def test_search_resource(self, m):
        """ Client().search_resource() should search from Google. """
        dummy_response(m, 'fixture.json')
        ret = self.client.search_resource('cat')
        self.assertTrue(isinstance(ret, dict))
        self.assertTrue('unescapedUrl' in ret)
class TestGoogleImageHandler(TestCase):
    """Integration-style test wiring the GoogleImage handler into a Robot."""
    @classmethod
    def setUpClass(cls):
        logger = logging.getLogger('robo')
        logger.level = logging.ERROR
        cls.robot = Robot('test', logger)
        # Credentials are read from the environment by the handler's client.
        os.environ['ROBO_GOOGLE_CSE_KEY'] = 'foo'
        os.environ['ROBO_GOOGLE_CSE_ID'] = 'bar'
        client = GoogleImage()
        client.signal = cls.robot.handler_signal
        method = cls.robot.parse_handler_methods(client)
        cls.robot.handlers.extend(method)
        # The NullAdapter captures replies so assertions can inspect them.
        adapter = NullAdapter(cls.robot.handler_signal)
        cls.robot.adapters['null'] = adapter
    @patch('robo.handlers.google_image.requests.get')
    def test_should_google_image(self, m):
        """ GoogleImage().get() should search google. """
        dummy_response(m, 'fixture.json')
        self.robot.handler_signal.send('test image cat')
        import sys
        # assertRegexpMatches was renamed assertRegex in Python 3.
        if sys.version_info[0] == 2:
            self.assertRegexpMatches(self.robot.adapters['null'].responses[0],
                                     r'^(http|https)://*')
        else:
            self.assertRegex(self.robot.adapters['null'].responses[0],
                             r'^(http|https)://*')
        self.robot.adapters['null'].responses = []
| Python | 0 |
3d237a6bf3a3dff684e08496f800a8957a9e3352 | Fix pep error. | hr_contract_hourly_rate/models/hr_hourly_rate_class.py | hr_contract_hourly_rate/models/hr_hourly_rate_class.py | # -*- coding:utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Savoir-faire Linux. All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, exceptions, _
from itertools import permutations
class hr_hourly_rate_class(models.Model):
    """Named set of dated hourly rates that contract jobs can reference."""

    _name = 'hr.hourly.rate.class'
    _description = 'Hourly rate class'

    # Human-readable name of the rate class.
    name = fields.Char(string='Class Name', required=True, index=True)
    # Dated hourly rates belonging to this class.
    line_ids = fields.One2many(
        'hr.hourly.rate', 'class_id', string='Hourly Rates')
    # Contract jobs currently bound to this rate class.
    contract_job_ids = fields.One2many(
        'hr.contract.job', 'hourly_rate_class_id', string='Contract Jobs')

    # NOTE(review): stacking @api.model on a @api.constrains handler is
    # unusual — constraint handlers normally receive a recordset; confirm
    # against the Odoo/OpenERP 8 API before changing.
    @api.model
    @api.constrains('line_ids')
    def _check_overlapping_rates(self):
        """
        Checks if a class has two rates that overlap in time.

        Raises a Warning as soon as one rate's start date falls inside
        another rate's validity period.
        """
        for record in self:
            # permutations (not combinations) because the overlap test is
            # asymmetric: each rate must be checked as the enclosing one.
            for first, second in permutations(record.line_ids, 2):
                if first.date_end:
                    overlaps = (first.date_start <= second.date_start
                                <= first.date_end)
                else:
                    # Open-ended rate: overlaps anything starting on or
                    # after its own start date.
                    overlaps = first.date_start <= second.date_start
                if overlaps:
                    raise exceptions.Warning(
                        _("Error! You cannot have overlapping rates"))
        return True
| # -*- coding:utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Savoir-faire Linux. All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, exceptions, _
from itertools import permutations
class hr_hourly_rate_class(models.Model):
_name = 'hr.hourly.rate.class'
_description = 'Hourly rate class'
name = fields.Char(string='Class Name', required=True, index=True)
line_ids = fields.One2many('hr.hourly.rate',
'class_id',
string='Hourly Rates')
contract_job_ids = fields.One2many('hr.contract.job',
'hourly_rate_class_id',
string='Contract Jobs')
@api.model
@api.constrains('line_ids')
def _check_overlapping_rates(self):
"""
Checks if a class has two rates that overlap in time.
"""
for hourly_rate_class in self:
for r1, r2 in permutations(hourly_rate_class.line_ids, 2):
if r1.date_end and (r1.date_start <= r2.date_start <= r1.date_end):
raise exceptions.Warning(
_("Error! You cannot have overlapping rates"))
elif not r1.date_end and (r1.date_start <= r2.date_start):
raise exceptions.Warning(
_("Error! You cannot have overlapping rates"))
return True
| Python | 0.000001 |
fa00db22832d43cfb4ed5b79be32e31153a5e570 | Include new app | core/settings.py | core/settings.py | """
Django settings for core project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fy5#xmxaf&@-30c_nm)0te&@=-g9y+45i6r03+%2(1q@vfztr_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
SITE_ID = 1
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.flatpages',
# 3rd Party
# Own Apps
'fineants',
'polls',
'login',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
)
ROOT_URLCONF = 'core.urls'
WSGI_APPLICATION = 'core.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
os.path.join(BASE_DIR, 'templates/core'),
)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Configure Templates
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'debug': DEBUG,
},
},
]
# Needed for login.
# Django's auth machinery (login_required, LoginView redirects, ...)
# reads ``settings.LOGIN_URL``; define the setting so unauthenticated
# users are sent to the site root.
LOGIN_URL = '/'

# NOTE(review): the attribute assignment below pre-dates the setting
# above. Django does not read ``django.contrib.auth.LOGIN_URL``, so it
# is almost certainly a no-op; kept only to avoid changing this
# module's import-time side effects.
import django.contrib.auth
django.contrib.auth.LOGIN_URL = LOGIN_URL
| """
Django settings for core project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fy5#xmxaf&@-30c_nm)0te&@=-g9y+45i6r03+%2(1q@vfztr_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
SITE_ID = 1
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.flatpages',
'polls',
'login',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
)
ROOT_URLCONF = 'core.urls'
WSGI_APPLICATION = 'core.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
os.path.join(BASE_DIR, 'templates/core'),
)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Configure Templates
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'debug': DEBUG,
},
},
]
# Needed for login
import django.contrib.auth
django.contrib.auth.LOGIN_URL = '/'
| Python | 0 |
f5fd631cb4571930cf71513db2622861fb4fcc39 | simplify put requests | lib/test_util.py | lib/test_util.py | import hashlib
from lettuce import world
from tornado.escape import json_decode
from tornado.httpclient import HTTPClient, HTTPRequest
from newebe.settings import TORNADO_PORT
from newebe.profile.models import UserManager, User
ROOT_URL = "http://localhost:%d/" % TORNADO_PORT
class NewebeClient(HTTPClient):
    '''
    Tornado client wrapper to write requests to Newebe faster.
    '''

    def _authenticated(self, request):
        '''
        Attach the session cookie obtained by :meth:`login` to *request*,
        when one is available, and return the request.
        '''
        # getattr guard: the client may legitimately be used before
        # login(), in which case no ``cookie`` attribute exists yet.
        if getattr(self, "cookie", None):
            request.headers["Cookie"] = self.cookie
        return request

    def login(self, password):
        '''
        Grab authentication cookie from login request.
        '''
        response = self.fetch(self.root_url + "login/json/",
                              method="POST",
                              body='{"password":"%s"}' % password)
        assert response.headers["Set-Cookie"].startswith("password=")
        self.cookie = response.headers["Set-Cookie"]

    def set_default_user(self, url=ROOT_URL):
        '''
        Set to DB default user. This is useful for automatic login.
        '''
        self.root_url = url
        user = UserManager.getUser()
        if user:
            user.delete()
        user = User(
            name="John Doe",
            password=hashlib.sha224("password").hexdigest(),
            key=None,
            authorKey="authorKey",
            url=url,
            description="my description"
        )
        user.save()

    def get(self, url):
        '''
        Perform a GET request to the absolute *url*.
        '''
        return HTTPClient.fetch(self, self._authenticated(HTTPRequest(url)))

    def post(self, url, body):
        '''
        Perform a POST request with *body* to the absolute *url*.
        '''
        request = HTTPRequest(url, method="POST", body=body)
        return HTTPClient.fetch(self, self._authenticated(request))

    def put(self, url, body):
        '''
        Perform a PUT request with *body*.

        Unlike the other verbs, *url* is treated as a path relative to
        *root_url* when one has been set.
        '''
        if getattr(self, "root_url", None):
            url = self.root_url + url
        request = HTTPRequest(url, method="PUT", body=body)
        return HTTPClient.fetch(self, self._authenticated(request))

    def delete(self, url):
        '''
        Perform a DELETE request to the absolute *url*.
        '''
        # Bug fix: this method previously read self.cookie without the
        # hasattr guard its siblings used, raising AttributeError when
        # delete() was called before login().
        request = HTTPRequest(url, method="DELETE")
        return HTTPClient.fetch(self, self._authenticated(request))

    def fetch_document_from_url(self, url):
        '''
        Retrieve newebe document from a givent url
        '''
        response = self.get(url)
        assert response.code == 200
        assert response.headers["Content-Type"] == "application/json"
        return json_decode(response.body)

    def fetch_documents_from_url(self, url):
        '''
        Retrieve newebe document list from a givent url
        '''
        response = self.get(url)
        assert response.code == 200
        assert response.headers["Content-Type"] == "application/json"
        # The decoded payload is also published to lettuce's shared
        # ``world`` so later steps can inspect it.
        world.data = json_decode(response.body)
        return world.data["rows"]

    def fetch_document(self, path):
        '''
        Retrieve document from path located on localhost server.
        '''
        return self.fetch_document_from_url(self.root_url + path)

    def fetch_documents(self, path):
        '''
        Retrieve document list from path located on localhost server.
        '''
        return self.fetch_documents_from_url(self.root_url + path)
| import hashlib
from lettuce import world
from tornado.escape import json_decode
from tornado.httpclient import HTTPClient, HTTPRequest
from newebe.settings import TORNADO_PORT
from newebe.profile.models import UserManager, User
ROOT_URL = "http://localhost:%d/" % TORNADO_PORT
class NewebeClient(HTTPClient):
'''
Tornado client wrapper to write requests to Newebe faster.
'''
def login(self, password):
'''
Grab authentication cookie from login request.
'''
response = self.fetch(self.root_url + "login/json/",
method="POST", body='{"password":"%s"}' % password)
assert response.headers["Set-Cookie"].startswith("password=")
self.cookie = response.headers["Set-Cookie"]
def set_default_user(self, url=ROOT_URL):
'''
Set to DB default user. This is useful for automatic login.
'''
self.root_url = url
user = UserManager.getUser()
if user:
user.delete()
user = User(
name = "John Doe",
password = hashlib.sha224("password").hexdigest(),
key = None,
authorKey = "authorKey",
url = url,
description = "my description"
)
user.save()
def get(self, url):
'''
Perform a GET request.
'''
request = HTTPRequest(url)
if hasattr(self, "cookie") and self.cookie:
request.headers["Cookie"] = self.cookie
return HTTPClient.fetch(self, request)
def post(self, url, body):
'''
Perform a POST request.
'''
request = HTTPRequest(url, method="POST", body=body)
if hasattr(self, "cookie") and self.cookie:
request.headers["Cookie"] = self.cookie
return HTTPClient.fetch(self, request)
def put(self, url, body):
'''
Perform a PUT request.
'''
request = HTTPRequest(url, method="PUT", body=body)
if hasattr(self, "cookie") and self.cookie:
request.headers["Cookie"] = self.cookie
return HTTPClient.fetch(self, request)
def delete(self, url):
'''
Perform a DELETE request.
'''
request = HTTPRequest(url, method="DELETE")
if self.cookie:
request.headers["Cookie"] = self.cookie
return HTTPClient.fetch(self, request)
def fetch_document_from_url(self, url):
'''
Retrieve newebe document from a givent url
'''
response = self.get(url)
assert response.code == 200
assert response.headers["Content-Type"] == "application/json"
return json_decode(response.body)
def fetch_documents_from_url(self, url):
'''
Retrieve newebe document list from a givent url
'''
response = self.get(url)
assert response.code == 200
assert response.headers["Content-Type"] == "application/json"
world.data = json_decode(response.body)
return world.data["rows"]
def fetch_document(self, path):
'''
Retrieve document from path located on localhost server.
'''
return self.fetch_document_from_url(self.root_url + path)
def fetch_documents(self, path):
'''
Retrieve document list from path located on localhost server.
'''
return self.fetch_documents_from_url(self.root_url + path)
| Python | 0.000002 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.