commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
7e45baa3e2b372d5a81dce656781879c0408bcc1 | Fix overlapping text | almarklein/scikit-image,SamHames/scikit-image,michaelaye/scikit-image,Midafi/scikit-image,juliusbierk/scikit-image,chintak/scikit-image,chriscrosscutler/scikit-image,robintw/scikit-image,paalge/scikit-image,youprofit/scikit-image,blink1073/scikit-image,bennlich/scikit-image,juliusbierk/scikit-image,oew1v07/scikit-image,rjeli/scikit-image,emon10005/scikit-image,Midafi/scikit-image,SamHames/scikit-image,newville/scikit-image,youprofit/scikit-image,ClinicalGraphics/scikit-image,paalge/scikit-image,ajaybhat/scikit-image,vighneshbirodkar/scikit-image,Britefury/scikit-image,rjeli/scikit-image,GaZ3ll3/scikit-image,ajaybhat/scikit-image,emon10005/scikit-image,vighneshbirodkar/scikit-image,michaelpacer/scikit-image,paalge/scikit-image,keflavich/scikit-image,Hiyorimi/scikit-image,oew1v07/scikit-image,michaelpacer/scikit-image,Britefury/scikit-image,pratapvardhan/scikit-image,ofgulban/scikit-image,almarklein/scikit-image,vighneshbirodkar/scikit-image,jwiggins/scikit-image,almarklein/scikit-image,dpshelio/scikit-image,bsipocz/scikit-image,newville/scikit-image,bennlich/scikit-image,warmspringwinds/scikit-image,rjeli/scikit-image,chintak/scikit-image,blink1073/scikit-image,robintw/scikit-image,GaZ3ll3/scikit-image,SamHames/scikit-image,bsipocz/scikit-image,SamHames/scikit-image,chintak/scikit-image,chriscrosscutler/scikit-image,ofgulban/scikit-image,WarrenWeckesser/scikits-image,Hiyorimi/scikit-image,chintak/scikit-image,michaelaye/scikit-image,jwiggins/scikit-image,ofgulban/scikit-image,pratapvardhan/scikit-image,keflavich/scikit-image,dpshelio/scikit-image,warmspringwinds/scikit-image,almarklein/scikit-image,ClinicalGraphics/scikit-image,WarrenWeckesser/scikits-image | doc/examples/plot_equalize.py | doc/examples/plot_equalize.py | """
======================
Histogram Equalization
======================
This examples enhances an image with low contrast, using a method called
*histogram equalization*, which "spreads out the most frequent intensity
values" in an image [1]_. The equalized image has a roughly linear cumulative
distribution function.
While histogram equalization has the advantage that it requires no parameters,
it sometimes yields unnatural looking images. An alternative method is
*contrast stretching*, where the image is rescaled to include all intensities
that fall within the 2nd and 98th percentiles [2]_.
.. [1] http://en.wikipedia.org/wiki/Histogram_equalization
.. [2] http://homepages.inf.ed.ac.uk/rbf/HIPR2/stretch.htm
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from skimage import data, img_as_float
from skimage import exposure
matplotlib.rcParams['font.size'] = 8
def plot_img_and_hist(img, axes, bins=256):
"""Plot an image along with its histogram and cumulative histogram.
"""
img = img_as_float(img)
ax_img, ax_hist = axes
ax_cdf = ax_hist.twinx()
# Display image
ax_img.imshow(img, cmap=plt.cm.gray)
ax_img.set_axis_off()
# Display histogram
ax_hist.hist(img.ravel(), bins=bins, histtype='step', color='black')
ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
ax_hist.set_xlabel('Pixel intensity')
ax_hist.set_xlim(0, 1)
ax_hist.set_yticks([])
# Display cumulative distribution
img_cdf, bins = exposure.cumulative_distribution(img, bins)
ax_cdf.plot(bins, img_cdf, 'r')
ax_cdf.set_yticks([])
return ax_img, ax_hist, ax_cdf
# Load an example image
img = data.moon()
# Contrast stretching
p2 = np.percentile(img, 2)
p98 = np.percentile(img, 98)
img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))
# Equalization
img_eq = exposure.equalize_hist(img)
# Adaptive Equalization
img_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03)
# Display results
f, axes = plt.subplots(nrows=2, ncols=4, figsize=(8, 5))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0])
ax_img.set_title('Low contrast image')
y_min, y_max = ax_hist.get_ylim()
ax_hist.set_ylabel('Number of pixels')
ax_hist.set_yticks(np.linspace(0, y_max, 5))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_rescale, axes[:, 1])
ax_img.set_title('Contrast stretching')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_eq, axes[:, 2])
ax_img.set_title('Histogram equalization')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_adapteq, axes[:, 3])
ax_img.set_title('Adaptive equalization')
ax_cdf.set_ylabel('Fraction of total intensity')
ax_cdf.set_yticks(np.linspace(0, 1, 5))
# prevent overlap of y-axis labels
plt.subplots_adjust(wspace=0.4)
plt.show()
| """
======================
Histogram Equalization
======================
This examples enhances an image with low contrast, using a method called
*histogram equalization*, which "spreads out the most frequent intensity
values" in an image [1]_. The equalized image has a roughly linear cumulative
distribution function.
While histogram equalization has the advantage that it requires no parameters,
it sometimes yields unnatural looking images. An alternative method is
*contrast stretching*, where the image is rescaled to include all intensities
that fall within the 2nd and 98th percentiles [2]_.
.. [1] http://en.wikipedia.org/wiki/Histogram_equalization
.. [2] http://homepages.inf.ed.ac.uk/rbf/HIPR2/stretch.htm
"""
import matplotlib.pyplot as plt
import numpy as np
from skimage import data, img_as_float
from skimage import exposure
def plot_img_and_hist(img, axes, bins=256):
"""Plot an image along with its histogram and cumulative histogram.
"""
img = img_as_float(img)
ax_img, ax_hist = axes
ax_cdf = ax_hist.twinx()
# Display image
ax_img.imshow(img, cmap=plt.cm.gray)
ax_img.set_axis_off()
# Display histogram
ax_hist.hist(img.ravel(), bins=bins, histtype='step', color='black')
ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
ax_hist.set_xlabel('Pixel intensity')
ax_hist.set_xlim(0, 1)
ax_hist.set_yticks([])
# Display cumulative distribution
img_cdf, bins = exposure.cumulative_distribution(img, bins)
ax_cdf.plot(bins, img_cdf, 'r')
ax_cdf.set_yticks([])
return ax_img, ax_hist, ax_cdf
# Load an example image
img = data.moon()
# Contrast stretching
p2 = np.percentile(img, 2)
p98 = np.percentile(img, 98)
img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))
# Equalization
img_eq = exposure.equalize_hist(img)
# Adaptive Equalization
img_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03)
# Display results
f, axes = plt.subplots(2, 4, figsize=(8, 4))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0])
ax_img.set_title('Low contrast image')
y_min, y_max = ax_hist.get_ylim()
ax_hist.set_ylabel('Number of pixels')
ax_hist.set_yticks(np.linspace(0, y_max, 5))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_rescale, axes[:, 1])
ax_img.set_title('Contrast stretching')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_eq, axes[:, 2])
ax_img.set_title('Histogram equalization')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_adapteq, axes[:, 3])
ax_img.set_title('Adaptive equalization')
ax_cdf.set_ylabel('Fraction of total intensity')
ax_cdf.set_yticks(np.linspace(0, 1, 5))
# prevent overlap of y-axis labels
plt.subplots_adjust(wspace=0.4)
plt.show()
| bsd-3-clause | Python |
0611d3452d5f752e28bbcb39452a7433bfb4eb5d | remove unused import | chicagopython/chipy.org,chicagopython/chipy.org,agfor/chipy.org,agfor/chipy.org,agfor/chipy.org,chicagopython/chipy.org,chicagopython/chipy.org | chipy_org/urls.py | chipy_org/urls.py | from django.conf import settings
from django.conf.urls import url, include
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
from django.views.generic import TemplateView
from chipy_org.apps.contact.views import ChipyContactView
from chipy_org.apps.meetings.views import MeetingListAPIView, MeetingMeetupSync
admin.autodiscover()
urlpatterns = [
url(r'', include('chipy_org.apps.main.urls')),
url(r'', include('social_auth.urls')),
url(r'^accounts/login/$', 'django.contrib.auth.views.login'),
url(r'^login/{0,1}$', TemplateView.as_view(template_name='login.html')),
url(r'^grappelli/', include('grappelli.urls')),
url(r'^meetings/', include('chipy_org.apps.meetings.urls')),
url(r'^profiles/', include('chipy_org.apps.profiles.urls', namespace="profiles")),
url(r'^admin/', include(admin.site.urls)),
url(r'^about/', include('chipy_org.apps.about.urls')),
url(r'^logout', 'django.contrib.auth.views.logout', {'next_page': '/'}),
url(r'^contact/', ChipyContactView.as_view(), name="contact"),
url(r'^tinymce/', include('tinymce.urls')),
url(r'^pages/', include('django.contrib.flatpages.urls')),
url(r'^sponsors/', include('chipy_org.apps.sponsors.urls')),
]
# Would love a back tracking url resolver
urlpatterns += [
url(r'^api/meetings/$', MeetingListAPIView.as_view()),
url(r'^api/meetings/(?P<meeting_id>\d+)/meetup/sync$',
MeetingMeetupSync.as_view())
]
if settings.SERVE_MEDIA:
urlpatterns += [
url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
]
urlpatterns += staticfiles_urlpatterns()
| from django.conf import settings
from django.conf.urls import patterns, url, include
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
from django.views.generic import TemplateView
from chipy_org.apps.contact.views import ChipyContactView
from chipy_org.apps.meetings.views import MeetingListAPIView, MeetingMeetupSync
admin.autodiscover()
urlpatterns = [
url(r'', include('chipy_org.apps.main.urls')),
url(r'', include('social_auth.urls')),
url(r'^accounts/login/$', 'django.contrib.auth.views.login'),
url(r'^login/{0,1}$', TemplateView.as_view(template_name='login.html')),
url(r'^grappelli/', include('grappelli.urls')),
url(r'^meetings/', include('chipy_org.apps.meetings.urls')),
url(r'^profiles/', include('chipy_org.apps.profiles.urls', namespace="profiles")),
url(r'^admin/', include(admin.site.urls)),
url(r'^about/', include('chipy_org.apps.about.urls')),
url(r'^logout', 'django.contrib.auth.views.logout', {'next_page': '/'}),
url(r'^contact/', ChipyContactView.as_view(), name="contact"),
url(r'^tinymce/', include('tinymce.urls')),
url(r'^pages/', include('django.contrib.flatpages.urls')),
url(r'^sponsors/', include('chipy_org.apps.sponsors.urls')),
]
# Would love a back tracking url resolver
urlpatterns += [
url(r'^api/meetings/$', MeetingListAPIView.as_view()),
url(r'^api/meetings/(?P<meeting_id>\d+)/meetup/sync$',
MeetingMeetupSync.as_view())
]
if settings.SERVE_MEDIA:
urlpatterns += [
url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
]
urlpatterns += staticfiles_urlpatterns()
| mit | Python |
0f8e2337556832889b400b8f570390eca0a51817 | Update license metadata | PyCQA/astroid | astroid/__pkginfo__.py | astroid/__pkginfo__.py | # Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2014-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2015-2017 Ceridwen <ceridwenv@gmail.com>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Radosław Ganczarek <radoslaw@ganczarek.in>
# Copyright (c) 2016 Moises Lopez <moylop260@vauxoo.com>
# Copyright (c) 2017 Hugo <hugovk@users.noreply.github.com>
# Copyright (c) 2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2017 Calen Pennington <cale@edx.org>
# Copyright (c) 2018 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2018 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2018 Bryce Guinta <bryce.paul.guinta@gmail.com>
# Copyright (c) 2019 Uilian Ries <uilianries@gmail.com>
# Copyright (c) 2019 Thomas Hisch <t.hisch@gmail.com>
# Copyright (c) 2020 David Gilman <davidgilman1@gmail.com>
# Copyright (c) 2020 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2020 Konrad Weihmann <kweihmann@outlook.com>
# Copyright (c) 2020 Felix Mölder <felix.moelder@uni-due.de>
# Copyright (c) 2020 Michael <michael-k@users.noreply.github.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""astroid packaging information"""
version = "2.6.0-dev"
numversion = tuple(int(elem) for elem in version.split(".") if elem.isdigit())
extras_require = {}
install_requires = [
"lazy_object_proxy>=1.4.0",
"wrapt>=1.11,<1.13",
'typed-ast>=1.4.0,<1.5;implementation_name== "cpython" and python_version<"3.8"',
]
# pylint: disable=redefined-builtin; why license is a builtin anyway?
license = "LGPL-2.1-or-later"
author = "Python Code Quality Authority"
author_email = "code-quality@python.org"
mailinglist = "mailto://%s" % author_email
web = "https://github.com/PyCQA/astroid"
description = "An abstract syntax tree for Python with inference support."
classifiers = [
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Quality Assurance",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
| # Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2014-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2015-2017 Ceridwen <ceridwenv@gmail.com>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Radosław Ganczarek <radoslaw@ganczarek.in>
# Copyright (c) 2016 Moises Lopez <moylop260@vauxoo.com>
# Copyright (c) 2017 Hugo <hugovk@users.noreply.github.com>
# Copyright (c) 2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2017 Calen Pennington <cale@edx.org>
# Copyright (c) 2018 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2018 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2018 Bryce Guinta <bryce.paul.guinta@gmail.com>
# Copyright (c) 2019 Uilian Ries <uilianries@gmail.com>
# Copyright (c) 2019 Thomas Hisch <t.hisch@gmail.com>
# Copyright (c) 2020 David Gilman <davidgilman1@gmail.com>
# Copyright (c) 2020 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2020 Konrad Weihmann <kweihmann@outlook.com>
# Copyright (c) 2020 Felix Mölder <felix.moelder@uni-due.de>
# Copyright (c) 2020 Michael <michael-k@users.noreply.github.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""astroid packaging information"""
version = "2.6.0-dev"
numversion = tuple(int(elem) for elem in version.split(".") if elem.isdigit())
extras_require = {}
install_requires = [
"lazy_object_proxy>=1.4.0",
"wrapt>=1.11,<1.13",
'typed-ast>=1.4.0,<1.5;implementation_name== "cpython" and python_version<"3.8"',
]
# pylint: disable=redefined-builtin; why license is a builtin anyway?
license = "LGPL"
author = "Python Code Quality Authority"
author_email = "code-quality@python.org"
mailinglist = "mailto://%s" % author_email
web = "https://github.com/PyCQA/astroid"
description = "An abstract syntax tree for Python with inference support."
classifiers = [
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Quality Assurance",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
| lgpl-2.1 | Python |
ced3b9ab9f9a78acec10b2c0f6527659e9956398 | Use Django's copy of six | sgaist/django-haystack,elishowk/django-haystack,django-searchstack/django-searchstack,barseghyanartur/django-haystack,dionysio/django-haystack,cyanut/django-haystack,blancltd/django-haystack,eventials/django-haystack,django-searchstack/django-searchstack,celerityweb/django-haystack,blancltd/django-haystack,kuanyui/django-haystack,antonyr/django-haystack,celerityweb/django-haystack,comandrei/django-haystack,elishowk/django-haystack,elishowk/django-haystack,barseghyanartur/django-haystack,askaliuk/django-haystack,jannon/django-haystack,ruimashita/django-haystack,zeehio/django-haystack,fisle/django-haystack,askaliuk/django-haystack,django-searchstack/django-searchstack,speedplane/django-haystack,zeehio/django-haystack,Stupeflix/django-haystack,vitalyvolkov/django-haystack,fisle/django-haystack,celerityweb/django-haystack,comandrei/django-haystack,antonyr/django-haystack,ruimashita/django-haystack,jannon/django-haystack,eventials/django-haystack,kybi/django-haystack,fisle/django-haystack,eventials/django-haystack,mixcloud/django-haystack,barseghyanartur/django-haystack,speedplane/django-haystack,streeter/django-haystack,kuanyui/django-haystack,mixcloud/django-haystack,ruimashita/django-haystack,vitalyvolkov/django-haystack,zeehio/django-haystack,dionysio/django-haystack,kuanyui/django-haystack,jannon/django-haystack,antonyr/django-haystack,comandrei/django-haystack,askaliuk/django-haystack,speedplane/django-haystack,Stupeflix/django-haystack,sgaist/django-haystack,blancltd/django-haystack,Stupeflix/django-haystack,cyanut/django-haystack,vitalyvolkov/django-haystack,dionysio/django-haystack,kybi/django-haystack,sgaist/django-haystack,cyanut/django-haystack,streeter/django-haystack,streeter/django-haystack | haystack/management/commands/clear_index.py | haystack/management/commands/clear_index.py | from __future__ import print_function
from __future__ import unicode_literals
from optparse import make_option
import sys
from django.core.management.base import BaseCommand
from django.utils import six
class Command(BaseCommand):
help = "Clears out the search index completely."
base_options = (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='If provided, no prompts will be issued to the user and the data will be wiped out.'
),
make_option("-u", "--using", action="append", dest="using",
default=[],
help='Update only the named backend (can be used multiple times). '
'By default all backends will be updated.'
),
)
option_list = BaseCommand.option_list + base_options
def handle(self, **options):
"""Clears out the search index completely."""
from haystack import connections
self.verbosity = int(options.get('verbosity', 1))
using = options.get('using')
if not using:
using = connections.connections_info.keys()
if options.get('interactive', True):
print()
print("WARNING: This will irreparably remove EVERYTHING from your search index in connection '%s'." % "', '".join(using))
print("Your choices after this are to restore from backups or rebuild via the `rebuild_index` command.")
yes_or_no = six.moves.input("Are you sure you wish to continue? [y/N] ")
print
if not yes_or_no.lower().startswith('y'):
print("No action taken.")
sys.exit()
if self.verbosity >= 1:
print("Removing all documents from your index because you said so.")
for backend_name in using:
backend = connections[backend_name].get_backend()
backend.clear()
if self.verbosity >= 1:
print("All documents removed.")
| from __future__ import print_function
from __future__ import unicode_literals
from optparse import make_option
import sys
import six
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Clears out the search index completely."
base_options = (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='If provided, no prompts will be issued to the user and the data will be wiped out.'
),
make_option("-u", "--using", action="append", dest="using",
default=[],
help='Update only the named backend (can be used multiple times). '
'By default all backends will be updated.'
),
)
option_list = BaseCommand.option_list + base_options
def handle(self, **options):
"""Clears out the search index completely."""
from haystack import connections
self.verbosity = int(options.get('verbosity', 1))
using = options.get('using')
if not using:
using = connections.connections_info.keys()
if options.get('interactive', True):
print()
print("WARNING: This will irreparably remove EVERYTHING from your search index in connection '%s'." % "', '".join(using))
print("Your choices after this are to restore from backups or rebuild via the `rebuild_index` command.")
yes_or_no = six.moves.input("Are you sure you wish to continue? [y/N] ")
print
if not yes_or_no.lower().startswith('y'):
print("No action taken.")
sys.exit()
if self.verbosity >= 1:
print("Removing all documents from your index because you said so.")
for backend_name in using:
backend = connections[backend_name].get_backend()
backend.clear()
if self.verbosity >= 1:
print("All documents removed.")
| bsd-3-clause | Python |
84b569bd6f30d23547593f7a38d77145b25b20e7 | add setting, but doesn't seem to work | avlach/univbris-ocf,avlach/univbris-ocf,avlach/univbris-ocf,avlach/univbris-ocf | src/python/expedient/clearinghouse/commands/management/commands/bootstrap_local_settings.py | src/python/expedient/clearinghouse/commands/management/commands/bootstrap_local_settings.py | '''Command to bootstrap local settings with None values.
Created on Aug 24, 2010
@author: jnaous
'''
from optparse import make_option
import pkg_resources
import os
from django.core.management.base import NoArgsCommand
from django.conf import settings
from expedient.clearinghouse.commands.utils import bootstrap_local_settings
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option(
'--path', action='store', dest='path',
default=settings.CONF_DIR,
help='Specifies the location where local settings should ' \
'be installed. This location should be added to your ' \
'PYTHONPATH. Defaults to %s' \
% os.path.abspath(settings.CONF_DIR),
),
)
help = "Bootstrap a localsettings.py file"
requires_model_validation = False
def handle_noargs(self, **options):
conf_dir = os.path.abspath(options.get('path', settings.CONF_DIR))
bootstrap_local_settings(conf_dir=conf_dir)
| '''Command to bootstrap local settings with None values.
Created on Aug 24, 2010
@author: jnaous
'''
from optparse import make_option
import pkg_resources
import os
from django.core.management.base import NoArgsCommand
from django.conf import settings
from expedient.clearinghouse.commands.utils import bootstrap_local_settings
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option(
'--path', action='store', dest='path',
default=settings.CONF_DIR,
help='Specifies the location where local settings should ' \
'be installed. This location should be added to your ' \
'PYTHONPATH. Defaults to %s' \
% os.path.abspath(settings.CONF_DIR),
),
)
help = "Bootstrap a localsettings.py file"
def handle_noargs(self, **options):
conf_dir = os.path.abspath(options.get('path', settings.CONF_DIR))
bootstrap_local_settings(conf_dir=conf_dir)
| bsd-3-clause | Python |
188d6f7884b372fa0bc21f938dae9da386795da7 | Bump release | Calysto/octave_kernel,Calysto/octave_kernel | octave_kernel/__init__.py | octave_kernel/__init__.py | """An Octave kernel for Jupyter"""
__version__ = '0.19.7'
| """An Octave kernel for Jupyter"""
__version__ = '0.19.6'
| bsd-3-clause | Python |
4c3aff7bf33d876646eb538c8338a5f9ed063a44 | fix syntax | jupyter/jupyterlab,jupyter/jupyterlab,jupyter/jupyterlab,jupyter/jupyterlab,jupyter/jupyterlab | jupyterlab/_version.py | jupyterlab/_version.py | # Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from collections import namedtuple
VersionInfo = namedtuple('VersionInfo', [
'major',
'minor',
'micro',
'releaselevel',
'serial'
])
# DO NOT EDIT THIS DIRECTLY! It is managed by bumpversion
__version__ = '1.0.0a3'
parts = __version__.split('.')
if len(parts) < 2:
parts.append(0)
if len(parts) < 3:
parts.append(0)
if len(parts) < 4:
parts.append('final')
if len(parts) < 5:
parts.append(0)
version_info = VersionInfo(*parts)
| # Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from collections import namedtuple
VersionInfo = namedtuple('VersionInfo', [
'major',
'minor',
'micro',
'releaselevel',
'serial'
])
# DO NOT EDIT THIS DIRECTLY! It is managed by bumpversion
__version__ = '1.0.0a3'
parts = __version__.split('.')
if len(parts) < 2:
parts.append(0)
if len(parts) < 3:
parts.append(0)
if len(parts) < 4:
parts.append('final')
if len(parts) < 5:
parts.append(0)
version_info = VersionInfo(**parts)
| bsd-3-clause | Python |
b0ca54aa590c15ee4a33fb120b77a99b646d391a | Update read_html_file.py | gwu-business/salad-system-py,gwu-business/salad-system-py | software/read_html_file.py | software/read_html_file.py | # to run from root dir: `python software/read_html_file.py`
import code # to debug: `code.interact(local=locals())`
import os
from bs4 import BeautifulSoup
#
# READ HTML FILE
#
menu_dot_html = os.path.abspath(__file__).replace(os.path.relpath(__file__), "menu-items/index.html")
print "READING HTML FILE -- %(file_name)s" % {"file_name": menu_dot_html}
soup = BeautifulSoup(open(menu_dot_html),"lxml")
#
# SEARCH FILE CONTENTS
#
menu_item_list = soup.find(id="menu-item-list")
print menu_item_list
#
# MANIPULATE FILE CONTENTS
#
for i in [1,2,3,4,5]:
list_item = soup.new_tag('li')
list_item.string = str(i)
menu_item_list.append(list_item)
print menu_item_list
print soup
| # to run from root dir: `python software/read_html_file.py`
# source: https://wiki.python.org/moin/MiniDom
import code # to debug: `code.interact(local=locals())`
import os
from bs4 import BeautifulSoup
#
# READ HTML FILE
#
menu_dot_html = os.path.abspath(__file__).replace(os.path.relpath(__file__), "menu-items/index.html")
print "READING HTML FILE -- %(file_name)s" % {"file_name": menu_dot_html}
soup = BeautifulSoup(open(menu_dot_html),"lxml")
#
# SEARCH FILE CONTENTS
#
menu_item_list = soup.find(id="menu-item-list")
print menu_item_list
#
# MANIPULATE FILE CONTENTS
#
for i in [1,2,3,4,5]:
list_item = soup.new_tag('li')
list_item.string = str(i)
menu_item_list.append(list_item)
print menu_item_list
print soup
| mit | Python |
8478643d1826c7ee84da1cbe2846fb17beb2ec0f | revise my_func.py | adleff/python_ansible | class2/my_func.py | class2/my_func.py | #!/usr/bin/env python
# make a function that prints 'hello'
def my_func():
print 'hello'
def main():
my_func()
if __name__ == "__main__":
main()
| #!/usr/bin/env python
# make a function that prints 'hello'
def my_func():
print 'hello'
if __name__ == "__main__":
my_func()
| apache-2.0 | Python |
96cd690c399cd0c3f7d5b6a2add8a75bd7461c28 | remove napoleon_use_admonition_for_notes | cmeeren/apexpy,cmeeren/apexpy | docs/conf.py | docs/conf.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon'
]
if os.getenv('SPELLCHECK'):
extensions += 'sphinxcontrib.spelling',
spelling_show_suggestions = True
spelling_lang = 'en_US'
source_suffix = '.rst'
master_doc = 'index'
project = u'Apex Python library'
year = u'2015'
author = u'Christer van der Meeren'
copyright = '{0}, {1}'.format(year, author)
version = release = u'0.1.0'
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
pygments_style = 'trac'
templates_path = ['.']
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = True
html_sidebars = {
'**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)
autodoc_member_order='bysource'
napoleon_use_ivar=True
napoleon_use_rtype=False
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon'
]
if os.getenv('SPELLCHECK'):
extensions += 'sphinxcontrib.spelling',
spelling_show_suggestions = True
spelling_lang = 'en_US'
source_suffix = '.rst'
master_doc = 'index'
project = u'Apex Python library'
year = u'2015'
author = u'Christer van der Meeren'
copyright = '{0}, {1}'.format(year, author)
version = release = u'0.1.0'
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
pygments_style = 'trac'
templates_path = ['.']
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = True
html_sidebars = {
'**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)
autodoc_member_order='bysource'
napoleon_use_ivar=True
napoleon_use_admonition_for_notes=True
napoleon_use_rtype=False
| mit | Python |
2c91438c182562f69927037bd8c0192502ce07b5 | add support for telnet | sk2/autonetkit | autonetkit/ank_pika.py | autonetkit/ank_pika.py | import autonetkit.config as config
import autonetkit.log as log
import socket
use_rabbitmq = config.settings['Rabbitmq']['active']
if use_rabbitmq:
import pika
use_message_pipe = config.settings['Message Pipe']['active']
if use_message_pipe:
import telnetlib
#TODO: tidy this to be a try/except ImportError
#import pika.log
#pika.log.setup(pika.log.DEBUG, color=True)
#TODO: rename to messaging
class AnkPika(object):
def __init__(self, host):
try:
if use_rabbitmq:
log.debug("Using Rabbitmq with server %s " % host)
self.connection = pika.BlockingConnection(pika.ConnectionParameters(
host = host))
self.channel = self.connection.channel()
self.channel.exchange_declare(exchange='www',
type='direct')
self.publish = self.publish_pika
self.publish_compressed = self.publish_compressed_pika
if use_message_pipe:
#TODO: make message server also settable
port = config.settings['Message Pipe']['port']
self.telnet_port = port
self.publish = self.publish_telnet
self.publish_compressed = self.publish_telnet
#TODO: support use of both at once....
if not (use_rabbitmq or use_message_pipe):
log.debug("Not using Rabbitmq or telnet")
self.publish = self.publish_blank_stub
self.publish_compressed = self.publish_blank_stub
except socket.timeout:
log.warning("Socket Timeout: not using Rabbitmq")
self.publish = self.publish_blank_stub
self.publish_compressed = self.publish_blank_stub
def publish(self):
pass # will be replaced at init
def publish_compressed(self):
pass # will be replaced at init
def publish_telnet(self, exchange, routing_key, body):
try:
tn = telnetlib.Telnet("localhost", self.telnet_port)
tn.write(body)
tn.close()
except socket.error:
log.warning("Unable to connect to telnet on localhost at %s" % self.telnet_port)
def publish_compressed_telnet(self, exchange, routing_key, body):
import zlib
print "sending", body
#TODO: note don't compress - no upper bound if telnet sockets
#body = zlib.compress(body, 9)
self.tn.write(body + "__end__")
def publish_pika(self, exchange, routing_key, body):
self.channel.basic_publish(exchange= exchange,
routing_key = routing_key,
body= body)
def publish_compressed_pika(self, exchange, routing_key, body):
"""Compresses body using zlib before sending"""
import zlib
body = zlib.compress(body, 9)
self.publish(exchange, routing_key, body)
#TODO: implement callback
def publish_blank_stub(self, exchange, routing_key, body):
"""use if not using rabbitmq, simplifies calls elsewhere (publish does nothing)"""
#TODO: log that not sending for debug purposes
return
| import autonetkit.config as config
import autonetkit.log as log
use_rabbitmq = config.settings['Rabbitmq']['active']
if use_rabbitmq:
import pika
#TODO: tidy this to be a try/except ImportError
#import pika.log
#pika.log.setup(pika.log.DEBUG, color=True)
class AnkPika(object):
def __init__(self, host):
if use_rabbitmq:
log.debug("Using Rabbitmq with server %s " % host)
self.connection = pika.BlockingConnection(pika.ConnectionParameters(
host = host))
self.channel = self.connection.channel()
self.channel.exchange_declare(exchange='www',
type='direct')
else:
log.debug("Not using Rabbitmq")
self.publish = self.publish_blank_stub
self.publish_compressed = self.publish_blank_stub
def publish(self, exchange, routing_key, body):
self.channel.basic_publish(exchange= exchange,
routing_key = routing_key,
body= body)
def publish_compressed(self, exchange, routing_key, body):
"""Compresses body using zlib before sending"""
import zlib
body = zlib.compress(body, 9)
self.publish(exchange, routing_key, body)
#TODO: implement callback
def publish_blank_stub(self, exchange, routing_key, body):
"""use if not using rabbitmq, simplifies calls elsewhere (publish does nothing)"""
#TODO: log that not sending for debug purposes
return
| bsd-3-clause | Python |
0d161e09baa174e3dee972c9069a484a5a272117 | Remove \'_static\' from docs build | numberoverzero/bloop,numberoverzero/bloop | docs/conf.py | docs/conf.py | import sys
import alabaster
extensions = [
'alabaster',
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon'
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'bloop'
copyright = '2015, Joe Cross'
author = 'Joe Cross'
import pkg_resources
try:
release = pkg_resources.get_distribution('bloop').version
except pkg_resources.DistributionNotFound:
print('To build the documentation, The distribution information of bloop')
print('Has to be available. Either install the package into your')
print('development environment or run "setup.py develop" to setup the')
print('metadata. A virtualenv is recommended!')
sys.exit(1)
del pkg_resources
version = '.'.join(release.split('.')[:2])
language = 'en'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'alabaster'
html_theme_options = {
'github_user': 'numberoverzero',
'github_repo': 'bloop',
'github_banner': True,
'travis_button': True,
'show_powered_by': False,
'analytics_id': 'UA-65843067-1'
}
html_theme_path = [alabaster.get_path()]
html_static_path = []
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html'
]
}
| import sys
import alabaster
extensions = [
'alabaster',
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon'
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'bloop'
copyright = '2015, Joe Cross'
author = 'Joe Cross'
import pkg_resources
try:
release = pkg_resources.get_distribution('bloop').version
except pkg_resources.DistributionNotFound:
print('To build the documentation, The distribution information of bloop')
print('Has to be available. Either install the package into your')
print('development environment or run "setup.py develop" to setup the')
print('metadata. A virtualenv is recommended!')
sys.exit(1)
del pkg_resources
version = '.'.join(release.split('.')[:2])
language = 'en'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'alabaster'
html_theme_options = {
'github_user': 'numberoverzero',
'github_repo': 'bloop',
'github_banner': True,
'travis_button': True,
'show_powered_by': False,
'analytics_id': 'UA-65843067-1'
}
html_theme_path = [alabaster.get_path()]
html_static_path = ['_static']
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html'
]
}
| mit | Python |
859f93891e4bf47d02899f03f0620fd1f29ca224 | Fix generate_sample_data.py - bug from #2978 | CZCV/s-dilation-caffe,wangg12/caffe,wangg12/caffe,tackgeun/caffe,CZCV/s-dilation-caffe,gnina/gnina,tackgeun/caffe,CZCV/s-dilation-caffe,gnina/gnina,gnina/gnina,tackgeun/caffe,gnina/gnina,tackgeun/caffe,wangg12/caffe,gnina/gnina,wangg12/caffe,CZCV/s-dilation-caffe,gnina/gnina | src/caffe/test/test_data/generate_sample_data.py | src/caffe/test/test_data/generate_sample_data.py | """
Generate data used in the HDF5DataLayer and GradientBasedSolver tests.
"""
import os
import numpy as np
import h5py
script_dir = os.path.dirname(os.path.abspath(__file__))
# Generate HDF5DataLayer sample_data.h5
num_cols = 8
num_rows = 10
height = 6
width = 5
total_size = num_cols * num_rows * height * width
data = np.arange(total_size)
data = data.reshape(num_rows, num_cols, height, width)
data = data.astype('float32')
# We had a bug where data was copied into label, but the tests weren't
# catching it, so let's make label 1-indexed.
label = 1 + np.arange(num_rows)[:, np.newaxis]
label = label.astype('float32')
# We add an extra label2 dataset to test HDF5 layer's ability
# to handle arbitrary number of output ("top") Blobs.
label2 = label + 1
print data
print label
with h5py.File(script_dir + '/sample_data.h5', 'w') as f:
f['data'] = data
f['label'] = label
f['label2'] = label2
with h5py.File(script_dir + '/sample_data_2_gzip.h5', 'w') as f:
f.create_dataset(
'data', data=data + total_size,
compression='gzip', compression_opts=1
)
f.create_dataset(
'label', data=label,
compression='gzip', compression_opts=1,
dtype='uint8',
)
f.create_dataset(
'label2', data=label2,
compression='gzip', compression_opts=1,
dtype='uint8',
)
with open(script_dir + '/sample_data_list.txt', 'w') as f:
f.write('src/caffe/test/test_data/sample_data.h5\n')
f.write('src/caffe/test/test_data/sample_data_2_gzip.h5\n')
# Generate GradientBasedSolver solver_data.h5
num_cols = 3
num_rows = 8
height = 10
width = 10
data = np.random.randn(num_rows, num_cols, height, width)
data = data.reshape(num_rows, num_cols, height, width)
data = data.astype('float32')
targets = np.random.randn(num_rows, 1)
targets = targets.astype('float32')
print data
print targets
with h5py.File(script_dir + '/solver_data.h5', 'w') as f:
f['data'] = data
f['targets'] = targets
with open(script_dir + '/solver_data_list.txt', 'w') as f:
f.write('src/caffe/test/test_data/solver_data.h5\n')
| """
Generate data used in the HDF5DataLayer and GradientBasedSolver tests.
"""
import os
import numpy as np
import h5py
script_dir = os.path.dirname(os.path.abspath(__file__))
# Generate HDF5DataLayer sample_data.h5
num_cols = 8
num_rows = 10
height = 6
width = 5
total_size = num_cols * num_rows * height * width
data = np.arange(total_size)
data = data.reshape(num_rows, num_cols, height, width)
data = data.astype('float32')
# We had a bug where data was copied into label, but the tests weren't
# catching it, so let's make label 1-indexed.
label = 1 + np.arange(num_rows)[:, np.newaxis]
label = label.astype('float32')
# We add an extra label2 dataset to test HDF5 layer's ability
# to handle arbitrary number of output ("top") Blobs.
label2 = label + 1
print data
print label
with h5py.File(script_dir + '/sample_data.h5', 'w') as f:
f['data'] = data
f['label'] = label
f['label2'] = label2
with h5py.File(script_dir + '/sample_data_uint8_gzip.h5', 'w') as f:
f.create_dataset(
'data', data=data + total_size,
compression='gzip', compression_opts=1
)
f.create_dataset(
'label', data=label,
compression='gzip', compression_opts=1,
dtype='uint8',
)
f.create_dataset(
'label2', data=label2,
compression='gzip', compression_opts=1,
dtype='uint8',
)
with open(script_dir + '/sample_data_list.txt', 'w') as f:
f.write('src/caffe/test/test_data/sample_data.h5\n')
f.write('src/caffe/test/test_data/sample_uint8_gzip.h5\n')
# Generate GradientBasedSolver solver_data.h5
num_cols = 3
num_rows = 8
height = 10
width = 10
data = np.random.randn(num_rows, num_cols, height, width)
data = data.reshape(num_rows, num_cols, height, width)
data = data.astype('float32')
targets = np.random.randn(num_rows, 1)
targets = targets.astype('float32')
print data
print targets
with h5py.File(script_dir + '/solver_data.h5', 'w') as f:
f['data'] = data
f['targets'] = targets
with open(script_dir + '/solver_data_list.txt', 'w') as f:
f.write('src/caffe/test/test_data/solver_data.h5\n')
| agpl-3.0 | Python |
d52d0942831ada869a37cc30f018428b1e9481c5 | Add published date to the admin page for the episodes | matachi/sputnik,matachi/sputnik,matachi/sputnik,matachi/sputnik | podcasts/admin.py | podcasts/admin.py | from django.contrib import admin
from django.contrib.messages import constants
import socket
from podcasts.models import Podcast, Episode, PodcastUserProfile, Tag, Category
class EpisodeInline(admin.StackedInline):
model = Episode
extra = 1
def update_podcast(modeladmin, request, queryset):
for podcast in queryset:
for error in podcast.update():
if isinstance(error, socket.timeout):
message = "Downloading {}'s cover image timed out".format(
podcast)
modeladmin.message_user(request, message, constants.WARNING)
update_podcast.short_description = "Update selected podcasts with metadata " +\
"from their feeds"
def fetch_episodes(modeladmin, request, queryset):
for podcast in queryset:
podcast.fetch_episodes()
fetch_episodes.short_description = "Fetch new episodes for the selected " +\
"podcasts"
class PodcastAdmin(admin.ModelAdmin):
inlines = (EpisodeInline,)
list_display = ('title_or_unnamed', 'feed', 'link')
actions = [update_podcast, fetch_episodes]
admin.site.register(Podcast, PodcastAdmin)
class EpisodeAdmin(admin.ModelAdmin):
list_display = ('title', 'link', 'published')
list_filter = ('podcast',)
search_fields = ('title', 'description')
admin.site.register(Episode, EpisodeAdmin)
class PodcastUserProfileAdmin(admin.ModelAdmin):
pass
admin.site.register(PodcastUserProfile, PodcastUserProfileAdmin)
class TagAdmin(admin.ModelAdmin):
pass
admin.site.register(Tag, TagAdmin)
class CategoryAdmin(admin.ModelAdmin):
pass
admin.site.register(Category, CategoryAdmin)
| from django.contrib import admin
from django.contrib.messages import constants
import socket
from podcasts.models import Podcast, Episode, PodcastUserProfile, Tag, Category
class EpisodeInline(admin.StackedInline):
model = Episode
extra = 1
def update_podcast(modeladmin, request, queryset):
for podcast in queryset:
for error in podcast.update():
if isinstance(error, socket.timeout):
message = "Downloading {}'s cover image timed out".format(
podcast)
modeladmin.message_user(request, message, constants.WARNING)
update_podcast.short_description = "Update selected podcasts with metadata " +\
"from their feeds"
def fetch_episodes(modeladmin, request, queryset):
for podcast in queryset:
podcast.fetch_episodes()
fetch_episodes.short_description = "Fetch new episodes for the selected " +\
"podcasts"
class PodcastAdmin(admin.ModelAdmin):
inlines = (EpisodeInline,)
list_display = ('title_or_unnamed', 'feed', 'link')
actions = [update_podcast, fetch_episodes]
admin.site.register(Podcast, PodcastAdmin)
class EpisodeAdmin(admin.ModelAdmin):
list_display = ('title', 'link')
list_filter = ('podcast',)
search_fields = ('title', 'description')
admin.site.register(Episode, EpisodeAdmin)
class PodcastUserProfileAdmin(admin.ModelAdmin):
pass
admin.site.register(PodcastUserProfile, PodcastUserProfileAdmin)
class TagAdmin(admin.ModelAdmin):
pass
admin.site.register(Tag, TagAdmin)
class CategoryAdmin(admin.ModelAdmin):
pass
admin.site.register(Category, CategoryAdmin)
| mit | Python |
0acceec5bae916550ec0da40d2108b4e478cdb7a | fix isjsonnet error | kubespray/kpm,ant31/kpm,ant31/kpm,ant31/kpm,ant31/kpm | kpm/commands/remove.py | kpm/commands/remove.py | from kpm.utils import parse_cmdline_variables
import kpm.deploy
from kpm.commands.deploy import DeployCmd
class RemoveCmd(DeployCmd):
name = 'remove'
help_message = "remove a package from kubernetes"
def _call(self):
variables = None
if self.variables is not None:
variables = parse_cmdline_variables(self.variables)
self.status = kpm.deploy.delete(self.package,
version=self.version,
dest=self.tmpdir,
namespace=self.namespace,
force=self.force,
dry=self.dry_run,
endpoint=self.registry_host,
proxy=self.api_proxy,
variables=variables,
shards=self.shards,
fmt=self.output)
| from kpm.utils import parse_cmdline_variables
import kpm.deploy
from kpm.commands.deploy import DeployCmd
class RemoveCmd(DeployCmd):
name = 'remove'
help_message = "remove a package from kubernetes"
def _call(self):
variables = None
if self.variables is not None:
variables = parse_cmdline_variables(self.variables)
self.status = kpm.deploy.delete(self.package,
version=self.version,
dest=self.tmpdir,
namespace=self.namespace,
force=self.force,
dry=self.dry_run,
endpoint=self.registry_host,
proxy=self.api_proxy,
variables=variables,
shards=self.shards,
jsonnet=self.isjsonnet,
fmt=self.output)
| apache-2.0 | Python |
cf98f7b8a0c8104d0830d9acab85f07047738cff | Update for docs/ | wwunlp/sner | docs/conf.py | docs/conf.py | import os
import sys
sys.path.append(os.path.abspath('../sner'))
project = 'SNER'
author = 'WWUNLP SNER Team'
copyright = '2017, ' + author
release = '0.1.1'
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon'
]
source_suffix = '.rst'
master_doc = 'index'
pygments_style = 'sphinx'
html_theme = 'alabaster'
html_theme_options = {
'description': 'Sumerian Named Entity Recognition'
}
| import os
import sys
sys.path.append(os.path.abspath('..'))
project = 'SNER'
author = 'WWUNLP SNER Team'
copyright = '2017, ' + author
release = '0.1.1'
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon'
]
source_suffix = '.rst'
master_doc = 'index'
pygments_style = 'sphinx'
html_theme = 'alabaster'
html_theme_options = {
'description': 'Sumerian Named Entity Recognition'
}
| mit | Python |
a2b1dc11dc4a845f16567388672aa5d51292c9e2 | Bump event grid module version | yugangw-msft/azure-cli,yugangw-msft/azure-cli,yugangw-msft/azure-cli,yugangw-msft/azure-cli,yugangw-msft/azure-cli,yugangw-msft/azure-cli | src/command_modules/azure-cli-eventgrid/setup.py | src/command_modules/azure-cli-eventgrid/setup.py | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from codecs import open
from setuptools import setup
try:
from azure_bdist_wheel import cmdclass
except ImportError:
from distutils import log as logger
logger.warn("Wheel is not available, disabling bdist_wheel hook")
cmdclass = {}
VERSION = "0.1.6"
# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
DEPENDENCIES = [
'azure-cli-core',
'six',
'azure-mgmt-eventgrid==0.2.0',
]
with open('README.rst', 'r', encoding='utf-8') as f:
README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
HISTORY = f.read()
setup(
name='azure-cli-eventgrid',
version=VERSION,
description='Microsoft Azure Command-Line Tools EventGrid Command Module',
long_description=README + '\n\n' + HISTORY,
license='MIT',
author='Microsoft Corporation',
author_email='azpycli@microsoft.com',
url='https://github.com/Azure/azure-cli',
classifiers=CLASSIFIERS,
packages=[
'azure',
'azure.cli',
'azure.cli.command_modules',
'azure.cli.command_modules.eventgrid'
],
install_requires=DEPENDENCIES,
cmdclass=cmdclass
)
| #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from codecs import open
from setuptools import setup
try:
from azure_bdist_wheel import cmdclass
except ImportError:
from distutils import log as logger
logger.warn("Wheel is not available, disabling bdist_wheel hook")
cmdclass = {}
VERSION = "0.1.5"
# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
DEPENDENCIES = [
'azure-cli-core',
'six',
'azure-mgmt-eventgrid==0.2.0',
]
with open('README.rst', 'r', encoding='utf-8') as f:
README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
HISTORY = f.read()
setup(
name='azure-cli-eventgrid',
version=VERSION,
description='Microsoft Azure Command-Line Tools EventGrid Command Module',
long_description=README + '\n\n' + HISTORY,
license='MIT',
author='Microsoft Corporation',
author_email='azpycli@microsoft.com',
url='https://github.com/Azure/azure-cli',
classifiers=CLASSIFIERS,
packages=[
'azure',
'azure.cli',
'azure.cli.command_modules',
'azure.cli.command_modules.eventgrid'
],
install_requires=DEPENDENCIES,
cmdclass=cmdclass
)
| mit | Python |
370f9a77a973bd6557a702f4f2f4bc82f940fc3a | Bump version to 0.1.1 | bopo/django-taggit-serializer,pombredanne/django-taggit-serializer,glemmaPaul/django-taggit-serializer | taggit_serializer/__init__.py | taggit_serializer/__init__.py | __version__ = '0.1.1' | __version__ = '0.1.0' | bsd-3-clause | Python |
f19d0cbcb5cb94b4813fbba5cb47a9e4b5a81374 | add few corrections | adriaaah/monit-dashboard,adriaaah/monit-dashboard,adriaaah/monit-dashboard | bin/monit-dashboard.py | bin/monit-dashboard.py | #!/usr/bin/python
import web
import requests
import xmltodict
import json
import os
import sys
import datetime
from collections import OrderedDict
from operator import itemgetter
urls = ('/', 'index',
'/help', 'help'
)
app = web.application(urls, globals())
render = web.template.render('templates/', base="layout")
# Uncomment to turn debug off
web.config.debug = False
# Variables
output = []
# Functions
def getMonit():
output = []
xmlQuery = "/_status?format=xml"
with open('{0}/conf/servers.json'.format(os.path.expanduser('.'))) as f:
cf = json.loads(f.read())
for site in cf:
s = cf[site]
r = requests.get(s['url'] + xmlQuery,
auth=(s['user'], s['passwd']))
allstat = json.loads(json.dumps(xmltodict.parse(r.text)['monit']))
services = allstat['service']
status = {}
server = {}
checks = OrderedDict()
for service in services:
name = service['name']
status[name] = int(service['status'])
checks[name] = status[name]
sorted_checks = OrderedDict()
sorted_checks = OrderedDict(sorted(checks.iteritems(), key=itemgetter(1), reverse=True))
server = dict(name=site, url=s['url'], result=sorted_checks)
output.append(server)
print(datetime.datetime.now())
return(output)
# Classes
class monitDashboard(web.application):
def run(self, port=8080, *middleware):
func = self.wsgifunc(*middleware)
return web.httpserver.runsimple(func, ('0.0.0.0', port))
class index(object):
def GET(self):
return render.index(output=getMonit(), now=datetime.datetime.now())
class help(object):
def GET(self):
return render.help()
# Main
if __name__ == "__main__":
app = monitDashboard(urls, globals())
app.run(port=8080)
| #!/usr/bin/python
import web
import requests
import xmltodict
import json
import os
import sys
import datetime
from collections import OrderedDict
from operator import itemgetter
urls = ('/', 'index',
'/help', 'help'
)
app = web.application(urls, globals())
render = web.template.render('templates/', base="layout")
# Uncomment to turn debug off
web.config.debug = False
# Variables
output = []
# Functions
def getMonit():
output = []
server = {}
checks = OrderedDict()
xmlQuery = "/_status?format=xml"
with open('{0}/conf/servers.json'.format(os.path.expanduser('.'))) as f:
cf = json.loads(f.read())
for site in cf:
s = cf[site]
r = requests.get(s['url'] + xmlQuery,
auth=(s['user'], s['passwd']))
allstat = json.loads(json.dumps(xmltodict.parse(r.text)['monit']))
services = allstat['service']
status = {}
checks = OrderedDict()
for service in services:
name = service['name']
status[name] = int(service['status'])
checks[service['name']] = status[name]
sorted_checks = OrderedDict()
sorted_checks = OrderedDict(sorted(checks.iteritems(), key=itemgetter(1), reverse=True))
server = dict(name=site, url=s['url'], result=sorted_checks)
output.append(server)
print(datetime.datetime.now())
return(output)
# Classes
class monitDashboard(web.application):
def run(self, port=8080, *middleware):
func = self.wsgifunc(*middleware)
return web.httpserver.runsimple(func, ('0.0.0.0', port))
class index(object):
def GET(self):
return render.index(output=getMonit(), now=datetime.datetime.now())
class help(object):
def GET(self):
return render.help()
# Main
if __name__ == "__main__":
app = monitDashboard(urls, globals())
app.run(port=8080)
| agpl-3.0 | Python |
90c1c0f257b1b26d3a4c6b081cbfe5d61d9e31d2 | refactor commonmark test | miyuchina/mistletoe,miyuchina/mistletoe | test/commonmark/commonmark.py | test/commonmark/commonmark.py | import json
import mistletoe
from pprint import pprint
from traceback import print_tb
def run_tests(test_entries, runnable):
return [run_test(test_entry, runnable) for test_entry in test_entries]
def run_test(test_entry, runnable):
test_case = test_entry['markdown'].splitlines(keepends=True)
try:
output = runnable(test_case)
success = compare(test_entry['html'].replace('\t', ' '), output)
if not success:
print_test_entry(test_entry, 'html', 'markdown', 'example')
print('output:', repr(output), '\n')
return success
except Exception as exception:
print_exception(exception, test_entry)
return False
def compare(expected, output):
return ''.join(expected.splitlines()) == ''.join(output.splitlines())
def print_exception(exception, test_entry):
print(exception.__class__.__name__ + ':', exception)
print('\nTraceback: ')
print_tb(exception.__traceback__)
print_test_entry(test_entry)
print('='*80)
def print_test_entry(test_entry, *keywords):
if keywords:
pprint({keyword: test_entry[keyword] for keyword in keywords})
else:
pprint(test_entry)
def main():
with open('commonmark.json', 'r') as fin:
test_entries = json.load(fin)
return run_tests(test_entries, mistletoe.markdown)
if __name__ == '__main__':
results = main()
print('failed:', len(list(filter(lambda x: not x, results))))
print(' total:', len(results))
| SKIPPED_TESTS = {}
def run_tests(test_entries, runnable):
return [run_test(test_entry, runnable) for test_entry in test_entries
if test_entry['example'] not in SKIPPED_TESTS]
def run_test(test_entry, runnable):
test_case = test_entry['markdown'].splitlines(keepends=True)
try:
output = runnable(test_case)
success = compare(test_entry['html'].replace('\t', ' '), output)
if not success:
print_test_entry(test_entry, 'html', 'markdown', 'example')
print('output:', repr(output), '\n')
return success
except Exception as exception:
print_exception(exception, test_entry)
def compare(expected, output):
return ''.join(expected.splitlines()) == ''.join(output.splitlines())
def print_exception(exception, test_entry):
from traceback import print_tb
print(exception.__class__.__name__ + ':', exception)
print('\nTraceback: ')
print_tb(exception.__traceback__)
print_test_entry(test_entry)
print('='*80)
def print_test_entry(test_entry, *keywords):
from pprint import pprint
if keywords:
pprint({keyword: test_entry[keyword] for keyword in keywords})
else:
pprint(test_entry)
def main():
import json
import mistletoe
with open('commonmark.json', 'r') as fin:
test_entries = json.load(fin)
return run_tests(test_entries, mistletoe.markdown)
if __name__ == '__main__':
results = main()
print('failed:', len(list(filter(lambda x: not x, results))))
print(' total:', len(results))
| mit | Python |
8940e5be1e5fbf30ccdf5327777b993b7f0b0a1f | add type | ClaudiaSaxer/PlasoScaffolder | src/plasoscaffolder/bll/mappings/init_mapping.py | src/plasoscaffolder/bll/mappings/init_mapping.py | # -*- coding: utf-8 -*-
""" Module representing function for the different files """
from plasoscaffolder.bll.mappings.mapping_helper import render_template
def get_formatter_init_create(plugin_name: str) -> str:
"""
renders formatter init if you want to create new init file
:param plugin_name: the plugin name
:return: string of the rendered template
"""
file_name = "formatter_init_create_template.py"
return _render_init(file_name, plugin_name)
def get_formatter_init_edit(plugin_name: str) -> str:
"""
renders formatter init if you want to create new init file
:param plugin_name: the plugin name
:return: string of the rendered template
"""
file_name = "formatter_init_edit_template.py"
return _render_init(file_name, plugin_name)
def get_parser_init_create(plugin_name: str) -> str:
"""
renders formatter init if you want to edit an existing init file
:param plugin_name: the plugin name
:return: string of the rendered template
"""
file_name = "parser_init_create_template.py"
return _render_init(file_name, plugin_name)
def get_parser_init_edit(plugin_name: str) -> str:
"""
renders parser init if you want to create new init file
:param plugin_name: the plugin name
:return: string of the rendered template
"""
file_name = "parser_init_edit_template.py"
return _render_init(file_name, plugin_name)
def _render_init(file_name: str, plugin_name: str) -> str:
"""
renders parser init if you want to edit an existing init file
:param plugin_name: the plugin name
:return: string of the rendered template
"""
context = {'plugin_name': plugin_name}
rendered = render_template(file_name, context)
return rendered
| # -*- coding: utf-8 -*-
""" Module representing function for the different files """
from plasoscaffolder.bll.mappings.mapping_helper import render_template
def get_formatter_init_create(plugin_name: str) -> str:
"""
renders formatter init if you want to create new init file
:param plugin_name: the plugin name
:return: string of the rendered template
"""
file_name = "formatter_init_create_template.py"
return _render_init(file_name, plugin_name)
def get_formatter_init_edit(plugin_name: str) -> str:
"""
renders formatter init if you want to create new init file
:param plugin_name: the plugin name
:return: string of the rendered template
"""
file_name = "formatter_init_edit_template.py"
return _render_init(file_name, plugin_name)
def get_parser_init_create(plugin_name: str) -> str:
"""
renders formatter init if you want to edit an existing init file
:param plugin_name: the plugin name
:return: string of the rendered template
"""
file_name = "parser_init_create_template.py"
return _render_init(file_name, plugin_name)
def get_parser_init_edit(plugin_name: str) -> str:
"""
renders parser init if you want to create new init file
:param plugin_name: the plugin name
:return: string of the rendered template
"""
file_name = "parser_init_edit_template.py"
return _render_init(file_name, plugin_name)
def _render_init(file_name, plugin_name: str) -> str:
"""
renders parser init if you want to edit an existing init file
:param plugin_name: the plugin name
:return: string of the rendered template
"""
context = {'plugin_name': plugin_name}
rendered = render_template(file_name, context)
return rendered
| apache-2.0 | Python |
9144ef537e61fe0e1d5ff81243e899fad42ce1c1 | update lutin naming | generic-library/lua,generic-library/lua,generic-library/lua | lutin_lua.py | lutin_lua.py | #!/usr/bin/python
# --------------------------------------------------------
# -- Linear Math librairy
# --------------------------------------------------------
import lutinModule as module
import lutinTools as tools
def get_desc():
return "Lua Lua interpretic script module"
def create(target):
myModule = module.Module(__file__, 'lua', 'LIBRARY')
myModule.add_module_depend('etk')
myModule.compile_flags_CC([
'-DLUA_VERSION_TAG_NAME="\"5.2\""',
'-Wall'])
myModule.add_export_path(tools.get_current_path(__file__))
myModule.add_path(tools.get_current_path(__file__)+"/lua/")
myModule.add_export_flag_CC('-DLUA_COMPAT_ALL');
#ifeq ("$(TARGET_OS)","Windows")
# myModule.compile_flags_CC('-D_WIN32')
#else
myModule.compile_flags_CC('-DLUA_USE_LINUX')
#endif
myModule.add_src_file([
'lua/lapi.cpp',
'lua/lauxlib.cpp',
'lua/lbaselib.cpp',
'lua/lbitlib.cpp',
'lua/lcode.cpp',
'lua/lcorolib.cpp',
'lua/lctype.cpp',
'lua/ldblib.cpp',
'lua/ldebug.cpp',
'lua/ldo.cpp',
'lua/ldump.cpp',
'lua/lfunc.cpp',
'lua/lgc.cpp',
'lua/linit.cpp',
'lua/liolib.cpp',
'lua/llex.cpp',
'lua/lmathlib.cpp',
'lua/lmem.cpp',
'lua/loadlib.cpp',
'lua/lobject.cpp',
'lua/lopcodes.cpp',
'lua/loslib.cpp',
'lua/lparser.cpp',
'lua/lstate.cpp',
'lua/lstring.cpp',
'lua/lstrlib.cpp',
'lua/ltable.cpp',
'lua/ltablib.cpp',
'lua/ltm.cpp',
'lua/lundump.cpp',
'lua/lvm.cpp',
'lua/lzio.cpp'])
# add the currrent module at the
return myModule
| #!/usr/bin/python
# --------------------------------------------------------
# -- Linear Math librairy
# --------------------------------------------------------
import lutinModule
import lutinTools
def Create(target):
myModule = lutinModule.module(__file__, 'lua', 'LIBRARY')
myModule.AddModuleDepend('etk')
myModule.CompileFlags_CC([
'-DLUA_VERSION_TAG_NAME="\"5.2\""',
'-Wall'])
myModule.AddExportPath(lutinTools.GetCurrentPath(__file__))
myModule.AddPath(lutinTools.GetCurrentPath(__file__)+"/lua/")
myModule.AddExportFlag_CC('-DLUA_COMPAT_ALL');
#ifeq ("$(TARGET_OS)","Windows")
# myModule.CompileFlags_CC('-D_WIN32')
#else
myModule.CompileFlags_CC('-DLUA_USE_LINUX')
#endif
myModule.AddSrcFile([
'lua/lapi.cpp',
'lua/lauxlib.cpp',
'lua/lbaselib.cpp',
'lua/lbitlib.cpp',
'lua/lcode.cpp',
'lua/lcorolib.cpp',
'lua/lctype.cpp',
'lua/ldblib.cpp',
'lua/ldebug.cpp',
'lua/ldo.cpp',
'lua/ldump.cpp',
'lua/lfunc.cpp',
'lua/lgc.cpp',
'lua/linit.cpp',
'lua/liolib.cpp',
'lua/llex.cpp',
'lua/lmathlib.cpp',
'lua/lmem.cpp',
'lua/loadlib.cpp',
'lua/lobject.cpp',
'lua/lopcodes.cpp',
'lua/loslib.cpp',
'lua/lparser.cpp',
'lua/lstate.cpp',
'lua/lstring.cpp',
'lua/lstrlib.cpp',
'lua/ltable.cpp',
'lua/ltablib.cpp',
'lua/ltm.cpp',
'lua/lundump.cpp',
'lua/lvm.cpp',
'lua/lzio.cpp'])
# add the currrent module at the
return myModule
| mit | Python |
1570fe173cc3eb9acdbe8ebec88f50f3e9c81f7f | test fixed | turippj/turip-manifest,turippj/turip-manifest,turippj/turip-manifest | main_test.py | main_test.py | import sys
import unittest
sys.path.append('/home/travis/google-cloud-sdk/platform/google_appengine')
from google.appengine.ext import testbed
from google.appengine.ext import ndb
from google.appengine.api import search
import webapp2
import main
class TestHandlers(unittest.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_search_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache()
def test_Manifest(self):
# AddManifest Test
test_file = open('./test.json', 'r')
post_contents = {'file_data': test_file.read()}
request = webapp2.Request.blank('/api/manifest/add', POST=post_contents)
response = request.get_response(main.app)
self.assertEqual(response.status_int, 200)
self.assertEqual(response.body, 'Upload Manifest success!')
# SearchManifest Test
request = webapp2.Request.blank('/ffff')
response = request.get_response(main.app)
self.assertEqual(response.status_int, 200)
def tearDown(self):
self.testbed.deactivate()
if __name__ == '__main__':
unittest.main()
| import sys
import unittest
sys.path.append('/home/travis/google-cloud-sdk/platform/google_appengine')
from google.appengine.ext import testbed
from google.appengine.ext import ndb
import webapp2
import main
class TestHandlers(unittest.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
ndb.get_context().clear_cache()
def test_Manifest(self):
# AddManifest Test
test_file = open('./test.json', 'r')
post_contents = {'file_data': test_file.read()}
request = webapp2.Request.blank('/api/manifest/add', POST=post_contents)
response = request.get_response(main.app)
self.assertEqual(response.status_int, 200)
self.assertEqual(response.body, 'Upload Manifest success!')
# SearchManifest Test
request = webapp2.Request.blank('/ffff')
response = request.get_response(main.app)
self.assertEqual(response.status_int, 200)
def tearDown(self):
self.testbed.deactivate()
if __name__ == '__main__':
unittest.main()
| mit | Python |
ebd16d5d2554796630de871b6c6ec1aa66f5ad37 | Clarify the missing information. | ridibooks/lightweight-rest-tester,ridibooks/lightweight-rest-tester | rest_tester/setting/target.py | rest_tester/setting/target.py | from .api import API
from .tests import Tests
class TestTarget(object):
KEY_API = 'api'
KEY_TESTS = 'tests'
def __init__(self, json_data):
try:
self._api = API(json_data[self.KEY_API])
self._tests = Tests(json_data[self.KEY_TESTS])
except KeyError as e:
raise IncompleteTargetInformationError('Test case has missing information: %s' % str(e))
@property
def api(self):
return self._api
@property
def tests(self):
return self._tests
class IncompleteTargetInformationError(Exception):
"""Test types are not supported"""
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
| from .api import API
from .tests import Tests
class TestTarget(object):
KEY_API = 'api'
KEY_TESTS = 'tests'
def __init__(self, json_data):
try:
self._api = API(json_data[self.KEY_API])
self._tests = Tests(json_data[self.KEY_TESTS])
except KeyError:
raise IncompleteTargetInformationError('Test case has incomplete information.')
@property
def api(self):
return self._api
@property
def tests(self):
return self._tests
class IncompleteTargetInformationError(Exception):
"""Test types are not supported"""
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
| mit | Python |
c940747eba6aa6d5eb4c9e0108efdb155fc9dcaa | Fix for Python 2.6. | scikit-hep/scikit-hep | tests/math/test_kinematics.py | tests/math/test_kinematics.py | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license, see LICENSE.
"""
Tests for the skhep.math.kinematics module.
"""
# -----------------------------------------------------------------------------
# Import statements
# -----------------------------------------------------------------------------
import unittest
from skhep.math.kinematics import *
from skhep.units import GeV, ps
from skhep.constants import hbar
from skhep.utils.py23 import *
# -----------------------------------------------------------------------------
# Actual tests
# -----------------------------------------------------------------------------
class Test(unittest.TestCase):
def runTest(self):
# required for Python 2.6 only
self.test_kinematics_Kallen_function()
self.test_width_lifetime_conversions()
def test_kinematics_Kallen_function(self):
self.assertEqual(Kallen_function(2, 1, 0), 1)
def test_width_lifetime_conversions(self):
self.assertAlmostEqual(lifetime_to_width(1.5*ps)/GeV,4.388079676311604e-13)
self.assertTrue(1.5*ps * lifetime_to_width(1.5*ps) == hbar)
| #!/usr/bin/env python
# Licensed under a 3-clause BSD style license, see LICENSE.
"""
Tests for the skhep.math.kinematics module.
"""
# -----------------------------------------------------------------------------
# Import statements
# -----------------------------------------------------------------------------
import unittest
from skhep.math.kinematics import *
from skhep.units import GeV, ps
from skhep.constants import hbar
from skhep.utils.py23 import *
# -----------------------------------------------------------------------------
# Actual tests
# -----------------------------------------------------------------------------
class Test(unittest.TestCase):
def runTest(self):
# required for Python 2.6 only
self.test_Kallen_function()
self.test_width_lifetime_conversions()
def test_kinematics_Kallen_function(self):
self.assertEqual(Kallen_function(2, 1, 0), 1)
def test_width_lifetime_conversions(self):
self.assertAlmostEqual(lifetime_to_width(1.5*ps)/GeV,4.388079676311604e-13)
self.assertTrue(1.5*ps * lifetime_to_width(1.5*ps) == hbar)
| bsd-3-clause | Python |
64086acee22cfc2dde2fec9da1ea1b7745ce3d85 | Remove useless test about model representation | cgwire/zou | tests/misc/test_base_model.py | tests/misc/test_base_model.py | # -*- coding: UTF-8 -*-
from tests.base import ApiDBTestCase
class BaseModelTestCase(ApiDBTestCase):
def test_query(self):
pass
def test_get(self):
pass
def test_get_by(self):
pass
def test_get_all_by(self):
pass
def test_create(self):
pass
def test_get_id_map(self):
pass
def save(self):
pass
def delete(self):
pass
def update(self):
pass
| # -*- coding: UTF-8 -*-
from tests.base import ApiDBTestCase
class BaseModelTestCase(ApiDBTestCase):
def test_repr(self):
self.generate_fixture_project_status()
self.generate_fixture_project()
self.assertEqual(str(self.project), "<Project %s>" % self.project.name)
def test_query(self):
pass
def test_get(self):
pass
def test_get_by(self):
pass
def test_get_all_by(self):
pass
def test_create(self):
pass
def test_get_id_map(self):
pass
def save(self):
pass
def delete(self):
pass
def update(self):
pass
| agpl-3.0 | Python |
82dcc4d41fb76621475f56650bdd53e1e3d2263e | Return 1 on failure | nbigaouette/sorting,nbigaouette/sorting,nbigaouette/sorting,nbigaouette/sorting | profiling/plot.py | profiling/plot.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import glob
import re
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
if len(sys.argv) == 1:
print('Usage: plot.py path/to/build/profiling')
sys.exit(1)
csv_files = glob.glob(os.path.join(sys.argv[1], '*.csv'))
fig = plt.figure()
ax = fig.add_subplot(111)
colors = iter(plt.cm.rainbow(np.linspace(0,1,len(csv_files))))
p = re.compile(r'profiling_(.*?)_(.*?)\.csv')
ms_to_s = 1.0 / 1000.0
for csv_file in csv_files:
data = np.genfromtxt(csv_file, delimiter=',', skip_header=1).transpose()
j = data[0]
N = data[1]
avg = data[2]
std = data[3]
m = p.search(csv_file)
name = m.group(2)
name = name.replace('_', ' ')
ax.errorbar(N, avg*ms_to_s, yerr=std*ms_to_s,
label=name, color=next(colors), marker='o')
ax.grid(True)
ax.set_xlabel('N')
ax.set_ylabel('Timing [s]')
ax.set_xscale('log')
ax.set_yscale('log')
ax.legend(loc='best')
plt.show()
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import glob
import re
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
if len(sys.argv) == 1:
print('Usage: plot.py path/to/build/profiling')
sys.exit(0)
csv_files = glob.glob(os.path.join(sys.argv[1], '*.csv'))
fig = plt.figure()
ax = fig.add_subplot(111)
colors = iter(plt.cm.rainbow(np.linspace(0,1,len(csv_files))))
p = re.compile(r'profiling_(.*?)_(.*?)\.csv')
ms_to_s = 1.0 / 1000.0
for csv_file in csv_files:
data = np.genfromtxt(csv_file, delimiter=',', skip_header=1).transpose()
j = data[0]
N = data[1]
avg = data[2]
std = data[3]
m = p.search(csv_file)
name = m.group(2)
name = name.replace('_', ' ')
ax.errorbar(N, avg*ms_to_s, yerr=std*ms_to_s,
label=name, color=next(colors), marker='o')
ax.grid(True)
ax.set_xlabel('N')
ax.set_ylabel('Timing [s]')
ax.set_xscale('log')
ax.set_yscale('log')
ax.legend(loc='best')
plt.show()
| bsd-3-clause | Python |
93daf0b40505dceae41e8977ca416d3f6c878de7 | Disable negative timestamp tests on windows #20 | antidot/Pyckson | tests/test_arrow_formatter.py | tests/test_arrow_formatter.py | import os
from datetime import datetime, date
from pyckson.dates.arrow import ArrowStringFormatter, ArrowTimestampFormatter
def test_parse_string_datetimes():
formatter = ArrowStringFormatter()
assert formatter.parse_datetime('2013-05-05 12:30:45') == datetime(2013, 5, 5, 12, 30, 45)
assert formatter.parse_date('2013-05-05 12:30:45') == date(2013, 5, 5)
def test_serialize_string_datetimes():
formatter = ArrowStringFormatter()
assert formatter.serialize_datetime(datetime(2013, 5, 5, 12, 30, 45)) == '2013-05-05T12:30:45+00:00'
assert formatter.serialize_date(date(2013, 5, 5)) == '2013-05-05'
def test_parse_int_datetimes():
formatter = ArrowTimestampFormatter()
assert formatter.parse_datetime(1367757045) == datetime(2013, 5, 5, 12, 30, 45)
if os.name != "nt":
assert formatter.parse_date(-11665296000) == date(1600, 5, 5)
def test_serialize_int_datetimes():
formatter = ArrowTimestampFormatter()
assert formatter.serialize_datetime(datetime(2013, 5, 5, 12, 30, 45)) == 1367757045
assert formatter.serialize_date(date(1600, 5, 5)) == -11665296000
| from datetime import datetime, date
from pyckson.dates.arrow import ArrowStringFormatter, ArrowTimestampFormatter
def test_parse_string_datetimes():
formatter = ArrowStringFormatter()
assert formatter.parse_datetime('2013-05-05 12:30:45') == datetime(2013, 5, 5, 12, 30, 45)
assert formatter.parse_date('2013-05-05 12:30:45') == date(2013, 5, 5)
def test_serialize_string_datetimes():
formatter = ArrowStringFormatter()
assert formatter.serialize_datetime(datetime(2013, 5, 5, 12, 30, 45)) == '2013-05-05T12:30:45+00:00'
assert formatter.serialize_date(date(2013, 5, 5)) == '2013-05-05'
def test_parse_int_datetimes():
formatter = ArrowTimestampFormatter()
assert formatter.parse_datetime(1367757045) == datetime(2013, 5, 5, 12, 30, 45)
assert formatter.parse_date(-11665296000) == date(1600, 5, 5)
def test_serialize_int_datetimes():
formatter = ArrowTimestampFormatter()
assert formatter.serialize_datetime(datetime(2013, 5, 5, 12, 30, 45)) == 1367757045
assert formatter.serialize_date(date(1600, 5, 5)) == -11665296000
| lgpl-2.1 | Python |
7000e89828ec82f8e5c26c39ac290cb329036e17 | Test Cache root level dictionary update via apply_changes | dls-controls/pymalcolm,dls-controls/pymalcolm,dls-controls/pymalcolm | tests/test_core/test_cache.py | tests/test_core/test_cache.py | import os
import sys
import unittest
from collections import OrderedDict
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
# import logging
# logging.basicConfig(level=logging.DEBUG)
import setup_malcolm_paths
from mock import MagicMock
# module imports
from malcolm.core.cache import Cache
class TestProcess(unittest.TestCase):
def test_addition(self):
c = Cache()
c.apply_changes([["thing"], {1: 2}])
self.assertEqual(c["thing"][1], 2)
def test_deletion(self):
c = Cache()
c["path"] = 2
c.apply_changes([["path"]])
self.assertEqual(list(c), [])
def test_change(self):
c = Cache()
c[1] = 3
c.apply_changes([["path"], 4])
self.assertEqual(c["path"], 4)
def test_cache_update(self):
c = Cache()
c["path"] = 2
c.apply_changes([[], {123:"test"}])
self.assertEqual("test", c[123])
with self.assertRaises(KeyError):
c["path"]
def test_non_string_path_errors(self):
c = Cache()
self.assertRaises(AssertionError, c.apply_changes, [[1], 3])
def test_walk_path(self):
c = Cache()
c[1] = {2: {3: "end"}}
walked = c.walk_path([1, 2, 3])
self.assertEqual(walked, "end")
if __name__ == "__main__":
unittest.main(verbosity=2)
| import os
import sys
import unittest
from collections import OrderedDict
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
# import logging
# logging.basicConfig(level=logging.DEBUG)
import setup_malcolm_paths
from mock import MagicMock
# module imports
from malcolm.core.cache import Cache
class TestProcess(unittest.TestCase):
def test_addition(self):
c = Cache()
c.apply_changes([["thing"], {1: 2}])
self.assertEqual(c["thing"][1], 2)
def test_deletion(self):
c = Cache()
c["path"] = 2
c.apply_changes([["path"]])
self.assertEqual(list(c), [])
def test_change(self):
c = Cache()
c[1] = 3
c.apply_changes([["path"], 4])
self.assertEqual(c["path"], 4)
def test_non_string_path_errors(self):
c = Cache()
self.assertRaises(AssertionError, c.apply_changes, [[1], 3])
def test_walk_path(self):
c = Cache()
c[1] = {2: {3: "end"}}
walked = c.walk_path([1, 2, 3])
self.assertEqual(walked, "end")
if __name__ == "__main__":
unittest.main()
| apache-2.0 | Python |
5feb6b02755ce2958ed57a3cef13a10e2e04bc3f | simplify self.users | ecreall/dace | dace/testing.py | dace/testing.py | import unittest
from pyramid.config import Configurator
from pyramid.testing import DummyRequest
from pyramid import testing
from substanced.db import root_factory
from substanced.principal import User
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
config = Configurator(settings=settings, root_factory=root_factory)
return config.make_wsgi_app()
class FunctionalTests(unittest.TestCase):
def setUp(self):
import tempfile
import os.path
self.tmpdir = tempfile.mkdtemp()
dbpath = os.path.join( self.tmpdir, 'test.db')
uri = 'file://' + dbpath
settings = {'zodbconn.uri': uri,
'substanced.secret': 'sosecret',
'substanced.initial_login': 'admin',
'substanced.initial_password': 'admin',
'pyramid.includes': [
'pyramid_tm',
'substanced',
'dace',
]}
app = main({}, **settings)
self.db = app.registry._zodb_databases['']
self.request = request = DummyRequest()
self.config = testing.setUp(registry=app.registry, request=request)
self.registry = self.config.registry
self.app = root_factory(request)
request.root = self.app
self.users = self.app['principals']['users']
alice = User(password='alice', email='alice@example.com')
self.users['alice'] = alice
request.user = self.users['admin']
# from webtest import TestApp
# self.testapp = TestApp(app)
def tearDown(self):
import shutil
testing.tearDown()
self.db.close()
shutil.rmtree(self.tmpdir)
| import unittest
from pyramid.config import Configurator
from pyramid.testing import DummyRequest
from pyramid import testing
from substanced.db import root_factory
from substanced.principal import User
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
config = Configurator(settings=settings, root_factory=root_factory)
return config.make_wsgi_app()
class FunctionalTests(unittest.TestCase):
def setUp(self):
import tempfile
import os.path
self.tmpdir = tempfile.mkdtemp()
dbpath = os.path.join( self.tmpdir, 'test.db')
uri = 'file://' + dbpath
settings = {'zodbconn.uri': uri,
'substanced.secret': 'sosecret',
'substanced.initial_login': 'admin',
'substanced.initial_password': 'admin',
'pyramid.includes': [
'pyramid_tm',
'substanced',
'dace',
]}
app = main({}, **settings)
self.db = app.registry._zodb_databases['']
self.request = request = DummyRequest()
self.config = testing.setUp(registry=app.registry, request=request)
self.registry = self.config.registry
self.app = root_factory(request)
request.root = self.app
self.users = {}
self.users['admin'] = self.app.data['principals'].data['users'].data['admin']
alice = User( password='alice', email='alice@test.test')
self.app.data['principals'].data['users']['alice'] = alice
self.users['alice'] = alice
request.user = self.users['admin']
# from webtest import TestApp
# self.testapp = TestApp(app)
def tearDown(self):
import shutil
testing.tearDown()
self.db.close()
shutil.rmtree(self.tmpdir)
| agpl-3.0 | Python |
6ac987a608a35ab6109a61ac543092bb9d3ac102 | Remove log entry | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/utils/win_powershell.py | salt/utils/win_powershell.py | # -*- coding: utf-8 -*-
'''
Common functions for working with powershell
'''
# Import Python libs
from __future__ import absolute_import
import logging
import os
log = logging.getLogger(__name__)
__virtualname__ = 'powershell'
def __virtual__():
'''
Load only on Windows
'''
if not salt.utils.is_windows():
return (False, 'Failed to load win_powershell: '
'The utility only works on Windows systems.')
return __virtualname__
def module_exists(name):
'''
See if a module is installed
Look in paths specified in PSModulePath environment variable
Use this utility instead of attempting to import the module with powershell.
Using powershell to try to import the module is expensive.
Args:
name (str):
The name of the module to check
Returns:
bool: True if present, otherwise returns False
Example:
.. code-block:: python
import salt.utils.win_powershell
exists = salt.utils.win_powershell.module_exists('ServerManager')
'''
ps_module_paths = os.environ['PSModulePath']
for path in ps_module_paths.split(';'):
mod_path = '\\'.join([path, name, ''.join([name, '.psd1'])])
if os.path.isfile(mod_path):
return True
return False
| # -*- coding: utf-8 -*-
'''
Common functions for working with powershell
'''
# Import Python libs
from __future__ import absolute_import
import logging
import os
log = logging.getLogger(__name__)
__virtualname__ = 'powershell'
def __virtual__():
'''
Load only on Windows
'''
if not salt.utils.is_windows():
return (False, 'Failed to load win_powershell: '
'The utility only works on Windows systems.')
return __virtualname__
def module_exists(name):
'''
See if a module is installed
Look in paths specified in PSModulePath environment variable
Use this utility instead of attempting to import the module with powershell.
Using powershell to try to import the module is expensive.
Args:
name (str):
The name of the module to check
Returns:
bool: True if present, otherwise returns False
Example:
.. code-block:: python
import salt.utils.win_powershell
exists = salt.utils.win_powershell.module_exists('ServerManager')
'''
ps_module_paths = os.environ['PSModulePath']
for path in ps_module_paths.split(';'):
mod_path = '\\'.join([path, name, ''.join([name, '.psd1'])])
if os.path.isfile(mod_path):
return True
log.debug('Powershell Module {0} not installed'.format(name))
return False
| apache-2.0 | Python |
ee54c907c79a7f71072ad40bdc278f9ee48eb77a | Test now usesdownload_testdata() | castelao/pyARGO | tests/test_profile_from_nc.py | tests/test_profile_from_nc.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from argo.argo import profile_from_nc
from argo.utils import download_testdata
datafile = download_testdata('20150127_prof.nc')
def validate_profile(p):
assert hasattr(p, 'keys')
assert hasattr(p, 'attributes')
assert 'datetime' in p.attributes
def test_extract_1_profile():
pp = profile_from_nc(datafile, 0)
assert len(pp) == 1
for p in pp:
validate_profile(p)
def test_extract_list_profiles():
pp = profile_from_nc(datafile, [0, 2, 5])
assert len(pp) == 3
for p in pp:
validate_profile(p)
def test_extract_all_profiles():
pp = profile_from_nc(datafile)
assert len(pp) > 1
for p in pp:
validate_profile(p)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from argo.argo import profile_from_nc
def validate_profile(p):
assert hasattr(p, 'keys')
assert hasattr(p, 'attributes')
assert 'datetime' in p.attributes
def test_extract_1_profile():
pp = profile_from_nc('test_data/20150127_prof.nc', 0)
assert len(pp) == 1
for p in pp:
validate_profile(p)
def test_extract_list_profiles():
pp = profile_from_nc('test_data/20150127_prof.nc', [0, 2, 5])
assert len(pp) == 3
for p in pp:
validate_profile(p)
def test_extract_all_profiles():
pp = profile_from_nc('test_data/20150127_prof.nc')
assert len(pp) > 1
for p in pp:
validate_profile(p)
| bsd-3-clause | Python |
4080dd6bb3e0a0ba5e5bf34229a000a1e3925cfe | Use asynchronous calls in Strip tool | fluendo/cerbero,fluendo/cerbero,fluendo/cerbero,fluendo/cerbero,fluendo/cerbero | cerbero/tools/strip.py | cerbero/tools/strip.py | #!/usr/bin/env python3
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import asyncio
from cerbero.config import Platform
from cerbero.utils import shell, messages as m
class Strip(object):
'''Wrapper for the strip tool'''
def __init__(self, config, excludes=None, keep_symbols=None):
self.config = config
self.excludes = excludes or []
self.keep_symbols = keep_symbols or []
self.strip_cmd = os.environ['STRIP']
async def async_strip_file(self, path):
if not self.strip_cmd:
m.warning('Strip command is not defined')
return
for f in self.excludes:
if f in path:
return
if self.config.target_platform == Platform.DARWIN:
cmd = [self.strip_cmd, '-x', path]
else:
cmd = [self.strip_cmd]
for symbol in self.keep_symbols:
cmd += ['-K', symbol]
cmd += ['--strip-unneeded', path]
try:
await shell.async_call(cmd)
except Exception as e:
m.warning(e)
def strip_file(self, path):
loop = asyncio.get_event_loop()
loop.run_until_complete(self.async_strip_file(path))
def strip_dir(self, dir_path):
if not self.strip_cmd:
m.warning('Strip command is not defined')
return
tasks = []
for dirpath, dirnames, filenames in os.walk(dir_path):
for f in filenames:
tasks.append(self.async_strip_file(os.path.join(dirpath, f)))
shell.run_until_complete(tasks)
| #!/usr/bin/env python3
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
from cerbero.config import Platform
from cerbero.utils import shell
class Strip(object):
'''Wrapper for the strip tool'''
STRIP_CMD = '$STRIP'
def __init__(self, config, excludes=None, keep_symbols=None):
self.config = config
self.excludes = excludes or []
self.keep_symbols = keep_symbols or []
def strip_file(self, path):
for f in self.excludes:
if f in path:
return
try:
if self.config.target_platform == Platform.DARWIN:
shell.call("%s -x %s" % (self.STRIP_CMD, path))
else:
shell.call("%s %s --strip-unneeded %s" % (self.STRIP_CMD,
' '.join(['-K %s' % x for x in self.keep_symbols]), path))
except:
pass
def strip_dir(self, dir_path):
for dirpath, dirnames, filenames in os.walk(dir_path):
for f in filenames:
self.strip_file(os.path.join(dirpath, f))
| lgpl-2.1 | Python |
d4cfd743b235e0113618afa2415c807ea3485498 | Add test functionality to mysql_database state | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/states/mysql_database.py | salt/states/mysql_database.py | '''
MySQL Database Management
=========================
The mysql_database module is used to create and manage MySQL databases, databases can be set
as either absent or present
.. code-block:: yaml
frank:
mysql_database:
- present
'''
def present(name):
'''
Ensure that the named database is present with the specified properties
name
The name of the database to manage
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Database {0} is already present'.format(name)}
# check if database exists
if __salt__['mysql.db_exists'](name):
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = ('Database {0} is not present and needs to be created'
).format(name)
return ret
# The database is not present, make it!
if __salt__['mysql.db_create'](name):
ret['comment'] = 'The database {0} has been created'.format(name)
ret['changes'][name] = 'Present'
else:
ret['comment'] = 'Failed to create database {0}'.format(name)
ret['result'] = False
return ret
def absent(name):
'''
Ensure that the named database is absent
name
The name of the database to remove
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
#check if db exists and remove it
if __salt__['mysql.db_exists'](name):
if __opts__['test']:
ret['result'] = None
ret['comment'] = ('Database {0} is present and needs to be removed'
).format(name)
return ret
if __salt__['mysql.db_remove'](name):
ret['comment'] = 'Database {0} has been removed'.format(name)
ret['changes'][name] = 'Absent'
return ret
# fallback
ret['comment'] = ('Database {0} is not present, so it cannot be removed'
).format(name)
return ret
| '''
MySQL Database Management
=========================
The mysql_database module is used to create and manage MySQL databases, databases can be set
as either absent or present
.. code-block:: yaml
frank:
mysql_database:
- present
'''
def present(name):
'''
Ensure that the named database is present with the specified properties
name
The name of the database to manage
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Database {0} is already present'.format(name)}
# check if database exists
if __salt__['mysql.db_exists'](name):
return ret
# The database is not present, make it!
if __salt__['mysql.db_create'](name):
ret['comment'] = 'The database {0} has been created'.format(name)
ret['changes'][name] = 'Present'
else:
ret['comment'] = 'Failed to create database {0}'.format(name)
ret['result'] = False
return ret
def absent(name):
'''
Ensure that the named database is absent
name
The name of the database to remove
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
#check if db exists and remove it
if __salt__['mysql.db_exists'](name):
if __salt__['mysql.db_remove'](name):
ret['comment'] = 'Database {0} has been removed'.format(name)
ret['changes'][name] = 'Absent'
return ret
# fallback
ret['comment'] = 'Database {0} is not present, so it cannot be removed'.format(name)
return ret
| apache-2.0 | Python |
028f997970854e38238bb91b3c6e24a622ad4cd0 | Fix session constructors for gae_login. | google/ctfscoreboard,google/ctfscoreboard,google/ctfscoreboard,google/ctfscoreboard | scoreboard/auth/appengine.py | scoreboard/auth/appengine.py | """Appengine based login support."""
import flask
from google.appengine.api import users
from scoreboard.app import app
from scoreboard import controllers
from scoreboard import errors
from scoreboard import models
from scoreboard import utils
def login_user(_):
"""Login based on GAE Auth."""
gae_user = users.get_current_user()
if not gae_user:
return None
user = models.User.get_by_email(gae_user.email())
if user and flask.request:
user.last_login_ip = flask.request.remote_addr
models.db.session.commit()
return user
def get_login_uri():
return users.create_login_url('/gae_login')
def get_register_uri():
if not users.get_current_user():
return users.create_login_url('/register')
return '/register'
def logout():
pass
def register(flask_request):
gae_user = users.get_current_user()
if not gae_user:
raise errors.LoginError(
'Cannot register if not logged into AppEngine.')
data = flask_request.get_json()
user = controllers.register_user(gae_user.email(), data['nick'], '',
data.get('team_id'), data.get('team_name'), data.get('team_code'))
if users.is_current_user_admin():
user.promote()
return user
@app.route('/gae_login')
def gae_login_handler():
user = login_user(None)
gae_user = users.get_current_user()
if gae_user and not user:
app.logger.info('No user found for user %s' % gae_user.email())
return flask.redirect('/register')
elif not user:
app.logger.error('No user found and not logged in.')
return flask.redirect(get_register_uri())
utils.session_for_user(user)
return flask.redirect('/')
| """Appengine based login support."""
import flask
from google.appengine.api import users
from scoreboard.app import app
from scoreboard import controllers
from scoreboard import errors
from scoreboard import models
def login_user(_):
"""Login based on GAE Auth."""
gae_user = users.get_current_user()
if not gae_user:
return None
user = models.User.get_by_email(gae_user.email())
if user and flask.request:
user.last_login_ip = flask.request.remote_addr
models.db.session.commit()
return user
def get_login_uri():
return users.create_login_url('/gae_login')
def get_register_uri():
if not users.get_current_user():
return users.create_login_url('/register')
return '/register'
def logout():
pass
def register(flask_request):
gae_user = users.get_current_user()
if not gae_user:
raise errors.LoginError(
'Cannot register if not logged into AppEngine.')
data = flask_request.get_json()
user = controllers.register_user(gae_user.email(), data['nick'], '',
data.get('team_id'), data.get('team_name'), data.get('team_code'))
if users.is_current_user_admin():
user.promote()
return user
@app.route('/gae_login')
def gae_login_handler():
user = login_user(None)
gae_user = users.get_current_user()
if gae_user and not user:
app.logger.info('No user found for user %s' % gae_user.email())
return flask.redirect('/register')
elif not user:
app.logger.error('No user found and not logged in.')
return flask.redirect(get_register_uri())
flask.session['user'] = user.uid
return flask.redirect('/')
| apache-2.0 | Python |
07658324cb59eba77f5e807bd1b0dca8e7ed4548 | Fix format_infirmier | regardscitoyens/sunshine-data,regardscitoyens/sunshine-data,regardscitoyens/sunshine-data,regardscitoyens/sunshine-data,regardscitoyens/sunshine-data,regardscitoyens/sunshine-data | scripts/format_infirmiers.py | scripts/format_infirmiers.py | # -*- coding: utf-8 -*-
import pandas as pd
import sys
from utils import find_zipcode, euro2float, str2date
header_mapping = {
'ORIGIN': 'ORIGIN',
'LABO': 'LABO',
'QUALITE_NOM_PRENOM': 'BENEF_PS_QUALITE_NOM_PRENOM',
'ADRESSE': 'BENEF_PS_ADR',
'QUALIFICATION': 'BENEF_PS_QUALIFICATION',
'MONTANT_AVANTAGE': 'DECL_AVANT_MONTANT',
'DATE_AVANTAGE': 'DECL_AVANT_DATE',
'NATURE_AVANTAGE': 'DECL_AVANT_NATURE',
'SPECIALITE': 'BENEF_PS_SPECIALITE',
'NUMERO_ORDINAL': 'BENEF_PS_RPPS',
'DATE_SIGNATURE_CONVENTION': 'DECL_CONV_DATE',
'OBJET': 'DECL_CONV_OBJET',
'PROGRAMME': 'DECL_CONV_PROGRAMME',
'BENEF_PS_CODEPOSTAL': 'BENEF_PS_CODEPOSTAL'
}
input_filename = sys.argv[1]
output_filename = sys.argv[2]
df = pd.read_csv(input_filename, sep=';', encoding='utf-8')
df['MONTANT_AVANTAGE'] = df['MONTANT_AVANTAGE'].apply(euro2float)
df['DATE_AVANTAGE'] = df['DATE_AVANTAGE'].apply(str2date)
df['DATE_SIGNATURE_CONVENTION'] = df['DATE_SIGNATURE_CONVENTION'].apply(str2date)
df['QUALITE_NOM_PRENOM'] = df['QUALITE'] + ' ' + df['NOM'] + ' ' + df['PRENOM']
df['QUALITE_NOM_PRENOM'] = df['QUALITE_NOM_PRENOM'].str.replace(',', '-')
df['ORIGIN'] = 'Infirmier'
df['ADRESSE'] = df['ADRESSE'].str.replace(',', '-')
df['OBJET'] = df['OBJET'].str.replace(',', '-')
df['PROGRAMME'] = df['PROGRAMME'].str.replace(',', '-')
df['BENEF_PS_CODEPOSTAL'] = df['ADRESSE'].apply(find_zipcode)
for origin, target in header_mapping.items():
df[target] = df[origin]
df[header_mapping.values()].to_csv(output_filename, index=False, encoding='utf-8')
| # -*- coding: utf-8 -*-
import pandas as pd
import sys
from utils import find_zipcode, euro2float, str2date
header_mapping = {
'ORIGIN': 'ORIGIN',
'LABO': 'LABO',
'QUALITE_NOM_PRENOM': 'BENEF_PS_QUALITE_NOM_PRENOM',
'ADRESSE': 'BENEF_PS_ADR',
'QUALIFICATION': 'BENEF_PS_QUALIFICATION',
'MONTANT_AVANTAGE': 'DECL_AVANT_MONTANT',
'DATE_AVANTAGE': 'DECL_AVANT_DATE',
'NATURE_AVANTAGE': 'DECL_AVANT_NATURE',
'SPECIALITE': 'BENEF_PS_SPECIALITE',
'NUMERO_ORDINAL': 'BENEF_PS_RPPS',
'DATE_SIGNATURE_CONVENTION': 'DECL_CONV_DATE',
'OBJET': 'DECL_CONV_OBJET',
'PROGRAMME': 'DECL_CONV_PROGRAMME',
'BENEF_PS_CODEPOSTAL': 'BENEF_PS_CODEPOSTAL'
}
input_filename = sys.argv[1]
output_filename = sys.argv[2]
df = pd.read_csv(input_filename, sep=';', encoding='utf-8')
df['MONTANT_AVANTAGE'] = df['MONTANT_AVANTAGE'].apply(euro2float)
df['DATE_AVANTAGE'] = df['DATE_AVANTAGE'].apply(str2date)
df['DATE_SIGNATURE_CONVENTION'] = df['DATE_SIGNATURE_CONVENTION'].apply(str2date)
df['QUALITE_NOM_PRENOM'] = df['QUALITE'] + ' ' + df['NOM'] + ' ' + df['PRENOM']
df['QUALITE_NOM_PRENOM'] = df['QUALITE_NOM_PRENOM'].str.replace(',', '-')
df['ORIGIN'] = 'Infirmier'
df['ADRESSE'] = df['ADRESSE'].str.replace(',', '-')
df['OBJET'] = df['OBJET'].str.replace(',', '-')
df['PROGRAMME'] = df['PROGRAMME'].str.replace(',', '-')
df['BENEF_PS_CODEPOSTAL'] = df['ADRESSE'].apply(find_zipcode)
df['DECL_AVANT_NATURE'] = df['DECL_AVANT_NATURE'].str.replace(',', '-')
for origin, target in header_mapping.items():
df[target] = df[origin]
df[header_mapping.values()].to_csv(output_filename, index=False, encoding='utf-8')
| agpl-3.0 | Python |
1f5d65024df8f0e0b70e75dbac071e72bd4ad29b | Update datacite Cannonical uri harvesting to return a single uri instead of a list | mehanig/scrapi,erinspace/scrapi,jeffreyliu3230/scrapi,felliott/scrapi,CenterForOpenScience/scrapi,erinspace/scrapi,fabianvf/scrapi,alexgarciac/scrapi,CenterForOpenScience/scrapi,felliott/scrapi,mehanig/scrapi,fabianvf/scrapi | scrapi/harvesters/datacite.py | scrapi/harvesters/datacite.py | '''
Harvester for the DataCite MDS for the SHARE project
Example API call: http://oai.datacite.org/oai?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
from scrapi.base.helpers import updated_schema, oai_extract_dois
class DataciteHarvester(OAIHarvester):
short_name = 'datacite'
long_name = 'DataCite MDS'
url = 'http://oai.datacite.org/oai'
base_url = 'http://oai.datacite.org/oai'
property_list = ['date', 'identifier', 'setSpec', 'description']
timezone_granularity = True
@property
def schema(self):
return updated_schema(self._schema, {
"description": ("//dc:description/node()", get_second_description),
"uris": {
"canonicalUri": ('//dc:identifier/node()', lambda x: oai_extract_dois(x)[0]),
"objectUris": ('//dc:identifier/node()', oai_extract_dois)
}
})
def get_second_description(descriptions):
''' In the DataCite OAI PMH api, there are often 2 descriptions: A type and
a longer kind of abstract. If there are two options, pick the second one which
is almost always the longer abstract
'''
if descriptions:
if len(descriptions) > 1:
return descriptions[1]
else:
return descriptions[0]
return ''
| '''
Harvester for the DataCite MDS for the SHARE project
Example API call: http://oai.datacite.org/oai?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
from scrapi.base.helpers import updated_schema, oai_extract_dois
class DataciteHarvester(OAIHarvester):
short_name = 'datacite'
long_name = 'DataCite MDS'
url = 'http://oai.datacite.org/oai'
base_url = 'http://oai.datacite.org/oai'
property_list = ['date', 'identifier', 'setSpec', 'description']
timezone_granularity = True
@property
def schema(self):
return updated_schema(self._schema, {
"description": ("//dc:description/node()", get_second_description),
"uris": {
"canonicalUri": ('//dc:identifier/node()', oai_extract_dois),
"objectUris": ('//dc:identifier/node()', oai_extract_dois)
}
})
def get_second_description(descriptions):
''' In the DataCite OAI PMH api, there are often 2 descriptions: A type and
a longer kind of abstract. If there are two options, pick the second one which
is almost always the longer abstract
'''
if descriptions:
if len(descriptions) > 1:
return descriptions[1]
else:
return descriptions[0]
return ''
| apache-2.0 | Python |
8a042586f1f446ccf4e4779e49783cf94eba3fb7 | Migrate sensor to async (#4663) | Cinntax/home-assistant,ct-23/home-assistant,rohitranjan1991/home-assistant,dmeulen/home-assistant,tinloaf/home-assistant,mezz64/home-assistant,tboyce021/home-assistant,toddeye/home-assistant,nkgilley/home-assistant,pschmitt/home-assistant,eagleamon/home-assistant,tboyce1/home-assistant,molobrakos/home-assistant,qedi-r/home-assistant,ewandor/home-assistant,stefan-jonasson/home-assistant,Teagan42/home-assistant,MungoRae/home-assistant,postlund/home-assistant,MungoRae/home-assistant,titilambert/home-assistant,keerts/home-assistant,kyvinh/home-assistant,leppa/home-assistant,ct-23/home-assistant,morphis/home-assistant,JshWright/home-assistant,nugget/home-assistant,LinuxChristian/home-assistant,tinloaf/home-assistant,alexmogavero/home-assistant,shaftoe/home-assistant,dmeulen/home-assistant,mKeRix/home-assistant,persandstrom/home-assistant,shaftoe/home-assistant,LinuxChristian/home-assistant,kyvinh/home-assistant,jabesq/home-assistant,miniconfig/home-assistant,morphis/home-assistant,robjohnson189/home-assistant,jamespcole/home-assistant,kyvinh/home-assistant,robjohnson189/home-assistant,morphis/home-assistant,Duoxilian/home-assistant,ma314smith/home-assistant,miniconfig/home-assistant,jawilson/home-assistant,toddeye/home-assistant,tboyce1/home-assistant,ewandor/home-assistant,miniconfig/home-assistant,florianholzapfel/home-assistant,kyvinh/home-assistant,HydrelioxGitHub/home-assistant,nugget/home-assistant,open-homeautomation/home-assistant,happyleavesaoc/home-assistant,tchellomello/home-assistant,LinuxChristian/home-assistant,tboyce1/home-assistant,fbradyirl/home-assistant,turbokongen/home-assistant,MungoRae/home-assistant,tinloaf/home-assistant,DavidLP/home-assistant,JshWright/home-assistant,jamespcole/home-assistant,sdague/home-assistant,jamespcole/home-assistant,LinuxChristian/home-assistant,keerts/home-assistant,alexmogavero/home-assistant,leppa/home-assistant,mezz64/home-assistant,dmeulen/home-assistant,GenericStudent/home-assistant,stefan-jonasson/home-assistant,Zac-HD/home-assistant,GenericStudent/home-assistant,balloob/home-assistant,HydrelioxGitHub/home-assistant,molobrakos/home-assistant,sander76/home-assistant,ma314smith/home-assistant,sander76/home-assistant,ma314smith/home-assistant,xifle/home-assistant,w1ll1am23/home-assistant,postlund/home-assistant,balloob/home-assistant,Danielhiversen/home-assistant,nugget/home-assistant,keerts/home-assistant,tboyce1/home-assistant,LinuxChristian/home-assistant,happyleavesaoc/home-assistant,MartinHjelmare/home-assistant,open-homeautomation/home-assistant,FreekingDean/home-assistant,auduny/home-assistant,sdague/home-assistant,Duoxilian/home-assistant,ma314smith/home-assistant,jawilson/home-assistant,ct-23/home-assistant,nkgilley/home-assistant,MartinHjelmare/home-assistant,morphis/home-assistant,mKeRix/home-assistant,aronsky/home-assistant,kennedyshead/home-assistant,persandstrom/home-assistant,soldag/home-assistant,fbradyirl/home-assistant,qedi-r/home-assistant,alexmogavero/home-assistant,eagleamon/home-assistant,PetePriority/home-assistant,florianholzapfel/home-assistant,open-homeautomation/home-assistant,HydrelioxGitHub/home-assistant,aequitas/home-assistant,happyleavesaoc/home-assistant,keerts/home-assistant,xifle/home-assistant,robbiet480/home-assistant,alexmogavero/home-assistant,FreekingDean/home-assistant,PetePriority/home-assistant,PetePriority/home-assistant,partofthething/home-assistant,robbiet480/home-assistant,dmeulen/home-assistant,jnewland/home-assistant,rohitranjan1991/home-assistant,pschmitt/home-assistant,mKeRix/home-assistant,MungoRae/home-assistant,ct-23/home-assistant,home-assistant/home-assistant,turbokongen/home-assistant,open-homeautomation/home-assistant,molobrakos/home-assistant,robjohnson189/home-assistant,lukas-hetzenecker/home-assistant,tboyce021/home-assistant,Duoxilian/home-assistant,florianholzapfel/home-assistant,auduny/home-assistant,stefan-jonasson/home-assistant,miniconfig/home-assistant,JshWright/home-assistant,DavidLP/home-assistant,Cinntax/home-assistant,xifle/home-assistant,Zac-HD/home-assistant,balloob/home-assistant,w1ll1am23/home-assistant,adrienbrault/home-assistant,auduny/home-assistant,Zac-HD/home-assistant,eagleamon/home-assistant,joopert/home-assistant,jabesq/home-assistant,shaftoe/home-assistant,xifle/home-assistant,eagleamon/home-assistant,Duoxilian/home-assistant,jabesq/home-assistant,jnewland/home-assistant,ewandor/home-assistant,aronsky/home-assistant,happyleavesaoc/home-assistant,jnewland/home-assistant,mKeRix/home-assistant,MungoRae/home-assistant,florianholzapfel/home-assistant,tchellomello/home-assistant,shaftoe/home-assistant,robjohnson189/home-assistant,Zac-HD/home-assistant,titilambert/home-assistant,kennedyshead/home-assistant,adrienbrault/home-assistant,MartinHjelmare/home-assistant,JshWright/home-assistant,rohitranjan1991/home-assistant,home-assistant/home-assistant,aequitas/home-assistant,persandstrom/home-assistant,aequitas/home-assistant,fbradyirl/home-assistant,DavidLP/home-assistant,joopert/home-assistant,stefan-jonasson/home-assistant,ct-23/home-assistant,soldag/home-assistant,Danielhiversen/home-assistant,lukas-hetzenecker/home-assistant,Teagan42/home-assistant,partofthething/home-assistant | homeassistant/components/sensor/__init__.py | homeassistant/components/sensor/__init__.py | """
Component to interface with various sensors that can be monitored.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/sensor/
"""
import asyncio
import logging
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
DOMAIN = 'sensor'
SCAN_INTERVAL = 30
ENTITY_ID_FORMAT = DOMAIN + '.{}'
@asyncio.coroutine
def async_setup(hass, config):
"""Track states and offer events for sensors."""
component = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL)
yield from component.async_setup(config)
return True
| """
Component to interface with various sensors that can be monitored.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/sensor/
"""
import logging
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
DOMAIN = 'sensor'
SCAN_INTERVAL = 30
ENTITY_ID_FORMAT = DOMAIN + '.{}'
def setup(hass, config):
"""Track states and offer events for sensors."""
component = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL)
component.setup(config)
return True
| apache-2.0 | Python |
7310c2ce4b8ccd69374a85877c2df97a2b6ade70 | Add _fields cache Change _update to _apply and add option for non-required fields | limbera/django-nap,MarkusH/django-nap | nap/dataviews/views.py | nap/dataviews/views.py |
from collections import defaultdict
from inspect import classify_class_attrs
from django.forms import ValidationError
from django.utils.functional import cached_property
from .fields import field
from .utils import DictObject
class DataView(object):
def __init__(self, obj=None, **kwargs):
if obj is None:
obj = DictObject()
self._obj = obj
self._kwargs = kwargs
@cached_property
def _fields(self):
return {
name: prop
for name, kind, cls, prop in classify_class_attrs(self.__class__)
if isinstance(prop, field)
}
@cached_property
def _field_names(self):
return tuple(self._fields.keys())
def _reduce(self):
'''
Reduce our instance to its serialisable state.
Returns a dict.
'''
return {
name: getattr(self, name)
for name in self._field_names
}
def _apply(self, data, update=False):
'''
Update an instance from supplied data.
If update is False, all fields not tagged as ._required=False MUST be
supplied in the data dict.
'''
errors = defaultdict(list)
for name in self._field_names:
try:
setattr(self, name, data[name])
except KeyError:
if self.update:
pass
if getattr(self._fields[name], '_required', True):
errors[name].append(
ValidationError('This field is required')
)
except ValidationError as e:
errors[name].append(e.message)
self._errors = dict(errors)
if errors:
raise ValidationError(self._errors)
return self._obj
|
from collections import defaultdict
from inspect import classify_class_attrs
from django.forms import ValidationError
from django.utils.functional import cached_property
from .fields import field
from .utils import DictObject
class DataView(object):
def __init__(self, obj=None, **kwargs):
if obj is None:
obj = DictObject()
self._obj = obj
self._kwargs = kwargs
@cached_property
def _field_names(self):
return tuple(
name
for name, kind, cls, prop in classify_class_attrs(self.__class__)
if isinstance(prop, field)
)
def _reduce(self):
'''
Reduce our instance to its serialisable state.
Returns a dict.
'''
return {
name: getattr(self, name)
for name in self._field_names
}
def _update(self, data):
'''
Update an instance from supplied data.
'''
errors = defaultdict(list)
for name in self._field_names:
if name in data:
try:
setattr(self, name, data[name])
except ValidationError as e:
errors[name].append(e.message)
self._errors = dict(errors)
if errors:
raise ValidationError(self._errors)
return self._obj
| bsd-3-clause | Python |
ee60d49f57c450a56579f8bbe2c4382a93f60f38 | Fix test_utf8 for Python 3. | aherlihy/Monary,aherlihy/Monary | test/test_utf8.py | test/test_utf8.py | # -*- coding: utf-8 -*-
# Monary - Copyright 2011-2014 David J. C. Beach
# Please see the included LICENSE.TXT and NOTICE.TXT for licensing information.
import sys
import pymongo
import monary
expected = ["aあ", "âéÇ", "αλΩ", "眥¨≠"]
if sys.version_info[0] < 3:
# Python 2: convert from str to unicode.
expected = [s.decode('utf-8') for s in expected]
def setup():
with pymongo.MongoClient() as c:
c.drop_database("monary_test")
c.monary_test.data.insert(
{"test": my_str, "sequence": i}
for i, my_str in enumerate(expected))
def teardown():
with pymongo.MongoClient() as c:
c.drop_database("monary_test")
def test_utf8():
with monary.Monary("127.0.0.1") as m:
data, lens, sizes = m.query(
"monary_test", "data", {}, ["test", "test", "test"],
["string:12", "length", "size"], sort="sequence")
assert (lens < sizes).all()
for monary_bytes, monary_len, expected_str, in zip(data, lens, expected):
monary_str = monary_bytes.decode('utf8')
# We got the same string out from Monary as we inserted w/ PyMongo.
assert monary_str == expected_str
# Monary's idea of "length" == len(string).
assert monary_len == len(expected_str)
| # -*- coding: utf-8 -*-
# Monary - Copyright 2011-2014 David J. C. Beach
# Please see the included LICENSE.TXT and NOTICE.TXT for licensing information.
import pymongo
import monary
expected = ["aあ".decode('utf-8'),
"âéÇ".decode('utf-8'),
"αλΩ".decode('utf-8'),
"眥¨≠".decode('utf-8')]
def setup():
with pymongo.MongoClient() as c:
c.drop_database("monary_test")
c.monary_test.data.insert(
{"test": my_str, "sequence": i}
for i, my_str in enumerate(expected))
def teardown():
with pymongo.MongoClient() as c:
c.drop_database("monary_test")
def test_utf8():
with monary.Monary("127.0.0.1") as m:
data, lens, sizes = m.query(
"monary_test", "data", {}, ["test", "test", "test"],
["string:12", "length", "size"], sort="sequence")
assert (lens < sizes).all()
for monary_bytes, monary_len, expected_str, in zip(data, lens, expected):
monary_str = monary_bytes.decode('utf8')
# We got the same string out from Monary as we inserted w/ PyMongo.
assert monary_str == expected_str
# Monary's idea of "length" == len(string).
assert monary_len == len(expected_str)
| apache-2.0 | Python |
2f5918a02f7c1a4d6ccb3db01cf6d79d6aebeb76 | test on template - old way of testing home page removed | mbdebian/py-playground,mbdebian/py-playground | tdd-python/from_videos/superlists/lists/tests.py | tdd-python/from_videos/superlists/lists/tests.py | from django.http import HttpRequest
from django.test import TestCase
from lists.views import home_page
# Create your tests here.
class HomePageTet(TestCase):
def test_home_page_is_about_todo_lists(self):
request = HttpRequest()
response = home_page(request)
with open('lists/templates/home.html') as f:
expected_content = f.read()
self.assertEqual(response.content.decode(), expected_content)
| from django.http import HttpRequest
from django.test import TestCase
from lists.views import home_page
# Create your tests here.
class HomePageTet(TestCase):
def test_home_page_is_about_todo_lists(self):
request = HttpRequest()
response = home_page(request)
self.assertTrue(response.content.startswith(b'<html>'))
self.assertIn(b'<title>To-Do Lists</title>', response.content)
self.assertTrue(response.content.strip().endswith(b'</html>'))
with open('lists/templates/home.html') as f:
expected_content = f.read()
self.assertEqual(response.content.decode(), expected_content)
| apache-2.0 | Python |
5e5affd62d9774eb6af23e5f5fa63e9aeb0f817f | fix attention flop | undertherain/benchmarker,undertherain/benchmarker,undertherain/benchmarker,undertherain/benchmarker | benchmarker/modules/problems/attention/pytorch.py | benchmarker/modules/problems/attention/pytorch.py | import torch.nn as nn
class Net(nn.MultiheadAttention):
def forward(self, data):
super().forward(data, data, data)
def get_kernel(params):
assert params["mode"] == "inference"
cnt_samples = params["problem"]["size"][0]
len_seq = params["problem"]["size"][1]
embed_dim = params["problem"]["size"][2]
cnt_projections = 4
#ops_proj = 2 * cnt_samples * len_seq * embed_dim * cnt_projections
ops_proj = 2 * cnt_samples * len_seq * embed_dim * embed_dim * cnt_projections
ops_Q_K = 2 * cnt_samples * len_seq * len_seq * embed_dim
ops_Q_Kt_V = ops_Q_K
params["problem"]["gflop_estimated"] = ((ops_proj + ops_Q_K + ops_Q_Kt_V) * params["nb_epoch"]) / (10 ** 9)
# expected sizes: cnt_itmes, len_seq, dims
net = Net(embed_dim=embed_dim,
num_heads=params["problem"]["cnt_heads"],
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
kdim=None,
vdim=None)
return net
| import torch.nn as nn
class Net(nn.MultiheadAttention):
def forward(self, data):
super().forward(data, data, data)
def get_kernel(params):
assert params["mode"] == "inference"
cnt_samples = params["problem"]["size"][0]
len_seq = params["problem"]["size"][1]
embed_dim = params["problem"]["size"][2]
cnt_projections = 4
ops_proj = 2 * cnt_samples * len_seq * embed_dim * cnt_projections
ops_Q_K = 2 * cnt_samples * len_seq * len_seq * embed_dim
ops_Q_Kt_V = ops_Q_K
params["problem"]["gflop_estimated"] = ((ops_proj + ops_Q_K + ops_Q_Kt_V) * params["nb_epoch"]) / (10 ** 9)
# expected sizes: cnt_itmes, len_seq, dims
net = Net(embed_dim=embed_dim,
num_heads=params["problem"]["cnt_heads"],
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
kdim=None,
vdim=None)
return net
| mpl-2.0 | Python |
b52123473454487df5c617dd354fafeb500668b0 | Reformat codesearch/__init__.py | chromium/codesearch-py,chromium/codesearch-py | codesearch/__init__.py | codesearch/__init__.py | # Copyright 2017 The Chromium Authors.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd.
from __future__ import absolute_import
from .client_api import \
CodeSearch, \
NoFileSpecError, \
NotFoundError, \
ServerError, \
XrefNode
from .messages import \
AnnotatedText, \
Annotation, \
AnnotationResponse, \
AnnotationType, \
AnnotationTypeValue, \
CallGraphRequest, \
CallGraphResponse, \
CodeBlock, \
CodeBlockType, \
CodeSearchProtoJsonEncoder, \
CodeSearchProtoJsonSymbolizedEncoder, \
CompoundRequest, \
CompoundResponse, \
DirInfoRequest, \
DirInfoResponse, \
DirInfoResponseChild, \
EdgeEnumKind, \
FileInfo, \
FileInfoRequest, \
FileInfoResponse, \
FileResult, \
FileSpec, \
FormatRange, \
FormatType, \
GobInfo, \
InternalLink, \
InternalPackage, \
KytheNodeKind, \
KytheXrefKind, \
MatchReason, \
Message, \
Modifiers, \
Node, \
NodeEnumKind, \
SearchRequest, \
SearchResponse, \
SearchResult, \
SingleMatch, \
Snippet, \
StatusRequest, \
StatusResponse, \
VanityGitOnBorgHostname, \
XrefSearchRequest, \
XrefSearchResponse, \
XrefSearchResult, \
XrefSignature, \
XrefSingleMatch, \
XrefTypeCount
from .paths import \
GetPackageRelativePath, \
GetSourceRoot, \
NoSourceRootError
# Only useful for testing against this library.
from .testing_support import \
DisableNetwork, \
EnableNetwork, \
InstallTestRequestHandler
__all__ = []
| # Copyright 2017 The Chromium Authors.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd.
from __future__ import absolute_import
from .client_api import CodeSearch, XrefNode, ServerError, NoFileSpecError, \
NotFoundError
from .messages import Message, AnnotationTypeValue, AnnotationType, \
InternalLink, XrefSignature, NodeEnumKind, KytheNodeKind, KytheXrefKind, \
Annotation, FileSpec, FormatType, FormatRange, AnnotatedText, \
CodeBlockType, Modifiers, CodeBlock, FileInfo, FileInfoResponse, \
FileInfoRequest, AnnotationResponse, MatchReason, Snippet, Node, \
CallGraphResponse, CallGraphRequest, EdgeEnumKind, XrefTypeCount, \
XrefSingleMatch, XrefSearchResult, XrefSearchResponse, \
XrefSearchRequest, VanityGitOnBorgHostname, InternalPackage, \
StatusResponse, GobInfo, DirInfoResponseChild, DirInfoResponse, \
DirInfoRequest, FileResult, SingleMatch, SearchResult, SearchResponse, \
SearchRequest, StatusRequest, CompoundResponse, CompoundRequest, \
CodeSearchProtoJsonEncoder, CodeSearchProtoJsonSymbolizedEncoder
from .paths import GetPackageRelativePath, GetSourceRoot, NoSourceRootError
# Only useful for testing against this library.
from .testing_support import DisableNetwork, EnableNetwork, \
InstallTestRequestHandler
__all__ = []
| bsd-3-clause | Python |
2adbbe6c7291dd79784bd3a1e5702945435fa436 | Put Synchrophasor in a seperate file | sonusz/PhasorToolBox | phasortoolbox/__init__.py | phasortoolbox/__init__.py | #!/usr/bin/env python3
import asyncio
from .synchrophasor import Synchrophasor
from .parser import Parser, PcapParser
from .client import Client
from .pdc import PDC
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
| #!/usr/bin/env python3
import asyncio
from .parser import Parser, PcapParser
from .client import Client
from .pdc import PDC
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
| mit | Python |
05e8d1b4e162b55321f802bdba8a9eb7bdffd971 | remove object_id during conversion | tyarkoni/pliers,tyarkoni/featureX | pliers/converters/misc.py | pliers/converters/misc.py | """Miscellaneous conversion classes."""
from pliers.extractors import ExtractorResult
from pliers.stimuli import SeriesStim
from .base import Converter
class ExtractorResultToSeriesConverter(Converter):
"""Converts an ExtractorResult instance to a list of SeriesStims."""
_input_type = ExtractorResult
_output_type = SeriesStim
def _convert(self, result):
df = result.to_df(timing=False, metadata=False, object_id=False)
if 'object_id' in df.columns:
df = df.drop(['object_id'], axis=1)
stims = []
for i, data in df.iterrows():
onset = result.onset[i] if result.onset is not None else None
dur = result.duration[i] if result.duration is not None else None
order = result.order[i] if result.order is not None else i
st = SeriesStim(data, onset=onset, duration=dur, order=order)
stims.append(st)
return stims
| """Miscellaneous conversion classes."""
from pliers.extractors import ExtractorResult
from pliers.stimuli import SeriesStim
from .base import Converter
class ExtractorResultToSeriesConverter(Converter):
"""Converts an ExtractorResult instance to a list of SeriesStims."""
_input_type = ExtractorResult
_output_type = SeriesStim
def _convert(self, result):
df = result.to_df(timing=False, metadata=False, object_id=False)
stims = []
for i, data in df.iterrows():
onset = result.onset[i] if result.onset is not None else None
dur = result.duration[i] if result.duration is not None else None
order = result.order[i] if result.order is not None else i
st = SeriesStim(data, onset=onset, duration=dur, order=order)
stims.append(st)
return stims
| bsd-3-clause | Python |
f371f8eff785cd117880466899a14a0d6ee70655 | update websiteUrl for news data | EUMSSI/EUMSSI-platform,EUMSSI/EUMSSI-platform,EUMSSI/EUMSSI-platform | preprocess/news2eumssi.py | preprocess/news2eumssi.py | #!/usr/bin/env python
import pymongo
import time
import datetime
from eumssi_converter import EumssiConverter
def transf_date(x):
if x=="": #no date information
x= "1900-01-01 00:00:00.0" #fake date for empty-location, should be aware of that when using
if x.__class__==datetime.datetime:
return x
else:
return datetime.datetime.strptime(x[:-2],'%Y-%m-%d %H:%M:%S') #2014-02-24 17:57:12.0 for example
def transf_lang(lang):
return lang.lower()
def transf_source(sourceLabel):
if sourceLabel.find("lemonde") >-1:
return "Le Monde"
if sourceLabel.find("Zeit") >-1:
return "Zeit"
if sourceLabel.find("elpais") >-1:
return "El Pais"
if sourceLabel.find("Guardian") >-1:
return "Guardian"
return sourceLabel #in other cases
'''
mapping in the form [<original_fieldname>, <eumssi_fieldname>, <transform_function>, [<available_data>,..]}
'''
news_map = [
['publisheddate', 'datePublished', transf_date, []],
['language', 'inLanguage', transf_lang, []],
['link', 'websiteUrl', None, ['url']],
['content', 'text', None, ['text']],
['page', 'page', None, ['htmlsource']],
['description', 'description', None, ['text']],
['author','author',None,[]],
['title','headline',None,['text']],
['sourceLabel','publisher',transf_source,[]],
]
def main():
conv = EumssiConverter('eumssi-crawler-v102014',news_map)
conv.run()
if __name__ == '__main__':
main()
| #!/usr/bin/env python
import pymongo
import time
import datetime
from eumssi_converter import EumssiConverter
def transf_date(x):
if x=="": #no date information
x= "1900-01-01 00:00:00.0" #fake date for empty-location, should be aware of that when using
if x.__class__==datetime.datetime:
return x
else:
return datetime.datetime.strptime(x[:-2],'%Y-%m-%d %H:%M:%S') #2014-02-24 17:57:12.0 for example
def transf_lang(lang):
return lang.lower()
def transf_source(sourceLabel):
if sourceLabel.find("lemonde") >-1:
return "Le Monde"
if sourceLabel.find("Zeit") >-1:
return "Zeit"
if sourceLabel.find("elpais") >-1:
return "El Pais"
if sourceLabel.find("Guardian") >-1:
return "Guardian"
return sourceLabel #in other cases
'''
mapping in the form [<original_fieldname>, <eumssi_fieldname>, <transform_function>, [<available_data>,..]}
'''
news_map = [
['publisheddate', 'datePublished', transf_date, []],
['language', 'inLanguage', transf_lang, []],
['link', 'mediaurl', None, ['url']],
['content', 'text', None, ['text']],
['page', 'page', None, ['htmlsource']],
['description', 'description', None, ['text']],
['author','author',None,[]],
['title','headline',None,['text']],
['sourceLabel','publisher',transf_source,[]],
]
def main():
conv = EumssiConverter('eumssi-crawler-v102014',news_map)
conv.run()
if __name__ == '__main__':
main()
| apache-2.0 | Python |
1bbc1fab976dd63e6a2f05aa35117dc74db40652 | Use ModelSelectField. Javascript still broken for some reason. | skolsuper/pybbm_private_messages,skolsuper/pybbm_private_messages,skolsuper/pybbm_private_messages | private_messages/forms.py | private_messages/forms.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from django_select2.fields import HeavyModelSelect2MultipleChoiceField
from pybb import util
from private_messages.models import PrivateMessage
User = get_user_model()
class MessageForm(forms.ModelForm):
parent = forms.UUIDField(required=False, widget=forms.HiddenInput)
receivers = HeavyModelSelect2MultipleChoiceField(
data_view='private_messages:receivers_json', queryset=User.objects.all())
class Meta(object):
model = PrivateMessage
fields = ('receivers', 'subject', 'body', 'parent')
widgets = {
'body': util.get_markup_engine().get_widget_cls(),
}
labels = {
'receivers': _('To'),
}
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy as _
from django_select2.fields import HeavySelect2MultipleChoiceField
from pybb import util
from private_messages.models import PrivateMessage
class MessageForm(forms.ModelForm):
parent = forms.UUIDField(required=False, widget=forms.HiddenInput)
receivers = HeavySelect2MultipleChoiceField(data_view='private_messages:receivers_json')
class Meta(object):
model = PrivateMessage
fields = ('receivers', 'subject', 'body', 'parent')
widgets = {
'body': util.get_markup_engine().get_widget_cls(),
}
labels = {
'receivers': _('To'),
}
| mit | Python |
790142d9b5f04700f5dcecee1816f6cf415886b1 | Remove print statement | dbinetti/barberscore-django,dbinetti/barberscore-django,dbinetti/barberscore,dbinetti/barberscore,barberscore/barberscore-api,barberscore/barberscore-api,barberscore/barberscore-api,barberscore/barberscore-api | project/apps/api/views.py | project/apps/api/views.py | import logging
log = logging.getLogger(__name__)
import watson
from rest_framework import (
mixins,
viewsets,
# filters,
)
from .models import (
Convention,
Chorus,
Quartet,
)
from .serializers import (
ConventionSerializer,
ChorusSerializer,
QuartetSerializer,
SearchSerializer,
)
class SearchViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
serializer_class = SearchSerializer
def get_queryset(self, *args, **kwargs):
request = self.request
term = request.GET.get('q', None)
if term:
queryset = watson.search(term)
else:
queryset = None
return queryset
class ConventionViewSet(viewsets.ModelViewSet):
queryset = Convention.objects.filter(
name__in=[
'Philadelphia 2010',
'Kansas City 2011',
'Portland 2012',
'Toronto 2013',
'Las Vegas 2014',
'Pittsburgh 2015',
]
).prefetch_related('contests__contestants__performances')
serializer_class = ConventionSerializer
lookup_field = 'slug'
class ChorusViewSet(viewsets.ModelViewSet):
queryset = Chorus.objects.all().prefetch_related('contestants__performances')
serializer_class = ChorusSerializer
lookup_field = 'slug'
filter_fields = (
'name',
)
class QuartetViewSet(viewsets.ModelViewSet):
queryset = Quartet.objects.all()
serializer_class = QuartetSerializer
lookup_field = 'slug'
filter_fields = (
'name',
)
| import logging
log = logging.getLogger(__name__)
import watson
from rest_framework import (
mixins,
viewsets,
# filters,
)
from .models import (
Convention,
Chorus,
Quartet,
)
from .serializers import (
ConventionSerializer,
ChorusSerializer,
QuartetSerializer,
SearchSerializer,
)
class SearchViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
serializer_class = SearchSerializer
def get_queryset(self, *args, **kwargs):
request = self.request
term = request.GET.get('q', None)
if term:
queryset = watson.search(term)
else:
queryset = None
print queryset
return queryset
class ConventionViewSet(viewsets.ModelViewSet):
queryset = Convention.objects.filter(
name__in=[
'Philadelphia 2010',
'Kansas City 2011',
'Portland 2012',
'Toronto 2013',
'Las Vegas 2014',
'Pittsburgh 2015',
]
).prefetch_related('contests__contestants__performances')
serializer_class = ConventionSerializer
lookup_field = 'slug'
class ChorusViewSet(viewsets.ModelViewSet):
queryset = Chorus.objects.all().prefetch_related('contestants__performances')
serializer_class = ChorusSerializer
lookup_field = 'slug'
filter_fields = (
'name',
)
class QuartetViewSet(viewsets.ModelViewSet):
queryset = Quartet.objects.all()
serializer_class = QuartetSerializer
lookup_field = 'slug'
filter_fields = (
'name',
)
| bsd-2-clause | Python |
0ff8623c34e9123c554875503d2f9e8327f41a74 | allow more than once space around operators, bygroups around includes, variables | dbrgn/pygments-mirror,dbrgn/pygments-mirror,dbrgn/pygments-mirror,kirbyfan64/pygments-unofficial,dbrgn/pygments-mirror,dbrgn/pygments-mirror,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,dbrgn/pygments-mirror,dbrgn/pygments-mirror,kirbyfan64/pygments-unofficial,dbrgn/pygments-mirror,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,dbrgn/pygments-mirror,dbrgn/pygments-mirror,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,dbrgn/pygments-mirror,dbrgn/pygments-mirror,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,dbrgn/pygments-mirror,kirbyfan64/pygments-unofficial,dbrgn/pygments-mirror,dbrgn/pygments-mirror,kirbyfan64/pygments-unofficial,kirbyfan64/pygments-unofficial,dbrgn/pygments-mirror | pygments/lexers/puppet.py | pygments/lexers/puppet.py | # -*- coding: utf-8 -*-
"""
pygments.lexers.puppet
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the Puppet DSL.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups
from pygments.token import *
__all__ = ['PuppetLexer']
class PuppetLexer(RegexLexer):
name = 'Puppet'
aliases = ['puppet']
filenames = ['*.pp']
tokens = {
'root': [
(r'\s*#.*$', Comment),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'\s*(\?|<|>|=|\+|-|\/|\*|~|!|\|)\s*', Operator),
(r'(in|and|or|not)\b', Operator.Word),
(r'[]{}:(),;[]', Punctuation),
(r'(.*)(include)(\s*)(.*)$', bygroups(Text, Keyword, Text, Name.Variable)),
(r'(if|else|elsif|case|class|true|false|define)\b', Keyword),
(r'(inherits|notice|node|realize|import)\b', Keyword),
(r'\$[^ ]*', Name.Variable),
(r'\'(.*?)\'', String),
(r'"(.*?)"', String),
(r'(.*?)(\s*)(=>)(\s*)(.*?)$',
bygroups(Name.Attribute, Text, Operator, Text, String)),
(r'[^\S\n]+', Text),
],
}
| # -*- coding: utf-8 -*-
"""
pygments.lexers.puppet
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the Puppet DSL.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups
from pygments.token import *
__all__ = ['PuppetLexer']
class PuppetLexer(RegexLexer):
name = 'Puppet'
aliases = ['puppet']
filenames = ['*.pp']
tokens = {
'root': [
(r'\s*#.*$', Comment),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'\s(\?|<|>|=|\+|-|\/|\*|~|!|\|)\s', Operator),
(r'(in|and|or|not)\b', Operator.Word),
(r'[]{}:(),;[]', Punctuation),
(r'(if|else|elsif|case|class|true|false|define)\b', Keyword),
(r'(inherits|notice|node|include|realize|import)\b', Keyword),
(r'\'(.*?)\'', String),
(r'"(.*?)"', String),
(r'(.*?)(\s*)(=>)(\s*)(.*?)$',
bygroups(Name.Attribute, Text, Operator, Text, String)),
(r'[^\S\n]+', Text),
],
}
| bsd-2-clause | Python |
9847886f060483fb38ea6c00ddaba317fadaea29 | Apply isort | thombashi/pytablereader,thombashi/pytablereader,thombashi/pytablereader | pytablereader/__init__.py | pytablereader/__init__.py | # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import absolute_import
from tabledata import (
EmptyDataError, InvalidDataError, InvalidHeaderNameError, InvalidTableNameError,
SQLiteTableDataSanitizer, TableData, TableDataSanitizer)
from ._constant import PatternMatch
from ._logger import logger, set_log_level, set_logger
from .csv.core import CsvTableFileLoader, CsvTableTextLoader
from .error import (
HTTPError, InvalidFilePathError, PathError, InvalidUrlError, LoaderNotFoundError,
OpenError, ProxyError, PypandocImportError, ValidationError)
from .html.core import HtmlTableFileLoader, HtmlTableTextLoader
from .json.core import JsonTableFileLoader, JsonTableTextLoader
from .loadermanager import TableFileLoader, TableUrlLoader
from .ltsv.core import LtsvTableFileLoader, LtsvTableTextLoader
from .markdown.core import MarkdownTableFileLoader, MarkdownTableTextLoader
from .mediawiki.core import MediaWikiTableFileLoader, MediaWikiTableTextLoader
from .spreadsheet.excelloader import ExcelTableFileLoader
from .spreadsheet.gsloader import GoogleSheetsTableLoader
from .sqlite.core import SqliteFileLoader
from .tsv.core import TsvTableFileLoader, TsvTableTextLoader
| # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import absolute_import
from tabledata import (
SQLiteTableDataSanitizer,
TableData,
TableDataSanitizer,
InvalidTableNameError,
InvalidHeaderNameError,
InvalidDataError,
EmptyDataError,
)
from ._constant import PatternMatch
from ._logger import (
logger,
set_logger,
set_log_level,
)
from .csv.core import (
CsvTableFileLoader,
CsvTableTextLoader,
)
from .error import (
ValidationError,
InvalidPathError,
InvalidFilePathError,
InvalidUrlError,
OpenError,
LoaderNotFoundError,
HTTPError,
ProxyError,
PypandocImportError,
)
from .html.core import (
HtmlTableFileLoader,
HtmlTableTextLoader,
)
from .json.core import (
JsonTableFileLoader,
JsonTableTextLoader,
)
from .loadermanager import (
TableFileLoader,
TableUrlLoader,
)
from .ltsv.core import (
LtsvTableFileLoader,
LtsvTableTextLoader,
)
from .markdown.core import (
MarkdownTableFileLoader,
MarkdownTableTextLoader,
)
from .mediawiki.core import (
MediaWikiTableFileLoader,
MediaWikiTableTextLoader,
)
from .spreadsheet.excelloader import ExcelTableFileLoader
from .spreadsheet.gsloader import GoogleSheetsTableLoader
from .sqlite.core import SqliteFileLoader
from .tsv.core import (
TsvTableFileLoader,
TsvTableTextLoader,
)
| mit | Python |
dab54ff92da91f7971e2701d2cf20d03a577664b | Fix crash when CIRCLE_NODE_TOTAL exists but is empty | micktwomey/pytest-circleci | pytest_circleci/plugin.py | pytest_circleci/plugin.py |
import os, hashlib
class CircleCIError(Exception):
"""Raised for problems running the CirleCI py.test plugin"""
def read_circleci_env_variables():
"""Read and convert CIRCLE_* environment variables"""
circle_node_total = int(os.environ.get("CIRCLE_NODE_TOTAL").strip() or "1")
circle_node_index = int(os.environ.get("CIRCLE_NODE_INDEX").strip() or "0")
if circle_node_index >= circle_node_total:
raise CircleCIError("CIRCLE_NODE_INDEX={} >= CIRCLE_NODE_TOTAL={}, should be less".format(circle_node_index, circle_node_total))
return (circle_node_total, circle_node_index)
def pytest_report_header(config):
"""Add CircleCI information to report"""
circle_node_total, circle_node_index = read_circleci_env_variables()
return "CircleCI total nodes: {}, this node index: {}".format(circle_node_total, circle_node_index)
def pytest_collection_modifyitems(session, config, items):
"""
Use CircleCI env vars to determine which tests to run
- CIRCLE_NODE_TOTAL indicates total number of nodes tests are running on
- CIRCLE_NODE_INDEX indicates which node this is
Will run a subset of tests based on the node index.
"""
circle_node_total, circle_node_index = read_circleci_env_variables()
deselected = []
for index, item in enumerate(list(items)):
item_hash = int(hashlib.sha1(':'.join(map(str, item.location))).hexdigest(), 16)
if (item_hash % circle_node_total) != circle_node_index:
deselected.append(item)
items.remove(item)
config.hook.pytest_deselected(items=deselected)
|
import os, hashlib
class CircleCIError(Exception):
"""Raised for problems running the CirleCI py.test plugin"""
def read_circleci_env_variables():
"""Read and convert CIRCLE_* environment variables"""
circle_node_total = int(os.environ.get("CIRCLE_NODE_TOTAL", "1").strip())
circle_node_index = int(os.environ.get("CIRCLE_NODE_INDEX", "0").strip())
if circle_node_index >= circle_node_total:
raise CircleCIError("CIRCLE_NODE_INDEX={} >= CIRCLE_NODE_TOTAL={}, should be less".format(circle_node_index, circle_node_total))
return (circle_node_total, circle_node_index)
def pytest_report_header(config):
"""Add CircleCI information to report"""
circle_node_total, circle_node_index = read_circleci_env_variables()
return "CircleCI total nodes: {}, this node index: {}".format(circle_node_total, circle_node_index)
def pytest_collection_modifyitems(session, config, items):
"""
Use CircleCI env vars to determine which tests to run
- CIRCLE_NODE_TOTAL indicates total number of nodes tests are running on
- CIRCLE_NODE_INDEX indicates which node this is
Will run a subset of tests based on the node index.
"""
circle_node_total, circle_node_index = read_circleci_env_variables()
deselected = []
for index, item in enumerate(list(items)):
item_hash = int(hashlib.sha1(':'.join(map(str, item.location))).hexdigest(), 16)
if (item_hash % circle_node_total) != circle_node_index:
deselected.append(item)
items.remove(item)
config.hook.pytest_deselected(items=deselected)
| mit | Python |
f6ab612ca7caa78ddbdd3496ce6b75e10da310ec | update test stage | ORNL-CEES/Cap,ORNL-CEES/Cap,Rombur/Cap,Rombur/Cap,Rombur/Cap,dalg24/Cap,dalg24/Cap,ORNL-CEES/Cap,dalg24/Cap | python/test/test_stage.py | python/test/test_stage.py | from pycap import PropertyTree,EnergyStorageDevice,Stage,initialize_data
import unittest
device_database=PropertyTree()
device_database.parse_xml('device.xml')
device=EnergyStorageDevice(device_database)
class capStageTestCase(unittest.TestCase):
def test_constant_current_charge_for_given_time(self):
ptree=PropertyTree()
ptree.put_string('type','constant_current')
ptree.put_double('current',5e-3)
ptree.put_string('end_criterion','time')
ptree.put_double('duration',15.0)
ptree.put_double('time_step',0.1)
stage=Stage(ptree)
data=initialize_data()
steps=stage.run(device,data)
self.assertEqual(steps,150)
self.assertAlmostEqual(data['time'][-1],15.0)
self.assertAlmostEqual(data['current'][-1],5e-3)
def test_force_discharge(self):
ptree=PropertyTree()
ptree.put_string('type','constant_voltage')
ptree.put_double('voltage',0.0)
ptree.put_string('end_criterion','current_less_than')
ptree.put_double('current_limit',1e-5)
ptree.put_double('time_step',1.0)
stage=Stage(ptree)
data=initialize_data()
steps=stage.run(device,data)
self.assertGreaterEqual(steps,1)
self.assertAlmostEqual(data['voltage'][-1],0.0)
self.assertLessEqual(data['current'][-1],1e-5)
if __name__ == '__main__':
unittest.main()
| from pycap import PropertyTree,EnergyStorageDevice,Stage,initialize_data
import unittest
device_database=PropertyTree()
device_database.parse_xml('device.xml')
device=EnergyStorageDevice(device_database)
class capStageTestCase(unittest.TestCase):
def test_nothing(self):
ptree=PropertyTree()
ptree.put_string('type','constant_current')
ptree.put_double('current',5e-3)
ptree.put_string('end_criterion','time')
ptree.put_double('duration',15.0)
ptree.put_double('time_step',0.1)
print 'this should throw'
stage=Stage(ptree)
data=initialize_data()
steps=stage.run(device,data)
self.assertEqual(data['time'][-1],15.0)
self.assertEqual(steps,150)
self.assertEqual(device.get_current(),5e-3)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | Python |
20fe9ec286f4040e8b10cdfbe300124e781ebf7c | change hr.analytic.timesheet instead of account.analytic.line when associated line to invoice | damdam-s/project-reporting | project_billing_utils/wizard/associate_aal.py | project_billing_utils/wizard/associate_aal.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Joël Grand-Guillaume
# Copyright 2010 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class AssociateInvoice(orm.TransientModel):
_name = 'associate.aal.to.invoice'
_description = 'Associate Analytic Lines'
_columns = {
'invoice_id': fields.many2one('account.invoice', 'Invoice', required=True),
}
def associate_aal(self, cr, uid, ids, context=None):
if context is None:
context = {}
aal_obj = self.pool.get('hr.analytic.timesheet')
aal_ids = context.get('active_ids', False)
if isinstance(ids, list):
req_id = ids[0]
else:
req_id = ids
current = self.browse(cr, uid, req_id, context=context)
aal_obj.write(cr, uid, aal_ids,
{'invoice_id': current.invoice_id.id},
context=context)
return {
'domain': "[('id','in', [%s])]" % (current.invoice_id.id,),
'name': 'Associated invoice',
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.invoice',
'view_id': False,
'context': context,
'type': 'ir.actions.act_window',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Joël Grand-Guillaume
# Copyright 2010 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class AssociateInvoice(orm.TransientModel):
_name = 'associate.aal.to.invoice'
_description = 'Associate Analytic Lines'
_columns = {
'invoice_id': fields.many2one('account.invoice', 'Invoice', required=True),
}
def associate_aal(self, cr, uid, ids, context=None):
if context is None:
context = {}
aal_obj = self.pool.get('account.analytic.line')
aal_ids = context.get('active_ids', False)
if isinstance(ids, list):
req_id = ids[0]
else:
req_id = ids
current = self.browse(cr, uid, req_id, context=context)
aal_obj.write(cr, uid, aal_ids,
{'invoice_id': current.invoice_id.id},
context=context)
return {
'domain': "[('id','in', [%s])]" % (current.invoice_id.id,),
'name': 'Associated invoice',
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.invoice',
'view_id': False,
'context': context,
'type': 'ir.actions.act_window',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | Python |
7c1ab84e5a7acfc439b3b34950a31b933c22d679 | fix migrations | codeforamerica/rva-screening,codeforamerica/rva-screening,codeforamerica/rva-screening | migrations/versions/65f28fd897d_.py | migrations/versions/65f28fd897d_.py | """empty message
Revision ID: 65f28fd897d
Revises: 11288927b825
Create Date: 2015-09-23 14:44:44.824420
"""
# revision identifiers, used by Alembic.
revision = '65f28fd897d'
down_revision = '3f1fecf4ecc8'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('referral_permission',
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('last_modified', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('from_service_id', sa.Integer(), nullable=True),
sa.Column('to_servide_id', sa.Integer(), nullable=True),
sa.Column('last_modified_by_id', sa.Integer(), nullable=True),
sa.Column('created_by_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['created_by_id'], ['app_user.id'], name='fk_created_by_id', use_alter=True),
sa.ForeignKeyConstraint(['from_service_id'], ['service.id'], ),
sa.ForeignKeyConstraint(['last_modified_by_id'], ['app_user.id'], name='fk_last_modified_by_id', use_alter=True),
sa.ForeignKeyConstraint(['to_servide_id'], ['service.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('referral_permission')
### end Alembic commands ###
| """empty message
Revision ID: 65f28fd897d
Revises: 11288927b825
Create Date: 2015-09-23 14:44:44.824420
"""
# revision identifiers, used by Alembic.
revision = '65f28fd897d'
down_revision = '11288927b825'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('referral_permission',
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('last_modified', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('from_service_id', sa.Integer(), nullable=True),
sa.Column('to_servide_id', sa.Integer(), nullable=True),
sa.Column('last_modified_by_id', sa.Integer(), nullable=True),
sa.Column('created_by_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['created_by_id'], ['app_user.id'], name='fk_created_by_id', use_alter=True),
sa.ForeignKeyConstraint(['from_service_id'], ['service.id'], ),
sa.ForeignKeyConstraint(['last_modified_by_id'], ['app_user.id'], name='fk_last_modified_by_id', use_alter=True),
sa.ForeignKeyConstraint(['to_servide_id'], ['service.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('referral_permission')
### end Alembic commands ###
| bsd-3-clause | Python |
22b4774372583050d5f2b45bf8cba29a08355efe | move prepare method / in case there are some loose changes exit on resume | jacobilsoe/rtc2git,cwill747/rtc2git,ohumbel/rtc2git,rtcTo/rtc2git,WtfJoke/rtc2git,akchinSTC/rtc2git | migration.py | migration.py | import os
import sys
from rtcFunctions import ImportHandler
from rtcFunctions import WorkspaceHandler
from rtcFunctions import RTCInitializer
from gitFunctions import Initializer
from gitFunctions import Commiter
import configuration
import shouter
def initialize(config):
directory = config.workDirectory
if os.path.exists(directory):
sys.exit("Configured directory already exists, please make sure to use a non-existing directory")
os.makedirs(directory)
os.chdir(directory)
config.deletelogfolder()
git = Initializer(config)
git.initalize()
RTCInitializer.initialize(config)
git.initialcommitandpush()
def resume(config):
os.chdir(config.workDirectory)
os.chdir(config.clonedGitRepoName)
if not ImportHandler(config).is_reloading_necessary():
sys.exit("Directory is not clean, please commit untracked files or revert them")
RTCInitializer.loginandcollectstreamuuid(config)
if config.previousstreamname:
prepare(config)
else:
WorkspaceHandler(config).load()
def migrate():
config = configuration.read()
rtc = ImportHandler(config)
rtcworkspace = WorkspaceHandler(config)
git = Commiter
resume(config)
streamuuid = config.streamuuid
streamname = config.streamname
branchname = streamname + "_branchpoint"
componentbaselineentries = rtc.getcomponentbaselineentriesfromstream(streamuuid)
rtcworkspace.setnewflowtargets(streamuuid)
git.branch(branchname)
history = rtc.readhistory(componentbaselineentries, streamname)
changeentries = rtc.getchangeentriesofstreamcomponents(componentbaselineentries)
rtc.acceptchangesintoworkspace(rtc.getchangeentriestoaccept(changeentries, history))
shouter.shout("All changes until creation of stream '%s' accepted" % streamname)
git.pushbranch(branchname)
git.branch(streamname)
rtcworkspace.setcomponentstobaseline(componentbaselineentries, streamuuid)
rtcworkspace.load()
changeentries = rtc.getchangeentriesofstream(streamuuid)
rtc.acceptchangesintoworkspace(rtc.getchangeentriestoaccept(changeentries, history))
git.pushbranch(streamname)
shouter.shout("All changes of stream '%s' accepted - Migration of stream completed" % streamname)
def prepare(config):
rtc = ImportHandler(config)
rtcworkspace = WorkspaceHandler(config)
# git checkout branchpoint
Commiter.checkout(config.previousstreamname + "_branchpoint")
# list baselines of current workspace
componentbaselineentries = rtc.getcomponentbaselineentriesfromstream(config.previousstreamuuid)
# set components to that baselines
rtcworkspace.setcomponentstobaseline(componentbaselineentries, config.previousstreamuuid)
rtcworkspace.load()
if __name__ == "__main__":
migrate()
| import os
import sys
from rtcFunctions import ImportHandler
from rtcFunctions import WorkspaceHandler
from rtcFunctions import RTCInitializer
from gitFunctions import Initializer
from gitFunctions import Commiter
import configuration
import shouter
def initialize(config):
directory = config.workDirectory
if os.path.exists(directory):
sys.exit("Configured directory already exists, please make sure to use a non-existing directory")
os.makedirs(directory)
os.chdir(directory)
config.deletelogfolder()
git = Initializer(config)
git.initalize()
RTCInitializer.initialize(config)
git.initialcommitandpush()
def resume(config):
os.chdir(config.workDirectory)
os.chdir(config.clonedGitRepoName)
RTCInitializer.loginandcollectstreamuuid(config)
if not config.previousstreamname: # in case previousstreamname is set, #prepare will load the workspace
WorkspaceHandler(config).load()
def migrate():
config = configuration.read()
rtc = ImportHandler(config)
rtcworkspace = WorkspaceHandler(config)
git = Commiter
resume(config)
prepare(config)
streamuuid = config.streamuuid
streamname = config.streamname
branchname = streamname + "_branchpoint"
componentbaselineentries = rtc.getcomponentbaselineentriesfromstream(streamuuid)
rtcworkspace.setnewflowtargets(streamuuid)
git.branch(branchname)
history = rtc.readhistory(componentbaselineentries, streamname)
changeentries = rtc.getchangeentriesofstreamcomponents(componentbaselineentries)
rtc.acceptchangesintoworkspace(rtc.getchangeentriestoaccept(changeentries, history))
shouter.shout("All changes until creation of stream '%s' accepted" % streamname)
git.pushbranch(branchname)
git.branch(streamname)
rtcworkspace.setcomponentstobaseline(componentbaselineentries, streamuuid)
rtcworkspace.load()
changeentries = rtc.getchangeentriesofstream(streamuuid)
rtc.acceptchangesintoworkspace(rtc.getchangeentriestoaccept(changeentries, history))
git.pushbranch(streamname)
shouter.shout("All changes of stream '%s' accepted - Migration of stream completed" % streamname)
def prepare(config):
rtc = ImportHandler(config)
rtcworkspace = WorkspaceHandler(config)
# git checkout branchpoint
Commiter.checkout(config.previousstreamname + "_branchpoint")
# list baselines of current workspace
componentbaselineentries = rtc.getcomponentbaselineentriesfromstream(config.previousstreamuuid)
# set components to that baselines
rtcworkspace.setcomponentstobaseline(componentbaselineentries, config.previousstreamuuid)
rtcworkspace.load()
if __name__ == "__main__":
migrate()
| mit | Python |
04ea2096ade2cf323312cb1a1ff008c667994e24 | Use tf.lite as the py_module name. Made the necessary changes to the api generator to accomodate for `dots` in the py_module name | cxxgtxy/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,aldian/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,davidzchen/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-Corporation/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,frreiss/tensorflow-fred,aldian/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-Corporation/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,frreiss/tensorflow-fred,freedomtan/tensorflow,annarev/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,sarvex/tensorflow,aam-at/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,freedomtan/tensorflow,gautam1858/tensorflow,petewarden/tensorflow,freedomtan/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,sarvex/tensorflow,annarev/tensorflow,annarev/tensorflow,frreiss/tensorflow-fred,Intel-tensorflow/tensorflow,sarvex/tensorflow,annarev/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,cxxgtxy/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,freedomtan/tensorflow,freedomtan/tensorflow,yongtang/tensorflow,davidzchen/tensorflow,Intel-Corporation/tensorflow,gautam1858/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,davidzchen/tensorflow,aam-at/tensorflow,Intel-tensorflow/tensorflow,annarev/tensorflow,aldian/tensorflow,frreiss/tensorflow-fred,aam-at/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,freedomtan/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,cxxgtxy/tensorflow,davidzchen/tensorflow,yongtang/tensorflow,petewarden/tensorflow,petewarden/tensorflow,annarev/tensorflow,cxxgtxy/tensorflow,frreiss/tensorflow-fred,frreiss/tensorflow-fred,petewarden/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,annarev/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,freedomtan/tensorflow,aam-at/tensorflow,aam-at/tensorflow,aam-at/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,aam-at/tensorflow,petewarden/tensorflow,gautam1858/tensorflow,frreiss/tensorflow-fred,Intel-tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,aldian/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,davidzchen/tensorflow,freedomtan/tensorflow,yongtang/tensorflow,petewarden/tensorflow,Intel-tensorflow/tensorflow,cxxgtxy/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,cxxgtxy/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,petewarden/tensorflow,annarev/tensorflow,davidzchen/tensorflow,Intel-tensorflow/tensorflow,sarvex/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,frreiss/tensorflow-fred,paolodedios/tensorflow,Intel-Corporation/tensorflow,aldian/tensorflow,annarev/tensorflow,sarvex/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,aam-at/tensorflow,karllessard/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,gautam1858/tensorflow,aldian/tensorflow,tensorflow/tensorflow-pywrap_saved_model,aam-at/tensorflow,davidzchen/tensorflow,petewarden/tensorflow,petewarden/tensorflow,annarev/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,davidzchen/tensorflow,Intel-Corporation/tensorflow,aam-at/tensorflow,petewarden/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow,petewarden/tensorflow,sarvex/tensorflow,aam-at/tensorflow,sarvex/tensorflow,frreiss/tensorflow-fred,aam-at/tensorflow,karllessard/tensorflow,aldian/tensorflow,karllessard/tensorflow,yongtang/tensorflow,cxxgtxy/tensorflow,karllessard/tensorflow,annarev/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,freedomtan/tensorflow,gautam1858/tensorflow,freedomtan/tensorflow,karllessard/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,cxxgtxy/tensorflow,gautam1858/tensorflow,aldian/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,sarvex/tensorflow,paolodedios/tensorflow,gautam1858/tensorflow,freedomtan/tensorflow | tensorflow/lite/g3doc/tools/build_py_api_docs.py | tensorflow/lite/g3doc/tools/build_py_api_docs.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Generate python docs for tf.lite.
# How to run
```
python build_docs.py --output_dir=/path/to/output
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pathlib
from absl import app
from absl import flags
import tensorflow as tf
from tensorflow_docs.api_generator import generate_lib
flags.DEFINE_string('output_dir', '/tmp/lite_api/',
'The path to output the files to')
flags.DEFINE_string('code_url_prefix',
'https://github.com/tensorflow/tensorflow/blob/master/',
'The url prefix for links to code.')
flags.DEFINE_bool('search_hints', True,
'Include metadata search hints in the generated files')
flags.DEFINE_string('site_path', 'lite/api_docs/python',
'Path prefix in the _toc.yaml')
FLAGS = flags.FLAGS
def main(_):
doc_generator = generate_lib.DocGenerator(
root_title='TensorFlow Lite',
py_modules=[('tf.lite', tf.lite)],
base_dir=str(pathlib.Path(tf.__file__).parent),
code_url_prefix=FLAGS.code_url_prefix,
search_hints=FLAGS.search_hints,
site_path=FLAGS.site_path,
callbacks=[])
doc_generator.build(output_dir=FLAGS.output_dir)
if __name__ == '__main__':
app.run(main)
| # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Generate python docs for tf.lite.
# How to run
```
python build_docs.py --output_dir=/path/to/output
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pathlib
from absl import app
from absl import flags
import tensorflow as tf
from tensorflow_docs.api_generator import generate_lib
flags.DEFINE_string('output_dir', '/tmp/lite_api/',
'The path to output the files to')
flags.DEFINE_string('code_url_prefix',
'https://github.com/tensorflow/tensorflow/blob/master/',
'The url prefix for links to code.')
flags.DEFINE_bool('search_hints', True,
'Include metadata search hints in the generated files')
flags.DEFINE_string('site_path', 'lite/api_docs/python',
'Path prefix in the _toc.yaml')
FLAGS = flags.FLAGS
def main(_):
doc_generator = generate_lib.DocGenerator(
root_title='TensorFlow Lite',
py_modules=[('lite', tf.lite)],
base_dir=str(pathlib.Path(tf.__file__).parent),
code_url_prefix=FLAGS.code_url_prefix,
search_hints=FLAGS.search_hints,
site_path=FLAGS.site_path,
callbacks=[])
doc_generator.build(output_dir=FLAGS.output_dir)
if __name__ == '__main__':
app.run(main)
| apache-2.0 | Python |
23c25516765bb9290fcd59159114b2815c4c60c3 | add unit test for pytables._update_or_append() | nathanhilbert/ulmo,nathanhilbert/ulmo,cameronbracken/ulmo,ocefpaf/ulmo,ocefpaf/ulmo,cameronbracken/ulmo | test/usgs_test.py | test/usgs_test.py | import datetime
import os
import isodate
import tables
import pyhis
TEST_FILE_PATH = '/tmp/pyhis_test.h5'
def test_init():
os.remove(TEST_FILE_PATH)
assert not os.path.exists(TEST_FILE_PATH)
pyhis.usgs.pytables.init_h5(TEST_FILE_PATH)
assert os.path.exists(TEST_FILE_PATH)
def test_parse_get_sites():
site_files = ['RI_daily.xml', 'RI_instantaneous.xml']
sites = {}
for site_file in site_files:
with open(site_file, 'r') as f:
sites.update(pyhis.usgs.core._parse_sites(f))
assert len(sites) == 63
return sites
def test_update_site_list():
assert _count_rows('/usgs/sites') == 0
sites = test_parse_get_sites()
pyhis.usgs.pytables._update_site_table(sites, TEST_FILE_PATH)
assert _count_rows('/usgs/sites') == 63
def test_pytables_get_sites():
sites = pyhis.usgs.pytables.get_sites(TEST_FILE_PATH)
assert len(sites) == 63
def test_update_or_append():
h5file = tables.openFile(TEST_FILE_PATH, mode="r+")
test_table = _create_test_table(h5file, 'update_or_append', pyhis.usgs.pytables.USGSValue)
where_filter = '(datetime == "%(datetime)s")'
initial_values = [
{'datetime': isodate.datetime_isoformat(datetime.datetime(2000, 1, 1) + \
datetime.timedelta(days=i)),
'value': 'initial',
'qualifiers': ''}
for i in range(1000)]
update_values = [
{'datetime': isodate.datetime_isoformat(datetime.datetime(2000, 1, 1) + \
datetime.timedelta(days=i)),
'value': 'updated',
'qualifiers': ''}
for i in [20, 30, 10, 999, 1000, 2000, 399]]
pyhis.usgs.pytables._update_or_append(test_table, initial_values, where_filter)
h5file.close()
assert _count_rows('/test/update_or_append') == 1000
h5file = tables.openFile(TEST_FILE_PATH, mode="r+")
test_table = h5file.getNode('/test/update_or_append')
pyhis.usgs.pytables._update_or_append(test_table, update_values, where_filter)
h5file.close()
assert _count_rows('/test/update_or_append') == 1002
def _count_rows(path):
h5file = tables.openFile(TEST_FILE_PATH, mode="r")
table = h5file.getNode(path)
number_of_rows = len([1 for i in table.iterrows()])
h5file.close()
return number_of_rows
def _create_test_table(h5file, table_name, description):
test_table = h5file.createTable('/test', table_name, description,
createparents=True)
return test_table
| import os
import tables
import pyhis
TEST_FILE_PATH = '/tmp/pyhis_test.h5'
def test_init():
os.remove(TEST_FILE_PATH)
assert not os.path.exists(TEST_FILE_PATH)
pyhis.usgs.pytables.init_h5(TEST_FILE_PATH)
assert os.path.exists(TEST_FILE_PATH)
def test_parse_get_sites():
site_files = ['RI_daily.xml', 'RI_instantaneous.xml']
sites = {}
for site_file in site_files:
with open(site_file, 'r') as f:
sites.update(pyhis.usgs.core._parse_sites(f))
assert len(sites) == 63
return sites
def test_update_site_list():
assert _count_sites() == 0
sites = test_parse_get_sites()
pyhis.usgs.pytables._update_site_table(sites, TEST_FILE_PATH)
assert _count_sites() == 63
def test_pytables_get_sites():
sites = pyhis.usgs.pytables.get_sites(TEST_FILE_PATH)
assert len(sites) == 63
def _count_sites():
h5file = tables.openFile(TEST_FILE_PATH, mode="r")
sites_table = h5file.root.usgs.sites
number_of_sites = len([1 for i in sites_table.iterrows()])
h5file.close()
return number_of_sites
| bsd-3-clause | Python |
5384deb82aeddfb6f02c7e198c372dc1b06cd861 | Add purchase suppliers dataset | datasciencebr/serenata-toolbox | serenata_toolbox/datasets.py | serenata_toolbox/datasets.py | import os
from urllib.request import urlretrieve
def fetch(filename, destination_path,
aws_bucket='serenata-de-amor-data',
aws_region='s3-sa-east-1'):
url = 'https://{}.amazonaws.com/{}/{}'.format(aws_region,
aws_bucket,
filename)
filepath = os.path.join(destination_path, filename)
if not os.path.exists(filepath):
urlretrieve(url, filepath)
def fetch_latest_backup(destination_path,
aws_bucket='serenata-de-amor-data',
aws_region='s3-sa-east-1'):
files = (
'2016-08-08-ceap-datasets.md',
'2016-08-08-current-year.xz',
'2016-08-08-datasets-format.html',
'2016-08-08-last-year.xz',
'2016-08-08-previous-years.xz',
'2016-09-03-companies.xz',
'2016-11-11-congressperson-relatives.xz',
'2016-11-19-current-year.xz',
'2016-11-19-last-year.xz',
'2016-11-19-previous-years.xz',
'2016-11-19-reimbursements.xz',
'2016-11-28-congressperson-civil-names.xz',
'2016-11-29-yelp-companies.xz',
'2016-12-02-foursquare-companies.xz',
'2016-12-15-speeches.xz',
'2016-12-20-impeded-non-profit-entities.xz',
'2016-12-21-deputies.xz',
'2016-12-21-inident-and-suspended-companies.xz',
'2016-12-21-national-register-punished-companies.xz',
'2016-12-21-presences.xz',
'2016-12-21-sessions.xz',
'2016-12-21-speeches.xz',
'2016-12-22-agreements.xz',
'2016-12-22-amendments.xz',
'2017-03-20-purchase-suppliers.xz'
)
for filename in files:
fetch(filename, destination_path, aws_bucket, aws_region)
| import os
from urllib.request import urlretrieve
def fetch(filename, destination_path,
aws_bucket='serenata-de-amor-data',
aws_region='s3-sa-east-1'):
url = 'https://{}.amazonaws.com/{}/{}'.format(aws_region,
aws_bucket,
filename)
filepath = os.path.join(destination_path, filename)
if not os.path.exists(filepath):
urlretrieve(url, filepath)
def fetch_latest_backup(destination_path,
aws_bucket='serenata-de-amor-data',
aws_region='s3-sa-east-1'):
files = (
'2016-08-08-ceap-datasets.md',
'2016-08-08-current-year.xz',
'2016-08-08-datasets-format.html',
'2016-08-08-last-year.xz',
'2016-08-08-previous-years.xz',
'2016-09-03-companies.xz',
'2016-11-11-congressperson-relatives.xz',
'2016-11-19-current-year.xz',
'2016-11-19-last-year.xz',
'2016-11-19-previous-years.xz',
'2016-11-19-reimbursements.xz',
'2016-11-28-congressperson-civil-names.xz',
'2016-11-29-yelp-companies.xz',
'2016-12-02-foursquare-companies.xz',
'2016-12-15-speeches.xz',
'2016-12-20-impeded-non-profit-entities.xz',
'2016-12-21-deputies.xz',
'2016-12-21-inident-and-suspended-companies.xz',
'2016-12-21-national-register-punished-companies.xz',
'2016-12-21-presences.xz',
'2016-12-21-sessions.xz',
'2016-12-21-speeches.xz',
'2016-12-22-agreements.xz',
'2016-12-22-amendments.xz'
)
for filename in files:
fetch(filename, destination_path, aws_bucket, aws_region)
| mit | Python |
b61c51798ce2f1fde3d8777d36d809b209741984 | Fix compatibility with Python2 | niboshi/chainer,hvy/chainer,okuta/chainer,ktnyt/chainer,chainer/chainer,pfnet/chainer,wkentaro/chainer,chainer/chainer,hvy/chainer,niboshi/chainer,chainer/chainer,tkerola/chainer,ronekko/chainer,okuta/chainer,hvy/chainer,niboshi/chainer,rezoo/chainer,wkentaro/chainer,keisuke-umezawa/chainer,ktnyt/chainer,niboshi/chainer,jnishi/chainer,ktnyt/chainer,wkentaro/chainer,okuta/chainer,anaruse/chainer,keisuke-umezawa/chainer,jnishi/chainer,jnishi/chainer,okuta/chainer,keisuke-umezawa/chainer,chainer/chainer,jnishi/chainer,ktnyt/chainer,wkentaro/chainer,hvy/chainer,keisuke-umezawa/chainer | tests/chainer_tests/utils_tests/test_argument.py | tests/chainer_tests/utils_tests/test_argument.py | import unittest
import six
from chainer import testing
from chainer.utils.argument import parse_kwargs
class TestArgument(unittest.TestCase):
def test_parse_kwargs(self):
def test(**kwargs):
return parse_kwargs(kwargs, ('foo', 1), ('bar', 2))
self.assertEqual(test(), (1, 2))
self.assertEqual(test(bar=1, foo=2), (2, 1))
with six.assertRaisesRegex(
TypeError, "test\(\) got unexpected keyword argument\(s\) "
"'ham', 'spam'"):
test(spam=1, ham=2)
testing.run_module(__name__, __file__)
| import unittest
from chainer import testing
from chainer.utils.argument import parse_kwargs
class TestArgument(unittest.TestCase):
def test_parse_kwargs(self):
def test(**kwargs):
return parse_kwargs(kwargs, ('foo', 1), ('bar', 2))
self.assertEqual(test(), (1, 2))
self.assertEqual(test(bar=1, foo=2), (2, 1))
with self.assertRaisesRegex(
TypeError, "test\(\) got unexpected keyword argument\(s\) "
"'ham', 'spam'"):
test(spam=1, ham=2)
testing.run_module(__name__, __file__)
| mit | Python |
5d3646a8fc4c05a2902b2f3ca60321204e87f355 | Fix handling of user details | tobias47n9e/social-core,python-social-auth/social-core,python-social-auth/social-core | social_core/backends/asana.py | social_core/backends/asana.py | import datetime
from .oauth import BaseOAuth2
class AsanaOAuth2(BaseOAuth2):
name = 'asana'
AUTHORIZATION_URL = 'https://app.asana.com/-/oauth_authorize'
ACCESS_TOKEN_METHOD = 'POST'
ACCESS_TOKEN_URL = 'https://app.asana.com/-/oauth_token'
REFRESH_TOKEN_URL = 'https://app.asana.com/-/oauth_token'
REDIRECT_STATE = False
USER_DATA_URL = 'https://app.asana.com/api/1.0/users/me'
EXTRA_DATA = [
('expires_in', 'expires'),
('refresh_token', 'refresh_token'),
('name', 'name'),
]
def get_user_details(self, response):
data = response['data']
fullname, first_name, last_name = self.get_user_names(data['name'])
return {'email': data['email'],
'username': data['email'],
'fullname': fullname,
'last_name': last_name,
'first_name': first_name}
def user_data(self, access_token, *args, **kwargs):
return self.get_json(self.USER_DATA_URL, headers={
'Authorization': 'Bearer {}'.format(access_token)
})
def extra_data(self, user, uid, response, details=None, *args, **kwargs):
data = super(AsanaOAuth2, self).extra_data(user, uid, response, details)
if self.setting('ESTIMATE_EXPIRES_ON'):
expires_on = datetime.datetime.utcnow() + \
datetime.timedelta(seconds=data['expires'])
data['expires_on'] = expires_on.isoformat()
return data
| import datetime
from .oauth import BaseOAuth2
class AsanaOAuth2(BaseOAuth2):
name = 'asana'
AUTHORIZATION_URL = 'https://app.asana.com/-/oauth_authorize'
ACCESS_TOKEN_METHOD = 'POST'
ACCESS_TOKEN_URL = 'https://app.asana.com/-/oauth_token'
REFRESH_TOKEN_URL = 'https://app.asana.com/-/oauth_token'
REDIRECT_STATE = False
USER_DATA_URL = 'https://app.asana.com/api/1.0/users/me'
EXTRA_DATA = [
('expires_in', 'expires'),
('refresh_token', 'refresh_token'),
('name', 'name'),
]
def get_user_details(self, response):
fullname, first_name, last_name = self.get_user_names(response['name'])
return {'email': response['email'],
'username': response['email'],
'fullname': fullname,
'last_name': last_name,
'first_name': first_name}
def user_data(self, access_token, *args, **kwargs):
return self.get_json(self.USER_DATA_URL, headers={
'Authorization': 'Bearer {}'.format(access_token)
})
def extra_data(self, user, uid, response, details=None, *args, **kwargs):
data = super(AsanaOAuth2, self).extra_data(user, uid, response, details)
if self.setting('ESTIMATE_EXPIRES_ON'):
expires_on = datetime.datetime.utcnow() + \
datetime.timedelta(seconds=data['expires'])
data['expires_on'] = expires_on.isoformat()
return data
| bsd-3-clause | Python |
565bb12ca79a159b34ec0e03385a038a05db93c2 | Remove example from docstring | Vayel/MPF,tartopum/MPF | mpf/processors/difference.py | mpf/processors/difference.py | class Difference:
"""TODO"""
def __init__(self):
pass
def work(self, data):
"""Process the difference between an element in the `data` list and the
next one.
:param data: the data to be processed
:type data: list
:return: the list of differences
:rtype: list
"""
return [data[i+1] - data[i] for i in range(len(data) - 1)]
| class Difference:
"""
TODO
"""
def __init__(self):
pass
def work(self, data):
"""Process the difference between an element in the `data` list and the
next one.
:param data: the data to be processed
:type data: list
:return: the list of differences
:rtype: list
"""
return [data[i+1] - data[i] for i in range(len(data) - 1)]
| mit | Python |
9a903f9f003d743242d4ac41b4a4045559f1ff4c | add MIT License copyright header to zmq_sub.py | shaolinfry/litecoin,emc2foundation/einsteinium,EntropyFactory/creativechain-core,nlgcoin/guldencoin-official,funkshelper/woodcore,zcoinofficial/zcoin,zcoinofficial/zcoin,gjhiggins/vcoincore,zcoinofficial/zcoin,Electronic-Gulden-Foundation/egulden,OmniLayer/omnicore,lbrtcoin/albertcoin,lbrtcoin/albertcoin,nlgcoin/guldencoin-official,gravio-net/graviocoin,HashUnlimited/Einsteinium-Unlimited,gravio-net/graviocoin,reorder/viacoin,jl2012/litecoin,experiencecoin/experiencecoin,OmniLayer/omnicore,cannabiscoindev/cannabiscoin420,lbrtcoin/albertcoin,jl2012/litecoin,reorder/viacoin,bitcoinplusorg/xbcwalletsource,okinc/bitcoin,litecoin-project/litecore-litecoin,bitcoinplusorg/xbcwalletsource,magacoin/magacoin,Electronic-Gulden-Foundation/egulden,litecoin-project/litecore-litecoin,shaolinfry/litecoin,EntropyFactory/creativechain-core,magacoin/magacoin,experiencecoin/experiencecoin,shaolinfry/litecoin,ShadowMyst/creativechain-core,gjhiggins/vcoincore,martindale/elements,metacoin/florincoin,litecoin-project/litecore-litecoin,ShadowMyst/creativechain-core,nlgcoin/guldencoin-official,itmanagerro/tresting,metacoin/florincoin,gravio-net/graviocoin,gravio-net/graviocoin,HashUnlimited/Einsteinium-Unlimited,zetacoin/zetacoin,metacoin/florincoin,experiencecoin/experiencecoin,itmanagerro/tresting,okinc/bitcoin,oklink-dev/bitcoin,lbrtcoin/albertcoin,ShadowMyst/creativechain-core,nlgcoin/guldencoin-official,zetacoin/zetacoin,oklink-dev/bitcoin,zcoinofficial/zcoin,funkshelper/woodcore,EntropyFactory/creativechain-core,OmniLayer/omnicore,lbrtcoin/albertcoin,metacoin/florincoin,OmniLayer/omnicore,oklink-dev/bitcoin,cannabiscoindev/cannabiscoin420,zcoinofficial/zcoin,OmniLayer/omnicore,okinc/bitcoin,jl2012/litecoin,litecoin-project/litecore-litecoin,bdelzell/creditcoin-org-creditcoin,martindale/elements,HashUnlimited/Einsteinium-Unlimited,cannabiscoindev/cannabiscoin420,EntropyFactory/creativechain-core,magacoin/magacoin,oklink-dev/bitcoin,jl2012/litecoin,zetacoin/zetacoin,emc2foundation/einsteinium,okinc/bitcoin,jl2012/litecoin,funkshelper/woodcore,shaolinfry/litecoin,lbrtcoin/albertcoin,emc2foundation/einsteinium,shaolinfry/litecoin,bdelzell/creditcoin-org-creditcoin,gjhiggins/vcoincore,lbrtcoin/albertcoin,funkshelper/woodcore,itmanagerro/tresting,oklink-dev/bitcoin,lbrtcoin/albertcoin,zetacoin/zetacoin,experiencecoin/experiencecoin,bdelzell/creditcoin-org-creditcoin,EntropyFactory/creativechain-core,itmanagerro/tresting,reorder/viacoin,martindale/elements,itmanagerro/tresting,shaolinfry/litecoin,itmanagerro/tresting,zcoinofficial/zcoin,martindale/elements,OmniLayer/omnicore,lbrtcoin/albertcoin,emc2foundation/einsteinium,martindale/elements,bitcoinplusorg/xbcwalletsource,zetacoin/zetacoin,zcoinofficial/zcoin,Electronic-Gulden-Foundation/egulden,magacoin/magacoin,HashUnlimited/Einsteinium-Unlimited,magacoin/magacoin,zcoinofficial/zcoin,bdelzell/creditcoin-org-creditcoin,litecoin-project/litecore-litecoin,martindale/elements,bdelzell/creditcoin-org-creditcoin,lbrtcoin/albertcoin,jl2012/litecoin,funkshelper/woodcore,gravio-net/graviocoin,experiencecoin/experiencecoin,lbrtcoin/albertcoin,bitcoinplusorg/xbcwalletsource,Electronic-Gulden-Foundation/egulden,cannabiscoindev/cannabiscoin420,nlgcoin/guldencoin-official,bitcoinplusorg/xbcwalletsource,ShadowMyst/creativechain-core,reorder/viacoin,metacoin/florincoin,HashUnlimited/Einsteinium-Unlimited,emc2foundation/einsteinium,okinc/bitcoin,gravio-net/graviocoin,gjhiggins/vcoincore,zcoinofficial/zcoin,okinc/bitcoin,gjhiggins/vcoincore,lbrtcoin/albertcoin,ShadowMyst/creativechain-core,cannabiscoindev/cannabiscoin420,nlgcoin/guldencoin-official,Electronic-Gulden-Foundation/egulden,litecoin-project/litecore-litecoin,gjhiggins/vcoincore,oklink-dev/bitcoin,ShadowMyst/creativechain-core,reorder/viacoin,bdelzell/creditcoin-org-creditcoin,HashUnlimited/Einsteinium-Unlimited,magacoin/magacoin,Electronic-Gulden-Foundation/egulden,emc2foundation/einsteinium,experiencecoin/experiencecoin,cannabiscoindev/cannabiscoin420,zcoinofficial/zcoin,zetacoin/zetacoin,reorder/viacoin,metacoin/florincoin,bitcoinplusorg/xbcwalletsource,EntropyFactory/creativechain-core | contrib/zmq/zmq_sub.py | contrib/zmq/zmq_sub.py | #!/usr/bin/env python2
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import array
import binascii
import zmq
import struct
port = 28332
zmqContext = zmq.Context()
zmqSubSocket = zmqContext.socket(zmq.SUB)
zmqSubSocket.setsockopt(zmq.SUBSCRIBE, "hashblock")
zmqSubSocket.setsockopt(zmq.SUBSCRIBE, "hashtx")
zmqSubSocket.setsockopt(zmq.SUBSCRIBE, "rawblock")
zmqSubSocket.setsockopt(zmq.SUBSCRIBE, "rawtx")
zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)
try:
while True:
msg = zmqSubSocket.recv_multipart()
topic = str(msg[0])
body = msg[1]
sequence = "Unknown";
if len(msg[-1]) == 4:
msgSequence = struct.unpack('<I', msg[-1])[-1]
sequence = str(msgSequence)
if topic == "hashblock":
print '- HASH BLOCK ('+sequence+') -'
print binascii.hexlify(body)
elif topic == "hashtx":
print '- HASH TX ('+sequence+') -'
print binascii.hexlify(body)
elif topic == "rawblock":
print '- RAW BLOCK HEADER ('+sequence+') -'
print binascii.hexlify(body[:80])
elif topic == "rawtx":
print '- RAW TX ('+sequence+') -'
print binascii.hexlify(body)
except KeyboardInterrupt:
zmqContext.destroy()
| #!/usr/bin/env python2
import array
import binascii
import zmq
import struct
port = 28332
zmqContext = zmq.Context()
zmqSubSocket = zmqContext.socket(zmq.SUB)
zmqSubSocket.setsockopt(zmq.SUBSCRIBE, "hashblock")
zmqSubSocket.setsockopt(zmq.SUBSCRIBE, "hashtx")
zmqSubSocket.setsockopt(zmq.SUBSCRIBE, "rawblock")
zmqSubSocket.setsockopt(zmq.SUBSCRIBE, "rawtx")
zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)
try:
while True:
msg = zmqSubSocket.recv_multipart()
topic = str(msg[0])
body = msg[1]
sequence = "Unknown";
if len(msg[-1]) == 4:
msgSequence = struct.unpack('<I', msg[-1])[-1]
sequence = str(msgSequence)
if topic == "hashblock":
print '- HASH BLOCK ('+sequence+') -'
print binascii.hexlify(body)
elif topic == "hashtx":
print '- HASH TX ('+sequence+') -'
print binascii.hexlify(body)
elif topic == "rawblock":
print '- RAW BLOCK HEADER ('+sequence+') -'
print binascii.hexlify(body[:80])
elif topic == "rawtx":
print '- RAW TX ('+sequence+') -'
print binascii.hexlify(body)
except KeyboardInterrupt:
zmqContext.destroy()
| mit | Python |
2fdac02fe93f4aa4f25c9ae4dadfb7325e7f7bc6 | Resolve flask-wtf deprecation warning | der-michik/c3bottles,der-michik/c3bottles,der-michik/c3bottles,der-michik/c3bottles | controller/__init__.py | controller/__init__.py | #!/usr/bin/python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_wtf import CSRFProtect
c3bottles = Flask(__name__,
static_folder="../static",
template_folder="../templates"
)
# We need to set this here to prevent the depreciation warning
c3bottles.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
def load_config():
c3bottles.config.from_object("config")
db = SQLAlchemy(c3bottles)
lm = LoginManager(c3bottles)
csrf = CSRFProtect(c3bottles)
# Trim and strip blocks in jinja2 so no unnecessary
# newlines and tabs appear in the output:
c3bottles.jinja_env.trim_blocks = True
c3bottles.jinja_env.lstrip_blocks = True
from view.api import api
from view.main import index, faq, dp_list, dp_map, dp_view
from view.create import create_dp
from view.edit import edit_dp
from view.report import report
from view.visit import visit
from view.user import login, logout
from view.statistics import stats
c3bottles.register_blueprint(api)
c3bottles.register_blueprint(stats)
# vim: set expandtab ts=4 sw=4:
| #!/usr/bin/python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_wtf import CsrfProtect
c3bottles = Flask(__name__,
static_folder="../static",
template_folder="../templates"
)
# We need to set this here to prevent the depreciation warning
c3bottles.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
def load_config():
c3bottles.config.from_object("config")
db = SQLAlchemy(c3bottles)
lm = LoginManager(c3bottles)
csrf = CsrfProtect(c3bottles)
# Trim and strip blocks in jinja2 so no unnecessary
# newlines and tabs appear in the output:
c3bottles.jinja_env.trim_blocks = True
c3bottles.jinja_env.lstrip_blocks = True
from view.api import api
from view.main import index, faq, dp_list, dp_map, dp_view
from view.create import create_dp
from view.edit import edit_dp
from view.report import report
from view.visit import visit
from view.user import login, logout
from view.statistics import stats
c3bottles.register_blueprint(api)
c3bottles.register_blueprint(stats)
# vim: set expandtab ts=4 sw=4:
| mit | Python |
c131a725af7e27fd8b80683e6af1ddfc986ca2c4 | Add mpfr 4.0.2 (#13091) | iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/mpfr/package.py | var/spack/repos/builtin/packages/mpfr/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Mpfr(AutotoolsPackage):
"""The MPFR library is a C library for multiple-precision
floating-point computations with correct rounding."""
homepage = "https://www.mpfr.org/"
url = "https://ftpmirror.gnu.org/mpfr/mpfr-4.0.2.tar.bz2"
version('4.0.2', sha256='c05e3f02d09e0e9019384cdd58e0f19c64e6db1fd6f5ecf77b4b1c61ca253acc')
version('4.0.1', '8c21d8ac7460493b2b9f3ef3cc610454')
version('4.0.0', 'ef619f3bb68039e35c4a219e06be72d0')
version('3.1.6', '320c28198def956aeacdb240b46b8969')
version('3.1.5', 'b1d23a55588e3b2a13e3be66bc69fd8d')
version('3.1.4', 'b8a2f6b0e68bef46e53da2ac439e1cf4')
version('3.1.3', '5fdfa3cfa5c86514ee4a241a1affa138')
version('3.1.2', 'ee2c3ac63bf0c2359bf08fc3ee094c19')
# mpir is a drop-in replacement for gmp
depends_on('gmp@4.1:') # 4.2.3 or higher is recommended
depends_on('gmp@5.0:', when='@4.0.0:') # https://www.mpfr.org/mpfr-4.0.0/
# Check the Bugs section of old release pages for patches.
# https://www.mpfr.org/mpfr-X.Y.Z/#bugs
patches = {
'4.0.2': 'f2d2a530acb5e70e1a9d5b80881dbb4a504d56535c4bc103d83e0bb630172029',
'4.0.1': '5230aab653fa8675fc05b5bdd3890e071e8df49a92a9d58c4284024affd27739',
'3.1.6': '66a5d58364113a21405fc53f4a48f4e8',
'3.1.5': '1dc5fe65feb5607b89fe0f410d53b627',
'3.1.4': 'd124381573404fe83654c7d5a79aeabf',
'3.1.3': 'ebd1d835e0ae2fd8a9339210ccd1d0a8',
'3.1.2': '9f96a5c7cac1d6cd983ed9cf7d997074',
}
for ver, checksum in patches.items():
patch('https://www.mpfr.org/mpfr-{0}/allpatches'.format(ver),
when='@' + ver, sha256=checksum)
def configure_args(self):
args = [
'--with-gmp=' + self.spec['gmp'].prefix,
]
return args
| # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Mpfr(AutotoolsPackage):
"""The MPFR library is a C library for multiple-precision
floating-point computations with correct rounding."""
homepage = "http://www.mpfr.org"
url = "https://ftpmirror.gnu.org/mpfr/mpfr-4.0.1.tar.bz2"
version('4.0.1', '8c21d8ac7460493b2b9f3ef3cc610454')
version('4.0.0', 'ef619f3bb68039e35c4a219e06be72d0')
version('3.1.6', '320c28198def956aeacdb240b46b8969')
version('3.1.5', 'b1d23a55588e3b2a13e3be66bc69fd8d')
version('3.1.4', 'b8a2f6b0e68bef46e53da2ac439e1cf4')
version('3.1.3', '5fdfa3cfa5c86514ee4a241a1affa138')
version('3.1.2', 'ee2c3ac63bf0c2359bf08fc3ee094c19')
# mpir is a drop-in replacement for gmp
depends_on('gmp@4.1:') # 4.2.3 or higher is recommended
depends_on('gmp@5.0:', when='@4.0.0:') # http://www.mpfr.org/mpfr-4.0.0/
# Check the Bugs section of old release pages for patches.
# http://www.mpfr.org/mpfr-X.Y.Z/#bugs
patches = {
'3.1.6': '66a5d58364113a21405fc53f4a48f4e8',
'3.1.5': '1dc5fe65feb5607b89fe0f410d53b627',
'3.1.4': 'd124381573404fe83654c7d5a79aeabf',
'3.1.3': 'ebd1d835e0ae2fd8a9339210ccd1d0a8',
'3.1.2': '9f96a5c7cac1d6cd983ed9cf7d997074',
}
for ver, checksum in patches.items():
patch('http://www.mpfr.org/mpfr-{0}/allpatches'.format(ver),
when='@' + ver, sha256=checksum)
def configure_args(self):
args = [
'--with-gmp=' + self.spec['gmp'].prefix,
]
return args
| lgpl-2.1 | Python |
6fd8c2b1c1ee59820ce26474c9504b514d325106 | Update scanopy.py | nyholmniklas/scanopy | Scanopy/scanopy.py | Scanopy/scanopy.py | from gui import *
from scanner import *
if __name__ == '__main__':
scanner = Scanner()
gui_thread = Gui(scanner)
gui_thread.run()
| from gui import *
from scanner import *
if __name__ == '__main__':
scanner = Scanner()
gui_thread = Gui(scanner)
gui_thread.start() | mit | Python |
35df1104c0df6a89e083b77a82cca44b7ecbbfd9 | Print a log message every 1000 inserted URLs | berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud | mediacloud/mediawords/util/sitemap/media.py | mediacloud/mediawords/util/sitemap/media.py | from mediawords.db import DatabaseHandler
from mediawords.util.log import create_logger
from mediawords.util.sitemap.tree import sitemap_tree_for_homepage
log = create_logger(__name__)
# FIXME add test for this function
def fetch_sitemap_pages_for_media_id(db: DatabaseHandler, media_id: int) -> None:
"""Fetch and store all pages (news stories or not) from media's sitemap tree."""
media = db.find_by_id(table='media', object_id=media_id)
if not media:
raise Exception("Unable to find media with ID {}".format(media_id))
media_url = media['url']
log.info("Fetching sitemap pages for media ID {} ({})...".format(media_id, media_url))
sitemaps = sitemap_tree_for_homepage(homepage_url=media_url)
pages = sitemaps.all_pages()
log.info("Fetched {} pages for media ID {} ({}).".format(len(pages), media_id, media_url))
log.info("Storing {} sitemap pages for media ID {} ({})...".format(len(pages), media_id, media_url))
insert_counter = 0
for page in pages:
db.query("""
INSERT INTO media_sitemap_pages (
media_id, url, last_modified, change_frequency, priority,
news_title, news_publish_date
) VALUES (
%(media_id)s, %(url)s, %(last_modified)s, %(change_frequency)s, %(priority)s,
%(news_title)s, %(news_publish_date)s
)
ON CONFLICT (url) DO NOTHING
""", {
'media_id': media_id,
'url': page.url,
'last_modified': page.last_modified,
'change_frequency': page.change_frequency.value if page.change_frequency is not None else None,
'priority': page.priority,
'news_title': page.news_story.title if page.news_story is not None else None,
'news_publish_date': page.news_story.publish_date if page.news_story is not None else None,
})
insert_counter += 1
if insert_counter % 1000 == 0:
log.info("Inserted {} / {} URLs...".format(insert_counter, len(pages)))
log.info("Done storing {} sitemap pages for media ID {} ({}).".format(len(pages), media_id, media_url))
| from mediawords.db import DatabaseHandler
from mediawords.util.log import create_logger
from mediawords.util.sitemap.tree import sitemap_tree_for_homepage
log = create_logger(__name__)
# FIXME add test for this function
def fetch_sitemap_pages_for_media_id(db: DatabaseHandler, media_id: int) -> None:
"""Fetch and store all pages (news stories or not) from media's sitemap tree."""
media = db.find_by_id(table='media', object_id=media_id)
if not media:
raise Exception("Unable to find media with ID {}".format(media_id))
media_url = media['url']
log.info("Fetching sitemap pages for media ID {} ({})...".format(media_id, media_url))
sitemaps = sitemap_tree_for_homepage(homepage_url=media_url)
pages = sitemaps.all_pages()
log.info("Fetched {} pages for media ID {} ({}).".format(len(pages), media_id, media_url))
log.info("Storing {} sitemap pages for media ID {} ({})...".format(len(pages), media_id, media_url))
for page in pages:
db.query("""
INSERT INTO media_sitemap_pages (
media_id, url, last_modified, change_frequency, priority,
news_title, news_publish_date
) VALUES (
%(media_id)s, %(url)s, %(last_modified)s, %(change_frequency)s, %(priority)s,
%(news_title)s, %(news_publish_date)s
)
ON CONFLICT (url) DO NOTHING
""", {
'media_id': media_id,
'url': page.url,
'last_modified': page.last_modified,
'change_frequency': page.change_frequency.value if page.change_frequency is not None else None,
'priority': page.priority,
'news_title': page.news_story.title if page.news_story is not None else None,
'news_publish_date': page.news_story.publish_date if page.news_story is not None else None,
})
log.info("Done storing {} sitemap pages for media ID {} ({}).".format(len(pages), media_id, media_url))
| agpl-3.0 | Python |
64313aabb821719f86f331b3f83cdf158344f5c7 | Improve documentation | meetmangukiya/coala-bears,chriscoyfish/coala-bears,yashtrivedi96/coala-bears,dosarudaniel/coala-bears,naveentata/coala-bears,madhukar01/coala-bears,ku3o/coala-bears,incorrectusername/coala-bears,coala-analyzer/coala-bears,ankit01ojha/coala-bears,madhukar01/coala-bears,Vamshi99/coala-bears,damngamerz/coala-bears,kaustubhhiware/coala-bears,SanketDG/coala-bears,yash-nisar/coala-bears,coala/coala-bears,coala/coala-bears,Asnelchristian/coala-bears,srisankethu/coala-bears,naveentata/coala-bears,vijeth-aradhya/coala-bears,incorrectusername/coala-bears,coala-analyzer/coala-bears,LWJensen/coala-bears,gs0510/coala-bears,refeed/coala-bears,LWJensen/coala-bears,SanketDG/coala-bears,horczech/coala-bears,refeed/coala-bears,refeed/coala-bears,arjunsinghy96/coala-bears,sounak98/coala-bears,vijeth-aradhya/coala-bears,ku3o/coala-bears,aptrishu/coala-bears,damngamerz/coala-bears,arjunsinghy96/coala-bears,Shade5/coala-bears,shreyans800755/coala-bears,Shade5/coala-bears,chriscoyfish/coala-bears,Vamshi99/coala-bears,dosarudaniel/coala-bears,sims1253/coala-bears,incorrectusername/coala-bears,Shade5/coala-bears,ankit01ojha/coala-bears,ankit01ojha/coala-bears,naveentata/coala-bears,horczech/coala-bears,seblat/coala-bears,chriscoyfish/coala-bears,arjunsinghy96/coala-bears,sims1253/coala-bears,Asnelchristian/coala-bears,LWJensen/coala-bears,refeed/coala-bears,shreyans800755/coala-bears,shreyans800755/coala-bears,ankit01ojha/coala-bears,horczech/coala-bears,damngamerz/coala-bears,Asnelchristian/coala-bears,ku3o/coala-bears,ankit01ojha/coala-bears,shreyans800755/coala-bears,refeed/coala-bears,meetmangukiya/coala-bears,Vamshi99/coala-bears,arjunsinghy96/coala-bears,yash-nisar/coala-bears,sims1253/coala-bears,yash-nisar/coala-bears,SanketDG/coala-bears,srisankethu/coala-bears,meetmangukiya/coala-bears,meetmangukiya/coala-bears,ku3o/coala-bears,damngamerz/coala-bears,vijeth-aradhya/coala-bears,seblat/coala-bears,horczech/coala-bears,meetmangukiya/coala-bears,srisankethu/coala-bears,damngamerz/coala-bears,coala-analyzer/coala-bears,mr-karan/coala-bears,ankit01ojha/coala-bears,aptrishu/coala-bears,ankit01ojha/coala-bears,madhukar01/coala-bears,meetmangukiya/coala-bears,seblat/coala-bears,ku3o/coala-bears,damngamerz/coala-bears,arjunsinghy96/coala-bears,coala/coala-bears,refeed/coala-bears,Asnelchristian/coala-bears,damngamerz/coala-bears,sounak98/coala-bears,srisankethu/coala-bears,coala/coala-bears,aptrishu/coala-bears,srisankethu/coala-bears,naveentata/coala-bears,ankit01ojha/coala-bears,coala/coala-bears,sims1253/coala-bears,LWJensen/coala-bears,kaustubhhiware/coala-bears,shreyans800755/coala-bears,mr-karan/coala-bears,sounak98/coala-bears,damngamerz/coala-bears,seblat/coala-bears,vijeth-aradhya/coala-bears,kaustubhhiware/coala-bears,meetmangukiya/coala-bears,gs0510/coala-bears,arjunsinghy96/coala-bears,horczech/coala-bears,dosarudaniel/coala-bears,coala-analyzer/coala-bears,arjunsinghy96/coala-bears,shreyans800755/coala-bears,sounak98/coala-bears,coala-analyzer/coala-bears,LWJensen/coala-bears,SanketDG/coala-bears,naveentata/coala-bears,kaustubhhiware/coala-bears,horczech/coala-bears,gs0510/coala-bears,incorrectusername/coala-bears,sounak98/coala-bears,SanketDG/coala-bears,aptrishu/coala-bears,seblat/coala-bears,srisankethu/coala-bears,yash-nisar/coala-bears,Asnelchristian/coala-bears,naveentata/coala-bears,SanketDG/coala-bears,Vamshi99/coala-bears,dosarudaniel/coala-bears,coala/coala-bears,vijeth-aradhya/coala-bears,meetmangukiya/coala-bears,kaustubhhiware/coala-bears,aptrishu/coala-bears,yash-nisar/coala-bears,seblat/coala-bears,yashtrivedi96/coala-bears,arjunsinghy96/coala-bears,ankit01ojha/coala-bears,refeed/coala-bears,incorrectusername/coala-bears,ku3o/coala-bears,naveentata/coala-bears,vijeth-aradhya/coala-bears,madhukar01/coala-bears,ankit01ojha/coala-bears,dosarudaniel/coala-bears,Shade5/coala-bears,mr-karan/coala-bears,gs0510/coala-bears,dosarudaniel/coala-bears,yashtrivedi96/coala-bears,Asnelchristian/coala-bears,srisankethu/coala-bears,yash-nisar/coala-bears,Vamshi99/coala-bears,SanketDG/coala-bears,Asnelchristian/coala-bears,yashtrivedi96/coala-bears,horczech/coala-bears,yash-nisar/coala-bears,incorrectusername/coala-bears,gs0510/coala-bears,coala/coala-bears,Shade5/coala-bears,naveentata/coala-bears,mr-karan/coala-bears,refeed/coala-bears,yashtrivedi96/coala-bears,yash-nisar/coala-bears,coala/coala-bears,srisankethu/coala-bears,aptrishu/coala-bears,dosarudaniel/coala-bears,yashtrivedi96/coala-bears,Vamshi99/coala-bears,gs0510/coala-bears,shreyans800755/coala-bears,Vamshi99/coala-bears,refeed/coala-bears,LWJensen/coala-bears,aptrishu/coala-bears,Vamshi99/coala-bears,incorrectusername/coala-bears,Vamshi99/coala-bears,Shade5/coala-bears,shreyans800755/coala-bears,refeed/coala-bears,chriscoyfish/coala-bears,sounak98/coala-bears,vijeth-aradhya/coala-bears,sounak98/coala-bears,mr-karan/coala-bears,Shade5/coala-bears,coala/coala-bears,LWJensen/coala-bears,madhukar01/coala-bears,incorrectusername/coala-bears,aptrishu/coala-bears,coala-analyzer/coala-bears,shreyans800755/coala-bears,incorrectusername/coala-bears,srisankethu/coala-bears,srisankethu/coala-bears,madhukar01/coala-bears,horczech/coala-bears,meetmangukiya/coala-bears,LWJensen/coala-bears,arjunsinghy96/coala-bears,mr-karan/coala-bears,Asnelchristian/coala-bears,madhukar01/coala-bears,sims1253/coala-bears,coala-analyzer/coala-bears,chriscoyfish/coala-bears,srisankethu/coala-bears,coala-analyzer/coala-bears,kaustubhhiware/coala-bears,shreyans800755/coala-bears,Vamshi99/coala-bears,SanketDG/coala-bears,mr-karan/coala-bears,ankit01ojha/coala-bears,SanketDG/coala-bears,dosarudaniel/coala-bears,vijeth-aradhya/coala-bears,yash-nisar/coala-bears,damngamerz/coala-bears,madhukar01/coala-bears,chriscoyfish/coala-bears,horczech/coala-bears,gs0510/coala-bears,damngamerz/coala-bears,ku3o/coala-bears,coala/coala-bears,chriscoyfish/coala-bears,coala-analyzer/coala-bears,chriscoyfish/coala-bears,Asnelchristian/coala-bears,horczech/coala-bears,kaustubhhiware/coala-bears,yash-nisar/coala-bears,sounak98/coala-bears,seblat/coala-bears,yash-nisar/coala-bears,Vamshi99/coala-bears,vijeth-aradhya/coala-bears,dosarudaniel/coala-bears,mr-karan/coala-bears,kaustubhhiware/coala-bears,yashtrivedi96/coala-bears,damngamerz/coala-bears,coala/coala-bears,aptrishu/coala-bears,shreyans800755/coala-bears,Shade5/coala-bears,Shade5/coala-bears,ku3o/coala-bears,kaustubhhiware/coala-bears,aptrishu/coala-bears,ku3o/coala-bears,naveentata/coala-bears,madhukar01/coala-bears,refeed/coala-bears,aptrishu/coala-bears,yashtrivedi96/coala-bears,sims1253/coala-bears,horczech/coala-bears,sims1253/coala-bears,sounak98/coala-bears,seblat/coala-bears,LWJensen/coala-bears,sims1253/coala-bears,gs0510/coala-bears,gs0510/coala-bears,yashtrivedi96/coala-bears,coala/coala-bears | bears/configfiles/DockerfileLintBear.py | bears/configfiles/DockerfileLintBear.py | import json
from coalib.bearlib.abstractions.Linter import linter
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
from coalib.results.Result import Result
@linter(executable='dockerfile_lint')
class DockerfileLintBear:
"""
Check file syntax as well as arbitrary semantic and best practice
in Dockerfiles. it also checks LABEL rules against docker images.
Uses ``dockerfile_lint`` to provide the analysis.
See <https://github.com/projectatomic/dockerfile_lint#dockerfile-lint> for
more information .
"""
severity_map = {
"error": RESULT_SEVERITY.MAJOR,
"warn": RESULT_SEVERITY.NORMAL,
"info": RESULT_SEVERITY.INFO}
@staticmethod
def create_arguments(filename, file, config_file):
return '--json', '-f', filename
def process_output(self, output, filename, file):
output = json.loads(output)
for severity in output:
if severity == "summary":
continue
for issue in output[severity]["data"]:
yield Result.from_values(
origin=self,
message=issue["message"],
file=filename,
severity=self.severity_map[issue["level"]],
line=issue["line"])
| import json
from coalib.bearlib.abstractions.Linter import linter
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
from coalib.results.Result import Result
@linter(executable='dockerfile_lint')
class DockerfileLintBear:
"""
Checks the given file with ``dockerfile_lint``.
"""
severity_map = {
"error": RESULT_SEVERITY.MAJOR,
"warn": RESULT_SEVERITY.NORMAL,
"info": RESULT_SEVERITY.INFO}
@staticmethod
def create_arguments(filename, file, config_file):
return '--json', '-f', filename
def process_output(self, output, filename, file):
output = json.loads(output)
for severity in output:
if severity == "summary":
continue
for issue in output[severity]["data"]:
yield Result.from_values(
origin=self,
message=issue["message"],
file=filename,
severity=self.severity_map[issue["level"]],
line=issue["line"])
| agpl-3.0 | Python |
81dc3cf445046290381e94ccf8f20c32f419dde2 | Fix datetime creation | scorphus/okapi | okapi/api.py | okapi/api.py | """
okapi.api
~~~~~~~~~
This module implements the Requests API while storing valuable information into mongodb.
"""
import datetime
import requests
import time
import urlparse
from pymongo import MongoClient
# TODO:
# Depends on how we want to calculate the time to
# receieve the request form Home Depots API.
# There are several choices.
# Time.time, Time.clock, and a class from request called elapsed
# I have a test file that makes it seem that time.clock is fastest but is it most accurate?!?
# I have used time.clock for now
class Api(object):
def __init__(self, project_name, host, port):
""" initialization of class api"""
self.host = host
self.port = port
self.project_name = project_name
client = MongoClient(self.host, self.port)
self.db = client.okapi
def request(self, method, url, **kwargs):
"""calls a method of request library while storing info about api call into mongo db"""
start = time.clock()
res = requests.request(method, url, **kwargs)
end = time.clock()
content = ''
if not res.ok:
content = res.content
date = datetime.datetime.utcnow()
host = urlparse.urlparse(res.url)
data = {'content': content,
'date': date,
'host': host.hostname,
'method': method,
'project_name': self.project_name,
'response_time': (end - start),
'status_code': res.status_code,
'url': res.url,
}
datas = self.db.datas
data_id = datas.insert(data)
return res
def get(self, url, **kwargs):
return self.request('GET', url, **kwargs)
def delete(self, url, **kwargs):
return self.request('DELETE', url, **kwargs)
def post(self, url, **kwargs):
return self.request('POST', url, **kwargs)
def put(self, url, **kwargs):
return self.request('PUT', url, **kwargs)
| """
okapi.api
~~~~~~~~~
This module implements the Requests API while storing valuable information into mongodb.
"""
import datetime
import requests
import time
import urlparse
from pymongo import MongoClient
# TODO:
# Depends on how we want to calculate the time to
# receieve the request form Home Depots API.
# There are several choices.
# Time.time, Time.clock, and a class from request called elapsed
# I have a test file that makes it seem that time.clock is fastest but is it most accurate?!?
# I have used time.clock for now
class Api(object):
def __init__(self, project_name, host, port):
""" initialization of class api"""
self.host = host
self.port = port
self.project_name = project_name
client = MongoClient(self.host, self.port)
self.db = client.okapi
def request(self, method, url, **kwargs):
"""calls a method of request library while storing info about api call into mongo db"""
start = time.clock()
res = requests.request(method, url, **kwargs)
end = time.clock()
content = ''
if not res.ok:
content = res.content
date = datetime.date.today().utcnow()
host = urlparse.urlparse(res.url)
data = {'content': content,
'date': date,
'host': host.hostname,
'method': method,
'project_name': self.project_name,
'response_time': (end - start),
'status_code': res.status_code,
'url': res.url,
}
datas = self.db.datas
data_id = datas.insert(data)
return res
def get(self, url, **kwargs):
return self.request('GET', url, **kwargs)
def delete(self, url, **kwargs):
return self.request('DELETE', url, **kwargs)
def post(self, url, **kwargs):
return self.request('POST', url, **kwargs)
def put(self, url, **kwargs):
return self.request('PUT', url, **kwargs)
| bsd-3-clause | Python |
d1514b6be184915e0f1227e2761db00945d0e7b4 | use the new interface to attach gdb | HPI-SWA-Lab/RSqueak,HPI-SWA-Lab/RSqueak,HPI-SWA-Lab/RSqueak,HPI-SWA-Lab/RSqueak | spyvm/plugins/vmdebugging.py | spyvm/plugins/vmdebugging.py | import os
from spyvm import model, error
from spyvm.plugins.plugin import Plugin
from spyvm.util.system import IS_WINDOWS
DebuggingPlugin = Plugin()
DebuggingPlugin.userdata['stop_ui'] = False
def stop_ui_process():
DebuggingPlugin.userdata['stop_ui'] = True
# @DebuggingPlugin.expose_primitive(unwrap_spec=[object])
# def trace(interp, s_frame, w_rcvr):
# interp.trace = True
# return w_rcvr
# @DebuggingPlugin.expose_primitive(unwrap_spec=[object])
# def untrace(interp, s_frame, w_rcvr):
# interp.trace = False
# return w_rcvr
if IS_WINDOWS:
def fork():
raise NotImplementedError("fork on windows")
else:
fork = os.fork
@DebuggingPlugin.expose_primitive(unwrap_spec=[object])
def trace_proxy(interp, s_frame, w_rcvr):
interp.trace_proxy.activate()
return w_rcvr
@DebuggingPlugin.expose_primitive(unwrap_spec=[object])
def untrace_proxy(interp, s_frame, w_rcvr):
interp.trace_proxy.deactivate()
return w_rcvr
@DebuggingPlugin.expose_primitive(unwrap_spec=[object])
def halt(interp, s_frame, w_rcvr):
from rpython.rlib.debug import attach_gdb
print s_frame.print_stack()
attach_gdb()
@DebuggingPlugin.expose_primitive(unwrap_spec=[object])
def isRSqueak(interp, s_frame, w_rcvr):
return interp.space.w_true
@DebuggingPlugin.expose_primitive(unwrap_spec=[object])
def isVMTranslated(interp, s_frame, w_rcvr):
from rpython.rlib.objectmodel import we_are_translated
if we_are_translated():
return interp.space.w_true
else:
return interp.space.w_false
@DebuggingPlugin.expose_primitive(unwrap_spec=[object, object])
def debugPrint(interp, s_frame, w_rcvr, w_string):
if not isinstance(w_string, model.W_BytesObject):
raise error.PrimitiveFailedError()
print interp.space.unwrap_string(w_string).replace('\r', '\n')
return w_rcvr
@DebuggingPlugin.expose_primitive(unwrap_spec=[object])
def stopUIProcess(interp, s_frame, w_rcvr):
if DebuggingPlugin.userdata.get('stop_ui', False):
return interp.space.w_true
else:
return interp.space.w_false
| import os
from spyvm import model, error
from spyvm.plugins.plugin import Plugin
from spyvm.util.system import IS_WINDOWS
DebuggingPlugin = Plugin()
DebuggingPlugin.userdata['stop_ui'] = False
def stop_ui_process():
DebuggingPlugin.userdata['stop_ui'] = True
# @DebuggingPlugin.expose_primitive(unwrap_spec=[object])
# def trace(interp, s_frame, w_rcvr):
# interp.trace = True
# return w_rcvr
# @DebuggingPlugin.expose_primitive(unwrap_spec=[object])
# def untrace(interp, s_frame, w_rcvr):
# interp.trace = False
# return w_rcvr
if IS_WINDOWS:
def fork():
raise NotImplementedError("fork on windows")
else:
fork = os.fork
@DebuggingPlugin.expose_primitive(unwrap_spec=[object])
def trace_proxy(interp, s_frame, w_rcvr):
interp.trace_proxy.activate()
return w_rcvr
@DebuggingPlugin.expose_primitive(unwrap_spec=[object])
def untrace_proxy(interp, s_frame, w_rcvr):
interp.trace_proxy.deactivate()
return w_rcvr
@DebuggingPlugin.expose_primitive(unwrap_spec=[object])
def halt(interp, s_frame, w_rcvr):
print s_frame.print_stack()
from rpython.config.translationoption import get_translation_config
from rpython.rlib.objectmodel import we_are_translated
if not we_are_translated() or get_translation_config().translation.lldebug or get_translation_config().translation.lldebug0:
import pdb; pdb.set_trace()
raise error.PrimitiveFailedError
@DebuggingPlugin.expose_primitive(unwrap_spec=[object])
def isRSqueak(interp, s_frame, w_rcvr):
return interp.space.w_true
@DebuggingPlugin.expose_primitive(unwrap_spec=[object])
def isVMTranslated(interp, s_frame, w_rcvr):
from rpython.rlib.objectmodel import we_are_translated
if we_are_translated():
return interp.space.w_true
else:
return interp.space.w_false
@DebuggingPlugin.expose_primitive(unwrap_spec=[object, object])
def debugPrint(interp, s_frame, w_rcvr, w_string):
if not isinstance(w_string, model.W_BytesObject):
raise error.PrimitiveFailedError()
print interp.space.unwrap_string(w_string).replace('\r', '\n')
return w_rcvr
@DebuggingPlugin.expose_primitive(unwrap_spec=[object])
def stopUIProcess(interp, s_frame, w_rcvr):
if DebuggingPlugin.userdata.get('stop_ui', False):
return interp.space.w_true
else:
return interp.space.w_false
| bsd-3-clause | Python |
313cafa9320a3842eb91186a1ffe225e6d3a025d | Add __version__ (#372) | mlflow/mlflow,mlflow/mlflow,mlflow/mlflow,mlflow/mlflow,mlflow/mlflow,mlflow/mlflow,mlflow/mlflow | mlflow/__init__.py | mlflow/__init__.py | """
Provides the MLflow fluent API, allowing management of an active MLflow run.
For example:
.. code:: python
import mlflow
mlflow.start_run()
mlflow.log_param("my", "param")
mlflow.log_metric("score", 100)
mlflow.end_run()
You can also use syntax like this:
.. code:: python
with mlflow.start_run() as run:
...
which will automatically terminate the run at the end of the block.
"""
from mlflow.version import VERSION as __version__
import os
# Filter annoying Cython warnings that serve no good purpose, and so before
# importing other modules.
# See: https://github.com/numpy/numpy/pull/432/commits/170ed4e33d6196d7
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed") # noqa: E402
warnings.filterwarnings("ignore", message="numpy.ufunc size changed") # noqa: E402
# pylint: disable=wrong-import-position
import mlflow.projects as projects # noqa
import mlflow.tracking as tracking # noqa
import mlflow.tracking.fluent
ActiveRun = mlflow.tracking.fluent.ActiveRun
log_param = mlflow.tracking.fluent.log_param
log_metric = mlflow.tracking.fluent.log_metric
set_tag = mlflow.tracking.fluent.set_tag
log_artifacts = mlflow.tracking.fluent.log_artifacts
log_artifact = mlflow.tracking.fluent.log_artifact
active_run = mlflow.tracking.fluent.active_run
start_run = mlflow.tracking.fluent.start_run
end_run = mlflow.tracking.fluent.end_run
get_artifact_uri = mlflow.tracking.fluent.get_artifact_uri
set_tracking_uri = tracking.set_tracking_uri
get_tracking_uri = tracking.get_tracking_uri
create_experiment = mlflow.tracking.fluent.create_experiment
run = projects.run
__all__ = ["ActiveRun", "log_param", "log_metric", "set_tag", "log_artifacts", "log_artifact",
"active_run", "start_run", "end_run", "get_artifact_uri", "set_tracking_uri",
"create_experiment", "run"]
| """
Provides the MLflow fluent API, allowing management of an active MLflow run.
For example:
.. code:: python
import mlflow
mlflow.start_run()
mlflow.log_param("my", "param")
mlflow.log_metric("score", 100)
mlflow.end_run()
You can also use syntax like this:
.. code:: python
with mlflow.start_run() as run:
...
which will automatically terminate the run at the end of the block.
"""
import os
# Filter annoying Cython warnings that serve no good purpose, and so before
# importing other modules.
# See: https://github.com/numpy/numpy/pull/432/commits/170ed4e33d6196d7
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed") # noqa: E402
warnings.filterwarnings("ignore", message="numpy.ufunc size changed") # noqa: E402
# pylint: disable=wrong-import-position
import mlflow.projects as projects # noqa
import mlflow.tracking as tracking # noqa
import mlflow.tracking.fluent
ActiveRun = mlflow.tracking.fluent.ActiveRun
log_param = mlflow.tracking.fluent.log_param
log_metric = mlflow.tracking.fluent.log_metric
set_tag = mlflow.tracking.fluent.set_tag
log_artifacts = mlflow.tracking.fluent.log_artifacts
log_artifact = mlflow.tracking.fluent.log_artifact
active_run = mlflow.tracking.fluent.active_run
start_run = mlflow.tracking.fluent.start_run
end_run = mlflow.tracking.fluent.end_run
get_artifact_uri = mlflow.tracking.fluent.get_artifact_uri
set_tracking_uri = tracking.set_tracking_uri
get_tracking_uri = tracking.get_tracking_uri
create_experiment = mlflow.tracking.fluent.create_experiment
run = projects.run
__all__ = ["ActiveRun", "log_param", "log_metric", "set_tag", "log_artifacts", "log_artifact",
"active_run", "start_run", "end_run", "get_artifact_uri", "set_tracking_uri",
"create_experiment", "run"]
| apache-2.0 | Python |
f45f1b5a70473d36ac6845db4e1ebf050ec0d9ea | Change u'id' from unicode to ascii string | iCHAIT/whats-fresh-api,osu-cass/whats-fresh-api,osu-cass/whats-fresh-api,iCHAIT/whats-fresh-api,iCHAIT/whats-fresh-api,osu-cass/whats-fresh-api,iCHAIT/whats-fresh-api,osu-cass/whats-fresh-api | project_fish/whats_fresh/tests/test_vendor_model.py | project_fish/whats_fresh/tests/test_vendor_model.py | from django.test import TestCase
from django.conf import settings
from phonenumber_field.modelfields import PhoneNumberField
from whats_fresh.models import *
from django.contrib.gis.db import models
import os
import time
import sys
import datetime
class VendorTestCase(TestCase):
def setUp(self):
self.expected_fields = {
'name': models.TextField,
'description': models.TextField,
'street': models.TextField,
'city': models.TextField,
'state': models.TextField,
'zip': models.TextField,
'location_description': models.TextField,
'contact_name': models.TextField,
'lat': models.FloatField,
'long': models.FloatField,
'website': models.TextField,
'email': models.EmailField,
'phone': PhoneNumberField,
'created': models.DateTimeField,
'modified': models.DateTimeField,
'id': models.AutoField
}
def test_fields_exist(self):
model = models.get_model('whats_fresh', 'Vendor')
for field, field_type in self.expected_fields.items():
self.assertEqual(
field_type, type(model._meta.get_field_by_name(field)[0]))
def test_no_additional_fields(self):
fields = Vendor._meta.get_all_field_names()
self.assertTrue(sorted(fields) == sorted(self.expected_fields.keys()))
def test_created_modified_fields(self):
self.assertTrue(Vendor._meta.get_field('modified').auto_now)
self.assertTrue(Vendor._meta.get_field('created').auto_now_add)
| from django.test import TestCase
from django.conf import settings
from phonenumber_field.modelfields import PhoneNumberField
from whats_fresh.models import *
from django.contrib.gis.db import models
import os
import time
import sys
import datetime
class VendorTestCase(TestCase):
def setUp(self):
self.expected_fields = {
'name': models.TextField,
'description': models.TextField,
'street': models.TextField,
'city': models.TextField,
'state': models.TextField,
'zip': models.TextField,
'location_description': models.TextField,
'contact_name': models.TextField,
'lat': models.FloatField,
'long': models.FloatField,
'website': models.TextField,
'email': models.EmailField,
'phone': PhoneNumberField,
'created': models.DateTimeField,
'modified': models.DateTimeField,
u'id': models.AutoField
}
def test_fields_exist(self):
model = models.get_model('whats_fresh', 'Vendor')
for field, field_type in self.expected_fields.items():
self.assertEqual(
field_type, type(model._meta.get_field_by_name(field)[0]))
def test_no_additional_fields(self):
fields = Vendor._meta.get_all_field_names()
self.assertTrue(sorted(fields) == sorted(self.expected_fields.keys()))
def test_created_modified_fields(self):
self.assertTrue(Vendor._meta.get_field('modified').auto_now)
self.assertTrue(Vendor._meta.get_field('created').auto_now_add)
| apache-2.0 | Python |
d25c1e53408c792205f10bf8e9e6b20fbc9eb836 | Remove doubled license header from blob_storage.py | lihui7115/ChromiumGStreamerBackend,ltilve/ChromiumGStreamerBackend,lihui7115/ChromiumGStreamerBackend,ltilve/ChromiumGStreamerBackend,lihui7115/ChromiumGStreamerBackend,CapOM/ChromiumGStreamerBackend,ltilve/ChromiumGStreamerBackend,CapOM/ChromiumGStreamerBackend,CapOM/ChromiumGStreamerBackend,lihui7115/ChromiumGStreamerBackend,ltilve/ChromiumGStreamerBackend,lihui7115/ChromiumGStreamerBackend,CapOM/ChromiumGStreamerBackend,CapOM/ChromiumGStreamerBackend,lihui7115/ChromiumGStreamerBackend,CapOM/ChromiumGStreamerBackend,ltilve/ChromiumGStreamerBackend,CapOM/ChromiumGStreamerBackend,lihui7115/ChromiumGStreamerBackend,ltilve/ChromiumGStreamerBackend,lihui7115/ChromiumGStreamerBackend,CapOM/ChromiumGStreamerBackend,ltilve/ChromiumGStreamerBackend,ltilve/ChromiumGStreamerBackend,ltilve/ChromiumGStreamerBackend,CapOM/ChromiumGStreamerBackend,lihui7115/ChromiumGStreamerBackend | tools/perf/benchmarks/blob_storage.py | tools/perf/benchmarks/blob_storage.py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core import perf_benchmark
from telemetry import benchmark
from telemetry.timeline import tracing_category_filter
from telemetry.web_perf import timeline_based_measurement
import page_sets
BLOB_CATEGORY = 'Blob'
TIMELINE_REQUIRED_CATEGORY = 'blink.console'
@benchmark.Disabled('reference', # http://crbug.com/496155
'android') # http://crbug.com/499325
class BlobStorage(perf_benchmark.PerfBenchmark):
"""Timeline based measurement benchmark for Blob Storage."""
page_set = page_sets.BlobWorkshopPageSet
def CreateTimelineBasedMeasurementOptions(self):
cat_filter = tracing_category_filter.CreateMinimalOverheadFilter()
cat_filter.AddIncludedCategory(BLOB_CATEGORY)
cat_filter.AddIncludedCategory(TIMELINE_REQUIRED_CATEGORY)
return timeline_based_measurement.Options(
overhead_level=cat_filter)
@classmethod
def Name(cls):
return 'blob_storage.blob_storage'
@classmethod
def ValueCanBeAddedPredicate(cls, value, is_first_result):
if ('blob-writes' not in value.name and
'blob-reads' not in value.name):
return False
return value.values != None
| # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core import perf_benchmark
from telemetry import benchmark
from telemetry.timeline import tracing_category_filter
from telemetry.web_perf import timeline_based_measurement
import page_sets
BLOB_CATEGORY = 'Blob'
TIMELINE_REQUIRED_CATEGORY = 'blink.console'
@benchmark.Disabled('reference', # http://crbug.com/496155
'android') # http://crbug.com/499325
class BlobStorage(perf_benchmark.PerfBenchmark):
"""Timeline based measurement benchmark for Blob Storage."""
page_set = page_sets.BlobWorkshopPageSet
def CreateTimelineBasedMeasurementOptions(self):
cat_filter = tracing_category_filter.CreateMinimalOverheadFilter()
cat_filter.AddIncludedCategory(BLOB_CATEGORY)
cat_filter.AddIncludedCategory(TIMELINE_REQUIRED_CATEGORY)
return timeline_based_measurement.Options(
overhead_level=cat_filter)
@classmethod
def Name(cls):
return 'blob_storage.blob_storage'
@classmethod
def ValueCanBeAddedPredicate(cls, value, is_first_result):
if ('blob-writes' not in value.name and
'blob-reads' not in value.name):
return False
return value.values != None
| bsd-3-clause | Python |
04be2f88383d6cdeb59d817c331cd406c25c0c8e | Fix task URL | Kitware/cumulus,Kitware/cumulus,cjh1/cumulus,cjh1/cumulus | cumulus/task/status.py | cumulus/task/status.py | from __future__ import absolute_import
from cumulus.starcluster.logging import StarClusterLogHandler, StarClusterCallWriteHandler, logstdout, StarClusterLogFilter
import cumulus.starcluster.logging
from cumulus.starcluster.tasks.celery import app
from cumulus.starcluster.tasks.common import _check_status
import cumulus
import requests
import os
import sys
import re
import traceback
from . import runner
from celery.exceptions import MaxRetriesExceededError
sleep_interval = 5
def _update_status(headers, task, status):
task['status'] = status
url = '%s/tasks/%s' % (cumulus.config.girder.baseUrl, task['_id'])
r = requests.patch(url, headers=headers, json=task)
_check_status(r)
@app.task(bind=True, max_retries=None)
def monitor_status(celery_task, token, task, spec, step, variables):
headers = {'Girder-Token': token}
try:
steps = spec['steps']
status_step = steps[step]
params = status_step['params']
if 'timeout' in params:
timeout = int(params['timeout'])
max_retries = timeout % sleep_interval
celery_task.max_retries = max_retries
next_step = step + 1
if next_step >= len(steps):
next_step = None
url = '%s/%s' % (cumulus.config.girder.baseUrl, params['url'])
status = requests.get(url, headers=headers)
_check_status(status)
status = status.json()
selector = params['selector']
selector_path = selector.split('.')
for key in selector_path:
if key in status:
status = status.get(key)
else:
raise Exception('Unable to extract status from \'%s\' using \'%s\'' % (status, selector))
if status in params['success']:
if next_step:
runner.run(token, task, spec, variables, next_step)
else:
_update_status(headers, task, 'complete')
elif status in params['failure']:
_update_status(headers, task, 'failure')
else:
celery_task.retry(throw=False, countdown=sleep_interval)
except MaxRetriesExceededError:
_update_status(headers, task, 'timeout')
except:
# Update task log
traceback.print_exc()
| from __future__ import absolute_import
from cumulus.starcluster.logging import StarClusterLogHandler, StarClusterCallWriteHandler, logstdout, StarClusterLogFilter
import cumulus.starcluster.logging
from cumulus.starcluster.tasks.celery import app
from cumulus.starcluster.tasks.common import _check_status
import cumulus
import requests
import os
import sys
import re
import traceback
from . import runner
from celery.exceptions import MaxRetriesExceededError
sleep_interval = 5
def _update_status(headers, task, status):
task['status'] = status
url = '%s/task/%s' % (cumulus.config.girder.baseUrl, task['_id'])
r = requests.patch(url, headers=headers, json=task)
_check_status(r)
@app.task(bind=True, max_retries=None)
def monitor_status(celery_task, token, task, spec, step, variables):
headers = {'Girder-Token': token}
try:
steps = spec['steps']
status_step = steps[step]
params = status_step['params']
if 'timeout' in params:
timeout = int(params['timeout'])
max_retries = timeout % sleep_interval
celery_task.max_retries = max_retries
next_step = step + 1
if next_step >= len(steps):
next_step = None
url = '%s/%s' % (cumulus.config.girder.baseUrl, params['url'])
status = requests.get(url, headers=headers)
_check_status(status)
status = status.json()
selector = params['selector']
selector_path = selector.split('.')
for key in selector_path:
if key in status:
status = status.get(key)
else:
raise Exception('Unable to extract status from \'%s\' using \'%s\'' % (status, selector))
if status in params['success']:
if next_step:
runner.run(token, task, spec, variables, next_step)
else:
_update_status(headers, task, 'complete')
elif status in params['failure']:
_update_status(headers, task, 'failure')
else:
celery_task.retry(throw=False, countdown=sleep_interval)
except MaxRetriesExceededError:
_update_status(headers, task, 'timeout')
except:
# Update task log
traceback.print_exc()
| apache-2.0 | Python |
1a5d480a251a78048cd920b9f5b615a787a7b90c | make test easier so that we can use qpbo | pystruct/pystruct,amueller/pystruct,wattlebird/pystruct,d-mittal/pystruct,massmutual/pystruct,wattlebird/pystruct,pystruct/pystruct,amueller/pystruct,d-mittal/pystruct,massmutual/pystruct | pystruct/tests/test_learners/test_frankwolfe_svm.py | pystruct/tests/test_learners/test_frankwolfe_svm.py | from tempfile import mkstemp
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import assert_less
from sklearn.datasets import load_iris
from pystruct.models import GridCRF, GraphCRF
from pystruct.datasets import generate_blocks_multinomial
from pystruct.learners import FrankWolfeSSVM
from pystruct.utils import SaveLogger, train_test_split
def test_multinomial_blocks_frankwolfe():
X, Y = generate_blocks_multinomial(n_samples=10, noise=0.5, seed=0)
crf = GridCRF(inference_method='qpbo')
clf = FrankWolfeSSVM(model=crf, C=1, max_iter=50, verbose=3)
clf.fit(X, Y)
Y_pred = clf.predict(X)
assert_array_equal(Y, Y_pred)
def test_multinomial_blocks_frankwolfe_batch():
X, Y = generate_blocks_multinomial(n_samples=10, noise=0.3, seed=0)
crf = GridCRF(inference_method='qpbo')
clf = FrankWolfeSSVM(model=crf, C=1, max_iter=500, verbose=3, batch_mode=True)
clf.fit(X, Y)
Y_pred = clf.predict(X)
assert_array_equal(Y, Y_pred)
def test_svm_as_crf_pickling_bcfw():
iris = load_iris()
X, y = iris.data, iris.target
X_ = [(np.atleast_2d(x), np.empty((0, 2), dtype=np.int)) for x in X]
Y = y.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X_, Y, random_state=1)
_, file_name = mkstemp()
pbl = GraphCRF(n_features=4, n_states=3, inference_method='unary')
logger = SaveLogger(file_name)
svm = FrankWolfeSSVM(pbl, C=10, logger=logger, max_iter=50)
svm.fit(X_train, y_train)
assert_less(.97, svm.score(X_test, y_test))
assert_less(.97, logger.load().score(X_test, y_test))
def test_svm_as_crf_pickling_batch():
iris = load_iris()
X, y = iris.data, iris.target
X_ = [(np.atleast_2d(x), np.empty((0, 2), dtype=np.int)) for x in X]
Y = y.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X_, Y, random_state=1)
_, file_name = mkstemp()
pbl = GraphCRF(n_features=4, n_states=3, inference_method='unary')
logger = SaveLogger(file_name)
svm = FrankWolfeSSVM(pbl, C=10, logger=logger, max_iter=50, batch_mode=False)
svm.fit(X_train, y_train)
assert_less(.97, svm.score(X_test, y_test))
assert_less(.97, logger.load().score(X_test, y_test))
| from tempfile import mkstemp
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import assert_less
from sklearn.datasets import load_iris
from pystruct.models import GridCRF, GraphCRF
from pystruct.datasets import generate_blocks_multinomial
from pystruct.learners import FrankWolfeSSVM
from pystruct.utils import SaveLogger, train_test_split
def test_multinomial_blocks_frankwolfe():
X, Y = generate_blocks_multinomial(n_samples=10, noise=0.5, seed=0)
crf = GridCRF(inference_method='qpbo')
clf = FrankWolfeSSVM(model=crf, C=1, max_iter=50, verbose=3)
clf.fit(X, Y)
Y_pred = clf.predict(X)
assert_array_equal(Y, Y_pred)
def test_multinomial_blocks_frankwolfe_batch():
X, Y = generate_blocks_multinomial(n_samples=10, noise=0.5, seed=0)
crf = GridCRF(inference_method='qpbo')
clf = FrankWolfeSSVM(model=crf, C=1, max_iter=500, verbose=3, batch_mode=True)
clf.fit(X, Y)
Y_pred = clf.predict(X)
assert_array_equal(Y, Y_pred)
def test_svm_as_crf_pickling_bcfw():
iris = load_iris()
X, y = iris.data, iris.target
X_ = [(np.atleast_2d(x), np.empty((0, 2), dtype=np.int)) for x in X]
Y = y.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X_, Y, random_state=1)
_, file_name = mkstemp()
pbl = GraphCRF(n_features=4, n_states=3, inference_method='unary')
logger = SaveLogger(file_name)
svm = FrankWolfeSSVM(pbl, C=10, logger=logger, max_iter=50)
svm.fit(X_train, y_train)
assert_less(.97, svm.score(X_test, y_test))
assert_less(.97, logger.load().score(X_test, y_test))
def test_svm_as_crf_pickling_batch():
iris = load_iris()
X, y = iris.data, iris.target
X_ = [(np.atleast_2d(x), np.empty((0, 2), dtype=np.int)) for x in X]
Y = y.reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X_, Y, random_state=1)
_, file_name = mkstemp()
pbl = GraphCRF(n_features=4, n_states=3, inference_method='unary')
logger = SaveLogger(file_name)
svm = FrankWolfeSSVM(pbl, C=10, logger=logger, max_iter=50, batch_mode=False)
svm.fit(X_train, y_train)
assert_less(.97, svm.score(X_test, y_test))
assert_less(.97, logger.load().score(X_test, y_test))
| bsd-2-clause | Python |
f1d3d2f5543c0e847c4b2051c04837cb3586846e | Enhance our plotter to use the new div_markers code | yw374cornell/e-mission-server,joshzarrabi/e-mission-server,sunil07t/e-mission-server,sunil07t/e-mission-server,shankari/e-mission-server,e-mission/e-mission-server,e-mission/e-mission-server,joshzarrabi/e-mission-server,sunil07t/e-mission-server,e-mission/e-mission-server,yw374cornell/e-mission-server,joshzarrabi/e-mission-server,yw374cornell/e-mission-server,shankari/e-mission-server,yw374cornell/e-mission-server,shankari/e-mission-server,shankari/e-mission-server,e-mission/e-mission-server,sunil07t/e-mission-server,joshzarrabi/e-mission-server | emission/analysis/plotting/leaflet_osm/our_plotter.py | emission/analysis/plotting/leaflet_osm/our_plotter.py | import pandas as pd
import folium
def df_to_string_list(df):
"""
Convert the input df into a list of strings, suitable for using as popups in a map.
This is a utility function.
"""
print "Converting df with size %s to string list" % df.shape[0]
array_list = df.as_matrix().tolist()
return [str(line) for line in array_list]
def get_map_list(df, potential_splits):
mapList = []
potential_splits_list = list(potential_splits)
for start, end in zip(potential_splits_list, potential_splits_list[1:]):
trip = df[start:end]
currMap = folium.Map([trip.mLatitude.mean(), trip.mLongitude.mean()])
currMap.div_markers(trip[['mLatitude', 'mLongitude']].as_matrix().tolist(),
df_to_string_list(trip[['mLatitude', 'mLongitude', 'formatted_time', 'mAccuracy']]))
currMap.line(trip[['mLatitude', 'mLongitude']].as_matrix().tolist())
mapList.append(currMap)
return mapList
| import pandas as pd
import folium
def get_map_list(df, potential_splits):
mapList = []
potential_splits_list = list(potential_splits)
for start, end in zip(potential_splits_list, potential_splits_list[1:]):
trip = df[start:end]
currMap = folium.Map([trip.mLatitude.mean(), trip.mLongitude.mean()])
plot_point = lambda row: currMap.simple_marker([row['mLatitude'], row['mLongitude']], popup='%s' % row)
trip.apply(plot_point, axis=1)
currMap.line(zip(list(trip.mLatitude), list(trip.mLongitude)))
mapList.append(currMap)
return mapList
| bsd-3-clause | Python |
db2308b1599f28e33897ac08d38a4c7751decf64 | add autots import check (#5384) | intel-analytics/BigDL,yangw1234/BigDL,yangw1234/BigDL,intel-analytics/BigDL,yangw1234/BigDL,intel-analytics/BigDL,yangw1234/BigDL,intel-analytics/BigDL | python/chronos/src/bigdl/chronos/autots/__init__.py | python/chronos/src/bigdl/chronos/autots/__init__.py | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from bigdl.nano.utils.log4Error import invalidInputError
import os
if os.getenv("LD_PRELOAD", "null") != "null":
invalidInputError(False,
errMsg="Users of `bigdl.chronos.autots` should "
"unset bigdl-nano environment variables!",
fixMsg="Please run `source bigdl-nano-unset-env` "
"in your bash terminal")
try:
# TODO: make this a LazyImport
from .autotsestimator import AutoTSEstimator
from .tspipeline import TSPipeline
except ImportError:
pass
| #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
try:
from .autotsestimator import AutoTSEstimator
except ImportError:
warnings.warn("Please install `bigdl-nano[all]` to use AutoTSEstimator")
from .tspipeline import TSPipeline
| apache-2.0 | Python |
0a8ce480bac176a53a418f6ab4242fd2f439942e | Bump version | thombashi/sqlitebiter,thombashi/sqlitebiter | sqlitebiter/__version__.py | sqlitebiter/__version__.py | # encoding: utf-8
from datetime import datetime
__author__ = "Tsuyoshi Hombashi"
__copyright__ = "Copyright 2016-{}, {}".format(datetime.now().year, __author__)
__license__ = "MIT License"
__version__ = "0.27.2"
__maintainer__ = __author__
__email__ = "tsuyoshi.hombashi@gmail.com"
| # encoding: utf-8
from datetime import datetime
__author__ = "Tsuyoshi Hombashi"
__copyright__ = "Copyright 2016-{}, {}".format(datetime.now().year, __author__)
__license__ = "MIT License"
__version__ = "0.27.1"
__maintainer__ = __author__
__email__ = "tsuyoshi.hombashi@gmail.com"
| mit | Python |
eee49d1b4a5f326cc7c4becf50b71ebe92580c7c | Update Graphics Python examples to use new pipeline API | berendkleinhaneveld/VTK,keithroe/vtkoptix,candy7393/VTK,hendradarwin/VTK,keithroe/vtkoptix,mspark93/VTK,msmolens/VTK,sumedhasingla/VTK,demarle/VTK,gram526/VTK,collects/VTK,ashray/VTK-EVM,biddisco/VTK,SimVascular/VTK,gram526/VTK,cjh1/VTK,mspark93/VTK,sankhesh/VTK,biddisco/VTK,collects/VTK,aashish24/VTK-old,sankhesh/VTK,jmerkow/VTK,jmerkow/VTK,sankhesh/VTK,SimVascular/VTK,sumedhasingla/VTK,berendkleinhaneveld/VTK,SimVascular/VTK,sankhesh/VTK,msmolens/VTK,johnkit/vtk-dev,jmerkow/VTK,candy7393/VTK,candy7393/VTK,collects/VTK,msmolens/VTK,gram526/VTK,biddisco/VTK,gram526/VTK,johnkit/vtk-dev,sumedhasingla/VTK,mspark93/VTK,biddisco/VTK,mspark93/VTK,msmolens/VTK,jmerkow/VTK,cjh1/VTK,sankhesh/VTK,SimVascular/VTK,sumedhasingla/VTK,aashish24/VTK-old,msmolens/VTK,collects/VTK,mspark93/VTK,keithroe/vtkoptix,sankhesh/VTK,mspark93/VTK,demarle/VTK,aashish24/VTK-old,biddisco/VTK,candy7393/VTK,ashray/VTK-EVM,SimVascular/VTK,biddisco/VTK,collects/VTK,hendradarwin/VTK,berendkleinhaneveld/VTK,sumedhasingla/VTK,aashish24/VTK-old,keithroe/vtkoptix,aashish24/VTK-old,sumedhasingla/VTK,keithroe/vtkoptix,berendkleinhaneveld/VTK,cjh1/VTK,sankhesh/VTK,candy7393/VTK,hendradarwin/VTK,berendkleinhaneveld/VTK,johnkit/vtk-dev,mspark93/VTK,ashray/VTK-EVM,gram526/VTK,johnkit/vtk-dev,gram526/VTK,cjh1/VTK,collects/VTK,aashish24/VTK-old,ashray/VTK-EVM,jmerkow/VTK,demarle/VTK,sankhesh/VTK,demarle/VTK,demarle/VTK,demarle/VTK,keithroe/vtkoptix,gram526/VTK,candy7393/VTK,hendradarwin/VTK,johnkit/vtk-dev,msmolens/VTK,sumedhasingla/VTK,hendradarwin/VTK,ashray/VTK-EVM,biddisco/VTK,ashray/VTK-EVM,hendradarwin/VTK,msmolens/VTK,cjh1/VTK,hendradarwin/VTK,keithroe/vtkoptix,SimVascular/VTK,candy7393/VTK,berendkleinhaneveld/VTK,demarle/VTK,berendkleinhaneveld/VTK,jmerkow/VTK,candy7393/VTK,jmerkow/VTK,cjh1/VTK,demarle/VTK,msmolens/VTK,sumedhasingla/VTK,mspark93/VTK,keithroe/vtkoptix,johnkit/vtk-dev,jmerkow/VTK,SimVascular/VTK,SimVascular/VTK,gram526/VTK,ashray/VTK-EVM,johnkit/vtk-dev,ashray/VTK-EVM | Examples/Graphics/Python/SegmentAndBrokenLineSources.py | Examples/Graphics/Python/SegmentAndBrokenLineSources.py | ############################################################
from vtk import *
############################################################
# Create sources
line1 = vtkLineSource()
line1.SetPoint1( 1, 0, 0 )
line1.SetPoint2( -1, 0, 0 )
line1.SetResolution( 32 )
points = vtkPoints()
points.InsertNextPoint( 1, 0, 0 )
points.InsertNextPoint( -.5, 1, 0 )
points.InsertNextPoint( 0, 1, 2 )
points.InsertNextPoint( 2, 1, -1 )
points.InsertNextPoint( -1, 0, 0 )
line2 = vtkLineSource()
line2.SetPoints( points )
line2.SetResolution( 16 )
# Create mappers
mapper1 = vtkPolyDataMapper()
mapper1.SetInputConnection( line1.GetOutputPort() )
mapper2 = vtkPolyDataMapper()
mapper2.SetInputConnection( line2.GetOutputPort() )
# Create actors
actor1 = vtkActor()
actor1.SetMapper( mapper1 )
actor1.GetProperty().SetColor( 1., 0., 0. )
actor2 = vtkActor()
actor2.SetMapper( mapper2 )
actor2.GetProperty().SetColor( 0., 0., 1. )
actor2.GetProperty().SetLineWidth( 2.5 )
# Create renderer
renderer = vtkRenderer()
renderer.AddViewProp( actor1 )
renderer.AddViewProp( actor2 )
renderer.SetBackground( .3, .4 ,.5 )
# Create render window
window = vtkRenderWindow()
window.AddRenderer( renderer )
window.SetSize( 500, 500 )
# Create interactor
interactor = vtkRenderWindowInteractor()
interactor.SetRenderWindow( window )
# Start interaction
window.Render()
interactor.Start()
| ############################################################
from vtk import *
############################################################
# Create sources
line1 = vtkLineSource()
line1.SetPoint1( 1, 0, 0 )
line1.SetPoint2( -1, 0, 0 )
line1.SetResolution( 32 )
points = vtkPoints()
points.InsertNextPoint( 1, 0, 0 )
points.InsertNextPoint( -.5, 1, 0 )
points.InsertNextPoint( 0, 1, 2 )
points.InsertNextPoint( 2, 1, -1 )
points.InsertNextPoint( -1, 0, 0 )
line2 = vtkLineSource()
line2.SetPoints( points )
line2.SetResolution( 16 )
# Create mappers
mapper1 = vtkPolyDataMapper()
mapper1.SetInput( line1.GetOutput() )
mapper2 = vtkPolyDataMapper()
mapper2.SetInputConnection( line2.GetOutputPort() )
# Create actors
actor1 = vtkActor()
actor1.SetMapper( mapper1 )
actor1.GetProperty().SetColor( 1., 0., 0. )
actor2 = vtkActor()
actor2.SetMapper( mapper2 )
actor2.GetProperty().SetColor( 0., 0., 1. )
actor2.GetProperty().SetLineWidth( 2.5 )
# Create renderer
renderer = vtkRenderer()
renderer.AddViewProp( actor1 )
renderer.AddViewProp( actor2 )
renderer.SetBackground( .3, .4 ,.5 )
# Create render window
window = vtkRenderWindow()
window.AddRenderer( renderer )
window.SetSize( 500, 500 )
# Create interactor
interactor = vtkRenderWindowInteractor()
interactor.SetRenderWindow( window )
# Start interaction
window.Render()
interactor.Start()
| bsd-3-clause | Python |
b4f3f5cc91836765037a44b359f54cf2c58a2033 | Include defaults in --help output. | vine/mysql-prefetcher | myprefetch/fake_updates_prefetch.py | myprefetch/fake_updates_prefetch.py | #!/usr/local/bin/python2.6 -Wignore::DeprecationWarning
import argparse
import logging
import os
from myprefetch import readahead
from myprefetch.rewriters import fake_update
from myprefetch.mysql import Config
def main():
logging.basicConfig()
parser = argparse.ArgumentParser(description="""
This prefetcher will be utilizing fake changes support within InnoDB -
so it can execute statements without much rewriting.""".strip(),
fromfile_prefix_chars='@',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--host', default='localhost', help='MySQL server hostname')
parser.add_argument('--port', default=3306, type=int, help='MySQL server port')
parser.add_argument('--username', '-u', default='root', help='MySQL username')
parser.add_argument('--password', '-p', default='', help='MySQL password')
parser.add_argument('--runners', default=4, type=int,
help='Number of statement runner threads to use')
parser.add_argument('--threshold', default=1.0, type=float,
help='Minimum "seconds behind master" before we start prefetching')
parser.add_argument('--window_start', default=13, type=int,
help='How far into the future from seconds behind master to start '
'prefetching.')
parser.add_argument('--window_stop', default=30, type=int,
help='How far into the future to prefetch.')
parser.add_argument('--elapsed_limit', default=4, type=int,
help='We won\'t try to prefetch statements that took longer than '
'--elapsed_limit on the master')
parser.add_argument('--logpath', default="/var/lib/mysql",
help='How far into the future to prefetch.')
args = vars(parser.parse_args())
if not os.path.isdir(args['logpath']):
raise Exception("%s is not a valid directory" % (args['logpath'],))
config = Config(**{k: args.pop(k) for k in ('host', 'port', 'username', 'password')})
prefetch = readahead.Prefetch(config, **args)
prefetch.worker_init_connect = "SET SESSION "\
"long_query_time=60, innodb_fake_changes=1, sql_log_bin=0"
prefetch.rewriter = fake_update
prefetch.run()
if __name__ == "__main__":
main()
| #!/usr/local/bin/python2.6 -Wignore::DeprecationWarning
import argparse
import logging
import os
from myprefetch import readahead
from myprefetch.rewriters import fake_update
from myprefetch.mysql import Config
def main():
logging.basicConfig()
parser = argparse.ArgumentParser(description="""
This prefetcher will be utilizing fake changes support within InnoDB -
so it can execute statements without much rewriting.""".strip(),
fromfile_prefix_chars='@')
parser.add_argument('--host', default='localhost', help='MySQL server hostname')
parser.add_argument('--port', default=3306, type=int, help='MySQL server port')
parser.add_argument('--username', '-u', default='root', help='MySQL username')
parser.add_argument('--password', '-p', default='', help='MySQL password')
parser.add_argument('--runners', default=4, type=int,
help='Number of statement runner threads to use')
parser.add_argument('--threshold', default=1.0, type=float,
help='Minimum "seconds behind master" before we start prefetching')
parser.add_argument('--window_start', default=13, type=int,
help='How far into the future from seconds behind master to start '
'prefetching.')
parser.add_argument('--window_stop', default=30, type=int,
help='How far into the future to prefetch.')
parser.add_argument('--elapsed_limit', default=4, type=int,
help='We won\'t try to prefetch statements that took longer than '
'--elapsed_limit on the master')
parser.add_argument('--logpath', default="/var/lib/mysql",
help='How far into the future to prefetch.')
args = vars(parser.parse_args())
if not os.path.isdir(args['logpath']):
raise Exception("%s is not a valid directory" % (args['logpath'],))
config = Config(**{k: args.pop(k) for k in ('host', 'port', 'username', 'password')})
prefetch = readahead.Prefetch(config, **args)
prefetch.worker_init_connect = "SET SESSION "\
"long_query_time=60, innodb_fake_changes=1, sql_log_bin=0"
prefetch.rewriter = fake_update
prefetch.run()
if __name__ == "__main__":
main()
| apache-2.0 | Python |
72603e1cbffaecc3abe0076cb4271013b7f550d7 | Add a depreciation warning on importing nipy.neurospin.utils.mask | alexis-roche/nipy,bthirion/nipy,nipy/nipy-labs,alexis-roche/nipy,alexis-roche/register,arokem/nipy,alexis-roche/niseg,alexis-roche/register,bthirion/nipy,bthirion/nipy,arokem/nipy,alexis-roche/niseg,alexis-roche/nireg,arokem/nipy,nipy/nireg,alexis-roche/nipy,alexis-roche/register,nipy/nipy-labs,bthirion/nipy,alexis-roche/nipy,nipy/nireg,alexis-roche/nireg,arokem/nipy | nipy/neurospin/utils/mask.py | nipy/neurospin/utils/mask.py | """
Compatibility module
"""
import warnings
warnings.warn(DeprecationWarning(
"This module (nipy.neurospin.utils.mask) has been moved and "
"is depreciated. Please update your code to import from "
"'nipy.neurospin.mask'."))
from ..mask import *
| """
Compatibility module
"""
# No relative imports, as they are not permitted with 'import *'
from nipy.neurospin.mask import *
| bsd-3-clause | Python |
4892c5d51a973846338f6f46f0dd33843a470ea7 | Bump version to 0.5.2 | Princeton-CDH/django-pucas,Princeton-CDH/django-pucas | pucas/__init__.py | pucas/__init__.py | default_app_config = 'pucas.apps.PucasConfig'
__version_info__ = (0, 5, 2, None)
# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join([str(i) for i in __version_info__[:-1]])
if __version_info__[-1] is not None:
__version__ += ('-%s' % (__version_info__[-1],))
| default_app_config = 'pucas.apps.PucasConfig'
__version_info__ = (0, 5, 1, None)
# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join([str(i) for i in __version_info__[:-1]])
if __version_info__[-1] is not None:
__version__ += ('-%s' % (__version_info__[-1],))
| apache-2.0 | Python |
2216370f0f9d04da2c2d5b4556d93e01aecd6e97 | fix testdata | SerCeMan/intellij-community,petteyg/intellij-community,ThiagoGarciaAlves/intellij-community,fitermay/intellij-community,hurricup/intellij-community,retomerz/intellij-community,ibinti/intellij-community,ryano144/intellij-community,fengbaicanhe/intellij-community,blademainer/intellij-community,robovm/robovm-studio,Lekanich/intellij-community,apixandru/intellij-community,amith01994/intellij-community,signed/intellij-community,kdwink/intellij-community,MichaelNedzelsky/intellij-community,tmpgit/intellij-community,diorcety/intellij-community,retomerz/intellij-community,ol-loginov/intellij-community,signed/intellij-community,xfournet/intellij-community,MichaelNedzelsky/intellij-community,wreckJ/intellij-community,holmes/intellij-community,suncycheng/intellij-community,suncycheng/intellij-community,da1z/intellij-community,ibinti/intellij-community,signed/intellij-community,Distrotech/intellij-community,ahb0327/intellij-community,kdwink/intellij-community,asedunov/intellij-community,izonder/intellij-community,suncycheng/intellij-community,jagguli/intellij-community,vladmm/intellij-community,da1z/intellij-community,izonder/intellij-community,wreckJ/intellij-community,petteyg/intellij-community,petteyg/intellij-community,retomerz/intellij-community,da1z/intellij-community,samthor/intellij-community,hurricup/intellij-community,vladmm/intellij-community,asedunov/intellij-community,samthor/intellij-community,orekyuu/intellij-community,kool79/intellij-community,blademainer/intellij-community,allotria/intellij-community,vvv1559/intellij-community,kool79/intellij-community,Lekanich/intellij-community,MER-GROUP/intellij-community,vladmm/intellij-community,TangHao1987/intellij-community,kool79/intellij-community,ahb0327/intellij-community,kdwink/intellij-community,asedunov/intellij-community,salguarnieri/intellij-community,samthor/intellij-community,da1z/intellij-community,michaelgallacher/intellij-community,orekyuu/intellij-community,vvv1559/intellij-community,FHannes/intellij-community,semonte/intellij-community,diorcety/intellij-community,muntasirsyed/intellij-community,youdonghai/intellij-community,alphafoobar/intellij-community,holmes/intellij-community,ftomassetti/intellij-community,Lekanich/intellij-community,slisson/intellij-community,ivan-fedorov/intellij-community,signed/intellij-community,alphafoobar/intellij-community,Distrotech/intellij-community,robovm/robovm-studio,blademainer/intellij-community,supersven/intellij-community,retomerz/intellij-community,wreckJ/intellij-community,salguarnieri/intellij-community,akosyakov/intellij-community,ahb0327/intellij-community,xfournet/intellij-community,muntasirsyed/intellij-community,izonder/intellij-community,asedunov/intellij-community,TangHao1987/intellij-community,semonte/intellij-community,FHannes/intellij-community,supersven/intellij-community,kool79/intellij-community,gnuhub/intellij-community,apixandru/intellij-community,samthor/intellij-community,izonder/intellij-community,fitermay/intellij-community,muntasirsyed/intellij-community,ivan-fedorov/intellij-community,retomerz/intellij-community,Distrotech/intellij-community,xfournet/intellij-community,amith01994/intellij-community,ibinti/intellij-community,petteyg/intellij-community,diorcety/intellij-community,FHannes/intellij-community,alphafoobar/intellij-community,adedayo/intellij-community,apixandru/intellij-community,fengbaicanhe/intellij-community,nicolargo/intellij-community,idea4bsd/idea4bsd,pwoodworth/intellij-community,TangHao1987/intellij-community,fengbaicanhe/intellij-community,MER-GROUP/intellij-community,mglukhikh/intellij-community,jagguli/intellij-community,diorcety/intellij-community,lucafavatella/intellij-community,diorcety/intellij-community,pwoodworth/intellij-community,kdwink/intellij-community,caot/intellij-community,SerCeMan/intellij-community,slisson/intellij-community,ahb0327/intellij-community,petteyg/intellij-community,nicolargo/intellij-community,alphafoobar/intellij-community,amith01994/intellij-community,wreckJ/intellij-community,dslomov/intellij-community,SerCeMan/intellij-community,caot/intellij-community,mglukhikh/intellij-community,holmes/intellij-community,nicolargo/intellij-community,clumsy/intellij-community,ryano144/intellij-community,salguarnieri/intellij-community,caot/intellij-community,semonte/intellij-community,kool79/intellij-community,tmpgit/intellij-community,suncycheng/intellij-community,nicolargo/intellij-community,fnouama/intellij-community,samthor/intellij-community,FHannes/intellij-community,allotria/intellij-community,retomerz/intellij-community,ol-loginov/intellij-community,pwoodworth/intellij-community,Distrotech/intellij-community,pwoodworth/intellij-community,da1z/intellij-community,MER-GROUP/intellij-community,allotria/intellij-community,amith01994/intellij-community,michaelgallacher/intellij-community,ftomassetti/intellij-community,MER-GROUP/intellij-community,ryano144/intellij-community,lucafavatella/intellij-community,FHannes/intellij-community,idea4bsd/idea4bsd,allotria/intellij-community,fengbaicanhe/intellij-community,nicolargo/intellij-community,mglukhikh/intellij-community,muntasirsyed/intellij-community,vvv1559/intellij-community,clumsy/intellij-community,allotria/intellij-community,da1z/intellij-community,diorcety/intellij-community,amith01994/intellij-community,da1z/intellij-community,ThiagoGarciaAlves/intellij-community,allotria/intellij-community,fengbaicanhe/intellij-community,xfournet/intellij-community,vladmm/intellij-community,vvv1559/intellij-community,petteyg/intellij-community,clumsy/intellij-community,ivan-fedorov/intellij-community,samthor/intellij-community,Distrotech/intellij-community,fengbaicanhe/intellij-community,retomerz/intellij-community,idea4bsd/idea4bsd,adedayo/intellij-community,nicolargo/intellij-community,pwoodworth/intellij-community,ryano144/intellij-community,akosyakov/intellij-community,wreckJ/intellij-community,salguarnieri/intellij-community,vladmm/intellij-community,MER-GROUP/intellij-community,ThiagoGarciaAlves/intellij-community,slisson/intellij-community,apixandru/intellij-community,hurricup/intellij-community,gnuhub/intellij-community,signed/intellij-community,suncycheng/intellij-community,pwoodworth/intellij-community,kdwink/intellij-community,ol-loginov/intellij-community,gnuhub/intellij-community,ryano144/intellij-community,TangHao1987/intellij-community,supersven/intellij-community,fitermay/intellij-community,caot/intellij-community,youdonghai/intellij-community,asedunov/intellij-community,izonder/intellij-community,youdonghai/intellij-community,ThiagoGarciaAlves/intellij-community,clumsy/intellij-community,semonte/intellij-community,tmpgit/intellij-community,gnuhub/intellij-community,ahb0327/intellij-community,orekyuu/intellij-community,petteyg/intellij-community,orekyuu/intellij-community,michaelgallacher/intellij-community,ftomassetti/intellij-community,slisson/intellij-community,izonder/intellij-community,michaelgallacher/intellij-community,izonder/intellij-community,akosyakov/intellij-community,TangHao1987/intellij-community,idea4bsd/idea4bsd,vladmm/intellij-community,ftomassetti/intellij-community,ivan-fedorov/intellij-community,semonte/intellij-community,orekyuu/intellij-community,adedayo/intellij-community,ivan-fedorov/intellij-community,xfournet/intellij-community,signed/intellij-community,muntasirsyed/intellij-community,fnouama/intellij-community,vladmm/intellij-community,hurricup/intellij-community,idea4bsd/idea4bsd,dslomov/intellij-community,MichaelNedzelsky/intellij-community,pwoodworth/intellij-community,ryano144/intellij-community,lucafavatella/intellij-community,petteyg/intellij-community,fnouama/intellij-community,mglukhikh/intellij-community,alphafoobar/intellij-community,fitermay/intellij-community,alphafoobar/intellij-community,ThiagoGarciaAlves/intellij-community,akosyakov/intellij-community,michaelgallacher/intellij-community,dslomov/intellij-community,vvv1559/intellij-community,akosyakov/intellij-community,SerCeMan/intellij-community,idea4bsd/idea4bsd,caot/intellij-community,ryano144/intellij-community,Lekanich/intellij-community,samthor/intellij-community,fnouama/intellij-community,supersven/intellij-community,tmpgit/intellij-community,blademainer/intellij-community,orekyuu/intellij-community,slisson/intellij-community,michaelgallacher/intellij-community,michaelgallacher/intellij-community,izonder/intellij-community,salguarnieri/intellij-community,vvv1559/intellij-community,fitermay/intellij-community,ThiagoGarciaAlves/intellij-community,lucafavatella/intellij-community,adedayo/intellij-community,ThiagoGarciaAlves/intellij-community,da1z/intellij-community,signed/intellij-community,Lekanich/intellij-community,amith01994/intellij-community,retomerz/intellij-community,mglukhikh/intellij-community,robovm/robovm-studio,dslomov/intellij-community,jagguli/intellij-community,kool79/intellij-community,holmes/intellij-community,salguarnieri/intellij-community,ahb0327/intellij-community,samthor/intellij-community,signed/intellij-community,fitermay/intellij-community,ftomassetti/intellij-community,lucafavatella/intellij-community,mglukhikh/intellij-community,vladmm/intellij-community,ol-loginov/intellij-community,salguarnieri/intellij-community,ftomassetti/intellij-community,ivan-fedorov/intellij-community,salguarnieri/intellij-community,fitermay/intellij-community,clumsy/intellij-community,jagguli/intellij-community,samthor/intellij-community,muntasirsyed/intellij-community,SerCeMan/intellij-community,youdonghai/intellij-community,salguarnieri/intellij-community,kool79/intellij-community,apixandru/intellij-community,blademainer/intellij-community,michaelgallacher/intellij-community,ivan-fedorov/intellij-community,dslomov/intellij-community,suncycheng/intellij-community,blademainer/intellij-community,da1z/intellij-community,fnouama/intellij-community,wreckJ/intellij-community,MichaelNedzelsky/intellij-community,izonder/intellij-community,allotria/intellij-community,MichaelNedzelsky/intellij-community,SerCeMan/intellij-community,suncycheng/intellij-community,MER-GROUP/intellij-community,vladmm/intellij-community,ivan-fedorov/intellij-community,semonte/intellij-community,MER-GROUP/intellij-community,lucafavatella/intellij-community,vladmm/intellij-community,adedayo/intellij-community,vvv1559/intellij-community,lucafavatella/intellij-community,tmpgit/intellij-community,Distrotech/intellij-community,ibinti/intellij-community,adedayo/intellij-community,idea4bsd/idea4bsd,ftomassetti/intellij-community,wreckJ/intellij-community,suncycheng/intellij-community,amith01994/intellij-community,jagguli/intellij-community,ryano144/intellij-community,vvv1559/intellij-community,mglukhikh/intellij-community,holmes/intellij-community,izonder/intellij-community,asedunov/intellij-community,idea4bsd/idea4bsd,supersven/intellij-community,amith01994/intellij-community,FHannes/intellij-community,lucafavatella/intellij-community,clumsy/intellij-community,tmpgit/intellij-community,youdonghai/intellij-community,holmes/intellij-community,dslomov/intellij-community,fitermay/intellij-community,youdonghai/intellij-community,kdwink/intellij-community,robovm/robovm-studio,MichaelNedzelsky/intellij-community,ahb0327/intellij-community,ThiagoGarciaAlves/intellij-community,suncycheng/intellij-community,supersven/intellij-community,xfournet/intellij-community,tmpgit/intellij-community,tmpgit/intellij-community,pwoodworth/intellij-community,mglukhikh/intellij-community,ftomassetti/intellij-community,signed/intellij-community,tmpgit/intellij-community,xfournet/intellij-community,dslomov/intellij-community,orekyuu/intellij-community,vladmm/intellij-community,dslomov/intellij-community,holmes/intellij-community,adedayo/intellij-community,kdwink/intellij-community,Lekanich/intellij-community,tmpgit/intellij-community,idea4bsd/idea4bsd,fnouama/intellij-community,jagguli/intellij-community,holmes/intellij-community,adedayo/intellij-community,izonder/intellij-community,signed/intellij-community,nicolargo/intellij-community,vvv1559/intellij-community,hurricup/intellij-community,asedunov/intellij-community,diorcety/intellij-community,ol-loginov/intellij-community,TangHao1987/intellij-community,slisson/intellij-community,muntasirsyed/intellij-community,adedayo/intellij-community,kdwink/intellij-community,caot/intellij-community,gnuhub/intellij-community,supersven/intellij-community,da1z/intellij-community,akosyakov/intellij-community,idea4bsd/idea4bsd,MichaelNedzelsky/intellij-community,ibinti/intellij-community,wreckJ/intellij-community,mglukhikh/intellij-community,amith01994/intellij-community,asedunov/intellij-community,ftomassetti/intellij-community,asedunov/intellij-community,allotria/intellij-community,fitermay/intellij-community,gnuhub/intellij-community,allotria/intellij-community,robovm/robovm-studio,MichaelNedzelsky/intellij-community,FHannes/intellij-community,ThiagoGarciaAlves/intellij-community,retomerz/intellij-community,akosyakov/intellij-community,hurricup/intellij-community,asedunov/intellij-community,michaelgallacher/intellij-community,ibinti/intellij-community,fengbaicanhe/intellij-community,holmes/intellij-community,akosyakov/intellij-community,kool79/intellij-community,muntasirsyed/intellij-community,muntasirsyed/intellij-community,gnuhub/intellij-community,ryano144/intellij-community,wreckJ/intellij-community,Distrotech/intellij-community,tmpgit/intellij-community,vladmm/intellij-community,ol-loginov/intellij-community,akosyakov/intellij-community,xfournet/intellij-community,MichaelNedzelsky/intellij-community,petteyg/intellij-community,semonte/intellij-community,kdwink/intellij-community,ibinti/intellij-community,lucafavatella/intellij-community,ibinti/intellij-community,lucafavatella/intellij-community,blademainer/intellij-community,xfournet/intellij-community,ftomassetti/intellij-community,Distrotech/intellij-community,ryano144/intellij-community,supersven/intellij-community,retomerz/intellij-community,ol-loginov/intellij-community,alphafoobar/intellij-community,idea4bsd/idea4bsd,salguarnieri/intellij-community,michaelgallacher/intellij-community,caot/intellij-community,supersven/intellij-community,vvv1559/intellij-community,diorcety/intellij-community,nicolargo/intellij-community,adedayo/intellij-community,fengbaicanhe/intellij-community,FHannes/intellij-community,kool79/intellij-community,fnouama/intellij-community,fitermay/intellij-community,mglukhikh/intellij-community,gnuhub/intellij-community,Lekanich/intellij-community,apixandru/intellij-community,TangHao1987/intellij-community,xfournet/intellij-community,blademainer/intellij-community,kool79/intellij-community,holmes/intellij-community,alphafoobar/intellij-community,kdwink/intellij-community,suncycheng/intellij-community,fnouama/intellij-community,semonte/intellij-community,apixandru/intellij-community,ibinti/intellij-community,slisson/intellij-community,fitermay/intellij-community,pwoodworth/intellij-community,gnuhub/intellij-community,samthor/intellij-community,Distrotech/intellij-community,orekyuu/intellij-community,youdonghai/intellij-community,alphafoobar/intellij-community,akosyakov/intellij-community,ThiagoGarciaAlves/intellij-community,da1z/intellij-community,wreckJ/intellij-community,ryano144/intellij-community,semonte/intellij-community,akosyakov/intellij-community,allotria/intellij-community,hurricup/intellij-community,youdonghai/intellij-community,muntasirsyed/intellij-community,ftomassetti/intellij-community,amith01994/intellij-community,jagguli/intellij-community,hurricup/intellij-community,alphafoobar/intellij-community,supersven/intellij-community,adedayo/intellij-community,hurricup/intellij-community,dslomov/intellij-community,kool79/intellij-community,ahb0327/intellij-community,fitermay/intellij-community,wreckJ/intellij-community,apixandru/intellij-community,izonder/intellij-community,MichaelNedzelsky/intellij-community,ahb0327/intellij-community,mglukhikh/intellij-community,signed/intellij-community,MER-GROUP/intellij-community,TangHao1987/intellij-community,diorcety/intellij-community,muntasirsyed/intellij-community,ahb0327/intellij-community,MichaelNedzelsky/intellij-community,jagguli/intellij-community,salguarnieri/intellij-community,TangHao1987/intellij-community,youdonghai/intellij-community,orekyuu/intellij-community,fnouama/intellij-community,ahb0327/intellij-community,ThiagoGarciaAlves/intellij-community,ol-loginov/intellij-community,nicolargo/intellij-community,asedunov/intellij-community,semonte/intellij-community,petteyg/intellij-community,diorcety/intellij-community,Distrotech/intellij-community,nicolargo/intellij-community,TangHao1987/intellij-community,idea4bsd/idea4bsd,fengbaicanhe/intellij-community,pwoodworth/intellij-community,robovm/robovm-studio,MER-GROUP/intellij-community,amith01994/intellij-community,blademainer/intellij-community,Lekanich/intellij-community,fnouama/intellij-community,MER-GROUP/intellij-community,diorcety/intellij-community,tmpgit/intellij-community,apixandru/intellij-community,hurricup/intellij-community,fnouama/intellij-community,ol-loginov/intellij-community,youdonghai/intellij-community,semonte/intellij-community,apixandru/intellij-community,ThiagoGarciaAlves/intellij-community,TangHao1987/intellij-community,MER-GROUP/intellij-community,caot/intellij-community,slisson/intellij-community,ol-loginov/intellij-community,robovm/robovm-studio,mglukhikh/intellij-community,kdwink/intellij-community,suncycheng/intellij-community,allotria/intellij-community,pwoodworth/intellij-community,robovm/robovm-studio,signed/intellij-community,wreckJ/intellij-community,semonte/intellij-community,fitermay/intellij-community,FHannes/intellij-community,muntasirsyed/intellij-community,xfournet/intellij-community,slisson/intellij-community,mglukhikh/intellij-community,hurricup/intellij-community,allotria/intellij-community,asedunov/intellij-community,alphafoobar/intellij-community,fnouama/intellij-community,salguarnieri/intellij-community,retomerz/intellij-community,jagguli/intellij-community,MichaelNedzelsky/intellij-community,FHannes/intellij-community,Lekanich/intellij-community,retomerz/intellij-community,clumsy/intellij-community,Lekanich/intellij-community,amith01994/intellij-community,hurricup/intellij-community,ol-loginov/intellij-community,akosyakov/intellij-community,xfournet/intellij-community,SerCeMan/intellij-community,TangHao1987/intellij-community,fengbaicanhe/intellij-community,alphafoobar/intellij-community,SerCeMan/intellij-community,supersven/intellij-community,vvv1559/intellij-community,orekyuu/intellij-community,Lekanich/intellij-community,holmes/intellij-community,caot/intellij-community,samthor/intellij-community,apixandru/intellij-community,da1z/intellij-community,supersven/intellij-community,semonte/intellij-community,pwoodworth/intellij-community,fengbaicanhe/intellij-community,petteyg/intellij-community,kdwink/intellij-community,ivan-fedorov/intellij-community,kool79/intellij-community,SerCeMan/intellij-community,robovm/robovm-studio,robovm/robovm-studio,slisson/intellij-community,FHannes/intellij-community,apixandru/intellij-community,allotria/intellij-community,MER-GROUP/intellij-community,clumsy/intellij-community,youdonghai/intellij-community,Lekanich/intellij-community,Distrotech/intellij-community,caot/intellij-community,ol-loginov/intellij-community,clumsy/intellij-community,clumsy/intellij-community,xfournet/intellij-community,dslomov/intellij-community,orekyuu/intellij-community,gnuhub/intellij-community,SerCeMan/intellij-community,ivan-fedorov/intellij-community,suncycheng/intellij-community,jagguli/intellij-community,michaelgallacher/intellij-community,signed/intellij-community,FHannes/intellij-community,ivan-fedorov/intellij-community,vvv1559/intellij-community,ibinti/intellij-community,michaelgallacher/intellij-community,ftomassetti/intellij-community,idea4bsd/idea4bsd,petteyg/intellij-community,orekyuu/intellij-community,ibinti/intellij-community,adedayo/intellij-community,asedunov/intellij-community,diorcety/intellij-community,ahb0327/intellij-community,apixandru/intellij-community,lucafavatella/intellij-community,nicolargo/intellij-community,robovm/robovm-studio,slisson/intellij-community,jagguli/intellij-community,robovm/robovm-studio,apixandru/intellij-community,SerCeMan/intellij-community,vvv1559/intellij-community,slisson/intellij-community,blademainer/intellij-community,retomerz/intellij-community,gnuhub/intellij-community,da1z/intellij-community,ryano144/intellij-community,clumsy/intellij-community,ibinti/intellij-community,dslomov/intellij-community,youdonghai/intellij-community,fengbaicanhe/intellij-community,caot/intellij-community,holmes/intellij-community,hurricup/intellij-community,ibinti/intellij-community,blademainer/intellij-community,Distrotech/intellij-community,jagguli/intellij-community,gnuhub/intellij-community,samthor/intellij-community,FHannes/intellij-community,caot/intellij-community,dslomov/intellij-community,blademainer/intellij-community,ivan-fedorov/intellij-community,nicolargo/intellij-community,lucafavatella/intellij-community,clumsy/intellij-community,SerCeMan/intellij-community,youdonghai/intellij-community,lucafavatella/intellij-community | python/testData/completion/predefinedMethodName.py | python/testData/completion/predefinedMethodName.py | class Foo:
def __rad<caret>
| class Foo:
def __ra<caret>
| apache-2.0 | Python |
93700dba921c6bffe77f2eaadc2d7ece5dde03e5 | Fix error caused by moving function setup_logging | raphiz/bsAbstimmungen,raphiz/bsAbstimmungen | tests/__init__.py | tests/__init__.py | from bsAbstimmungen.utils import setup_logging
setup_logging('tests/test-logging.json')
| from bsAbstimmungen import setup_logging
setup_logging('tests/test-logging.json')
| mit | Python |
3b8c4f7a2184e886128ddf5a31a773322ef4ea97 | fix bug: comment.canonical was removed long ago | SysTheron/adhocracy,alkadis/vcv,liqd/adhocracy,SysTheron/adhocracy,alkadis/vcv,phihag/adhocracy,phihag/adhocracy,DanielNeugebauer/adhocracy,phihag/adhocracy,liqd/adhocracy,DanielNeugebauer/adhocracy,alkadis/vcv,alkadis/vcv,liqd/adhocracy,SysTheron/adhocracy,alkadis/vcv,DanielNeugebauer/adhocracy,phihag/adhocracy,DanielNeugebauer/adhocracy,liqd/adhocracy,phihag/adhocracy,DanielNeugebauer/adhocracy | adhocracy/lib/auth/comment.py | adhocracy/lib/auth/comment.py | from pylons import tmpl_context as c
from authorization import has
import poll
# helper functions
def is_own(co):
return c.user and co.creator == c.user
# authorisation checks
def index(check):
check.perm('comment.view')
def show(check, co):
check.perm('comment.view')
check.other('comment_is_deleted', co.is_deleted())
def create(check):
check.perm('comment.create')
def create_on(check, topic):
if has('instance.admin'):
return
check.other('topic_instance_frozen', topic.instance.frozen)
create(check)
def reply(check, parent):
create_on(check, parent.topic)
check.other('parent_deleted', parent.is_deleted())
def edit(check, co):
check.other('comment_not_mutable', not co.is_mutable())
if has('instance.admin'):
return
check.other('comment_topic_instance_frozen', co.topic.instance.frozen)
check.perm('comment.edit')
show(check, co)
check.other('comment_is_not_wiki_or_own', not (co.wiki or is_own(co)))
revert = edit
def delete(check, co):
if has('instance.admin'):
return
check.other('comment_topic_instance_frozen', co.topic.instance.frozen)
edit(check, co)
check.other('comment_is_not_own', not is_own(co))
check.other('comment_is_edited', co.is_edited())
check.perm('comment.delete')
show(check, co)
check.other('comment_not_mutable', not co.topic.is_mutable())
def rate(check, co):
check.other('comment_topic_instance_frozen', co.topic.instance.frozen)
show(check, co)
check.other('comment_poll_is_none', co.poll is not None)
poll.vote(check, co.poll)
| from pylons import tmpl_context as c
from authorization import has
import poll
# helper functions
def is_own(co):
return c.user and co.creator == c.user
# authorisation checks
def index(check):
check.perm('comment.view')
def show(check, co):
check.perm('comment.view')
check.other('comment_is_deleted', co.is_deleted())
def create(check):
check.perm('comment.create')
def create_on(check, topic):
if has('instance.admin'):
return
check.other('topic_instance_frozen', topic.instance.frozen)
create(check)
def reply(check, parent):
create_on(check, parent.topic)
check.other('parent_deleted', parent.is_deleted())
def edit(check, co):
check.other('comment_not_mutable', not co.is_mutable())
if has('instance.admin'):
return
check.other('comment_topic_instance_frozen', co.topic.instance.frozen)
check.perm('comment.edit')
show(check, co)
check.other('comment_is_not_wiki_or_own', not (co.wiki or is_own(co)))
revert = edit
def delete(check, co):
if has('instance.admin'):
return
check.other('comment_topic_instance_frozen', co.topic.instance.frozen)
edit(check, co)
check.other('comment_is_not_own', not is_own(co))
check.other('comment_is_edited', co.is_edited())
check.perm('comment.delete')
show(check, co)
check.other('comment_not_mutable_and_canonical',
not co.topic.is_mutable() and co.canonical)
def rate(check, co):
check.other('comment_topic_instance_frozen', co.topic.instance.frozen)
show(check, co)
check.other('comment_poll_is_none', co.poll is not None)
poll.vote(check, co.poll)
| agpl-3.0 | Python |
d16006e4274a79b577e2f3ca62c7e2b1db775d4a | fix indent for workqueue.py | weiHelloWorld/accelerated_sampling_with_autoencoder,weiHelloWorld/accelerated_sampling_with_autoencoder | MD_simulation_on_trp_cage/current_work/src/workqueue.py | MD_simulation_on_trp_cage/current_work/src/workqueue.py | """
this programs takes a file containing all Python programs to run as input, and
put these programs into a workqueue, and at every instance we make sure only
n Python programs are running
===========================
input:
- file containing Python programs to run
- number of programs allowed to run concurrently
- time interval of checking the number of running programs
"""
import argparse, subprocess, time
def main():
parser = argparse.ArgumentParser()
parser.add_argument("cmdfile", type=str, help="file containing Python programs to run")
parser.add_argument("--num", type=int, default=20, help="number of programs allowed to run concurrently")
parser.add_argument("--interval", type=int, default=10, help="time interval of checking the number of running programs")
args = parser.parse_args()
command_file = args.cmdfile
num_of_programs_allowed = args.num
interval = args.interval
with open(command_file, 'r') as cmdf:
command_list = cmdf.read().split('\n')[1:]
total_num_jobs = len(command_list)
next_job_index = 0
while next_job_index < total_num_jobs:
time.sleep(interval)
num_of_running_jobs = len(subprocess.check_output(['pidof', 'python']).split())
if num_of_running_jobs < num_of_programs_allowed:
if num_of_programs_allowed - num_of_running_jobs > total_num_jobs - next_job_index:
run_programs(command_list, next_job_index, total_num_jobs)
next_job_index = total_num_jobs
else:
run_programs(command_list, next_job_index, next_job_index + num_of_programs_allowed - num_of_running_jobs)
next_job_index += num_of_programs_allowed - num_of_running_jobs
def run_programs(command_list, start_index, end_index):
"""
run programs with index [start_index, end_index - 1]
"""
for item in range(start_index, end_index):
command_arg = command_list[item].split()
if command_arg[-1] == "&":
command_arg = command_arg[:-1]
print ("running command: " + str(command_arg))
subprocess.Popen(command_arg)
return
if __name__ == '__main__':
main()
| """
this programs takes a file containing all Python programs to run as input, and
put these programs into a workqueue, and at every instance we make sure only
n Python programs are running
===========================
input:
- file containing Python programs to run
- number of programs allowed to run concurrently
- time interval of checking the number of running programs
"""
import argparse, subprocess, time
def main():
parser = argparse.ArgumentParser()
parser.add_argument("cmdfile", type=str, help="file containing Python programs to run")
parser.add_argument("--num", type=int, default=20, help="number of programs allowed to run concurrently")
parser.add_argument("--interval", type=int, default=10, help="time interval of checking the number of running programs")
args = parser.parse_args()
command_file = args.cmdfile
num_of_programs_allowed = args.num
interval = args.interval
with open(command_file, 'r') as cmdf:
command_list = cmdf.read().split('\n')[1:]
total_num_jobs = len(command_list)
next_job_index = 0
while next_job_index < total_num_jobs:
time.sleep(interval)
num_of_running_jobs = len(subprocess.check_output(['pidof', 'python']).split())
if num_of_running_jobs < num_of_programs_allowed:
if num_of_programs_allowed - num_of_running_jobs > total_num_jobs - next_job_index:
run_programs(command_list, next_job_index, total_num_jobs)
next_job_index = total_num_jobs
else:
run_programs(command_list, next_job_index, next_job_index + num_of_programs_allowed - num_of_running_jobs)
next_job_index += num_of_programs_allowed - num_of_running_jobs
def run_programs(command_list, start_index, end_index):
"""
run programs with index [start_index, end_index - 1]
"""
for item in range(start_index, end_index):
command_arg = command_list[item].split()
if command_arg[-1] == "&":
command_arg = command_arg[:-1]
print ("running command: " + str(command_arg))
subprocess.Popen(command_arg)
return
if __name__ == '__main__':
main()
| mit | Python |
fb3f1494bc5bff911df39223d7dd4f96c2f8c389 | remove version | AnythingTechPro/curionet | tests/__init__.py | tests/__init__.py | """
* Copyright (C) Caleb Marshall and others... - All Rights Reserved
* Written by Caleb Marshall <anythingtechpro@gmail.com>, May 23rd, 2017
* Licensing information can found in 'LICENSE', which is part of this source code package.
"""
| """
* Copyright (C) Caleb Marshall and others... - All Rights Reserved
* Written by Caleb Marshall <anythingtechpro@gmail.com>, May 23rd, 2017
* Licensing information can found in 'LICENSE', which is part of this source code package.
"""
__version__ = '1.0.0'
| apache-2.0 | Python |
6dec30deb4530f0ce7823f5942448309c2a59cea | FIX invoice restrict invoicing | ingadhoc/account-invoicing | account_partner_restrict_invoicing/account_invoice.py | account_partner_restrict_invoicing/account_invoice.py | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, api, _
from openerp.exceptions import ValidationError
class account_invoice(models.Model):
_inherit = 'account.invoice'
@api.multi
def action_move_create(self):
for rec in self:
if rec.partner_id.commercial_partner_id.restrict_invoice:
raise ValidationError(_(
'You can not validate an invoice for this partner "%s" '
'while the field "restrict invoice" is set=True') % (
rec.partner_id.name))
return super(account_invoice, self).action_move_create()
| # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, api, _
from openerp.exceptions import ValidationError
class account_invoice(models.Model):
_inherit = 'account.invoice'
@api.multi
def action_move_create(self):
if self.partner_id.commercial_partner_id.restrict_invoice:
raise ValidationError(_(
'You can not validate an invoice for this partner "%s" while '
'the field "restrict invoice" is set=True') % (
self.partner_id.name))
return super(account_invoice, self).action_move_create()
| agpl-3.0 | Python |
4b0f54a52fc4ca7f954d8a7e9734ef772894518d | Fix search tests | authmillenon/pycomicvine | tests/__init__.py | tests/__init__.py | from tests.characters import *
from tests.chats import *
from tests.concepts import *
from tests.issues import *
from tests.locations import *
from tests.movies import *
from tests.objects import *
from tests.people import *
from tests.promos import *
from tests.powers import *
from tests.search import *
from tests.story_arcs import *
from tests.teams import *
from tests.videos import *
from tests.video_types import *
from tests.volumes import *
| from search import *
from tests.characters import *
from tests.chats import *
from tests.concepts import *
from tests.issues import *
from tests.locations import *
from tests.movies import *
from tests.objects import *
from tests.people import *
from tests.promos import *
from tests.powers import *
from tests.story_arcs import *
from tests.teams import *
from tests.videos import *
from tests.video_types import *
from tests.volumes import *
| mit | Python |
801b077024f1233b1026e5d8e8884bde22afc1df | use correct attribute to find the instance of a bound method | npinto/pytest | py/test/compat.py | py/test/compat.py | import py
from py.test.collect import Function
if py.std.sys.version_info > (3, 0):
_self = "__self__"
else:
_self = "im_self"
class TestCaseUnit(Function):
""" compatibility Unit executor for TestCase methods
honouring setUp and tearDown semantics.
"""
def runtest(self, _deprecated=None):
boundmethod = self.obj
instance = getattr(boundmethod, _self)
instance.setUp()
try:
boundmethod()
finally:
instance.tearDown()
class TestCase(object):
"""compatibility class of unittest's TestCase. """
Function = TestCaseUnit
def setUp(self):
pass
def tearDown(self):
pass
def fail(self, msg=None):
""" fail immediate with given message. """
py.test.fail(msg)
def assertRaises(self, excclass, func, *args, **kwargs):
py.test.raises(excclass, func, *args, **kwargs)
failUnlessRaises = assertRaises
# dynamically construct (redundant) methods
aliasmap = [
('x', 'not x', 'assert_, failUnless'),
('x', 'x', 'failIf'),
('x,y', 'x!=y', 'failUnlessEqual,assertEqual, assertEquals'),
('x,y', 'x==y', 'failIfEqual,assertNotEqual, assertNotEquals'),
]
items = []
for sig, expr, names in aliasmap:
names = map(str.strip, names.split(','))
sigsubst = expr.replace('y', '%s').replace('x', '%s')
for name in names:
items.append("""
def %(name)s(self, %(sig)s, msg=""):
__tracebackhide__ = True
if %(expr)s:
py.test.fail(msg=msg + (%(sigsubst)r %% (%(sig)s)))
""" % locals() )
source = "".join(items)
exec(py.code.Source(source).compile())
__all__ = ['TestCase']
| import py
from py.test.collect import Function
class TestCaseUnit(Function):
""" compatibility Unit executor for TestCase methods
honouring setUp and tearDown semantics.
"""
def runtest(self, _deprecated=None):
boundmethod = self.obj
instance = boundmethod.im_self
instance.setUp()
try:
boundmethod()
finally:
instance.tearDown()
class TestCase(object):
"""compatibility class of unittest's TestCase. """
Function = TestCaseUnit
def setUp(self):
pass
def tearDown(self):
pass
def fail(self, msg=None):
""" fail immediate with given message. """
py.test.fail(msg)
def assertRaises(self, excclass, func, *args, **kwargs):
py.test.raises(excclass, func, *args, **kwargs)
failUnlessRaises = assertRaises
# dynamically construct (redundant) methods
aliasmap = [
('x', 'not x', 'assert_, failUnless'),
('x', 'x', 'failIf'),
('x,y', 'x!=y', 'failUnlessEqual,assertEqual, assertEquals'),
('x,y', 'x==y', 'failIfEqual,assertNotEqual, assertNotEquals'),
]
items = []
for sig, expr, names in aliasmap:
names = map(str.strip, names.split(','))
sigsubst = expr.replace('y', '%s').replace('x', '%s')
for name in names:
items.append("""
def %(name)s(self, %(sig)s, msg=""):
__tracebackhide__ = True
if %(expr)s:
py.test.fail(msg=msg + (%(sigsubst)r %% (%(sig)s)))
""" % locals() )
source = "".join(items)
exec(py.code.Source(source).compile())
__all__ = ['TestCase']
| mit | Python |
65877e88ab9819ed0a505d3cedd2b180bf6f30fa | Update playback.py | miguelgrinberg/pydub,joshrobo/pydub,cbelth/pyMusic,sgml/pydub,jiaaro/pydub,Geoion/pydub | pydub/playback.py | pydub/playback.py | """
Support for playing AudioSegments - pyaudio is a *much* nicer solution, but
is tricky to install. See my notes on installing pyaudio in a virtualenv (on
OSX 10.10): https://gist.github.com/jiaaro/9767512210a1d80a8a0d
"""
import subprocess
from tempfile import NamedTemporaryFile
from .utils import get_player_name, make_chunks
PLAYER = get_player_name()
def _play_with_ffplay(seg):
with NamedTemporaryFile("w+b", suffix=".wav") as f:
seg.export(f.name, "wav")
subprocess.call([PLAYER, "-nodisp", "-autoexit", f.name])
def _play_with_pyaudio(seg):
import pyaudio
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(seg.sample_width),
channels=seg.channels,
rate=seg.frame_rate,
output=True)
# break audio into half-second chunks (to allows keyboard interrupts)
for chunk in make_chunks(seg, 500):
stream.write(chunk._data)
stream.stop_stream()
stream.close()
p.terminate()
def play(audio_segment):
try:
import pyaudio
_play_with_pyaudio(audio_segment)
except ImportError:
_play_with_ffplay(audio_segment)
| import subprocess
from tempfile import NamedTemporaryFile
from .utils import get_player_name, make_chunks
PLAYER = get_player_name()
def _play_with_ffplay(seg):
with NamedTemporaryFile("w+b", suffix=".wav") as f:
seg.export(f.name, "wav")
subprocess.call([PLAYER, "-nodisp", "-autoexit", f.name])
def _play_with_pyaudio(seg):
import pyaudio
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(seg.sample_width),
channels=seg.channels,
rate=seg.frame_rate,
output=True)
# break audio into half-second chunks (to allows keyboard interrupts)
for chunk in make_chunks(seg, 500):
stream.write(chunk._data)
stream.stop_stream()
stream.close()
p.terminate()
def play(audio_segment):
try:
import pyaudio
_play_with_pyaudio(audio_segment)
except ImportError:
_play_with_ffplay(audio_segment)
| mit | Python |
9c9f692c2da9ee989382504026bc7c95826e3842 | add parse_defaults unittest | imjoey/pyhaproxy | pyhaproxy/test.py | pyhaproxy/test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import parse
class TestParse(object):
@classmethod
def setup_class(cls):
pass
@classmethod
def teardown_class(cls):
pass
def setup(self):
self.parser = parse.Parser('haproxy.cfg')
self.configration = self.parser.build_configration()
def teardown(self):
pass
def test_parse_global_section(self):
print self.configration.globall.configs
print '-' * 30
print self.configration.globall.options
def test_parse_frontend_section(self):
for frontend in self.configration.frontends:
print frontend.name, frontend.host, frontend.port
print frontend.configs
print frontend.options
print '-' * 30
def test_parse_defaults_section(self):
for defaults in self.configration.defaults:
print defaults.name
print defaults.options
print defaults.configs | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import parse
class TestParse(object):
@classmethod
def setup_class(cls):
pass
@classmethod
def teardown_class(cls):
pass
def setup(self):
self.parser = parse.Parser('haproxy.cfg')
self.configration = self.parser.build_configration()
def teardown(self):
pass
def test_parse_global_section(self):
print self.configration.globall.configs
print '-' * 30
print self.configration.globall.options
def test_parse_frontend_section(self):
for frontend in self.configration.frontends:
print frontend.name, frontend.host, frontend.port
print frontend.configs
print frontend.options
print '-' * 30
| mit | Python |
d93c0905ba7ffd1ab860ee489bd948d564ff5ea9 | implement wof core get_site_data | ulmo-dev/ulmo-common | pyhis/wof/core.py | pyhis/wof/core.py | import cStringIO as StringIO
import suds
import pyhis.waterml.v1_0 as wml
def get_sites(wsdl_url):
suds_client = suds.client.Client(wsdl_url)
suds_client.service.GetSites('')
response_text = unicode(suds_client.last_received())
response_buffer = StringIO.StringIO()
# hacks: Hydroserver doesn't declare soap namespace so it doesn't validate
inject_namespaces = ['soap', 'wsa', 'wsse', 'wsu', 'xsi']
response_buffer.write(response_text[:53])
for inject_namespace in inject_namespaces:
response_buffer.write(' xmlns:%s="http://soap/envelope/"' % inject_namespace)
response_buffer.write(response_text[53:])
response_buffer.seek(0)
sites = wml.parse_sites(response_buffer)
return sites
def get_site_data(wsdl_url, site_code, network, variable_code, variable_vocabulary):
#kservice=None, parameter_code=None,
#date_range=None, modified_since=None):
suds_client = suds.client.Client(wsdl_url)
suds_client.service.GetValuesObject(
'%s:%s' % (network, site_code),
'%s:%s' % (timeseries.variable.vocabulary, timeseries.variable.code),
begin_date_str,
end_date_str)
suds_client.service.GetSites('')
response_text = unicode(suds_client.last_received())
response_buffer = StringIO.StringIO()
| import cStringIO as StringIO
import suds
import pyhis.waterml.v1_0 as wml
def get_sites(wsdl_url):
suds_client = suds.client.Client(wsdl_url)
suds_client.service.GetSites('')
response_text = unicode(suds_client.last_received())
response_buffer = StringIO.StringIO()
# hacks: Hydroserver doesn't declare soap namespace so it doesn't validate
inject_namespaces = ['soap', 'wsa', 'wsse', 'wsu', 'xsi']
response_buffer.write(response_text[:53])
for inject_namespace in inject_namespaces:
response_buffer.write(' xmlns:%s="http://soap/envelope/"' % inject_namespace)
response_buffer.write(response_text[53:])
response_buffer.seek(0)
sites = wml.parse_sites(response_buffer)
return sites
def get_site_data(site_code, service=None, parameter_code=None,
date_range=None, modified_since=None):
pass
| bsd-3-clause | Python |
d1af9d4a245ae6cc60baca4080b88fd2ef8f911c | Add `pylxd.client.Client` to `pylxd.Client`. | lxc/pylxd,lxc/pylxd | pylxd/__init__.py | pylxd/__init__.py | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
__version__ = pbr.version.VersionInfo('pylxd').version_string()
from pylxd.deprecated import api # NOQA
from pylxd.client import Client # NOQA
| # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
__version__ = pbr.version.VersionInfo('pylxd').version_string()
from pylxd.deprecated import api # NOQA
| apache-2.0 | Python |
e2402e8c1f1f058dca39531f548490cb327c4fb2 | Remove db fixture | tranlyvu/find-link,tranlyvu/findLink | tests/conftest.py | tests/conftest.py | import pytest
from wikilink.wiki_link import WikiLink
@pytest.fixture()
def engine():
return create_engine('postgresql://localhost/test_database')
| import pytest
from wikilink.wiki_link import WikiLink
@pytest.fixture()
def engine():
return create_engine('postgresql://localhost/test_database')
@pytest.fixture()
def wikilin_db_connection(tmpdir):
WikiLink.setup_db()
yield
| apache-2.0 | Python |
df6cb2c96f0833d96a42aea2980717d04b566a2b | Undo skipping trustme tests under linux32 | KeepSafe/aiohttp,KeepSafe/aiohttp,KeepSafe/aiohttp | tests/conftest.py | tests/conftest.py | import hashlib
import ssl
import pytest
import trustme
pytest_plugins = ['aiohttp.pytest_plugin', 'pytester']
@pytest.fixture
def tls_certificate_authority():
return trustme.CA()
@pytest.fixture
def tls_certificate(tls_certificate_authority):
return tls_certificate_authority.issue_server_cert(
'localhost',
'127.0.0.1',
'::1',
)
@pytest.fixture
def ssl_ctx(tls_certificate):
ssl_ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
tls_certificate.configure_cert(ssl_ctx)
return ssl_ctx
@pytest.fixture
def client_ssl_ctx(tls_certificate_authority):
ssl_ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)
tls_certificate_authority.configure_trust(ssl_ctx)
return ssl_ctx
@pytest.fixture
def tls_ca_certificate_pem_path(tls_certificate_authority):
with tls_certificate_authority.cert_pem.tempfile() as ca_cert_pem:
yield ca_cert_pem
@pytest.fixture
def tls_certificate_pem_path(tls_certificate):
with tls_certificate.private_key_and_cert_chain_pem.tempfile() as cert_pem:
yield cert_pem
@pytest.fixture
def tls_certificate_pem_bytes(tls_certificate):
return tls_certificate.cert_chain_pems[0].bytes()
@pytest.fixture
def tls_certificate_fingerprint_sha256(tls_certificate_pem_bytes):
tls_cert_der = ssl.PEM_cert_to_DER_cert(tls_certificate_pem_bytes.decode())
return hashlib.sha256(tls_cert_der).digest()
| import hashlib
import platform
import ssl
import pytest
import trustme
pytest_plugins = ['aiohttp.pytest_plugin', 'pytester']
@pytest.fixture
def tls_certificate_authority():
if (platform.system() == 'Linux' and
platform.architecture() == ('32bit', 'ELF')):
pytest.xfail("trustme fails on 32bit Linux")
return trustme.CA()
@pytest.fixture
def tls_certificate(tls_certificate_authority):
return tls_certificate_authority.issue_server_cert(
'localhost',
'127.0.0.1',
'::1',
)
@pytest.fixture
def ssl_ctx(tls_certificate):
ssl_ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
tls_certificate.configure_cert(ssl_ctx)
return ssl_ctx
@pytest.fixture
def client_ssl_ctx(tls_certificate_authority):
ssl_ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)
tls_certificate_authority.configure_trust(ssl_ctx)
return ssl_ctx
@pytest.fixture
def tls_ca_certificate_pem_path(tls_certificate_authority):
with tls_certificate_authority.cert_pem.tempfile() as ca_cert_pem:
yield ca_cert_pem
@pytest.fixture
def tls_certificate_pem_path(tls_certificate):
with tls_certificate.private_key_and_cert_chain_pem.tempfile() as cert_pem:
yield cert_pem
@pytest.fixture
def tls_certificate_pem_bytes(tls_certificate):
return tls_certificate.cert_chain_pems[0].bytes()
@pytest.fixture
def tls_certificate_fingerprint_sha256(tls_certificate_pem_bytes):
tls_cert_der = ssl.PEM_cert_to_DER_cert(tls_certificate_pem_bytes.decode())
return hashlib.sha256(tls_cert_der).digest()
| apache-2.0 | Python |
74d8e24d58d402ad60aef1fb99f6a8fee5fc81b3 | Fix regression test only: annotator use is not created | xs2maverick/adhocracy3.mercator,xs2maverick/adhocracy3.mercator,liqd/adhocracy3.mercator,xs2maverick/adhocracy3.mercator,fhartwig/adhocracy3.mercator,liqd/adhocracy3.mercator,fhartwig/adhocracy3.mercator,liqd/adhocracy3.mercator,liqd/adhocracy3.mercator,liqd/adhocracy3.mercator,xs2maverick/adhocracy3.mercator,liqd/adhocracy3.mercator,xs2maverick/adhocracy3.mercator,fhartwig/adhocracy3.mercator,fhartwig/adhocracy3.mercator,fhartwig/adhocracy3.mercator,liqd/adhocracy3.mercator,xs2maverick/adhocracy3.mercator,fhartwig/adhocracy3.mercator,fhartwig/adhocracy3.mercator | tests/conftest.py | tests/conftest.py | """Add or override py.test fixtures for all tests in this directory."""
from splinter import Browser
from pytest import fixture
from pytest import skip
def pytest_addoption(parser):
"""Add pytest option --run_embed_tests."""
parser.addoption('--run_embed_tests', action='store_true', default=False,
help='run embed tests (needs /etc/hosts modifications)',
)
def pytest_runtest_setup(item):
"""Skip tests with `embed` marker if `--run_embed_tests` is not set."""
run_embed = item.config.getoption('--run_embed_tests')
embed_marker = item.get_marker('embed')
if run_embed:
return
elif embed_marker:
skip('You need to enable embed test with --run_embed_tests')
@fixture(scope='class')
def app(zeo, settings, websocket):
"""Return the adhocracy wsgi application.
This overrides adhocracy_core.testing.app.
"""
from pyramid.config import Configurator
from adhocracy_core.testing import includeme_root_with_test_users
import adhocracy
settings['adhocracy.add_default_group'] = False
configurator = Configurator(settings=settings,
root_factory=adhocracy.root_factory)
configurator.include(adhocracy)
configurator.commit()
configurator.include(includeme_root_with_test_users)
app = configurator.make_wsgi_app()
return app
@fixture
def browser(browser, frontend, backend, frontend_url) -> Browser:
"""Return test browser, start sample application and go to `root.html`.
Add attribute `root_url` pointing to the adhocracy root.html page.
Add attribute `app_url` pointing to the adhocracy application page.
Before visiting a new url the browser waits until the angular app is loaded
"""
from adhocracy_frontend.testing import angular_app_loaded
browser.root_url = frontend_url
browser.app_url = frontend_url
browser.visit(browser.root_url)
browser.wait_for_condition(angular_app_loaded, 5)
return browser
@fixture
def browser_embed(browser, frontend, backend, frontend_url) -> Browser:
"""Start embedder application."""
url = frontend_url + 'static/embed.html'
browser.visit(url)
return browser
| """Add or override py.test fixtures for all tests in this directory."""
from splinter import Browser
from pytest import fixture
from pytest import skip
def pytest_addoption(parser):
"""Add pytest option --run_embed_tests."""
parser.addoption('--run_embed_tests', action='store_true', default=False,
help='run embed tests (needs /etc/hosts modifications)',
)
def pytest_runtest_setup(item):
"""Skip tests with `embed` marker if `--run_embed_tests` is not set."""
run_embed = item.config.getoption('--run_embed_tests')
embed_marker = item.get_marker('embed')
if run_embed:
return
elif embed_marker:
skip('You need to enable embed test with --run_embed_tests')
@fixture(scope='class')
def app(zeo, settings, websocket):
"""Return the adhocracy wsgi application.
This overrides adhocracy_core.testing.app.
"""
from pyramid.config import Configurator
import adhocracy
settings['adhocracy.add_default_group'] = False
configurator = Configurator(settings=settings,
root_factory=adhocracy.root_factory)
configurator.include(adhocracy)
app = configurator.make_wsgi_app()
return app
@fixture
def browser(browser, frontend, backend, frontend_url) -> Browser:
"""Return test browser, start sample application and go to `root.html`.
Add attribute `root_url` pointing to the adhocracy root.html page.
Add attribute `app_url` pointing to the adhocracy application page.
Before visiting a new url the browser waits until the angular app is loaded
"""
from adhocracy_frontend.testing import angular_app_loaded
browser.root_url = frontend_url
browser.app_url = frontend_url
browser.visit(browser.root_url)
browser.wait_for_condition(angular_app_loaded, 5)
return browser
@fixture
def browser_embed(browser, frontend, backend, frontend_url) -> Browser:
"""Start embedder application."""
url = frontend_url + 'static/embed.html'
browser.visit(url)
return browser
| agpl-3.0 | Python |
8684bd9c1324f1f3030a84090202f3665407854b | remove unused csrf_exempt | kartta-labs/noter-backend,kartta-labs/noter-backend | noter_backend/noter_backend/urls.py | noter_backend/noter_backend/urls.py | from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from main import views
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('api/v0.1/images/', views.ImageList.as_view()),
path('api/v0.1/images/<int:pk>/', views.ImageDetail.as_view()),
path('api/v0.1/projects/', views.ProjectList.as_view()),
path('api/v0.1/projects/<int:pk>/', views.ProjectDetail.as_view()),
path('api/v0.1/annotations/', views.AnnotationsJsonList.as_view()),
path('api/v0.1/annotations/<int:pk>/', views.AnnotationsJsonDetail.as_view()),
path('api/v0.1/users/', views.UserList.as_view()),
path('api/v0.1/users/<int:pk>/', views.UserDetail.as_view()),
path('api/v0.1/whoami/', views.WhoAmI.as_view()),
path('api/v0.1/whatdoihave/', views.WhatDoIHave.as_view()),
path('admin/', admin.site.urls),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns = format_suffix_patterns(urlpatterns) | from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from main import views
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from django.views.decorators.csrf import csrf_exempt
urlpatterns = [
path('api/v0.1/images/', views.ImageList.as_view()),
path('api/v0.1/images/<int:pk>/', views.ImageDetail.as_view()),
path('api/v0.1/projects/', csrf_exempt(views.ProjectList.as_view())),
path('api/v0.1/projects/<int:pk>/', views.ProjectDetail.as_view()),
path('api/v0.1/annotations/', views.AnnotationsJsonList.as_view()),
path('api/v0.1/annotations/<int:pk>/', views.AnnotationsJsonDetail.as_view()),
path('api/v0.1/users/', views.UserList.as_view()),
path('api/v0.1/users/<int:pk>/', views.UserDetail.as_view()),
path('api/v0.1/whoami/', views.WhoAmI.as_view()),
path('api/v0.1/whatdoihave/', views.WhatDoIHave.as_view()),
path('admin/', admin.site.urls),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns = format_suffix_patterns(urlpatterns) | apache-2.0 | Python |
6b7148bb9f9845a9897bb64776ea4025cc4d186e | Patch for issue #303 (thanks to @eltjpm) | stuartarchibald/numba,gdementen/numba,stuartarchibald/numba,stefanseefeld/numba,gmarkall/numba,stuartarchibald/numba,pombredanne/numba,numba/numba,cpcloud/numba,numba/numba,pombredanne/numba,stonebig/numba,stonebig/numba,IntelLabs/numba,pitrou/numba,pitrou/numba,ssarangi/numba,gmarkall/numba,numba/numba,gmarkall/numba,pombredanne/numba,IntelLabs/numba,gdementen/numba,jriehl/numba,sklam/numba,stefanseefeld/numba,seibert/numba,gdementen/numba,seibert/numba,ssarangi/numba,stefanseefeld/numba,ssarangi/numba,pitrou/numba,cpcloud/numba,gmarkall/numba,GaZ3ll3/numba,jriehl/numba,numba/numba,gdementen/numba,IntelLabs/numba,jriehl/numba,ssarangi/numba,IntelLabs/numba,GaZ3ll3/numba,ssarangi/numba,sklam/numba,cpcloud/numba,sklam/numba,pitrou/numba,pombredanne/numba,GaZ3ll3/numba,GaZ3ll3/numba,stonebig/numba,cpcloud/numba,stefanseefeld/numba,pombredanne/numba,stonebig/numba,sklam/numba,pitrou/numba,cpcloud/numba,numba/numba,IntelLabs/numba,seibert/numba,sklam/numba,gdementen/numba,gmarkall/numba,seibert/numba,GaZ3ll3/numba,seibert/numba,stuartarchibald/numba,stuartarchibald/numba,jriehl/numba,jriehl/numba,stonebig/numba,stefanseefeld/numba | numba/tests/issues/test_issue_56.py | numba/tests/issues/test_issue_56.py | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import *
from numba.testing import test_support
import numpy
import unittest
# NOTE: See also numba.tests.ops.test_binary_ops
def maxstar1d(a, b):
M = a.shape[0]
res = numpy.empty(M)
for i in range(M):
res[i] = numpy.max(a[i], b[i]) + numpy.log1p(
numpy.exp(-numpy.abs(a[i] - b[i])))
return res
class TestIssue56(unittest.TestCase):
def test_maxstar1d(self):
test_fn = jit('f8[:](f8[:],f8[:])')(maxstar1d)
test_a = numpy.random.random(10)
test_b = numpy.random.random(10)
self.assertTrue(numpy.allclose(test_fn(test_a, test_b),
maxstar1d(test_a, test_b)))
if __name__ == "__main__":
# TestIssue56("test_maxstar1d").debug()
test_support.main()
| # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
from numba import *
from numba.testing import test_support
import numpy
import unittest
# NOTE: See also numba.tests.ops.test_binary_ops
def maxstar1d(a, b):
M = a.shape[0]
res = numpy.empty(M)
for i in range(M):
res[i] = numpy.max(a[i], b[i]) + numpy.log1p(
numpy.exp(-numpy.abs(a[i] - b[i])))
return res
class TestIssue56(unittest.TestCase):
def test_maxstar1d(self):
test_fn = jit('f8[:](f8[:],f8[:])')(maxstar1d)
test_a = numpy.random.random(10)
test_b = numpy.random.random(10)
self.assertTrue((test_fn(test_a, test_b) ==
maxstar1d(test_a, test_b)).all())
if __name__ == "__main__":
# TestIssue56("test_maxstar1d").debug()
test_support.main()
| bsd-2-clause | Python |
18a9863627f5edb2f909f3c900600d0883493ac0 | Add back fft to scipy namespace. | maniteja123/scipy,aman-iitj/scipy,ChanderG/scipy,woodscn/scipy,mtrbean/scipy,richardotis/scipy,jakevdp/scipy,jseabold/scipy,lukauskas/scipy,anielsen001/scipy,zaxliu/scipy,haudren/scipy,efiring/scipy,grlee77/scipy,ogrisel/scipy,giorgiop/scipy,mingwpy/scipy,befelix/scipy,newemailjdm/scipy,kleskjr/scipy,hainm/scipy,endolith/scipy,Shaswat27/scipy,chatcannon/scipy,surhudm/scipy,mikebenfield/scipy,dominicelse/scipy,maciejkula/scipy,befelix/scipy,nonhermitian/scipy,apbard/scipy,rgommers/scipy,gfyoung/scipy,surhudm/scipy,person142/scipy,rgommers/scipy,tylerjereddy/scipy,aeklant/scipy,mikebenfield/scipy,vigna/scipy,maniteja123/scipy,minhlongdo/scipy,apbard/scipy,ales-erjavec/scipy,andim/scipy,ilayn/scipy,gdooper/scipy,minhlongdo/scipy,jjhelmus/scipy,vigna/scipy,zerothi/scipy,behzadnouri/scipy,fredrikw/scipy,pbrod/scipy,sauliusl/scipy,jseabold/scipy,juliantaylor/scipy,sargas/scipy,futurulus/scipy,gfyoung/scipy,jonycgn/scipy,maciejkula/scipy,bkendzior/scipy,njwilson23/scipy,haudren/scipy,vberaudi/scipy,vberaudi/scipy,dch312/scipy,matthewalbani/scipy,newemailjdm/scipy,tylerjereddy/scipy,zxsted/scipy,scipy/scipy,chatcannon/scipy,jsilter/scipy,nmayorov/scipy,giorgiop/scipy,mhogg/scipy,mortada/scipy,sauliusl/scipy,ogrisel/scipy,fredrikw/scipy,perimosocordiae/scipy,Dapid/scipy,perimosocordiae/scipy,Srisai85/scipy,tylerjereddy/scipy,felipebetancur/scipy,mdhaber/scipy,vanpact/scipy,josephcslater/scipy,zerothi/scipy,bkendzior/scipy,Stefan-Endres/scipy,fredrikw/scipy,efiring/scipy,dch312/scipy,mortonjt/scipy,ales-erjavec/scipy,matthewalbani/scipy,aarchiba/scipy,rmcgibbo/scipy,larsmans/scipy,perimosocordiae/scipy,ndchorley/scipy,andim/scipy,lukauskas/scipy,mgaitan/scipy,futurulus/scipy,argriffing/scipy,jonycgn/scipy,mingwpy/scipy,cpaulik/scipy,ndchorley/scipy,petebachant/scipy,felipebetancur/scipy,perimosocordiae/scipy,andyfaff/scipy,pizzathief/scipy,jseabold/scipy,petebachant/scipy,ales-erjavec/scipy,nonhermitian/scipy,lhilt/scipy,dch312/scipy,Newman101/scipy,chatcannon/scipy,maciejkula/scipy,mortonjt/scipy,sauliusl/scipy,ortylp/scipy,gertingold/scipy,hainm/scipy,FRidh/scipy,woodscn/scipy,piyush0609/scipy,richardotis/scipy,mortada/scipy,witcxc/scipy,Newman101/scipy,sonnyhu/scipy,Shaswat27/scipy,Eric89GXL/scipy,witcxc/scipy,behzadnouri/scipy,Kamp9/scipy,WillieMaddox/scipy,scipy/scipy,Stefan-Endres/scipy,gfyoung/scipy,jseabold/scipy,rgommers/scipy,chatcannon/scipy,jamestwebber/scipy,anntzer/scipy,woodscn/scipy,zaxliu/scipy,newemailjdm/scipy,maniteja123/scipy,anielsen001/scipy,kleskjr/scipy,Srisai85/scipy,nmayorov/scipy,e-q/scipy,teoliphant/scipy,woodscn/scipy,trankmichael/scipy,matthewalbani/scipy,sriki18/scipy,kalvdans/scipy,minhlongdo/scipy,witcxc/scipy,minhlongdo/scipy,matthew-brett/scipy,nvoron23/scipy,futurulus/scipy,kleskjr/scipy,Gillu13/scipy,niknow/scipy,Srisai85/scipy,pnedunuri/scipy,pyramania/scipy,nvoron23/scipy,andyfaff/scipy,gertingold/scipy,WillieMaddox/scipy,jsilter/scipy,zxsted/scipy,arokem/scipy,lhilt/scipy,mhogg/scipy,mdhaber/scipy,fredrikw/scipy,FRidh/scipy,mortonjt/scipy,pschella/scipy,gef756/scipy,mhogg/scipy,trankmichael/scipy,nvoron23/scipy,ChanderG/scipy,teoliphant/scipy,raoulbq/scipy,ortylp/scipy,pbrod/scipy,sriki18/scipy,sauliusl/scipy,vigna/scipy,ortylp/scipy,Stefan-Endres/scipy,raoulbq/scipy,jsilter/scipy,felipebetancur/scipy,grlee77/scipy,jor-/scipy,vhaasteren/scipy,pyramania/scipy,minhlongdo/scipy,Kamp9/scipy,WillieMaddox/scipy,Shaswat27/scipy,piyush0609/scipy,apbard/scipy,matthew-brett/scipy,maniteja123/scipy,njwilson23/scipy,kalvdans/scipy,jakevdp/scipy,vhaasteren/scipy,nonhermitian/scipy,sonnyhu/scipy,maniteja123/scipy,jor-/scipy,Gillu13/scipy,Kamp9/scipy,andyfaff/scipy,giorgiop/scipy,ChanderG/scipy,efiring/scipy,jseabold/scipy,grlee77/scipy,juliantaylor/scipy,gertingold/scipy,jjhelmus/scipy,ndchorley/scipy,pyramania/scipy,trankmichael/scipy,hainm/scipy,njwilson23/scipy,lhilt/scipy,nonhermitian/scipy,jjhelmus/scipy,andim/scipy,zxsted/scipy,mgaitan/scipy,behzadnouri/scipy,perimosocordiae/scipy,WillieMaddox/scipy,minhlongdo/scipy,piyush0609/scipy,gef756/scipy,cpaulik/scipy,Newman101/scipy,mikebenfield/scipy,mortada/scipy,mdhaber/scipy,aeklant/scipy,Shaswat27/scipy,Gillu13/scipy,newemailjdm/scipy,Dapid/scipy,perimosocordiae/scipy,gertingold/scipy,kleskjr/scipy,grlee77/scipy,pbrod/scipy,zaxliu/scipy,dominicelse/scipy,jonycgn/scipy,matthew-brett/scipy,sonnyhu/scipy,endolith/scipy,raoulbq/scipy,efiring/scipy,fernand/scipy,scipy/scipy,cpaulik/scipy,vberaudi/scipy,rgommers/scipy,pizzathief/scipy,nvoron23/scipy,juliantaylor/scipy,anielsen001/scipy,ortylp/scipy,zxsted/scipy,mhogg/scipy,haudren/scipy,fernand/scipy,kalvdans/scipy,richardotis/scipy,dominicelse/scipy,sargas/scipy,mgaitan/scipy,petebachant/scipy,person142/scipy,Eric89GXL/scipy,raoulbq/scipy,josephcslater/scipy,WarrenWeckesser/scipy,vberaudi/scipy,person142/scipy,aman-iitj/scipy,piyush0609/scipy,jonycgn/scipy,hainm/scipy,Newman101/scipy,sonnyhu/scipy,cpaulik/scipy,Shaswat27/scipy,ortylp/scipy,juliantaylor/scipy,scipy/scipy,larsmans/scipy,matthewalbani/scipy,mingwpy/scipy,niknow/scipy,jor-/scipy,mhogg/scipy,gef756/scipy,pizzathief/scipy,witcxc/scipy,maniteja123/scipy,Gillu13/scipy,vigna/scipy,ChanderG/scipy,sargas/scipy,gertingold/scipy,endolith/scipy,ogrisel/scipy,sargas/scipy,andyfaff/scipy,argriffing/scipy,zaxliu/scipy,jonycgn/scipy,sonnyhu/scipy,witcxc/scipy,piyush0609/scipy,anntzer/scipy,lukauskas/scipy,vanpact/scipy,scipy/scipy,e-q/scipy,chatcannon/scipy,niknow/scipy,zerothi/scipy,mortonjt/scipy,aarchiba/scipy,rgommers/scipy,maciejkula/scipy,pschella/scipy,ilayn/scipy,woodscn/scipy,ChanderG/scipy,newemailjdm/scipy,Dapid/scipy,gef756/scipy,bkendzior/scipy,anielsen001/scipy,jakevdp/scipy,larsmans/scipy,maciejkula/scipy,jamestwebber/scipy,ndchorley/scipy,Kamp9/scipy,aarchiba/scipy,pnedunuri/scipy,pschella/scipy,matthew-brett/scipy,rmcgibbo/scipy,ilayn/scipy,teoliphant/scipy,richardotis/scipy,aeklant/scipy,mingwpy/scipy,tylerjereddy/scipy,e-q/scipy,zaxliu/scipy,nvoron23/scipy,ales-erjavec/scipy,juliantaylor/scipy,Dapid/scipy,rmcgibbo/scipy,pnedunuri/scipy,zerothi/scipy,hainm/scipy,endolith/scipy,Srisai85/scipy,FRidh/scipy,anntzer/scipy,argriffing/scipy,grlee77/scipy,zaxliu/scipy,petebachant/scipy,surhudm/scipy,pschella/scipy,niknow/scipy,mtrbean/scipy,rmcgibbo/scipy,jakevdp/scipy,anntzer/scipy,efiring/scipy,ales-erjavec/scipy,larsmans/scipy,surhudm/scipy,jjhelmus/scipy,njwilson23/scipy,mtrbean/scipy,vanpact/scipy,felipebetancur/scipy,vigna/scipy,pnedunuri/scipy,richardotis/scipy,Eric89GXL/scipy,aeklant/scipy,Eric89GXL/scipy,jonycgn/scipy,FRidh/scipy,jsilter/scipy,mdhaber/scipy,mortada/scipy,Stefan-Endres/scipy,Srisai85/scipy,Eric89GXL/scipy,Stefan-Endres/scipy,zxsted/scipy,jor-/scipy,anielsen001/scipy,Newman101/scipy,pbrod/scipy,ales-erjavec/scipy,WarrenWeckesser/scipy,mortonjt/scipy,trankmichael/scipy,mortada/scipy,fernand/scipy,andyfaff/scipy,FRidh/scipy,ortylp/scipy,anntzer/scipy,Gillu13/scipy,jamestwebber/scipy,sonnyhu/scipy,nmayorov/scipy,sriki18/scipy,giorgiop/scipy,petebachant/scipy,arokem/scipy,jseabold/scipy,vhaasteren/scipy,mikebenfield/scipy,surhudm/scipy,sriki18/scipy,gef756/scipy,gfyoung/scipy,ogrisel/scipy,endolith/scipy,WarrenWeckesser/scipy,matthewalbani/scipy,josephcslater/scipy,gdooper/scipy,endolith/scipy,aeklant/scipy,teoliphant/scipy,trankmichael/scipy,nmayorov/scipy,andim/scipy,jakevdp/scipy,vberaudi/scipy,surhudm/scipy,Gillu13/scipy,tylerjereddy/scipy,fredrikw/scipy,person142/scipy,ndchorley/scipy,piyush0609/scipy,dch312/scipy,mtrbean/scipy,Shaswat27/scipy,jsilter/scipy,aman-iitj/scipy,argriffing/scipy,petebachant/scipy,befelix/scipy,ilayn/scipy,kalvdans/scipy,mortonjt/scipy,pbrod/scipy,woodscn/scipy,vhaasteren/scipy,e-q/scipy,mdhaber/scipy,ChanderG/scipy,ndchorley/scipy,andim/scipy,pyramania/scipy,hainm/scipy,Srisai85/scipy,teoliphant/scipy,arokem/scipy,mgaitan/scipy,larsmans/scipy,scipy/scipy,pizzathief/scipy,bkendzior/scipy,dominicelse/scipy,dominicelse/scipy,matthew-brett/scipy,pyramania/scipy,andim/scipy,Kamp9/scipy,aman-iitj/scipy,raoulbq/scipy,Dapid/scipy,behzadnouri/scipy,haudren/scipy,richardotis/scipy,FRidh/scipy,vanpact/scipy,kleskjr/scipy,gfyoung/scipy,Eric89GXL/scipy,Newman101/scipy,mingwpy/scipy,ilayn/scipy,jamestwebber/scipy,anntzer/scipy,cpaulik/scipy,raoulbq/scipy,zxsted/scipy,futurulus/scipy,argriffing/scipy,josephcslater/scipy,pbrod/scipy,kleskjr/scipy,argriffing/scipy,trankmichael/scipy,behzadnouri/scipy,nvoron23/scipy,sauliusl/scipy,befelix/scipy,WillieMaddox/scipy,nmayorov/scipy,rmcgibbo/scipy,pizzathief/scipy,Stefan-Endres/scipy,giorgiop/scipy,mhogg/scipy,ogrisel/scipy,WarrenWeckesser/scipy,haudren/scipy,mgaitan/scipy,lhilt/scipy,nonhermitian/scipy,WillieMaddox/scipy,WarrenWeckesser/scipy,mdhaber/scipy,cpaulik/scipy,lukauskas/scipy,zerothi/scipy,mtrbean/scipy,Kamp9/scipy,rmcgibbo/scipy,fernand/scipy,niknow/scipy,aarchiba/scipy,sriki18/scipy,lhilt/scipy,apbard/scipy,mikebenfield/scipy,gdooper/scipy,ilayn/scipy,aarchiba/scipy,sargas/scipy,apbard/scipy,fredrikw/scipy,mtrbean/scipy,vhaasteren/scipy,newemailjdm/scipy,bkendzior/scipy,jjhelmus/scipy,andyfaff/scipy,vberaudi/scipy,lukauskas/scipy,felipebetancur/scipy,mortada/scipy,mgaitan/scipy,lukauskas/scipy,arokem/scipy,arokem/scipy,josephcslater/scipy,e-q/scipy,efiring/scipy,fernand/scipy,pnedunuri/scipy,pnedunuri/scipy,kalvdans/scipy,felipebetancur/scipy,chatcannon/scipy,giorgiop/scipy,fernand/scipy,vanpact/scipy,gdooper/scipy,aman-iitj/scipy,futurulus/scipy,anielsen001/scipy,gef756/scipy,jamestwebber/scipy,mingwpy/scipy,Dapid/scipy,vanpact/scipy,zerothi/scipy,WarrenWeckesser/scipy,njwilson23/scipy,sriki18/scipy,larsmans/scipy,befelix/scipy,haudren/scipy,sauliusl/scipy,gdooper/scipy,vhaasteren/scipy,dch312/scipy,futurulus/scipy,njwilson23/scipy,behzadnouri/scipy,person142/scipy,pschella/scipy,aman-iitj/scipy,jor-/scipy,niknow/scipy | Lib/__init__.py | Lib/__init__.py | """\
SciPy --- A scientific computing package for Python
===================================================
You can support the development of SciPy by purchasing documentation
at
http://www.trelgol.com
It is being distributed for a fee for a limited time to try and raise
money for development.
Documentation is also available in the docstrings.
"""
try:
import pkg_resources as _pr # activate namespace packages (manipulates __path__)
del _pr
except ImportError:
pass
__all__ = ['pkgload','test']
from numpy import show_config as show_numpy_config
if show_numpy_config is None:
raise ImportError,"Cannot import scipy when running from numpy source directory."
from numpy import __version__ as __numpy_version__
# Import numpy symbols to scipy name space
import numpy as _num
from numpy import oldnumeric
from numpy import *
from numpy.random import rand, randn
from numpy.fft import fft, ifft
from numpy.lib.scimath import *
_num.seterr(all='ignore')
__all__ += ['oldnumeric']+_num.__all__
__all__ += ['randn', 'rand', 'fft', 'ifft']
__doc__ += """
Contents
--------
numpy name space
"""
del _num
from __config__ import show as show_config
from version import version as __version__
# Load scipy packages, their global_symbols, set up __doc__ string.
from numpy._import_tools import PackageLoader
import os as _os
SCIPY_IMPORT_VERBOSE = int(_os.environ.get('SCIPY_IMPORT_VERBOSE','-1'))
del _os
pkgload = PackageLoader()
pkgload(verbose=SCIPY_IMPORT_VERBOSE,postpone=True)
__doc__ += """
Available subpackages
---------------------
"""
__doc__ += pkgload.get_pkgdocs()
def test(level=1, verbosity=1):
""" Run Scipy tests suite with level and verbosity."""
from numpy.testing import ScipyTest
import scipy
scipy.pkgload()
return ScipyTest(scipy).test(level, verbosity)
__doc__ += """
Utility tools
-------------
test --- Run scipy unittests
pkgload --- Load scipy packages
show_config --- Show scipy build configuration
show_numpy_config --- Show numpy build configuration
__version__ --- Scipy version string
__numpy_version__ --- Numpy version string
Environment variables
---------------------
SCIPY_IMPORT_VERBOSE --- pkgload verbose flag, default is 0.
"""
| """\
SciPy --- A scientific computing package for Python
===================================================
You can support the development of SciPy by purchasing documentation
at
http://www.trelgol.com
It is being distributed for a fee for a limited time to try and raise
money for development.
Documentation is also available in the docstrings.
"""
try:
import pkg_resources as _pr # activate namespace packages (manipulates __path__)
del _pr
except ImportError:
pass
__all__ = ['pkgload','test']
from numpy import show_config as show_numpy_config
if show_numpy_config is None:
raise ImportError,"Cannot import scipy when running from numpy source directory."
from numpy import __version__ as __numpy_version__
# Import numpy symbols to scipy name space
import numpy as _num
from numpy import oldnumeric
from numpy import *
from numpy.random import rand, randn
from numpy.lib.scimath import *
_num.seterr(all='ignore')
__all__ += ['oldnumeric']+_num.__all__
__all__ += ['randn', 'rand']
__doc__ += """
Contents
--------
numpy name space
"""
del _num
from __config__ import show as show_config
from version import version as __version__
# Load scipy packages, their global_symbols, set up __doc__ string.
from numpy._import_tools import PackageLoader
import os as _os
SCIPY_IMPORT_VERBOSE = int(_os.environ.get('SCIPY_IMPORT_VERBOSE','-1'))
del _os
pkgload = PackageLoader()
pkgload(verbose=SCIPY_IMPORT_VERBOSE,postpone=True)
__doc__ += """
Available subpackages
---------------------
"""
__doc__ += pkgload.get_pkgdocs()
def test(level=1, verbosity=1):
""" Run Scipy tests suite with level and verbosity."""
from numpy.testing import ScipyTest
import scipy
scipy.pkgload()
return ScipyTest(scipy).test(level, verbosity)
__doc__ += """
Utility tools
-------------
test --- Run scipy unittests
pkgload --- Load scipy packages
show_config --- Show scipy build configuration
show_numpy_config --- Show numpy build configuration
__version__ --- Scipy version string
__numpy_version__ --- Numpy version string
Environment variables
---------------------
SCIPY_IMPORT_VERBOSE --- pkgload verbose flag, default is 0.
"""
| bsd-3-clause | Python |
cf2e38a9ffac6294be8fc67f5ab422e07b4deab1 | fix log message | bnomis/macports-update | macports.py | macports.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# macports: updates macports
# https://github.com/bnomis/macports-update
# (c) Simon Blanchard
import argparse
import datetime
import subprocess
import sys
# the port command
portcmd = '/opt/local/bin/port'
# where to write logs
logfile = '/var/root/logs/macports.log'
# version info
version_info = (0, 1, 0)
__version__ = ".".join([str(v) for v in version_info])
def write_log(options, log, exception=None):
with open(logfile, 'a') as fp:
fp.write(log + '\n')
if exception:
fp.write('%s\n' % exception)
def run_port(options, cmd):
argv = [portcmd, '-q']
argv.extend(cmd.split())
try:
p = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except Exception as e:
write_log(options, 'Exception running %s' % argv, exception=e)
else:
stdout, stderr = p.communicate()
if stdout:
write_log(options, stdout.decode().strip())
if stderr:
write_log(options, stderr.decode().strip())
p.wait()
def macports_update(options):
write_log(options, '\n------------')
write_log(options, 'Starting update %s' % datetime.datetime.now())
run_port(options, 'selfupdate')
run_port(options, 'upgrade outdated')
run_port(options, 'uninstall inactive')
write_log(options, 'Ending update %s' % datetime.datetime.now())
write_log(options, '------------\n')
def main(argv):
program_name = 'macports'
usage_string = '%(prog)s [options]'
version_string = '%(prog)s %(version)s' % {'prog': program_name, 'version': __version__}
description_string = 'macports: updates the installed macports'
parser = argparse.ArgumentParser(
prog=program_name,
usage=usage_string,
description=description_string,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
'--version',
action='version',
version=version_string
)
options = parser.parse_args(argv)
macports_update(options)
return 0
def run():
sys.exit(main(sys.argv[1:]))
if __name__ == '__main__':
run()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# macports: updates macports
# https://github.com/bnomis/macports-update
# (c) Simon Blanchard
import argparse
import datetime
import subprocess
import sys
# the port command
portcmd = '/opt/local/bin/port'
# where to write logs
logfile = '/var/root/logs/macports.log'
# version info
version_info = (0, 1, 0)
__version__ = ".".join([str(v) for v in version_info])
def write_log(options, log, exception=None):
with open(logfile, 'a') as fp:
fp.write(log + '\n')
if exception:
fp.write('%s\n' % exception)
def run_port(options, cmd):
argv = [portcmd, '-q']
argv.extend(cmd.split())
try:
p = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except Exception as e:
write_log(options, 'Exception running %s' % argv, exception=e)
else:
stdout, stderr = p.communicate()
if stdout:
write_log(options, stdout.decode().strip())
if stderr:
write_log(options, stderr.decode().strip())
p.wait()
def macports_update(options):
write_log(options, '\n------------')
write_log(options, 'Starting update %s' % datetime.datetime.now())
run_port(options, 'selfupdate')
run_port(options, 'upgrade outdated')
run_port(options, 'uninstall inactive')
write_log(options, 'Starting ended %s' % datetime.datetime.now())
write_log(options, '------------\n')
def main(argv):
program_name = 'macports'
usage_string = '%(prog)s [options]'
version_string = '%(prog)s %(version)s' % {'prog': program_name, 'version': __version__}
description_string = 'macports: updates the installed macports'
parser = argparse.ArgumentParser(
prog=program_name,
usage=usage_string,
description=description_string,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
'--version',
action='version',
version=version_string
)
options = parser.parse_args(argv)
macports_update(options)
return 0
def run():
sys.exit(main(sys.argv[1:]))
if __name__ == '__main__':
run()
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.