commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
107ffa054406cefa71f245045fe0ad140d44cc55 | Update drinkertest | drinkertest.py | drinkertest.py | from hieretikz import *
import subprocess
from hierarchy import *
formulae = lem, wlem, dp, he, dnsu, dnse, glpo, glpoa, gmp = \
'lem', 'wlem', 'dp', 'he', 'dnsu', 'dnse', 'glpo', 'glpoa', 'gmp'
_______ = None
formula_layout = [
' glpoa ',
' lem glpo ',
' ',
' dp he ',
' gmp ',
' dnsu dnse ',
' wlem ',
]
proofs = {
(lem, wlem): '', # Not yet
(dp, wlem): '',
(he, wlem): '',
(lem, glpo): '',
(glpo, lem): '',
(glpoa, lem): '',
(glpoa, glpo): '',
(dp, dnsu): '',
(glpoa, dnsu): '',
#
(he, dnse): '',
(gmp, dnse): '',
(gmp, dnsu): '',
}
counter_models = {
(dp, he): '',
(he, dp): '',
(lem, dp): '',
(lem, he): '',
(lem, glpoa): '',
(he, dnsu): '',
(dnsu, dp): '',
#
(dp, lem): '',
(he, lem): '',
(dnse, dp): '',
(dp, dnse): '',
}
document = r'''
\documentclass{article}
\usepackage{tikz}
\usepackage{amsmath}
\usepackage{fullpage}
\usepackage{multicol}
\begin{document}
\begin{tikzpicture}[node distance=1 cm, line width=0.3mm, auto]
''' + \
make_tikz(formulae, formula_layout, set(proofs), set(counter_models)) + \
r'''
\end{tikzpicture}
\paragraph{}
It remains to investigate:
\begin{multicols}{3}
\noindent
''' + \
assist(formulae, formula_layout, set(proofs), set(counter_models)) + \
r'''
\end{multicols}
\end{document}
'''
with open('drinker.tex', 'w') as f:
f.write(document)
subprocess.Popen(['pdflatex', 'drinker.tex'], stdout=subprocess.DEVNULL)
| from hieretikz import *
import subprocess
from hierarchy import *
formulae = lem, wlem, dp, he, dnsu, dnse, glpo, glpoa, gmp = \
'lem', 'wlem', 'dp', 'he', 'dnsu', 'dnse', 'glpo', 'glpoa', 'gmp'
_______ = None
formula_layout = [
' glpoa ',
' lem glpo ',
' ',
' dp he ',
' gmp ',
' dnsu dnse ',
' wlem ',
]
proofs = {
(lem, wlem): '', # Not yet
(dp, wlem): '',
(he, wlem): '',
(lem, glpo): '',
(glpo, lem): '',
(glpoa, lem): '',
(glpoa, glpo): '',
(dp, dnsu): '',
(glpoa, dnsu): '',
#
(he, dnse): '',
(gmp, dnse): '',
(gmp, dnsu): '',
}
counter_models = {
(dp, he): '',
(he, dp): '',
(lem, dp): '',
(lem, he): '',
(lem, glpoa): '',
(he, dnsu): '',
(dnsu, dp): '',
#
(dp, lem): '',
(he, lem): '',
(dnse, dp): '',
(dp, dnse): '',
}
document = r'''
\documentclass{article}
\usepackage{tikz}
\usepackage{amsmath}
\usepackage{fullpage}
\usepackage{multicol}
\begin{document}
\begin{tikzpicture}[node distance=1 cm, line width=0.3mm, auto]
''' + \
make_tikz(formulae, formula_layout, proofs, counter_models) + \
r'''
\end{tikzpicture}
\paragraph{}
It remains to investigate:
\begin{multicols}{3}
\noindent
''' + \
assist(formulae, formula_layout, proofs, counter_models) + \
r'''
\end{multicols}
\end{document}
'''
with open('drinker.tex', 'w') as f:
f.write(document)
subprocess.Popen(['pdflatex', 'drinker.tex'], stdout=subprocess.DEVNULL)
| Python | 0.000001 |
23f87565a9074b7d6bd5e45b1ce8686ba49d3ce8 | Update nsoltSynthesis2dNetwork.py | appendix/pytorch/nsoltSynthesis2dNetwork.py | appendix/pytorch/nsoltSynthesis2dNetwork.py | import torch
import torch.nn as nn
from nsoltBlockIdct2dLayer import NsoltBlockIdct2dLayer
from nsoltFinalRotation2dLayer import NsoltFinalRotation2dLayer
from nsoltLayerExceptions import InvalidNumberOfChannels, InvalidPolyPhaseOrder, InvalidNumberOfVanishingMoments
class NsoltSynthesis2dNetwork(nn.Module):
def __init__(self,
number_of_channels=[],
decimation_factor=[],
polyphase_order=[0,0],
number_of_vanishing_moments=1):
super(NsoltSynthesis2dNetwork, self).__init__()
# Check and set parameters
# # of channels
if number_of_channels[0] != number_of_channels[1]:
raise InvalidNumberOfChannels(
'[%d %d] : Currently, Type-I NSOLT is only suported, where the symmetric and antisymmetric channel numbers should be the same.'\
%(number_of_channels[0],number_of_channels[1]))
self.number_of_channels = number_of_channels
# Decimaton factor
self.decimation_factor = decimation_factor
# Polyphase order
if any(torch.tensor(polyphase_order)%2):
raise InvalidPolyPhaseOrder(
'%d + %d : Currently, even polyphase orders are only supported.'\
%(polyphase_order[0],polyphase_order[1]))
self.polyphase_order = polyphase_order
# # of vanishing moments
if number_of_vanishing_moments < 0 \
or number_of_vanishing_moments > 1:
raise InvalidNumberOfVanishingMoments(
'%d : The number of vanishing moment must be either of 0 or 1.'\
%(number_of_vanishing_moments))
self.number_of_vanishing_moments = number_of_vanishing_moments
# Instantiation of layers
self.layerV0T = NsoltFinalRotation2dLayer(
number_of_channels=number_of_channels,
decimation_factor=decimation_factor,
name='V0~'
)
self.layerE0T = NsoltBlockIdct2dLayer(
decimation_factor=decimation_factor,
name='E0~'
)
def forward(self,x):
u = self.layerV0T.forward(x)
y = self.layerE0T.forward(u)
return y
| import torch
import torch.nn as nn
from nsoltBlockIdct2dLayer import NsoltBlockIdct2dLayer
from nsoltFinalRotation2dLayer import NsoltFinalRotation2dLayer
class NsoltSynthesis2dNetwork(nn.Module):
def __init__(self,
number_of_channels=[],
decimation_factor=[]):
super(NsoltSynthesis2dNetwork, self).__init__()
self.number_of_channels = number_of_channels
self.decimation_factor = decimation_factor
# Instantiation of layers
self.layerV0 = NsoltFinalRotation2dLayer(
number_of_channels=number_of_channels,
decimation_factor=decimation_factor,
name='V0'
)
self.layerE0 = NsoltBlockIdct2dLayer(
decimation_factor=decimation_factor,
name='E0'
)
def forward(self,x):
u = self.layerV0.forward(x)
y = self.layerE0.forward(u)
return y
| Python | 0.000001 |
0d056fefa1896a1e4d17b56f0e84dae106c17c57 | fix bug | meta/api/views.py | meta/api/views.py | from django.conf.urls import patterns, url
from django.shortcuts import render
from django.http import Http404
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.contrib.sites.shortcuts import get_current_site
import requests
from .forms import UploadImageForm
from .models import Picture
# Create your views here.
@csrf_exempt
def image_handler(request):
possibe_food = set([
'apple',
'banana',
'carrot',
'broccoli',
'pear'
])
if request.method != 'POST':
raise Http404('wrong method')
else:
form = UploadImageForm(request.POST, request.FILES)
current_site = get_current_site(request)
print current_site
if form.is_valid():
newpic = Picture(image=request.FILES['image'])
newpic.save()
auth = ('acc_2569f28daa2ca36', '5f3d54692a4dcdeda460024d50505ecd')
image_path = \
'http://' + str(current_site) + '/media/' + str(newpic.image.name)
r_url = 'https://api.imagga.com/v1/tagging?url=' + image_path
r = requests.get(r_url, auth=auth)
if r.status_code < 400:
data = r.json()
print data
foods = data['results'][0]['tags']
for food in foods:
if food['tag'] in possibe_food:
return JsonResponse({'food': food['tag']})
return JsonResponse({'food': foods[0]['tag']})
else:
raise Http404('Imagga error occured')
raise Http404('Unknown error occured')
def recipe_handler(request):
if request.method != 'POST':
return Http404('wrong method')
| from django.conf.urls import patterns, url
from django.shortcuts import render
from django.http import Http404
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.contrib.sites.shortcuts import get_current_site
import requests
from .forms import UploadImageForm
from .models import Picture
# Create your views here.
@csrf_exempt
def image_handler(request):
possibe_food = set([
'apple',
'banana',
'carrot',
'broccoli'
])
if request.method != 'POST':
raise Http404('wrong method')
else:
form = UploadImageForm(request.POST, request.FILES)
current_site = get_current_site(request)
print current_site
if form.is_valid():
newpic = Picture(image=request.FILES['image'])
newpic.save()
auth = ('acc_2569f28daa2ca36', '5f3d54692a4dcdeda460024d50505ecd')
image_path = \
'http://' + str(current_site) + '/media/' + str(newpic.image.name)
r_url = 'https://api.imagga.com/v1/tagging?url=' + image_path
r = requests.get(r_url, auth=auth)
if r.status_code < 400:
data = r.json()
print data
foods = data['results'][0]['tags']
for food in foods:
if food['tag'] in possibe_food:
return JsonResponse({'food': food['tag']})
return JsonResponse({'food': foods[0]['tag']})
else:
raise Http404('Imagga error occured')
raise Http404('Unknown error occured')
def recipe_handler(request):
if request.method != 'POST':
return Http404('wrong method')
| Python | 0.000001 |
afebd530cb19196d2101b91cee59011c770b9709 | fix bug | meta/api/views.py | meta/api/views.py | from django.conf.urls import patterns, url
from django.shortcuts import render
from django.http import Http404
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.contrib.sites.shortcuts import get_current_site
import requests
from .forms import UploadImageForm
from .models import Picture
# Create your views here.
@csrf_exempt
def image_handler(request):
possibe_food = set([
'apple',
'banana',
'carrot',
'broccoli',
'pear',
'watermelon'
])
if request.method != 'POST':
raise Http404('wrong method')
else:
form = UploadImageForm(request.POST, request.FILES)
current_site = get_current_site(request)
print current_site
if form.is_valid():
newpic = Picture(image=request.FILES['image'])
newpic.save()
auth = ('acc_2569f28daa2ca36', '5f3d54692a4dcdeda460024d50505ecd')
image_path = \
'http://' + str(current_site) + '/media/' + str(newpic.image.name)
r_url = 'https://api.imagga.com/v1/tagging?url=' + image_path
r = requests.get(r_url, auth=auth)
if r.status_code < 400:
data = r.json()
print data
foods = data['results'][0]['tags']
for food in foods:
if food['tag'] in possibe_food:
return JsonResponse({'food': food['tag']})
return JsonResponse({'food': foods[0]['tag']})
else:
raise Http404('Imagga error occured')
raise Http404('Unknown error occured')
def recipe_handler(request):
if request.method != 'GET':
return Http404('wrong method')
query = request.GET['id']
if query:
r_url = 'http://api.bigoven.com/recipe/%s?api_key=dvx4Bf83RbNOha0Re4c8ZYaTAe0X3hRj' % str(query)
r = requests.get(r_url, headers={"Accept": "application/json"})
if r.status_code < 400:
recipe = r.json()
processed_results = {}
if 'Instructions' not in recipe:
return JsonResponse({'error': 'Instructions not found'})
else:
instruction = recipe['Instructions'].replace('\n', ' ').replace('\r', '')
instructions = instruction.split('.')
instructions = map(
lambda sentence: sentence.strip(),
instructions
)
instructions = filter(
lambda s: not s.isspace() and s,
instructions
)
processed_results['Instructions'] = instructions
processed_results['Ingredient'] = map(
lambda ingredient: ingredient['Name'],
recipe['Ingredients']
)
return JsonResponse(processed_results)
raise Http404('Unknown error occured')
def list_handler(request):
if request.method != 'GET':
return Http404('Wrong method')
query = request.GET['name']
if query:
r_url = 'http://api.bigoven.com/recipes?title_kw=%s&api_key=dvx4Bf83RbNOha0Re4c8ZYaTAe0X3hRj&pg=1&rpp=3' % query
r = requests.get(r_url, headers={"Accept": "application/json"})
if r.status_code < 400:
results = r.json()['Results']
processed_results = map(
lambda recipe: {'title': recipe['Title'], 'id': recipe['RecipeID']},
results
)
return JsonResponse({'result': processed_results})
return Http404('Unknown error occured')
| from django.conf.urls import patterns, url
from django.shortcuts import render
from django.http import Http404
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.contrib.sites.shortcuts import get_current_site
import requests
from .forms import UploadImageForm
from .models import Picture
# Create your views here.
@csrf_exempt
def image_handler(request):
possibe_food = set([
'apple',
'banana',
'carrot',
'broccoli',
'pear',
'watermelon'
])
if request.method != 'POST':
raise Http404('wrong method')
else:
form = UploadImageForm(request.POST, request.FILES)
current_site = get_current_site(request)
print current_site
if form.is_valid():
newpic = Picture(image=request.FILES['image'])
newpic.save()
auth = ('acc_2569f28daa2ca36', '5f3d54692a4dcdeda460024d50505ecd')
image_path = \
'http://' + str(current_site) + '/media/' + str(newpic.image.name)
r_url = 'https://api.imagga.com/v1/tagging?url=' + image_path
r = requests.get(r_url, auth=auth)
if r.status_code < 400:
data = r.json()
print data
foods = data['results'][0]['tags']
for food in foods:
if food['tag'] in possibe_food:
return JsonResponse({'food': food['tag']})
return JsonResponse({'food': foods[0]['tag']})
else:
raise Http404('Imagga error occured')
raise Http404('Unknown error occured')
def recipe_handler(request):
if request.method != 'GET':
return Http404('wrong method')
query = request.GET['id']
if query:
r_url = 'http://api.bigoven.com/recipe/%s?api_key=dvx4Bf83RbNOha0Re4c8ZYaTAe0X3hRj' % str(query)
r = requests.get(r_url, headers={"Accept": "application/json"})
if r.status_code < 400:
recipe = r.json()
processed_results = {}
if 'Instructions' not in recipe:
return JsonResponse({'error': 'Instructions not found'})
else:
instruction = recipe['Instructions'].replace('\n', ' ').replace('\r', '')
instructions = instruction.split('.')
instructions = map(
lambda sentence: sentence.strip(),
instructions
)
instructions = filter(
lambda s: not s.isspace(),
instructions
)
processed_results['Instructions'] = instructions
processed_results['Ingredient'] = map(
lambda ingredient: ingredient['Name'],
recipe['Ingredients']
)
return JsonResponse(processed_results)
raise Http404('Unknown error occured')
def list_handler(request):
if request.method != 'GET':
return Http404('Wrong method')
query = request.GET['name']
if query:
r_url = 'http://api.bigoven.com/recipes?title_kw=%s&api_key=dvx4Bf83RbNOha0Re4c8ZYaTAe0X3hRj&pg=1&rpp=3' % query
r = requests.get(r_url, headers={"Accept": "application/json"})
if r.status_code < 400:
results = r.json()['Results']
processed_results = map(
lambda recipe: {'title': recipe['Title'], 'id': recipe['RecipeID']},
results
)
return JsonResponse({'result': processed_results})
return Http404('Unknown error occured')
| Python | 0.000001 |
bfe039111e2b8e83a450b0d5e5f56827338cd017 | Fix slashes in blog urlpatterns. | mezzanine/urls.py | mezzanine/urls.py | """
This is the main ``urlconf`` for Mezzanine - it sets up patterns for
all the various Mezzanine apps, third-party apps like Grappelli and
filebrowser.
"""
from __future__ import unicode_literals
from future.builtins import str
from django.conf.urls import include, url
from django.contrib.sitemaps.views import sitemap
from django.views.i18n import javascript_catalog
from django.http import HttpResponse
from mezzanine.conf import settings
from mezzanine.core.sitemaps import DisplayableSitemap
urlpatterns = []
# JavaScript localization feature
js_info_dict = {'domain': 'django'}
urlpatterns += [
url(r'^jsi18n/(?P<packages>\S+?)/$', javascript_catalog, js_info_dict),
]
if settings.DEBUG and "debug_toolbar" in settings.INSTALLED_APPS:
try:
import debug_toolbar
except ImportError:
pass
else:
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
# Django's sitemap app.
if "django.contrib.sitemaps" in settings.INSTALLED_APPS:
sitemaps = {"sitemaps": {"all": DisplayableSitemap}}
urlpatterns += [
url("^sitemap\.xml$", sitemap, sitemaps),
]
# Return a robots.txt that disallows all spiders when DEBUG is True.
if getattr(settings, "DEBUG", False):
urlpatterns += [
url("^robots.txt$", lambda r: HttpResponse("User-agent: *\nDisallow: /",
content_type="text/plain")),
]
# Miscellanous Mezzanine patterns.
urlpatterns += [
url("^", include("mezzanine.core.urls")),
url("^", include("mezzanine.generic.urls")),
]
# Mezzanine's Accounts app
if "mezzanine.accounts" in settings.INSTALLED_APPS:
# We don't define a URL prefix here such as /account/ since we want
# to honour the LOGIN_* settings, which Django has prefixed with
# /account/ by default. So those settings are used in accounts.urls
urlpatterns += [
url("^", include("mezzanine.accounts.urls")),
]
# Mezzanine's Blog app.
blog_installed = "mezzanine.blog" in settings.INSTALLED_APPS
if blog_installed:
BLOG_SLUG = settings.BLOG_SLUG.rstrip("/") + "/"
blog_patterns = [
url("^%s" % BLOG_SLUG, include("mezzanine.blog.urls")),
]
urlpatterns += blog_patterns
# Mezzanine's Pages app.
PAGES_SLUG = ""
if "mezzanine.pages" in settings.INSTALLED_APPS:
# No BLOG_SLUG means catch-all patterns belong to the blog,
# so give pages their own prefix and inject them before the
# blog urlpatterns.
if blog_installed and not BLOG_SLUG.rstrip("/"):
PAGES_SLUG = getattr(settings, "PAGES_SLUG", "pages").strip("/") + "/"
blog_patterns_start = urlpatterns.index(blog_patterns[0])
urlpatterns[blog_patterns_start:len(blog_patterns)] = [
url("^%s" % str(PAGES_SLUG), include("mezzanine.pages.urls")),
]
else:
urlpatterns += [
url("^", include("mezzanine.pages.urls")),
]
| """
This is the main ``urlconf`` for Mezzanine - it sets up patterns for
all the various Mezzanine apps, third-party apps like Grappelli and
filebrowser.
"""
from __future__ import unicode_literals
from future.builtins import str
from django.conf.urls import include, url
from django.contrib.sitemaps.views import sitemap
from django.views.i18n import javascript_catalog
from django.http import HttpResponse
from mezzanine.conf import settings
from mezzanine.core.sitemaps import DisplayableSitemap
urlpatterns = []
# JavaScript localization feature
js_info_dict = {'domain': 'django'}
urlpatterns += [
url(r'^jsi18n/(?P<packages>\S+?)/$', javascript_catalog, js_info_dict),
]
if settings.DEBUG and "debug_toolbar" in settings.INSTALLED_APPS:
try:
import debug_toolbar
except ImportError:
pass
else:
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
# Django's sitemap app.
if "django.contrib.sitemaps" in settings.INSTALLED_APPS:
sitemaps = {"sitemaps": {"all": DisplayableSitemap}}
urlpatterns += [
url("^sitemap\.xml$", sitemap, sitemaps),
]
# Return a robots.txt that disallows all spiders when DEBUG is True.
if getattr(settings, "DEBUG", False):
urlpatterns += [
url("^robots.txt$", lambda r: HttpResponse("User-agent: *\nDisallow: /",
content_type="text/plain")),
]
# Miscellanous Mezzanine patterns.
urlpatterns += [
url("^", include("mezzanine.core.urls")),
url("^", include("mezzanine.generic.urls")),
]
# Mezzanine's Accounts app
if "mezzanine.accounts" in settings.INSTALLED_APPS:
# We don't define a URL prefix here such as /account/ since we want
# to honour the LOGIN_* settings, which Django has prefixed with
# /account/ by default. So those settings are used in accounts.urls
urlpatterns += [
url("^", include("mezzanine.accounts.urls")),
]
# Mezzanine's Blog app.
blog_installed = "mezzanine.blog" in settings.INSTALLED_APPS
if blog_installed:
BLOG_SLUG = settings.BLOG_SLUG.rstrip("/")
blog_patterns = [
url("^%s" % BLOG_SLUG, include("mezzanine.blog.urls")),
]
urlpatterns += blog_patterns
# Mezzanine's Pages app.
PAGES_SLUG = ""
if "mezzanine.pages" in settings.INSTALLED_APPS:
# No BLOG_SLUG means catch-all patterns belong to the blog,
# so give pages their own prefix and inject them before the
# blog urlpatterns.
if blog_installed and not BLOG_SLUG:
PAGES_SLUG = getattr(settings, "PAGES_SLUG", "pages").strip("/") + "/"
blog_patterns_start = urlpatterns.index(blog_patterns[0])
urlpatterns[blog_patterns_start:len(blog_patterns)] = [
url("^%s" % str(PAGES_SLUG), include("mezzanine.pages.urls")),
]
else:
urlpatterns += [
url("^", include("mezzanine.pages.urls")),
]
| Python | 0.000001 |
338282a17766345f054f570d5063a7b0b803727b | Enable migration with special chars in pw | migrations/env.py | migrations/env.py | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from sqlalchemy.engine.url import make_url
from logging.config import fileConfig
from six.moves.urllib.parse import quote
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from flask import current_app
def set_database_url(config):
url = current_app.config.get('SQLALCHEMY_DATABASE_URI')
try:
# In case of MySQL, add ``charset=utf8`` to the parameters (if no charset is set),
# because this is what Flask-SQLAlchemy does
if url.startswith("mysql"):
parsed_url = make_url(url)
parsed_url.query.setdefault("charset", "utf8")
# We need to quote the password in case it contains special chars
parsed_url.password = quote(parsed_url.password)
url = str(parsed_url)
except Exception as exx:
print(u"Attempted to set charset=utf8 on connection, but failed: {}".format(exx))
# set_main_option() requires escaped "%" signs in the string
config.set_main_option('sqlalchemy.url', url.replace('%', '%%'))
set_database_url(config)
target_metadata = current_app.extensions['migrate'].db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
# FIX for Postgres updates
url = config.get_section(config.config_ini_section).get("sqlalchemy.url")
driver = url.split(":")[0]
if driver == "postgresql+psycopg2":
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
isolation_level="AUTOCOMMIT",
poolclass=pool.NullPool)
else:
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata,
compare_type=True
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
print("Running offline")
run_migrations_offline()
else:
print("Running online")
run_migrations_online()
| from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from sqlalchemy.engine.url import make_url
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from flask import current_app
def set_database_url(config):
url = current_app.config.get('SQLALCHEMY_DATABASE_URI')
try:
# In case of MySQL, add ``charset=utf8`` to the parameters (if no charset is set),
# because this is what Flask-SQLAlchemy does
if url.startswith("mysql"):
parsed_url = make_url(url)
parsed_url.query.setdefault("charset", "utf8")
url = str(parsed_url)
except Exception as exx:
print(u"Attempted to set charset=utf8 on connection, but failed: {}".format(exx))
# set_main_option() requires escaped "%" signs in the string
config.set_main_option('sqlalchemy.url', url.replace('%', '%%'))
set_database_url(config)
target_metadata = current_app.extensions['migrate'].db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
# FIX for Postgres updates
url = config.get_section(config.config_ini_section).get("sqlalchemy.url")
driver = url.split(":")[0]
if driver == "postgresql+psycopg2":
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
isolation_level="AUTOCOMMIT",
poolclass=pool.NullPool)
else:
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata,
compare_type=True
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
print("Running offline")
run_migrations_offline()
else:
print("Running online")
run_migrations_online()
| Python | 0 |
3380d0fed1a8d24eba8627bd65dccc1fb2f772dd | Update version to next release | minio/__init__.py | minio/__init__.py | # -*- coding: utf-8 -*-
# MinIO Python Library for Amazon S3 Compatible Cloud Storage,
# (C) 2015, 2016, 2017 MinIO, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
minio - MinIO Python Library for Amazon S3 Compatible Cloud Storage
~~~~~~~~~~~~~~~~~~~~~
>>> import minio
>>> minio = Minio('https://s3.amazonaws.com')
>>> for bucket in minio.list_buckets():
... print(bucket.name)
:copyright: (c) 2015, 2016, 2017 by MinIO, Inc.
:license: Apache 2.0, see LICENSE for more details.
"""
__title__ = 'minio-py'
__author__ = 'MinIO, Inc.'
__version__ = '5.0.2'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2015, 2016, 2017, 2018, 2019 MinIO, Inc.'
from .api import Minio
from .error import ResponseError
from .post_policy import PostPolicy
from .copy_conditions import CopyConditions
from .definitions import Bucket, Object
| # -*- coding: utf-8 -*-
# MinIO Python Library for Amazon S3 Compatible Cloud Storage,
# (C) 2015, 2016, 2017 MinIO, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
minio - MinIO Python Library for Amazon S3 Compatible Cloud Storage
~~~~~~~~~~~~~~~~~~~~~
>>> import minio
>>> minio = Minio('https://s3.amazonaws.com')
>>> for bucket in minio.list_buckets():
... print(bucket.name)
:copyright: (c) 2015, 2016, 2017 by MinIO, Inc.
:license: Apache 2.0, see LICENSE for more details.
"""
__title__ = 'minio-py'
__author__ = 'MinIO, Inc.'
__version__ = '5.0.1'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2015, 2016, 2017, 2018, 2019 MinIO, Inc.'
from .api import Minio
from .error import ResponseError
from .post_policy import PostPolicy
from .copy_conditions import CopyConditions
from .definitions import Bucket, Object
| Python | 0 |
5f7cde32f64965fe8f75dd229d67598a53362701 | Fix checker | model/simple-python/components/checker.py | model/simple-python/components/checker.py | from . import *
import z3
class PropertyChecker (object):
"""Actually check for properties in the network graph etc."""
def __init__ (self, context, network):
self.ctx = context
self.net = network
self.solver = z3.Solver()
self.constraints = list ()
self.primed = False
def CheckIsolationProperty (self, src, dest):
assert(src in self.net.elements)
assert(dest in self.net.elements)
if not self.primed:
self.CheckNow()
self.solver.push ()
p = z3.Const('__reachability_Packet_%s_%s'%(src.z3Node, dest.z3Node), self.ctx.packet)
eh = z3.Const('__reachability_last_Node_%s_%s'%(src.z3Node, dest.z3Node), self.ctx.node)
self.solver.add(z3.Exists([eh], self.ctx.recv(eh, dest.z3Node, p)))
self.solver.add(self.ctx.packet.origin(p) == src.z3Node)
self.result = self.solver.check()
if self.result == z3.sat:
self.model = self.solver.model ()
self.solver.pop()
return self.result
def CheckImpliedIsolation (self, srcn, destn, src, dest):
assert(srcn in self.net.elements)
assert(destn in self.net.elements)
if not self.primed:
self.CheckNow()
self.solver.push()
pn = z3.Const('__implied_reachability_neg_Packet_%s_%s'%(src.z3Node, dest.z3Node), self.ctx.packet)
ehn = z3.Const('__implied_reachability_neg_last_Node_%s_%s'%(src.z3Node, dest.z3Node), self.ctx.node)
p = z3.Const('__implied_reachability_Packet_%s_%s'%(src.z3Node, dest.z3Node), self.ctx.packet)
eh = z3.Const('__implied_reachability_last_Node_%s_%s'%(src.z3Node, dest.z3Node), self.ctx.node)
self.solver.add(z3.And(z3.Not(z3.Exists([pn, ehn], \
z3.And(self.ctx.recv(ehn, destn.z3Node, pn), \
self.ctx.packet.origin(pn) == srcn.z3Node))),
z3.And(z3.Exists([eh], \
self.ctx.recv(eh, dest.z3Node, p)), \
self.ctx.packet.origin(p) == src.z3Node)))
self.result = self.solver.check()
if self.result == z3.sat:
self.model = self.solver.model ()
self.solver.pop()
return self.result
def CheckNow (self):
self.ctx._addConstraints(self.solver)
self.net._addConstraints(self.solver)
for el in self.net.elements:
el._addConstraints(self.solver)
self.primed = True
| from . import *
import z3
class PropertyChecker (object):
    """Actually check for properties in the network graph etc."""
    def __init__ (self, context, network):
        self.ctx = context
        self.net = network
        self.solver = z3.Solver()
        self.constraints = list ()
        # Constraints are installed lazily by CheckNow() on the first check.
        self.primed = False
    def CheckIsolationProperty (self, src, dest):
        """Ask whether some packet originating at src can be received by dest.

        Returns the z3 check result; on z3.sat the witness is in self.model.
        """
        assert(src in self.net.elements)
        assert(dest in self.net.elements)
        if not self.primed:
            self.CheckNow()
        self.solver.push ()
        p = z3.Const('__reachability_Packet_%s_%s'%(src.z3Node, dest.z3Node), self.ctx.packet)
        eh = z3.Const('__reachability_last_Node_%s_%s'%(src.z3Node, dest.z3Node), self.ctx.node)
        self.solver.add(z3.Exists([eh], self.ctx.recv(eh, dest.z3Node, p)))
        self.solver.add(self.ctx.packet.origin(p) == src.z3Node)
        self.result = self.solver.check()
        if self.result == z3.sat:
            self.model = self.solver.model ()
        self.solver.pop()
        return self.result
    def CheckImpliedIsolation (self, srcn, destn, src, dest):
        """Search for a state where (srcn -> destn) is isolated while
        (src -> dest) remains reachable.  Returns the z3 check result; on
        z3.sat the witness model is stored in self.model.
        """
        assert(srcn in self.net.elements)
        assert(destn in self.net.elements)
        if not self.primed:
            self.CheckNow()
        self.solver.push()
        pn = z3.Const('__implied_reachability_neg_Packet_%s_%s'%(src.z3Node, dest.z3Node), self.ctx.packet)
        ehn = z3.Const('__implied_reachability_neg_last_Node_%s_%s'%(src.z3Node, dest.z3Node), self.ctx.node)
        p = z3.Const('__implied_reachability_Packet_%s_%s'%(src.z3Node, dest.z3Node), self.ctx.packet)
        eh = z3.Const('__implied_reachability_last_Node_%s_%s'%(src.z3Node, dest.z3Node), self.ctx.node)
        # BUG FIX: compare packet origins against the z3 node of each element
        # (srcn.z3Node / src.z3Node), not the Python wrapper objects, and use
        # dest.z3Node -- `dest.node` is not an attribute of network elements
        # (every other method here uses .z3Node).
        self.solver.add(z3.And(z3.Not(z3.Exists([pn, ehn], \
                            z3.And(self.ctx.recv(ehn, destn.z3Node, pn), \
                               self.ctx.packet.origin(pn) == srcn.z3Node))),
                       z3.And(z3.Exists([eh], \
                         self.ctx.recv(eh, dest.z3Node, p)), \
                         self.ctx.packet.origin(p) == src.z3Node)))
        self.result = self.solver.check()
        if self.result == z3.sat:
            self.model = self.solver.model ()
        self.solver.pop()
        return self.result
    def CheckNow (self):
        """Install context, network and per-element constraints into the solver."""
        self.ctx._addConstraints(self.solver)
        self.net._addConstraints(self.solver)
        for el in self.net.elements:
            el._addConstraints(self.solver)
        self.primed = True
| Python | 0.000003 |
97b594b84811da7bd90a615752c47c8982c1303c | fix addon order - dumper must be last | mitmproxy/dump.py | mitmproxy/dump.py | from __future__ import absolute_import, print_function, division
from typing import Optional # noqa
import typing # noqa
from mitmproxy import controller
from mitmproxy import exceptions
from mitmproxy import flow
from mitmproxy import builtins
from mitmproxy import options
from mitmproxy.builtins import dumper, termlog
from netlib import tcp
class DumpError(Exception):
    """Raised when the dump master cannot be set up or fed its flow file."""
class Options(options.Options):
    # Runtime options specific to the dump (mitmdump-style) master.
    def __init__(
            self,
            keepserving=False,  # type: bool
            filtstr=None,  # type: Optional[str]
            flow_detail=1,  # type: int
            tfile=None,  # type: Optional[typing.io.TextIO]
            **kwargs
    ):
        # filtstr: flow filter expression; flow_detail: dumper verbosity;
        # keepserving: keep serving after an rfile replay finishes;
        # tfile: text stream the dumper writes to.
        self.filtstr = filtstr
        self.flow_detail = flow_detail
        self.keepserving = keepserving
        self.tfile = tfile
        super(Options, self).__init__(**kwargs)
class DumpMaster(flow.FlowMaster):
    # Flow master behind mitmdump: processes flows and prints them to a stream.
    def __init__(self, server, options):
        flow.FlowMaster.__init__(self, options, server, flow.DummyState())
        self.has_errored = False
        # Addons run in registration order: terminal log first, then the
        # default addon set, and the dumper last so it prints flows only
        # after the other addons have processed them.
        self.addons.add(termlog.TermLog())
        self.addons.add(*builtins.default_addons())
        self.addons.add(dumper.Dumper())
        # This line is just for type hinting
        self.options = self.options  # type: Options
        self.set_stream_large_bodies(options.stream_large_bodies)
        if not self.options.no_server and server:
            self.add_log(
                "Proxy server listening at http://{}".format(server.address),
                "info"
            )
        if self.server and self.options.http2 and not tcp.HAS_ALPN:  # pragma: no cover
            self.add_log(
                "ALPN support missing (OpenSSL 1.0.2+ required)!\n"
                "HTTP/2 is disabled. Use --no-http2 to silence this warning.",
                "error"
            )
        if options.rfile:
            try:
                self.load_flows_file(options.rfile)
            except exceptions.FlowReadException as v:
                self.add_log("Flow file corrupted.", "error")
                raise DumpError(v)
        if self.options.app:
            self.start_app(self.options.app_host, self.options.app_port)
    def _readflow(self, paths):
        """
        Utility function that reads a list of flows
        or raises a DumpError if that fails.
        """
        try:
            return flow.read_flows_from_paths(paths)
        except exceptions.FlowReadException as e:
            raise DumpError(str(e))
    @controller.handler
    def log(self, e):
        # Remember whether any error-level event was seen (drives exit status).
        if e.level == "error":
            self.has_errored = True
    def run(self):  # pragma: no cover
        # Replaying a flow file without --keepserving: finish addons and exit.
        if self.options.rfile and not self.options.keepserving:
            self.addons.done()
            return
        super(DumpMaster, self).run()
| from __future__ import absolute_import, print_function, division
from typing import Optional # noqa
import typing # noqa
from mitmproxy import controller
from mitmproxy import exceptions
from mitmproxy import flow
from mitmproxy import builtins
from mitmproxy import options
from mitmproxy.builtins import dumper, termlog
from netlib import tcp
class DumpError(Exception):
    """Raised when the dump master cannot be set up or fed its flow file."""
class Options(options.Options):
    # Runtime options specific to the dump (mitmdump-style) master.
    def __init__(
            self,
            keepserving=False,  # type: bool
            filtstr=None,  # type: Optional[str]
            flow_detail=1,  # type: int
            tfile=None,  # type: Optional[typing.io.TextIO]
            **kwargs
    ):
        # filtstr: flow filter expression; flow_detail: dumper verbosity;
        # keepserving: keep serving after an rfile replay finishes;
        # tfile: text stream the dumper writes to.
        self.filtstr = filtstr
        self.flow_detail = flow_detail
        self.keepserving = keepserving
        self.tfile = tfile
        super(Options, self).__init__(**kwargs)
class DumpMaster(flow.FlowMaster):
    # Flow master behind mitmdump: processes flows and prints them to a stream.
    def __init__(self, server, options):
        flow.FlowMaster.__init__(self, options, server, flow.DummyState())
        self.has_errored = False
        # BUG FIX: addons run in registration order, and the dumper was
        # registered first, so it printed flows before the default addons had
        # processed them.  Register the terminal log first, then the default
        # addon set, and the dumper last.
        self.addons.add(termlog.TermLog())
        self.addons.add(*builtins.default_addons())
        self.addons.add(dumper.Dumper())
        # This line is just for type hinting
        self.options = self.options  # type: Options
        self.set_stream_large_bodies(options.stream_large_bodies)
        if not self.options.no_server and server:
            self.add_log(
                "Proxy server listening at http://{}".format(server.address),
                "info"
            )
        if self.server and self.options.http2 and not tcp.HAS_ALPN:  # pragma: no cover
            self.add_log(
                "ALPN support missing (OpenSSL 1.0.2+ required)!\n"
                "HTTP/2 is disabled. Use --no-http2 to silence this warning.",
                "error"
            )
        if options.rfile:
            try:
                self.load_flows_file(options.rfile)
            except exceptions.FlowReadException as v:
                self.add_log("Flow file corrupted.", "error")
                raise DumpError(v)
        if self.options.app:
            self.start_app(self.options.app_host, self.options.app_port)
    def _readflow(self, paths):
        """
        Utility function that reads a list of flows
        or raises a DumpError if that fails.
        """
        try:
            return flow.read_flows_from_paths(paths)
        except exceptions.FlowReadException as e:
            raise DumpError(str(e))
    @controller.handler
    def log(self, e):
        # Remember whether any error-level event was seen (drives exit status).
        if e.level == "error":
            self.has_errored = True
    def run(self):  # pragma: no cover
        # Replaying a flow file without --keepserving: finish addons and exit.
        if self.options.rfile and not self.options.keepserving:
            self.addons.done()
            return
        super(DumpMaster, self).run()
| Python | 0 |
87f08dc5a847a12ecbb8f9dfc21303f324e12e99 | Fix execution of `commit_msg` | resources/git/hooks/commit_msg.py | resources/git/hooks/commit_msg.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import inspect
import sys
import textwrap
#===============================================================================
# Maximum length of the commit subject (first) line.
FIRST_LINE_MAX_CHAR_LENGTH = 70
# Hard-wrap width applied to commit message body lines.
MESSAGE_BODY_MAX_CHAR_LINE_LENGTH = 72
# Functions named with this prefix are auto-discovered and applied in turn.
COMMIT_MESSAGE_FUNCS_PREFIX = "commit_msg"
def print_error_message_and_exit(message, exit_status=1):
    """Report *message* on stderr and abort the hook with *exit_status*."""
    sys.stderr.write("%s\n" % (message,))
    sys.exit(exit_status)
#===============================================================================
def commit_msg_check_first_line_length(commit_message):
    """Abort the hook if the subject (first) line is too long."""
    subject = commit_message.partition("\n")[0]
    if len(subject) > FIRST_LINE_MAX_CHAR_LENGTH:
        print_error_message_and_exit(
            "First line of commit message too long ({}), must be at most {}".format(
                len(subject), FIRST_LINE_MAX_CHAR_LENGTH))
    return commit_message
def commit_msg_check_second_line_is_empty(commit_message):
    """Abort the hook if a body follows the subject without a blank separator line."""
    lines = commit_message.split("\n")
    if len(lines) > 1 and lines[1]:
        print_error_message_and_exit(
            "If writing a commit message body, the second line must be empty")
    return commit_message
def commit_msg_remove_trailing_period_from_first_line(commit_message):
    """Strip trailing period(s) from the subject line; the body is untouched."""
    subject, sep, body = commit_message.partition("\n")
    return subject.rstrip(".") + sep + body
def commit_msg_capitalize_first_letter_in_header(commit_message):
    """Upper-case the first letter of the subject after an optional "scope:" prefix.

    "scope: fix things" becomes "scope: Fix things" (whitespace after the
    colon is normalised to a single space).  Subjects without a colon, and
    subjects with nothing after the colon, are returned unchanged.
    """
    lines = commit_message.split("\n")
    first_line, body = lines[0], lines[1:]
    first_line_segments = first_line.split(":", 1)
    if len(first_line_segments) <= 1:
        first_line_processed = first_line
    else:
        scope, header = first_line_segments
        header_without_leading_space = header.lstrip(" ")
        if header_without_leading_space:
            header_capitalized = (
                " " + header_without_leading_space[0].upper()
                + header_without_leading_space[1:])
            first_line_processed = ":".join([scope, header_capitalized])
        else:
            # BUG FIX: an empty header (e.g. subject "scope:") used to raise
            # IndexError on header_without_leading_space[0]; leave it as-is.
            first_line_processed = first_line
    return "\n".join([first_line_processed] + body)
def commit_msg_wrap_message_body(commit_message):
    """Hard-wrap every body line to the configured maximum width."""
    first_line, sep, rest = commit_message.partition("\n")
    if not sep:
        # Subject only: nothing to wrap.
        return commit_message
    wrapped = (
        textwrap.fill(
            line,
            MESSAGE_BODY_MAX_CHAR_LINE_LENGTH,
            replace_whitespace=False,
            drop_whitespace=False)
        for line in rest.split("\n"))
    return "\n".join([first_line] + list(wrapped))
def commit_msg_remove_trailing_newlines(commit_message):
    """Drop any trailing newline characters from the message."""
    while commit_message.endswith("\n"):
        commit_message = commit_message[:-1]
    return commit_message
def process_commit_messages(commit_message_filepath):
    """Rewrite the commit message file through every commit_msg_* function.

    Note: inspect.getmembers returns members sorted by name, so the
    transformations run in alphabetical order of function name.
    """
    with open(commit_message_filepath, "r") as commit_message_file:
        commit_message = commit_message_file.read()
    commit_message_funcs = (
        _get_module_level_functions_with_prefix(COMMIT_MESSAGE_FUNCS_PREFIX))
    for func in commit_message_funcs:
        # Each check either returns a (possibly modified) message or exits.
        commit_message = func(commit_message)
    with open(commit_message_filepath, "w") as commit_message_file:
        commit_message_file.write(commit_message)
def _get_module_level_functions_with_prefix(prefix):
    """Return this module's functions whose names start with *prefix*.

    inspect.getmembers sorts by name, so callers receive the functions in
    alphabetical order.
    """
    module = sys.modules[__name__]
    matches = []
    for name, obj in inspect.getmembers(module, inspect.isfunction):
        if name.startswith(prefix):
            matches.append(obj)
    return matches
#===============================================================================
def main():
    # Git passes the commit message file path as the hook's first argument.
    process_commit_messages(sys.argv[1])
if __name__ == "__main__":
    main()
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import inspect
import sys
import textwrap
#===============================================================================
# Maximum length of the commit subject (first) line.
FIRST_LINE_MAX_CHAR_LENGTH = 70
# Hard-wrap width applied to commit message body lines.
MESSAGE_BODY_MAX_CHAR_LINE_LENGTH = 72
# Functions named with this prefix are auto-discovered and applied in turn.
COMMIT_MESSAGE_FUNCS_PREFIX = "commit_msg"
def print_error_message_and_exit(message, exit_status=1):
    """Report *message* on stderr and abort the hook with *exit_status*."""
    sys.stderr.write("%s\n" % (message,))
    sys.exit(exit_status)
#===============================================================================
def commit_msg_check_first_line_length(commit_message):
    """Abort the hook if the subject (first) line is too long."""
    subject = commit_message.partition("\n")[0]
    if len(subject) > FIRST_LINE_MAX_CHAR_LENGTH:
        print_error_message_and_exit(
            "First line of commit message too long ({}), must be at most {}".format(
                len(subject), FIRST_LINE_MAX_CHAR_LENGTH))
    return commit_message
def commit_msg_check_second_line_is_empty(commit_message):
    """Abort the hook if a body follows the subject without a blank separator line."""
    lines = commit_message.split("\n")
    if len(lines) > 1 and lines[1]:
        print_error_message_and_exit(
            "If writing a commit message body, the second line must be empty")
    return commit_message
def commit_msg_remove_trailing_period_from_first_line(commit_message):
    """Strip trailing period(s) from the subject line; the body is untouched."""
    subject, sep, body = commit_message.partition("\n")
    return subject.rstrip(".") + sep + body
def commit_msg_capitalize_first_letter_in_header(commit_message):
    """Upper-case the first letter of the subject after an optional "scope:" prefix.

    "scope: fix things" becomes "scope: Fix things" (whitespace after the
    colon is normalised to a single space).  Subjects without a colon, and
    subjects with nothing after the colon, are returned unchanged.
    """
    lines = commit_message.split("\n")
    first_line, body = lines[0], lines[1:]
    first_line_segments = first_line.split(":", 1)
    if len(first_line_segments) <= 1:
        first_line_processed = first_line
    else:
        scope, header = first_line_segments
        header_without_leading_space = header.lstrip(" ")
        if header_without_leading_space:
            header_capitalized = (
                " " + header_without_leading_space[0].upper()
                + header_without_leading_space[1:])
            first_line_processed = ":".join([scope, header_capitalized])
        else:
            # BUG FIX: an empty header (e.g. subject "scope:") used to raise
            # IndexError on header_without_leading_space[0]; leave it as-is.
            first_line_processed = first_line
    return "\n".join([first_line_processed] + body)
def commit_msg_wrap_message_body(commit_message):
    """Hard-wrap every body line to the configured maximum width."""
    first_line, sep, rest = commit_message.partition("\n")
    if not sep:
        # Subject only: nothing to wrap.
        return commit_message
    wrapped = (
        textwrap.fill(
            line,
            MESSAGE_BODY_MAX_CHAR_LINE_LENGTH,
            replace_whitespace=False,
            drop_whitespace=False)
        for line in rest.split("\n"))
    return "\n".join([first_line] + list(wrapped))
def commit_msg_remove_trailing_newlines(commit_message):
    """Drop any trailing newline characters from the message."""
    while commit_message.endswith("\n"):
        commit_message = commit_message[:-1]
    return commit_message
def process_commit_messages(commit_message_filepath):
    """Rewrite the commit message file through every commit_msg_* function.

    inspect.getmembers returns members sorted by name, so the transformations
    run in alphabetical order of function name.
    """
    with open(commit_message_filepath, "r") as commit_message_file:
        commit_message = commit_message_file.read()
    commit_message_funcs = (
        _get_module_level_functions_with_prefix(COMMIT_MESSAGE_FUNCS_PREFIX))
    # BUG FIX: the discovery helper yields plain function objects, not
    # (function, args...) tuples; indexing into a function object raised
    # "TypeError: 'function' object is not subscriptable" on every commit.
    for func in commit_message_funcs:
        commit_message = func(commit_message)
    with open(commit_message_filepath, "w") as commit_message_file:
        commit_message_file.write(commit_message)
def _get_module_level_functions_with_prefix(prefix):
    """Return this module's functions whose names start with *prefix*.

    inspect.getmembers sorts by name, so callers receive the functions in
    alphabetical order.
    """
    module = sys.modules[__name__]
    matches = []
    for name, obj in inspect.getmembers(module, inspect.isfunction):
        if name.startswith(prefix):
            matches.append(obj)
    return matches
#===============================================================================
def main():
    # Git passes the commit message file path as the hook's first argument.
    process_commit_messages(sys.argv[1])
if __name__ == "__main__":
    main()
| Python | 0 |
ef1336fcb30f951d057b5c943a948e8e5d95f07b | Add import json in roomfinder_dispo | roomfinder_dispo/roomfinder_dispo/dispo.py | roomfinder_dispo/roomfinder_dispo/dispo.py | #!/usr/bin/env python2.7
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
from flask import Flask, render_template, request, jsonify
import argparse
import datetime
import os, sys
import requests
from socket import error as SocketError
import errno
import json
# The WSGI application object; the routes below attach to it.
app = Flask(__name__)
@app.route("/book", methods=["GET"])
def book():
    """Queue a 'book' command for the given room/time/user via RabbitMQ."""
    starttime=request.args.get('starttime', '')
    endtime=request.args.get('endtime', '')
    user_name=request.args.get('user_name', '')
    user_email=request.args.get('user_email', '')
    room_name=request.args.get('room_name', '')
    # BUG FIX: request.args.get(..., '') never returns None, so the previous
    # "is None" checks could never fire; reject missing *or empty* parameters.
    if not (starttime and endtime and user_name and user_email and room_name):
        return "no parameter provided to book request\n"
    data = {
        "cmd": "book",
        "data": {"starttime": starttime, "endtime": endtime, "user_name": user_name, "user_email": user_email, "room_name": room_name}
    }
    message = json.dumps(data)
    return send_message_to_queue(message)
@app.route("/dispo", methods=["GET"])
def dispo():
    """Queue a 'dispo' (availability) command for the given key via RabbitMQ."""
    key=request.args.get('key', '')
    sys.stderr.write( "key: "+str(key)+'\r\n')
    # BUG FIX: the old test was `key is not None and str(key) is not ""` --
    # an identity comparison against a string literal that only worked by
    # accident of CPython interning (and `key` is never None here anyway,
    # since request.args.get defaults to '').
    if key != "":
        data = {
            "cmd": "dispo",
            "data": {"key": key}
        }
        message = json.dumps(data)
        return send_message_to_queue(message)
    return "no parameter provided to dispo request\n"
def on_response(ch, method, props, body):
    # pika consumer callback: accept the reply only when it correlates with
    # the request we just published (corr_id is set by send_message_to_queue,
    # which also polls the `response` global until this fires).
    global corr_id
    global response
    if corr_id == props.correlation_id:
        response = body
def send_message_to_queue(message):
    """RPC over RabbitMQ: publish *message* to rpc_queue and block for the reply.

    Uses module globals so the on_response callback can correlate and store
    the reply.  Returns the raw response body.

    NOTE(review): `pika` and `uuid` are used here but never imported at the
    top of this module -- confirm the deployment provides them.
    """
    global corr_id
    global response
    global connection
    global channel
    global callback_queue
    response = None
    connection = pika.BlockingConnection(pika.ConnectionParameters(host=rabbitmq,port=rabbitmq_port,heartbeat_interval=30))
    channel = connection.channel()
    result = channel.queue_declare(exclusive=True)
    callback_queue = result.method.queue
    # BUG FIX: corr_id was assigned twice (and `response` reset twice), and
    # the consumer was registered before the correlation id was set, so
    # on_response could briefly compare against a stale id.  Set it once,
    # before subscribing.
    corr_id = str(uuid.uuid4())
    channel.basic_consume(on_response, no_ack=True,
                          queue=callback_queue)
    channel.basic_publish( exchange='',
                           routing_key="rpc_queue",
                           properties=pika.BasicProperties(
                                 reply_to = callback_queue,
                                 correlation_id = corr_id),
                           body=message)
    print(" [x] Sent data to RabbitMQ")
    while response is None:
        connection.process_data_events()
    print(" [x] Get response from RabbitMQ")
    return response
if __name__ == '__main__':
    # Resolve RabbitMQ host/port in priority order: CLI flag, then
    # environment variable, then interactive prompt.  These names are module
    # globals consumed by send_message_to_queue().
    from argparse import ArgumentParser
    parser = ArgumentParser("Room Finder Dispo Service")
    parser.add_argument("-r","--rabbitmq", help="IP or hostname for rabbitmq server, e.g. 'rabbit.domain.com'.")
    parser.add_argument("-p","--port", help="tcp port for rabitmq server, e.g. '2765'.")
    args = parser.parse_args()
    rabbitmq = args.rabbitmq
    if (rabbitmq == None):
        rabbitmq = os.getenv("roomfinder_rabbitmq_server")
        if (rabbitmq == None):
            get_rabbitmq_server = raw_input("What is the rabbitmq server IP or hostname? ")
            rabbitmq = get_rabbitmq_server
    rabbitmq_port = args.port
    if (rabbitmq_port == None):
        rabbitmq_port = os.getenv("roomfinder_rabbitmq_port")
        if (rabbitmq_port == None):
            get_rabbitmq_port = raw_input("What is the rabbitmq TCP port? ")
            rabbitmq_port = get_rabbitmq_port
    # Start the web service; retry once before giving up.
    try:
        app.run(debug=True, host='0.0.0.0', port=int("5000"))
    except:
        try:
            app.run(debug=True, host='0.0.0.0', port=int("5000"))
        except:
            print "Dispo web services error"
| #!/usr/bin/env python2.7
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import argparse
import datetime
import errno
import json
import os, sys
import uuid
from socket import error as SocketError

import requests
from flask import Flask, render_template, request, jsonify
# The WSGI application object; the routes below attach to it.
app = Flask(__name__)
@app.route("/book", methods=["GET"])
def book():
    """Queue a 'book' command for the given room/time/user via RabbitMQ."""
    starttime=request.args.get('starttime', '')
    endtime=request.args.get('endtime', '')
    user_name=request.args.get('user_name', '')
    user_email=request.args.get('user_email', '')
    room_name=request.args.get('room_name', '')
    # BUG FIX: an unconditional early return made everything below dead code,
    # so no booking was ever queued.  Only bail out when a parameter is
    # missing or empty (request.args.get(..., '') never returns None, so a
    # plain "is None" guard would never fire either).
    if not (starttime and endtime and user_name and user_email and room_name):
        return "no parameter provided to book request\n"
    data = {
        "cmd": "book",
        "data": {"starttime": starttime, "endtime": endtime, "user_name": user_name, "user_email": user_email, "room_name": room_name}
    }
    message = json.dumps(data)
    return send_message_to_queue(message)
@app.route("/dispo", methods=["GET"])
def dispo():
    """Queue a 'dispo' (availability) command for the given key via RabbitMQ."""
    key=request.args.get('key', '')
    sys.stderr.write( "key: "+str(key)+'\r\n')
    # BUG FIX: the old test was `key is not None and str(key) is not ""` --
    # an identity comparison against a string literal that only worked by
    # accident of CPython interning (and `key` is never None here anyway,
    # since request.args.get defaults to '').
    if key != "":
        data = {
            "cmd": "dispo",
            "data": {"key": key}
        }
        message = json.dumps(data)
        return send_message_to_queue(message)
    return "no parameter provided to dispo request\n"
def on_response(ch, method, props, body):
    # pika consumer callback: accept the reply only when it correlates with
    # the request we just published (corr_id is set by send_message_to_queue,
    # which also polls the `response` global until this fires).
    global corr_id
    global response
    if corr_id == props.correlation_id:
        response = body
def send_message_to_queue(message):
    """RPC over RabbitMQ: publish *message* to rpc_queue and block for the reply.

    Uses module globals so the on_response callback can correlate and store
    the reply.  Returns the raw response body.

    NOTE(review): `pika` is used here but never imported at the top of this
    module -- confirm the deployment provides it.
    """
    global corr_id
    global response
    global connection
    global channel
    global callback_queue
    response = None
    connection = pika.BlockingConnection(pika.ConnectionParameters(host=rabbitmq,port=rabbitmq_port,heartbeat_interval=30))
    channel = connection.channel()
    result = channel.queue_declare(exclusive=True)
    callback_queue = result.method.queue
    # BUG FIX: corr_id was assigned twice (and `response` reset twice), and
    # the consumer was registered before the correlation id was set, so
    # on_response could briefly compare against a stale id.  Set it once,
    # before subscribing.
    corr_id = str(uuid.uuid4())
    channel.basic_consume(on_response, no_ack=True,
                          queue=callback_queue)
    channel.basic_publish( exchange='',
                           routing_key="rpc_queue",
                           properties=pika.BasicProperties(
                                 reply_to = callback_queue,
                                 correlation_id = corr_id),
                           body=message)
    print(" [x] Sent data to RabbitMQ")
    while response is None:
        connection.process_data_events()
    print(" [x] Get response from RabbitMQ")
    return response
if __name__ == '__main__':
    # Resolve RabbitMQ host/port in priority order: CLI flag, then
    # environment variable, then interactive prompt.  These names are module
    # globals consumed by send_message_to_queue().
    from argparse import ArgumentParser
    parser = ArgumentParser("Room Finder Dispo Service")
    parser.add_argument("-r","--rabbitmq", help="IP or hostname for rabbitmq server, e.g. 'rabbit.domain.com'.")
    parser.add_argument("-p","--port", help="tcp port for rabitmq server, e.g. '2765'.")
    args = parser.parse_args()
    rabbitmq = args.rabbitmq
    if (rabbitmq == None):
        rabbitmq = os.getenv("roomfinder_rabbitmq_server")
        if (rabbitmq == None):
            get_rabbitmq_server = raw_input("What is the rabbitmq server IP or hostname? ")
            rabbitmq = get_rabbitmq_server
    rabbitmq_port = args.port
    if (rabbitmq_port == None):
        rabbitmq_port = os.getenv("roomfinder_rabbitmq_port")
        if (rabbitmq_port == None):
            get_rabbitmq_port = raw_input("What is the rabbitmq TCP port? ")
            rabbitmq_port = get_rabbitmq_port
    # Start the web service; retry once before giving up.
    try:
        app.run(debug=True, host='0.0.0.0', port=int("5000"))
    except:
        try:
            app.run(debug=True, host='0.0.0.0', port=int("5000"))
        except:
            print "Dispo web services error"
| Python | 0.000001 |
4815315b2200cb8061640f6333f2cb96b0707965 | update admin to enable list edit | django_sso/admin.py | django_sso/admin.py | from django.contrib import admin
from django_sso import settings
from django_sso.models import Assignment
class AssignmentAdmin(admin.ModelAdmin):
    # Columns shown on the Assignment change list; the first column
    # (__unicode__) remains the change-link, the rest are editable in place.
    list_display = ('__unicode__', 'username', 'username_mode', 'domain', 'user', 'weight')
    # list_editable must be a subset of list_display, excluding the link column.
    list_editable = ('username', 'username_mode', 'domain', 'user', 'weight')
# Register the model with its customised admin.
admin.site.register(Assignment, AssignmentAdmin)
if settings.DJANGO_SSO_ADD_LOGIN_BUTTON:
    # Swap in the SSO-aware admin login template (adds the SSO login button).
    admin.site.login_template = 'django_sso/login.html'
| from django.contrib import admin
from django_sso import settings
from django_sso.models import Assignment
class AssignmentAdmin(admin.ModelAdmin):
    """Stock admin configuration for Assignment records."""
# Register the model with its admin configuration.
admin.site.register(Assignment, AssignmentAdmin)
if settings.DJANGO_SSO_ADD_LOGIN_BUTTON:
    # Swap in the SSO-aware admin login template (adds the SSO login button).
    admin.site.login_template = 'django_sso/login.html'
| Python | 0 |
5022cefce20ecf20d7689fa6d2c280f8774ee677 | add forgot password link to admin login | djangogirls/urls.py | djangogirls/urls.py | from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.views.generic.base import RedirectView
urlpatterns = patterns('',
    # Redirect old links:
    url(r'^pages/in-your-city/$', RedirectView.as_view(url='/organize/', permanent=True)),
    url(r'^admin$', RedirectView.as_view(url='/admin/', permanent=True)),
    # Admin link for password reset
    # See: https://github.com/darklow/django-suit/blob/92a745d72935622220eca80edfce779419c30094/suit/templates/admin/login.html#L61
    url(r'^admin/password_reset/$',
        RedirectView.as_view(url='/account/password_reset', permanent=True),
        name='admin_password_reset'),
    # Regular links:
    url(r'^community/', include('jobs.urls', namespace='jobs')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^pages/', include('django.contrib.flatpages.urls')),
    url(r'^account/', include('django.contrib.auth.urls')),
    # Empty-prefix includes: core is tried first, then applications.
    url(r'', include('core.urls', namespace='core')),
    url(r'', include('applications.urls', namespace='applications')),
    url(r'^ckeditor/', include('ckeditor.urls')),
)
| from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.views.generic.base import RedirectView
urlpatterns = patterns('',
    # Redirect old links:
    url(r'^pages/in-your-city/$', RedirectView.as_view(url='/organize/', permanent=True)),
    url(r'^admin$', RedirectView.as_view(url='/admin/', permanent=True)),
    # The admin login page's "forgotten your password?" link reverses the
    # named URL 'admin_password_reset'; expose it by forwarding to the auth
    # views mounted under /account/ so the link works.
    url(r'^admin/password_reset/$',
        RedirectView.as_view(url='/account/password_reset', permanent=True),
        name='admin_password_reset'),
    # Regular links:
    url(r'^community/', include('jobs.urls', namespace='jobs')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^pages/', include('django.contrib.flatpages.urls')),
    url(r'^account/', include('django.contrib.auth.urls')),
    url(r'', include('core.urls', namespace='core')),
    url(r'', include('applications.urls', namespace='applications')),
    url(r'^ckeditor/', include('ckeditor.urls')),
)
| Python | 0 |
caa63e934fb252c17feac25a01f4298ee4d9b3e1 | Remove STATIC_URL from Django settings. | hello_world/settings.py | hello_world/settings.py | """
Django settings for hello_world project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '80o&#_i#(1haanv=!4w1ns3r5oal!rih-)lm8+@aens#^i(eu7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is fine while DEBUG is True; production must list served hostnames.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hello_world.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'hello_world.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# NOTE(review): no STATIC_ROOT is defined here, so `collectstatic` has no
# target directory -- confirm static files are deployed some other way.
| """
Django settings for hello_world project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '80o&#_i#(1haanv=!4w1ns3r5oal!rih-)lm8+@aens#^i(eu7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is fine while DEBUG is True; production must list served hostnames.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hello_world.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'hello_world.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# Filesystem directory `collectstatic` gathers static assets into.
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| Python | 0 |
c7291b333c00d11e54339b2b2ae14b399cfdc12c | fix sys import (thank you, Orlando) | retirement_api/utils/check_api.py | retirement_api/utils/check_api.py | # script to check the retirement api to make sure
# the SSA Quick Calculator is operational
# and to log the result to a csv (currently via cron)
import sys
import requests
import datetime
import json
import time
import signal
from urlparse import urlparse
# Run timestamp; also recorded in each log row.
timestamp = datetime.datetime.now()
# rolling dob to guarantee subject is 44 and full retirement age is 67
dob = timestamp - datetime.timedelta(days=44*365+30)
# Give the API request at most this many seconds before SIGALRM aborts it.
timeout_seconds = 15
class TimeoutError(Exception):
    """Raised by the SIGALRM handler when a request exceeds the deadline."""
def handler(signum, frame):
    # signal.signal callback: turn SIGALRM into an exception we can catch.
    raise TimeoutError("Request timed out")
class Collector(object):
    # Accumulates one CSV row of results; field order mirrors log_header.
    data = ''
    # Timestamp string captured once at import time.
    date = "%s" % timestamp
    status = ''
    error = ''
    note = ''
    api_fail = ''
    timer = ''
# Module-level singleton mutated by run() and serialised by print_msg().
collector = Collector()
# CSV column order for log output.
log_header = ['data', 'date', 'status', 'error', 'note', 'api_fail', 'timer']
local_base = 'http://localhost:8080'
api_base = 'retirement/retirement-api'
# Filled as base / api_base / month-day-year / fixed salary.
api_string = '%s/%s/estimator/%s-%s-%s/70000/'
def print_msg(collector):
    # Serialise the collector fields as one CSV row, print and return it.
    # Note: the parameter shadows the module-level `collector` on purpose.
    msg = ",".join([collector.__getattribute__(key) for key in log_header])
    print msg
    return msg
def check_data(data):
    """Sanity-check an estimator payload for the rolling 44-year-old probe.

    Returns "OK" when the age, the full retirement age and the age-70
    benefit all look right, otherwise "BAD DATA".
    """
    payload_ok = (data['current_age'] == 44
                  and data['data']['full retirement age'] == '67'
                  and bool(data['data']['benefits']['age 70']))
    return "OK" if payload_ok else "BAD DATA"
def run(base):
    # Probe one estimator request against `base`, record the outcome on the
    # module-level collector, and log it as a CSV row.
    url = api_string % (base, api_base, dob.month, dob.day, dob.year)
    signal.signal(signal.SIGALRM, handler)
    signal.alarm(timeout_seconds)
    start = time.time()
    try:
        test_request = requests.get(url)
    except requests.ConnectionError:
        end = time.time()
        signal.alarm(0)
        collector.status = "ABORTED"
        collector.error = 'Server connection error'
        collector.api_fail = 'FAIL'
    except TimeoutError:
        # Raised by handler() when the SIGALRM deadline fires.
        end = time.time()
        signal.alarm(0)
        collector.status = "TIMEDOUT"
        collector.error = 'SSA request exceeded 15 sec'
        collector.api_fail = 'FAIL'
    else:
        if test_request.status_code != 200:
            signal.alarm(0)
            end = time.time()
            collector.status = "%s" % test_request.status_code
            collector.error = test_request.reason
            collector.api_fail = 'FAIL'
        else:
            end = time.time()
            signal.alarm(0)
            data = json.loads(test_request.text)
            collector.status = "%s" % test_request.status_code
            collector.error = data['error']
            collector.note = data['note']
            collector.data = check_data(data)
    collector.timer = "%s" % (end - start)
    print_msg(collector)
    print url
    return collector
if __name__ == '__main__':
    """runs against a local url unless a domain is passed
    """
    # Pick an argv entry that parses as a URL with a hostname; otherwise
    # fall back to the local server.
    # NOTE(review): a trailing non-URL argument resets base to local_base,
    # so only the *last* argv element effectively decides -- confirm callers
    # only ever pass a single optional URL.
    for arg in sys.argv:
        parsed = urlparse(arg)
        if parsed.netloc:
            base = arg
        else:
            base = local_base
    run(base)
| # script to check the retirement api to make sure
# the SSA Quick Calculator is operational
# and to log the result to a csv (currently via cron)
import requests
import datetime
import json
import time
import signal
# Run timestamp; also recorded in each log row.
timestamp = datetime.datetime.now()
# rolling dob to guarantee subject is 44 and full retirement age is 67
dob = timestamp - datetime.timedelta(days=44*365+30)
# Give the API request at most this many seconds before SIGALRM aborts it.
timeout_seconds = 15
class TimeoutError(Exception):
    """Raised by the SIGALRM handler when a request exceeds the deadline."""
def handler(signum, frame):
    # signal.signal callback: turn SIGALRM into an exception we can catch.
    raise TimeoutError("Request timed out")
class Collector(object):
    # Accumulates one CSV row of results; field order mirrors log_header.
    data = ''
    # Timestamp string captured once at import time.
    date = "%s" % timestamp
    status = ''
    error = ''
    note = ''
    api_fail = ''
    timer = ''
# Module-level singleton mutated by run() and serialised by print_msg().
collector = Collector()
# CSV column order for log output.
log_header = ['data', 'date', 'status', 'error', 'note', 'api_fail', 'timer']
local_base = 'http://localhost:8080'
api_base = 'retirement/retirement-api'
# Filled as base / api_base / month-day-year / fixed salary.
api_string = '%s/%s/estimator/%s-%s-%s/70000/'
def print_msg(collector):
    # Serialise the collector fields as one CSV row, print and return it.
    # Note: the parameter shadows the module-level `collector` on purpose.
    msg = ",".join([collector.__getattribute__(key) for key in log_header])
    print msg
    return msg
def check_data(data):
    """Sanity-check an estimator payload for the rolling 44-year-old probe.

    Returns "OK" when the age, the full retirement age and the age-70
    benefit all look right, otherwise "BAD DATA".
    """
    payload_ok = (data['current_age'] == 44
                  and data['data']['full retirement age'] == '67'
                  and bool(data['data']['benefits']['age 70']))
    return "OK" if payload_ok else "BAD DATA"
def run(base):
    """Hit the retirement estimator once, fill the module-level collector
    with status/error/timing fields, print the CSV row, and return it."""
    url = api_string % (base, api_base, dob.month, dob.day, dob.year)
    # Arm a SIGALRM watchdog so a hung SSA request raises TimeoutError.
    signal.signal(signal.SIGALRM, handler)
    signal.alarm(timeout_seconds)
    start = time.time()
    try:
        test_request = requests.get(url)
    except requests.ConnectionError:
        end = time.time()
        signal.alarm(0)
        collector.status = "ABORTED"
        collector.error = 'Server connection error'
        collector.api_fail = 'FAIL'
    except TimeoutError:
        end = time.time()
        signal.alarm(0)
        collector.status = "TIMEDOUT"
        collector.error = 'SSA request exceeded 15 sec'
        collector.api_fail = 'FAIL'
    else:
        if test_request.status_code != 200:
            # Non-200: record the HTTP status and reason, no body parsing.
            signal.alarm(0)
            end = time.time()
            collector.status = "%s" % test_request.status_code
            collector.error = test_request.reason
            collector.api_fail = 'FAIL'
        else:
            end = time.time()
            signal.alarm(0)
            data = json.loads(test_request.text)
            collector.status = "%s" % test_request.status_code
            collector.error = data['error']
            collector.note = data['note']
            collector.data = check_data(data)
    # `end` is set on every branch above, so the elapsed time is always valid.
    collector.timer = "%s" % (end - start)
    print_msg(collector)
    print url
    return collector
if __name__ == '__main__':
    """runs against a local url unless a domain is passed
    """
    # NOTE(review): `sys` does not appear in the visible import block -- confirm.
    try:
        base = sys.argv[1]
    except IndexError:
        # Narrowed from a bare `except:` -- only a missing argument should
        # fall back to localhost; other errors must surface.
        base = local_base
    run(base)
| Python | 0 |
123f3e706e1931120e035acd2f11fcb1fa3320c7 | Remove unnecessary files | rules/QEMU.py | rules/QEMU.py | import xyz
import shutil
class Qemu(xyz.BuildProtocol):
    """Build rule for QEMU (arm-softmmu only) in the xyz dev-tree."""
    pkg_name = 'QEMU'
    deps = ['pkg-config', 'gettext', 'glib']
    def configure(self):
        # Run QEMU's own ./configure with a minimal feature set; the
        # {placeholders} are expanded by the builder from self.config.
        env = {}
        ldflags = '{standard_ldflags} -F/Library/Frameworks -F/System/Library/Frameworks'
        args = ('{source_dir_from_build}/configure',
                '--prefix={prefix}',
                '--disable-cocoa',
                '--target-list=arm-softmmu',
                '--disable-curses',
                '--disable-vnc',
                '--disable-console',
                '--enable-werror',
                '--disable-slirp',
                '--disable-curl',
                '--disable-guest-base',
                '--disable-guest-agent' ,
                '--disable-blobs',
                '--audio-drv-list=',
                '--audio-card-list=',
                '--disable-usb',
                '--disable-smartcard',
                '--disable-ide',
                # '--exec-prefix={eprefix}',
                # '--host={host}',
                # '--build={build}',
                #'--target-list={target}'
                )
        # Point pkg-config at the dev-tree's own .pc files.
        base_env = {
            'LDFLAGS': ldflags,
            'PKG_CONFIG_PATH': '{devtree_dir_abs}/{host}/lib/pkgconfig'.format(**self.config),
            'QEMU_PKG_CONFIG_FLAGS': '--define-variable prefix={devtree_dir_abs} --define-variable exec_prefix={devtree_dir_abs}/{host}'.format(**self.config)
        }
        base_env.update(env)
        self.builder.cmd(*args, env=base_env, config=self.config)
    def install(self):
        super().install()
        # Now we go and remove all the stuff we don't want.
        # In fact, it may be easy to just install manually what we do want, but
        # to try and keep this working for future version we take this
        # approach for now.
        keymaps_dir = self.builder.j('{install_dir}', self.config['prefix'][1:], 'share', 'qemu',
                                     config=self.config)
        xyz.rmtree(keymaps_dir)
        etc_dir = self.builder.j('{install_dir}', self.config['prefix'][1:], 'etc', config=self.config)
        xyz.rmtree(etc_dir)
        # Copy qemu-system-arm to the right bin location...
        bin_dir = self.builder.j('{install_dir}', self.config['prefix'][1:], 'bin', config=self.config)
        ebin_dir = self.builder.j('{install_dir}', self.config['prefix'][1:], '{host}', 'bin', config=self.config)
        xyz.ensure_dir(ebin_dir)
        shutil.copy(self.builder.j(bin_dir, 'qemu-system-arm'), ebin_dir)
        xyz.rmtree(bin_dir)
# Export the rule for the xyz build system (the class object, not an instance).
rules = Qemu
| import xyz
class Qemu(xyz.BuildProtocol):
    """Build rule for QEMU (arm-softmmu only) in the xyz dev-tree."""
    pkg_name = 'QEMU'
    deps = ['pkg-config', 'gettext', 'glib']
    def configure(self, builder, config):
        # Run QEMU's own ./configure with a minimal feature set; the
        # {placeholders} are expanded by the builder from `config`.
        env = {}
        ldflags = '{standard_ldflags} -F/Library/Frameworks -F/System/Library/Frameworks'
        args = ('{source_dir_from_build}/configure',
                '--prefix={prefix}',
                '--disable-cocoa',
                '--target-list=arm-softmmu',
                '--disable-curses',
                '--disable-vnc',
                '--disable-console',
                '--enable-werror',
                '--disable-slirp',
                '--disable-curl',
                '--disable-guest-base',
                '--disable-guest-agent' ,
                '--disable-blobs',
                '--audio-drv-list=',
                '--audio-card-list=',
                '--disable-usb',
                '--disable-smartcard',
                '--disable-ide',
                # '--exec-prefix={eprefix}',
                # '--host={host}',
                # '--build={build}',
                #'--target-list={target}'
                )
        # Point pkg-config at the dev-tree's own .pc files.
        base_env = {
            'LDFLAGS': ldflags,
            'PKG_CONFIG_PATH': '{devtree_dir_abs}/{host}/lib/pkgconfig'.format(**config),
            'QEMU_PKG_CONFIG_FLAGS': '--define-variable prefix={devtree_dir_abs} --define-variable exec_prefix={devtree_dir_abs}/{host}'.format(**config)
        }
        base_env.update(env)
        builder.cmd(*args, env=base_env, config=config)
# Export an instance of the rule for the xyz build system.
rules = Qemu()
| Python | 0.000004 |
232d4c2c8876b05cec4a427fcdf141a036c97045 | Rename "Events" into "EventRegistry" and "trigger" into "notify" | pyofwave_server/pyofwave/core/operation.py | pyofwave_server/pyofwave/core/operation.py | """
Standard interface for connecting client protocols to the operation extensions.
"""
from delta import DeltaObserverPool as dop
import opdev, delta
# Perform operation
def _getChildren(tag):
rep = [tag.text, ]
for child in tag:
rep.append(child)
rep.append(child.tail)
return rep
def performOperation(event, operation):
    """ Execute a operation."""
    # Dispatch on the operation's XML tag: positional args come from the
    # element's flattened children, keyword args from its attributes.
    rep = opdev._receive[operation.tag](event, *_getChildren(operation), **operation.attrib)
    # Notify registered observers that the operation was performed.
    EventRegisty.notify(operation)
    return rep
# Events
def get(obj, prop, default=None):
    """Return obj[prop], (re)initialising it first when missing or falsy.

    Bug fix: the old signature used a mutable default (``default={}``), so
    every caller that omitted *default* shared one dict instance across all
    keys and all calls. A fresh dict is now created per call instead.
    """
    if not obj.get(prop):
        obj[prop] = {} if default is None else default
    return obj[prop]
# Maps source URL -> {operation tag -> [callback, ...]} for event delivery.
_handlers = {}
class EventRegisty(object):
    """Keeps track of all the events a user registers to."""
    def __init__(self, user, callback):
        self.user = user
        self._callback = callback
    def _handlers(self, url, operation):
        # XXX : Why is it a list that is associated to an operation ?
        # XXX : Is it possible to assign several callback to an operation ?
        # Returns the (auto-created) callback list for (url, operation).
        return get(get(_handlers, url), operation, [])
    def register(self, url, operation):
        # XXX: All registered operations will have the save callback
        self._handlers(url, operation).append(self._callback)
    def unregister(self, url, operation="*"):
        # "*" removes this instance's callback from every operation on `url`.
        url_handlers = get(_handlers, url)
        if operation == "*":
            for operation in url_handlers.keys():
                operation_callback = self._handlers(url, operation)
                if self._callback in operation_callback:
                    operation_callback.remove(self._callback)
        else:
            self._handlers(url, operation).remove(self._callback)
    @staticmethod
    def notify(operation, src = None):
        # Source defaults to the element's href (or src) attribute.
        if src == None:
            src = operation.get("href", operation.get("src", ""))
        for handler in _handlers.get(src, {}).get(operation.tag, []):
            # NOTE(review): (operation.tag) is NOT a tuple; apply_async
            # probably expects (operation.tag,) -- confirm.
            dop.apply_async(handler, (operation.tag))
    # NOTE(review): addObserver is applied to a staticmethod object here;
    # staticmethod objects are not callable in Python 2 -- confirm this
    # decorator stacking actually works with the observer implementation.
    @delta.alphaDeltaObservable.addObserver
    @staticmethod
    def applyDelta(doc, delta):
        """ Calculate and send events. """
| """
Standard interface for connecting client protocols to the operation extensions.
"""
from delta import DeltaObserverPool as dop
import opdev, delta
# Perform operation
def _getChildren(tag):
rep = [tag.text, ]
for child in tag:
rep.append(child)
rep.append(child.tail)
return rep
def performOperation(event, operation):
    """ Execute a operation."""
    # Dispatch on the operation's XML tag: positional args come from the
    # element's flattened children, keyword args from its attributes.
    rep = opdev._receive[operation.tag](event, *_getChildren(operation), **operation.attrib)
    # Notify registered observers that the operation was performed.
    Events.trigger(operation)
    return rep
# Events
def get(obj, prop, default=None):
    """Return obj[prop], (re)initialising it first when missing or falsy.

    Bug fix: the old signature used a mutable default (``default={}``), so
    every caller that omitted *default* shared one dict instance across all
    keys and all calls. A fresh dict is now created per call instead.
    """
    if not obj.get(prop):
        obj[prop] = {} if default is None else default
    return obj[prop]
# Maps source URL -> {operation tag -> [callback, ...]} for event delivery.
_handlers = {}
class Events(object):
    """Keeps track of all the events a user registers to."""
    def __init__(self, user, callback):
        self.user = user
        self._callback = callback
    def _handlers(self, url, operation):
        # XXX : Why is it a list that is associated to an operation ?
        # XXX : Is it possible to assign several callback to an operation ?
        # Returns the (auto-created) callback list for (url, operation).
        return get(get(_handlers, url), operation, [])
    def register(self, url, operation):
        # XXX: All registered operations will have the save callback
        self._handlers(url, operation).append(self._callback)
    def unregister(self, url, operation="*"):
        # "*" removes this instance's callback from every operation on `url`.
        url_handlers = get(_handlers, url)
        if operation == "*":
            for operation in url_handlers.keys():
                operation_callback = self._handlers(url, operation)
                if self._callback in operation_callback:
                    operation_callback.remove(self._callback)
        else:
            self._handlers(url, operation).remove(self._callback)
    @staticmethod
    def trigger(operation, src = None):
        # Source defaults to the element's href (or src) attribute.
        if src == None:
            src = operation.get("href", operation.get("src", ""))
        for handler in _handlers.get(src, {}).get(operation.tag, []):
            # NOTE(review): (operation.tag) is NOT a tuple; apply_async
            # probably expects (operation.tag,) -- confirm.
            dop.apply_async(handler, (operation.tag))
    # NOTE(review): addObserver is applied to a staticmethod object here;
    # staticmethod objects are not callable in Python 2 -- confirm this
    # decorator stacking actually works with the observer implementation.
    @delta.alphaDeltaObservable.addObserver
    @staticmethod
    def applyDelta(doc, delta):
        """ Calculate and send events. """
| Python | 0.000001 |
7c120c02097bfaa1f494627ac93d6cddf5fb9049 | FIX adding newline for chunks | cutools/diff/__init__.py | cutools/diff/__init__.py | from hashlib import md5
from clint.textui import puts, colored
def clean_diff(diff):
    """Strip per-file headers from a diff, keeping only hunk lines.

    Everything is discarded until the first '@@ ' hunk marker following
    each 'diff --git' file header.
    """
    kept = []
    in_hunk = False
    for line in diff.split('\n'):
        if line.startswith('diff --git'):
            in_hunk = False
        elif line.startswith('@@ '):
            in_hunk = True
        if in_hunk:
            kept.append(line)
    return '\n'.join(kept)
def print_diff(diff):
    """Prints colored diff.
    """
    # Additions green, deletions red, context uncolored (via clint).
    for line in diff.split('\n'):
        # Python 2: normalise to a UTF-8 byte string before printing.
        line = unicode(line).encode('utf-8')
        if line.startswith('+'):
            puts(colored.green(line))
        elif line.startswith('-'):
            puts(colored.red(line))
        else:
            puts(line)
def get_chunks(diff):
    """Split a diff into its '@@ '-delimited hunks ("chunks").

    Each chunk keeps its hunk header, drops blank lines, and ends with a
    trailing newline so chunks can be re-joined into valid diff text.
    """
    hunks = []
    buffered = []
    for line in clean_diff(diff).split('\n'):
        if not line:
            continue
        if line.startswith('@@ '):
            if buffered:
                hunks.append('\n'.join(buffered) + '\n')
            buffered = [line]
        else:
            buffered.append(line)
    if buffered:
        hunks.append('\n'.join(buffered) + '\n')
    return hunks
def get_hashed_chunks(chunks):
    """Map each chunk to it keyed by the MD5 hex digest of its UTF-8 bytes."""
    return dict(
        (md5(unicode(chunk).encode('utf-8')).hexdigest(), chunk)
        for chunk in chunks
    )
def clean_chunk(chunk):
    """Drop removed ('-') and header ('@') lines and strip the one-column
    diff prefix from the lines that remain."""
    kept = []
    for line in chunk.split('\n'):
        if not line or line[0] in ('-', '@'):
            continue
        kept.append(line[1:])
    return '\n'.join(kept)
def chunk_in_text(chunk, text):
    """True when the cleaned chunk occurs verbatim inside *text*."""
    return clean_chunk(chunk) in text
| from hashlib import md5
from clint.textui import puts, colored
def clean_diff(diff):
    """Strip per-file headers from a diff, keeping only hunk lines.

    Everything is discarded until the first '@@ ' hunk marker following
    each 'diff --git' file header.
    """
    kept = []
    in_hunk = False
    for line in diff.split('\n'):
        if line.startswith('diff --git'):
            in_hunk = False
        elif line.startswith('@@ '):
            in_hunk = True
        if in_hunk:
            kept.append(line)
    return '\n'.join(kept)
def print_diff(diff):
    """Prints colored diff.
    """
    # Additions green, deletions red, context uncolored (via clint).
    for line in diff.split('\n'):
        # Python 2: normalise to a UTF-8 byte string before printing.
        line = unicode(line).encode('utf-8')
        if line.startswith('+'):
            puts(colored.green(line))
        elif line.startswith('-'):
            puts(colored.red(line))
        else:
            puts(line)
def get_chunks(diff):
    """Returns a list with all the chunks in this diff.

    Bug fix: each chunk now ends with a trailing newline. Without it the
    last line of one chunk runs into the first line of the next when
    chunks are re-joined into diff text.
    """
    diff = clean_diff(diff)
    chunk = []
    chunks = []
    for line in diff.split('\n'):
        if not line:
            continue
        if line.startswith('@@ '):
            if chunk:
                chunks.append('\n'.join(chunk) + '\n')
            chunk = [line]
        else:
            chunk.append(line)
    if chunk:
        chunks.append('\n'.join(chunk) + '\n')
    return chunks
def get_hashed_chunks(chunks):
    """Map each chunk keyed by the MD5 hex digest of its UTF-8 bytes."""
    return dict(
        (md5(unicode(chunk).encode('utf-8')).hexdigest(), chunk)
        for chunk in chunks
    )
def clean_chunk(chunk):
    """Drop removed ('-') and header ('@') lines and strip the one-column
    diff prefix from the lines that remain."""
    kept = []
    for line in chunk.split('\n'):
        if not line or line[0] in ('-', '@'):
            continue
        kept.append(line[1:])
    return '\n'.join(kept)
def chunk_in_text(chunk, text):
    """True when the cleaned chunk occurs verbatim inside *text*."""
    return clean_chunk(chunk) in text
| Python | 0 |
c8fa72a130d84d921b23f5973dafb8fa91367381 | Make ip_type a RadioSelect in the PTR form | cyder/cydns/ptr/forms.py | cyder/cydns/ptr/forms.py | from django import forms
from cyder.cydns.forms import DNSForm
from cyder.cydns.ptr.models import PTR
class PTRForm(DNSForm):
    """ModelForm for PTR (reverse DNS) records."""
    def delete_instance(self, instance):
        # Deletion hook called by the generic views; simply removes the record.
        instance.delete()
    class Meta:
        model = PTR
        # These fields are excluded from the form and not user-editable here.
        exclude = ('ip', 'reverse_domain', 'ip_upper',
                   'ip_lower')
        widgets = {'views': forms.CheckboxSelectMultiple,
                   'ip_type': forms.RadioSelect}
| from django import forms
from cyder.cydns.forms import DNSForm
from cyder.cydns.ptr.models import PTR
class PTRForm(DNSForm):
    """ModelForm for PTR (reverse DNS) records."""
    def delete_instance(self, instance):
        # Deletion hook called by the generic views; simply removes the record.
        instance.delete()
    class Meta:
        model = PTR
        # These fields are excluded from the form and not user-editable here.
        exclude = ('ip', 'reverse_domain', 'ip_upper',
                   'ip_lower')
        widgets = {'views': forms.CheckboxSelectMultiple}
| Python | 0.000001 |
e68c85ae4526557efd0d3c1bd45857583d542659 | handle errors in better bibtex | python/citation_vim/zotero/betterbibtex.py | python/citation_vim/zotero/betterbibtex.py | # -*- coding: utf-8 -*-
import os
import shutil
import json
import sqlite3
class betterBibtex(object):
    """Reads Better BibTeX citation keys out of a Zotero installation.

    Better BibTeX has stored its data in several formats over time, so
    loading falls back from the JSON database to the lokijs sqlite store.
    """

    def __init__(self, zotero_path, cache_path):
        self.bb_file = os.path.join(zotero_path, 'better-bibtex/db.json')
        self.bb_database = os.path.join(zotero_path, 'betterbibtex-lokijs.sqlite')
        # The sqlite store is copied before reading so the live database is
        # never opened while Zotero may hold a lock on it.
        self.bb_copy = os.path.join(cache_path, 'betterbibtex.sqlite')

    bb_data_query = u"""
    select lokijs.data
    from lokijs
    where lokijs.name = "db.json"
    """

    def load_citekeys(self):
        """
        Loads better-bibtex citekeys if they exist.

        Returns a dict mapping itemID -> citekey (best effort; {} when no
        readable Better BibTeX store is found).
        """
        # The storage method for betterbibtex keeps changing so we'll try a few.
        try:
            bb_data = open(self.bb_file).read()
            bb_json = json.loads(bb_data)
        except Exception:
            try:
                # Fallback: pull the embedded db.json blob out of the lokijs
                # sqlite database (via a private copy). Bug fix: a stray
                # `desc_strings.append(getattr(entry, desc_field))` line
                # referenced undefined names here, so the silent except made
                # this fallback fail on every call; the line is removed.
                shutil.copyfile(self.bb_database, self.bb_copy)
                conn = sqlite3.connect(self.bb_copy)
                cur = conn.cursor()
                cur.execute(self.bb_data_query)
                bb_data = cur.fetchone()[0]
                bb_json = json.loads(bb_data)
            except Exception:
                return {}
        citekeys = {}
        try:
            for item in bb_json['collections'][0]['data']:
                if 'itemID' not in item:
                    # Without an id there is nothing to key the entry on.
                    continue
                citekeys[item['itemID']] = item.get('citekey', "")
        except Exception:
            return {}
        return citekeys
| # -*- coding: utf-8 -*-
import os
import shutil
import json
import sqlite3
class betterBibtex(object):
    """Reads Better BibTeX citation keys out of a Zotero installation.

    Better BibTeX has stored its data in several formats over time, so
    loading falls back from the JSON database to the lokijs sqlite store.
    """

    def __init__(self, zotero_path, cache_path):
        self.bb_file = os.path.join(zotero_path, 'better-bibtex/db.json')
        self.bb_database = os.path.join(zotero_path, 'betterbibtex-lokijs.sqlite')
        # The sqlite store is copied before reading so the live database is
        # never opened while Zotero may hold a lock on it.
        self.bb_copy = os.path.join(cache_path, 'betterbibtex.sqlite')

    bb_data_query = u"""
    select lokijs.data
    from lokijs
    where lokijs.name = "db.json"
    """

    def load_citekeys(self):
        """
        Loads better-bibtex citekeys if they exist.

        Returns a dict mapping itemID -> citekey (best effort; {} when no
        readable Better BibTeX store is found).
        """
        # The storage method for betterbibtex keeps changing so we'll try a few.
        try:
            bb_data = open(self.bb_file).read()
            bb_json = json.loads(bb_data)
        except Exception:
            try:
                # Fallback: pull the embedded db.json blob out of the lokijs
                # sqlite database (via a private copy). Bug fix: a stray
                # `desc_strings.append(getattr(entry, desc_field))` line
                # referenced undefined names here, so the silent except made
                # this fallback fail on every call; the line is removed.
                shutil.copyfile(self.bb_database, self.bb_copy)
                conn = sqlite3.connect(self.bb_copy)
                cur = conn.cursor()
                cur.execute(self.bb_data_query)
                bb_data = cur.fetchone()[0]
                bb_json = json.loads(bb_data)
            except Exception:
                return {}
        citekeys = {}
        try:
            # Bug fix: entries without an 'itemID' used to raise KeyError
            # and crash the whole load; they are now skipped.
            for item in bb_json['collections'][0]['data']:
                if 'itemID' not in item:
                    continue
                citekeys[item['itemID']] = item.get('citekey', "")
        except Exception:
            return {}
        return citekeys
| Python | 0.000002 |
cbdbe14365d5caad28fe77d9c2ca1c66cbf783bd | test travis turning off db switch | python/marvin/tests/misc/test_db_switch.py | python/marvin/tests/misc/test_db_switch.py | #!/usr/bin/env python2
# encoding: utf-8
#
# test_db_switch.py
#
# Created by José Sánchez-Gallego on Sep 7, 2016.
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
def create_connection(db_name):
    """Creates the connection and import the model classes."""
    from marvin.db.DatabaseConnection import DatabaseConnection
    # Local-socket connection string (no host/port) to the named database.
    database_connection_string = 'postgresql+psycopg2:///{0}'.format(db_name)
    db = DatabaseConnection(database_connection_string=database_connection_string)
    # NOTE(review): imported after connecting -- presumably the model classes
    # bind to the active connection; confirm.
    import marvin.db.models.DataModelClasses as mangaData
    return db, mangaData
def perform_query(db, mangaData):
    """Performs a simple query and return the value."""
    # Fetch xfocal for a single known cube (plate 8485, mangaid 1-209232)
    # reduced with pipeline v1_5_1; .one() asserts exactly one row exists.
    session = db.Session()
    xfocal = session.query(mangaData.Cube.xfocal).filter(
        mangaData.Cube.plate == 8485, mangaData.Cube.mangaid == '1-209232').join(
        mangaData.PipelineInfo, mangaData.PipelineVersion).filter(
        mangaData.PipelineVersion.version == 'v1_5_1').one()
    return xfocal
# db_name = 'manga'
# db, mangaData = create_connection(db_name)
# print(perform_query(db, mangaData))
# db_name_copy = 'manga_copy'
# db, mangaData = create_connection(db_name_copy)
# print(perform_query(db, mangaData))
| #!/usr/bin/env python2
# encoding: utf-8
#
# test_db_switch.py
#
# Created by José Sánchez-Gallego on Sep 7, 2016.
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
def create_connection(db_name):
    """Creates the connection and import the model classes."""
    from marvin.db.DatabaseConnection import DatabaseConnection
    # Local-socket connection string (no host/port) to the named database.
    database_connection_string = 'postgresql+psycopg2:///{0}'.format(db_name)
    db = DatabaseConnection(database_connection_string=database_connection_string)
    # NOTE(review): imported after connecting -- presumably the model classes
    # bind to the active connection; confirm.
    import marvin.db.models.DataModelClasses as mangaData
    return db, mangaData
def perform_query(db, mangaData):
    """Performs a simple query and return the value."""
    # Fetch xfocal for a single known cube (plate 8485, mangaid 1-209232)
    # reduced with pipeline v1_5_1; .one() asserts exactly one row exists.
    session = db.Session()
    xfocal = session.query(mangaData.Cube.xfocal).filter(
        mangaData.Cube.plate == 8485, mangaData.Cube.mangaid == '1-209232').join(
        mangaData.PipelineInfo, mangaData.PipelineVersion).filter(
        mangaData.PipelineVersion.version == 'v1_5_1').one()
    return xfocal
# Run the same sanity query against two databases in sequence to exercise
# switching database connections within one process.
db_name = 'manga'
db, mangaData = create_connection(db_name)
print(perform_query(db, mangaData))
db_name_copy = 'manga_copy'
db, mangaData = create_connection(db_name_copy)
print(perform_query(db, mangaData))
| Python | 0 |
c8df75a2112cd8e6a4f929ceac21714b716e46ce | Use the IRC nickname for !twitter if one is not provided. | dasbit/plugin/twitter.py | dasbit/plugin/twitter.py | from twisted.web.client import getPage
from urllib import urlencode
import json
class Twitter:
    """Looks up tweets and users via the Twitter v1 REST API and replies
    with the result on IRC."""

    def __init__(self, manager):
        self.client = manager.client
        manager.registerCommand('twitter', 'lookup', 'twitter', '(?P<query>.*?)', self.lookup)

    def lookup(self, source, query):
        """Resolve *query* -- a tweet id, a screen name, or (when empty)
        the caller's own IRC nickname -- and fetch the matching resource."""
        is_tweet_id = query.isdigit()
        if is_tweet_id:
            url = 'http://api.twitter.com/1/statuses/show/%s.json' % query
        else:
            screen_name = query if len(query) > 0 else source.prefix['nickname']
            url = 'http://api.twitter.com/1/users/show.json?%s' % urlencode({'screen_name' : screen_name})
        getPage(url).addCallback(self._returnResult, source, is_tweet_id)

    def _returnResult(self, value, source, isNumericLookup):
        """Parse the API response and reply with '<user> text (url)'."""
        try:
            data = json.loads(value)
        except:
            self.client.reply(source, 'An error occured while processing the result', 'notice')
            return
        if 'error' in data:
            self.client.reply(source, 'An error occured while processing the result', 'notice')
            return
        # A tweet lookup returns the status at top level; a user lookup
        # embeds the latest status under 'status'.
        if isNumericLookup:
            user = data['user']['screen_name']
            status = data
        else:
            user = data['screen_name']
            status = data['status']
        text = status['text']
        id = status['id_str']
        url = 'https://twitter.com/#!/%s/status/%s' % (user, id)
        self.client.reply(source, '<%s> %s (%s)' % (user, text, url))
| from twisted.web.client import getPage
from urllib import urlencode
import json
class Twitter:
    """Looks up tweets and users via the Twitter v1 REST API and replies
    with the result on IRC."""
    def __init__(self, manager):
        self.client = manager.client
        manager.registerCommand('twitter', 'lookup', 'twitter', '(?P<query>.*?)', self.lookup)
    def lookup(self, source, query):
        # Numeric query -> tweet id lookup; anything else -> user lookup.
        # NOTE(review): an empty query results in screen_name='' here.
        if query.isdigit():
            url = 'http://api.twitter.com/1/statuses/show/%s.json' % query
        else:
            url = 'http://api.twitter.com/1/users/show.json?%s' % urlencode({'screen_name' : query})
        getPage(url).addCallback(self._returnResult, source, query.isdigit())
    def _returnResult(self, value, source, isNumericLookup):
        # Parse the API response and reply with '<user> text (url)'.
        try:
            data = json.loads(value)
        except:
            self.client.reply(source, 'An error occured while processing the result', 'notice')
            return
        if 'error' in data:
            self.client.reply(source, 'An error occured while processing the result', 'notice')
            return
        # A tweet lookup returns the status at top level; a user lookup
        # embeds the latest status under 'status'.
        if isNumericLookup:
            user = data['user']['screen_name']
            text = data['text']
            id = data['id_str']
        else:
            user = data['screen_name']
            text = data['status']['text']
            id = data['status']['id_str']
        url = 'https://twitter.com/#!/%s/status/%s' % (user, id)
        self.client.reply(source, '<%s> %s (%s)' % (user, text, url))
| Python | 0 |
1cd445e7e2f41665200a43728cbd5196098b7cad | fix crash | school/api.py | school/api.py | # -*- coding: utf-8 -*-
from django.core.paginator import Paginator, InvalidPage
from django.http import Http404
from django.urls import re_path
from django.db.models import Q
from haystack.query import SearchQuerySet
from tastypie import fields
from tastypie.resources import ModelResource
from tastypie.resources import ALL, ALL_WITH_RELATIONS
from tastypie.utils import trailing_slash
from tastypie.authorization import DjangoAuthorization
from people.api import ArtistResource
from .models import Promotion, Student, StudentApplication
from assets.api import GalleryResource
class PromotionResource(ModelResource):
    """Read API for school promotions (class years)."""
    class Meta:
        queryset = Promotion.objects.all()
        resource_name = 'school/promotion'
        ordering = ['starting_year']
class StudentResource(ArtistResource):
    """API for students, with filtering on user/artist/promotion and a
    haystack-backed autocomplete search endpoint."""
    class Meta:
        queryset = Student.objects.all()
        resource_name = 'school/student'
        ordering = ['user', ]
        filtering = {
            'artist': ALL_WITH_RELATIONS,
            'user': ALL_WITH_RELATIONS,
            'user__last_name__istartswith': ALL_WITH_RELATIONS,
            'promotion': ALL,
        }
        fields = ['id', 'number', 'promotion', 'graduate', 'user', 'artist']
    promotion = fields.ForeignKey(PromotionResource, 'promotion')
    artist = fields.ForeignKey(ArtistResource, 'artist', full=True)
    # BUG Error (why?) user__last_name__istartswith
    # "The 'last_name' field does not support relations"
    def build_filters(self, filters=None, ignore_bad_filters=False):
        # Turn off the error: always pass ignore_bad_filters=True so the
        # unsupported user__last_name__istartswith filter is skipped here
        # and handled manually in apply_filters below.
        return super(StudentResource, self).build_filters(filters, ignore_bad_filters=True)
    # override user__last_name__istartswith query
    def apply_filters(self, request, applicable_filters):
        base_object_list = super(StudentResource, self).apply_filters(request, applicable_filters)
        # Manually apply the last-name prefix filter that build_filters skipped.
        query = request.GET.get('user__last_name__istartswith', None)
        if query:
            qset = (Q(user__last_name__istartswith=query))
            base_object_list = base_object_list.filter(qset).distinct()
        return base_object_list
    # end of Bug Error
    def prepend_urls(self):
        # Adds .../school/student/search/ in front of the default routes.
        return [
            re_path(r"^(?P<resource_name>%s)/search%s$" % (self._meta.resource_name, trailing_slash()),
                    self.wrap_view('get_search'),
                    name="api_get_search"),
        ]
    def get_search(self, request, **kwargs):
        """Autocomplete search over students via haystack (?q=..., ?page=...)."""
        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        # Do the query.
        sqs = SearchQuerySet().models(Student).load_all().autocomplete(content_auto=request.GET.get('q', ''))
        paginator = Paginator(sqs, 20)
        try:
            page = paginator.page(int(request.GET.get('page', 1)))
        except InvalidPage:
            raise Http404("Sorry, no results on that page.")
        objects = []
        for result in page.object_list:
            bundle = self.build_bundle(obj=result.object, request=request)
            bundle = self.full_dehydrate(bundle)
            objects.append(bundle)
        object_list = {
            'objects': objects,
        }
        self.log_throttled_access(request)
        return self.create_response(request, object_list)
class StudentApplicationResource(ModelResource):
    """API for student applications, with their attached galleries inlined."""
    class Meta:
        queryset = StudentApplication.objects.all()
        resource_name = 'school/application'
        ordering = ['created_on']
        # no authorization for Anonymous user
        authorization = DjangoAuthorization()
    artist = fields.ForeignKey(ArtistResource, 'artist')
    administrative_galleries = fields.ToManyField(GalleryResource, 'administrative_galleries', full=True, null=True)
    artwork_galleries = fields.ToManyField(GalleryResource, 'artwork_galleries', full=True, null=True)
| # -*- coding: utf-8 -*-
from django.core.paginator import Paginator, InvalidPage
from django.http import Http404
from django.urls import re_path
from django.db.models import Q
from haystack.query import SearchQuerySet
from tastypie import fields
from tastypie.resources import ModelResource
from tastypie.resources import ALL, ALL_WITH_RELATIONS
from tastypie.utils import trailing_slash
from tastypie.authorization import DjangoAuthorization
from people.api import ArtistResource
from .models import Promotion, Student, StudentApplication
from assets.api import GalleryResource
class PromotionResource(ModelResource):
    """Read API for school promotions (class years)."""
    class Meta:
        queryset = Promotion.objects.all()
        resource_name = 'school/promotion'
        ordering = ['starting_year']
class StudentResource(ArtistResource):
    """API for students, with filtering on user/artist/promotion and a
    haystack-backed autocomplete search endpoint."""
    class Meta:
        queryset = Student.objects.all()
        resource_name = 'school/student'
        ordering = ['user', ]
        filtering = {
            'artist': ALL_WITH_RELATIONS,
            'user': ALL_WITH_RELATIONS,
            'user__last_name__istartswith': ALL_WITH_RELATIONS,
            'promotion': ALL,
        }
        fields = ['id', 'number', 'promotion', 'graduate', 'user', 'artist']
    promotion = fields.ForeignKey(PromotionResource, 'promotion')
    artist = fields.ForeignKey(ArtistResource, 'artist', full=True)
    # BUG Error (why?) user__last_name__istartswith
    # "The 'last_name' field does not support relations"
    def build_filters(self, filters=None, ignore_bad_filters=False):
        # Crash fix: tastypie invokes build_filters with an
        # ignore_bad_filters keyword; the previous override's signature did
        # not accept it, raising TypeError. We accept it but always force it
        # to True so the unsupported user__last_name__istartswith filter is
        # skipped here and handled manually in apply_filters below.
        return super(StudentResource, self).build_filters(filters, ignore_bad_filters=True)
    # override user__last_name__istartswith query
    def apply_filters(self, request, applicable_filters):
        base_object_list = super(StudentResource, self).apply_filters(request, applicable_filters)
        # Manually apply the last-name prefix filter that build_filters skipped.
        query = request.GET.get('user__last_name__istartswith', None)
        if query:
            qset = (Q(user__last_name__istartswith=query))
            base_object_list = base_object_list.filter(qset).distinct()
        return base_object_list
    # end of Bug Error
    def prepend_urls(self):
        # Adds .../school/student/search/ in front of the default routes.
        return [
            re_path(r"^(?P<resource_name>%s)/search%s$" % (self._meta.resource_name, trailing_slash()),
                    self.wrap_view('get_search'),
                    name="api_get_search"),
        ]
    def get_search(self, request, **kwargs):
        """Autocomplete search over students via haystack (?q=..., ?page=...)."""
        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        # Do the query.
        sqs = SearchQuerySet().models(Student).load_all().autocomplete(content_auto=request.GET.get('q', ''))
        paginator = Paginator(sqs, 20)
        try:
            page = paginator.page(int(request.GET.get('page', 1)))
        except InvalidPage:
            raise Http404("Sorry, no results on that page.")
        objects = []
        for result in page.object_list:
            bundle = self.build_bundle(obj=result.object, request=request)
            bundle = self.full_dehydrate(bundle)
            objects.append(bundle)
        object_list = {
            'objects': objects,
        }
        self.log_throttled_access(request)
        return self.create_response(request, object_list)
class StudentApplicationResource(ModelResource):
    """API for student applications, with their attached galleries inlined."""
    class Meta:
        queryset = StudentApplication.objects.all()
        resource_name = 'school/application'
        ordering = ['created_on']
        # no authorization for Anonymous user
        authorization = DjangoAuthorization()
    artist = fields.ForeignKey(ArtistResource, 'artist')
    administrative_galleries = fields.ToManyField(GalleryResource, 'administrative_galleries', full=True, null=True)
    artwork_galleries = fields.ToManyField(GalleryResource, 'artwork_galleries', full=True, null=True)
| Python | 0.000003 |
774da53edef30cb2f3c45cc47c63d46f142a4e07 | Use four space indentation, repo_path to arguments | score_repo.py | score_repo.py | #!/usr/bin/env python3
import argparse
import importlib
import json
import os
import sys
def load_attribute_plugins(attributes):
    """Import the implementation module for each enabled attribute.

    On success the module is stored under the attribute's 'implementation'
    key; failures are reported and the attribute is left without one.
    """
    for attribute in attributes:
        if not attribute['enabled']:
            continue
        module_path = "attributes.{0}.main".format(attribute['name'])
        try:
            attribute['implementation'] = importlib.import_module(module_path)
        except ImportError:
            print("Failed to load the {0} attribute.".format(attribute['name']))
def process_configuration(config_file):
    """Parse the JSON configuration from an open file object.

    Exits with status 2 when the contents are not valid JSON. The handler
    is narrowed from a bare ``except`` (which also swallowed SystemExit and
    programming errors) to ValueError, which json.load raises on bad input.
    """
    try:
        return json.load(config_file)
    except ValueError:
        print("Malformatted or missing configuration.")
        sys.exit(2)
def repository_path(path_string):
    """argparse type: validate that *path_string* is a git repository.

    Raises ArgumentTypeError when the path does not exist or has no .git
    directory; returns the path unchanged otherwise.
    """
    if not os.path.exists(path_string):
        raise argparse.ArgumentTypeError("{0} is not a directory.".format(path_string))
    if not os.path.exists("{0}/.git".format(path_string)):
        raise argparse.ArgumentTypeError("{0} is not a git repository.".format(path_string))
    return path_string
def process_arguments():
    """Build and run the command-line parser.

    Prints help and exits with status 1 when invoked with no arguments.
    Fix: ``len(sys.argv) is 1`` compared an int by identity (an
    implementation detail of small-int caching); use ``==`` instead.
    """
    parser = argparse.ArgumentParser(description='Calculate the score of a repository.')
    parser.add_argument('-c', '--config', type=argparse.FileType('r'), default='config.json', dest='config_file', help='Path to the configuration file.')
    parser.add_argument('repository_id', type=int, nargs=1, help='Identifier for a repository as it appears in the GHTorrent database.')
    parser.add_argument('repository_path', type=repository_path, nargs=1, help='Path to the repository source code.')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def main():
    """Score the repository named on the command line.

    Bug fix: repository_id and repository_path live on the parsed *args*
    (each is a one-element list because of ``nargs=1``), not on the JSON
    config dict -- ``config.repository_id`` raised AttributeError.
    """
    args = process_arguments()
    config = process_configuration(args.config_file)
    attributes = config['attributes']
    load_attribute_plugins(attributes)
    repository_id = args.repository_id[0]
    repository_path = args.repository_path[0]
    score = 0
    for attribute in attributes:
        # Disabled attributes never receive an 'implementation'; skip them
        # instead of raising KeyError.
        if 'implementation' not in attribute:
            continue
        result = attribute['implementation'].run(repository_id, repository_path, attribute['options'])
        score += result * attribute['weight']
    return score
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Allow clean Ctrl-C termination without a traceback.
        print("Caught interrupt, exiting.")
        sys.exit(1)
| #!/usr/bin/env python3
import argparse
import importlib
import json
import sys
def loadAttributePlugins(attributes):
    """Import the implementation module for each enabled attribute.

    On success the module is stored under the attribute's 'implementation'
    key; failures are reported and the attribute is left without one.
    """
    for attribute in attributes:
        if not attribute['enabled']:
            continue
        module_path = "attributes.{0}.main".format(attribute['name'])
        try:
            attribute['implementation'] = importlib.import_module(module_path)
        except ImportError:
            print("Failed to load the {0} attribute.".format(attribute['name']))
def processConfiguration(config_file):
    """Parse the JSON configuration from an open file object.

    Exits with status 2 when the contents are not valid JSON. The handler
    is narrowed from a bare ``except`` (which also swallowed SystemExit and
    programming errors) to ValueError, which json.load raises on bad input.
    """
    try:
        return json.load(config_file)
    except ValueError:
        print("Malformatted or missing configuration.")
        sys.exit(2)
def processArguments():
    """Build and run the command-line parser.

    Prints help and exits with status 1 when invoked with no arguments.
    Fix: ``len(sys.argv) is 1`` compared an int by identity (an
    implementation detail of small-int caching); use ``==`` instead.
    """
    parser = argparse.ArgumentParser(description='Calculate the score of a repository.')
    parser.add_argument('-c', '--config', type=argparse.FileType('r'), default='config.json', dest='config_file', help='Path to the configuration file.')
    parser.add_argument('repository_id', type=int, nargs=1, help='Identifier for a repository as it appears in the GHTorrent database.')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def main():
    """Score the repository named on the command line."""
    args = processArguments()
    config = processConfiguration(args.config_file)
    attributes = config['attributes']
    loadAttributePlugins(attributes)
    score = 0
    for attribute in attributes:
        # NOTE(review): `metadata` and `repo_path` are not defined anywhere
        # in this file -- this line raises NameError as soon as an attribute
        # is scored. They presumably should come from the parsed arguments
        # (e.g. args.repository_id) and a repository-path argument that this
        # version of the parser does not yet define; confirm intended source.
        result = attribute['implementation'].run(metadata, repo_path, attribute['options'])
        score += result * attribute['weight']
# Script entry point.
if __name__ == '__main__':
    main()
| Python | 0.000001 |
47c7cccc674beee06c2d4d6f6f197cb860d33354 | Update bno055.py | home/Calamity/bno055.py | home/Calamity/bno055.py | arduino = Runtime.createAndStart("arduino","Arduino")
arduino.connect("COM11")
bno = Runtime.createAndStart("bno","Bno055")
bno.setController(arduino)
# Poll the BNO055 orientation forever, once per second.
# NOTE(review): Runtime/sleep come from the MyRobotLab Jython environment;
# this script is not standalone Python.
if bno.begin():
   while (True):
     event = bno.getEvent()
     print event.orientation.x
     print event.orientation.y
     print event.orientation.z
     sleep(1)
| arduino = Runtime.createAndStart("arduino","Arduino")
arduino.connect("COM11")
bno = Runtime.createAndStart("bno","Bno055")
bno.setController(arduino)
# Read the BNO055 orientation once after a successful init.
# NOTE(review): Runtime comes from the MyRobotLab Jython environment;
# this script is not standalone Python.
if bno.begin():
   event = bno.getEvent()
   print event.orientation.x
   print event.orientation.y
   print event.orientation.z
| Python | 0 |
59f96d2ca0f3752052d870ef9c7bc5bc21f21e40 | add header | host/pydaq/HL/tdc_s3.py | host/pydaq/HL/tdc_s3.py | #
# ------------------------------------------------------------
# Copyright (c) SILAB , Physics Institute of Bonn University
# ------------------------------------------------------------
#
# SVN revision information:
# $Rev:: $:
# $Author:: $:
# $Date:: $:
#
from HL.HardwareLayer import HardwareLayer
import struct
import array
class tdc_s3(HardwareLayer):
    '''
    TDC controller interface
    '''
    def __init__(self, intf, conf):
        HardwareLayer.__init__(self, intf, conf)
    def reset(self):
        '''
        Resets the TDC controller module inside the FPGA, base adress zero

        Fix: this text used to sit as a bare class-level string *before* the
        def, where it was evaluated and discarded; it is now a real docstring.
        '''
        self._intf.write(self._conf['base_addr'], [0])
    def init(self):
        '''
        Initialise the TDC controller module
        '''
        self.reset()
    def set_en(self, enable):
        # Read-modify-write bit 0 of the register at base_addr + 1.
        current = self._intf.read(self._conf['base_addr'] + 1, 1)[0]
        self._intf.write(self._conf['base_addr'] + 1, [(current & 0xfe) | enable])
    def get_en(self):
        # Bit 0 of the register at base_addr + 1.
        return True if (self._intf.read(self._conf['base_addr'] + 1, 1)[0] & 0x01) else False
    def set_exten(self, enable):
        # Read-modify-write bit 0 of the last byte, writing all four bytes
        # back in reversed order (matches the original implementation --
        # NOTE(review): confirm the byte reversal is intended by the firmware).
        current = self._intf.read(self._conf['base_addr'] + 1, 4)
        self._intf.write(self._conf['base_addr'] + 1, [(current[3] & 0xfe) | enable,current[2],current[1],current[0]])
    def get_exten(self):
        # Bit 0 of the fourth byte at base_addr + 1.
        return True if (self._intf.read(self._conf['base_addr'] + 1, 4)[3] & 0x01) else False
| #
# ------------------------------------------------------------
# Copyright (c) SILAB , Physics Institute of Bonn University
# ------------------------------------------------------------
#
# SVN revision information:
# $Rev:: 1 $:
# $Author:: TheresaObermann $:
# $Date:: 2013-10-09 10:58:06 #$:
#
from HL.HardwareLayer import HardwareLayer
import struct
import array
class tdc_s3(HardwareLayer):
    '''
    TDC controller interface
    '''
    def __init__(self, intf, conf):
        HardwareLayer.__init__(self, intf, conf)
    # NOTE(review): the quoted text below is a bare class-level string, not a
    # docstring of reset(); it is evaluated and discarded. Consider moving it
    # inside the method.
    '''
    Resets the TDC controller module inside the FPGA, base adress zero
    '''
    def reset(self):
        self._intf.write(self._conf['base_addr'], [0])
    # NOTE(review): same issue -- this string is not init()'s docstring.
    '''
    Initialise the TDC controller module
    '''
    def init(self):
        self.reset()
    def set_en(self, enable):
        # Read-modify-write bit 0 of the register at base_addr + 1.
        current = self._intf.read(self._conf['base_addr'] + 1, 1)[0]
        self._intf.write(self._conf['base_addr'] + 1, [(current & 0xfe) | enable])
    def get_en(self):
        # Bit 0 of the register at base_addr + 1.
        return True if (self._intf.read(self._conf['base_addr'] + 1, 1)[0] & 0x01) else False
    def set_exten(self, enable):
        # Read-modify-write bit 0 of the last byte; the four bytes are
        # written back in reversed order.
        current = self._intf.read(self._conf['base_addr'] + 1, 4)
        self._intf.write(self._conf['base_addr'] + 1, [(current[3] & 0xfe) | enable,current[2],current[1],current[0]])
    def get_exten(self):
        # Bit 0 of the fourth byte at base_addr + 1.
        return True if (self._intf.read(self._conf['base_addr'] + 1, 4)[3] & 0x01) else False
| Python | 0.000001 |
70f460d3d0ba4c605704bb7179e3618583311621 | add docstrings for SPIRE funcs | hsadownload/getspire.py | hsadownload/getspire.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from hsadownload.access import getHsaFits, getObsUrn, parseContextHdu, fixHerschelHeader
import os
import astropy.io.fits as fits
def downloadSpireMap(ldict, obsid, lev, bandKey, direc='./SpirePhoto/', \
    spgVersion='SPG v13.0.0', overWrite=False):
    """
    Download SPIRE map. Not meant to be called directly but to be called by
    storeSpirePhoto.
    Parameters:
    -----------
    ldict (dict) : dictionary of level labels and urn strings
    obsid (long int) : observation id
    lev (string) : level being downloaded
    bandKey (string) : key for band being downloaded
    direc (string) : directory in which to store file, defaults to './SpirePhoto/'
    spgVersion (string) : pipeline version, to put in output filename
    overWrite (bool) : overwrite file if it already exists? defaults to False
        (NOTE(review): currently unused -- the file is always written)
    """
    # Strip whitespace from the pipeline version so it is filesystem-safe.
    normVersion = ''.join(spgVersion.split())
    if bandKey in ldict:
        filename = os.path.join(direc,"%s_SPIRE_%s_%s_%s.fits.gz"%(obsid,lev,bandKey,normVersion))
        # getHsaFits downloads the product and saves it to `filename`.
        hdu = getHsaFits(ldict[bandKey], fname=filename, save=True)
        hdu.close()
        print('downloaded ' + filename)
    else:
        print('did not find %s in %s for %s' %(bandKey, lev, obsid))
def storeSpirePhoto(obsid, spgVersion='SPG v13.0.0', direc='./SpirePhotoScan/'):
    """
    Download and store a SPIRE map
    Parameters:
    -----------
    obsid (long int): observation id
    spgVersion (string) : pipeline version, defaults to 'SPG v13.0.0'
    direc (string) : path to directory for storing files, defaults to './SpirePhoto/'
    Returns:
    --------
    Returns 0 if no level2_5 or level2 in observation; otherwise 1
    """
    instrument = 'SPIRE'
    # NOTE(review): normVersion is computed but never used in this function.
    normVersion = ''.join(spgVersion.split())
    urn = getObsUrn(obsid,instrument,spgVersion=spgVersion)
    hdulist = getHsaFits(urn)
    cdict = parseContextHdu(hdulist)
    # Prefer merged level2_5 products; fall back to level2.
    if 'level2_5' in cdict:
        lev = 'L25'
        lhdulist = getHsaFits(cdict['level2_5'])
        ldict = parseContextHdu(lhdulist)
        for bandKey in ['psrcPLW','psrcPMW', 'psrcPSW']:
            if (bandKey in ldict):
                # Only download when this obsid is the product's primary
                # observation (obsid001) -- presumably skips merged maps owned
                # by another observation; confirm.
                if (obsid == lhdulist[0].header['obsid001']):
                    downloadSpireMap(ldict, obsid, lev, bandKey, direc,
                                     spgVersion=spgVersion)
                else:
                    print('skipping %s for %s since obsid001 is %s' % (bandKey, obsid, lhdulist[0].header['obsid001']))
    elif 'level2' in cdict:
        lev = 'L2'
        lhdulist = getHsaFits(cdict['level2'])
        ldict = parseContextHdu(lhdulist)
        for bandKey in ['psrcPLW','psrcPMW', 'psrcPSW']:
            downloadSpireMap(ldict, obsid, lev, bandKey, direc,
                             spgVersion=spgVersion)
    else:
        return(0)
    return(1)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from hsadownload.access import getHsaFits, getObsUrn, parseContextHdu, fixHerschelHeader
import os
import astropy.io.fits as fits
def downloadSpireMap(ldict, obsid, lev, bandKey, direc='./SpirePhoto/', \
    spgVersion='SPG v13.0.0', overWrite=False):
    """Download the SPIRE band map named by *bandKey* in *ldict* into *direc*.

    Helper for storeSpirePhoto. NOTE(review): `overWrite` is accepted but
    currently unused.
    """
    normVersion = ''.join(spgVersion.split())
    if bandKey in ldict:
        filename = os.path.join(direc,"%s_SPIRE_%s_%s_%s.fits.gz"%(obsid,lev,bandKey,normVersion))
        hdu = getHsaFits(ldict[bandKey], fname=filename, save=True)
        hdu.close()
        print('downloaded ' + filename)
    else:
        print('did not find %s in %s for %s' %(bandKey, lev, obsid))
def storeSpirePhoto(obsid, spgVersion='SPG v13.0.0', direc='./SpirePhotoScan/'):
    """Download the SPIRE point-source band maps for *obsid*.

    Prefers level2_5 products (only when the product's obsid001 header matches
    *obsid*), falling back to level2. Returns 0 when neither level exists in
    the observation context, otherwise 1.
    """
    instrument = 'SPIRE'
    # NOTE(review): normVersion is computed but never used in this function.
    normVersion = ''.join(spgVersion.split())
    urn = getObsUrn(obsid,instrument,spgVersion=spgVersion)
    hdulist = getHsaFits(urn)
    cdict = parseContextHdu(hdulist)
    if 'level2_5' in cdict:
        lev = 'L25'
        lhdulist = getHsaFits(cdict['level2_5'])
        ldict = parseContextHdu(lhdulist)
        for bandKey in ['psrcPLW','psrcPMW', 'psrcPSW']:
            if (bandKey in ldict):
                if (obsid == lhdulist[0].header['obsid001']):
                    downloadSpireMap(ldict, obsid, lev, bandKey, direc,
                                     spgVersion=spgVersion)
                else:
                    print('skipping %s for %s since obsid001 is %s' % (bandKey, obsid, lhdulist[0].header['obsid001']))
    elif 'level2' in cdict:
        lev = 'L2'
        lhdulist = getHsaFits(cdict['level2'])
        ldict = parseContextHdu(lhdulist)
        for bandKey in ['psrcPLW','psrcPMW', 'psrcPSW']:
            downloadSpireMap(ldict, obsid, lev, bandKey, direc,
                             spgVersion=spgVersion)
    else:
        return(0)
    return(1)
| Python | 0 |
f52921e78cc6a8af38df50f0b0ba4d04b15fd768 | fix the import error in db.py | service/db.py | service/db.py | #coding=utf-8
import torndb
import datetime
from constants.errorcode import Errorcode
from util.gip_exception import GipException
class DB(object):
    '''Thin data-access wrapper over the application's MySQL connections.'''
    def __init__(self, application):
        # Separate read/write torndb connections supplied by the application.
        # (The methods below use the write connection even for reads -- kept
        # as in the original.)
        self.mysql_read = application.mysql_conn_read
        self.mysql_write = application.mysql_conn_write
        #self.mongo_conn = application.mongo_conn
    def sample(self):
        '''Return the first row of ``select count(1) from tag``.

        Database errors now propagate to the caller: the previous
        ``except: pass`` + ``finally: return result[0]`` masked every query
        failure behind a NameError (``result`` was never bound).
        '''
        sql = ''' select count(1) from tag'''
        result = self.mysql_write.query(sql)
        return result[0]
    def get_article_by_id(self, id):
        '''Return the first ``article`` row with the given id.

        Uses a parameterized query (torndb escapes ``id``) instead of the
        previous ``%`` string interpolation, which was SQL-injectable.
        Raises IndexError when no matching article exists.
        '''
        sql = ''' select * from article where id = %s limit 1'''
        result = self.mysql_write.query(sql, id)
        return result[0]
| #coding=utf-8
import torndb
import datetime
from constants.errorcode import Errorcode
from util.lt_exception import LTException
class DB(object):
    def __init__(self, application):
        # Separate read/write MySQL (torndb) connections supplied by the app.
        self.mysql_read = application.mysql_conn_read
        self.mysql_write = application.mysql_conn_write
        #self.mongo_conn = application.mongo_conn
    def sample(self):
        '''
        Example code.
        '''
        try:
            sql = ''' select count(1) from tag'''
            result = self.mysql_write.query(sql)
        except:
            # TODO: raise the exception here (currently swallowed; `result`
            # stays unbound and the finally-return raises NameError instead).
            pass
        finally:
            return result[0]
    def get_article_by_id(self,id):
        '''
        Example code.
        '''
        try:
            sql = ''' select * from article where id =%s limit 1'''%(id)
            result = self.mysql_write.query(sql)
        except:
            # TODO: raise the exception here (currently swallowed).
            pass
        finally:
            return result[0]
| Python | 0.000035 |
624adf50b90f97454857185d71259f6fb7a7fed6 | fix imports | hublib/rappture/tool.py | hublib/rappture/tool.py | from __future__ import print_function
from .node import Node
import numpy as np
from lxml import etree as ET
import os
import subprocess
import sys
from .rappture import RapXML
class Tool(RapXML):
    """A runnable Rappture tool: locates its tool.xml and drives execution
    through the middleware `invoke` script found next to it."""
    def __init__(self, tool):
        """
        tool can be any of the following:
            - Path to a tool.xml file.
            - Name of a published tool.  The current version will be run.
        """
        dirname, xml = os.path.split(tool)
        if dirname == "":
            if xml != "tool.xml":
                # must be tool name
                dirname = "/apps/%s/current" % xml
                xml = dirname + "/rappture/tool.xml"
            else:
                dirname = os.getcwd()
        else:
            xml = tool
            dirname = os.path.abspath(os.path.join(dirname, '..'))
        xml = os.path.abspath(xml)
        if not os.path.isfile(xml):
            raise ValueError("tool must be a toolname or path to a tool.xml file.")
        # NOTE(review): self.invoke_file is only set when the middleware
        # invoke script exists; run() assumes the attribute is present and
        # raises AttributeError otherwise.
        invoke_file = os.path.join(dirname, 'middleware', 'invoke')
        if os.path.isfile(invoke_file):
            self.invoke_file = invoke_file
        # Scratch/driver file names are keyed by PID inside the session dir.
        sessdir = os.environ['SESSIONDIR']
        self.tmp_name = os.path.join(sessdir, 'tool_driver_%s.xml' % os.getpid())
        self.run_name = ""
        self.toolparameters_name = os.path.join(sessdir, 'driver_%s.hz' % os.getpid())
        self.rappturestatus_name = os.path.join(sessdir, 'rappture.status')
        self.fname = xml
        self.tree = ET.parse(xml)
        self.path = ''
    def run(self, verbose=True):
        """Write the driver files, execute the tool via the invoke script,
        then reload self.tree from the run file named in rappture.status."""
        # print("Writing", self.tmp_name)
        with open(self.tmp_name, 'w') as f:
            f.write(str(self.xml(pretty=False, header=True)))
        with open(self.toolparameters_name, 'w') as f:
            f.write("file(execute):%s" % (self.tmp_name))
        # NOTE(review): shell=True with interpolated paths -- paths containing
        # spaces or shell metacharacters will break; confirm SESSIONDIR and
        # tool locations are trusted.
        cmd = "TOOL_PARAMETERS=%s %s" % (self.toolparameters_name,self.invoke_file)
        if verbose:
            print("cmd=", cmd)
        # NOTE(review): cwd is not restored if an exception escapes below
        # (no try/finally around the chdir).
        cwd = os.getcwd()
        os.chdir(os.environ['SESSIONDIR'])
        try:
            ret = subprocess.call(cmd, shell=True)
            if ret:
                print('Error: "%s"' % cmd, file=sys.stderr)
                if ret < 0:
                    print("Terminated by signal", -ret, file=sys.stderr)
                else:
                    print("Returncode", ret, file=sys.stderr)
        except OSError as e:
            print('Error: "%s"' % cmd, file=sys.stderr)
            print("Failed:", e, file=sys.stderr)
            sys.exit(1)
        # The middleware reports "output saved in <file>" in rappture.status;
        # pick up that file name as the run result.
        with(open(self.rappturestatus_name, 'r')) as f:
            statusData = f.readlines()
        for record in statusData:
            if 'output saved in' in record:
                self.run_name = record.strip().split()[-1]
                break
        if self.run_name:
            self.tree = ET.parse(self.run_name)
        os.chdir(cwd)
| from __future__ import print_function
from .node import Node
import numpy as np
from lxml import etree as ET
import os
from subprocess import call, Popen, PIPE
import sys
from .rappture import RapXML
from hublib.use import _use
class Tool(RapXML):
    """A runnable Rappture tool: locates its tool.xml and drives execution
    through the middleware `invoke` script found next to it."""
    # NOTE(review): this module imports only call/Popen/PIPE from subprocess,
    # but run() below references subprocess.call -- NameError at runtime.
    def __init__(self, tool):
        """
        tool can be any of the following:
            - Path to a tool.xml file.
            - Name of a published tool.  The current version will be run.
        """
        dirname, xml = os.path.split(tool)
        if dirname == "":
            if xml != "tool.xml":
                # must be tool name
                dirname = "/apps/%s/current" % xml
                xml = dirname + "/rappture/tool.xml"
            else:
                dirname = os.getcwd()
        else:
            xml = tool
            dirname = os.path.abspath(os.path.join(dirname, '..'))
        xml = os.path.abspath(xml)
        if not os.path.isfile(xml):
            raise ValueError("tool must be a toolname or path to a tool.xml file.")
        # NOTE(review): self.invoke_file is only set when the invoke script
        # exists; run() assumes the attribute is present.
        invoke_file = os.path.join(dirname, 'middleware', 'invoke')
        if os.path.isfile(invoke_file):
            self.invoke_file = invoke_file
        sessdir = os.environ['SESSIONDIR']
        self.tmp_name = os.path.join(sessdir, 'tool_driver_%s.xml' % os.getpid())
        self.run_name = ""
        self.toolparameters_name = os.path.join(sessdir, 'driver_%s.hz' % os.getpid())
        self.rappturestatus_name = os.path.join(sessdir, 'rappture.status')
        self.fname = xml
        self.tree = ET.parse(xml)
        self.path = ''
    def run(self, verbose=True):
        """Write the driver files, execute the tool, then reload self.tree
        from the run file named in rappture.status."""
        # print("Writing", self.tmp_name)
        with open(self.tmp_name, 'w') as f:
            f.write(str(self.xml(pretty=False, header=True)))
        with open(self.toolparameters_name, 'w') as f:
            f.write("file(execute):%s" % (self.tmp_name))
        cmd = "TOOL_PARAMETERS=%s %s" % (self.toolparameters_name,self.invoke_file)
        if verbose:
            print("cmd=", cmd)
        cwd = os.getcwd()
        os.chdir(os.environ['SESSIONDIR'])
        try:
            ret = subprocess.call(cmd, shell=True)
            if ret:
                print('Error: "%s"' % cmd, file=sys.stderr)
                if ret < 0:
                    print("Terminated by signal", -ret, file=sys.stderr)
                else:
                    print("Returncode", ret, file=sys.stderr)
        except OSError as e:
            print('Error: "%s"' % cmd, file=sys.stderr)
            print("Failed:", e, file=sys.stderr)
            sys.exit(1)
        with(open(self.rappturestatus_name, 'r')) as f:
            statusData = f.readlines()
        for record in statusData:
            if 'output saved in' in record:
                self.run_name = record.strip().split()[-1]
                break
        if self.run_name:
            self.tree = ET.parse(self.run_name)
        os.chdir(cwd)
| Python | 0.000002 |
0b4b57f90ee3d0fe0af3ba9921adccda784d6301 | Allow to order payment profile by name, type and status. | src/waldur_mastermind/invoices/filters.py | src/waldur_mastermind/invoices/filters.py | import django_filters
from rest_framework import filters
from waldur_core.core import filters as core_filters
from . import models
class InvoiceFilter(django_filters.FilterSet):
    """Filter invoices by customer, state, year and month."""
    customer = core_filters.URLFilter(
        view_name='customer-detail', field_name='customer__uuid'
    )
    customer_uuid = django_filters.UUIDFilter(field_name='customer__uuid')
    state = django_filters.MultipleChoiceFilter(choices=models.Invoice.States.CHOICES)
    # NOTE(review): the pair ('year', 'month') maps model field 'year' to the
    # ordering parameter name 'month' -- confirm this is intended rather than
    # fields=('year', 'month').
    o = django_filters.OrderingFilter(fields=(('year', 'month'),))
    class Meta:
        model = models.Invoice
        fields = ('year', 'month')
class PaymentProfileFilter(django_filters.FilterSet):
    """Filter payment profiles by organization and payment type.

    The ``o`` parameter allows ordering by name, payment type and status.
    """
    organization = core_filters.URLFilter(
        view_name='customer-detail', field_name='organization__uuid'
    )
    organization_uuid = django_filters.UUIDFilter(field_name='organization__uuid')
    payment_type = django_filters.MultipleChoiceFilter(
        choices=models.PaymentType.CHOICES
    )
    # OrderingFilter's `fields` takes a sequence of field names or of
    # (field, param) 2-tuples; the previous single 3-tuple
    # (('name', 'payment_type', 'is_active'),) was not a valid mapping.
    o = django_filters.OrderingFilter(fields=('name', 'payment_type', 'is_active'))
    class Meta:
        model = models.PaymentProfile
        fields = []
class PaymentProfileFilterBackend(filters.BaseFilterBackend):
    """Limit payment-profile visibility: regular users only see active
    profiles, while staff and support users see everything."""
    def filter_queryset(self, request, queryset, view):
        user = request.user
        privileged = user.is_staff or user.is_support
        return queryset if privileged else queryset.filter(is_active=True)
class PaymentFilter(django_filters.FilterSet):
    """Filter payments by their payment profile and date of payment."""
    profile = core_filters.URLFilter(
        view_name='payment-profile-detail', field_name='profile__uuid'
    )
    profile_uuid = django_filters.UUIDFilter(field_name='profile__uuid')
    class Meta:
        model = models.Payment
        fields = ['date_of_payment']
| import django_filters
from rest_framework import filters
from waldur_core.core import filters as core_filters
from . import models
class InvoiceFilter(django_filters.FilterSet):
    """Filter invoices by customer, state, year and month."""
    customer = core_filters.URLFilter(
        view_name='customer-detail', field_name='customer__uuid'
    )
    customer_uuid = django_filters.UUIDFilter(field_name='customer__uuid')
    state = django_filters.MultipleChoiceFilter(choices=models.Invoice.States.CHOICES)
    # NOTE(review): ('year', 'month') maps model field 'year' to ordering
    # parameter 'month' -- confirm this is intended.
    o = django_filters.OrderingFilter(fields=(('year', 'month'),))
    class Meta:
        model = models.Invoice
        fields = ('year', 'month')
class PaymentProfileFilter(django_filters.FilterSet):
    """Filter payment profiles by organization and payment type."""
    organization = core_filters.URLFilter(
        view_name='customer-detail', field_name='organization__uuid'
    )
    organization_uuid = django_filters.UUIDFilter(field_name='organization__uuid')
    payment_type = django_filters.MultipleChoiceFilter(
        choices=models.PaymentType.CHOICES
    )
    class Meta:
        model = models.PaymentProfile
        fields = []
class PaymentProfileFilterBackend(filters.BaseFilterBackend):
    """Restrict non-staff/non-support users to active payment profiles."""
    def filter_queryset(self, request, queryset, view):
        # Staff and support see every profile, including inactive ones.
        if request.user.is_staff or request.user.is_support:
            return queryset
        return queryset.filter(is_active=True)
class PaymentFilter(django_filters.FilterSet):
    """Filter payments by their payment profile and date of payment."""
    profile = core_filters.URLFilter(
        view_name='payment-profile-detail', field_name='profile__uuid'
    )
    profile_uuid = django_filters.UUIDFilter(field_name='profile__uuid')
    class Meta:
        model = models.Payment
        fields = ['date_of_payment']
| Python | 0 |
af0fbfe74ecaac67fb37f03e01a9aefcd06ce83f | Change default scriptPubKey in coinbase | qa/rpc-tests/test_framework/blocktools.py | qa/rpc-tests/test_framework/blocktools.py | # blocktools.py - utilities for manipulating blocks and transactions
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from mininode import *
from script import CScript, CScriptOp, OP_TRUE, OP_CHECKSIG
# Build a block at regtest difficulty on top of the given previous-block hash.
def create_block(hashprev, coinbase, nTime=None):
    """Return a CBlock containing *coinbase*, timestamped *nTime* (default:
    ten minutes from now), with merkle root and hash computed."""
    blk = CBlock()
    if nTime is not None:
        blk.nTime = nTime
    else:
        import time
        blk.nTime = int(time.time() + 600)
    blk.hashPrevBlock = hashprev
    blk.nBits = 0x207fffff  # regtest difficulty; will break after a difficulty adjustment
    blk.vtx.append(coinbase)
    blk.hashMerkleRoot = blk.calc_merkle_root()
    blk.calc_sha256()
    return blk
def serialize_script_num(value):
    """Serialize an integer in Bitcoin script-number (CScriptNum) format.

    Little-endian minimal encoding; bit 7 of the last byte is the sign bit,
    so an extra 0x00 (or 0x80 for negatives) byte is appended when the top
    payload byte already has bit 7 set. Returns a bytearray (empty for 0).
    """
    r = bytearray(0)
    if value == 0:
        return r
    neg = value < 0
    absvalue = -value if neg else value
    while (absvalue):
        # Append the low byte as an int: works on both Python 2 and 3
        # (the previous chr() argument breaks bytearray.append on Python 3).
        r.append(absvalue & 0xff)
        absvalue >>= 8
    if r[-1] & 0x80:
        r.append(0x80 if neg else 0)
    elif neg:
        r[-1] |= 0x80
    return r
counter=1
# Create a coinbase transaction, assuming no miner fees.
# If pubkey is passed in, the coinbase output will be a P2PK output;
# otherwise an anyone-can-spend output.
def create_coinbase(heightAdjust = 0, absoluteHeight = None, pubkey = None):
    # Height encoded into the coinbase scriptSig; defaults to the module-level
    # counter (number of coinbases created so far) plus heightAdjust.
    global counter
    height = absoluteHeight if absoluteHeight is not None else counter+heightAdjust
    coinbase = CTransaction()
    coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), 
                ser_string(serialize_script_num(height)), 0xffffffff))
    counter += 1
    coinbaseoutput = CTxOut()
    coinbaseoutput.nValue = 50*100000000
    # Subsidy halves every 150 blocks on regtest.
    halvings = int((height)/150) # regtest
    coinbaseoutput.nValue >>= halvings
    if (pubkey != None):
        coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG])
    else:
        coinbaseoutput.scriptPubKey = CScript([OP_TRUE])
    coinbase.vout = [ coinbaseoutput ]
    coinbase.calc_sha256()
    return coinbase
# Build a transaction with a single anyone-can-spend output that spends the
# nth output of prevtx.
def create_transaction(prevtx, n, sig, value):
    """Return a transaction spending output *n* of *prevtx* using scriptSig *sig*."""
    assert(n < len(prevtx.vout))
    spend = CTransaction()
    spend.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff))
    spend.vout.append(CTxOut(value, ""))
    spend.calc_sha256()
    return spend
| # blocktools.py - utilities for manipulating blocks and transactions
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from mininode import *
from script import CScript, CScriptOp
# Create a block (with regtest difficulty)
def create_block(hashprev, coinbase, nTime=None):
    """Return a CBlock on top of *hashprev* containing *coinbase*.

    When nTime is None the timestamp defaults to now + 600 seconds.
    """
    block = CBlock()
    if nTime is None:
        import time
        block.nTime = int(time.time()+600)
    else:
        block.nTime = nTime
    block.hashPrevBlock = hashprev
    block.nBits = 0x207fffff # Will break after a difficulty adjustment...
    block.vtx.append(coinbase)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.calc_sha256()
    return block
def serialize_script_num(value):
    """Serialize *value* in Bitcoin script-number (CScriptNum) format.

    Little-endian, minimal length; bit 7 of the last byte is the sign bit.
    NOTE(review): bytearray.append(chr(...)) only works on Python 2 -- on
    Python 3 append() requires an int.
    """
    r = bytearray(0)
    if value == 0:
        return r
    neg = value < 0
    absvalue = -value if neg else value
    while (absvalue):
        r.append(chr(absvalue & 0xff))
        absvalue >>= 8
    if r[-1] & 0x80:
        r.append(0x80 if neg else 0)
    elif neg:
        r[-1] |= 0x80
    return r
counter=1
# Create an anyone-can-spend coinbase transaction, assuming no miner fees
def create_coinbase(heightAdjust = 0, absoluteHeight = None):
    """Build the next coinbase; its scriptSig encodes the block height."""
    global counter
    height = absoluteHeight if absoluteHeight is not None else counter+heightAdjust
    coinbase = CTransaction()
    coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), 
                ser_string(serialize_script_num(height)), 0xffffffff))
    counter += 1
    coinbaseoutput = CTxOut()
    coinbaseoutput.nValue = 50*100000000
    # Subsidy halves every 150 blocks on regtest.
    halvings = int((height)/150) # regtest
    coinbaseoutput.nValue >>= halvings
    coinbaseoutput.scriptPubKey = ""
    coinbase.vout = [ coinbaseoutput ]
    coinbase.calc_sha256()
    return coinbase
# Create a transaction with an anyone-can-spend output, that spends the
# nth output of prevtx.
def create_transaction(prevtx, n, sig, value):
    """Return a transaction spending output *n* of *prevtx* with scriptSig *sig*."""
    tx = CTransaction()
    assert(n < len(prevtx.vout))
    tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff))
    tx.vout.append(CTxOut(value, ""))
    tx.calc_sha256()
    return tx
| Python | 0 |
4d413d45def838d730806097484d7ccf9d49744f | Fix to test code | mycluster/test.py | mycluster/test.py |
import mycluster
# Initialise mycluster, then submit a small two-task hybrid job.
mycluster.init()
mycluster.create_submit('hybrid:hybrid.q',script_name='test.job',num_tasks=2,
                        tasks_per_node=2,
                        my_script='test.bsh',
                        user_email='test@email.com',
                        )
mycluster.submit('test.job')
# Report the status of every known job (Python 2 print statement below).
for i in mycluster.job_list():
print i, mycluster.get_job(i).status |
import mycluster
# Write a job script for a small two-task hybrid job and submit it.
mycluster.create_submit('hybrid:hybrid.q',script_name='test.job',num_tasks=2,
                        tasks_per_node=2,
                        my_script='test.bsh',
                        user_email='test@email.com',
                        )
mycluster.submit('test.job')
# Report the status of every known job (Python 2 print statement below).
for i in mycluster.job_list():
print i, mycluster.get_job(i).status | Python | 0.000018 |
4c29471af61989e852a813999cf37aa9a8acf76d | test anon to /users endpoint | onadata/apps/api/tests/viewsets/test_user_viewset.py | onadata/apps/api/tests/viewsets/test_user_viewset.py | import json
from onadata.apps.api.tests.viewsets.test_abstract_viewset import\
TestAbstractViewSet
from onadata.apps.api.viewsets.user_viewset import UserViewSet
class TestUserViewSet(TestAbstractViewSet):
    """Tests for the read-only /users endpoint (list and retrieve),
    exercised both authenticated and anonymously."""
    def setUp(self):
        # Name the class explicitly: super(self.__class__, ...) recurses
        # infinitely as soon as this test case is subclassed.
        super(TestUserViewSet, self).setUp()
    def test_user_list(self):
        """An authenticated user can list users."""
        view = UserViewSet.as_view({'get': 'list'})
        request = self.factory.get('/', **self.extra)
        response = view(request)
        data = [{'username': u'bob', 'first_name': u'Bob', 'last_name': u''}]
        self.assertContains(response, json.dumps(data))
    def test_user_list_anon(self):
        """An anonymous request can list users too."""
        view = UserViewSet.as_view({'get': 'list'})
        request = self.factory.get('/')
        response = view(request)
        data = [{'username': u'bob', 'first_name': u'Bob', 'last_name': u''}]
        self.assertContains(response, json.dumps(data))
    def test_user_get(self):
        """An authenticated user can retrieve a single user by username."""
        view = UserViewSet.as_view({'get': 'retrieve'})
        request = self.factory.get('/', **self.extra)
        response = view(request, username='bob')
        data = {'username': u'bob', 'first_name': u'Bob', 'last_name': u''}
        self.assertContains(response, json.dumps(data))
    def test_user_anon_get(self):
        """An anonymous request can retrieve a single user by username."""
        view = UserViewSet.as_view({'get': 'retrieve'})
        request = self.factory.get('/')
        response = view(request, username='bob')
        data = {'username': u'bob', 'first_name': u'Bob', 'last_name': u''}
        self.assertContains(response, json.dumps(data))
| import json
from onadata.apps.api.tests.viewsets.test_abstract_viewset import\
TestAbstractViewSet
from onadata.apps.api.viewsets.user_viewset import UserViewSet
class TestUserViewSet(TestAbstractViewSet):
    """Tests for the read-only /users endpoint."""
    def setUp(self):
        # NOTE(review): super(self.__class__, ...) recurses infinitely if this
        # class is ever subclassed; prefer super(TestUserViewSet, self).
        super(self.__class__, self).setUp()
    def test_user_list(self):
        """An authenticated user can list users."""
        view = UserViewSet.as_view({'get': 'list'})
        request = self.factory.get('/', **self.extra)
        response = view(request)
        data = [{'username': u'bob', 'first_name': u'Bob', 'last_name': u''}]
        self.assertContains(response, json.dumps(data))
    def test_user_get(self):
        """An authenticated user can retrieve a single user by username."""
        view = UserViewSet.as_view({'get': 'retrieve'})
        request = self.factory.get('/', **self.extra)
        response = view(request, username='bob')
        data = {'username': u'bob', 'first_name': u'Bob', 'last_name': u''}
        self.assertContains(response, json.dumps(data))
    def test_user_anon_get(self):
        """An anonymous request can retrieve a single user by username."""
        view = UserViewSet.as_view({'get': 'retrieve'})
        request = self.factory.get('/')
        response = view(request, username='bob')
        data = {'username': u'bob', 'first_name': u'Bob', 'last_name': u''}
        self.assertContains(response, json.dumps(data))
| Python | 0.000001 |
84eff383a42784400261c39c87b9455133771bea | correct role & add image | data/upload/backend/parser.py | data/upload/backend/parser.py | import re
import csv
from data.upload.backend.xlrd import xlrd_dict_reader
from data.upload.backend.csv import csv_dict_reader
from data.upload.models import (SpreadsheetUpload, SpreadsheetPerson,
SpreadsheetSource, SpreadsheetLink,
SpreadsheetContactDetail)
from contextlib import contextmanager
from pupa.scrape.helpers import Legislator
from pupa.scrape.popolo import Organization
# Notice URL for manually-entered data (not referenced by the functions below).
OCD_SOURCE_URL = "http://opencivicdata.org/manual-data/source-notice"
def people_to_pupa(stream, transaction):
    """Convert SpreadsheetPerson records into pupa objects.

    Yields, for each person, the Legislator followed by its related objects,
    and finally the legislature Organization (with one post per person) and
    its related objects. *transaction* supplies the jurisdiction.
    """
    org = Organization(
        name=transaction.jurisdiction.name,
        classification='legislature',
    )
    for person in stream:
        name = person.name
        position = person.position
        district = person.district
        image = person.image
        if not name or not district:
            raise ValueError("A name and district is required for each entry.")
        if position is None:
            position = "member"
        obj = Legislator(name=name, district=district)
        if image:
            obj.image = image
        # One post per person, labelled "<position>, <district>".
        org.add_post(label="%s, %s" % (position, district), role=position)
        for detail in person.contacts.all():
            obj.add_contact_detail(
                type=detail.type,
                value=detail.value,
                note=detail.note,
            )
        for link in person.links.all():
            obj.add_link(
                url=link.url,
                # NOTE(review): note is set to the URL, not link.note -- confirm.
                note=link.url
            )
        for source in person.sources.all():
            obj.add_source(
                url=source.url,
                note=source.note,
            )
        obj.validate()
        obj.pre_save(transaction.jurisdiction.id)
        yield obj
        for related in obj._related:
            yield related
    # The organization is yielded last, after all people have added posts.
    for related in org._related:
        yield related
    yield org
def import_parsed_stream(stream, user, jurisdiction):
    """Persist parsed spreadsheet rows as SpreadsheetPerson records.

    Each row must carry 'Name', 'Position' and 'District'; the remaining
    columns are classified as sources, links or contact details by their
    header. Returns the saved SpreadsheetUpload.
    """
    upload = SpreadsheetUpload(user=user, jurisdiction=jurisdiction)
    upload.save()
    for person in stream:
        if (not person['District'] or not person['Name'] or
                not person['Position']):
            raise ValueError("Bad district or name")
        who = SpreadsheetPerson(
            name=person.pop('Name'),
            spreadsheet=upload,
            position=person.pop('Position'),
            district=person.pop('District'),
        )
        if 'Image' in person:
            who.image = person.pop("Image")
        who.save()
        # Header -> contact-detail type for the remaining columns.
        contact_details = {
            "Address": "address",
            "Phone": "voice",
            "Email": "email",
            "Fax": "fax",
            "Cell": "voice",
            "Twitter": "twitter",
            "Facebook": "facebook"
        }
        links = ["Website", "Homepage"]
        sources = ["Source"]
        for key, value in person.items():
            # Headers may carry a parenthesised label, e.g. "Phone (Office)".
            root = key
            label = None
            match = re.match(r"(?P<key>.*) (?P<label>\(.*\))?", key)
            if match:
                root = match.group('key')
                # Only strip a label when the parenthesised group actually
                # matched; previously a header such as "Phone 2" matched the
                # regex with label=None and crashed on None.rstrip().
                if match.group('label'):
                    label = match.group('label').rstrip(")").lstrip("(")
            if root in sources:
                a = SpreadsheetSource(
                    person=who,
                    url=value,
                    note=key
                )
                a.save()
                continue
            # If we've got a link.
            if root in links:
                a = SpreadsheetLink(
                    person=who,
                    url=value,
                    note=key,
                )
                a.save()
                continue
            # If we've got a contact detail.
            if root in contact_details:
                type_ = contact_details[root]
                a = SpreadsheetContactDetail(
                    person=who,
                    type=type_,
                    value=value,
                    label=label or "",
                    note=key,
                )
                a.save()
                continue
            # Typo "spreadhseet" fixed in the error message.
            raise ValueError("Unknown spreadsheet key: %s" % (key))
    return upload
def import_stream(stream, extension, user, jurisdiction):
    """Parse *stream* with the reader matching *extension* ('csv', 'xls' or
    'xlsx') and import its rows; raises KeyError for other extensions."""
    readers = {
        "csv": csv_dict_reader,
        "xls": xlrd_dict_reader,
        "xlsx": xlrd_dict_reader,
    }
    parse = readers[extension]
    return import_parsed_stream(parse(stream), user, jurisdiction)
@contextmanager
def import_file_stream(fpath, user, jurisdiction):
    """Open *fpath* in binary mode, import it by its file extension, and
    yield the resulting upload object."""
    extension = fpath.rsplit(".", 1)[1]
    with open(fpath, 'br') as stream:
        yield import_stream(stream, extension, user, jurisdiction)
| import re
import csv
from data.upload.backend.xlrd import xlrd_dict_reader
from data.upload.backend.csv import csv_dict_reader
from data.upload.models import (SpreadsheetUpload, SpreadsheetPerson,
SpreadsheetSource, SpreadsheetLink,
SpreadsheetContactDetail)
from contextlib import contextmanager
from pupa.scrape.helpers import Legislator
from pupa.scrape.popolo import Organization
# Notice URL for manually-entered data (not referenced by the functions below).
OCD_SOURCE_URL = "http://opencivicdata.org/manual-data/source-notice"
def people_to_pupa(stream, transaction):
    """Convert SpreadsheetPerson records into pupa objects.

    Yields, for each person, the Legislator followed by its related objects,
    and finally the legislature Organization (one post per person) and its
    related objects.
    """
    org = Organization(
        name=transaction.jurisdiction.name,
        classification='legislature',
    )
    for person in stream:
        name = person.name
        position = person.position
        district = person.district
        image = person.image
        if not name or not district:
            raise ValueError("A name and district is required for each entry.")
        if position is None:
            position = "member"
        obj = Legislator(name=name, district=district)
        if image:
            obj.image = image
        org.add_post(label=district, role=position)
        for detail in person.contacts.all():
            obj.add_contact_detail(
                type=detail.type,
                value=detail.value,
                note=detail.note,
            )
        for link in person.links.all():
            obj.add_link(
                url=link.url,
                # NOTE(review): note is set to the URL, not link.note -- confirm.
                note=link.url
            )
        for source in person.sources.all():
            obj.add_source(
                url=source.url,
                note=source.note,
            )
        obj.validate()
        obj.pre_save(transaction.jurisdiction.id)
        yield obj
        for related in obj._related:
            yield related
    for related in org._related:
        yield related
    yield org
def import_parsed_stream(stream, user, jurisdiction):
    """Persist parsed spreadsheet rows as SpreadsheetPerson records.

    NOTE(review): when a header matches "<word> <something>" without a
    parenthesised label (e.g. "Phone 2"), the regex below matches with
    label=None and the .rstrip() call raises AttributeError.
    """
    upload = SpreadsheetUpload(user=user, jurisdiction=jurisdiction)
    upload.save()
    for person in stream:
        if (not person['District'] or not person['Name'] or
                not person['Position']):
            raise ValueError("Bad district or name")
        who = SpreadsheetPerson(
            name=person.pop('Name'),
            spreadsheet=upload,
            position=person.pop('Position'),
            district=person.pop('District'),
        )
        who.save()
        # Header -> contact-detail type for the remaining columns.
        contact_details = {
            "Address": "address",
            "Phone": "voice",
            "Email": "email",
            "Fax": "fax",
            "Cell": "voice",
            "Twitter": "twitter",
            "Facebook": "facebook"
        }
        links = ["Website", "Homepage"]
        sources = ["Source"]
        for key, value in person.items():
            match = re.match("(?P<key>.*) (?P<label>\(.*\))?", key)
            root = key
            label = None
            if match:
                d = match.groupdict()
                root = d['key']
                label = d['label'].rstrip(")").lstrip("(")
            if root in sources:
                a = SpreadsheetSource(
                    person=who,
                    url=value,
                    note=key
                )
                a.save()
                continue
            # If we've got a link.
            if root in links:
                a = SpreadsheetLink(
                    person=who,
                    url=value,
                    note=key,
                )
                a.save()
                continue
            # If we've got a contact detail.
            if root in contact_details:
                type_ = contact_details[root]
                a = SpreadsheetContactDetail(
                    person=who,
                    type=type_,
                    value=value,
                    label=label or "",
                    note=key,
                )
                a.save()
                continue
            raise ValueError("Unknown spreadhseet key: %s" % (key))
    return upload
def import_stream(stream, extension, user, jurisdiction):
    """Parse *stream* with the reader matching *extension* ('csv', 'xls' or
    'xlsx') and import its rows; raises KeyError for other extensions."""
    reader = {"csv": csv_dict_reader,
              "xlsx": xlrd_dict_reader,
              "xls": xlrd_dict_reader}[extension]
    return import_parsed_stream(reader(stream), user, jurisdiction)
@contextmanager
def import_file_stream(fpath, user, jurisdiction):
    """Open *fpath* in binary mode, import it by its file extension, and
    yield the resulting upload object."""
    _, xtn = fpath.rsplit(".", 1)
    with open(fpath, 'br') as fd:
        yield import_stream(fd, xtn, user, jurisdiction)
| Python | 0 |
9e30bc38cfa3cb000ab2d84730552d50ea604ac1 | configure wsgi file to use whitenoise | heroku-libsass-python/wsgi.py | heroku-libsass-python/wsgi.py | """
WSGI config for project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = ".settings"
# Default to the top-level `config` settings module and the Production
# django-configurations class; a value already set in the environment wins.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config")
os.environ.setdefault("DJANGO_CONFIGURATION", "Production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from configurations.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
application = get_wsgi_application()
# Wrap the WSGI app with WhiteNoise so static files are served directly.
application = DjangoWhiteNoise(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| """
WSGI config for project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = ".settings"
# Default to the top-level `config` settings module and the Production
# django-configurations class; a value already set in the environment wins.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config")
os.environ.setdefault("DJANGO_CONFIGURATION", "Production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from configurations.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| Python | 0 |
e5b1221598e27c0b871f6afa34bd9dfa06a72845 | clean up artifacts at the end of doc build | docs/build.py | docs/build.py | #!/usr/bin/env python
"""Build scriptharness documentation. At some point it would be good to
automate all CI/Release tasks for scriptharness; this is a good start.
"""
from __future__ import print_function, division, absolute_import, \
unicode_literals
from jinja2 import Template
import os
import shutil
import subprocess
import sys
READTHEDOCS_LINK = """.. image:: https://readthedocs.org/projects/python-scriptharness/badge/?version=latest
:target: https://readthedocs.org/projects/python-scriptharness/?badge=latest
:alt: Documentation Status"""
def cleanup(*paths):
    """Delete each of the given files, silently skipping missing ones."""
    for candidate in paths:
        if not os.path.exists(candidate):
            continue
        os.remove(candidate)
def build_readme_rst():
    """Render README.rst.j2 twice: with the RTD badge into ../README.rst,
    and without it into the local docs README.rst."""
    with open("README.rst.j2") as template_file:
        template = Template(template_file.read())
    with open("../README.rst", "w") as out:
        out.write(template.render(readthedocs_link=READTHEDOCS_LINK))
    with open("README.rst", "w") as out:
        out.write(template.render())
def indent_output(command, required_string="INFO", **kwargs):
    """Run *command*, return its (stdout+stderr) output with every line
    indented by four spaces; assert *required_string* appears in it."""
    kwargs.setdefault('stderr', subprocess.STDOUT)
    raw = subprocess.check_output(command, **kwargs)
    indented = "".join(
        "    {}{}".format(line.decode(), os.linesep)
        for line in raw.splitlines()
    )
    assert required_string in indented
    return indented
def build_quickstart():
    """Render quickstart.rst from its jinja2 template, embedding the current
    git branch name, the quickstart example source, and the indented output
    of running the example in several modes."""
    # Find the checked-out branch: the '* name' line of `git branch`.
    # NOTE(review): `branch` stays unbound if no line starts with '*'
    # (unusual git output) and render() below would then raise NameError.
    for line in subprocess.check_output(['git', 'branch', '--no-color'],
                                        stderr=subprocess.PIPE).splitlines():
        if line.startswith(b'*'):
            _, branch = line.split()
            branch = branch.decode()
    with open("quickstart.rst.j2") as filehandle:
        contents = filehandle.read()
    template = Template(contents)
    # Embed the example source, indented for a reST literal block.
    quickstart_contents = ""
    with open("../examples/quickstart.py") as filehandle:
        for line in filehandle.readlines():
            quickstart_contents += "    {}".format(line)
    # Capture the example's output in each documented invocation mode.
    run_output = indent_output(
        [sys.executable, "../examples/quickstart.py"],
    )
    actions_output = indent_output(
        [sys.executable, "../examples/quickstart.py", "--actions",
         "package", "upload", "notify"],
    )
    list_actions_output = indent_output(
        [sys.executable, "../examples/quickstart.py", "--list-actions"],
        required_string="clobber",
    )
    dump_config_output = indent_output(
        [sys.executable, "../examples/quickstart.py", "--new-argument",
         "foo", "--dump-config"],
        required_string="Dumping",
    )
    help_output = indent_output(
        [sys.executable, "../examples/quickstart.py", "--help"],
        required_string="usage:"
    )
    with open("quickstart.rst", "w") as filehandle:
        filehandle.write(
            template.render(
                git_branch=branch,
                quickstart_contents=quickstart_contents,
                run_output=run_output,
                actions_output=actions_output,
                list_actions_output=list_actions_output,
                dump_config_output=dump_config_output,
                help_output=help_output,
            )
        )
def main():
    """Build the docs: regenerate API stubs, README and quickstart, run
    sphinx html/text builds, copy the text README to the repo root, then
    remove the scratch ``artifacts`` directory."""
    os.chdir(os.path.dirname(__file__))
    subprocess.check_call("sphinx-apidoc -f -o . ../scriptharness".split())
    # sphinx-apidoc always emits modules.rst, which we do not want.
    cleanup("modules.rst")
    build_readme_rst()
    build_quickstart()
    subprocess.check_call(["make", "html"])
    subprocess.check_call(["make", "text"])
    subprocess.check_call(["cp", "_build/text/README.txt", "../README"])
    # Clean up artifacts left behind by running the quickstart examples.
    if os.path.exists("artifacts"):
        shutil.rmtree("artifacts")
if __name__ == '__main__':
    main()
| #!/usr/bin/env python
"""Build scriptharness documentation. At some point it would be good to
automate all CI/Release tasks for scriptharness; this is a good start.
"""
from __future__ import print_function, division, absolute_import, \
unicode_literals
from jinja2 import Template
import os
import shutil
import subprocess
import sys
READTHEDOCS_LINK = """.. image:: https://readthedocs.org/projects/python-scriptharness/badge/?version=latest
:target: https://readthedocs.org/projects/python-scriptharness/?badge=latest
:alt: Documentation Status"""
def cleanup(*paths):
    """Delete each of the given files, silently skipping missing ones."""
    for candidate in paths:
        if not os.path.exists(candidate):
            continue
        os.remove(candidate)
def build_readme_rst():
    """Render README.rst.j2 twice: with the RTD badge into ../README.rst,
    and without it into the local docs README.rst."""
    with open("README.rst.j2") as template_file:
        template = Template(template_file.read())
    with open("../README.rst", "w") as out:
        out.write(template.render(readthedocs_link=READTHEDOCS_LINK))
    with open("README.rst", "w") as out:
        out.write(template.render())
def indent_output(command, required_string="INFO", **kwargs):
    """Run *command*, return its (stdout+stderr) output with every line
    indented by four spaces; assert *required_string* appears in it."""
    kwargs.setdefault('stderr', subprocess.STDOUT)
    raw = subprocess.check_output(command, **kwargs)
    indented = "".join(
        "    {}{}".format(line.decode(), os.linesep)
        for line in raw.splitlines()
    )
    assert required_string in indented
    return indented
def build_quickstart():
    """Render quickstart.rst from its jinja2 template, embedding the current
    git branch name, the quickstart example source, and the indented output
    of running the example in several modes."""
    # Find the checked-out branch: the '* name' line of `git branch`.
    # NOTE(review): `branch` stays unbound if no line starts with '*'
    # (unusual git output) and render() below would then raise NameError.
    for line in subprocess.check_output(['git', 'branch', '--no-color'],
                                        stderr=subprocess.PIPE).splitlines():
        if line.startswith(b'*'):
            _, branch = line.split()
            branch = branch.decode()
    with open("quickstart.rst.j2") as filehandle:
        contents = filehandle.read()
    template = Template(contents)
    # Embed the example source, indented for a reST literal block.
    quickstart_contents = ""
    with open("../examples/quickstart.py") as filehandle:
        for line in filehandle.readlines():
            quickstart_contents += "    {}".format(line)
    # Capture the example's output in each documented invocation mode.
    run_output = indent_output(
        [sys.executable, "../examples/quickstart.py"],
    )
    actions_output = indent_output(
        [sys.executable, "../examples/quickstart.py", "--actions",
         "package", "upload", "notify"],
    )
    list_actions_output = indent_output(
        [sys.executable, "../examples/quickstart.py", "--list-actions"],
        required_string="clobber",
    )
    dump_config_output = indent_output(
        [sys.executable, "../examples/quickstart.py", "--new-argument",
         "foo", "--dump-config"],
        required_string="Dumping",
    )
    help_output = indent_output(
        [sys.executable, "../examples/quickstart.py", "--help"],
        required_string="usage:"
    )
    with open("quickstart.rst", "w") as filehandle:
        filehandle.write(
            template.render(
                git_branch=branch,
                quickstart_contents=quickstart_contents,
                run_output=run_output,
                actions_output=actions_output,
                list_actions_output=list_actions_output,
                dump_config_output=dump_config_output,
                help_output=help_output,
            )
        )
def main():
    """Build the docs: regenerate API stubs, README and quickstart, run
    sphinx html/text builds, and copy the text README to the repo root."""
    os.chdir(os.path.dirname(__file__))
    subprocess.check_call("sphinx-apidoc -f -o . ../scriptharness".split())
    # sphinx-apidoc always emits modules.rst, which we do not want.
    cleanup("modules.rst")
    build_readme_rst()
    build_quickstart()
    subprocess.check_call(["make", "html"])
    subprocess.check_call(["make", "text"])
    subprocess.check_call(["cp", "_build/text/README.txt", "../README"])
if __name__ == '__main__':
    main()
| Python | 0 |
0306b2631260c115f99b8a1f73322277e1c50989 | Remove fetal anomaly scans from ARPKD | scripts/fixtures/radar_fixtures/cohorts.py | scripts/fixtures/radar_fixtures/cohorts.py | from radar.models.groups import Group, GROUP_TYPE
from radar.pages import PAGE
from radar_fixtures.utils import add
# Cohort fixture definitions: one dict per cohort, giving its identifying
# codes plus the data-entry pages enabled for that cohort.
COHORTS = [
    {
        'code': 'BONEITIS',
        'name': 'Bone-itis',
        'short_name': 'Bone-itis',
        'pages': [
            PAGE.PRIMARY_DIAGNOSIS,
            PAGE.DIAGNOSES,
        ],
    },
    {
        'code': 'CIRCUSITIS',
        'name': 'Circusitis',
        'short_name': 'Circusitis',
        'pages': [
            PAGE.PRIMARY_DIAGNOSIS,
            PAGE.DIAGNOSES,
        ],
    },
    {
        # BUG FIX: the code string was missing its closing quote
        # ('ADTKD, -> 'ADTKD'), which made this module a SyntaxError.
        'code': 'ADTKD',
        'name': 'Autosomal Dominant Tubulointerstitial Kidney Disease (FUAN)',
        'short_name': 'ADTKD (FUAN)',
        'pages': [
            PAGE.PRIMARY_DIAGNOSIS,
            PAGE.DIAGNOSES,
            PAGE.GENETICS,
            PAGE.FAMILY_HISTORY,
            PAGE.FUAN_CLINICAL_PICTURES,
            PAGE.RESULTS,
            PAGE.DIALYSIS,
            PAGE.TRANSPLANTS,
        ],
    },
    {
        'code': 'ADPKD',
        'name': 'Autosomal Dominant Polycystic Kidney Disease',
        'short_name': 'ADPKD',
        'pages': [
            PAGE.PRIMARY_DIAGNOSIS,
            PAGE.DIAGNOSES,
            PAGE.GENETICS,
            PAGE.FAMILY_HISTORY,
            PAGE.RENAL_IMAGING,
            PAGE.LIVER_IMAGING,
            PAGE.LIVER_DISEASES,
            PAGE.RESULTS,
            PAGE.TRANSPLANTS,
            PAGE.LIVER_TRANSPLANTS,
        ],
    },
    {
        'code': 'ARPKD',
        'name': 'Autosomal Recessive Polycystic Kidney Disease',
        'short_name': 'ARPKD',
        'pages': [
            PAGE.PRIMARY_DIAGNOSIS,
            PAGE.DIAGNOSES,
            PAGE.GENETICS,
            PAGE.FAMILY_HISTORY,
            PAGE.FETAL_ULTRASOUNDS,
            PAGE.RENAL_IMAGING,
            PAGE.LIVER_IMAGING,
            PAGE.LIVER_DISEASES,
            PAGE.RESULTS,
            PAGE.NUTRITION,
            PAGE.LIVER_TRANSPLANTS,
            PAGE.NEPHRECTOMIES,
        ],
    },
]
def create_cohorts():
    """Create and register one cohort Group for every entry in COHORTS."""
    for spec in COHORTS:
        cohort = Group()
        cohort.type = GROUP_TYPE.COHORT
        # Copy the fixture fields straight onto the model instance.
        for attr in ('code', 'name', 'short_name', 'pages'):
            setattr(cohort, attr, spec[attr])
        add(cohort)
| from radar.models.groups import Group, GROUP_TYPE
from radar.pages import PAGE
from radar_fixtures.utils import add
# Cohort fixture definitions: one dict per cohort, giving its identifying
# codes plus the data-entry pages enabled for that cohort.
COHORTS = [
    {
        'code': 'BONEITIS',
        'name': 'Bone-itis',
        'short_name': 'Bone-itis',
        'pages': [
            PAGE.PRIMARY_DIAGNOSIS,
            PAGE.DIAGNOSES,
        ],
    },
    {
        'code': 'CIRCUSITIS',
        'name': 'Circusitis',
        'short_name': 'Circusitis',
        'pages': [
            PAGE.PRIMARY_DIAGNOSIS,
            PAGE.DIAGNOSES,
        ],
    },
    {
        # BUG FIX: the code string was missing its closing quote
        # ('ADTKD, -> 'ADTKD'), which made this module a SyntaxError.
        'code': 'ADTKD',
        'name': 'Autosomal Dominant Tubulointerstitial Kidney Disease (FUAN)',
        'short_name': 'ADTKD (FUAN)',
        'pages': [
            PAGE.PRIMARY_DIAGNOSIS,
            PAGE.DIAGNOSES,
            PAGE.GENETICS,
            PAGE.FAMILY_HISTORY,
            PAGE.FUAN_CLINICAL_PICTURES,
            PAGE.RESULTS,
            PAGE.DIALYSIS,
            PAGE.TRANSPLANTS,
        ],
    },
    {
        'code': 'ADPKD',
        'name': 'Autosomal Dominant Polycystic Kidney Disease',
        'short_name': 'ADPKD',
        'pages': [
            PAGE.PRIMARY_DIAGNOSIS,
            PAGE.DIAGNOSES,
            PAGE.GENETICS,
            PAGE.FAMILY_HISTORY,
            PAGE.RENAL_IMAGING,
            PAGE.LIVER_IMAGING,
            PAGE.LIVER_DISEASES,
            PAGE.RESULTS,
            PAGE.TRANSPLANTS,
            PAGE.LIVER_TRANSPLANTS,
        ],
    },
    {
        'code': 'ARPKD',
        'name': 'Autosomal Recessive Polycystic Kidney Disease',
        'short_name': 'ARPKD',
        'pages': [
            PAGE.PRIMARY_DIAGNOSIS,
            PAGE.DIAGNOSES,
            PAGE.GENETICS,
            PAGE.FAMILY_HISTORY,
            PAGE.FETAL_ULTRASOUNDS,
            PAGE.FETAL_ANOMALY_SCANS,
            PAGE.RENAL_IMAGING,
            PAGE.LIVER_IMAGING,
            PAGE.LIVER_DISEASES,
            PAGE.RESULTS,
            PAGE.NUTRITION,
            PAGE.LIVER_TRANSPLANTS,
            PAGE.NEPHRECTOMIES,
        ],
    },
]
def create_cohorts():
    """Create and register one cohort Group for every entry in COHORTS."""
    for spec in COHORTS:
        cohort = Group()
        cohort.type = GROUP_TYPE.COHORT
        # Copy the fixture fields straight onto the model instance.
        for attr in ('code', 'name', 'short_name', 'pages'):
            setattr(cohort, attr, spec[attr])
        add(cohort)
| Python | 0 |
d6371341c13ffe623755cf89ff03733c111bb994 | change to rga2 | profile_collection/startup/12-rga.py | profile_collection/startup/12-rga.py | ### This is RGA:2 configured for ExQ new RGA connected at 10.28.2.142 #####
from ophyd import Device, Component as Cpt
class RGA(Device):
    """Residual gas analyzer exposed through EPICS partial-pressure PVs.

    NOTE(review): ``EpicsSignal``/``EpicsSignalRO`` are assumed to be in
    scope from an earlier profile startup file -- confirm.
    """
    # Start / abort the MID scan mode (presumably "multiple ion detection"
    # -- confirm against the RGA IOC documentation).
    startRGA = Cpt(EpicsSignal, 'Cmd:MID_Start-Cmd')
    stopRGA = Cpt(EpicsSignal, 'Cmd:ScanAbort-Cmd')
    # Read-only partial pressures for the nine configured mass channels.
    mass1 = Cpt(EpicsSignalRO, 'P:MID1-I')
    mass2 = Cpt(EpicsSignalRO, 'P:MID2-I')
    mass3 = Cpt(EpicsSignalRO, 'P:MID3-I')
    mass4 = Cpt(EpicsSignalRO, 'P:MID4-I')
    mass5 = Cpt(EpicsSignalRO, 'P:MID5-I')
    mass6 = Cpt(EpicsSignalRO, 'P:MID6-I')
    mass7 = Cpt(EpicsSignalRO, 'P:MID7-I')
    mass8 = Cpt(EpicsSignalRO, 'P:MID8-I')
    mass9 = Cpt(EpicsSignalRO, 'P:MID9-I')
    ## We don't want the RGA to start and stop by any bluseky plan###
    # The stage/unstage/describe overrides below are deliberately disabled
    # (kept as a string) so bluesky plans never start or stop the RGA.
    """
    def stage(self):
        self.startRGA.put(1)
    def unstage(self):
        self.stopRGA.put(1)
    def describe(self):
        res = super().describe()
        # This precision should be configured correctly in EPICS.
        for key in res:
            res[key]['precision'] = 12
        return res
    """
# Singleton device instance; reads report only the nine mass channels.
rga = RGA('XF:28IDC-VA{RGA:2}',
          name='rga',
          read_attrs=['mass1', 'mass2', 'mass3', 'mass4','mass5', 'mass6', 'mass7', 'mass8', 'mass9'])
| from ophyd import Device, Component as Cpt
class RGA(Device):
    """Residual gas analyzer exposed through EPICS partial-pressure PVs.

    NOTE(review): ``EpicsSignal``/``EpicsSignalRO`` are assumed to be in
    scope from an earlier profile startup file -- confirm.
    """
    # Start / abort the MID scan mode (presumably "multiple ion detection"
    # -- confirm against the RGA IOC documentation).
    startRGA = Cpt(EpicsSignal, 'Cmd:MID_Start-Cmd')
    stopRGA = Cpt(EpicsSignal, 'Cmd:ScanAbort-Cmd')
    # Read-only partial pressures for the nine configured mass channels.
    mass1 = Cpt(EpicsSignalRO, 'P:MID1-I')
    mass2 = Cpt(EpicsSignalRO, 'P:MID2-I')
    mass3 = Cpt(EpicsSignalRO, 'P:MID3-I')
    mass4 = Cpt(EpicsSignalRO, 'P:MID4-I')
    mass5 = Cpt(EpicsSignalRO, 'P:MID5-I')
    mass6 = Cpt(EpicsSignalRO, 'P:MID6-I')
    mass7 = Cpt(EpicsSignalRO, 'P:MID7-I')
    mass8 = Cpt(EpicsSignalRO, 'P:MID8-I')
    mass9 = Cpt(EpicsSignalRO, 'P:MID9-I')
    ## We don't want the RGA to start and stop by any bluseky plan###
    # The stage/unstage/describe overrides below are deliberately disabled
    # (kept as a string) so bluesky plans never start or stop the RGA.
    """
    def stage(self):
        self.startRGA.put(1)
    def unstage(self):
        self.stopRGA.put(1)
    def describe(self):
        res = super().describe()
        # This precision should be configured correctly in EPICS.
        for key in res:
            res[key]['precision'] = 12
        return res
    """
# Singleton device instance; reads report only the nine mass channels.
rga = RGA('XF:28IDA-VA{RGA:1}',
          name='rga',
          read_attrs=['mass1', 'mass2', 'mass3', 'mass4','mass5', 'mass6', 'mass7', 'mass8', 'mass9'])
| Python | 0.998462 |
99c02cfc36660838e5250e62d1c6f390994dda9b | Attach metadata class instance to class attribute. | pyramid_jsonapi/metadata/__init__.py | pyramid_jsonapi/metadata/__init__.py | """This package contains metadata 'plugin' modules
that provide extra information related to the API being generated,
such as documentation, schemas etc.
Such plugins can optionally be offered as pyramid routes and views
under the 'metadata' endpoint."""
import collections
import importlib
import os.path
import pkgutil
from pyramid.settings import aslist
class MetaData():
    """Adds routes and views for all metadata modules.

    Plugins are added by their module name being listed in ``self.modules``.
    This may be overridden via the pyramid ini-file config option
    'pyramid_jsonapi.metadata_modules'.

    Modules specified in this way should be space or newline separated
    (see pyramid.settings aslist()).

    All modules MUST have a class with the same name as the package.
    This class MAY contain a 'views' attribute, which contains a list
    of 'VIEWS' namedtuple instances, which will be converted into pyramid
    routes and views.
    """
    def __init__(self, api):
        self.api = api
        # Default module list: every sub-module in this package directory.
        # aslist expects space-separated strings to convert to lists.
        # iter_modules returns a list of tuples - we only want name ([1])
        self.modules = aslist(
            self.api.config.registry.settings.get(
                'pyramid_jsonapi.metadata_modules',
                ' '.join(
                    [
                        x[1] for x in pkgutil.iter_modules(
                            [os.path.dirname(__file__)]
                        )
                    ]
                )
            ),
            flatten=True
        )
        self.make_routes_views()
    def make_routes_views(self):
        """Generate routes and views for plugin modules."""
        for mod_name in self.modules:
            # Import the module from the name provided
            module = importlib.import_module("{}.{}".format(__name__, mod_name))
            # Each module should have a class with the same name;
            # instantiate it with the api object.
            class_name = mod_name
            mclass = getattr(module, class_name)(self.api)
            # Attach the instance as an attribute named after the class,
            # so other code can reach e.g. ``metadata.JSONSchema``.
            setattr(self, mod_name, mclass)
            # 'views' is optional; modules without it add no routes.
            views = getattr(mclass, 'views', [])
            for view in views:
                rp_constructor = self.api.endpoint_data.rp_constructor
                route_name = self.api.endpoint_data.make_route_name(
                    class_name,
                    suffix=view.route_name
                )
                route_pattern = rp_constructor.metadata_pattern(
                    class_name,
                    view.route_name
                )
                self.api.config.add_route(
                    route_name,
                    route_pattern
                )
                # The view callable is the plugin instance itself; 'attr'
                # names the method pyramid should invoke.
                self.api.config.add_view(
                    mclass,
                    attr=str(view.attr),
                    route_name=route_name,
                    request_method=view.request_method or 'GET',
                    renderer=view.renderer or 'json',
                )
# Route/view specification consumed by MetaData.make_routes_views().
VIEWS = collections.namedtuple(
    'Views',
    ['attr', 'request_method', 'route_name', 'renderer'],
)
| """This package contains metadata 'plugin' modules
that provide extra information related to the API being generated,
such as documentation, schemas etc.
Such plugins can optionally be offered as pyramid routes and views
under the 'metadata' endpoint."""
import collections
import importlib
import os.path
import pkgutil
from pyramid.settings import aslist
class MetaData():
    """Adds routes and views for all metadata modules.

    Plugins are added by their module name being listed in ``self.modules``.
    This may be overridden via the pyramid ini-file config option
    'pyramid_jsonapi.metadata_modules'.

    Modules specified in this way should be space or newline separated
    (see pyramid.settings aslist()).

    All modules MUST have a class with the same name as the package.
    This class MAY contain a 'views' attribute, which contains a list
    of 'VIEWS' namedtuple instances, which will be converted into pyramid
    routes and views.
    """
    def __init__(self, api):
        self.api = api
        # Default module list: every sub-module in this package directory.
        # aslist expects space-separated strings to convert to lists.
        # iter_modules returns a list of tuples - we only want name ([1])
        self.modules = aslist(
            self.api.config.registry.settings.get(
                'pyramid_jsonapi.metadata_modules',
                ' '.join(
                    [
                        x[1] for x in pkgutil.iter_modules(
                            [os.path.dirname(__file__)]
                        )
                    ]
                )
            ),
            flatten=True
        )
        self.make_routes_views()
    def make_routes_views(self):
        """Generate routes and views for plugin modules."""
        for mod_name in self.modules:
            # Import the module from the name provided
            module = importlib.import_module("{}.{}".format(__name__, mod_name))
            # Each module should have a class with the same name;
            # instantiate it with the api object.
            class_name = mod_name
            mclass = getattr(module, class_name)(self.api)
            # 'views' is optional; modules without it add no routes.
            views = getattr(mclass, 'views', [])
            for view in views:
                rp_constructor = self.api.endpoint_data.rp_constructor
                route_name = self.api.endpoint_data.make_route_name(
                    class_name,
                    suffix=view.route_name
                )
                route_pattern = rp_constructor.metadata_pattern(
                    class_name,
                    view.route_name
                )
                self.api.config.add_route(
                    route_name,
                    route_pattern
                )
                # The view callable is the plugin instance itself; 'attr'
                # names the method pyramid should invoke.
                self.api.config.add_view(
                    mclass,
                    attr=str(view.attr),
                    route_name=route_name,
                    request_method=view.request_method or 'GET',
                    renderer=view.renderer or 'json',
                )
# Route/view specification consumed by MetaData.make_routes_views().
VIEWS = collections.namedtuple(
    'Views',
    ['attr', 'request_method', 'route_name', 'renderer'],
)
| Python | 0 |
ec46226b0ae5e9d2c29aa07f2ec6749f96a36804 | add str isValidPalindrome | str/string_function.py | str/string_function.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
def reverseStr(input_str, begin, end):
    """Return *input_str* with the characters in [begin, end] reversed.

    (For a whole-string reversal the Pythonic form is input_str[::-1].)
    """
    if begin >= end:
        return input_str
    middle = input_str[begin:end + 1][::-1]
    return input_str[:begin] + middle + input_str[end + 1:]
def reveseWord(input_str):
    """Return *input_str* with its word order reversed.

    Runs of whitespace collapse to single spaces (split()/join() semantics).
    The (misspelled) name is kept for existing callers.
    """
    # FIX: the original used xrange, which does not exist on Python 3 even
    # though the module imports __future__ features for 2/3 compatibility.
    # Reverse the whole string, then reverse each word back.
    return ' '.join(word[::-1] for word in input_str[::-1].split())
def atoi(str):  # parameter name shadows the builtin; kept for compatibility
    """Convert the leading integer in *str* to an int, LeetCode-style.

    Skips leading spaces, honours at most one optional '+'/'-' sign, then
    consumes digits until the first non-digit (e.g. "213k" -> 213).
    Returns 0 for None/empty/blank input and clamps the result to the
    signed 32-bit range [-2147483648, 2147483647].
    """
    if not str:
        return 0
    # Skip leading spaces, guarding against running off the end
    # (FIX: the original raised IndexError on all-blank input).
    index = 0
    length = len(str)
    while index < length and str[index] == ' ':
        index += 1
    if index == length:
        return 0
    # At most one sign character (FIX: 'elif' stops "-+" being consumed
    # as a single sign).
    positive = True
    if str[index] == '-':
        positive = False
        index += 1
    elif str[index] == '+':
        index += 1
    # Accumulate digits; stop at the first non-digit.
    # (FIX: range instead of Python-2-only xrange.)
    result = 0
    for i in range(index, length):
        if not str[i].isdigit():
            break
        result = result * 10 + (ord(str[i]) - ord('0'))
    # Clamp to the 32-bit signed integer range.
    if positive:
        return min(result, 2147483647)
    return max(-result, -2147483648)
def isPalindrome(self, s):
    """Return True if *s* is a palindrome, comparing only alphanumeric
    characters, case-insensitively.

    (The unused ``self`` parameter is kept for signature compatibility;
    this was lifted from a LeetCode-style solution class.)
    """
    relevant = [ch.lower() for ch in s if ch.isalnum()]
    return relevant == relevant[::-1]
if __name__ == '__main__':
    # Ad-hoc smoke test.  Note the first reveseWord() result is overwritten
    # before printing, so only the "World" reversal is actually shown.
    test = "I am a student."
    result = reveseWord(test)
    test1 = "World"
    result = reveseWord(test1)
    # result = reverseStr(test, 0, len(test) - 1)
    print(result)
| #!/usr/bin/env python
# -*- coding:utf-8 -*-
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
def reverseStr(input_str, begin, end):
    """Return *input_str* with the characters in [begin, end] reversed.

    (For a whole-string reversal the Pythonic form is input_str[::-1].)
    """
    if begin >= end:
        return input_str
    middle = input_str[begin:end + 1][::-1]
    return input_str[:begin] + middle + input_str[end + 1:]
def reveseWord(input_str):
    """Return *input_str* with its word order reversed.

    Runs of whitespace collapse to single spaces (split()/join() semantics).
    The (misspelled) name is kept for existing callers.
    """
    # FIX: the original used xrange, which does not exist on Python 3 even
    # though the module imports __future__ features for 2/3 compatibility.
    # Reverse the whole string, then reverse each word back.
    return ' '.join(word[::-1] for word in input_str[::-1].split())
def atoi(str):  # parameter name shadows the builtin; kept for compatibility
    """Convert the leading integer in *str* to an int, LeetCode-style.

    Skips leading spaces, honours at most one optional '+'/'-' sign, then
    consumes digits until the first non-digit (e.g. "213k" -> 213).
    Returns 0 for None/empty/blank input and clamps the result to the
    signed 32-bit range [-2147483648, 2147483647].
    """
    if not str:
        return 0
    # Skip leading spaces, guarding against running off the end
    # (FIX: the original raised IndexError on all-blank input).
    index = 0
    length = len(str)
    while index < length and str[index] == ' ':
        index += 1
    if index == length:
        return 0
    # At most one sign character (FIX: 'elif' stops "-+" being consumed
    # as a single sign).
    positive = True
    if str[index] == '-':
        positive = False
        index += 1
    elif str[index] == '+':
        index += 1
    # Accumulate digits; stop at the first non-digit.
    # (FIX: range instead of Python-2-only xrange.)
    result = 0
    for i in range(index, length):
        if not str[i].isdigit():
            break
        result = result * 10 + (ord(str[i]) - ord('0'))
    # Clamp to the 32-bit signed integer range.
    if positive:
        return min(result, 2147483647)
    return max(-result, -2147483648)
if __name__ == '__main__':
    # Ad-hoc smoke test.  Note the first reveseWord() result is overwritten
    # before printing, so only the "World" reversal is actually shown.
    test = "I am a student."
    result = reveseWord(test)
    test1 = "World"
    result = reveseWord(test1)
    # result = reverseStr(test, 0, len(test) - 1)
    print(result)
| Python | 0.999996 |
95a8a85aab19cad08703ec1adbb42ca1437119b0 | Fix gprof2dot not found error | pytest-profiling/pytest_profiling.py | pytest-profiling/pytest_profiling.py | from __future__ import absolute_import
import sys
import os
import cProfile
import pstats
import pipes
import errno
from hashlib import md5
import six
import pytest
# Length of the md5-hash prefix used when a test name is too long to be a
# filename (see pytest_runtest_call's ENAMETOOLONG fallback).
LARGE_FILENAME_HASH_LEN = 8
def clean_filename(s):
    """Return *s* with path-hostile and non-ASCII characters replaced by '_'."""
    forbidden = set('/?<>\:*|"')
    sanitized = []
    for ch in s:
        if ch in forbidden or ord(ch) >= 127:
            sanitized.append('_')
        else:
            sanitized.append(ch)
    return six.text_type("".join(sanitized))
class Profiling(object):
    """Profiling plugin for pytest.

    Wraps every test call in a cProfile session, writes one ``.prof`` file
    per test under ``prof/``, merges them into ``prof/combined.prof`` at
    session end and (optionally) renders an SVG call graph of the merged
    stats via gprof2dot + graphviz ``dot``.
    """
    svg = False        # render the combined profile to SVG at session end?
    svg_name = None    # absolute path of the rendered SVG, once written
    profs = []         # per-test .prof file paths collected this session
    combined = None    # absolute path of the merged pstats dump
    def __init__(self, svg):
        self.svg = svg
        self.profs = []
        # Prefer the gprof2dot installed next to the current interpreter
        # (e.g. inside the active virtualenv).
        self.gprof2dot = os.path.abspath(os.path.join(os.path.dirname(sys.executable), 'gprof2dot'))
        if not os.path.isfile(self.gprof2dot):
            # Can't see gprof in the local bin dir, we'll just have to hope it's on the path somewhere
            self.gprof2dot = 'gprof2dot'
    def pytest_sessionstart(self, session):  # @UnusedVariable
        # Make sure the output directory exists; ignore "already exists".
        try:
            os.makedirs("prof")
        except OSError:
            pass
    def pytest_sessionfinish(self, session, exitstatus):  # @UnusedVariable
        if self.profs:
            # Merge every per-test profile into a single pstats dump.
            combined = pstats.Stats(self.profs[0])
            for prof in self.profs[1:]:
                combined.add(prof)
            self.combined = os.path.abspath(os.path.join("prof", "combined.prof"))
            combined.dump_stats(self.combined)
            if self.svg:
                # gprof2dot -f pstats combined.prof | dot -Tsvg -o combined.svg
                # NOTE(review): the ``pipes`` module is removed in Python
                # 3.13; this pipeline will eventually need a subprocess port.
                self.svg_name = os.path.abspath(os.path.join("prof", "combined.svg"))
                t = pipes.Template()
                t.append("{} -f pstats $IN".format(self.gprof2dot), "f-")
                t.append("dot -Tsvg -o $OUT", "-f")
                t.copy(self.combined, self.svg_name)
    def pytest_terminal_summary(self, terminalreporter):
        # Print the top-20 cumulative-time entries into the pytest summary.
        if self.combined:
            terminalreporter.write("Profiling (from {prof}):\n".format(prof=self.combined))
            pstats.Stats(self.combined, stream=terminalreporter).strip_dirs().sort_stats('cumulative').print_stats(20)
        if self.svg_name:
            terminalreporter.write("SVG profile in {svg}.\n".format(svg=self.svg_name))
    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_call(self, item):
        # Profile only the test-call phase (not setup/teardown).
        prof_filename = os.path.abspath(os.path.join("prof", clean_filename(item.name) + ".prof"))
        try:
            os.makedirs(os.path.dirname(prof_filename))
        except OSError:
            pass
        prof = cProfile.Profile()
        prof.enable()
        yield
        prof.disable()
        try:
            prof.dump_stats(prof_filename)
        except EnvironmentError as err:
            if err.errno != errno.ENAMETOOLONG:
                raise
            # Filename too long for the filesystem: fall back to a short
            # md5-derived name.
            if len(item.name) < LARGE_FILENAME_HASH_LEN:
                raise
            hash_str = md5(item.name.encode('utf-8')).hexdigest()[:LARGE_FILENAME_HASH_LEN]
            prof_filename = os.path.join("prof", hash_str + ".prof")
            prof.dump_stats(prof_filename)
        self.profs.append(prof_filename)
def pytest_addoption(parser):
    """Register the profiling command-line flags on pytest's option parser."""
    opt_group = parser.getgroup('Profiling')
    for flag, description in (
            ("--profile", "generate profiling information"),
            ("--profile-svg", "generate profiling graph (using gprof2dot and dot -Tsvg)"),
    ):
        opt_group.addoption(flag, action="store_true", help=description)
def pytest_configure(config):
    """Activate the Profiling plugin when either profiling flag was given."""
    wants_profile = config.getvalue('profile')
    wants_svg = config.getvalue('profile_svg')
    if wants_profile or wants_svg:
        config.pluginmanager.register(Profiling(wants_svg))
| from __future__ import absolute_import
import sys
import os
import cProfile
import pstats
import pipes
import errno
from hashlib import md5
import six
import pytest
# Length of the md5-hash prefix used when a test name is too long to be a
# filename (see pytest_runtest_call's ENAMETOOLONG fallback).
LARGE_FILENAME_HASH_LEN = 8
def clean_filename(s):
    """Return *s* with path-hostile and non-ASCII characters replaced by '_'."""
    forbidden = set('/?<>\:*|"')
    sanitized = []
    for ch in s:
        if ch in forbidden or ord(ch) >= 127:
            sanitized.append('_')
        else:
            sanitized.append(ch)
    return six.text_type("".join(sanitized))
class Profiling(object):
    """Profiling plugin for pytest.

    Wraps every test call in a cProfile session, writes one ``.prof`` file
    per test under ``prof/``, merges them into ``prof/combined.prof`` at
    session end and (optionally) renders an SVG call graph of the merged
    stats via gprof2dot + graphviz ``dot``.
    """
    svg = False        # render the combined profile to SVG at session end?
    svg_name = None    # absolute path of the rendered SVG, once written
    profs = []         # per-test .prof file paths collected this session
    combined = None    # absolute path of the merged pstats dump
    def __init__(self, svg):
        self.svg = svg
        self.profs = []
        # Prefer the gprof2dot installed next to the current interpreter
        # (e.g. inside the active virtualenv).
        self.gprof2dot = os.path.abspath(os.path.join(os.path.dirname(sys.executable), 'gprof2dot'))
        # FIX: the condition was inverted ("if os.path.isfile(...)"), which
        # threw away the valid local gprof2dot path exactly when it existed
        # and kept a bogus absolute path when it did not -- contradicting
        # the fallback comment below.
        if not os.path.isfile(self.gprof2dot):
            # Can't see gprof in the local bin dir, we'll just have to hope it's on the path somewhere
            self.gprof2dot = 'gprof2dot'
    def pytest_sessionstart(self, session):  # @UnusedVariable
        # Make sure the output directory exists; ignore "already exists".
        try:
            os.makedirs("prof")
        except OSError:
            pass
    def pytest_sessionfinish(self, session, exitstatus):  # @UnusedVariable
        if self.profs:
            # Merge every per-test profile into a single pstats dump.
            combined = pstats.Stats(self.profs[0])
            for prof in self.profs[1:]:
                combined.add(prof)
            self.combined = os.path.abspath(os.path.join("prof", "combined.prof"))
            combined.dump_stats(self.combined)
            if self.svg:
                # gprof2dot -f pstats combined.prof | dot -Tsvg -o combined.svg
                self.svg_name = os.path.abspath(os.path.join("prof", "combined.svg"))
                t = pipes.Template()
                t.append("{} -f pstats $IN".format(self.gprof2dot), "f-")
                t.append("dot -Tsvg -o $OUT", "-f")
                t.copy(self.combined, self.svg_name)
    def pytest_terminal_summary(self, terminalreporter):
        # Print the top-20 cumulative-time entries into the pytest summary.
        if self.combined:
            terminalreporter.write("Profiling (from {prof}):\n".format(prof=self.combined))
            pstats.Stats(self.combined, stream=terminalreporter).strip_dirs().sort_stats('cumulative').print_stats(20)
        if self.svg_name:
            terminalreporter.write("SVG profile in {svg}.\n".format(svg=self.svg_name))
    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_call(self, item):
        # Profile only the test-call phase (not setup/teardown).
        prof_filename = os.path.abspath(os.path.join("prof", clean_filename(item.name) + ".prof"))
        try:
            os.makedirs(os.path.dirname(prof_filename))
        except OSError:
            pass
        prof = cProfile.Profile()
        prof.enable()
        yield
        prof.disable()
        try:
            prof.dump_stats(prof_filename)
        except EnvironmentError as err:
            if err.errno != errno.ENAMETOOLONG:
                raise
            # Filename too long for the filesystem: fall back to a short
            # md5-derived name.
            if len(item.name) < LARGE_FILENAME_HASH_LEN:
                raise
            hash_str = md5(item.name.encode('utf-8')).hexdigest()[:LARGE_FILENAME_HASH_LEN]
            prof_filename = os.path.join("prof", hash_str + ".prof")
            prof.dump_stats(prof_filename)
        self.profs.append(prof_filename)
def pytest_addoption(parser):
    """Register the profiling command-line flags on pytest's option parser."""
    opt_group = parser.getgroup('Profiling')
    for flag, description in (
            ("--profile", "generate profiling information"),
            ("--profile-svg", "generate profiling graph (using gprof2dot and dot -Tsvg)"),
    ):
        opt_group.addoption(flag, action="store_true", help=description)
def pytest_configure(config):
    """Activate the Profiling plugin when either profiling flag was given."""
    wants_profile = config.getvalue('profile')
    wants_svg = config.getvalue('profile_svg')
    if wants_profile or wants_svg:
        config.pluginmanager.register(Profiling(wants_svg))
| Python | 0.000002 |
72171897071bf4b88013b74a13e45231fbc8d7ff | Remove feedback from response serializer. | studies/serializers.py | studies/serializers.py | from accounts.models import Child, DemographicData, Organization, User
from api.serializers import (ModelSerializer, UUIDResourceRelatedField,
UUIDSerializerMixin)
from rest_framework_json_api import serializers
from studies.models import Response, Study, Feedback
class StudySerializer(UUIDSerializerMixin, ModelSerializer):
    """JSON-API serializer for Study records.

    Related resources are linked by UUID (not numeric pk) via
    UUIDResourceRelatedField, which also emits JSON-API `related` links.
    """
    # Self link; studies are addressed by uuid.
    url = serializers.HyperlinkedIdentityField(
        view_name='study-detail',
        lookup_field='uuid'
    )
    organization = UUIDResourceRelatedField(
        queryset=Organization.objects,
        related_link_view_name='organization-detail',
        related_link_lookup_field='uuid', many=False
    )
    creator = UUIDResourceRelatedField(
        queryset=User.objects,
        related_link_view_name='user-detail',
        related_link_lookup_field='uuid', many=False
    )
    # Nested list route: /studies/<study_uuid>/responses/
    responses = UUIDResourceRelatedField(
        queryset=Response.objects,
        many=True,
        related_link_view_name='study-responses-list',
        related_link_url_kwarg='study_uuid',
        related_link_lookup_field='uuid',
    )
    class Meta:
        model = Study
        fields = (
            'url',
            'name',
            'date_modified',
            'short_description',
            'long_description',
            'criteria',
            'duration',
            'contact_info',
            'image',
            'structure',
            'display_full_screen',
            'exit_url',
            'state',
            'public',
            'organization',
            'creator',
            'responses',
        )
class FeedbackSerializer(UUIDSerializerMixin, ModelSerializer):
    """JSON-API serializer for researcher Feedback on a Response."""
    # Self link; feedback is addressed by uuid.
    url = serializers.HyperlinkedIdentityField(
        view_name='feedback-detail',
        lookup_field='uuid'
    )
    response = UUIDResourceRelatedField(
        queryset=Response.objects,
        many=False,
        related_link_view_name='response-detail',
        related_link_lookup_field='uuid',
    )
    researcher = UUIDResourceRelatedField(
        queryset=User.objects,
        many=False,
        related_link_view_name='user-detail',
        related_link_lookup_field='uuid',
    )
    class Meta:
        model = Feedback
        fields = (
            'url',
            'comment',
            'researcher',
            'response'
        )
class ResponseSerializer(UUIDSerializerMixin, ModelSerializer):
    """JSON-API serializer for study Response records (read side)."""
    # Self link; responses are addressed by uuid.
    url = serializers.HyperlinkedIdentityField(
        view_name='response-detail',
        lookup_field='uuid'
    )
    study = UUIDResourceRelatedField(
        queryset=Study.objects,
        many=False,
        related_link_view_name='study-detail',
        related_link_lookup_field='uuid',
    )
    # Derived relationship: the user is reached through the child record.
    user = UUIDResourceRelatedField(
        source='child.user',
        queryset=User.objects,
        many=False,
        related_link_view_name='user-list',
        related_link_lookup_field='uuid',
        required=False
    )
    child = UUIDResourceRelatedField(
        queryset=Child.objects,
        many=False,
        related_link_view_name='child-detail',
        related_link_lookup_field='uuid',
    )
    # Demographics captured at response time; filled in automatically on
    # create (see ResponseWriteableSerializer), hence not required here.
    demographic_snapshot = UUIDResourceRelatedField(
        queryset=DemographicData.objects,
        many=False,
        related_link_view_name='demographicdata-detail',
        related_link_lookup_field='uuid',
        required=False
    )
    class Meta:
        model = Response
        fields = (
            'url',
            'conditions',
            'global_event_timings',
            'exp_data',
            'sequence',
            'completed',
            'child',
            'user',
            'study',
            'demographic_snapshot',
        )
class ResponseWriteableSerializer(ResponseSerializer):
    """Write-side variant of ResponseSerializer used when creating responses."""
    def create(self, validated_data):
        """
        Use the ids for objects so django rest framework doesn't
        try to create new objects out of spite
        """
        # Swap the related model instances for *_id foreign keys before
        # handing off to ModelSerializer.create().
        study = validated_data.pop('study')
        validated_data['study_id'] = study.id
        # implicitly set the demographic data because we know what it will be
        validated_data['demographic_snapshot_id'] = validated_data.get('child').user.latest_demographics.id
        child = validated_data.pop('child')
        validated_data['child_id'] = child.id
        return super().create(validated_data)
| from accounts.models import Child, DemographicData, Organization, User
from api.serializers import (ModelSerializer, UUIDResourceRelatedField,
UUIDSerializerMixin)
from rest_framework_json_api import serializers
from studies.models import Response, Study, Feedback
class StudySerializer(UUIDSerializerMixin, ModelSerializer):
    """JSON-API serializer for Study records.

    Related resources are linked by UUID (not numeric pk) via
    UUIDResourceRelatedField, which also emits JSON-API `related` links.
    """
    # Self link; studies are addressed by uuid.
    url = serializers.HyperlinkedIdentityField(
        view_name='study-detail',
        lookup_field='uuid'
    )
    organization = UUIDResourceRelatedField(
        queryset=Organization.objects,
        related_link_view_name='organization-detail',
        related_link_lookup_field='uuid', many=False
    )
    creator = UUIDResourceRelatedField(
        queryset=User.objects,
        related_link_view_name='user-detail',
        related_link_lookup_field='uuid', many=False
    )
    # Nested list route: /studies/<study_uuid>/responses/
    responses = UUIDResourceRelatedField(
        queryset=Response.objects,
        many=True,
        related_link_view_name='study-responses-list',
        related_link_url_kwarg='study_uuid',
        related_link_lookup_field='uuid',
    )
    class Meta:
        model = Study
        fields = (
            'url',
            'name',
            'date_modified',
            'short_description',
            'long_description',
            'criteria',
            'duration',
            'contact_info',
            'image',
            'structure',
            'display_full_screen',
            'exit_url',
            'state',
            'public',
            'organization',
            'creator',
            'responses',
        )
class FeedbackSerializer(UUIDSerializerMixin, ModelSerializer):
    """JSON-API serializer for researcher Feedback on a Response."""
    # Self link; feedback is addressed by uuid.
    url = serializers.HyperlinkedIdentityField(
        view_name='feedback-detail',
        lookup_field='uuid'
    )
    response = UUIDResourceRelatedField(
        queryset=Response.objects,
        many=False,
        related_link_view_name='response-detail',
        related_link_lookup_field='uuid',
    )
    researcher = UUIDResourceRelatedField(
        queryset=User.objects,
        many=False,
        related_link_view_name='user-detail',
        related_link_lookup_field='uuid',
    )
    class Meta:
        model = Feedback
        fields = (
            'url',
            'comment',
            'researcher',
            'response'
        )
class ResponseSerializer(UUIDSerializerMixin, ModelSerializer):
    """JSON-API serializer for study Response records (read side)."""
    # Self link; responses are addressed by uuid.
    url = serializers.HyperlinkedIdentityField(
        view_name='response-detail',
        lookup_field='uuid'
    )
    # Nested list route: /responses/<response_uuid>/feedback/
    feedback = UUIDResourceRelatedField(
        queryset=Feedback.objects,
        many=True,
        related_link_view_name='response-feedback-list',
        related_link_url_kwarg='response_uuid',
        related_link_lookup_field='uuid',
    )
    study = UUIDResourceRelatedField(
        queryset=Study.objects,
        many=False,
        related_link_view_name='study-detail',
        related_link_lookup_field='uuid',
    )
    # Derived relationship: the user is reached through the child record.
    user = UUIDResourceRelatedField(
        source='child.user',
        queryset=User.objects,
        many=False,
        related_link_view_name='user-list',
        related_link_lookup_field='uuid',
        required=False
    )
    child = UUIDResourceRelatedField(
        queryset=Child.objects,
        many=False,
        related_link_view_name='child-detail',
        related_link_lookup_field='uuid',
    )
    # Demographics captured at response time; filled in automatically on
    # create (see ResponseWriteableSerializer), hence not required here.
    demographic_snapshot = UUIDResourceRelatedField(
        queryset=DemographicData.objects,
        many=False,
        related_link_view_name='demographicdata-detail',
        related_link_lookup_field='uuid',
        required=False
    )
    class Meta:
        model = Response
        fields = (
            'url',
            'conditions',
            'global_event_timings',
            'exp_data',
            'sequence',
            'completed',
            'child',
            'user',
            'study',
            'feedback',
            'demographic_snapshot',
        )
class ResponseWriteableSerializer(ResponseSerializer):
    """Write-side variant of ResponseSerializer used when creating responses."""
    def create(self, validated_data):
        """
        Use the ids for objects so django rest framework doesn't
        try to create new objects out of spite
        """
        # Swap the related model instances for *_id foreign keys before
        # handing off to ModelSerializer.create().
        study = validated_data.pop('study')
        validated_data['study_id'] = study.id
        # implicitly set the demographic data because we know what it will be
        validated_data['demographic_snapshot_id'] = validated_data.get('child').user.latest_demographics.id
        child = validated_data.pop('child')
        validated_data['child_id'] = child.id
        return super().create(validated_data)
| Python | 0 |
c128355193d3f0339e24ee70f685d31dbf5b6bb9 | Add stub for search() | sudoku/puzzlesolver.py | sudoku/puzzlesolver.py | """
puzzlesolver.py
:created on: 20160624
__author__ = 'Frederic Dupont'
:License: GPL3
"""
__author__ = 'Fred Dupont'
import sys
from sudoku import puzzleconstants as p_const
from sudoku.puzzle import Puzzle, make_grid_from_string
class PuzzleSolver(object):
    """
    Solves a sudoku puzzle by constraint propagation.

    Works on a clone of a Puzzle so the caller's original is untouched;
    the clone's ``grid`` and ``candidates`` are mutated in place.
    """
    def __init__(self, puzzle_clone):
        # The puzzle clone this solver mutates.
        self._puzzle = puzzle_clone
    def eliminate_candidates(self):
        """For each square in the grid that has a single assigned value,
        run through the PEERS and eliminate this value from the candidates.

        *** This will have to be redone each time the puzzle is cloned ***

        :return: False if an inconsistency is discovered --> makes the
            puzzle invalid; True otherwise
        """
        for square in p_const.SQUARES:
            current_value = self._puzzle.grid[square]
            if current_value not in '.0':
                # this says is there is an inconsistency
                # that means it must return False to signal it
                if self._puzzle.candidates[square] != p_const.VALUE_TO_CANDIDATES[current_value]:
                    return False
                # Drop the solved value from every peer's candidate string.
                for peer in p_const.PEERS[square]:
                    self._puzzle.candidates[peer] = self._puzzle.candidates[peer].replace(current_value, '.')
        return True
    def propagate(self):
        """If a UNIT has only one possible place for a value,
        assign this value there, and adjust the candidates for this place.
        """
        for unit in p_const.UNIT_LISTS:
            for digit in p_const.DIGITS:
                # Collect every square of the unit that still allows `digit`.
                result = []
                for square in unit:
                    if digit in self._puzzle.candidates[square]:
                        result.append(square)
                # Exactly one square can hold this digit: place it there.
                if len(result) == 1:
                    self._puzzle.grid[result[0]] = digit
                    self._puzzle.candidates[result[0]] = p_const.VALUE_TO_CANDIDATES[digit]
    def fill_singles(self):
        """
        Completes a unit that is missing only one value
        by updating the grid and the candidates values.
        """
        for unit in p_const.UNIT_LISTS:
            result = []
            # res_string starts as '123456789'; each digit already present
            # in the unit is blanked out with '.'.
            res_string = [str(_) for _ in range(1, 10)]
            for square in unit:
                val = self._puzzle.grid[square]
                if val in '.0':
                    result.append(square)
                else:
                    res_string[int(val) - 1] = '.'
            # A single empty square means the one remaining digit goes there.
            if len(result) == 1:
                self._puzzle.grid[result[0]] = p_const.CANDIDATES_TO_VALUE[''.join(res_string)]
                self._puzzle.candidates[result[0]] = ''.join(res_string)
    def eliminate_propagate_fill(self):
        """
        Propagates the constraints by successively eliminating, propagating
        and filling the solved squares until all constraints have been
        propagated (no more change in the puzzle states)
        ...or the puzzle is solved.

        :return: False if an inconsistency is discovered, True otherwise
        """
        while not self._puzzle.is_solved():
            # Snapshot the state so we can detect a fixed point below.
            pre_grid_state, pre_candidates_state = repr(self._puzzle), str(self._puzzle)
            if not self.eliminate_candidates():
                return False
            self.propagate()
            self.fill_singles()
            post_grid_state, post_candidates_state = repr(self._puzzle), str(self._puzzle)
            # No progress this pass: propagation alone cannot go further.
            if pre_grid_state == post_grid_state and pre_candidates_state == post_candidates_state:
                break
        # NOTE(review): debug dump of the final state; presumably left in on
        # purpose during development -- confirm before shipping.
        print(repr(self._puzzle))
        return True
    def search(self):
        """
        Stub for depth-first search (not yet implemented).

        Intended behaviour:
        - clones the puzzle and creates a new solver
        - assigns the next candidate value to the empty square with the
          fewest candidates
        - recursively calls solve() on the new puzzle

        :return: a solved puzzle repr()
        """
        pass
    def solve(self):
        """
        Manages the operations to conduct in order to solve a puzzle.

        :print: the repr of a solved puzzle (as far as could go with
            constraint propagation)
        :return: the repr() of the puzzle after propagation
        """
        self.eliminate_propagate_fill()
        return repr(self._puzzle)
# def main(argv):
# if __name__ == '__main__':
# sys.exit(main(sys.argv))
| """
puzzlesolver.py
:created on: 20160624
__author__ = 'Frederic Dupont'
:License: GPL3
"""
__author__ = 'Fred Dupont'
import sys
from sudoku import puzzleconstants as p_const
from sudoku.puzzle import Puzzle, make_grid_from_string
class PuzzleSolver(object):
    """
    Solves a sudoku puzzle by constraint propagation.

    Works on a clone of a Puzzle so the caller's original is untouched;
    the clone's ``grid`` and ``candidates`` are mutated in place.
    """
    def __init__(self, puzzle_clone):
        # The puzzle clone this solver mutates.
        self._puzzle = puzzle_clone
    def eliminate_candidates(self):
        """For each square in the grid that has a single assigned value,
        run through the PEERS and eliminate this value from the candidates.

        *** This will have to be redone each time the puzzle is cloned ***

        :return: False if an inconsistency is discovered --> makes the
            puzzle invalid; True otherwise
        """
        for square in p_const.SQUARES:
            current_value = self._puzzle.grid[square]
            if current_value not in '.0':
                # this says is there is an inconsistency
                # that means it must return False to signal it
                if self._puzzle.candidates[square] != p_const.VALUE_TO_CANDIDATES[current_value]:
                    return False
                # Drop the solved value from every peer's candidate string.
                for peer in p_const.PEERS[square]:
                    self._puzzle.candidates[peer] = self._puzzle.candidates[peer].replace(current_value, '.')
        return True
    def propagate(self):
        """If a UNIT has only one possible place for a value,
        assign this value there, and adjust the candidates for this place.
        """
        for unit in p_const.UNIT_LISTS:
            for digit in p_const.DIGITS:
                # Collect every square of the unit that still allows `digit`.
                result = []
                for square in unit:
                    if digit in self._puzzle.candidates[square]:
                        result.append(square)
                # Exactly one square can hold this digit: place it there.
                if len(result) == 1:
                    self._puzzle.grid[result[0]] = digit
                    self._puzzle.candidates[result[0]] = p_const.VALUE_TO_CANDIDATES[digit]
    def fill_singles(self):
        """
        Completes a unit that is missing only one value
        by updating the grid and the candidates values.
        """
        for unit in p_const.UNIT_LISTS:
            result = []
            # res_string starts as '123456789'; each digit already present
            # in the unit is blanked out with '.'.
            res_string = [str(_) for _ in range(1, 10)]
            for square in unit:
                val = self._puzzle.grid[square]
                if val in '.0':
                    result.append(square)
                else:
                    res_string[int(val) - 1] = '.'
            # A single empty square means the one remaining digit goes there.
            if len(result) == 1:
                self._puzzle.grid[result[0]] = p_const.CANDIDATES_TO_VALUE[''.join(res_string)]
                self._puzzle.candidates[result[0]] = ''.join(res_string)
    def eliminate_propagate_fill(self):
        """
        Propagates the constraints by successively eliminating, propagating
        and filling the solved squares until all constraints have been
        propagated (no more change in the puzzle states)
        ...or the puzzle is solved.

        :return: False if an inconsistency is discovered, True otherwise
        """
        while not self._puzzle.is_solved():
            # Snapshot the state so we can detect a fixed point below.
            pre_grid_state, pre_candidates_state = repr(self._puzzle), str(self._puzzle)
            if not self.eliminate_candidates():
                return False
            self.propagate()
            self.fill_singles()
            post_grid_state, post_candidates_state = repr(self._puzzle), str(self._puzzle)
            # No progress this pass: propagation alone cannot go further.
            if pre_grid_state == post_grid_state and pre_candidates_state == post_candidates_state:
                break
        # NOTE(review): debug dump of the final state; presumably left in on
        # purpose during development -- confirm before shipping.
        print(repr(self._puzzle))
        return True
    def solve(self):
        """
        Manages the operations to conduct in order to solve a puzzle.

        :print: the repr of a solved puzzle (as far as could go with
            constraint propagation)
        :return: the repr() of the puzzle after propagation
        """
        self.eliminate_propagate_fill()
        return repr(self._puzzle)
# def main(argv):
# if __name__ == '__main__':
# sys.exit(main(sys.argv))
| Python | 0 |
39bf1b019897b71a3269e46816f11eefa32de507 | Fix argument order | elm_package.py | elm_package.py | #! /usr/bin/env python
"""
Load and save elm-package.json safely.
"""
# from typing import Dict, Tuple, IO
import copy
from collections import OrderedDict
import json
def load(fileobj):
    # type: (IO[str]) -> Dict
    """Deserialize an elm-package.json file object, preserving key order."""
    contents = fileobj.read()
    return json.loads(contents, object_pairs_hook=OrderedDict)
def dump(package, fileobj):
    # type: (Dict, IO[str]) -> None
    """Serialize *package* to *fileobj* as indented JSON with its
    dependencies sorted by name. *package* itself is not mutated."""
    serializable = copy.deepcopy(package)
    serializable['dependencies'] = sorted_deps(serializable['dependencies'])
    json.dump(serializable, fileobj, sort_keys=False, indent=4,
              separators=(',', ': '))
def sorted_deps(deps):
    # type: (Dict) -> Dict
    """Return *deps* as an OrderedDict with package names in sorted order."""
    ordered_pairs = sorted(deps.items())
    return OrderedDict(ordered_pairs)
def sync_deps(from_deps, to_deps):
    # type: (Dict, Dict) -> Tuple[List[str], Dict]
    """Overlay the versions from *from_deps* onto a copy of *to_deps*.

    Returns ``(messages, merged)``: human-readable descriptions of every
    insertion or version change, plus the merged dependency mapping.
    *to_deps* itself is never mutated.
    """
    messages = []
    merged = copy.deepcopy(to_deps)
    for package_name, package_version in from_deps.items():
        if package_name in to_deps:
            existing_version = to_deps[package_name]
            if existing_version == package_version:
                continue
            merged[package_name] = package_version
            messages.append(
                'Changing {package_name} from version {other_package_version} to {package_version}'.format(
                    package_version=package_version, package_name=package_name,
                    other_package_version=existing_version))
        else:
            merged[package_name] = package_version
            messages.append(
                'Inserting new package {package_name} at version {package_version}'.format(
                    package_name=package_name, package_version=package_version))
    return messages, merged
| #! /usr/bin/env python
"""
Load and save elm-package.json safely.
"""
# from typing import Dict, Tuple, IO
import copy
from collections import OrderedDict
import json
def load(fileobj):
    # type: (IO[str]) -> Dict
    """Read elm-package.json from an open file object, keeping key order."""
    raw = fileobj.read()
    return json.loads(raw, object_pairs_hook=OrderedDict)
def dump(package, fileobj):
    # type: (Dict, IO[str]) -> None
    """Write *package* to *fileobj* as indented JSON, dependencies sorted
    by name. Operates on a deep copy so the caller's dict is untouched."""
    payload = copy.deepcopy(package)
    payload['dependencies'] = sorted_deps(payload['dependencies'])
    json.dump(payload, fileobj, sort_keys=False, indent=4,
              separators=(',', ': '))
def sorted_deps(deps):
    # type: (Dict) -> Dict
    """Return the dependencies ordered alphabetically by package name."""
    return OrderedDict(sorted(deps.items(), key=lambda pair: pair[0]))
def sync_deps(from_deps, to_deps):
    # type: (Dict, Dict) -> Tuple[List[str], Dict]
    """Copy dependency pins from *from_deps* into a copy of *to_deps*.

    :param from_deps: authoritative mapping of package name -> version.
    :param to_deps: mapping to update; not mutated (a deep copy is returned).
    :return: ``(messages, result)`` where *messages* describes every
        insertion or version change and *result* is the merged mapping.
    """
    messages = []
    result = copy.deepcopy(to_deps)
    for (package_name, package_version) in from_deps.items():
        if package_name not in to_deps:
            result[package_name] = package_version
            messages.append('Inserting new package {package_name} at version {package_version}'.format(
                package_name=package_name, package_version=package_version)
            )
        elif to_deps[package_name] != package_version:
            result[package_name] = package_version
            # Bug fix: the placeholders were swapped, producing
            # "from <new> to <old>". Report old -> new instead.
            messages.append('Changing {package_name} from version {other_package_version} to {package_version}'.format(
                package_version=package_version, package_name=package_name,
                other_package_version=to_deps[package_name])
            )
    return messages, result
| Python | 0.002367 |
c4670a1b6eb1bbd546369be185cd1fb69dfa3ec0 | fix race condition that would drop certain messages on the floor. | emailparser.py | emailparser.py | #!/usr/bin/python
import imaplib, time, email.utils, email.parser, calendar
import re
class EmailParser:
def __init__(self, server, username, password, port=None, use_ssl=False, last_check=time.time()):
self.server = server
if not port:
if use_ssl: self.port = 993
else: self.port = 110
else:
self.port = port
if use_ssl: self.imap = imaplib.IMAP4_SSL(self.server, self.port)
else: self.imap = imaplib.IMAP4(self.server, self.port)
self.imap.login(username, password)
self.last_check = last_check
self.set_last_uids()
def set_last_uids(self):
self.last_uid = {}
for folder in self.get_folders():
if "[Gmail]" in folder: continue
print "running on %s" % folder
selection = self.imap.select(folder, True) # open read-only
if selection[0] == "NO": continue
if selection[1][0] == '0':
self.last_uid[folder] = 0
continue
last_msgid = selection[1][0]
uid_text = self.imap.fetch(last_msgid, "UID")[1][0]
self.last_uid[folder] = int(re.search("\(UID (\d+)\)", uid_text).group(1))
def get_folders(self):
# folders are indicated like (\\HasNoChildren) "." "INBOX.Foo"; we just want INBOX.Foo
folder_re = re.compile(r'\(.*?\) ".*" (?P<name>.*)')
return [folder_re.match(f_str).groups()[0].strip('"') for f_str in self.imap.list()[1]]
def check(self):
"""
Check for messages received since the last check.
Return the number of unread messages.
"""
headers = []
for folder in self.get_folders():
if "[Gmail]" in folder: continue
response, [nmesgs] = self.imap.select(folder, True)
if response == "NO": continue # open read-only
# XXX: large number is because * will always return the last message
throwaway, new = self.imap.search(None, 'UNSEEN', "(UID %d:99999999)" % (self.last_uid[folder] + 1))
if new == ['']: continue # skip all-read folders
indices = ','.join(new[0].split(' '))
# for some reason, I get )s mixed in with actual header/response pair information.
new_headers = [parse_headers(x[1]) for x in self.imap.fetch(indices, "(BODY[HEADER.FIELDS (FROM SUBJECT)])")[1] if x != ')']
for new_header in new_headers: new_header["folder"] = folder
headers += new_headers
uid_text = self.imap.fetch(nmesgs, "UID")[1][0]
self.last_uid[folder] = int(re.search("\(UID (\d+)\)", uid_text).group(1))
return headers
def parse_headers(header):
return {"from": get_field("from", header), "subject" : get_field("subject", header)}
def get_field(name, field_string): # TODO: replace this with a real e-mail parser
fields = filter(lambda x: x.startswith(name.capitalize() + ":"), field_string.split("\r\n"))
if fields == []: return None
else: return fields[0].replace(name.capitalize() + ": ", "", 1)
| #!/usr/bin/python
import imaplib, time, email.utils, email.parser, calendar
import re
class EmailParser:
def __init__(self, server, username, password, port=None, use_ssl=False, last_check=time.time()):
self.server = server
if not port:
if use_ssl: self.port = 993
else: self.port = 110
else:
self.port = port
if use_ssl: self.imap = imaplib.IMAP4_SSL(self.server, self.port)
else: self.imap = imaplib.IMAP4(self.server, self.port)
self.imap.login(username, password)
self.last_check = last_check
self.set_last_uids()
def set_last_uids(self):
self.last_uid = {}
for folder in self.get_folders():
if "[Gmail]" in folder: continue
print "running on %s" % folder
selection = self.imap.select(folder, True) # open read-only
if selection[0] == "NO": continue
if selection[1][0] == '0':
self.last_uid[folder] = 0
continue
last_msgid = selection[1][0]
uid_text = self.imap.fetch(last_msgid, "UID")[1][0]
self.last_uid[folder] = int(re.search("\(UID (\d+)\)", uid_text).group(1))
def get_folders(self):
# folders are indicated like (\\HasNoChildren) "." "INBOX.Foo"; we just want INBOX.Foo
folder_re = re.compile(r'\(.*?\) ".*" (?P<name>.*)')
return [folder_re.match(f_str).groups()[0].strip('"') for f_str in self.imap.list()[1]]
def check(self):
"""
Check for messages received since the last check.
Return the number of unread messages.
"""
headers = []
for folder in self.get_folders():
if "[Gmail]" in folder: continue
if self.imap.select(folder, True)[0] == "NO": continue # open read-only
# XXX: large number is because * will always return the last message
throwaway, new = self.imap.search(None, 'UNSEEN', "(UID %d:99999999)" % (self.last_uid[folder] + 1))
if new == ['']: continue # skip all-read folders
indices = ','.join(new[0].split(' '))
# for some reason, I get )s mixed in with actual header/response pair information.
new_headers = [parse_headers(x[1]) for x in self.imap.fetch(indices, "(BODY[HEADER.FIELDS (FROM SUBJECT)])")[1] if x != ')']
for new_header in new_headers: new_header["folder"] = folder
headers += new_headers
self.set_last_uids()
return headers
def parse_headers(header):
return {"from": get_field("from", header), "subject" : get_field("subject", header)}
def get_field(name, field_string): # TODO: replace this with a real e-mail parser
fields = filter(lambda x: x.startswith(name.capitalize() + ":"), field_string.split("\r\n"))
if fields == []: return None
else: return fields[0].replace(name.capitalize() + ": ", "", 1)
| Python | 0 |
e33f4bf8aab41a3bcb564e582c64e2d5e1986187 | Fix name/username match | modules/sfp_myspace.py | modules/sfp_myspace.py | #-------------------------------------------------------------------------------
# Name: sfp_myspace
# Purpose: Query MySpace for username and location information.
#
# Author: Brendan Coles <bcoles@gmail.com>
#
# Created: 2018-10-07
# Copyright: (c) Brendan Coles 2018
# Licence: GPL
#-------------------------------------------------------------------------------
import re
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
class sfp_myspace(SpiderFootPlugin):
"""MySpace:Footprint,Investigate,Passive:Social Media::Gather username and location from MySpace.com profiles."""
# Default options
opts = {
}
# Option descriptions
optdescs = {
}
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.__dataSource__ = "MySpace.com"
self.results = list()
for opt in userOpts.keys():
self.opts[opt] = userOpts[opt]
# What events is this module interested in for input
def watchedEvents(self):
return [ "EMAILADDR", "SOCIAL_MEDIA" ]
# What events this module produces
def producedEvents(self):
return [ "SOCIAL_MEDIA", "GEOINFO" ]
# Handle events sent to this module
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
if eventData in self.results:
return None
else:
self.results.append(eventData)
self.sf.debug("Received event, " + eventName + ", from " + srcModuleName)
# Search by email address
if eventName == "EMAILADDR":
email = eventData
res = self.sf.fetchUrl("https://myspace.com/search/people?q=" + email, timeout=self.opts['_fetchtimeout'], useragent=self.opts['_useragent'])
if res['content'] is None:
return None
# Extract HTML containing potential profile matches
profiles = re.findall(r'<a href="/[a-zA-Z0-9_]+">[^<]+</a></h6>', res['content'])
if not profiles:
return None
# The first result is the closest match, but whether it's an exact match is unknown.
profile = profiles[0]
# Check for email address as name, at the risk of missed results.
matches = re.findall(r'<a href="/([a-zA-Z0-9_]+)">' + email, profile, re.IGNORECASE)
if not matches:
return None
name = matches[0]
e = SpiderFootEvent("SOCIAL_MEDIA", "MySpace: " + name, self.__name__, event)
self.notifyListeners(e)
# Retrieve location from MySpace profile
if eventName == "SOCIAL_MEDIA":
network = eventData.split(": ")[0]
name = eventData.split(": ")[1]
if network != "MySpace":
self.sf.debug("Skipping social network profile, " + name + ", as not a MySpace profile")
return None
res = self.sf.fetchUrl("https://myspace.com/" + name, timeout=self.opts['_fetchtimeout'], useragent=self.opts['_useragent'])
if res['content'] is None:
return None
data = re.findall(r'<div class="location_[^"]+" data-display-text="(.+?)"', res['content'])
if not data:
return None
location = data[0]
if len(location) < 5 or len(location) > 100:
self.sf.debug("Skipping likely invalid location.")
return None
e = SpiderFootEvent("GEOINFO", location, self.__name__, event)
self.notifyListeners(e)
# End of sfp_myspace class
| #-------------------------------------------------------------------------------
# Name: sfp_myspace
# Purpose: Query MySpace for username and location information.
#
# Author: Brendan Coles <bcoles@gmail.com>
#
# Created: 2018-10-07
# Copyright: (c) Brendan Coles 2018
# Licence: GPL
#-------------------------------------------------------------------------------
import re
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
class sfp_myspace(SpiderFootPlugin):
"""MySpace:Footprint,Investigate,Passive:Social Media::Gather username and location from MySpace.com profiles."""
# Default options
opts = {
}
# Option descriptions
optdescs = {
}
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.__dataSource__ = "MySpace.com"
self.results = list()
for opt in userOpts.keys():
self.opts[opt] = userOpts[opt]
# What events is this module interested in for input
def watchedEvents(self):
return [ "EMAILADDR", "SOCIAL_MEDIA" ]
# What events this module produces
def producedEvents(self):
return [ "SOCIAL_MEDIA", "GEOINFO" ]
# Handle events sent to this module
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
if eventData in self.results:
return None
else:
self.results.append(eventData)
self.sf.debug("Received event, " + eventName + ", from " + srcModuleName)
# Search by email address
if eventName == "EMAILADDR":
email = eventData
res = self.sf.fetchUrl("https://myspace.com/search/people?q=" + email, timeout=self.opts['_fetchtimeout'], useragent=self.opts['_useragent'])
if res['content'] is None:
return None
# The first result is the closest match, but whether it's an exact match is unknown.
# As such, we check for email address as name, at the risk of missed results.
profiles = re.findall(r'<a href="/([a-zA-Z0-9_]+)">' + email.lower() + '</a></h6>', res['content'].lower())
if not profiles:
return None
name = profiles[0]
e = SpiderFootEvent("SOCIAL_MEDIA", "MySpace: " + name, self.__name__, event)
self.notifyListeners(e)
# Retrieve location from MySpace profile
if eventName == "SOCIAL_MEDIA":
network = eventData.split(": ")[0]
name = eventData.split(": ")[1]
if network != "MySpace":
self.sf.debug("Skipping social network profile, " + name + ", as not a MySpace profile")
return None
res = self.sf.fetchUrl("https://myspace.com/" + name, timeout=self.opts['_fetchtimeout'], useragent=self.opts['_useragent'])
if res['content'] is None:
return None
data = re.findall(r'<div class="location_[^"]+" data-display-text="(.+?)"', res['content'])
if not data:
return None
location = data[0]
if len(location) < 5 or len(location) > 100:
self.sf.debug("Skipping likely invalid location.")
return None
e = SpiderFootEvent("GEOINFO", location, self.__name__, event)
self.notifyListeners(e)
# End of sfp_myspace class
| Python | 0.033851 |
552a9e958443ffdff4b28e6e432c09e7d011df6a | Update tesselate_shapes_frame docstring | svg_model/tesselate.py | svg_model/tesselate.py | # coding: utf-8
import types
import pandas as pd
from .seidel import Triangulator
def tesselate_shapes_frame(df_shapes, shape_i_columns):
'''
Tesselate each shape path into one or more triangles.
Parameters
----------
df_shapes : pandas.DataFrame
Table containing vertices of shapes, one row per vertex, with the *at
least* the following columns:
- ``x``: The x-coordinate of the vertex.
- ``y``: The y-coordinate of the vertex.
shape_i_columns : str or list
Column(s) forming key to differentiate rows/vertices for each distinct
shape.
Returns
-------
pandas.DataFrame
Table where each row corresponds to a triangle vertex, with the following
columns:
- ``shape_i_columns[]``: The shape path index column(s).
- ``triangle_i``: The integer triangle index within each electrode path.
- ``vertex_i``: The integer vertex index within each triangle.
'''
frames = []
if isinstance(shape_i_columns, types.StringType):
shape_i_columns = [shape_i_columns]
for shape_i, df_path in df_shapes.groupby(shape_i_columns):
points_i = df_path[['x', 'y']].values
if (points_i[0] == points_i[-1]).all():
# XXX End point is the same as the start point (do not include it).
points_i = points_i[:-1]
triangulator = Triangulator(points_i)
if not isinstance(shape_i, (types.ListType, types.TupleType)):
shape_i = [shape_i]
for i, triangle_i in enumerate(triangulator.triangles()):
triangle_points_i = [shape_i + [i] + [j, x, y]
for j, (x, y) in enumerate(triangle_i)]
frames.extend(triangle_points_i)
frames = None if not frames else frames
return pd.DataFrame(frames, columns=shape_i_columns +
['triangle_i', 'vertex_i', 'x', 'y'])
| # coding: utf-8
import types
import pandas as pd
from .seidel import Triangulator
def tesselate_shapes_frame(df_shapes, shape_i_columns):
'''
Tesselate each shape path into one or more triangles.
Return `pandas.DataFrame` with columns storing the following fields
for each row (where each row corresponds to a triangle vertex):
- `shape_i_columns`: The shape path index column(s).
- `triangle_i`: The integer triangle index within each electrode path.
- `vertex_i`: The integer vertex index within each triangle.
'''
frames = []
if isinstance(shape_i_columns, types.StringType):
shape_i_columns = [shape_i_columns]
for shape_i, df_path in df_shapes.groupby(shape_i_columns):
points_i = df_path[['x', 'y']].values
if (points_i[0] == points_i[-1]).all():
# XXX End point is the same as the start point (do not include it).
points_i = points_i[:-1]
triangulator = Triangulator(points_i)
if not isinstance(shape_i, (types.ListType, types.TupleType)):
shape_i = [shape_i]
for i, triangle_i in enumerate(triangulator.triangles()):
triangle_points_i = [shape_i + [i] + [j, x, y]
for j, (x, y) in enumerate(triangle_i)]
frames.extend(triangle_points_i)
frames = None if not frames else frames
return pd.DataFrame(frames, columns=shape_i_columns +
['triangle_i', 'vertex_i', 'x', 'y'])
| Python | 0 |
9a744080454cd2577af0935ca9e8b5efccabeb8c | Fix typo hexify -> hexlify | stratatools/helper/rpi_daemon.py | stratatools/helper/rpi_daemon.py | #!/usr/bin/env python2
import argparse
import binascii
import pyudev
import sys
import time
import traceback
from stratatools import *
from stratatools import machine,cartridge,manager,crypto,checksum,cartridge_pb2
from google.protobuf.text_format import MessageToString, Merge
cartridge_manager = None
machine_number = None
cartridge_template = None
def read_bytes(path):
data = None
with open(path, "r") as f:
data = bytearray(f.read())
return data
def write_bytes(path, data):
with open(path, "w", buffering=0) as f:
f.write(data)
def on_new_cartridge(device):
eeprom_path = "/sys/" + device.device_path + "/eeprom"
eeprom_uid = read_bytes("/sys/" + device.device_path + "/id")
print("New device detected <" + binascii.hexlify(eeprom_uid) + ">.")
try:
c = cartridge_template
if c is None:
c = cartridge_manager.decode(machine_number, eeprom_uid, read_bytes(eeprom_path))
print("Device is a valid cartridge.")
c = cartridge.refill(c)
write_bytes(eeprom_path, cartridge_manager.encode(machine_number, eeprom_uid, c))
print("Refill complete!")
print("You can safely disconnect the cartridge.")
except Exception as e:
print("Error! verify machine type?")
print("Details:")
traceback.print_exc()
def read_cartridge_template(path):
catridge = None
with open(path, "r") as f:
cartridge = cartridge_pb2.Cartridge()
Merge(f.read(), cartridge)
return cartridge
def main():
global cartridge_manager
global machine_number
global cartridge_template
parser = argparse.ArgumentParser(description="Raspberry Pi Flasher Daemon")
parser.add_argument("-t", "--template", action="store", type=str, dest="template", help="Path to cartridge configuration")
parser.add_argument("machine_type", action="store", choices=machine.get_machine_types())
args = parser.parse_args()
cartridge_manager = manager.Manager(crypto.Desx_Crypto(), checksum.Crc16_Checksum())
machine_number = machine.get_number_from_type(args.machine_type)
cartridge_template = None
if args.template:
cartridge_template = read_cartridge_template(args.template)
print("Fill cartridge using template from <" + args.template + ">.")
context = pyudev.Context()
monitor = pyudev.Monitor.from_netlink(context)
monitor.filter_by('w1')
observer = pyudev.MonitorObserver(monitor, callback=on_new_cartridge)
observer.start()
try:
print("Listening to new device ... ^c to quit")
while True:
time.sleep(0.1)
except KeyboardInterrupt:
pass
observer.stop()
if __name__ == "__main__":
main()
| #!/usr/bin/env python2
import argparse
import binascii
import pyudev
import sys
import time
import traceback
from stratatools import *
from stratatools import machine,cartridge,manager,crypto,checksum,cartridge_pb2
from google.protobuf.text_format import MessageToString, Merge
cartridge_manager = None
machine_number = None
cartridge_template = None
def read_bytes(path):
data = None
with open(path, "r") as f:
data = bytearray(f.read())
return data
def write_bytes(path, data):
with open(path, "w", buffering=0) as f:
f.write(data)
def on_new_cartridge(device):
eeprom_path = "/sys/" + device.device_path + "/eeprom"
eeprom_uid = read_bytes("/sys/" + device.device_path + "/id")
print("New device detected <" + binascii.hexify(eeprom_uid) + ">.")
try:
c = cartridge_template
if c is None:
c = cartridge_manager.decode(machine_number, eeprom_uid, read_bytes(eeprom_path))
print("Device is a valid cartridge.")
c = cartridge.refill(c)
write_bytes(eeprom_path, cartridge_manager.encode(machine_number, eeprom_uid, c))
print("Refill complete!")
print("You can safely disconnect the cartridge.")
except Exception as e:
print("Error! verify machine type?")
print("Details:")
traceback.print_exc()
def read_cartridge_template(path):
catridge = None
with open(path, "r") as f:
cartridge = cartridge_pb2.Cartridge()
Merge(f.read(), cartridge)
return cartridge
def main():
global cartridge_manager
global machine_number
global cartridge_template
parser = argparse.ArgumentParser(description="Raspberry Pi Flasher Daemon")
parser.add_argument("-t", "--template", action="store", type=str, dest="template", help="Path to cartridge configuration")
parser.add_argument("machine_type", action="store", choices=machine.get_machine_types())
args = parser.parse_args()
cartridge_manager = manager.Manager(crypto.Desx_Crypto(), checksum.Crc16_Checksum())
machine_number = machine.get_number_from_type(args.machine_type)
cartridge_template = None
if args.template:
cartridge_template = read_cartridge_template(args.template)
print("Fill cartridge using template from <" + args.template + ">.")
context = pyudev.Context()
monitor = pyudev.Monitor.from_netlink(context)
monitor.filter_by('w1')
observer = pyudev.MonitorObserver(monitor, callback=on_new_cartridge)
observer.start()
try:
print("Listening to new device ... ^c to quit")
while True:
time.sleep(0.1)
except KeyboardInterrupt:
pass
observer.stop()
if __name__ == "__main__":
main()
| Python | 0.000336 |
dfee7e1c89df879f187921752485153fd6214445 | Fix typo | IPython/extensions/cythonmagic.py | IPython/extensions/cythonmagic.py | # -*- coding: utf-8 -*-
"""
The cython magic has been integrated into Cython itself,
which is now released in version 0.21.
cf github `Cython` organisation, `Cython` repo, under the
file `Cython/Build/IpythonMagic.py`
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011, IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from __future__ import print_function
import IPython.utils.version as version
# Cython is an optional dependency: fall back to None so the extension
# loader below can print a helpful message instead of crashing on import.
# Bug fix: narrow the bare `except:` (which also swallowed KeyboardInterrupt
# and SystemExit) to ImportError.
try:
    import Cython
except ImportError:
    Cython = None
try:
    # CythonMagics only exists in sufficiently recent Cython releases.
    from Cython.Build.IpythonMagic import CythonMagics
except ImportError:
    pass
## still load the magic in IPython 3.x, remove completely in future versions.
def load_ipython_extension(ip):
    """Load the extension in IPython."""
    # Always announce the deprecation, whether or not loading succeeds.
    print("""The Cython magic has been moved to the Cython package, hence """)
    print("""`%load_ext cythonmagic` is deprecated; please use `%load_ext Cython` instead.""")
    usable = Cython is not None and version.check_version(Cython.__version__, "0.21")
    if not usable:
        print("You need Cython version >=0.21 to use the Cython magic")
        return
    # Delegate to Cython's own extension loader for backward compatibility.
    print("""\nThough, because I am nice, I'll still try to load it for you this time.""")
    Cython.load_ipython_extension(ip)
| # -*- coding: utf-8 -*-
"""
The cython magic has been integrated into Cython itself,
which is now released in version 0.21.
cf github `Cython` organisation, `Cython` repo, under the
file `Cython/Build/IpythonMagic.py`
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011, IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from __future__ import print_function
import IPython.utils.version as version
# Cython is an optional dependency: fall back to None so the extension
# loader below can print a helpful message instead of crashing on import.
# Bug fix: narrow the bare `except:` (which also swallowed KeyboardInterrupt
# and SystemExit) to ImportError.
try:
    import Cython
except ImportError:
    Cython = None
try:
    # CythonMagics only exists in sufficiently recent Cython releases.
    from Cython.Build.IpythonMagic import CythonMagics
except ImportError:
    pass
## still load the magic in IPython 3.x, remove completely in future versions.
def load_ipython_extension(ip):
    """Load the extension in IPython.

    Prints a deprecation notice (the magic now lives in the Cython package
    itself) and, if a recent-enough Cython is installed, delegates to
    Cython's own extension loader.
    """
    # Typo fixes in the user-facing messages:
    # "has been move" -> "has been moved", "Please use" -> "please use".
    print("""The Cython magic has been moved to the Cython package, hence """)
    print("""`%load_ext cythonmagic` is deprecated; please use `%load_ext Cython` instead.""")
    if Cython is None or not version.check_version(Cython.__version__, "0.21"):
        print("You need Cython version >=0.21 to use the Cython magic")
        return
    print("""\nThough, because I am nice, I'll still try to load it for you this time.""")
    Cython.load_ipython_extension(ip)
| Python | 0.000073 |
9f56f877705bdc0171c3afddadc6d58fb867cefc | Fix PEP 8 issue. | test/units/modules/system/test_systemd.py | test/units/modules/system/test_systemd.py | import os
import tempfile
from ansible.compat.tests import unittest
from ansible.modules.system.systemd import parse_systemctl_show
class ParseSystemctlShowTestCase(unittest.TestCase):
def test_simple(self):
lines = [
'Type=simple',
'Restart=no',
'Requires=system.slice sysinit.target',
'Description=Blah blah blah',
]
parsed = parse_systemctl_show(lines)
self.assertEqual(parsed, {
'Type': 'simple',
'Restart': 'no',
'Requires': 'system.slice sysinit.target',
'Description': 'Blah blah blah',
})
def test_multiline_exec(self):
# This was taken from a real service that specified "ExecStart=/bin/echo foo\nbar"
lines = [
'Type=simple',
'ExecStart={ path=/bin/echo ; argv[]=/bin/echo foo',
'bar ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }',
'Description=blah',
]
parsed = parse_systemctl_show(lines)
self.assertEqual(parsed, {
'Type': 'simple',
'ExecStart': '{ path=/bin/echo ; argv[]=/bin/echo foo\n'
'bar ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }',
'Description': 'blah',
})
def test_single_line_with_brace(self):
lines = [
'Type=simple',
'Description={ this is confusing',
'Restart=no',
]
parsed = parse_systemctl_show(lines)
self.assertEqual(parsed, {
'Type': 'simple',
'Description': '{ this is confusing',
'Restart': 'no',
})
| import os
import tempfile
from ansible.compat.tests import unittest
from ansible.modules.system.systemd import parse_systemctl_show
class ParseSystemctlShowTestCase(unittest.TestCase):
def test_simple(self):
lines = [
'Type=simple',
'Restart=no',
'Requires=system.slice sysinit.target',
'Description=Blah blah blah',
]
parsed = parse_systemctl_show(lines)
self.assertEqual(parsed, {
'Type': 'simple',
'Restart': 'no',
'Requires': 'system.slice sysinit.target',
'Description': 'Blah blah blah',
})
def test_multiline_exec(self):
# This was taken from a real service that specified "ExecStart=/bin/echo foo\nbar"
lines = [
'Type=simple',
'ExecStart={ path=/bin/echo ; argv[]=/bin/echo foo',
'bar ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }',
'Description=blah',
]
parsed = parse_systemctl_show(lines)
self.assertEqual(parsed, {
'Type': 'simple',
'ExecStart': '{ path=/bin/echo ; argv[]=/bin/echo foo\nbar ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }',
'Description': 'blah',
})
def test_single_line_with_brace(self):
lines = [
'Type=simple',
'Description={ this is confusing',
'Restart=no',
]
parsed = parse_systemctl_show(lines)
self.assertEqual(parsed, {
'Type': 'simple',
'Description': '{ this is confusing',
'Restart': 'no',
})
| Python | 0 |
9bdb3a5e6e2f241a39269d992f1ca5c9f37440ae | add wlm source | importer/SeFornminSv.py | importer/SeFornminSv.py | from Monument import Monument
import importer_utils as utils
from os import path
MAPPING_DIR = "mappings"
class SeFornminSv(Monument):
    """Importer for Swedish ancient monuments (fornminnen) from WLM data.

    Wraps one database row and populates the underlying Monument item with
    labels, descriptions and statements, each referenced by the WLM source.
    NOTE(review): helpers such as add_label/add_statement/create_wlm_source
    are inherited from Monument and are not visible in this file.
    """
    def update_labels(self):
        # Prefer the monument's name; fall back to its RAÄ number.
        # NOTE(review): assumes self.namn is a string ('' when unnamed) --
        # len() would raise TypeError on None; confirm upstream guarantee.
        if len(self.namn) == 0:
            self.add_label("sv", self.raa_nr)
        else:
            self.add_label("sv", self.namn)
    def set_descriptions(self):
        """Add a Swedish description of the form "<type> i <landskap>"."""
        DESC_BASE = "fornminne"  # generic fallback when the row has no type
        description = ""
        if len(self.typ) > 0:
            description = self.typ.lower()
        else:
            description = DESC_BASE
        description += " i " + self.landskap
        self.add_description("sv", description)
    def set_raa(self):
        """Add the RAÄ number statement, referenced by the WLM source."""
        ref = self.wlm_source
        self.add_statement("raa-nr", self.raa_nr, refs=[ref])
    def set_adm_location(self):
        """Add the administrative-location (municipality) statement."""
        municip_dict = utils.load_json(path.join(
            MAPPING_DIR, "sweden_municipalities.json"))
        # The mapping file keys municipalities by their English names.
        if self.kommun == "Göteborg":
            municip_name = "Gothenburg"
        else:
            municip_name = self.kommun
        pattern = municip_name.lower() + " municipality"
        try:
            municipality = [x["item"] for x in municip_dict if x[
                "en"].lower() == pattern][0]
            ref = self.wlm_source
            self.add_statement("located_adm", municipality, refs=[ref])
        except IndexError:
            # No match in the mapping file: log it and leave the item as-is.
            print("Could not parse municipality: {}.".format(self.kommun))
            return
    def set_type(self):
        """
        Replace the original P31 rather than adding to it.
        """
        if self.has_non_empty_attribute("typ"):
            table = self.data_files["types"]["mappings"]
            type_to_search_for = self.typ.lower()
            try:
                special_type = [table[x]["items"]
                                for x in table
                                if x.lower() == type_to_search_for][0][0]
                self.substitute_statement("is", special_type)
            except IndexError:
                # Unknown type string: keep the generic P31 value.
                return
    def get_socken(self, socken_name, landskap_name):
        """Resolve a socken (parish) + landskap pair to a Wikidata item."""
        return utils.socken_to_q(socken_name, landskap_name)
    def set_location(self):
        """Add location statements from the free-text place and the socken."""
        if self.has_non_empty_attribute("plats"):
            if "[[" in self.plats:
                wikilinks = utils.get_wikilinks(self.plats)
                # Only trust the place field if it is a single wikilink.
                if len(wikilinks) == 1:
                    target_page = wikilinks[0].title
                    wd_item = utils.q_from_wikipedia("sv", target_page)
                    ref = self.wlm_source
                    self.add_statement("location", wd_item, refs=[ref])
        if self.has_non_empty_attribute("socken"):
            ref = self.wlm_source
            self.add_statement("location", self.get_socken(
                self.socken, self.landskap), refs=[ref])
    def set_inception(self):
        # TODO
        # This is messy and not super prioritized...
        return
    def set_monuments_all_id(self):
        # The raw row id doubles as the monuments_all identifier.
        self.monuments_all_id = self.id
    def __init__(self, db_row_dict, mapping, data_files, existing):
        Monument.__init__(self, db_row_dict, mapping, data_files, existing)
        # Ordering matters: self.wlm_source must be created before any of
        # the set_* methods below attach it as a reference.
        self.set_monuments_all_id()
        self.set_changed()
        self.wlm_source = self.create_wlm_source(self.monuments_all_id)
        self.set_country()
        self.set_is()
        self.set_heritage()
        self.set_source()
        self.set_registrant_url()
        self.set_image("bild")
        self.update_labels()
        self.set_descriptions()
        self.set_raa()
        self.set_adm_location()
        self.set_type()
        self.set_location()
        self.set_inception()
        # self.exists("sv", "artikel")
        self.set_coords(("lat", "lon"))
        self.set_commonscat()
        # self.exists_with_prop(mapping)
| from Monument import Monument
import importer_utils as utils
from os import path
MAPPING_DIR = "mappings"
class SeFornminSv(Monument):
def update_labels(self):
if len(self.namn) == 0:
self.add_label("sv", self.raa_nr)
else:
self.add_label("sv", self.namn)
def set_descriptions(self):
"""
"""
DESC_BASE = "fornminne"
description = ""
if len(self.typ) > 0:
description = self.typ.lower()
else:
description = DESC_BASE
description += " i " + self.landskap
self.add_description("sv", description)
def set_raa(self):
"""
With registrant_url as source, to test source uploading mechanism
"""
self.add_statement("raa-nr", self.raa_nr, refs=[self.registrant_url])
def set_adm_location(self):
municip_dict = utils.load_json(path.join(
MAPPING_DIR, "sweden_municipalities.json"))
if self.kommun == "Göteborg":
municip_name = "Gothenburg"
else:
municip_name = self.kommun
pattern = municip_name.lower() + " municipality"
try:
municipality = [x["item"] for x in municip_dict if x[
"en"].lower() == pattern][0]
self.add_statement("located_adm", municipality)
except IndexError:
print("Could not parse municipality: {}.".format(self.kommun))
return
def set_type(self):
"""
Replace the original P31 rather than adding to it.
"""
if self.has_non_empty_attribute("typ"):
table = self.data_files["types"]["mappings"]
type_to_search_for = self.typ.lower()
try:
special_type = [table[x]["items"]
for x in table
if x.lower() == type_to_search_for][0][0]
self.substitute_statement("is", special_type)
except IndexError:
return
def get_socken(self, socken_name, landskap_name):
return utils.socken_to_q(socken_name, landskap_name)
def set_location(self):
if self.has_non_empty_attribute("plats"):
if "[[" in self.plats:
wikilinks = utils.get_wikilinks(self.plats)
if len(wikilinks) == 1:
target_page = wikilinks[0].title
wd_item = utils.q_from_wikipedia("sv", target_page)
self.add_statement("location", wd_item)
if self.has_non_empty_attribute("socken"):
self.add_statement("location", self.get_socken(
self.socken, self.landskap))
def set_inception(self):
# TODO
# This is messy and not super prioritized...
return
def __init__(self, db_row_dict, mapping, data_files, existing):
Monument.__init__(self, db_row_dict, mapping, data_files, existing)
self.set_image("bild")
self.update_labels()
self.set_descriptions()
self.set_raa()
self.set_adm_location()
self.set_type()
self.set_location()
self.set_inception()
# self.exists("sv", "artikel")
self.set_coords(("lat", "lon"))
self.set_commonscat()
# self.exists_with_prop(mapping)
| Python | 0 |
ce1e52336fa298d52aac264d8ccd3887fc8e18e4 | Bump binaryen to version 81. | src/funfuzz/js/with_binaryen.py | src/funfuzz/js/with_binaryen.py | # coding=utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
"""Run seeds with binaryen to get a wasm file,
then run the shell with the translated wasm binary using a js file wrapper
"""
import io
from pathlib import Path
import platform
import subprocess
import tarfile
import threading
from time import sleep
import fasteners
import requests
from ..util import sm_compile_helpers
BINARYEN_VERSION = 81
BINARYEN_URL = (f"https://github.com/WebAssembly/binaryen/releases/download/version_{BINARYEN_VERSION}/"
f"binaryen-version_{BINARYEN_VERSION}-{platform.uname()[4]}-linux.tar.gz")
def ensure_binaryen(url, version):
    """Download and use a compiled binaryen to generate WebAssembly files if it does not exist.
    Args:
        url (str): URL of the compressed binaryen binary package
        version (int): Version of the compressed binaryen binary package
    Returns:
        Path: Path of the extracted wasm-opt binary
    """
    shell_cache = sm_compile_helpers.ensure_cache_dir(Path.home())
    # The code expects the release tarball to unpack into
    # binaryen-version_<n>/ inside the shell cache.
    wasmopt_path = Path(shell_cache / f"binaryen-version_{version}" / "wasm-opt").resolve()
    sleep_time = 2
    # NOTE(review): t_lock is a fresh per-call Lock, so try_lock always
    # acquires it and the "wait for another thread" branch below never
    # runs -- confirm whether a shared module-level lock was intended.
    t_lock = threading.Lock()
    with fasteners.try_lock(t_lock) as gotten:
        while not wasmopt_path.is_file():
            if gotten:
                # Download the whole release tarball into memory, then unpack.
                with requests.get(url, allow_redirects=True, stream=True) as binaryen_gzip_request:
                    try:
                        # SECURITY: extractall() trusts member paths in the
                        # archive; a crafted tarball could write outside
                        # shell_cache (path traversal).
                        with tarfile.open(fileobj=io.BytesIO(binaryen_gzip_request.content), mode="r:gz") as f:
                            f.extractall(str(shell_cache.resolve()))
                    except OSError:
                        # Give up; the returned path may then not exist.
                        print("binaryen tarfile threw an OSError")
                        break
            # Exponential backoff before re-checking for wasm-opt.
            sleep(sleep_time)
            sleep_time *= 2
    return wasmopt_path
def wasmopt_run(seed):
    """Run binaryen's wasm-opt in --translate-to-fuzz mode on the seed.
    Args:
        seed (Path): Generated jsfunfuzz file (acts as the seed for binaryen)
    Returns:
        tuple: (Path of the emitted JS wrapper file, Path of the .wasm file)
    Raises:
        AssertionError: if not on Linux, the seed is missing, or wasm-opt
            did not produce both output files.
    """
    assert platform.system() == "Linux"
    assert seed.is_file()
    seed_wrapper_output = seed.resolve().with_suffix(".wrapper")
    seed_wasm_output = seed.resolve().with_suffix(".wasm")
    sleep_time = 2
    # NOTE(review): t_lock is created per call, so try_lock always succeeds
    # and the backoff branch below is effectively dead -- confirm whether a
    # shared module-level lock was intended.
    t_lock = threading.Lock()
    with fasteners.try_lock(t_lock) as gotten:
        while True:
            if gotten:
                try:
                    subprocess.run([ensure_binaryen(BINARYEN_URL, BINARYEN_VERSION),
                                    seed,
                                    "--translate-to-fuzz",
                                    "--disable-simd",
                                    "--output", seed_wasm_output,
                                    f"--emit-js-wrapper={seed_wrapper_output}"], check=True)
                except subprocess.CalledProcessError:
                    print("wasm-opt aborted with a CalledProcessError")
                # Bug fix: previously only the failure path broke out of the
                # loop, so a *successful* run re-invoked wasm-opt forever and
                # the asserts below were unreachable on success.
                break
            sleep(sleep_time)
            sleep_time *= 2
    assert seed_wrapper_output.is_file()
    assert seed_wasm_output.is_file()
    return (seed_wrapper_output, seed_wasm_output)
| # coding=utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
"""Run seeds with binaryen to get a wasm file,
then run the shell with the translated wasm binary using a js file wrapper
"""
import io
from pathlib import Path
import platform
import subprocess
import tarfile
import threading
from time import sleep
import fasteners
import requests
from ..util import sm_compile_helpers
BINARYEN_VERSION = 72
BINARYEN_URL = (f"https://github.com/WebAssembly/binaryen/releases/download/version_{BINARYEN_VERSION}/"
f"binaryen-version_{BINARYEN_VERSION}-{platform.uname()[4]}-linux.tar.gz")
def ensure_binaryen(url, version):
"""Download and use a compiled binaryen to generate WebAssembly files if it does not exist.
Args:
url (str): URL of the compressed binaryen binary package
version (int): Version of the compressed binaryen binary package
Returns:
Path: Path of the extracted wasm-opt binary
"""
shell_cache = sm_compile_helpers.ensure_cache_dir(Path.home())
wasmopt_path = Path(shell_cache / f"binaryen-version_{version}" / "wasm-opt").resolve()
sleep_time = 2
t_lock = threading.Lock()
with fasteners.try_lock(t_lock) as gotten:
while not wasmopt_path.is_file():
if gotten:
with requests.get(url, allow_redirects=True, stream=True) as binaryen_gzip_request:
try:
with tarfile.open(fileobj=io.BytesIO(binaryen_gzip_request.content), mode="r:gz") as f:
f.extractall(str(shell_cache.resolve()))
except OSError:
print("binaryen tarfile threw an OSError")
break
sleep(sleep_time)
sleep_time *= 2
return wasmopt_path
def wasmopt_run(seed):
"""Runs binaryen with the generated seed.
Args:
seed (Path): Generated jsfunfuzz file (acts as the seed for binaryen)
Returns:
bool: Returns True on successful wasm-opt execution, False otherwise
"""
assert platform.system() == "Linux"
assert seed.is_file()
seed_wrapper_output = seed.resolve().with_suffix(".wrapper")
seed_wasm_output = seed.resolve().with_suffix(".wasm")
sleep_time = 2
t_lock = threading.Lock()
with fasteners.try_lock(t_lock) as gotten:
while True:
if gotten:
try:
subprocess.run([ensure_binaryen(BINARYEN_URL, BINARYEN_VERSION),
seed,
"--translate-to-fuzz",
"--disable-simd",
"--output", seed_wasm_output,
f"--emit-js-wrapper={seed_wrapper_output}"], check=True)
except subprocess.CalledProcessError:
print("wasm-opt aborted with a CalledProcessError")
break
sleep(sleep_time)
sleep_time *= 2
assert seed_wrapper_output.is_file()
assert seed_wasm_output.is_file()
return (seed_wrapper_output, seed_wasm_output)
| Python | 0 |
52c6efa0a84334522cdd76e1e85ffe6bf601ea02 | Annotate commands/export_single_user.py. | zerver/management/commands/export_single_user.py | zerver/management/commands/export_single_user.py | from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ValidationError
import os
import shutil
import subprocess
import tempfile
import ujson
from zerver.lib.export import do_export_user
from zerver.models import UserProfile, get_user_profile_by_email
class Command(BaseCommand):
    help = """Exports message data from a Zulip user
This command exports the message history for a single Zulip user.
Note that this only exports the user's message history and
realm-public metadata needed to understand it; it does nothing
with (for example) any bots owned by the user."""
    def add_arguments(self, parser):
        # type: (ArgumentParser) -> None
        """Register the positional email and the optional output directory."""
        parser.add_argument('email', metavar='<email>', type=str, help="email of user to export")
        parser.add_argument('--output', dest='output_dir', action="store", default=None,
                            help='Directory to write exported data to.')
    def handle(self, *args, **options):
        # type: (*Any, **Any) -> None
        """Export one user's messages to a directory, then tar it up."""
        email = options["email"]
        try:
            user_profile = get_user_profile_by_email(email)
        except UserProfile.DoesNotExist:
            raise CommandError("No such user.")
        # Fall back to a fresh temp directory; always start from empty.
        export_dir = options["output_dir"]
        if export_dir is None:
            export_dir = tempfile.mkdtemp(prefix="/tmp/zulip-export-")
        if os.path.exists(export_dir):
            shutil.rmtree(export_dir)
        os.makedirs(export_dir)
        print("Exporting user %s" % (user_profile.email,))
        do_export_user(user_profile, export_dir)
        print("Finished exporting to %s; tarring" % (export_dir,))
        tarball_path = export_dir.rstrip('/') + '.tar.gz'
        subprocess.check_call(["tar", "--strip-components=1", "-czf", tarball_path, export_dir])
        print("Tarball written to %s" % (tarball_path,))
| from __future__ import absolute_import
from __future__ import print_function
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ValidationError
import os
import shutil
import subprocess
import tempfile
import ujson
from zerver.lib.export import do_export_user
from zerver.models import UserProfile, get_user_profile_by_email
class Command(BaseCommand):
help = """Exports message data from a Zulip user
This command exports the message history for a single Zulip user.
Note that this only exports the user's message history and
realm-public metadata needed to understand it; it does nothing
with (for example) any bots owned by the user."""
def add_arguments(self, parser):
parser.add_argument('email', metavar='<email>', type=str,
help="email of user to export")
parser.add_argument('--output',
dest='output_dir',
action="store",
default=None,
help='Directory to write exported data to.')
def handle(self, *args, **options):
try:
user_profile = get_user_profile_by_email(options["email"])
except UserProfile.DoesNotExist:
raise CommandError("No such user.")
output_dir = options["output_dir"]
if output_dir is None:
output_dir = tempfile.mkdtemp(prefix="/tmp/zulip-export-")
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
print("Exporting user %s" % (user_profile.email,))
do_export_user(user_profile, output_dir)
print("Finished exporting to %s; tarring" % (output_dir,))
tarball_path = output_dir.rstrip('/') + '.tar.gz'
subprocess.check_call(["tar", "--strip-components=1", "-czf", tarball_path, output_dir])
print("Tarball written to %s" % (tarball_path,))
| Python | 0 |
2a2309bb4f3a8ae231106123855959d44a0e7551 | Fix linter | cartoframes/viz/helpers/size_continuous_layer.py | cartoframes/viz/helpers/size_continuous_layer.py | from __future__ import absolute_import
from ..layer import Layer
def size_continuous_layer(source, value, title='', size=None, color=None):
    """Build a Layer styled with a continuous size ramp on *value*.

    Assembles the style, popup and legend dicts in named locals before
    constructing the Layer; output is identical to building them inline.
    """
    point_sizes = size or [2, 50]
    line_widths = size or [1, 10]
    style = {
        'point': {
            'width': 'ramp(linear(sqrt(${0}), sqrt(globalMin(${0})), sqrt(globalMax(${0}))), {1})'.format(
                value, point_sizes),
            'color': 'opacity({0}, 0.8)'.format(color or '#F46D43')
        },
        'line': {
            'width': 'ramp(linear(${0}), {1})'.format(value, line_widths),
            'color': 'opacity({0}, 0.8)'.format(color or '#4CC8A3')
        }
    }
    popup = {
        'hover': {
            'title': title or value,
            'value': '$' + value
        }
    }
    legend = {
        'type': {
            'point': 'size-continuous-point',
            'line': 'size-continuous-line',
            'polygon': 'size-continuous-polygon'
        },
        'title': title or value,
        'description': ''
    }
    return Layer(source, style=style, popup=popup, legend=legend)
| from __future__ import absolute_import
from ..layer import Layer
def size_continuous_layer(source, value, title='', size=None, color=None):
return Layer(
source,
style={
'point': {
'width': 'ramp(linear(sqrt(${0}), sqrt(globalMin(${0})), sqrt(globalMax(${0}))), {1})'.format(value, size or [2, 50]),
'color': 'opacity({0}, 0.8)'.format(color or '#F46D43')
},
'line': {
'width': 'ramp(linear(${0}), {1})'.format(value, size or [1, 10]),
'color': 'opacity({0}, 0.8)'.format(color or '#4CC8A3')
}
},
popup={
'hover': {
'title': title or value,
'value': '$' + value
}
},
legend={
'type': {
'point': 'size-continuous-point',
'line': 'size-continuous-line',
'polygon': 'size-continuous-polygon'
},
'title': title or value,
'description': ''
}
)
| Python | 0.000002 |
c426ae514227adaf9dd86f6ada6ce05bc76298c2 | Make portal_config fetch config from a URL | catkin/src/portal_config/scripts/serve_config.py | catkin/src/portal_config/scripts/serve_config.py | #!/usr/bin/env python
import rospy
import urllib2
from portal_config.srv import *
# XXX TODO: return an error if the config file isn't valid JSON
class ConfigRequestHandler():
    """Serves the portal config document fetched from a configured URL."""
    def __init__(self, url):
        self.url = url
    def get_config(self):
        """Fetch and return the raw config document from self.url."""
        return urllib2.urlopen(self.url).read()
    def handle_request(self, request):
        """ROS service callback: respond with the freshly fetched config."""
        return PortalConfigResponse(self.get_config())
def main():
    """Initialise the portal_config node and serve config queries."""
    rospy.init_node('portal_config')
    config_url = rospy.get_param('~url', 'http://lg-head/portal/config.json')
    handler = ConfigRequestHandler(config_url)
    service = rospy.Service('/portal_config/query', PortalConfig, handler.handle_request)
    rospy.spin()
if __name__ == '__main__':
main()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 smartindent
| #!/usr/bin/env python
import rospy
from portal_config.srv import *
class ConfigRequestHandler():
def __init__(self, url):
self.url = url
def get_config(self):
return '{"foo": "bar"}'
def handle_request(self, request):
config = self.get_config()
return PortalConfigResponse(config)
def main():
rospy.init_node('portal_config')
url = rospy.get_param('~url', 'http://lg-head/portal/config.json')
handler = ConfigRequestHandler(url)
s = rospy.Service(
'/portal_config/query',
PortalConfig,
handler.handle_request
)
rospy.spin()
if __name__ == '__main__':
main()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| Python | 0.000002 |
3023600701d64da42534fdc0050c473450e9e5f7 | Bump the dart version used on the infra bots to 1.14.1 | infra/download_dart_sdk.py | infra/download_dart_sdk.py | #!/usr/bin/python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pulls down the current dart sdk to third_party/dart-sdk/.
You can manually force this to run again by removing
third_party/dart-sdk/STAMP_FILE, which contains the URL of the SDK that
was downloaded. Rolling works by updating LINUX_64_SDK to a new URL.
"""
import os
import shutil
import subprocess
import sys
# How to roll the dart sdk: Just change this url! We write this to the stamp
# file after we download, and then check the stamp file for differences.
SDK_URL_BASE = ('http://gsdview.appspot.com/dart-archive/channels/stable/raw/'
'1.14.1/sdk/')
LINUX_64_SDK = 'dartsdk-linux-x64-release.zip'
MACOS_64_SDK = 'dartsdk-macos-x64-release.zip'
# Path constants. (All of these should be absolute paths.)
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
DART_SDK_DIR = os.path.join(THIS_DIR, 'dart-sdk')
STAMP_FILE = os.path.join(DART_SDK_DIR, 'STAMP_FILE')
LIBRARIES_FILE = os.path.join(DART_SDK_DIR,'dart-sdk',
'lib', '_internal', 'libraries.dart')
def RunCommand(command, fail_hard=True):
"""Run command and return success (True) or failure; or if fail_hard is
True, exit on failure."""
print 'Running %s' % (str(command))
if subprocess.call(command, shell=False) == 0:
return True
print 'Failed.'
if fail_hard:
sys.exit(1)
return False
def main():
  """Ensure third_party/dart-sdk matches SDK_URL_BASE; shell-style exit code."""
  # Only get the SDK if we don't have a stamp for or have an out of date stamp
  # file.
  get_sdk = False
  if sys.platform.startswith('linux'):
    sdk_url = SDK_URL_BASE + LINUX_64_SDK
    output_file = os.path.join(DART_SDK_DIR, LINUX_64_SDK)
  elif sys.platform.startswith('darwin'):
    sdk_url = SDK_URL_BASE + MACOS_64_SDK
    output_file = os.path.join(DART_SDK_DIR, MACOS_64_SDK)
  else:
    print "Platform not supported"
    return 1
  if not os.path.exists(STAMP_FILE):
    get_sdk = True
  else:
    # Get the contents of the stamp file.
    with open(STAMP_FILE, "r") as stamp_file:
      stamp_url = stamp_file.read().replace('\n', '')
    if stamp_url != sdk_url:
      get_sdk = True
  if get_sdk:
    # Completely remove all traces of the previous SDK.
    if os.path.exists(DART_SDK_DIR):
      shutil.rmtree(DART_SDK_DIR)
    os.mkdir(DART_SDK_DIR)
    # Download the Linux x64 based Dart SDK.
    # '-C -': Resume transfer if possible.
    # '--location': Follow Location: redirects.
    # '-o': Output file.
    curl_command = ['curl',
                    '-C', '-',
                    '--location',
                    '-o', output_file,
                    sdk_url]
    if not RunCommand(curl_command, fail_hard=False):
      print "Failed to get dart sdk from server."
      return 1
    # Write our stamp file so we don't redownload the sdk.
    with open(STAMP_FILE, "w") as stamp_file:
      stamp_file.write(sdk_url)
  # The zip is unpacked on every run, even when the stamp matched.
  # NOTE(review): if the stamp matched, output_file must survive from a
  # previous download (it is never deleted) or this unzip fails -- confirm.
  unzip_command = ['unzip', '-o', '-q', output_file, '-d', DART_SDK_DIR]
  if not RunCommand(unzip_command, fail_hard=False):
    print "Failed to unzip the dart sdk."
    return 1
  return 0
if __name__ == '__main__':
sys.exit(main())
| #!/usr/bin/python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pulls down the current dart sdk to third_party/dart-sdk/.
You can manually force this to run again by removing
third_party/dart-sdk/STAMP_FILE, which contains the URL of the SDK that
was downloaded. Rolling works by updating LINUX_64_SDK to a new URL.
"""
import os
import shutil
import subprocess
import sys
# How to roll the dart sdk: Just change this url! We write this to the stamp
# file after we download, and then check the stamp file for differences.
SDK_URL_BASE = ('http://gsdview.appspot.com/dart-archive/channels/stable/raw/'
'1.13.0/sdk/')
LINUX_64_SDK = 'dartsdk-linux-x64-release.zip'
MACOS_64_SDK = 'dartsdk-macos-x64-release.zip'
# Path constants. (All of these should be absolute paths.)
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
DART_SDK_DIR = os.path.join(THIS_DIR, 'dart-sdk')
STAMP_FILE = os.path.join(DART_SDK_DIR, 'STAMP_FILE')
LIBRARIES_FILE = os.path.join(DART_SDK_DIR,'dart-sdk',
'lib', '_internal', 'libraries.dart')
def RunCommand(command, fail_hard=True):
"""Run command and return success (True) or failure; or if fail_hard is
True, exit on failure."""
print 'Running %s' % (str(command))
if subprocess.call(command, shell=False) == 0:
return True
print 'Failed.'
if fail_hard:
sys.exit(1)
return False
def main():
# Only get the SDK if we don't have a stamp for or have an out of date stamp
# file.
get_sdk = False
if sys.platform.startswith('linux'):
sdk_url = SDK_URL_BASE + LINUX_64_SDK
output_file = os.path.join(DART_SDK_DIR, LINUX_64_SDK)
elif sys.platform.startswith('darwin'):
sdk_url = SDK_URL_BASE + MACOS_64_SDK
output_file = os.path.join(DART_SDK_DIR, MACOS_64_SDK)
else:
print "Platform not supported"
return 1
if not os.path.exists(STAMP_FILE):
get_sdk = True
else:
# Get the contents of the stamp file.
with open(STAMP_FILE, "r") as stamp_file:
stamp_url = stamp_file.read().replace('\n', '')
if stamp_url != sdk_url:
get_sdk = True
if get_sdk:
# Completely remove all traces of the previous SDK.
if os.path.exists(DART_SDK_DIR):
shutil.rmtree(DART_SDK_DIR)
os.mkdir(DART_SDK_DIR)
# Download the Linux x64 based Dart SDK.
# '-C -': Resume transfer if possible.
# '--location': Follow Location: redirects.
# '-o': Output file.
curl_command = ['curl',
'-C', '-',
'--location',
'-o', output_file,
sdk_url]
if not RunCommand(curl_command, fail_hard=False):
print "Failed to get dart sdk from server."
return 1
# Write our stamp file so we don't redownload the sdk.
with open(STAMP_FILE, "w") as stamp_file:
stamp_file.write(sdk_url)
unzip_command = ['unzip', '-o', '-q', output_file, '-d', DART_SDK_DIR]
if not RunCommand(unzip_command, fail_hard=False):
print "Failed to unzip the dart sdk."
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
| Python | 0 |
cb96065fcf1f31dfbecfbb064c9414ffbc69217f | Remove all relative imports. We have always been at war with relative imports. | forms.py | forms.py | """
GB-specific Form helpers
"""
from __future__ import absolute_import
import re
from django.contrib.localflavor.gb.gb_regions import GB_NATIONS_CHOICES, GB_REGION_CHOICES
from django.forms.fields import CharField, Select
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
class GBPostcodeField(CharField):
    """
    A form field that validates its input is a UK postcode.
    The regular expression used is sourced from the schema for British Standard
    BS7666 address types: http://www.govtalk.gov.uk/gdsc/schemas/bs7666-v2-0.xsd
    The value is uppercased and a space added in the correct place, if required.
    """
    default_error_messages = {
        'invalid': _(u'Enter a valid postcode.'),
    }
    outcode_pattern = '[A-PR-UWYZ]([0-9]{1,2}|([A-HIK-Y][0-9](|[0-9]|[ABEHMNPRVWXY]))|[0-9][A-HJKSTUW])'
    incode_pattern = '[0-9][ABD-HJLNP-UW-Z]{2}'
    postcode_regex = re.compile(r'^(GIR 0AA|%s %s)$' % (outcode_pattern, incode_pattern))
    space_regex = re.compile(r' *(%s)$' % incode_pattern)
    def clean(self, value):
        """Normalise then validate; returns the canonical postcode form."""
        value = super(GBPostcodeField, self).clean(value)
        if value == u'':
            return value
        # Uppercase, trim, and force exactly one space before the incode.
        candidate = self.space_regex.sub(r' \1', value.upper().strip())
        if not self.postcode_regex.search(candidate):
            raise ValidationError(self.error_messages['invalid'])
        return candidate
class GBCountySelect(Select):
    """
    A Select widget that uses a list of UK Counties/Regions as its choices.
    """
    def __init__(self, attrs=None):
        # Choices come from the module-level GB_REGION_CHOICES import.
        super(GBCountySelect, self).__init__(attrs, choices=GB_REGION_CHOICES)
class GBNationSelect(Select):
    """
    A Select widget that uses a list of UK Nations as its choices.
    """
    def __init__(self, attrs=None):
        # Choices come from the module-level GB_NATIONS_CHOICES import.
        super(GBNationSelect, self).__init__(attrs, choices=GB_NATIONS_CHOICES)
| """
GB-specific Form helpers
"""
import re
from django.forms.fields import CharField, Select
from django.forms import ValidationError
from django.utils.translation import ugettext_lazy as _
class GBPostcodeField(CharField):
"""
A form field that validates its input is a UK postcode.
The regular expression used is sourced from the schema for British Standard
BS7666 address types: http://www.govtalk.gov.uk/gdsc/schemas/bs7666-v2-0.xsd
The value is uppercased and a space added in the correct place, if required.
"""
default_error_messages = {
'invalid': _(u'Enter a valid postcode.'),
}
outcode_pattern = '[A-PR-UWYZ]([0-9]{1,2}|([A-HIK-Y][0-9](|[0-9]|[ABEHMNPRVWXY]))|[0-9][A-HJKSTUW])'
incode_pattern = '[0-9][ABD-HJLNP-UW-Z]{2}'
postcode_regex = re.compile(r'^(GIR 0AA|%s %s)$' % (outcode_pattern, incode_pattern))
space_regex = re.compile(r' *(%s)$' % incode_pattern)
def clean(self, value):
value = super(GBPostcodeField, self).clean(value)
if value == u'':
return value
postcode = value.upper().strip()
# Put a single space before the incode (second part).
postcode = self.space_regex.sub(r' \1', postcode)
if not self.postcode_regex.search(postcode):
raise ValidationError(self.error_messages['invalid'])
return postcode
class GBCountySelect(Select):
"""
A Select widget that uses a list of UK Counties/Regions as its choices.
"""
def __init__(self, attrs=None):
from gb_regions import GB_REGION_CHOICES
super(GBCountySelect, self).__init__(attrs, choices=GB_REGION_CHOICES)
class GBNationSelect(Select):
"""
A Select widget that uses a list of UK Nations as its choices.
"""
def __init__(self, attrs=None):
from gb_regions import GB_NATIONS_CHOICES
super(GBNationSelect, self).__init__(attrs, choices=GB_NATIONS_CHOICES)
| Python | 0.002027 |
c78c6f7e9cc305b96eb35a5a0c8f7353db5a3ed2 | Update _share.py | tago/account/_share.py | tago/account/_share.py | import requests # Used to make HTTP requests
import os
import json
API_TAGO = os.environ.get('TAGO_SERVER') or 'https://api.tago.io'
def invite(type, ref_id, data, default_options):
    """POST an invitation to share a TagoIO resource.

    Args:
        type: resource-type segment of the share URL (e.g. 'dashboard').
        ref_id: id of the resource being shared; must be non-empty.
        data: dict payload; must contain a non-empty 'email'.
        default_options: HTTP headers (auth) for the request.

    Returns:
        Decoded JSON response from the TagoIO API.

    Raises:
        ValueError: if ref_id or data['email'] is missing or empty.
    """
    data = data if data else {}
    if ref_id is None or ref_id == '':
        raise ValueError('ref_id must be set')
    # Use .get() so a *missing* 'email' key raises the documented
    # ValueError rather than an unexpected KeyError (previously a None or
    # empty data dict always hit that KeyError path).
    if not data.get('email'):
        raise ValueError('email must be set in data')
    return requests.post('{api_endpoint}/share/{type}/{ref_id}'.format(api_endpoint=API_TAGO, type=type, ref_id=ref_id), headers=default_options, data=json.dumps(data)).json()
def edit(type, share_id, data, default_options):
    """PUT updated settings for an existing share (type is unused, kept for API symmetry)."""
    payload = data if data else {}
    if share_id is None or share_id == '':
        raise ValueError('share_id must be set')
    url = '{api_endpoint}/share/{share_id}'.format(api_endpoint=API_TAGO, share_id=share_id)
    return requests.put(url, headers=default_options, data=json.dumps(payload)).json()
def list(type, ref_id, default_options):
    """GET the existing shares of a resource (note: intentionally shadows builtins.list)."""
    if ref_id is None or ref_id == '':
        raise ValueError('ref_id must be set')
    url = '{api_endpoint}/share/{type}/{ref_id}'.format(api_endpoint=API_TAGO, type=type, ref_id=ref_id)
    return requests.get(url, headers=default_options).json()
def remove(type, share_id, default_options):
    """DELETE an existing share (type is unused, kept for API symmetry)."""
    if share_id is None or share_id == '':
        raise ValueError('share_id must be set')
    url = '{api_endpoint}/share/{share_id}'.format(api_endpoint=API_TAGO, share_id=share_id)
    return requests.delete(url, headers=default_options).json()
| import requests # Used to make HTTP requests
import os
import json
API_TAGO = os.environ.get('TAGO_SERVER') or 'https://api.tago.io'
def invite(type, ref_id, data, default_options):
data = data if data else {}
if ref_id is None or ref_id == '':
raise ValueError('ref_id must be set')
elif data['email'] is None or data['email'] == '':
raise ValueError('email must be set in data')
return requests.post('{api_endpoint}/share/{type}/{ref_id}'.format(api_endpoint=API_TAGO,type=type,ref_id=ref_id), headers=default_options, data=json.dumps(data)).json()
def edit(type, share_id, data, default_options):
data = data if data else {}
if share_id is None or share_id == '':
raise ValueError('share_id must be set')
return requests.put('{api_endpoint}/share/{share_id}'.format(api_endpoint=API_TAGO,share_id=share_id), headers=default_options, data=json.dumps(data)).json()
def list(type, ref_id, default_options):
if ref_id is None or ref_id == '':
raise ValueError('ref_id must be set')
return requests.get('{api_endpoint}/share/{type}/{ref_id}'.format(api_endpoint=API_TAGO,type=type, ref_id=ref_id), headers=default_options).json()
def remove(type, share_id, default_options):
if share_id is None or share_id == '':
raise ValueError('share_id must be set')
return requests.delete('{api_endpoint}/share/{share_id}'.format(api_endpoint=API_TAGO,share_id=share_id), headers=default_options).json()
# Not sure what exports do...
| Python | 0 |
f2a2f6268e11086ece3c311dd3e93e11672de03b | Comment addition. | Lib/distutils/command/build_py.py | Lib/distutils/command/build_py.py | """distutils.command.build_py
Implements the Distutils 'build_py' command."""
# created 1999/03/08, Greg Ward
__rcsid__ = "$Id$"
import string, os
from distutils.core import Command
from distutils.errors import *
from distutils.util import mkpath, newer, make_file, copy_file
class BuildPy (Command):

    """Implement the Distutils 'build_py' command.

    Copies the "pure Python" modules named in self.modules (dotted module
    names) from the source tree into the build directory self.dir, under
    the optional self.package prefix.  (Python 2-era implementation.)
    """

    # (long option, short option, help text) triples for this command.
    options = [('dir=', 'd', "directory for platform-shared files"),
              ]

    def set_default_options (self):
        """Initialise every option to None so unset ones can be detected."""
        self.dir = None
        self.modules = None
        self.package = None

    def set_final_options (self):
        """Fill in unset options, pulling 'dir' from the 'build' command."""
        self.set_undefined_options ('build',
                                    ('libdir', 'dir'))
        # 'package' is an alias option in Distribution (hmmm, we
        # really should change to "pull" options from Distribution
        # rather than "pushing" them out to commands...)
        if self.package is None:
            self.package = ''

    def run (self):
        """Copy each listed module file into the build tree.

        Raises DistutilsFileError if any listed module's source file
        is missing.
        """
        # XXX copy_file by default preserves all stat info -- mode, atime,
        # and mtime. IMHO this is the right thing to do, but perhaps it
        # should be an option -- in particular, a site administrator might
        # want installed files to reflect the time of installation rather
        # than the last modification time before the installed release.
        # XXX copy_file does *not* preserve MacOS-specific file metadata.
        # If this is a problem for building/installing Python modules, then
        # we'll have to fix copy_file. (And what about installing scripts,
        # when the time comes for that -- does MacOS use its special
        # metadata to know that a file is meant to be interpreted by
        # Python?)
        self.set_final_options ()
        infiles = []
        outfiles = []
        missing = []
        # Loop over the list of "pure Python" modules, deriving
        # input and output filenames and checking for missing
        # input files.
        # it's ok not to have *any* py files, right?
        if not self.modules:
            return
        # XXX we should allow for wildcards, so eg. the Distutils setup.py
        # file would just have to say
        #   py_modules = ['distutils.*', 'distutils.command.*']
        # without having to list each one explicitly.
        for m in self.modules:
            # Dotted module name -> relative path, e.g. 'a.b' -> 'a/b.py'.
            fn = apply (os.path.join, tuple (string.split (m, '.'))) + '.py'
            if not os.path.exists (fn):
                missing.append (fn)
            else:
                infiles.append (fn)
                outfiles.append (os.path.join (self.dir, self.package, fn))
        # Blow up if any input files were not found.
        if missing:
            raise DistutilsFileError, \
                  "missing files: " + string.join (missing, ' ')
        # Loop over the list of input files, copying them to their
        # temporary (build) destination.
        created = {}
        for i in range (len (infiles)):
            outdir = os.path.split (outfiles[i])[0]
            # Create each output directory only once.
            if not created.get(outdir):
                self.mkpath (outdir)
                created[outdir] = 1
            self.copy_file (infiles[i], outfiles[i])

# end class BuildPy
| """distutils.command.build_py
Implements the Distutils 'build_py' command."""
# created 1999/03/08, Greg Ward
__rcsid__ = "$Id$"
import string, os
from distutils.core import Command
from distutils.errors import *
from distutils.util import mkpath, newer, make_file, copy_file
class BuildPy (Command):
options = [('dir=', 'd', "directory for platform-shared files"),
]
def set_default_options (self):
self.dir = None
self.modules = None
self.package = None
def set_final_options (self):
self.set_undefined_options ('build',
('libdir', 'dir'))
if self.package is None:
self.package = ''
def run (self):
# XXX copy_file by default preserves all stat info -- mode, atime,
# and mtime. IMHO this is the right thing to do, but perhaps it
# should be an option -- in particular, a site administrator might
# want installed files to reflect the time of installation rather
# than the last modification time before the installed release.
# XXX copy_file does *not* preserve MacOS-specific file metadata.
# If this is a problem for building/installing Python modules, then
# we'll have to fix copy_file. (And what about installing scripts,
# when the time comes for that -- does MacOS use its special
# metadata to know that a file is meant to be interpreted by
# Python?)
self.set_final_options ()
infiles = []
outfiles = []
missing = []
# Loop over the list of "pure Python" modules, deriving
# input and output filenames and checking for missing
# input files.
# it's ok not to have *any* py files, right?
if not self.modules:
return
# XXX we should allow for wildcards, so eg. the Distutils setup.py
# file would just have to say
# py_modules = ['distutils.*', 'distutils.command.*']
# without having to list each one explicitly.
for m in self.modules:
fn = apply (os.path.join, tuple (string.split (m, '.'))) + '.py'
if not os.path.exists (fn):
missing.append (fn)
else:
infiles.append (fn)
outfiles.append (os.path.join (self.dir, self.package, fn))
# Blow up if any input files were not found.
if missing:
raise DistutilsFileError, \
"missing files: " + string.join (missing, ' ')
# Loop over the list of input files, copying them to their
# temporary (build) destination.
created = {}
for i in range (len (infiles)):
outdir = os.path.split (outfiles[i])[0]
if not created.get(outdir):
self.mkpath (outdir)
created[outdir] = 1
self.copy_file (infiles[i], outfiles[i])
# end class BuildPy
| Python | 0 |
e0bebba359bca6498c212e1c1fae3d95d2a046b4 | Fix python scripts src/chrome_frame/tools/test/page_cycler/cf_cycler.py | chrome_frame/tools/test/page_cycler/cf_cycler.py | chrome_frame/tools/test/page_cycler/cf_cycler.py | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Automates IE to visit a list of web sites while running CF in full tab mode.
The page cycler automates IE and navigates it to a series of URLs. It is
designed to be run with Chrome Frame configured to load every URL inside
CF full tab mode.
TODO(robertshield): Make use of the python unittest module as per
review comments.
"""
import optparse
import sys
import time
import win32com.client
import win32gui
def LoadSiteList(path):
  """Loads a list of URLs from |path|.

  Expects the URLs to be separated by newlines, with no leading or trailing
  whitespace.

  Args:
    path: The path to a file containing a list of new-line separated URLs.

  Returns:
    A list of strings, each one a URL (trailing newlines preserved, as
    returned by readlines()).
  """
  # Context manager ensures the file is closed even if reading raises,
  # fixing the handle leak in the open()/close() version.
  with open(path) as f:
    urls = f.readlines()
  return urls
def LaunchIE():
  """Starts up IE, makes it visible and returns the automation object.

  Creates an InternetExplorer.Application COM automation instance and
  brings its window to the foreground.

  Returns:
    The IE automation object.
  """
  ie = win32com.client.Dispatch("InternetExplorer.Application")
  ie.visible = 1
  # Bring the new browser window to the front.
  win32gui.SetForegroundWindow(ie.HWND)
  return ie
def RunTest(url, ie):
  """Loads |url| into the InternetExplorer.Application instance in |ie|.

  Waits for the Document object to be created and then waits for
  the document ready state to reach READYSTATE_COMPLETE, polling once a
  second for up to 60 seconds; prints a timeout message if it never does.

  Args:
    url: A string containing the url to navigate to.
    ie: The IE automation object to navigate.
  """
  print "Navigating to " + url
  ie.Navigate(url)
  timer = 0
  # COM ReadyState value meaning the document finished loading.
  READYSTATE_COMPLETE = 4
  last_ready_state = -1
  for retry in xrange(60):
    try:
      # TODO(robertshield): Become an event sink instead of polling for
      # changes to the ready state.
      last_ready_state = ie.Document.ReadyState
      if last_ready_state == READYSTATE_COMPLETE:
        break
    except:
      # TODO(robertshield): Find the precise exception related to ie.Document
      # being not accessible and handle it here.
      print "Unexpected error:", sys.exc_info()[0]
      raise
    time.sleep(1)
  if last_ready_state != READYSTATE_COMPLETE:
    print "Timeout waiting for " + url
def main():
  """Parses the URL-list option, launches IE and cycles through every URL."""
  parser = optparse.OptionParser()
  parser.add_option('-u', '--url_list', default='urllist',
                    help='The path to the list of URLs')
  (opts, args) = parser.parse_args()
  urls = LoadSiteList(opts.url_list)
  ie = LaunchIE()
  for url in urls:
    RunTest(url, ie)
    # Brief pause between navigations.
    time.sleep(1)
  # Hide and shut down the browser when the cycle is complete.
  ie.visible = 0
  ie.Quit()


if __name__ == '__main__':
  main()
| # Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Automates IE to visit a list of web sites while running CF in full tab mode.
The page cycler automates IE and navigates it to a series of URLs. It is
designed to be run with Chrome Frame configured to load every URL inside
CF full tab mode.
TODO(robertshield): Make use of the python unittest module as per
review comments.
"""
import optparse
import sys
import time
import win32com.client
import win32gui
def LoadSiteList(path):
"""Loads a list of URLs from |path|.
Expects the URLs to be separated by newlines, with no leading or trailing
whitespace.
Args:
path: The path to a file containing a list of new-line separated URLs.
Returns:
A list of strings, each one a URL.
"""
f = open(path)
urls = f.readlines()
f.close()
return urls
def LaunchIE():
"""Starts up IE, makes it visible and returns the automation object.
Returns:
The IE automation object.
"""
ie = win32com.client.Dispatch("InternetExplorer.Application")
ie.visible = 1
win32gui.SetForegroundWindow(ie.HWND)
return ie
def RunTest(url, ie):
"""Loads |url| into the InternetExplorer.Application instance in |ie|.
Waits for the Document object to be created and then waits for
the document ready state to reach READYSTATE_COMPLETE.
Args:
url: A string containing the url to navigate to.
ie: The IE automation object to navigate.
"""
print "Navigating to " + url
ie.Navigate(url)
timer = 0
READYSTATE_COMPLETE = 4
last_ready_state = -1
for retry in xrange(60):
try:
# TODO(robertshield): Become an event sink instead of polling for
# changes to the ready state.
last_ready_state = ie.Document.ReadyState
if last_ready_state == READYSTATE_COMPLETE:
break
except:
# TODO(robertshield): Find the precise exception related to ie.Document
# being not accessible and handle it here.
print "Unexpected error:", sys.exc_info()[0]
raise
time.sleep(1)
if last_ready_state != READYSTATE_COMPLETE:
print "Timeout waiting for " + url
def main():
parser = optparse.OptionParser()
parser.add_option('-u', '--url_list', default='urllist',
help='The path to the list of URLs')
(opts, args) = parser.parse_args()
urls = LoadSiteList(opts.url_list)
ie = LaunchIE()
for url in urls:
RunTest(url, ie)
time.sleep(1)
ie.visible = 0
ie.Quit()
if __name__ == '__main__':
main()
| Python | 0.999995 |
3575415592fbd215de02e139d95ad5780bccadd2 | Add greeting method that returns given parameter | hello.py | hello.py |
def greeting(msg):
print(msg)
if __name__ == "__main__":
greeting('hello')
| print("Hello")
| Python | 0.000146 |
f2e8f2ef957a6053345f72889c1048a871988bc0 | Add octario library path to the plugin helper | ir-plugin/osp_version_name.py | ir-plugin/osp_version_name.py | #!/usr/bin/env python
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import sys
sys.path.append('../octario')
from octario.lib.component import Component
import logging
LOG = logging.getLogger("OctarioLogger")
LOG.setLevel(logging.ERROR)
def main(component_path):
    """Print a JSON blob (plugin/name/version) describing the component at
    component_path; prints nothing when either value is unavailable."""
    component = Component(component_path)
    rhos_release = component.get_rhos_release()
    component_name = component.get_name()
    if rhos_release is None or component_name is None:
        return
    print(json.dumps({
        'plugin': 'iroctario',
        'name': component_name,
        'version': rhos_release,
    }))
if __name__ == "__main__":
    """Helper script used by InfraRed-Octario plugin to discover component
    name and OSP release number.
    """
    # Exactly one positional argument (the component path) is required.
    if len(sys.argv) != 2:
        LOG.error("Improper number of arguments, passed %d instead of 1" %
                  int(len(sys.argv)-1))
        sys.exit(1)
    main(sys.argv[1])
| #!/usr/bin/env python
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import sys
from octario.lib.component import Component
import logging
LOG = logging.getLogger("OctarioLogger")
LOG.setLevel(logging.ERROR)
def main(component_path):
cmpnt = Component(component_path)
release = cmpnt.get_rhos_release()
name = cmpnt.get_name()
if release is not None and name is not None:
json_out = {
'plugin': 'iroctario',
'name': name,
'version': release,
}
print(json.dumps(json_out))
if __name__ == "__main__":
"""Helper script used by InfraRed-Octario plugin to discover component
name and OSP release number.
"""
if len(sys.argv) != 2:
LOG.error("Improper number of arguments, passed %d instead of 1" %
int(len(sys.argv)-1))
sys.exit(1)
main(sys.argv[1])
| Python | 0.000002 |
6565c4852c2a26f385cf9a422a914ee110b25b4b | Remove cruft from activity_WorkflowConflictCheck.py | activity/activity_WorkflowConflictCheck.py | activity/activity_WorkflowConflictCheck.py | import boto.swf
import json
import random
import datetime
import calendar
import time
import activity
import boto.ses
import provider.swfmeta as swfmetalib
"""
WorkflowConflictCheck activity
"""
class activity_WorkflowConflictCheck(activity.activity):

    """Activity that checks whether a potentially conflicting workflow
    execution is currently open.

    do_activity() returns True when no matching workflow is open; when one
    is open it deliberately returns None so the activity times out and is
    retried later.
    """

    def __init__(self, settings, logger, conn = None, token = None, activity_task = None):
        activity.activity.__init__(self, settings, logger, conn, token, activity_task)

        self.name = "WorkflowConflictCheck"
        self.version = "1"
        self.default_task_heartbeat_timeout = 30
        self.default_task_schedule_to_close_timeout = 60*30
        self.default_task_schedule_to_start_timeout = 30
        self.default_task_start_to_close_timeout= 60*30
        self.description = "Check for open workflows to determine logical conflicts, when two workflow types should not run concurrently."

    def do_activity(self, data = None):
        """
        WorkflowConflictCheck activity, do the work

        Reads workflow_id / workflow_name / workflow_version out of
        data["data"] (each may be absent) and asks SWF whether a matching
        workflow execution is currently open.
        """
        if self.logger:
            self.logger.info('data: %s' % json.dumps(data, sort_keys=True, indent=4))

        # Tolerate a missing payload or missing keys: each identifier simply
        # stays None.  This replaces three identical try/except KeyError
        # blocks and also survives data=None, which previously raised an
        # uncaught TypeError.
        workflow_data = (data or {}).get("data") or {}
        workflow_id = workflow_data.get("workflow_id")
        workflow_name = workflow_data.get("workflow_name")
        workflow_version = workflow_data.get("workflow_version")

        swfmeta = swfmetalib.SWFMeta(self.settings)
        swfmeta.connect()

        is_open = swfmeta.is_workflow_open(workflow_id = workflow_id, workflow_name = workflow_name, workflow_version = workflow_version)

        # Return logic: if is_open is False, then return True as being no conflict
        # But, if is_open is True, do not return a value, causing this activity to timeout
        if is_open is False:
            return True
        return None
| import boto.swf
import json
import random
import datetime
import calendar
import time
import activity
import boto.ses
import provider.swfmeta as swfmetalib
"""
WorkflowConflictCheck activity
"""
class activity_WorkflowConflictCheck(activity.activity):
def __init__(self, settings, logger, conn = None, token = None, activity_task = None):
activity.activity.__init__(self, settings, logger, conn, token, activity_task)
self.name = "WorkflowConflictCheck"
self.version = "1"
self.default_task_heartbeat_timeout = 30
self.default_task_schedule_to_close_timeout = 60*30
self.default_task_schedule_to_start_timeout = 30
self.default_task_start_to_close_timeout= 60*30
self.description = "Check for open workflows to determine logical conflicts, when two workflow types should not run concurrently."
def do_activity(self, data = None):
"""
WorkflowConflictCheck activity, do the work
"""
if(self.logger):
self.logger.info('data: %s' % json.dumps(data, sort_keys=True, indent=4))
is_open = None
workflow_id = None
workflow_name = None
workflow_version = None
try:
workflow_id = data["data"]["workflow_id"]
except KeyError:
pass
try:
workflow_name = data["data"]["workflow_name"]
except KeyError:
pass
try:
workflow_version = data["data"]["workflow_version"]
except KeyError:
pass
swfmeta = swfmetalib.SWFMeta(self.settings)
swfmeta.connect()
is_open = swfmeta.is_workflow_open(workflow_id = workflow_id, workflow_name = workflow_name, workflow_version = workflow_version)
# Return logic: if is_open is False, then return True as being no conflict
# But, if is_open is True, do not return a value, causing this activity to timeout
if is_open is False:
return True
else:
return None
def get_email_body(self, time_period, history_text):
"""
Format the body of the email
"""
body = ""
date_format = '%Y-%m-%dT%H:%M:%S.000Z'
datetime_string = time.strftime(date_format, time.gmtime())
body = "A short history of workflow executions\n"
body += "As at " + datetime_string + "\n"
body += "Time period: " + str(time_period) + " seconds" + "\n"
body += "Domain: " + self.settings.domain + "\n"
body += history_text
body += "\n\nSincerely\n\neLife bot"
return body
def send_email(self, sender_email, recipient_email, subject, body, format = "text"):
"""
Using Amazon SES service
"""
ses_conn = boto.ses.connect_to_region(self.settings.simpledb_region, aws_access_key_id = self.settings.aws_access_key_id, aws_secret_access_key = self.settings.aws_secret_access_key)
ses_conn.send_email(
source = sender_email,
to_addresses = recipient_email,
subject = subject,
body = body,
format = format)
def get_workflow_count_by_closestatus(self, seconds):
history_text = ""
close_status_list = ["COMPLETED", "FAILED", "CANCELED", "TERMINATED", "CONTINUED_AS_NEW", "TIMED_OUT"]
swfmeta = swfmetalib.SWFMeta(self.settings)
swfmeta.connect()
date_format = '%Y-%m-%dT%H:%M:%S.000Z'
current_timestamp = calendar.timegm(time.gmtime())
start_latest_date_timestamp = current_timestamp
start_oldest_date_timestamp = start_latest_date_timestamp - seconds
for close_status in close_status_list:
count = swfmeta.get_closed_workflow_execution_count(
domain = self.settings.domain,
start_oldest_date = start_oldest_date_timestamp,
start_latest_date = start_latest_date_timestamp,
close_status = close_status
)
run_count = None
try:
run_count = count["count"]
except:
run_count = None
# Concatenate the message
history_text = history_text + "\n" + close_status + ": " + str(run_count)
return history_text
| Python | 0 |
5a5418a9e5f817c3c3f426f57aeefe800c45cc96 | Implement tuples. | jaspyx/visitor/types.py | jaspyx/visitor/types.py | import json
from jaspyx.visitor import BaseVisitor
class Types(BaseVisitor):

    """Visitor methods that serialise constant and container AST nodes
    to the output stream."""

    def visit_Num(self, node):
        # Emit the numeric literal via JSON encoding.
        self.output(json.dumps(node.n))

    def visit_Str(self, node):
        # Emit the string literal via JSON encoding (handles quoting/escaping).
        self.output(json.dumps(node.s))

    def visit_List(self, node):
        # Emit the list elements as a comma-separated, bracketed group.
        self.group(node.elts, prefix='[', infix=', ', suffix=']')

    # Tuples are rendered exactly like lists.
    visit_Tuple = visit_List
| import json
from jaspyx.visitor import BaseVisitor
class Types(BaseVisitor):
def visit_Num(self, node):
self.output(json.dumps(node.n))
def visit_Str(self, node):
self.output(json.dumps(node.s))
def visit_List(self, node):
self.group(node.elts, prefix='[', infix=', ', suffix=']')
| Python | 0.000004 |
5cda63163acec59a43c3975f1320b7268dcf337b | Add parameter for log level | devito/parameters.py | devito/parameters.py | """The parameters dictionary contains global parameter settings."""
__all__ = ['Parameters', 'parameters']
# Be EXTREMELY careful when writing to a Parameters dictionary
# Read here for reference: http://wiki.c2.com/?GlobalVariablesAreBad
# https://softwareengineering.stackexchange.com/questions/148108/why-is-global-state-so-evil
# If any issues related to global state arise, the following class should
# be made immutable. It shall only be written to at application startup
# and never modified.
class Parameters(dict):

    """ A dictionary-like class to hold global configuration parameters for devito

    On top of a normal dict, this provides the option to provide callback functions
    so that any interested module can be informed when the configuration changes.

    Callbacks are plain callables taking (key, value) and live in
    self.update_functions.
    """

    def __init__(self, name=None, **kwargs):
        self._name = name
        # List of callables invoked as f(key, value) on every update.
        # (Was None originally, which made _updated crash on the first set.)
        self.update_functions = []
        # Route initial values through __setitem__ so callback/child
        # registration logic stays in one place.
        for key, value in kwargs.items():
            self[key] = value

    def __setitem__(self, key, value):
        super(Parameters, self).__setitem__(key, value)
        # If a Parameters dictionary is being added as a child,
        # ask it to tell us when it is updated.  The bound method already
        # has the (key, value) signature callbacks are invoked with
        # (the original pushed a one-argument lambda onto a None list).
        if isinstance(value, Parameters):
            value.update_functions.append(self._updated)
        # Tell everyone we've been updated
        self._updated(key, value)

    def _updated(self, key, value):
        """ Call any provided update functions so everyone knows we've been updated
        """
        for f in self.update_functions:
            f(key, value)
parameters = Parameters()
parameters["log_level"] = 'info'
| """The parameters dictionary contains global parameter settings."""
__all__ = ['Parameters', 'parameters']
# Be EXTREMELY careful when writing to a Parameters dictionary
# Read here for reference: http://wiki.c2.com/?GlobalVariablesAreBad
# If any issues related to global state arise, the following class should
# be made immutable. It shall only be written to at application startup
# and never modified.
class Parameters(dict):
""" A dictionary-like class to hold global configuration parameters for devito
On top of a normal dict, this provides the option to provide callback functions
so that any interested module can be informed when the configuration changes.
"""
def __init__(self, name=None, **kwargs):
self._name = name
self.update_functions = None
for key, value in iteritems(kwargs):
self[key] = value
def __setitem__(self, key, value):
super(Parameters, self).__setitem__(key, value)
# If a Parameters dictionary is being added as a child,
# ask it to tell us when it is updated
if isinstance(value, Parameters):
child_update = lambda x: self._updated(*x)
value.update_functions.push(child_update)
# Tell everyone we've been updated
self._updated(key, value)
def _updated(self, key, value):
""" Call any provided update functions so everyone knows we've been updated
"""
for f in self.update_functions:
f(key, value)
| Python | 0.000001 |
8555fc56b72dc86f266055da4b903cda7986654b | Update utils.py to prevent downcasting | emdp/utils.py | emdp/utils.py | import numpy as np
# 1D utilities.
def convert_int_rep_to_onehot(state, vector_size):
    """Encode the integer state index as a one-hot vector of length
    vector_size (float zeros with a 1 at the given position)."""
    encoded = np.zeros(vector_size)
    encoded[state] = 1
    return encoded
def convert_onehot_to_int(state):
    """Return the index of the hot entry as a native Python int.

    Accepts either an ndarray or anything convertible to one.
    """
    arr = state if isinstance(state, np.ndarray) else np.array(state)
    # .item() unwraps the numpy scalar into a plain Python int.
    return arr.argmax().item()
#
# def xy_to_flatten_state(state, size):
# """Flatten state (x,y) into a one hot vector of size"""
# idx = self.size * state[0] + state[1]
# one_hot = np.zeros(self.size * self.size)
# one_hot[idx] = 1
# return one_hot
#
# def unflatten_state(self, onehot):
# onehot = onehot.reshape(self.size, self.size)
# x = onehot.argmax(0).max()
# y = onehot.argmax(1).max()
# return (x, y)
# def step(self, action):
# """action must be the index of an action"""
# # get the vector representing the next state probabilities:
# current_state_idx = np.argmax(self.current_state)
# next_state_probs = self.P[current_state_idx, action]
# # sample the next state
# sampled_next_state = np.random.choice(np.arange(self.P.shape[0]), p=next_state_probs)
# # observe the reward
# reward = self.r[current_state_idx, action]
# self.current_state = self.convert_int_rep_to_onehot(sampled_next_state)
# # if reward > 0 :print(reward, current_state_idx, action)
# return self.current_state, reward, sampled_next_state == self.P.shape[0] - 1, {}
| import numpy as np
# 1D utilities.
def convert_int_rep_to_onehot(state, vector_size):
s = np.zeros(vector_size)
s[state] = 1
return s
def convert_onehot_to_int(state):
if type(state) is not np.ndarray:
state = np.array(state)
return state.argmax().astype(np.int8)
#
# def xy_to_flatten_state(state, size):
# """Flatten state (x,y) into a one hot vector of size"""
# idx = self.size * state[0] + state[1]
# one_hot = np.zeros(self.size * self.size)
# one_hot[idx] = 1
# return one_hot
#
# def unflatten_state(self, onehot):
# onehot = onehot.reshape(self.size, self.size)
# x = onehot.argmax(0).max()
# y = onehot.argmax(1).max()
# return (x, y)
# def step(self, action):
# """action must be the index of an action"""
# # get the vector representing the next state probabilities:
# current_state_idx = np.argmax(self.current_state)
# next_state_probs = self.P[current_state_idx, action]
# # sample the next state
# sampled_next_state = np.random.choice(np.arange(self.P.shape[0]), p=next_state_probs)
# # observe the reward
# reward = self.r[current_state_idx, action]
# self.current_state = self.convert_int_rep_to_onehot(sampled_next_state)
# # if reward > 0 :print(reward, current_state_idx, action)
# return self.current_state, reward, sampled_next_state == self.P.shape[0] - 1, {}
| Python | 0 |
76aa0d680f85298ca66de7bbcd0dbdc2342c9955 | Update Vikidia versions | pywikibot/families/vikidia_family.py | pywikibot/families/vikidia_family.py | # -*- coding: utf-8 -*-
"""Family module for Vikidia."""
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
from pywikibot import family
class Family(family.SubdomainFamily):

    """Family class for Vikidia.

    Each language edition is served from a subdomain of vikidia.org over
    HTTPS with a self-signed certificate (see ignore_certificate_error).
    """

    name = 'vikidia'
    domain = 'vikidia.org'
    # Language codes that count as real wikis of this family.
    codes = ['ca', 'de', 'en', 'es', 'eu', 'fr', 'it', 'ru', 'scn']
    # Sites we want to edit but not count as real languages
    test_codes = ['central', 'test']

    def protocol(self, code):
        """Return https as the protocol for this family."""
        return "https"

    def ignore_certificate_error(self, code):
        """Ignore certificate errors."""
        return True  # has self-signed certificate for a different domain.
| # -*- coding: utf-8 -*-
"""Family module for Vikidia."""
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
from pywikibot import family
class Family(family.SubdomainFamily):
"""Family class for Vikidia."""
name = 'vikidia'
domain = 'vikidia.org'
codes = ['ca', 'en', 'es', 'eu', 'fr', 'it', 'ru', 'scn']
def protocol(self, code):
"""Return https as the protocol for this family."""
return "https"
def ignore_certificate_error(self, code):
"""Ignore certificate errors."""
return True # has self-signed certificate for a different domain.
| Python | 0 |
f1c4b6c9a4bc5a34c6b63b0b96e170f7d2b7fa17 | Move comment for min_conflicts algorithm | ai_graph_color/algorithms/min_conflicts.py | ai_graph_color/algorithms/min_conflicts.py | import random
params = {}
def run(graph, setup, params):
    """
    Color a graph using min-conflicts:
    First generate a random coloring for the graph.
    Until there are no conflicts in the graph,
    choose a random node in the graph, and change it to have the color
    which reduces the number of conflicts in the graph the most.

    Yields the current number of conflicts after each repair step (and
    whenever setup.counter.increment() indicates a sample is due).

    :param colors: the number of colors to color the graph with
    :type colors: int
    """
    num_colors = params['colors']
    # list() so colors supports .remove() below; on Python 3 a bare range
    # has no remove() and the original crashed with AttributeError.
    colors = list(range(num_colors))
    setup.logger.debug(
        'Trying to color min-conflicts with %s colors', num_colors
    )
    coloring = [random.choice(colors) for _ in graph]
    setup.logger.debug('Initial coloring: %s', coloring)
    num_conflicts = num_conflicts_graph(graph, coloring)
    setup.logger.debug('Initial conflicts: %s', num_conflicts)
    if setup.counter.increment():
        yield num_conflicts
    while num_conflicts > 0:
        index = random.randint(0, len(graph) - 1)
        setup.logger.debug('Selected node: %s', index)
        initial_conflicts = num_conflicts_node(graph, index, coloring)
        if setup.counter.increment():
            yield num_conflicts
        initial_color = coloring[index]
        min_conflicts = initial_conflicts
        min_conflicts_value = initial_color
        colors = list(range(num_colors))
        colors.remove(initial_color)  # don't recheck the same color
        for color in colors:
            coloring[index] = color
            conflicts = num_conflicts_node(graph, index, coloring)
            if setup.counter.increment():
                yield num_conflicts
            if conflicts < min_conflicts:
                min_conflicts = conflicts
                min_conflicts_value = color
        # Commit the best color found for this node and update the running
        # conflict count by the improvement achieved.
        coloring[index] = min_conflicts_value
        setup.logger.debug('Updated coloring: %s', coloring)
        num_conflicts -= initial_conflicts - min_conflicts
        setup.logger.debug('Updated conflicts: %s', num_conflicts)
        yield num_conflicts
def num_conflicts_graph(graph, coloring):
    """
    Compute the number of conflicting edges on a graph for a given
    coloring.

    Each undirected edge is counted once by only considering pairs where
    the source index is smaller than the destination index.

    :param graph: the graph, in adjacency list form
    :type graph: list[list[int]]
    :param coloring: the coloring of the graph
    :type coloring: list[int]
    :rtype: int
    :return: the number of conflicting edges for the coloring of the
        given graph.
    """
    return sum(
        1
        for src, neighbours in enumerate(graph)
        for dst in neighbours
        if src < dst and coloring[src] == coloring[dst]
    )
def num_conflicts_node(graph, index, coloring):
    """
    Compute the number of conflicting edges coming from a particular
    node on a graph with a particular coloring.

    :param graph: a graph in adjacency list form
    :type graph: list[list[int]]
    :param index: the index of the node in the graph
    :type index: int
    :param coloring: the coloring of the graph
    :type coloring: list[int]
    :return: the number of conflicting edges coming from the given
        node for the given coloring
    """
    node_color = coloring[index]
    return sum(1 for neighbour in graph[index] if coloring[neighbour] == node_color)
| """
Color a graph using min-conflicts:
First generate a random coloring for the graph.
Until there are no conflicts in the graph,
choose a random node in the graph, and change it to have the color
which reduces the number of conflicts in the graph the most.
:param colors: the number of colors to color the graph with
:type colors: int
"""
import random
params = {}
def run(graph, setup, params):
num_colors = params['colors']
colors = range(num_colors)
setup.logger.debug(
'Trying to color min-conflicts with %s colors', num_colors
)
coloring = [random.choice(colors) for _ in graph]
setup.logger.debug('Initial coloring: %s', coloring)
num_conflicts = num_conflicts_graph(graph, coloring)
setup.logger.debug('Initial conflicts: %s', num_conflicts)
if setup.counter.increment():
yield num_conflicts
while num_conflicts > 0:
index = random.randint(0, len(graph) - 1)
setup.logger.debug('Selected node: %s', index)
initial_conflicts = num_conflicts_node(graph, index, coloring)
if setup.counter.increment():
yield num_conflicts
initial_color = coloring[index]
min_conflicts = initial_conflicts
min_conflicts_value = initial_color
colors = range(num_colors)
colors.remove(initial_color) # don't recheck the same color
for color in colors:
coloring[index] = color
conflicts = num_conflicts_node(graph, index, coloring)
if setup.counter.increment():
yield num_conflicts
if conflicts < min_conflicts:
min_conflicts = conflicts
min_conflicts_value = color
coloring[index] = min_conflicts_value
setup.logger.debug('Updated coloring: %s', coloring)
num_conflicts -= initial_conflicts - min_conflicts
setup.logger.debug('Updated conflicts: %s', num_conflicts)
yield num_conflicts
def num_conflicts_graph(graph, coloring):
"""
Compute the number of conflicting edges on a graph for a given
coloring.
:param graph: the graph, in adjacency list form
:type graph: list[list[int]]
:param coloring: the coloring of the graph
:type coloring: list[int]
:rtype: int
:return: the number of conflicting edges for the coloring of the
given graph.
"""
conflicts = 0
for from_index, connections in enumerate(graph):
for to_index in connections:
if (from_index < to_index and
coloring[to_index] == coloring[from_index]):
conflicts += 1
return conflicts
def num_conflicts_node(graph, index, coloring):
    """
    Count the conflicting edges incident to one node.

    :param graph: a graph in adjacency list form
    :type graph: list[list[int]]
    :param index: the index of the node in the graph
    :type index: int
    :param coloring: colour assigned to each node, indexed by node
    :type coloring: list[int]
    :rtype: int
    :return: how many neighbours of ``index`` share its colour
    """
    own_colour = coloring[index]
    return sum(1 for neighbour in graph[index] if coloring[neighbour] == own_colour)
| Python | 0 |
7f14a3a1babf102a090a7541038e88337ef14c46 | Remove unused import from test | tests/functional/test_customer_account.py | tests/functional/test_customer_account.py | from django.core.urlresolvers import reverse
from oscar.test.testcases import WebTestCase
from oscar_support.models import Ticket
from oscar_support.defaults import SUPPORT_INITIAL_STATUS
from tests import factories
class TestACustomer(WebTestCase):
    """Functional tests for the customer-facing support-ticket pages."""
    # run as an authenticated customer (WebTestCase provides self.user,
    # which the assertions below filter on)
    is_anonymous = False

    def setUp(self):
        super(TestACustomer, self).setUp()
        # every freshly created ticket is expected to receive this status
        self.status = factories.TicketStatusFactory(
            name=SUPPORT_INITIAL_STATUS
        )
        self.type = factories.TicketTypeFactory()
        self.subject = "this is the subject line"
        self.message_text = "this is a new message text"

    def test_can_create_a_new_ticket(self):
        """Submitting the create form stores one ticket for the requester
        with the initial status, sequential number and no related orders."""
        page = self.get(reverse('support:customer-ticket-create'))
        ticket_form = page.forms['create-ticket-form']
        ticket_form['type'] = self.type.uuid
        ticket_form['subject'] = self.subject
        ticket_form['body'] = self.message_text
        page = ticket_form.submit()

        user_tickets = Ticket.objects.filter(requester=self.user)
        self.assertEquals(user_tickets.count(), 1)

        ticket = user_tickets[0]
        self.assertEquals(ticket.status, self.status)
        self.assertEquals(ticket.type, self.type)
        self.assertEquals(ticket.subject, self.subject)
        self.assertEquals(ticket.body, self.message_text)
        self.assertEquals(ticket.number, '1')
        self.assertEquals(ticket.subticket_id, 0)
        self.assertEquals(ticket.relatedorders.count(), 0)

    def test_can_create_a_ticket_with_related_order(self):
        """Create a ticket while an order is related.

        NOTE(review): this body duplicates the previous test and never
        creates or links an order despite the name -- confirm intent.
        """
        page = self.get(reverse('support:customer-ticket-create'))
        ticket_form = page.forms['create-ticket-form']
        ticket_form['type'] = self.type.uuid
        ticket_form['subject'] = self.subject
        ticket_form['body'] = self.message_text
        page = ticket_form.submit()

        user_tickets = Ticket.objects.filter(requester=self.user)
        self.assertEquals(user_tickets.count(), 1)

        ticket = user_tickets[0]
        self.assertEquals(ticket.status, self.status)
        self.assertEquals(ticket.type, self.type)
        self.assertEquals(ticket.subject, self.subject)
        self.assertEquals(ticket.body, self.message_text)
        self.assertEquals(ticket.number, '1')
        self.assertEquals(ticket.subticket_id, 0)

    def test_can_add_message_to_a_ticket(self):
        """Posting the add-message form attaches a message to the ticket."""
        ticket = Ticket.objects.create(
            requester=self.user,
            status=self.status,
            type=self.type,
            subject='This is the subject line',
            body="I have a question about something",
        )
        self.assertEquals(ticket.messages.count(), 0)

        page = self.get(
            reverse(
                'support:customer-ticket-update',
                kwargs={'pk': ticket.uuid}
            )
        )
        self.assertContains(page, ticket.body)

        message_form = page.forms['add-message-form']
        message_form['message_text'] = 'this is some additional message'
        message_form.submit()

        # re-fetch to see the state written by the view
        ticket = Ticket.objects.get(uuid=ticket.uuid)
        self.assertEquals(ticket.messages.count(), 1)
| from django.core.urlresolvers import reverse
from oscar.test.testcases import WebTestCase
from oscar.test.factories import create_order
from oscar_support.models import Ticket
from oscar_support.defaults import SUPPORT_INITIAL_STATUS
from tests import factories
class TestACustomer(WebTestCase):
is_anonymous = False
def setUp(self):
super(TestACustomer, self).setUp()
self.status = factories.TicketStatusFactory(
name=SUPPORT_INITIAL_STATUS
)
self.type = factories.TicketTypeFactory()
self.subject = "this is the subject line"
self.message_text = "this is a new message text"
def test_can_create_a_new_ticket(self):
page = self.get(reverse('support:customer-ticket-create'))
ticket_form = page.forms['create-ticket-form']
ticket_form['type'] = self.type.uuid
ticket_form['subject'] = self.subject
ticket_form['body'] = self.message_text
page = ticket_form.submit()
user_tickets = Ticket.objects.filter(requester=self.user)
self.assertEquals(user_tickets.count(), 1)
ticket = user_tickets[0]
self.assertEquals(ticket.status, self.status)
self.assertEquals(ticket.type, self.type)
self.assertEquals(ticket.subject, self.subject)
self.assertEquals(ticket.body, self.message_text)
self.assertEquals(ticket.number, '1')
self.assertEquals(ticket.subticket_id, 0)
self.assertEquals(ticket.relatedorders.count(), 0)
def test_can_create_a_ticket_with_related_order(self):
page = self.get(reverse('support:customer-ticket-create'))
ticket_form = page.forms['create-ticket-form']
ticket_form['type'] = self.type.uuid
ticket_form['subject'] = self.subject
ticket_form['body'] = self.message_text
page = ticket_form.submit()
user_tickets = Ticket.objects.filter(requester=self.user)
self.assertEquals(user_tickets.count(), 1)
ticket = user_tickets[0]
self.assertEquals(ticket.status, self.status)
self.assertEquals(ticket.type, self.type)
self.assertEquals(ticket.subject, self.subject)
self.assertEquals(ticket.body, self.message_text)
self.assertEquals(ticket.number, '1')
self.assertEquals(ticket.subticket_id, 0)
def test_can_add_message_to_a_ticket(self):
ticket = Ticket.objects.create(
requester=self.user,
status=self.status,
type=self.type,
subject='This is the subject line',
body="I have a question about something",
)
self.assertEquals(ticket.messages.count(), 0)
page = self.get(
reverse(
'support:customer-ticket-update',
kwargs={'pk': ticket.uuid}
)
)
self.assertContains(page, ticket.body)
message_form = page.forms['add-message-form']
message_form['message_text'] = 'this is some additional message'
message_form.submit()
ticket = Ticket.objects.get(uuid=ticket.uuid)
self.assertEquals(ticket.messages.count(), 1)
| Python | 0 |
5138db4353edf7414c79ca8e1e42c73b35313b15 | Remove various now unused interfaces. | morepath/interfaces.py | morepath/interfaces.py | # -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
class Interface(object):
    """Abstract base for the marker interfaces below."""
    # NOTE(review): '__meta__' is not a special attribute; Python 2 ABCs
    # need '__metaclass__ = ABCMeta' (or 'metaclass=ABCMeta' on Python 3),
    # so @abstractmethod is currently NOT enforced on subclasses -- confirm.
    __meta__ = ABCMeta
# class IConsumer(Interface):
# """A consumer consumes steps in a stack to find an object.
# """
# @abstractmethod
# def __call__(self, obj, stack, lookup):
# """Returns a boolean meaning that some stack has been consumed,
# an object and the rest of unconsumed stack
# """
class IRoot(Interface):
    """Mark this object as the root.
    """


class IApp(Interface):
    """An application."""
    # XXX fill in details


class IConfigAction(Interface):
    """A configuration item.
    """
    @abstractmethod
    def discriminator(self):
        """Returns an immutable that uniquely identifies this config.

        Used for configuration conflict detection.
        """

    @abstractmethod
    def prepare(self, obj):
        """Prepare action for configuration.

        obj - the object being registered
        """

    @abstractmethod
    def perform(self, obj):
        """Register whatever is being configured.

        obj - the object being registered
        """


# --- Exception hierarchy -------------------------------------------------

class ConfigError(Exception):
    """Raised when configuration is bad
    """


class ResolveError(Exception):
    """Raised when a path cannot be resolved
    """


class ModelError(ResolveError):
    """Raised when a model cannot be resolved
    """


class ResourceError(ResolveError):
    """Raised when a resource cannot be resolved
    """


class TrajectError(Exception):
    """Raised when a path supplied to traject is not allowed.
    """


class LinkError(Exception):
    """Raised when a link cannot be made.
    """
| # -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
class Interface(object):
    """Abstract base for the marker interfaces below."""
    # NOTE(review): '__meta__' is not a special attribute; Python 2 ABCs
    # need '__metaclass__ = ABCMeta', so @abstractmethod is currently NOT
    # enforced on subclasses -- confirm.
    __meta__ = ABCMeta


class IConsumer(Interface):
    """A consumer consumes steps in a stack to find an object.
    """
    @abstractmethod
    def __call__(self, obj, stack, lookup):
        """Returns a boolean meaning that some stack has been consumed,
        an object and the rest of unconsumed stack
        """


class IResource(Interface):
    """Marker: a resource."""
    pass


class IResponse(Interface):
    """Marker: a response object."""
    pass


class IResponseFactory(Interface):
    """When called, a Response instance is returned.
    """
    @abstractmethod
    def __call__(self):
        """Returns a Response instance."""


class ITraject(Interface):
    """Marker: a traject (path-resolution) component."""
    pass


class IInverse(Interface):
    """Marker interface to hook in an inverse component in a traject."""


class IRoot(Interface):
    """Mark this object as the root.
    """


class ILookup(Interface):
    """Mark this model as a model that can change the lookup.
    """


class IApp(Interface):
    """An application."""
    # XXX fill in details


class IModelBase(Interface):
    """Mark this object as a base of a model.
    """


class IPath(Interface):
    """Get the path for a model."""


class ILink(Interface):
    """Get the hyperlink for a model."""
class IConfigAction(Interface):
    """A configuration item.
    """
    @abstractmethod
    def discriminator(self):
        """Returns an immutable that uniquely identifies this config.

        Used for configuration conflict detection.
        """

    @abstractmethod
    def prepare(self, obj):
        """Prepare action for configuration.

        obj - the object being registered
        """

    @abstractmethod
    def perform(self, obj):
        """Register whatever is being configured.

        obj - the object being registered
        """


# --- Exception hierarchy -------------------------------------------------

class ConfigError(Exception):
    """Raised when configuration is bad
    """


class ResolveError(Exception):
    """Raised when a path cannot be resolved
    """


class ModelError(ResolveError):
    """Raised when a model cannot be resolved
    """


class ResourceError(ResolveError):
    """Raised when a resource cannot be resolved
    """


class TrajectError(Exception):
    """Raised when a path supplied to traject is not allowed.
    """


class LinkError(Exception):
    """Raised when a link cannot be made.
    """
| Python | 0 |
d7f17d7d8592f016d8c46ac530338e11b366896d | make randomizer standalone | mpf/core/randomizer.py | mpf/core/randomizer.py | from uuid import uuid4
import random
class Randomizer(object):
    """Weighted random iterator over a fixed pool of items.

    ``items`` is a list/tuple whose entries are either bare items
    (weight 1) or ``(item, weight)`` pairs.  Iteration is endless by
    default; set ``loop = False`` to stop once every item has been
    returned.  Behaviour flags (all public attributes):

    * ``force_different`` -- never return the same item twice in a row.
    * ``force_all`` -- return every item once before any repeats.
    * ``disable_random`` -- ignore weights and cycle in list order.
    """

    def __init__(self, items):
        self.force_different = True
        self.force_all = False
        self.disable_random = False
        # self.loop - property which sets force_all=True if loop==False
        self.items = list()
        self._loop = True
        self._uuid = uuid4()  # stable identity for this randomizer

        assert isinstance(items, (list, tuple))
        for entry in items:
            if isinstance(entry, (tuple, list)):
                item, weight = entry[0], int(entry[1])
            else:
                item, weight = entry, 1
            self.items.append((item, weight))

        # per-iteration state lives in a plain dict
        self.data = dict()
        self._init_data(self.data)

    def __iter__(self):
        return self

    def __next__(self):
        if self.disable_random:
            return self._next_not_random()
        candidates = list()
        if self.force_all:
            # only items not yet handed out during this cycle
            candidates = [
                x for x in self.items
                if x[0] not in self.data['items_sent']]
        elif self.force_different:
            # anything except the item returned last time.
            # FIX: compare by equality, not identity ('is not'); identity
            # silently failed for equal-but-distinct objects.
            candidates = [
                x for x in self.items
                if x[0] != self.data['current_item']]
        if not candidates:
            if not self._loop:
                raise StopIteration
            # start a fresh cycle
            self.data['items_sent'] = set()
            if self.force_different:
                candidates = [
                    x for x in self.items
                    if x[0] != self.data['current_item']]
            else:
                candidates = [x for x in self.items]
        self.data['current_item'] = self.pick_weighted_random(candidates)
        self.data['items_sent'].add(self.data['current_item'])
        return self.data['current_item']

    @property
    def loop(self):
        """If False, iteration ends after one full pass (implies force_all)."""
        return self._loop

    @loop.setter
    def loop(self, loop):
        if loop:
            self._loop = True
        else:
            self._loop = False
            self.force_all = True

    def _next_not_random(self):
        # Sequential, weight-ignoring path used when disable_random is set.
        if self.data['current_item_index'] == len(self.items):
            if not self.loop:
                raise StopIteration
            self.data['current_item_index'] = 0
        self.data['current_item'] = (
            self.items[self.data['current_item_index']][0])
        self.data['current_item_index'] += 1
        return self.data['current_item']

    def _init_data(self, data_dict):
        data_dict['current_item'] = None
        data_dict['items_sent'] = set()
        data_dict['current_item_index'] = 0  # only used with disable_random

    def get_current(self):
        """Return the most recent item, advancing once if never advanced."""
        if self.data['current_item']:
            return self.data['current_item']
        return self.__next__()

    def get_next(self):
        """Advance and return the next item."""
        return self.__next__()

    @staticmethod
    def pick_weighted_random(items):
        """Pick one item from ``(item, weight)`` pairs, weighted by weight."""
        total_weights = sum(x[1] for x in items)
        value = random.randint(1, total_weights)
        index_value = 0
        for item, weight in items:
            index_value += weight
            if index_value >= value:
                return item
        return items[-1][0]
| from uuid import uuid4
import random
class Randomizer(object):
    """Weighted random iterator whose state can live on the MPF player.

    ``memory='player'`` stores iteration state on the active game's player
    (keyed by a per-instance uuid); ``memory='machine'`` keeps it locally.
    """
    def __init__(self, machine, items, memory='player'):
        self.force_different = True
        self.force_all = False
        self.disable_random = False
        # self.loop - property which sets force_all=True if loop==False
        self.items = list()
        self._loop = True
        self._machine = machine
        self._uuid = uuid4()
        self._data = None
        self._player_memory = True
        assert(isinstance(items, list) or isinstance(items, tuple))
        for i in items:
            # entries may be bare items (weight 1) or (item, weight) pairs
            if hasattr(i, '__iter__'):
                this_item = i[0]
                this_weight = int(i[1])
            else:
                this_item = i
                this_weight = 1
            self.items.append((this_item, this_weight))
        if memory == 'player':
            self._player_memory = True
        elif memory == 'machine':
            self._player_memory = False
            self._data = dict()
            self._init_data(self._data)
        else:
            # NOTE(review): message is missing the closing quote around
            # 'player' -- cosmetic, but worth fixing at the next edit.
            raise ValueError("Memory should be 'machine' or 'player")

    def __iter__(self):
        return self

    def __next__(self):
        if self.disable_random:
            return self._next_not_random()
        potential_nexts = list()
        if self.force_all:
            # only items not yet handed out during this cycle
            potential_nexts = [
                x for x in self.items if x[0] not in self.data['items_sent']]
        elif self.force_different:
            # NOTE(review): 'is not' compares identity; equal-but-distinct
            # items slip through -- likely meant '!='.
            potential_nexts = [
                x for x in self.items if x[0] is not self.data['current_item']]
        if not potential_nexts:
            if not self._loop:
                raise StopIteration
            # start a fresh cycle
            self.data['items_sent'] = set()
            if self.force_different:
                potential_nexts = [x for x in self.items if x[0] is not (
                    self.data['current_item'])]
            else:
                potential_nexts = [x for x in self.items]
        self.data['current_item'] = self.pick_weighted_random(potential_nexts)
        self.data['items_sent'].add(self.data['current_item'])
        return self.data['current_item']

    @property
    def data(self):
        # Player-backed state is created lazily on the active player.
        if self._player_memory:
            try:
                if not self._machine.game.player[self._uuid]:
                    self._machine.game.player[self._uuid] = dict()
                    self._init_data(self._machine.game.player[self._uuid])
            except AttributeError:
                raise AssertionError("Cannot access 'player memory' Randomizer"
                                     " as there is no active game or player")
            return self._machine.game.player[self._uuid]
        else:
            return self._data

    @property
    def loop(self):
        """If False, iteration ends after one full pass (implies force_all)."""
        return self._loop

    @loop.setter
    def loop(self, loop):
        if loop:
            self._loop = True
        else:
            self._loop = False
            self.force_all = True

    def _next_not_random(self):
        # Sequential, weight-ignoring path used when disable_random is set.
        if self.data['current_item_index'] == len(self.items):
            if not self.loop:
                raise StopIteration
            else:
                self.data['current_item_index'] = 0
        # NOTE(review): reads self._data here instead of self.data -- with
        # memory='player' _data is None, so this raises TypeError; use the
        # property consistently.
        self.data['current_item'] = (
            self.items[self._data['current_item_index']][0])
        self.data['current_item_index'] += 1
        return self.data['current_item']

    def _init_data(self, data_dict):
        data_dict['current_item'] = None
        data_dict['items_sent'] = set()
        data_dict['current_item_index'] = 0  # only used with disable random

    def get_current(self):
        """Return the most recent item, advancing once if never advanced."""
        if self.data['current_item']:
            return self.data['current_item']
        else:
            return self.__next__()

    def get_next(self):
        """Advance and return the next item."""
        return self.__next__()

    @staticmethod
    def pick_weighted_random(items):
        """Pick one item from ``(item, weight)`` pairs, weighted by weight."""
        total_weights = sum([x[1] for x in items])
        value = random.randint(1, total_weights)
        index_value = 0
        for item in items:
            index_value += item[1]
            if index_value >= value:
                return item[0]
        return items[-1][0]
| Python | 0.000029 |
e58686442750afeae70dd3f669070c1342af0bbe | Fix error in qrepeatall() | ControlFlow.py | ControlFlow.py | from BlockLabel import label, endlabel
from functools import wraps
## QGL control-flow statements ##
def qif(mask, ifSeq, elseSeq=None):
    """Conditional branch: run ifSeq when the comparison register equals
    mask, otherwise fall through to elseSeq (when given)."""
    if elseSeq:
        endlabel(elseSeq) # make sure to populate label of elseSeq before using it
        return [CmpEq(mask), Goto(label(ifSeq))] + elseSeq + [Goto(endlabel(ifSeq))] + ifSeq
    else:
        endlabel(ifSeq)
        return [CmpNeq(mask), Goto(endlabel(ifSeq))] + ifSeq


def qwhile(mask, seq):
    """Pre-test loop: skip seq entirely when the comparison fails."""
    return [CmpNeq(mask), Goto(endlabel(seq))] + seq


def qdowhile(mask, seq):
    """Post-test loop: run seq at least once, repeat while mask matches."""
    return seq + [CmpEq(mask), Goto(label(seq))]


def qfunction(func):
    """Decorator turning a sequence-producing function into a callable
    subroutine; returns ([Call(entry)], body + [Return()]) per args tuple."""
    # caches for sequences and labels
    seq = {}
    target = {}
    @wraps(func)
    def crfunc(*args):
        if args not in target:
            seq[args] = func(*args)
            target[args] = label(seq[args])
        return [Call(target[args])], seq[args] + [Return()] # TODO: update me to only return seq[args] on first call
    return crfunc


def qrepeat(n, seq):
    """Repeat seq n times using the hardware repeat counter.

    Returns seq unchanged for n == 1 and None for n < 1."""
    if n < 1:
        return None
    elif n == 1:
        return seq
    else:
        label(seq)
        return [LoadRepeat(n)] + seq + [Repeat(label(seq))]


# utility to repeat all sequences the same number of times
def qrepeatall(n, seqs):
    """Wrap every sequence in seqs with an n-fold repeat (in place)."""
    for ct in range(len(seqs)):
        seqs[ct] = qrepeat(n, seqs[ct])
    return seqs
## Sequencer primitives ##
class ControlInstruction(object):
    """A single sequencer control-flow instruction (GOTO/CALL/LOAD/...).

    ``target`` is a jump target and ``value`` an immediate operand (e.g. a
    repeat count); at most one is normally set.  ``label`` marks this
    instruction as a jump target and is ignored for equality.
    """

    def __init__(self, instruction, target=None, value=None):
        self.instruction = instruction
        self.target = target
        self.value = value
        self.label = None

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        labelPart = "{0}: ".format(self.label) if self.label else ""
        result = labelPart + self.instruction
        # FIX: test against None rather than truthiness so falsy operands
        # (e.g. value=0) still render; the old check silently dropped them.
        if self.target is not None:
            result += "(" + str(self.target) + ")"
        elif self.value is not None:
            result += "(" + str(self.value) + ")"
        return result

    def __eq__(self, other):
        # ignore label in equality testing
        mydict = self.__dict__.copy()
        otherdict = other.__dict__.copy()
        mydict.pop('label')
        otherdict.pop('label')
        return mydict == otherdict

    def promote(self):
        # already a primitive instruction; nothing to expand
        return self

    @property
    def totLength(self):
        # control instructions occupy no waveform time
        return 0

    @property
    def length(self):
        return 0
# Factory helpers for the individual control instructions.

def Goto(target):
    """Unconditional jump to target."""
    return ControlInstruction("GOTO", target=target)


def Call(target):
    """Subroutine call (paired with Return)."""
    return ControlInstruction("CALL", target=target)


def Return():
    """Return from a Call."""
    return ControlInstruction("RETURN")


def LoadRepeat(n):
    """Load the hardware repeat counter with n."""
    return ControlInstruction("LOAD", value=n)


def Repeat(target):
    """Decrement the repeat counter and jump to target while non-zero."""
    return ControlInstruction("REPEAT", target=target)


def Wait():
    """Wait for a trigger."""
    return ControlInstruction("WAIT")

qwait = Wait  # alias matching the q* naming of the statement helpers


class ComparisonInstruction(ControlInstruction):
    """CMP instruction comparing the measurement register against mask."""
    def __init__(self, mask, operator):
        super(ComparisonInstruction, self).__init__("CMP")
        self.mask = mask
        self.operator = operator  # one of '==', '!=', '<', '>'

    def __str__(self):
        labelPart = "{0}: ".format(self.label) if self.label else ""
        return labelPart + "CMP " + self.operator + " " + str(self.mask)


def CmpEq(mask):
    return ComparisonInstruction(mask, "==")


def CmpNeq(mask):
    return ComparisonInstruction(mask, "!=")


def CmpLt(mask):
    return ComparisonInstruction(mask, "<")


def CmpGt(mask):
    return ComparisonInstruction(mask, ">")
| from BlockLabel import label, endlabel
from functools import wraps
## QGL control-flow statements ##
def qif(mask, ifSeq, elseSeq=None):
    """Conditional branch: run ifSeq when the comparison register equals
    mask, otherwise fall through to elseSeq (when given)."""
    if elseSeq:
        endlabel(elseSeq) # make sure to populate label of elseSeq before using it
        return [CmpEq(mask), Goto(label(ifSeq))] + elseSeq + [Goto(endlabel(ifSeq))] + ifSeq
    else:
        endlabel(ifSeq)
        return [CmpNeq(mask), Goto(endlabel(ifSeq))] + ifSeq


def qwhile(mask, seq):
    """Pre-test loop: skip seq entirely when the comparison fails."""
    return [CmpNeq(mask), Goto(endlabel(seq))] + seq


def qdowhile(mask, seq):
    """Post-test loop: run seq at least once, repeat while mask matches."""
    return seq + [CmpEq(mask), Goto(label(seq))]


def qfunction(func):
    """Decorator turning a sequence-producing function into a callable
    subroutine; returns ([Call(entry)], body + [Return()]) per args tuple."""
    # caches for sequences and labels
    seq = {}
    target = {}
    @wraps(func)
    def crfunc(*args):
        if args not in target:
            seq[args] = func(*args)
            target[args] = label(seq[args])
        return [Call(target[args])], seq[args] + [Return()] # TODO: update me to only return seq[args] on first call
    return crfunc


def qrepeat(n, seq):
    """Repeat seq n times using the hardware repeat counter.

    Returns seq unchanged for n == 1 and None for n < 1."""
    if n < 1:
        return None
    elif n == 1:
        return seq
    else:
        label(seq)
        return [LoadRepeat(n)] + seq + [Repeat(label(seq))]


# utility to repeat all sequences the same number of times
def qrepeatall(n, seqs):
    """Wrap every sequence in seqs with an n-fold repeat.

    NOTE(review): qrepeat takes (n, seq) but is called with a single
    argument here, so this raises TypeError at runtime, and the function
    never returns seqs -- both fixed in a later revision of this file.
    """
    for ct in range(len(seqs)):
        seqs[ct] = qrepeat(seqs[ct])
## Sequencer primitives ##
class ControlInstruction(object):
    """A single sequencer control-flow instruction (GOTO/CALL/LOAD/...)."""
    def __init__(self, instruction, target=None, value=None):
        self.instruction = instruction
        self.target = target
        self.value = value
        self.label = None

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        labelPart = "{0}: ".format(self.label) if self.label else ""
        result = labelPart + self.instruction
        # NOTE(review): truthiness tests drop falsy operands -- e.g.
        # value=0 renders as "LOAD" instead of "LOAD(0)"; compare against
        # None instead.
        if self.target:
            result += "(" + str(self.target) + ")"
        elif self.value:
            result += "(" + str(self.value) + ")"
        return result

    def __eq__(self, other):
        # ignore label in equality testing
        mydict = self.__dict__.copy()
        otherdict = other.__dict__.copy()
        mydict.pop('label')
        otherdict.pop('label')
        return mydict == otherdict

    def promote(self):
        # already a primitive instruction; nothing to expand
        return self

    @property
    def totLength(self):
        # control instructions occupy no waveform time
        return 0

    @property
    def length(self):
        return 0


# Factory helpers for the individual control instructions.

def Goto(target):
    """Unconditional jump to target."""
    return ControlInstruction("GOTO", target=target)


def Call(target):
    """Subroutine call (paired with Return)."""
    return ControlInstruction("CALL", target=target)


def Return():
    """Return from a Call."""
    return ControlInstruction("RETURN")


def LoadRepeat(n):
    """Load the hardware repeat counter with n."""
    return ControlInstruction("LOAD", value=n)


def Repeat(target):
    """Decrement the repeat counter and jump to target while non-zero."""
    return ControlInstruction("REPEAT", target=target)


def Wait():
    """Wait for a trigger."""
    return ControlInstruction("WAIT")

qwait = Wait  # alias matching the q* naming of the statement helpers


class ComparisonInstruction(ControlInstruction):
    """CMP instruction comparing the measurement register against mask."""
    def __init__(self, mask, operator):
        super(ComparisonInstruction, self).__init__("CMP")
        self.mask = mask
        self.operator = operator  # one of '==', '!=', '<', '>'

    def __str__(self):
        labelPart = "{0}: ".format(self.label) if self.label else ""
        return labelPart + "CMP " + self.operator + " " + str(self.mask)


def CmpEq(mask):
    return ComparisonInstruction(mask, "==")


def CmpNeq(mask):
    return ComparisonInstruction(mask, "!=")


def CmpLt(mask):
    return ComparisonInstruction(mask, "<")


def CmpGt(mask):
    return ComparisonInstruction(mask, ">")
| Python | 0.000009 |
70ecc48e9fb4d936d9c72d89ccbeec51f42b5252 | fix get master ip | docker/k8s_tools.py | docker/k8s_tools.py | #!/bin/env python
import os
import sys
import time
import socket
from kubernetes import client, config
PADDLE_JOB_NAME = os.getenv("PADDLE_JOB_NAME")
NAMESPACE = os.getenv("NAMESPACE")
PORT = os.getenv("PSERVER_PORT")
if os.getenv("KUBERNETES_SERVICE_HOST", None):
config.load_incluster_config()
else:
config.load_kube_config()
v1 = client.CoreV1Api()
def fetch_pods_info(label_selector):
    # List pods in NAMESPACE matching label_selector and return
    # [(phase, pod_ip), ...].
    api_response = v1.list_namespaced_pod(
        namespace=NAMESPACE, pretty=True, label_selector=label_selector)
    pod_list = []
    for item in api_response.items:
        pod_list.append((item.status.phase, item.status.pod_ip))
    return pod_list


def wait_pods_running(label_selector, desired):
    # Block (polling every 5s) until exactly `desired` matching pods are
    # in phase Running, then return their IPs.  (Python 2 print syntax.)
    print "label selector: %s, desired: %s" % (label_selector, desired)
    while True:
        pod_list = fetch_pods_info(label_selector)
        running_pod_list = filter(lambda x: x[0] == "Running", pod_list)
        print "running pod list: ", running_pod_list
        if len(running_pod_list) == int(desired):
            return [item[1] for item in running_pod_list]
        print "sleep for 5 seconds..."
        time.sleep(5)


def fetch_pserver_ips():
    # Comma-joined IPs of this job's parameter-server pods.
    label_selector = "paddle-job-pserver=%s" % PADDLE_JOB_NAME
    pod_list = fetch_pods_info(label_selector)
    pserver_ips = [item[1] for item in pod_list]
    return ",".join(pserver_ips)


def fetch_master_ip():
    # Poll until the master pod exists and has an IP.  This retry loop is
    # the "fix get master ip" change: the previous version could return an
    # empty string before the pod was scheduled.
    while True:
        label_selector = "paddle-job-master=%s" % PADDLE_JOB_NAME
        pod_list = fetch_pods_info(label_selector)
        master_ip = ""
        if len(pod_list) >=1:
            master_ip = pod_list[0][1]
        if master_ip:
            return master_ip
        time.sleep(5)


def fetch_trainer_id():
    # Index of this pod among the job's trainers, ordered by IP; None when
    # the local IP is not yet in the pod list.
    label_selector = "paddle-job=%s" % PADDLE_JOB_NAME
    pod_list = fetch_pods_info(label_selector)
    trainer_ips = [item[1] for item in pod_list]
    trainer_ips.sort()
    local_ip = socket.gethostbyname(socket.gethostname())
    for i in xrange(len(trainer_ips)):
        if trainer_ips[i] == local_ip:
            return i
    return None


if __name__ == "__main__":
    # Tiny CLI dispatcher used from the container entrypoint.
    command = sys.argv[1]
    if command == "fetch_pserver_ips":
        print fetch_pserver_ips()
    elif command == "fetch_trainer_id":
        print fetch_trainer_id()
    elif command == "fetch_master_ip":
        print fetch_master_ip()
    elif command == "wait_pods_running":
        wait_pods_running(sys.argv[2], sys.argv[3])
| #!/bin/env python
import os
import sys
import time
import socket
from kubernetes import client, config
PADDLE_JOB_NAME = os.getenv("PADDLE_JOB_NAME")
NAMESPACE = os.getenv("NAMESPACE")
PORT = os.getenv("PSERVER_PORT")
if os.getenv("KUBERNETES_SERVICE_HOST", None):
config.load_incluster_config()
else:
config.load_kube_config()
v1 = client.CoreV1Api()
def fetch_pods_info(label_selector):
    # List pods in NAMESPACE matching label_selector and return
    # [(phase, pod_ip), ...].
    api_response = v1.list_namespaced_pod(
        namespace=NAMESPACE, pretty=True, label_selector=label_selector)
    pod_list = []
    for item in api_response.items:
        pod_list.append((item.status.phase, item.status.pod_ip))
    return pod_list


def wait_pods_running(label_selector, desired):
    # Block (polling every 10s) until exactly `desired` matching pods are
    # in phase Running, then return their IPs.  (Python 2 print syntax.)
    print "label selector: %s, desired: %s" % (label_selector, desired)
    while True:
        pod_list = fetch_pods_info(label_selector)
        running_pod_list = filter(lambda x: x[0] == "Running", pod_list)
        print "running pod list: ", running_pod_list
        if len(running_pod_list) == int(desired):
            return [item[1] for item in running_pod_list]
        print "sleep for 10 seconds..."
        time.sleep(10)


def fetch_pserver_ips():
    # Comma-joined IPs of this job's parameter-server pods.
    label_selector = "paddle-job-pserver=%s" % PADDLE_JOB_NAME
    pod_list = fetch_pods_info(label_selector)
    pserver_ips = [item[1] for item in pod_list]
    return ",".join(pserver_ips)


def fetch_master_ip():
    # NOTE(review): returns "" when the master pod is not yet scheduled or
    # has no IP; the later revision wraps this in a retry loop.
    label_selector = "paddle-job-master=%s" % PADDLE_JOB_NAME
    pod_list = fetch_pods_info(label_selector)
    master_ip = ""
    if len(pod_list) >=1:
        master_ip = pod_list[0][1]
    return master_ip


def fetch_trainer_id():
    # Index of this pod among the job's trainers, ordered by IP; None when
    # the local IP is not yet in the pod list.
    label_selector = "paddle-job=%s" % PADDLE_JOB_NAME
    pod_list = fetch_pods_info(label_selector)
    trainer_ips = [item[1] for item in pod_list]
    trainer_ips.sort()
    local_ip = socket.gethostbyname(socket.gethostname())
    for i in xrange(len(trainer_ips)):
        if trainer_ips[i] == local_ip:
            return i
    return None


if __name__ == "__main__":
    # Tiny CLI dispatcher used from the container entrypoint.
    command = sys.argv[1]
    if command == "fetch_pserver_ips":
        print fetch_pserver_ips()
    elif command == "fetch_trainer_id":
        print fetch_trainer_id()
    elif command == "fetch_master_ip":
        print fetch_master_ip()
    elif command == "wait_pods_running":
        wait_pods_running(sys.argv[2], sys.argv[3])
| Python | 0 |
a0791372d7943a785ae55ed31044d0316b53a2ac | Patch release | elephantblog/__init__.py | elephantblog/__init__.py | from __future__ import absolute_import, unicode_literals
VERSION = (1, 0, 1)
__version__ = '.'.join(map(str, VERSION))
| from __future__ import absolute_import, unicode_literals
VERSION = (1, 0, 0)
__version__ = '.'.join(map(str, VERSION))
| Python | 0.000001 |
34a6ccce1d93843d53efb5985ff5bbb7ea063e31 | add force_text a la Django | babel/_compat.py | babel/_compat.py | import sys
import array
PY2 = sys.version_info[0] == 2
_identity = lambda x: x
if not PY2:
text_type = str
binary_type = bytes
string_types = (str,)
integer_types = (int, )
text_to_native = lambda s, enc: s
unichr = chr
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
from io import StringIO, BytesIO
import pickle
izip = zip
imap = map
range_type = range
cmp = lambda a, b: (a > b) - (a < b)
array_tobytes = array.array.tobytes
else:
text_type = unicode
binary_type = str
string_types = (str, unicode)
integer_types = (int, long)
text_to_native = lambda s, enc: s.encode(enc)
unichr = unichr
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
from cStringIO import StringIO as BytesIO
from StringIO import StringIO
import cPickle as pickle
from itertools import imap
from itertools import izip
range_type = xrange
cmp = cmp
array_tobytes = array.array.tostring
number_types = integer_types + (float,)
def force_text(s, encoding='utf-8', errors='strict'):
    """Coerce *s* to the text type.

    Byte strings are decoded using *encoding*/*errors*; anything else that
    is not already text is converted via ``text_type()``.
    """
    if isinstance(s, text_type):
        return s
    return s.decode(encoding, errors) if isinstance(s, binary_type) else text_type(s)
#
# Since Python 3.3, a fast decimal implementation is already included in the
# standard library. Otherwise use cdecimal when available
#
if sys.version_info[:2] >= (3, 3):
import decimal
else:
try:
import cdecimal as decimal
except ImportError:
import decimal
| import sys
import array
PY2 = sys.version_info[0] == 2
_identity = lambda x: x
if not PY2:
text_type = str
string_types = (str,)
integer_types = (int, )
unichr = chr
text_to_native = lambda s, enc: s
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
from io import StringIO, BytesIO
import pickle
izip = zip
imap = map
range_type = range
cmp = lambda a, b: (a > b) - (a < b)
array_tobytes = array.array.tobytes
else:
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
text_to_native = lambda s, enc: s.encode(enc)
unichr = unichr
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
from cStringIO import StringIO as BytesIO
from StringIO import StringIO
import cPickle as pickle
from itertools import imap
from itertools import izip
range_type = xrange
cmp = cmp
array_tobytes = array.array.tostring
number_types = integer_types + (float,)
#
# Since Python 3.3, a fast decimal implementation is already included in the
# standard library. Otherwise use cdecimal when available
#
if sys.version_info[:2] >= (3, 3):
import decimal
else:
try:
import cdecimal as decimal
except ImportError:
import decimal
| Python | 0.00049 |
b65b359402b2f38dad043b1b6d1840f0ef6d8e72 | Fix constants | openprocurement/auctions/dgf/constants.py | openprocurement/auctions/dgf/constants.py | from datetime import datetime, timedelta
from openprocurement.api.models import TZ, ORA_CODES
def read_json(name):
    """Load and parse a JSON file that lives next to this module."""
    import os.path
    from json import loads
    directory = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(directory, name)) as handle:
        raw = handle.read()
    return loads(raw)
#document types
DOCUMENT_TYPE_OFFLINE = ['x_dgfAssetFamiliarization']
DOCUMENT_TYPE_URL_ONLY = ['virtualDataRoom']
#requiremnt periods
MINIMAL_EXPOSITION_PERIOD = timedelta(days=7)
MINIMAL_PERIOD_FROM_RECTIFICATION_END = timedelta(days=5)
VERIFY_AUCTION_PROTOCOL_TIME = timedelta(days=6)
AWARD_PAYMENT_TIME = timedelta(days=20)
CONTRACT_SIGNING_TIME = timedelta(days=20)
#time constants
DGF_ID_REQUIRED_FROM = datetime(2017, 1, 1, tzinfo=TZ)
DGF_DECISION_REQUIRED_FROM = datetime(2017, 1, 1, tzinfo=TZ)
CLASSIFICATION_PRECISELY_FROM = datetime(2017, 7, 19, tzinfo=TZ)
MINIMAL_EXPOSITION_REQUIRED_FROM = datetime(2017, 11, 17, tzinfo=TZ)
DGF_ADDRESS_REQUIRED_FROM = datetime(2018, 2, 9, tzinfo=TZ)
RECTIFICATION_END_EDITING_AND_VALIDATION_REQUIRED_FROM = datetime(2018, 2, 9, tzinfo=TZ)
#codes
CAVPS_CODES = read_json('cav_ps.json')
CPVS_CODES = read_json('cpvs.json')
ORA_CODES[0:0] = ["UA-IPN", "UA-FIN"]
NUMBER_OF_BIDS_TO_BE_QUALIFIED = 2
#code units
CPV_NON_SPECIFIC_LOCATION_UNITS = ('71', '72', '73', '75', '76', '77', '79', '80', '85', '90', '92', '98')
CAV_NON_SPECIFIC_LOCATION_UNITS = ('07', '08')
| from datetime import datetime, timedelta
from openprocurement.api.models import TZ, ORA_CODES
def read_json(name):
import os.path
from json import loads
curr_dir = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(curr_dir, name)
with open(file_path) as lang_file:
data = lang_file.read()
return loads(data)
#document types
DOCUMENT_TYPE_OFFLINE = ['x_dgfAssetFamiliarization']
DOCUMENT_TYPE_URL_ONLY = ['virtualDataRoom']
#requiremnt periods
MINIMAL_EXPOSITION_PERIOD = timedelta(days=7)
MINIMAL_PERIOD_FROM_RECTIFICATION_END = timedelta(days=5)
VERIFY_AUCTION_PROTOCOL_TIME = timedelta(days=6)
AWARD_PAYMENT_TIME = timedelta(days=20)
CONTRACT_SIGNING_TIME = timedelta(days=20)
#time constants
DGF_ID_REQUIRED_FROM = datetime(2017, 1, 1, tzinfo=TZ)
DGF_DECISION_REQUIRED_FROM = datetime(2017, 1, 1, tzinfo=TZ)
CLASSIFICATION_PRECISELY_FROM = datetime(2017, 7, 19, tzinfo=TZ)
MINIMAL_EXPOSITION_REQUIRED_FROM = datetime(2017, 11, 17, tzinfo=TZ)
DGF_ADDRESS_REQUIRED_FROM = datetime(2018, 2, 9, tzinfo=TZ)
RECTIFICATION_END_EDITING_AND_VALIDATION_REQUIRED_FROM = datetime(2018, 01, 22, tzinfo=TZ)
#codes
CAVPS_CODES = read_json('cav_ps.json')
CPVS_CODES = read_json('cpvs.json')
ORA_CODES[0:0] = ["UA-IPN", "UA-FIN"]
NUMBER_OF_BIDS_TO_BE_QUALIFIED = 2
#code units
CPV_NON_SPECIFIC_LOCATION_UNITS = ('71', '72', '73', '75', '76', '77', '79', '80', '85', '90', '92', '98')
CAV_NON_SPECIFIC_LOCATION_UNITS = ('07', '08')
| Python | 0.000021 |
eed9ad741a4bd72d0b476f7580b7d22f84a7f8bb | update test base classes | openprocurement/tender/esco/tests/base.py | openprocurement/tender/esco/tests/base.py | # -*- coding: utf-8 -*-
import os
from copy import deepcopy
from openprocurement.api.tests.base import (
BaseTenderWebTest, BaseWebTest
)
from openprocurement.api.tests.base import test_organization as base_test_organization
from openprocurement.tender.openua.tests.base import test_bids as base_test_bids
from openprocurement.tender.openua.tests.base import test_tender_data as base_ua_test_data
from openprocurement.tender.openeu.tests.base import test_tender_data as base_eu_test_data
from openprocurement.tender.limited.tests.base import test_tender_data as base_reporting_test_data
test_tender_ua_data = deepcopy(base_ua_test_data)
test_tender_ua_data['procurementMethodType'] = "esco.UA"
test_tender_eu_data = deepcopy(base_eu_test_data)
test_tender_eu_data['procurementMethodType'] = "esco.EU"
test_tender_reporting_data = deepcopy(base_reporting_test_data)
test_tender_reporting_data['procurementMethodType'] = "esco.reporting"
test_bids = deepcopy(base_test_bids)
test_organization = deepcopy(base_test_organization)
class BaseESCOWebTest(BaseWebTest):
relative_to = os.path.dirname(__file__)
initial_data = None
initial_status = None
initial_bids = None
initial_lots = None
initial_auth = ('Basic', ('broker', ''))
docservice = None
def setUp(self):
super(BaseESCOWebTest, self).setUp()
self.app.authorization = self.initial_auth
self.couchdb_server = self.app.app.registry.couchdb_server
self.db = self.app.app.registry.db
if self.docservice:
self.setUpDS()
def tearDown(self):
if self.docservice:
self.tearDownDS()
del self.couchdb_server[self.db.name]
class BaseESCOContentWebTest(BaseESCOWebTest):
""" ESCO Content Test """
def setUp(self):
super(BaseESCOContentWebTest, self).setUp()
self.create_tender()
def create_tender(self):
cur_auth = self.app.authorization
self.app.authorization = self.initial_auth
data = deepcopy(self.initial_data)
if self.initial_lots:
lots = []
for i in self.initial_lots:
lot = deepcopy(i)
lot['id'] = uuid4().hex
lots.append(lot)
data['lots'] = self.initial_lots = lots
for i, item in enumerate(data['items']):
item['relatedLot'] = lots[i % len(lots)]['id']
response = self.app.post_json('/tenders', {'data': data})
tender = response.json['data']
self.tender_token = response.json['access']['token']
self.tender_id = tender['id']
status = tender['status']
if self.initial_bids:
self.initial_bids_tokens = {}
response = self.set_status('active.tendering')
status = response.json['data']['status']
bids = []
for i in self.initial_bids:
if self.initial_lots:
i = i.copy()
value = i.pop('value')
i['lotValues'] = [
{
'value': value,
'relatedLot': l['id'],
}
for l in self.initial_lots
]
response = self.app.post_json('/tenders/{}/bids'.format(self.tender_id), {'data': i})
self.assertEqual(response.status, '201 Created')
bids.append(response.json['data'])
self.initial_bids_tokens[response.json['data']['id']] = response.json['access']['token']
self.initial_bids = bids
if self.initial_status != status:
self.set_status(self.initial_status)
self.app.authorization = cur_auth
def set_status(self, status):
return
class BaseESCOUAContentWebTest(BaseESCOContentWebTest):
""" ESCO UA Content Test """
initial_data = test_tender_ua_data
class BaseESCOEUContentWebTest(BaseESCOContentWebTest):
""" ESCO EU Content Test """
initial_data = test_tender_eu_data
class BaseESCOReportingContentWebTest(BaseESCOContentWebTest):
""" ESCO Reporting Content Test """
initial_data = test_tender_reporting_data
| # -*- coding: utf-8 -*-
import os
from copy import deepcopy
from openprocurement.api.tests.base import (
BaseTenderWebTest, BaseWebTest
)
from openprocurement.tender.openua.tests.base import test_tender_data as base_ua_test_data
from openprocurement.tender.openeu.tests.base import test_tender_data as base_eu_test_data
from openprocurement.tender.limited.tests.base import test_tender_data as base_reporting_test_data
test_tender_ua_data = deepcopy(base_ua_test_data)
test_tender_ua_data['procurementMethodType'] = "esco.UA"
test_tender_eu_data = deepcopy(base_eu_test_data)
test_tender_eu_data['procurementMethodType'] = "esco.EU"
test_tender_reporting_data = deepcopy(base_reporting_test_data)
test_tender_reporting_data['procurementMethodType'] = "esco.reporting"
class BaseESCOWebTest(BaseWebTest):
relative_to = os.path.dirname(__file__)
class BaseESCOContentWebTest(BaseESCOWebTest):
initial_data = None
initial_status = None
initial_bids = None
initial_lots = None
initial_auth = ('Basic', ('broker', ''))
docservice = None
def setUp(self):
super(BaseESCOContentWebTest, self).setUp()
self.app.authorization = self.initial_auth
self.couchdb_server = self.app.app.registry.couchdb_server
self.db = self.app.app.registry.db
if self.docservice:
self.setUpDS()
def tearDown(self):
if self.docservice:
self.tearDownDS()
del self.couchdb_server[self.db.name]
class BaseESCOUAContentWebTest(BaseESCOContentWebTest):
initial_data = test_tender_eu_data
class BaseESCOEUContentWebTest(BaseESCOContentWebTest):
initial_data = test_tender_ua_data
class BaseESCOReportingContentWebTest(BaseESCOContentWebTest):
initial_data = test_tender_reporting_data
| Python | 0 |
2fb1c14f9ad0b72f1f059d7e5e233b8001c2b60b | Update auth tests | app/tests/integration/test_twitter_api.py | app/tests/integration/test_twitter_api.py | # -*- coding: utf-8 -*-
"""
Twitter API test module.
Do requests to the Twitter API using configured credentials. NB. These require
valid tokens for a Twitter dev account, plus a network connection.
"""
from unittest import TestCase
from lib.config import AppConf
from lib.twitter_api import authentication
conf = AppConf()
class TestAuth(TestCase):
def test_generateAppAccessToken(self):
auth = authentication._generateAppAccessToken()
def test_getTweepyConnection(self):
auth = authentication._generateAppAccessToken()
api = authentication._getTweepyConnection(auth)
def test_getAPIConnection(self):
"""
Test that App Access token can be used to connect to Twitter API.
"""
api = authentication.getAPIConnection(userFlow=False)
def test_getAppOnlyConnection(self):
"""
Test App-only token.
"""
api = authentication.getAppOnlyConnection()
| # -*- coding: utf-8 -*-
"""
Twitter API test module.
"""
from unittest import TestCase
from lib.twitter_api import authentication
class TestAuth(TestCase):
def test_generateAppToken(self):
auth = authentication._generateAppAccessToken()
def test_getTweepyConnection(self):
auth = authentication._generateAppAccessToken()
api = authentication._getTweepyConnection(auth)
def test_getAPIConnection(self):
"""
Test App Access token.
"""
api = authentication.getAPIConnection(userFlow=False)
def test_getAppOnlyConnection(self):
"""
Test App-only token.
"""
api = authentication.getAppOnlyConnection()
| Python | 0.000003 |
6ba94b87e11b994b02457e57d71eab9b80d28167 | fix import for django 1.8 | tests/notification/test_sites.py | tests/notification/test_sites.py | from mock import patch
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.test import TestCase, override_settings
from callisto.delivery.models import Report
from callisto.notification.models import EmailNotification
User = get_user_model()
class TempSiteID():
'''
with TempSiteID(1):
...
'''
def __init__(self, site_id):
self.site_id_temp = site_id
def __enter__(self):
self.site_id_stable = getattr(settings, 'SITE_ID', 1)
settings.SITE_ID = self.site_id_temp
def __exit__(self, *args):
settings.SITE_ID = self.site_id_stable
class SiteIDTest(TestCase):
def test_on_site_respects_SITE_ID_setting(self):
site_1_pages = 3
site_2_pages = site_1_pages + 1
site_2 = Site.objects.create()
index = 0
for i in range(site_1_pages):
EmailNotification.objects.create(name=index)
index += 1
for i in range(site_2_pages):
notification = EmailNotification.objects.create(name=index)
notification.sites.add(site_2) # site_1 is already added
index += 1
self.assertEqual(EmailNotification.objects.on_site().count(), site_1_pages + site_2_pages)
with TempSiteID(site_2.id):
self.assertEqual(EmailNotification.objects.on_site().count(), site_2_pages)
def test_multiple_added_sites_are_reflected_by_on_site(self):
site_2 = Site.objects.create()
notification = EmailNotification.objects.create()
notification.sites.add(site_2)
self.assertIn(notification, EmailNotification.objects.on_site())
with TempSiteID(site_2.id):
self.assertIn(notification, EmailNotification.objects.on_site())
class SiteRequestTest(TestCase):
def setUp(self):
super(SiteRequestTest, self).setUp()
User.objects.create_user(username='dummy', password='dummy')
self.client.login(username='dummy', password='dummy')
user = User.objects.get(username='dummy')
self.report = Report(owner=user)
self.report_key = 'bananabread! is not my key'
self.report.encrypt_report('{}', self.report_key)
self.report.save()
self.submit_url = reverse('test_submit_report', args=[self.report.pk])
@override_settings()
@patch('django.http.request.HttpRequest.get_host')
def test_can_request_pages_without_site_id_set(self, mock_get_host):
mock_get_host.return_value = Site.objects.get(id=settings.SITE_ID).domain
del settings.SITE_ID
response = self.client.get(self.submit_url)
self.assertNotEqual(response.status_code, 404)
@override_settings()
@patch('django.http.request.HttpRequest.get_host')
@patch('callisto.notification.managers.EmailNotificationQuerySet.on_site')
def test_site_passed_to_email_notification_manager(self, mock_on_site, mock_get_host):
mock_get_host.return_value = Site.objects.get(id=settings.SITE_ID).domain
site_id = settings.SITE_ID
del settings.SITE_ID
self.client.post(
self.submit_url,
data={
'name': 'test submitter',
'email': 'test@example.com',
'phone_number': '555-555-1212',
'email_confirmation': 'True',
'key': self.report_key,
},
)
mock_on_site.assert_called_with(site_id)
| from mock import patch
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.test import TestCase, override_settings
from django.urls import reverse
from callisto.delivery.models import Report
from callisto.notification.models import EmailNotification
User = get_user_model()
class TempSiteID():
'''
with TempSiteID(1):
...
'''
def __init__(self, site_id):
self.site_id_temp = site_id
def __enter__(self):
self.site_id_stable = getattr(settings, 'SITE_ID', 1)
settings.SITE_ID = self.site_id_temp
def __exit__(self, *args):
settings.SITE_ID = self.site_id_stable
class SiteIDTest(TestCase):
def test_on_site_respects_SITE_ID_setting(self):
site_1_pages = 3
site_2_pages = site_1_pages + 1
site_2 = Site.objects.create()
index = 0
for i in range(site_1_pages):
EmailNotification.objects.create(name=index)
index += 1
for i in range(site_2_pages):
notification = EmailNotification.objects.create(name=index)
notification.sites.add(site_2) # site_1 is already added
index += 1
self.assertEqual(EmailNotification.objects.on_site().count(), site_1_pages + site_2_pages)
with TempSiteID(site_2.id):
self.assertEqual(EmailNotification.objects.on_site().count(), site_2_pages)
def test_multiple_added_sites_are_reflected_by_on_site(self):
site_2 = Site.objects.create()
notification = EmailNotification.objects.create()
notification.sites.add(site_2)
self.assertIn(notification, EmailNotification.objects.on_site())
with TempSiteID(site_2.id):
self.assertIn(notification, EmailNotification.objects.on_site())
class SiteRequestTest(TestCase):
def setUp(self):
super(SiteRequestTest, self).setUp()
User.objects.create_user(username='dummy', password='dummy')
self.client.login(username='dummy', password='dummy')
user = User.objects.get(username='dummy')
self.report = Report(owner=user)
self.report_key = 'bananabread! is not my key'
self.report.encrypt_report('{}', self.report_key)
self.report.save()
self.submit_url = reverse('test_submit_report', args=[self.report.pk])
@override_settings()
@patch('django.http.request.HttpRequest.get_host')
def test_can_request_pages_without_site_id_set(self, mock_get_host):
mock_get_host.return_value = Site.objects.get(id=settings.SITE_ID).domain
del settings.SITE_ID
response = self.client.get(self.submit_url)
self.assertNotEqual(response.status_code, 404)
@override_settings()
@patch('django.http.request.HttpRequest.get_host')
@patch('callisto.notification.managers.EmailNotificationQuerySet.on_site')
def test_site_passed_to_email_notification_manager(self, mock_on_site, mock_get_host):
mock_get_host.return_value = Site.objects.get(id=settings.SITE_ID).domain
site_id = settings.SITE_ID
del settings.SITE_ID
self.client.post(
self.submit_url,
data={
'name': 'test submitter',
'email': 'test@example.com',
'phone_number': '555-555-1212',
'email_confirmation': 'True',
'key': self.report_key,
},
)
mock_on_site.assert_called_with(site_id)
| Python | 0.000001 |
0b7b9f95ab2efd16b75dd7d5689ba2329c879b28 | fix default | drillsrs/cmd/study.py | drillsrs/cmd/study.py | import argparse
import random
from datetime import datetime
from drillsrs.cmd.command_base import CommandBase
from drillsrs import db, scheduler, util
from drillsrs.cli_args import Mode
def _learn_single_card(
index: int,
num_cards_to_study: int,
card: db.Card,
mode: Mode) -> None:
print('Card #{} ({:.01%} done, {} left)'.format(
card.num,
index / num_cards_to_study,
num_cards_to_study - index))
raw_question = card.question
raw_answers = card.answers
if mode is Mode.reversed or mode is Mode.mixed and random.random() > 0.5:
raw_question, raw_answers = random.choice(raw_answers), [raw_question]
question = 'Question: %s' % raw_question
if card.tags:
question += ' [%s]' % util.format_card_tags(card.tags)
util.ask(question)
util.ask('Answers: %s' % ', '.join(raw_answers))
print('')
card.is_active = True
card.due_date = scheduler.next_due_date(card)
card.activation_date = datetime.now()
class StudyCommand(CommandBase):
names = ['study', 'learn']
description = 'begin a study session'
def decorate_arg_parser(self, parser: argparse.ArgumentParser) -> None:
parser.add_argument(
'deck', nargs='?', help='choose the deck name')
parser.add_argument(
'-n', type=int, default=10,
help='set how many flashcards to study')
parser.add_argument(
'-m', '--mode', type=Mode.parse, default=Mode.direct,
choices=list(Mode), help='learning mode. whether to involve reversed direction')
def run(self, args: argparse.Namespace) -> None:
deck_name: str = args.deck
how_many: int = args.n
mode: Mode = args.mode
with db.session_scope() as session:
deck = db.get_deck_by_name(session, deck_name)
cards_to_study = scheduler.get_cards_to_study(
session, deck, how_many)
if not cards_to_study:
print('No cards to study.')
return
print(
'%d cards to study. After seeing a card, hit enter.' %
len(cards_to_study))
print()
num_cards_to_study = len(cards_to_study)
for index, card in enumerate(cards_to_study):
_learn_single_card(index, num_cards_to_study, card, mode)
| import argparse
import random
from datetime import datetime
from typing import List
from drillsrs.cmd.command_base import CommandBase
from drillsrs import db, scheduler, util
from drillsrs.cli_args import Mode
def _learn_single_card(
index: int,
num_cards_to_study: int,
card: db.Card,
mode: Mode) -> None:
print('Card #{} ({:.01%} done, {} left)'.format(
card.num,
index / num_cards_to_study,
num_cards_to_study - index))
raw_question = card.question
raw_answers = card.answers
if mode is Mode.reversed or mode is Mode.mixed and random.random() > 0.5:
raw_question, raw_answers = random.choice(raw_answers), [raw_question]
question = 'Question: %s' % raw_question
if card.tags:
question += ' [%s]' % util.format_card_tags(card.tags)
util.ask(question)
util.ask('Answers: %s' % ', '.join(raw_answers))
print('')
card.is_active = True
card.due_date = scheduler.next_due_date(card)
card.activation_date = datetime.now()
class StudyCommand(CommandBase):
names = ['study', 'learn']
description = 'begin a study session'
def decorate_arg_parser(self, parser: argparse.ArgumentParser) -> None:
parser.add_argument(
'deck', nargs='?', help='choose the deck name')
parser.add_argument(
'-n', type=int, default=10,
help='set how many flashcards to study')
parser.add_argument(
'-m', '--mode', type=Mode.parse, default="direct",
choices=list(Mode), help='learning mode. whether to involve reversed direction')
def run(self, args: argparse.Namespace) -> None:
deck_name: str = args.deck
how_many: int = args.n
mode: Mode = args.mode
with db.session_scope() as session:
deck = db.get_deck_by_name(session, deck_name)
cards_to_study = scheduler.get_cards_to_study(
session, deck, how_many)
if not cards_to_study:
print('No cards to study.')
return
print(
'%d cards to study. After seeing a card, hit enter.' %
len(cards_to_study))
print()
num_cards_to_study = len(cards_to_study)
for index, card in enumerate(cards_to_study):
_learn_single_card(index, num_cards_to_study, card, mode)
| Python | 0.000004 |
22a983f264885f355bd6c735604a4ede370365fe | Switch to unittest format | tests/unit/py2/nupic/data/helpers_test.py | tests/unit/py2/nupic/data/helpers_test.py | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import unittest2 as unittest
from nupic.data.datasethelpers import (_getDataDirs,
findDataset,
uncompressAndCopyDataset)
class HelpersTest(unittest.TestCase):
def testGetDataDirs(self):
dd = _getDataDirs()
dd = zip(*dd)[0]
# Make sure the local data is there
self.assertTrue(('data') in dd)
# Make sure there are no duplicates
self.assertEqual(len(set(dd)), len(dd))
nta_data_path = os.environ.get('NTA_DATA_PATH', None)
os.environ['NTA_DATA_PATH'] = 'XXX:YYY'
dd = _getDataDirs()
dd = zip(*dd)[0]
self.assertTrue('XXX' in dd and 'YYY' in dd)
if nta_data_path is None:
del os.environ['NTA_DATA_PATH']
else:
os.environ['NTA_DATA_PATH'] = nta_data_path
def testFindDataset(self):
# Test non-existing dataset (relative path)
try:
findDataset('no_such_dataset.csv')
self.fail("Shoudn't have found dataset")
except:
pass
# Test non-existing dataset (absolute path)
try:
findDataset('/no_such_dataset.csv')
self.fail("Shoudn't have found dataset")
except:
pass
# Test existing dataset (relative path)
if not os.path.isdir('data'):
os.makedirs('data')
datasetPath = 'test_find_dataset.csv'
filename = 'data/test_find_dataset.csv'
# This is the uncompressed name.
fullPath = os.path.abspath(filename)
if os.path.exists(fullPath):
os.remove(fullPath)
fullPathCompressed = fullPath + ".gz"
if os.path.exists(fullPathCompressed):
os.remove(fullPathCompressed)
# Create the "dataset"
open(filename, 'w').write('123')
path = findDataset(datasetPath)
self.assertEqual(path, fullPath)
self.assertTrue(os.path.exists(path))
# This should do nothing, since it is already compressed
path = uncompressAndCopyDataset(path)
self.assertEqual(path, fullPath)
# Test existing dataset (absolute path)
self.assertEqual(findDataset(fullPath), fullPath)
# Test existing dataset (compressed path)
# Create the compressed file
import gzip
f = gzip.GzipFile(fullPathCompressed, 'w')
f.write('1,2,3\n')
f.close()
self.assertTrue(os.path.isfile(fullPathCompressed))
# Remove the original file
os.remove(fullPath)
self.assertEqual(findDataset(datasetPath), fullPathCompressed)
# This should put the uncompressed file in the same directory
path = uncompressAndCopyDataset(fullPathCompressed)
self.assertEqual(path, fullPath)
self.assertTrue(os.path.isfile(path))
os.remove(fullPath)
os.remove(fullPathCompressed)
if __name__=='__main__':
unittest.main()
| #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import sys
import datetime
import shutil
import tempfile
from nupic.support import title
from nupic.data.datasethelpers import (_getDataDirs,
findDataset,
uncompressAndCopyDataset)
def testGetDataDirs():
""" """
title()
dd = _getDataDirs()
dd = zip(*dd)[0]
# Make sure the local data is there
assert ('data') in dd
# Make sure there are no duplicates
assert len(set(dd)) == len(dd)
nta_data_path = os.environ.get('NTA_DATA_PATH', None)
os.environ['NTA_DATA_PATH'] = 'XXX:YYY'
dd = _getDataDirs()
dd = zip(*dd)[0]
assert 'XXX' in dd and 'YYY' in dd
if nta_data_path is None:
del os.environ['NTA_DATA_PATH']
else:
os.environ['NTA_DATA_PATH'] = nta_data_path
def testFindDataset():
title()
# Test non-existing dataset (relative path)
try:
findDataset('no_such_dataset.csv')
assert False
except:
pass
# Test non-existing dataset (absolute path)
try:
findDataset('/no_such_dataset.csv')
assert False
except:
pass
# Test existing dataset (relative path)
if not os.path.isdir('data'):
os.makedirs('data')
datasetPath = 'test_find_dataset.csv'
filename = 'data/test_find_dataset.csv'
# This is the uncompressed name.
fullPath = os.path.abspath(filename)
if os.path.exists(fullPath):
os.remove(fullPath)
fullPathCompressed = fullPath + ".gz"
if os.path.exists(fullPathCompressed):
os.remove(fullPathCompressed)
# Create the "dataset"
open(filename, 'w').write('123')
path = findDataset(datasetPath)
assert path == fullPath
assert os.path.exists(path)
# This should do nothing, since it is already compressed
path = uncompressAndCopyDataset(path)
assert path == fullPath
# Test existing dataset (absolute path)
assert findDataset(fullPath) == fullPath
# Test existing dataset (compressed path)
# Create the compressed file
import gzip
f = gzip.GzipFile(fullPathCompressed, 'w')
f.write('1,2,3\n')
f.close()
assert os.path.isfile(fullPathCompressed)
# Remove the original file
os.remove(fullPath)
assert findDataset(datasetPath) == fullPathCompressed
# This should put the uncompressed file in the same directory
path = uncompressAndCopyDataset(fullPathCompressed)
assert path == fullPath
assert os.path.isfile(path)
os.remove(fullPath)
os.remove(fullPathCompressed)
def test():
testFindDataset()
testGetDataDirs()
if __name__=='__main__':
test() | Python | 0 |
f91fc2a8858c243b62d1a9a369d45216fb15f443 | Change auth selenium test to use wait_element_become_present | tests/webui/steps/authentication_steps.py | tests/webui/steps/authentication_steps.py | # -*- coding: utf-8 -*-
import time
from behave import when, then, given
from toxicbuild.ui import settings
from tests.webui.steps.base_steps import ( # noqa f811
given_logged_in_webui, user_sees_main_main_page_login)
# Scenario: Someone try to access a page without being logged.
@when('someone tries to access a waterfall url without being logged')
def step_impl(context):
browser = context.browser
base_url = 'http://{}:{}/'.format(settings.TEST_WEB_HOST,
settings.TORNADO_PORT)
url = base_url + 'waterfall/some-repo'
browser.get(url)
@then('he sees the login page') # noqa f401
def step_impl(context):
browser = context.browser
def fn():
try:
el = browser.find_element_by_id('inputUsername')
except Exception:
el = None
return el
el = browser.wait_element_become_present(fn)
assert el
# Scenario: Do login
@given('the user is in the login page') # noqa f401
def step_impl(context):
browser = context.browser
base_url = 'http://{}:{}/'.format(settings.TEST_WEB_HOST,
settings.TORNADO_PORT)
url = base_url + 'login'
browser.get(url)
@when('he inserts "{user_name}" as user name')
def user_inserts_username_login(context, user_name):
browser = context.browser
username_input = browser.find_element_by_id('inputUsername')
username_input.send_keys(user_name)
@when('inserts "{passwd}" as password')
def user_inserts_password_login(context, passwd):
browser = context.browser
passwd_input = browser.find_element_by_id('inputPassword')
passwd_input.send_keys(passwd)
@when('clicks in the login button')
def user_clicks_login_button(context):
browser = context.browser
btn = browser.find_element_by_id('btn-login')
btn.click()
@then('he sees the red warning in the password field')
def user_sees_missing_required_field_warning(context):
browser = context.browser
el = browser.find_element_by_class_name('form-control-error')
assert el
@then('he sees the invalid credentials message')
def user_sees_invalid_credentials_message(context):
browser = context.browser
el = browser.find_element_by_id('login-error-msg-container')
color = el.value_of_css_property('color')
time.sleep(0.5)
assert color != 'rgb(255, 255, 255)'
# Scenario: Do logout
@when('he clicks in the logout link') # noqa f401
def step_impl(context):
browser = context.browser
el = browser.find_element_by_class_name('nav-link')
browser.click(el)
el = browser.find_elements_by_class_name('dropdown-item-logout')[-1]
browser.click(el)
| # -*- coding: utf-8 -*-
import time
from behave import when, then, given
from toxicbuild.ui import settings
from tests.webui.steps.base_steps import ( # noqa f811
given_logged_in_webui, user_sees_main_main_page_login)
# Scenario: Someone try to access a page without being logged.
@when('someone tries to access a waterfall url without being logged')
def step_impl(context):
browser = context.browser
base_url = 'http://{}:{}/'.format(settings.TEST_WEB_HOST,
settings.TORNADO_PORT)
url = base_url + 'waterfall/some-repo'
browser.get(url)
@then('he sees the login page') # noqa f401
def step_impl(context):
browser = context.browser
el = browser.find_element_by_id('inputUsername')
assert el
# Scenario: Do login
@given('the user is in the login page') # noqa f401
def step_impl(context):
browser = context.browser
base_url = 'http://{}:{}/'.format(settings.TEST_WEB_HOST,
settings.TORNADO_PORT)
url = base_url + 'login'
browser.get(url)
@when('he inserts "{user_name}" as user name')
def user_inserts_username_login(context, user_name):
browser = context.browser
username_input = browser.find_element_by_id('inputUsername')
username_input.send_keys(user_name)
@when('inserts "{passwd}" as password')
def user_inserts_password_login(context, passwd):
browser = context.browser
passwd_input = browser.find_element_by_id('inputPassword')
passwd_input.send_keys(passwd)
@when('clicks in the login button')
def user_clicks_login_button(context):
browser = context.browser
btn = browser.find_element_by_id('btn-login')
btn.click()
@then('he sees the red warning in the password field')
def user_sees_missing_required_field_warning(context):
browser = context.browser
el = browser.find_element_by_class_name('form-control-error')
assert el
@then('he sees the invalid credentials message')
def user_sees_invalid_credentials_message(context):
browser = context.browser
el = browser.find_element_by_id('login-error-msg-container')
color = el.value_of_css_property('color')
time.sleep(0.5)
assert color != 'rgb(255, 255, 255)'
# Scenario: Do logout
@when('he clicks in the logout link') # noqa f401
def step_impl(context):
browser = context.browser
el = browser.find_element_by_class_name('nav-link')
browser.click(el)
el = browser.find_elements_by_class_name('dropdown-item-logout')[-1]
browser.click(el)
| Python | 0.000001 |
9c8bfb78a1e45ab9cb78fc18318b2a39153103db | use the level arg to populate the operations list instead of appending. This allows the operation list to be changed on every iteration and keep a better record | numbers_solver.py | numbers_solver.py | #!/usr/bin/env python
numbers = [25, 7, 9, 3, 1, 8]
target = 642
# target = 225
def mult(x, y):
return x * y
def divide(x, y):
if(x > y):
return x / y
else:
return y / x
def add(x, y):
return x + y
def sub(x, y):
if(x > y):
return x - y
else:
return y - x
OPS = {
'x': mult,
'/': divide,
'+': add,
'-': sub
}
def find_abs_diff(x, y):
return abs(x - y)
class CalcOperation(object):
def __init__(self, num, op=None):
"""
The op arg is the operation that should be applied to the next
value in the chain
"""
self.num = num
self.op = op
self.next = None
def update(self, op=None, next=None, num=None):
if(num):
self.num = num
if(op):
self.op = op
if(next):
self.next = next
def __repr__(self):
return '{num} {op} '.format(num=self.num, op=self.op)
class NumberSolution(object):
def __init__(self, operations, result, target):
self.operations = operations
self.result = result
self.target = target
def __repr__(self):
if(self.result - self.target == 0):
return 'Found A Solution'
return 'Only managed to find a result of {result}, this is an error of {error}'.format(result=self.result, error=abs(self.target - self.result))
class NumberGameSolver(object):
def __init__(self, numbers, target):
self.numbers = sorted(numbers)
self.target = target
self.best = 0
self.solution = self._solve()
print(self.solution)
def __repr__(self):
return self.solution.__repr__()
def _solve(self, operations=[], used=[], current=0, level=0):
if not used:
used = [False for num in self.numbers]
if not operations:
operations = [None for num in self.numbers]
for i, val in enumerate(self.numbers):
if used[i]:
continue
if current > 0:
for op, fn in OPS.items():
combo = sorted([current, val], reverse=True)
result = fn(combo[0], combo[1])
operations[level] = CalcOperation(val)
operations[level - 1].update(op=op, next=operations[level])
if find_abs_diff(result, self.target) < find_abs_diff(self.best, self.target):
if type(result) is int:
self.best = result
# print('NEW BEST')
# print(self.best)
self.solution = NumberSolution(operations, self.best, self.target)
if self.best == self.target:
print(operations)
return self.solution
has_solution = self._solve(
[obj for obj in operations],
[True if used[j] or j == i else False for (j, num) in enumerate(self.numbers)],
result,
level+1)
if has_solution:
return has_solution
else:
operations[level] = CalcOperation(val)
has_solution = self._solve(
[obj for obj in operations],
[True if used[j] or j == i else False for (j, num) in enumerate(self.numbers)],
val,
level+1)
if has_solution:
return has_solution
solution = NumberGameSolver(numbers, target)
| #!/usr/bin/env python
numbers = [25, 7, 9, 3, 1, 8]
target = 642
# target = 225
def mult(x, y):
return x * y
def divide(x, y):
if(x > y):
return x / y
else:
return y / x
def add(x, y):
return x + y
def sub(x, y):
if(x > y):
return x - y
else:
return y - x
OPS = {
'x': mult,
'/': divide,
'+': add,
'-': sub
}
def find_abs_diff(x, y):
return abs(x - y)
class CalcOperation(object):
def __init__(self, num, op=None):
"""
The op arg is the operation that should be applied to the next
value in the chain
"""
self.num = num
self.op = op
self.next = None
def update(self, op=None, next=None, num=None):
if(num):
self.num = num
if(op):
self.op = op
if(next):
self.next = next
def __repr__(self):
return '{num} {op} '.format(num=self.num, op=self.op)
class NumberSolution(object):
def __init__(self, operations, result, target):
self.operations = operations
self.result = result
self.target = target
def __repr__(self):
if(self.result - self.target == 0):
return 'Found A Solution'
return 'Only managed to find a result of {result}, this is an error of {error}'.format(result=self.result, error=abs(self.target - self.result))
class NumberGameSolver(object):
    """Depth-first search for a way to combine `numbers` into `target`.

    Relies on the module-level OPS table and the CalcOperation /
    NumberSolution helpers defined alongside it.
    """

    def __init__(self, numbers, target):
        self.numbers = sorted(numbers)
        self.target = target
        self.best = 0  # closest integer result found so far
        self.solution = self._solve()
        print(self.solution)

    def __repr__(self):
        return self.solution.__repr__()

    def _solve(self, operations=None, used=None, current=0, level=0):
        """Recursively try every unused number / operator combination.

        Returns a NumberSolution once `target` is hit exactly, else None.
        BUG FIX: the defaults used to be mutable lists (`operations=[]`),
        so state appended during one NumberGameSolver run leaked into any
        later instance via the shared default object; None sentinels fix
        that without changing the call signature for existing callers.
        """
        if operations is None:
            operations = []
        if not used:
            used = [False for num in self.numbers]
        for i, val in enumerate(self.numbers):
            if used[i]:
                continue
            if current > 0:
                for op, fn in OPS.items():
                    # Operands are passed larger-first to match OPS' helpers.
                    combo = sorted([current, val], reverse=True)
                    result = fn(combo[0], combo[1])
                    if find_abs_diff(result, self.target) < find_abs_diff(self.best, self.target):
                        if type(result) is int:
                            self.best = result
                        # print('NEW BEST')
                        # print(self.best)
                        operations.append(CalcOperation(val))
                        operations[-2].update(op=op, next=operations[-1])
                        print(self.best)
                        print(operations)
                        self.solution = NumberSolution(operations, self.best, self.target)
                        if self.best == self.target:
                            # print(operations)
                            return self.solution
                    has_solution = self._solve(
                        [obj for obj in operations],
                        [True if used[j] or j == i else False for (j, num) in enumerate(self.numbers)],
                        result,
                        level+1)
                    if has_solution:
                        return has_solution
            else:
                # First number in the chain: nothing to combine with yet.
                operations.append(CalcOperation(val))
                has_solution = self._solve(
                    [obj for obj in operations],
                    [True if used[j] or j == i else False for (j, num) in enumerate(self.numbers)],
                    val,
                    level+1)
                if has_solution:
                    return has_solution
solution = NumberGameSolver(numbers, target)
| Python | 0 |
1ab8224372a5f839c8f0f74f3cafe7926905a7ec | Update __init__.py | nupic/__init__.py | nupic/__init__.py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
__version__ = "0.2.4.dev0"
| # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
__version__ = "0.2.4.dev0"
NUPIC_ROOT = os.environ.get('NUPIC', os.path.dirname(os.path.realpath(__file__)))
| Python | 0.000072 |
39986540e1ad1c4712405e46b988459f2abbf6e9 | Update for new python | Communication/testUDP.py | Communication/testUDP.py | # -------------------------------------------------------
import socket, traceback
import time
host = ''
#host = '192.168.201.251'
port = 1234

# Listen for (broadcast-capable) UDP datagrams on `port` for ~20 seconds
# and log every payload to saveUDP.txt, one line per datagram.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.bind((host, port))

t0 = time.time()
# BUG FIX: the output file is now managed by a with-block, so it is
# flushed and closed even when KeyboardInterrupt/SystemExit aborts the
# loop (previously filein.close() was only reached on a clean exit).
with open('saveUDP.txt', 'w') as filein:
    while time.time() - t0 < 20:
        try:
            message, address = s.recvfrom(9000)
            print(message)
            filein.write('%s\n' % (message))
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            # Log unexpected errors but keep listening for more packets.
            traceback.print_exc()
s.close()
| # -------------------------------------------------------
import socket, traceback
import time
host = ''
port = 12345
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.bind((host, port))
filein = open('saveUDP.txt', 'w')
t0 = time.time()
while time.time()-t0 < 20:
try:
message, address = s.recvfrom(9000)
print message
filein.write('%s\n' % (message))
except (KeyboardInterrupt, SystemExit):
raise
except:
traceback.print_exc()
filein.close()
# -------------------------------------------------------
| Python | 0 |
862c42a8abf0836604f56a9008018f34c405ca13 | update version number | f5/__init__.py | f5/__init__.py | # Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__version__ = '0.1.6'
| # Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__version__ = '0.1.5'
| Python | 0.000002 |
5ec3da094bd9d6d1517425b27223ad5da6bfe804 | Use run_su in the restart command as well | fabfile/app.py | fabfile/app.py | #:coding=utf8:
import pipes
from fabric.api import (
env, task, roles,
prefix, local as localexec, run, execute,
put, runs_once, settings,
)
env.venv_path = '/var/www/venvs/homepage'
env.deploy_user = 'supervisord'
def run_su(cmd, user=None):
    """Run `cmd` on the remote host as `user` via ``sudo su``.

    The Google Compute Engine images prompt for a password when running
    ``sudo -u``, so we sudo to root and then su to the target user.
    Fabric's prefix() context is applied by hand so it takes effect in
    the shell that executes the command, not the shell running su.
    """
    if user is None:
        user = env.sudo_user or 'root'
    if user == env.user:
        run(cmd)
    else:
        # Temporarily disable prefixes and handle them ourselves.
        prefixes = list(env.command_prefixes)
        with settings(command_prefixes=[]):
            glue = " && "
            prefix = (glue.join(prefixes) + glue) if prefixes else ""
            cmd = prefix + cmd
            # NOTE: Quote the command since it's being run in a shell.
            # BUG FIX: the prefix is already applied on the line above;
            # the previous code quoted `prefix + cmd`, prepending it a
            # second time.
            cmd = "%s %s" % (env.shell, pipes.quote(cmd))
            # NOTE: Quote again since it's being run under su
            run('sudo su %s -c %s' % (user, pipes.quote(cmd)))
def virtualenv(path=None):
    """Ensure the deploy virtualenv exists and return an activation prefix.

    Intended for use as ``with virtualenv():`` so subsequent remote
    commands run inside ``env.venv_path``. The `path` argument is
    currently unused.
    """
    run_su('mkdir -p `dirname %(venv_path)s`' % env, user=env.deploy_user)
    run_su('if [ ! -d %(venv_path)s ];then '
           '    virtualenv %(venv_path)s;'
           'fi' % env, user=env.deploy_user)
    return prefix('source %(venv_path)s/bin/activate' % env)
@task
@roles('appservers')
def restart():
    """
    Restart the application via supervisord (run_su defaults to root).
    """
    run_su('supervisorctl restart homepage')
@task
@roles('appservers')
def update():
    """
    Update the application.

    Builds an sdist locally, uploads it to a temp file on each app
    server, then pip-installs it into the deploy virtualenv.
    """
    name = localexec("python setup.py --name", capture=True)
    version = localexec("python setup.py --version", capture=True)
    localexec("python setup.py sdist")
    tmp_path = run("mktemp --suffix=.tar.gz")
    # NOTE(review): 0755 is Python 2 octal syntax; Python 3 requires
    # 0o755 — confirm which interpreter runs this fabfile.
    put("dist/%s-%s.tar.gz" % (name, version), tmp_path, mode=0755)
    with virtualenv():
        run_su('pip install %s' % tmp_path, user=env.deploy_user)
@task
@runs_once
@roles('appservers')
def migrate_db():
    """
    Migrate the database.

    Declared @runs_once so the migration executes on only one host even
    when multiple app servers are targeted.
    """
    with virtualenv():
        # NOTE: The app runs as supervisord so
        # we run the migrate command as that user also.
        run_su("homepage migrate", user=env.deploy_user)
@task
@roles('appservers')
def deploy():
    """
    Deploy the latest version of the app.

    Updates every app server, migrates the DB once, then restarts the
    service.
    """
    execute(update)
    execute(migrate_db)
    execute(restart)
| #:coding=utf8:
import pipes
from fabric.api import (
env, task, roles,
prefix, local as localexec, sudo, run, execute,
put, runs_once, settings,
)
env.venv_path = '/var/www/venvs/homepage'
env.deploy_user = 'supervisord'
def run_su(cmd, user=None):
# The Google Compute Engine images prompt for a password when running
# sudo -u so we sudo to root and then su to the user we want to run the
# command as.
if user is None:
user = env.sudo_user or 'root'
if user == env.user:
run(cmd)
else:
# Temporarily disable prefixes and handle them ourselves.
# We need to do that so that the prefix takes effect in the
# shell where the command is executed rather than the shell
# where su is executed.
prefixes = list(env.command_prefixes)
with settings(command_prefixes=[]):
# Support the prefix() context processor.
glue = " && "
prefix = (glue.join(prefixes) + glue) if prefixes else ""
cmd = prefix + cmd
# NOTE: Quote the command since it's being run in a shell.
cmd = "%s %s" % (env.shell, pipes.quote(prefix + cmd))
# NOTE: Quote again since it's being run under su
run('sudo su %s -c %s' % (user, pipes.quote(cmd)))
def virtualenv(path=None):
run_su('mkdir -p `dirname %(venv_path)s`' % env, user=env.deploy_user)
run_su('if [ ! -d %(venv_path)s ];then '
' virtualenv %(venv_path)s;'
'fi' % env, user=env.deploy_user)
return prefix('source %(venv_path)s/bin/activate' % env)
@task
@roles('appservers')
def restart():
"""
Restart the application.
"""
sudo('supervisorctl restart homepage')
@task
@roles('appservers')
def update():
"""
Update the application.
"""
name = localexec("python setup.py --name", capture=True)
version = localexec("python setup.py --version", capture=True)
localexec("python setup.py sdist")
tmp_path = run("mktemp --suffix=.tar.gz")
put("dist/%s-%s.tar.gz" % (name, version), tmp_path, mode=0755)
with virtualenv():
run_su('pip install %s' % tmp_path, user=env.deploy_user)
@task
@runs_once
@roles('appservers')
def migrate_db():
"""
Migrate the database.
"""
with virtualenv():
# NOTE: The app runs as supervisord so
# we run the migrate command as that user also.
run_su("homepage migrate", user=env.deploy_user)
@task
@roles('appservers')
def deploy():
"""
Deploy the latest version of the app
"""
execute(update)
execute(migrate_db)
execute(restart)
| Python | 0 |
db36e08a81d16463d8c76b896593aaeb91c057a0 | Refactor into get_check_digit_from_checkable_int method | falcom/luhn.py | falcom/luhn.py | # Copyright (c) 2017 The Regents of the University of Michigan.
# All Rights Reserved. Licensed according to the terms of the Revised
# BSD License. See LICENSE.txt for details.
def get_check_digit_from_checkable_int (number):
    """Compute the Luhn check digit for a value already coerced to int."""
    tens, ones = divmod(number, 10)
    return 9 * (tens + rotate_digit(ones)) % 10

def rotate_digit (digit):
    """Luhn rotation of one digit: double it, subtracting 9 on overflow."""
    doubled = digit * 2
    return doubled - 9 if doubled > 9 else doubled

def get_check_digit (number = None):
    """Return the check digit for a truthy input, otherwise None."""
    return get_check_digit_from_checkable_int(int(number)) if number else None
| # Copyright (c) 2017 The Regents of the University of Michigan.
# All Rights Reserved. Licensed according to the terms of the Revised
# BSD License. See LICENSE.txt for details.
def convert_into_luhn_checkable_int (number):
if number:
return int(number)
else:
return None
def rotate_digit (digit):
if digit > 4:
return (digit * 2) - 9
else:
return digit * 2
def get_check_digit (number = None):
number = convert_into_luhn_checkable_int(number)
if number is None:
return None
return (9 * ((number // 10) + rotate_digit(number % 10))) % 10
| Python | 0.002407 |
29be4cad4ab90fe5d1fc087f0de2e8a575ced40b | Bump version | nanoservice/version.py | nanoservice/version.py | VERSION = '0.3.1'
| VERSION = '0.3.0'
| Python | 0 |
f66e3e965c00c455608dba994575098e1cd246ae | Update request_tracking_codes.py | samples/request_tracking_codes.py | samples/request_tracking_codes.py | # Copyright 2017 Adler Medrado
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..correios.client import Correios
from ..correios.models.user import User, Service
def get_tracking_codes(service, quantity):
    """Request `quantity` Correios tracking codes for a service code.

    `service` is a numeric Correios service id such as '41068' (PAC) or
    '40068' (SEDEX), per the example calls at the bottom of the module.
    The credentials below are placeholders to be replaced by the caller.
    """
    olist_user = User('Your Company\'s Name', 'Your Company\'s CNPJ')
    client = Correios('Your Correio\'s username', 'Your correio\'s password')
    tracking_codes = client.request_tracking_codes(olist_user, Service.get(service), quantity=quantity)
    print(tracking_codes)
get_tracking_codes('41068', 1) # Request 1 PAC Tracking Code
get_tracking_codes('40068', 1) # Request 1 SEDEX Tracking Code
| from ..correios.client import Correios
from ..correios.models.user import User, Service
def get_tracking_codes(service, quantity):
olist_user = User('Your Company\'s Name', 'Your Company\'s CNPJ')
client = Correios('Your Correio\'s username', 'Your correio\'s password')
tracking_codes = client.request_tracking_codes(olist_user, Service.get(service), quantity=quantity)
print(tracking_codes)
get_tracking_codes('41068', 1) # Request 1 PAC Tracking Code
get_tracking_codes('40068', 1) # Request 1 SEDEX Tracking Code
| Python | 0.000003 |
f3791ea0ed11d46edf9998b80fd1ddd54d7e9b20 | Bump to pre-release version 1.0.rc2. | farcy/const.py | farcy/const.py | """Constants used throughout Farcy."""
import os
import re
__version__ = '1.0.rc2'
# Plain-text version banner, e.g. "farcy v1.0.rc2".
VERSION_STR = 'farcy v{0}'.format(__version__)

# Per-user configuration directory.
CONFIG_DIR = os.path.expanduser('~/.config/farcy')
# Markdown-linked version banner pointing at the project repository.
MD_VERSION_STR = ('[{0}](https://github.com/appfolio/farcy)'
                  .format(VERSION_STR))
# Italicised version banner; presumably the first line of comments farcy
# posts so it can recognise its own output — verify against the poster.
FARCY_COMMENT_START = '_{0}_'.format(MD_VERSION_STR)
# Matches runs of digits (captured).
NUMBER_RE = re.compile('(\d+)')

# Congratulatory phrases (one is presumably chosen for approvals).
APPROVAL_PHRASES = [x.strip() for x in """
Amazing
Bravo
Excellent
Great job
Lookin' good
Outstanding work
Perfect
Spectacular
Tremendous
Well done
Wicked awesome
Winning
Wonderful
Wow
You are awesome
You do not miss a thing
""".split('\n') if x.strip()]

# Context label used for GitHub commit statuses.
STATUS_CONTEXT = 'farcy'
| """Constants used throughout Farcy."""
import os
import re
__version__ = '1.0.rc1'
VERSION_STR = 'farcy v{0}'.format(__version__)
CONFIG_DIR = os.path.expanduser('~/.config/farcy')
MD_VERSION_STR = ('[{0}](https://github.com/appfolio/farcy)'
.format(VERSION_STR))
FARCY_COMMENT_START = '_{0}_'.format(MD_VERSION_STR)
NUMBER_RE = re.compile('(\d+)')
APPROVAL_PHRASES = [x.strip() for x in """
Amazing
Bravo
Excellent
Great job
Lookin' good
Outstanding work
Perfect
Spectacular
Tremendous
Well done
Wicked awesome
Winning
Wonderful
Wow
You are awesome
You do not miss a thing
""".split('\n') if x.strip()]
STATUS_CONTEXT = 'farcy'
| Python | 0 |
035d5feee8ea0691e5777a7b96c362877bcf01ca | Add logging | consumerSQS/consumer.py | consumerSQS/consumer.py | import boto3
import logging
from config import config
import answer
import json
import time
import librarian
def work(message):
    """Decode a raw SQS message body (a JSON string) and dispatch it.

    Returns sendToManager's result (True once dispatched).
    """
    message = json.loads(message)
    # Debug tracing of the decoded payload.
    print(type(message))
    print(message)
    return sendToManager(message)
def sendToManager(message):
    """Route a decoded message to the librarian user manager.

    Two payload shapes are handled:
    - Messenger-webhook events (they carry an "entry" key): forward the
      message text, falling back to the first attachment URL when no
      "text" field exists.
    - Otherwise a payload with "state" (used as a user id) and "code" —
      presumably an OAuth-style callback; TODO confirm with the producer.
    Always returns True.
    """
    if("entry" in message):
        um = librarian.User_manager(message["entry"][0]["messaging"][0]["sender"]["id"])
        try:
            um.user_event("MESSAGE",str(message["entry"][0]["messaging"][0]["message"]["text"]))
        except Exception as e:
            # No "text" field (e.g. an image): send the attachment URL instead.
            print(e)
            um.user_event("MESSAGE",str(message["entry"][0]["messaging"][0]["message"]["attachments"][0]["url"]))
        return True
    else:
        um = librarian.User_manager(int(message["state"]))
        um.user_event("MESSAGE",str(message["code"]))
        return True
def parrot_work(message):
    """Echo a Messenger text message back to its sender (debug handler).

    Returns True on success, False when the payload lacks the expected
    webhook shape or sending fails.
    """
    try :
        msgToSend = "I ear your request :"+str(message["entry"][0]["messaging"][0]["message"]["text"])
        idReceiver = message["entry"][0]["messaging"][0]["sender"]["id"]
        print (idReceiver+":"+msgToSend)
        answer.send_message(msgToSend,idReceiver)
        answer.send_message("I will",idReceiver)
        return True
    except Exception as e:
        print(e)
        return False
# Poll the FIFO queue forever; credentials come from config.py.
sqs = boto3.resource('sqs',aws_access_key_id = config["access_key"], aws_secret_access_key=config["secret_access_key"], region_name="us-west-2", endpoint_url="https://sqs.us-west-2.amazonaws.com/731910755973/MessagesYouMus.fifo")
while True:
    queue = sqs.get_queue_by_name(QueueName='MessagesYouMus.fifo')
    for msg in queue.receive_messages():
        try:
            print("Received ="+str(json.loads(msg.body)))
            # NOTE: the body is decoded twice (once above for the trace).
            # The message is deleted only after work() succeeds, so a
            # failure leaves it on the queue for redelivery.
            status = work(json.loads(msg.body))
            msg.delete()
        except Exception as e:
            print(e)
        time.sleep(0.2)
| import boto3
import logging
from config import config
import answer
import json
import time
import librarian
def work(message):
message = json.loads(message)
print(type(message))
print(message)
return sendToManager(message)
def sendToManager(message):
if("entry" in message):
um = librarian.User_manager(message["entry"][0]["messaging"][0]["sender"]["id"])
try:
um.user_event("MESSAGE",str(message["entry"][0]["messaging"][0]["message"]["text"]))
except Exception as e:
print(e)
um.user_event("MESSAGE",str(message["entry"][0]["messaging"][0]["message"]["attachments"][0]["url"]))
return True
else:
um = librarian.User_manager(int(message["state"]))
um.user_event("MESSAGE",str(message["code"]))
return True
def parrot_work(message):
try :
msgToSend = "I ear your request :"+str(message["entry"][0]["messaging"][0]["message"]["text"])
idReceiver = message["entry"][0]["messaging"][0]["sender"]["id"]
print (idReceiver+":"+msgToSend)
answer.send_message(msgToSend,idReceiver)
answer.send_message("I will",idReceiver)
return True
except Exception as e:
print(e)
return False
sqs = boto3.resource('sqs',aws_access_key_id = config["access_key"], aws_secret_access_key=config["secret_access_key"], region_name="us-west-2", endpoint_url="https://sqs.us-west-2.amazonaws.com/731910755973/MessagesYouMus.fifo")
while True:
queue = sqs.get_queue_by_name(QueueName='MessagesYouMus.fifo')
for msg in queue.receive_messages():
try:
print("Received ="+str(json.loads(msg.body)))
status = work(json.loads(msg.body))
msg.delete()
except:
pass
time.sleep(0.2)
| Python | 0.000002 |
e14b8c6b06c75414f42f730e4c1e1a9208e335b0 | correct shebang | fetch/fetch.py | fetch/fetch.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Greedy climate data fetch
"""
filename = 'wwis.json'
indexurl = 'http://worldweather.wmo.int/en/json/full_city_list.txt'
baseurl = 'http://worldweather.wmo.int/en/json/{0}_en.xml'
guideurl = 'http://worldweather.wmo.int/en/dataguide.html'
notice = 'Please note the guidelines at {0}'
usage = """{0} <index file> [output file]
Data will be downloaded into {1} if no second argument given.
The full index file is available for download at {2}
You can re-run this script to continue downloading in the case of failures."""
from sys import argv
import urllib.request
import csv
import simplejson as json
import time
import sys
def fetch_entry(id):
    """Download and decode one city's JSON record from the WWIS service.

    Returns the parsed object, or -1 on any fetch/parse failure so the
    caller can bail out and save partial progress.
    """
    url = baseurl.format(id)
    try:
        f = urllib.request.urlopen(url).read()
        entry = json.loads(f.decode())
    except Exception:
        # BUG FIX: narrowed from a bare `except:` so Ctrl-C / SystemExit
        # interrupt the crawl instead of being reported as a fetch error.
        return -1
    time.sleep(0.1)  # don't DoS
    return entry
def nice_entry(entry, country):
    """Pull the 'city' record out of a WWIS reply and tag it with its country.

    Note: mutates (and returns) the dict stored under entry['city'].
    """
    city_record = entry['city']
    city_record.update(country=country)
    return city_record
if __name__ == '__main__':
    if len(argv) < 2:
        print(usage.format(argv[0], filename, indexurl))
        exit(1)
    print(notice.format(guideurl))
    # Optional second argument overrides the output/resume file.
    if len(argv) > 2:
        filename = argv[2]
    data = {}
    # Resume support: reload anything downloaded on a previous run; a
    # missing or unreadable file just means starting from scratch.
    try:
        with open(filename, 'r') as f:
            data = json.load(f)
    except:
        pass
    # Index rows: row[0] is the country name, row[2] the city id used in
    # the JSON URL (see fetch_entry/baseurl).
    with open(argv[1], 'r', newline='') as f:
        reader = csv.reader(f, delimiter=';', quotechar='"')
        for row in reader:
            if len(row) < 3:
                print('?', end='', file=sys.stderr)  # malformed row
                continue
            if row[0] == 'Country':
                continue  # header row
            key = row[2]
            if key in data:
                print('✓', end='', file=sys.stderr)  # already fetched
                continue
            sys.stderr.flush()
            entry = fetch_entry(key)
            if entry == -1:
                print('⚡', end='', file=sys.stderr)
                break # bail out, save what we have
            print('.', end='', file=sys.stderr)
            data[key] = nice_entry(entry, row[0])
    print('', file=sys.stderr)
    # Persist everything gathered so far (also reached after a bail-out).
    with open(filename, 'w') as f:
        json.dump(data, f, sort_keys=True, indent='\t')
| #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Greedy climate data fetch
"""
filename = 'wwis.json'
indexurl = 'http://worldweather.wmo.int/en/json/full_city_list.txt'
baseurl = 'http://worldweather.wmo.int/en/json/{0}_en.xml'
guideurl = 'http://worldweather.wmo.int/en/dataguide.html'
notice = 'Please note the guidelines at {0}'
usage = """{0} <index file> [output file]
Data will be downloaded into {1} if no second argument given.
The full index file is available for download at {2}
You can re-run this script to continue downloading in the case of failures."""
from sys import argv
import urllib.request
import csv
import simplejson as json
import time
import sys
def fetch_entry(id):
url = baseurl.format(id)
try:
f = urllib.request.urlopen(url).read()
entry = json.loads(f.decode())
except:
return -1
time.sleep(0.1) # don't DoS
return entry
def nice_entry(entry, country):
data = entry['city']
data['country'] = country
return data
if __name__ == '__main__':
if len(argv) < 2:
print(usage.format(argv[0], filename, indexurl))
exit(1)
print(notice.format(guideurl))
if len(argv) > 2:
filename = argv[2]
data = {}
try:
with open(filename, 'r') as f:
data = json.load(f)
except:
pass
with open(argv[1], 'r', newline='') as f:
reader = csv.reader(f, delimiter=';', quotechar='"')
for row in reader:
if len(row) < 3:
print('?', end='', file=sys.stderr)
continue
if row[0] == 'Country':
continue
key = row[2]
if key in data:
print('✓', end='', file=sys.stderr)
continue
sys.stderr.flush()
entry = fetch_entry(key)
if entry == -1:
print('⚡', end='', file=sys.stderr)
break # bail out, save what we have
print('.', end='', file=sys.stderr)
data[key] = nice_entry(entry, row[0])
print('', file=sys.stderr)
with open(filename, 'w') as f:
json.dump(data, f, sort_keys=True, indent='\t')
| Python | 0.997942 |
47555ef5589ccc07a44d9589c93be4fa193625c7 | change qe setting | tests/test_ase_setup/qe_setup.py | tests/test_ase_setup/qe_setup.py | import os
from ase.calculators.espresso import Espresso
# set up executable
label = 'AgI'
input_file_name = label+'.pwi'
output_file_name = label+'.pwo'
no_cpus = 1
npool = 1
pw_loc = os.environ.get('PWSCF_COMMAND')
#pw_loc = '/n/home08/xiey/q-e/bin/pw.x'
#os.environ['ASE_ESPRESSO_COMMAND'] = 'srun -n {0} --mpi=pmi2 {1} -npool {2} < {3} > {4}'.format(no_cpus,
# pw_loc, npool, input_file_name, output_file_name)
os.environ['ASE_ESPRESSO_COMMAND'] = '{0} < {1} > {2}'.format(pw_loc, input_file_name, output_file_name)
# set up input parameters
input_data = {'control': {'prefix': label,
'pseudo_dir': 'test_files/pseudos/',
'outdir': './out',
#'verbosity': 'high',
'calculation': 'scf'},
'system': {'ibrav': 0,
'ecutwfc': 20, # 45,
'ecutrho': 40, # 181,
'smearing': 'gauss',
'degauss': 0.02,
'occupations': 'smearing'},
'electrons': {'conv_thr': 1.0e-02,
#'startingwfc': 'file',
'electron_maxstep': 100,
'mixing_beta': 0.7}}
# pseudo-potentials
ion_pseudo = {'Ag': 'Ag.pbe-n-kjpaw_psl.1.0.0.UPF',
'I': 'I.pbe-n-kjpaw_psl.1.0.0.UPF'}
# create ASE calculator
dft_calc = Espresso(pseudopotentials=ion_pseudo, label=label,
tstress=True, tprnfor=True, nosym=True, #noinv=True,
input_data=input_data, kpts=(1,1,1))
| import os
from ase.calculators.espresso import Espresso
# set up executable
label = 'AgI'
input_file_name = label+'.pwi'
output_file_name = label+'.pwo'
no_cpus = 1
npool = 1
pw_loc = os.environ.get('PWSCF_COMMAND')
#pw_loc = '/n/home08/xiey/q-e/bin/pw.x'
#os.environ['ASE_ESPRESSO_COMMAND'] = 'srun -n {0} --mpi=pmi2 {1} -npool {2} < {3} > {4}'.format(no_cpus,
# pw_loc, npool, input_file_name, output_file_name)
os.environ['ASE_ESPRESSO_COMMAND'] = '{0} < {1} > {2}'.format(pw_loc, input_file_name, output_file_name)
# set up input parameters
input_data = {'control': {'prefix': label,
'pseudo_dir': 'test_files/pseudos/',
'outdir': './out',
#'verbosity': 'high',
'calculation': 'scf'},
'system': {'ibrav': 0,
'ecutwfc': 20, # 45,
'ecutrho': 40, # 181,
'smearing': 'gauss',
'degauss': 0.02,
'occupations': 'smearing'},
'electrons': {'conv_thr': 1.0e-03,
#'startingwfc': 'file',
'electron_maxstep': 100,
'mixing_beta': 0.5}}
# pseudo-potentials
ion_pseudo = {'Ag': 'Ag.pbe-n-kjpaw_psl.1.0.0.UPF',
'I': 'I.pbe-n-kjpaw_psl.1.0.0.UPF'}
# create ASE calculator
dft_calc = Espresso(pseudopotentials=ion_pseudo, label=label,
tstress=True, tprnfor=True, nosym=True, #noinv=True,
input_data=input_data, kpts=(1,1,1))
dft_calc.parameters['parallel'] = False
| Python | 0.000001 |
aeca55a5ca5a8b15314cc7bd31a3c89361436318 | Add return type check test | tests/test_pandas_integration.py | tests/test_pandas_integration.py | from unittest import TestCase, main
import numpy as np
import pandas as pd
import numpy.testing as npt
from nimble import Events
class TestAsPandasCondition(TestCase):
    """Events built from a pandas boolean Series condition."""

    def setUp(self):
        conditional_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
        condition = (conditional_series > 0)
        self.events = Events(condition, sample_period=1)

    def test_as_series(self):
        validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
        test_series = self.events.as_series()
        # BUG FIX: Series.equals returns a bool; the previous code
        # discarded it, so the comparison never actually asserted.
        self.assertTrue(test_series.equals(validation_series))
        self.assertEqual(pd.core.series.Series, type(test_series))

    def test_as_array(self):
        validation_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
        test_array = self.events.as_array()
        npt.assert_array_equal(validation_array, test_array)
        # BUG FIX: previously asserted the type of the locally built
        # validation array (trivially ndarray); check as_array()'s
        # return value instead.
        self.assertEqual(np.ndarray, type(test_array))
class TestAsNpArrCondition(TestCase):
    """Events built from a numpy boolean array condition."""

    def setUp(self):
        conditional_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
        condition = (conditional_array > 0)
        self.events = Events(condition, sample_period=1)

    def test_as_series(self):
        validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
        test_series = self.events.as_series()
        # BUG FIX: Series.equals returns a bool; the previous code
        # discarded it, so the comparison never actually asserted.
        self.assertTrue(test_series.equals(validation_series))
        self.assertEqual(pd.core.series.Series, type(test_series))

    def test_as_array(self):
        validation_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
        test_array = self.events.as_array()
        npt.assert_array_equal(validation_array, test_array)
        # BUG FIX: previously asserted the type of the locally built
        # validation array (trivially ndarray); check as_array()'s
        # return value instead.
        self.assertEqual(np.ndarray, type(test_array))
if __name__ == '__main__':
main()
| from unittest import TestCase, main
import numpy as np
import pandas as pd
import numpy.testing as npt
from nimble import Events
class TestAsPandasCondition(TestCase):
def setUp(self):
conditional_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
condition = (conditional_series > 0)
self.events = Events(condition, sample_period=1)
def test_as_series(self):
validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
test_series = self.events.as_series()
test_series.equals(validation_series)
def test_as_array(self):
validation_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
npt.assert_array_equal(validation_array, self.events.as_array())
class TestAsNpArrCondition(TestCase):
def setUp(self):
conditional_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
condition = (conditional_array > 0)
self.events = Events(condition, sample_period=1)
def test_as_series(self):
validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
test_series = self.events.as_series()
test_series.equals(validation_series)
def test_as_array(self):
validation_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
npt.assert_array_equal(validation_array, self.events.as_array())
self.asser
if __name__ == '__main__':
main()
| Python | 0.000001 |
d6a2e89736932988650c9244139aae1c1b543a88 | Stop skipping tests of secondary reads in Sharded mode. | tests/test_slave_okay_sharded.py | tests/test_slave_okay_sharded.py | # Copyright 2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test PyMongo's SlaveOkay with:
- A direct connection to a standalone.
- A direct connection to a slave.
- A direct connection to a mongos.
"""
import itertools
from pymongo.read_preferences import make_read_preference
from pymongo.read_preferences import read_pref_mode_from_name
try:
from queue import Queue
except ImportError:
from Queue import Queue
from mockupdb import MockupDB, going
from pymongo import MongoClient
from tests import unittest
from tests.operations import operations
class TestSlaveOkaySharded(unittest.TestCase):
    """Checks the SlaveOkay wire flag against a pair of mock mongos servers."""

    def setup_server(self, wire_version):
        """Start two MockupDB servers that masquerade as mongos instances.

        Both servers push every received request onto self.q, so a test
        can assert on whichever server the client happened to pick;
        self.mongoses_uri connects to the pair.
        """
        self.mongos1, self.mongos2 = MockupDB(), MockupDB()

        # Collect queries to either server in one queue.
        self.q = Queue()
        for server in self.mongos1, self.mongos2:
            server.subscribe(self.q.put)
            server.run()
            self.addCleanup(server.stop)
            server.autoresponds('ismaster', maxWireVersion=wire_version,
                                ismaster=True, msg='isdbgrid')

        self.mongoses_uri = 'mongodb://%s,%s' % (self.mongos1.address_string,
                                                 self.mongos2.address_string)
def create_slave_ok_sharded_test(mode, operation):
    """Build a test asserting whether SlaveOkay is set for `mode`/`operation`.

    The operation's op_type fixes the expectation: always-use-secondary
    ops always set the flag, may-use-secondary ops set it for any
    non-primary read preference, must-use-primary ops never set it.
    """
    def test(self):
        self.setup_server(operation.wire_version)
        if operation.op_type == 'always-use-secondary':
            slave_ok = True
        elif operation.op_type == 'may-use-secondary':
            slave_ok = mode != 'primary'
        elif operation.op_type == 'must-use-primary':
            slave_ok = False
        else:
            assert False, 'unrecognized op_type %r' % operation.op_type

        pref = make_read_preference(read_pref_mode_from_name(mode),
                                    tag_sets=None)

        client = MongoClient(self.mongoses_uri, read_preference=pref)
        with going(operation.function, client):
            # Reply from whichever mock mongos received the operation.
            request = self.q.get(timeout=1)
            request.reply(operation.reply)

        if slave_ok:
            self.assertTrue(request.slave_ok, 'SlaveOkay not set')
        else:
            self.assertFalse(request.slave_ok, 'SlaveOkay set')

    return test
def generate_slave_ok_sharded_tests():
    """Attach one generated test method per (read mode, operation) pair."""
    modes = 'primary', 'secondary', 'nearest'
    matrix = itertools.product(modes, operations)

    for entry in matrix:
        mode, operation = entry
        test = create_slave_ok_sharded_test(mode, operation)
        test_name = 'test_%s_with_mode_%s' % (
            operation.name.replace(' ', '_'), mode)
        test.__name__ = test_name
        setattr(TestSlaveOkaySharded, test_name, test)
generate_slave_ok_sharded_tests()
if __name__ == '__main__':
unittest.main()
| # Copyright 2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test PyMongo's SlaveOkay with:
- A direct connection to a standalone.
- A direct connection to a slave.
- A direct connection to a mongos.
"""
import itertools
from pymongo.read_preferences import make_read_preference
from pymongo.read_preferences import read_pref_mode_from_name
try:
from queue import Queue
except ImportError:
from Queue import Queue
from mockupdb import MockupDB, going
from pymongo import MongoClient
from tests import unittest
from tests.operations import operations
class TestSlaveOkaySharded(unittest.TestCase):
def setup_server(self, wire_version):
self.mongos1, self.mongos2 = MockupDB(), MockupDB()
# Collect queries to either server in one queue.
self.q = Queue()
for server in self.mongos1, self.mongos2:
server.subscribe(self.q.put)
server.run()
self.addCleanup(server.stop)
server.autoresponds('ismaster', maxWireVersion=wire_version,
ismaster=True, msg='isdbgrid')
self.mongoses_uri = 'mongodb://%s,%s' % (self.mongos1.address_string,
self.mongos2.address_string)
def create_slave_ok_sharded_test(mode, operation):
def test(self):
if mode == 'secondary':
raise unittest.SkipTest('PYTHON-868')
self.setup_server(operation.wire_version)
if operation.op_type == 'always-use-secondary':
raise unittest.SkipTest('PYTHON-868')
slave_ok = True
elif operation.op_type == 'may-use-secondary':
slave_ok = mode != 'primary'
elif operation.op_type == 'must-use-primary':
slave_ok = False
else:
assert False, 'unrecognized op_type %r' % operation.op_type
pref = make_read_preference(read_pref_mode_from_name(mode),
tag_sets=None)
client = MongoClient(self.mongoses_uri, read_preference=pref)
with going(operation.function, client):
request = self.q.get(timeout=1)
request.reply(operation.reply)
if slave_ok:
self.assertTrue(request.slave_ok, 'SlaveOkay not set')
else:
self.assertFalse(request.slave_ok, 'SlaveOkay set')
return test
def generate_slave_ok_sharded_tests():
modes = 'primary', 'secondary', 'nearest'
matrix = itertools.product(modes, operations)
for entry in matrix:
mode, operation = entry
test = create_slave_ok_sharded_test(mode, operation)
test_name = 'test_%s_with_mode_%s' % (
operation.name.replace(' ', '_'), mode)
test.__name__ = test_name
setattr(TestSlaveOkaySharded, test_name, test)
generate_slave_ok_sharded_tests()
if __name__ == '__main__':
unittest.main()
| Python | 0 |
bb330514b5a18389c451cf3ba58a72d7711c2ceb | Add unit tests | tests/unit/modules/test_event.py | tests/unit/modules/test_event.py | # -*- coding: utf-8 -*-
"""
:codeauthor: Rupesh Tare <rupesht@saltstack.com>
"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Libs
import salt.modules.event as event
import salt.utils.event
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
# Import Salt Testing Libs
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase
class EventTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.modules.event
"""
def setup_loader_modules(self):
return {
event: {
"__opts__": {
"id": "id",
"sock_dir": RUNTIME_VARS.TMP,
"transport": "zeromq",
}
}
}
def test_fire_master(self):
"""
Test for Fire an event off up to the master server
"""
with patch("salt.crypt.SAuth") as salt_crypt_sauth, patch(
"salt.transport.client.ReqChannel.factory"
) as salt_transport_channel_factory:
preload = {
"id": "id",
"tag": "tag",
"data": "data",
"tok": "salt",
"cmd": "_minion_event",
}
with patch.dict(
event.__opts__,
{"transport": "A", "master_uri": "localhost", "local": False},
):
with patch.object(salt_crypt_sauth, "gen_token", return_value="tok"):
with patch.object(
salt_transport_channel_factory, "send", return_value=None
):
self.assertTrue(event.fire_master("data", "tag", preload))
with patch.dict(event.__opts__, {"transport": "A", "local": False}):
with patch.object(
salt.utils.event.MinionEvent,
"fire_event",
side_effect=Exception("foo"),
):
self.assertFalse(event.fire_master("data", "tag"))
def test_fire(self):
"""
Test to fire an event on the local minion event bus.
Data must be formed as a dict.
"""
with patch("salt.utils.event") as salt_utils_event:
with patch.object(salt_utils_event, "get_event") as mock:
mock.fire_event = MagicMock(return_value=True)
self.assertTrue(event.fire("data", "tag"))
def test_send(self):
"""
Test for Send an event to the Salt Master
"""
with patch.object(event, "fire_master", return_value="B"):
self.assertEqual(event.send("tag"), "B")
def test_send_use_master_when_local_false(self):
"""
Test for Send an event when opts has use_master_when_local and its False
"""
patch_master_opts = patch.dict(event.__opts__, {"use_master_when_local": False})
patch_file_client = patch.dict(event.__opts__, {"file_client": "local"})
with patch.object(event, "fire", return_value="B") as patch_send:
with patch_master_opts, patch_file_client, patch_send:
self.assertEqual(event.send("tag"), "B")
patch_send.assert_called_once()
def test_send_use_master_when_local_true(self):
"""
Test for Send an event when opts has use_master_when_local and its True
"""
patch_master_opts = patch.dict(event.__opts__, {"use_master_when_local": True})
patch_file_client = patch.dict(event.__opts__, {"file_client": "local"})
with patch.object(event, "fire_master", return_value="B") as patch_send:
with patch_master_opts, patch_file_client, patch_send:
self.assertEqual(event.send("tag"), "B")
patch_send.assert_called_once()
| # -*- coding: utf-8 -*-
"""
:codeauthor: Rupesh Tare <rupesht@saltstack.com>
"""
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Libs
import salt.modules.event as event
import salt.utils.event
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
# Import Salt Testing Libs
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase
class EventTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.modules.event
"""
def setup_loader_modules(self):
return {
event: {
"__opts__": {
"id": "id",
"sock_dir": RUNTIME_VARS.TMP,
"transport": "zeromq",
}
}
}
def test_fire_master(self):
"""
Test for Fire an event off up to the master server
"""
with patch("salt.crypt.SAuth") as salt_crypt_sauth, patch(
"salt.transport.client.ReqChannel.factory"
) as salt_transport_channel_factory:
preload = {
"id": "id",
"tag": "tag",
"data": "data",
"tok": "salt",
"cmd": "_minion_event",
}
with patch.dict(
event.__opts__,
{"transport": "A", "master_uri": "localhost", "local": False},
):
with patch.object(salt_crypt_sauth, "gen_token", return_value="tok"):
with patch.object(
salt_transport_channel_factory, "send", return_value=None
):
self.assertTrue(event.fire_master("data", "tag", preload))
with patch.dict(event.__opts__, {"transport": "A", "local": False}):
with patch.object(
salt.utils.event.MinionEvent,
"fire_event",
side_effect=Exception("foo"),
):
self.assertFalse(event.fire_master("data", "tag"))
def test_fire(self):
"""
Test to fire an event on the local minion event bus.
Data must be formed as a dict.
"""
with patch("salt.utils.event") as salt_utils_event:
with patch.object(salt_utils_event, "get_event") as mock:
mock.fire_event = MagicMock(return_value=True)
self.assertTrue(event.fire("data", "tag"))
def test_send(self):
"""
Test for Send an event to the Salt Master
"""
with patch.object(event, "fire_master", return_value="B"):
self.assertEqual(event.send("tag"), "B")
| Python | 0.000001 |
4680812bcefe32a113547485fae692ee5f6d7a52 | Fix typing bugs in loss utils. | enn_acme/losses/utils.py | enn_acme/losses/utils.py | # Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Helpful functions relating to losses."""
import dataclasses
import typing as tp
import chex
from enn import base as enn_base
from enn import losses as enn_losses
from enn_acme import base as agent_base
import haiku as hk
import reverb
def add_l2_weight_decay(
loss_fn: agent_base.LossFn[agent_base.Input, agent_base.Output],
scale_fn: tp.Callable[[int], float], # Maps learner_steps --> l2 decay
predicate: tp.Optional[enn_losses.PredicateFn] = None,
) -> agent_base.LossFn[agent_base.Input, agent_base.Output]:
"""Adds l2 weight decay to a given loss function."""
def new_loss(
enn: enn_base.EpistemicNetwork[agent_base.Input, agent_base.Output],
params: hk.Params,
state: agent_base.LearnerState,
batch: reverb.ReplaySample,
key: chex.PRNGKey,
) -> tp.Tuple[chex.Array, agent_base.LossMetrics]:
loss, metrics = loss_fn(enn, params, state, batch, key)
l2_penalty = enn_losses.l2_weights_with_predicate(params, predicate)
decay = l2_penalty * scale_fn(state.learner_steps)
total_loss = loss + decay
metrics['decay'] = decay
metrics['raw_loss'] = loss
return total_loss, metrics
return new_loss
@dataclasses.dataclass
class CombineLossConfig(tp.Generic[agent_base.Input, agent_base.Output]):
loss_fn: agent_base.LossFn[agent_base.Input, agent_base.Output]
name: str = 'unnamed' # Name for the loss function
weight: float = 1. # Weight to scale the loss by
def combine_losses(
losses: tp.Sequence[CombineLossConfig[agent_base.Input, agent_base.Output]],
) -> agent_base.LossFn[agent_base.Input, agent_base.Output]:
"""Combines multiple losses into a single loss."""
def loss_fn(
enn: enn_base.EpistemicNetwork[agent_base.Input, agent_base.Output],
params: hk.Params,
state: agent_base.LearnerState,
batch: reverb.ReplaySample,
key: chex.PRNGKey,
) -> tp.Tuple[chex.Array, agent_base.LossMetrics]:
combined_loss = 0.
combined_metrics = {}
for loss_config in losses:
loss, metrics = loss_config.loss_fn(enn, params, state, batch, key)
combined_metrics[f'{loss_config.name}:loss'] = loss
for name, value in metrics.items():
combined_metrics[f'{loss_config.name}:{name}'] = value
combined_loss += loss_config.weight * loss
return combined_loss, combined_metrics
return loss_fn
| # Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Helpful functions relating to losses."""
import dataclasses
import typing as tp
import chex
from enn import base as enn_base
from enn import losses as enn_losses
from enn_acme import base as agent_base
import haiku as hk
import reverb
# Specific types
_LossFn = agent_base.LossFn[agent_base.Input, agent_base.Output]
_Enn = enn_base.EpistemicNetwork[agent_base.Input, agent_base.Output]
def add_l2_weight_decay(
loss_fn: _LossFn,
scale_fn: tp.Callable[[int], float], # Maps learner_steps --> l2 decay
predicate: tp.Optional[enn_losses.PredicateFn] = None,
) -> _LossFn:
"""Adds l2 weight decay to a given loss function."""
def new_loss(
enn: _Enn,
params: hk.Params,
state: agent_base.LearnerState,
batch: reverb.ReplaySample,
key: chex.PRNGKey,
) -> tp.Tuple[chex.Array, agent_base.LossMetrics]:
loss, metrics = loss_fn(enn, params, state, batch, key)
l2_penalty = enn_losses.l2_weights_with_predicate(params, predicate)
decay = l2_penalty * scale_fn(state.learner_steps)
total_loss = loss + decay
metrics['decay'] = decay
metrics['raw_loss'] = loss
return total_loss, metrics
return new_loss
@dataclasses.dataclass
class CombineLossConfig(tp.Generic[agent_base.Input, agent_base.Output]):
loss_fn: _LossFn
name: str = 'unnamed' # Name for the loss function
weight: float = 1. # Weight to scale the loss by
def combine_losses(losses: tp.Sequence[CombineLossConfig]) -> _LossFn:
"""Combines multiple losses into a single loss."""
def loss_fn(
enn: _Enn,
params: hk.Params,
state: agent_base.LearnerState,
batch: reverb.ReplaySample,
key: chex.PRNGKey,
) -> tp.Tuple[chex.Array, agent_base.LossMetrics]:
combined_loss = 0.
combined_metrics = {}
for loss_config in losses:
loss, metrics = loss_config.loss_fn(enn, params, state, batch, key)
combined_metrics[f'{loss_config.name}:loss'] = loss
for name, value in metrics.items():
combined_metrics[f'{loss_config.name}:{name}'] = value
combined_loss += loss_config.weight * loss
return combined_loss, combined_metrics
return loss_fn
| Python | 0.002287 |
d871328e94f804a2a296f4fba44751fa98e498d1 | Update custom dashboard response | rhizome/api/resources/custom_dashboard.py | rhizome/api/resources/custom_dashboard.py | from tastypie.resources import ALL
from rhizome.api.resources.base_model import BaseModelResource
from rhizome.api.exceptions import DatapointsException
from rhizome.models import CustomDashboard, CustomChart, ChartToDashboard
import json
class CustomDashboardResource(BaseModelResource):
class Meta(BaseModelResource.Meta):
resource_name = 'custom_dashboard'
filtering = {
"id": ALL,
}
always_return_data = True
def get_detail(self, request, **kwargs):
requested_id = kwargs['pk']
bundle = self.build_bundle(request=request)
response_data = CustomDashboard.objects.get(id=requested_id).__dict__
response_data.pop('_state')
chart_data = [c for c in CustomChart.objects\
.filter(charttodashboard__dashboard_id = requested_id).values()]
response_data['charts'] = chart_data
bundle.data = response_data
return self.create_response(request, bundle)
def obj_create(self, bundle, **kwargs):
post_data = bundle.data
user_id = bundle.request.user.id
try:
dash_id = int(post_data['id'])
except KeyError:
dash_id = None
title = post_data['title']
try:
description = post_data['description']
except KeyError:
description = ''
try:
layout = int(post_data['layout'])
except KeyError:
layout = 0
defaults = {
'id': dash_id,
'title': title,
'description': description,
'layout': layout
}
if(CustomDashboard.objects.filter(title=title).count() > 0 and (dash_id is None)):
raise DatapointsException('the custom dashboard "{0}" already exists'.format(title))
dashboard, created = CustomDashboard.objects.update_or_create(id=dash_id, defaults=defaults)
bundle.obj = dashboard
bundle.data['id'] = dashboard.id
## optionally add charts to the dashboard ##
try:
chart_uuids = post_data['chart_uuids']
self.upsert_chart_uuids(dashboard.id, chart_uuids)
except KeyError:
pass
return bundle
def upsert_chart_uuids(self, dashboard_id, chart_uuids):
if type(chart_uuids) == unicode:
chart_uuids = [chart_uuids]
chart_ids = CustomChart.objects.filter(uuid__in = chart_uuids)\
.values_list('id',flat=True)
batch = [ChartToDashboard(**{
'chart_id': c_id,
'dashboard_id': dashboard_id
}) for c_id in chart_ids]
ChartToDashboard.objects.filter(dashboard_id = dashboard_id).delete()
ChartToDashboard.objects.bulk_create(batch)
def obj_delete_list(self, bundle, **kwargs):
"""
"""
obj_id = int(bundle.request.GET[u'id'])
CustomChart.objects.filter(dashboard_id=obj_id).delete()
CustomDashboard.objects.filter(id=obj_id).delete()
def get_object_list(self, request):
'''
'''
try:
dash_id = request.GET['id']
return CustomDashboard.objects.filter(id=dash_id).values()
except KeyError:
return CustomDashboard.objects.all().values()
| from tastypie.resources import ALL
from rhizome.api.resources.base_model import BaseModelResource
from rhizome.api.exceptions import DatapointsException
from rhizome.models import CustomDashboard, CustomChart, ChartToDashboard
import json
class CustomDashboardResource(BaseModelResource):
class Meta(BaseModelResource.Meta):
resource_name = 'custom_dashboard'
filtering = {
"id": ALL,
}
always_return_data = True
def get_detail(self, request, **kwargs):
requested_id = kwargs['pk']
bundle = self.build_bundle(request=request)
response_data = CustomDashboard.objects.get(id=requested_id).__dict__
response_data.pop('_state')
chart_data = [c for c in CustomChart.objects\
.filter(charttodashboard__dashboard_id = requested_id).values()]
response_data['charts'] = chart_data
bundle.data = response_data
return self.create_response(request, bundle)
def obj_create(self, bundle, **kwargs):
post_data = bundle.data
user_id = bundle.request.user.id
try:
dash_id = int(post_data['id'])
except KeyError:
dash_id = None
title = post_data['title']
try:
description = post_data['description']
except KeyError:
description = ''
try:
layout = int(post_data['layout'])
except KeyError:
layout = 0
defaults = {
'id': dash_id,
'title': title,
'description': description,
'layout': layout
}
if(CustomDashboard.objects.filter(title=title).count() > 0 and (dash_id is None)):
raise DatapointsException('the custom dashboard "{0}" already exists'.format(title))
dashboard, created = CustomDashboard.objects.update_or_create(id=dash_id, defaults=defaults)
bundle.obj = dashboard
bundle.data['id'] = dashboard.id
## optionally add charts to the dashboard ##
try:
chart_uuids = post_data['chart_uuids']
self.upsert_chart_uuids(dashboard.id, chart_uuids)
except KeyError:
pass
return bundle
def upsert_chart_uuids(self, dashboard_id, chart_uuids):
chart_ids = CustomChart.objects.filter(uuid__in = chart_uuids)\
.values_list('id',flat=True)
batch = [ChartToDashboard(**{
'chart_id': c_id,
'dashboard_id': dashboard_id
}) for c_id in chart_ids]
ChartToDashboard.objects.filter(dashboard_id = dashboard_id).delete()
ChartToDashboard.objects.bulk_create(batch)
def obj_delete_list(self, bundle, **kwargs):
"""
"""
obj_id = int(bundle.request.GET[u'id'])
CustomChart.objects.filter(dashboard_id=obj_id).delete()
CustomDashboard.objects.filter(id=obj_id).delete()
def get_object_list(self, request):
'''
'''
try:
dash_id = request.GET['id']
return CustomDashboard.objects.filter(id=dash_id).values()
except KeyError:
return CustomDashboard.objects.all().values()
| Python | 0 |
7e13edfea2ee0c055f890fba08fa645141cd2f7d | add colourbar | helix.py | helix.py | # Create the data.
from numpy import pi, sin, cos, mgrid
[u,v] = mgrid[-5:5:0.01,0:2*pi+0.1:0.1]
a=2
x = u*cos(v)
y = u*sin(v)
z = a*v
K=-a**2/(u**2 +a**2)**2
from mayavi import mlab
s = mlab.mesh(x, y, z,scalars=K)
mlab.colorbar(orientation='horizontal',title='Gaussian Curvature')
mlab.show()
| # Create the data.
from numpy import pi, sin, cos, mgrid
[u,v] = mgrid[-5:5:0.01,0:2*pi+0.1:0.1]
a=2
x = u*cos(v)
y = u*sin(v)
z = a*v
K=-a**2/(u**2 +a**2)**2
from mayavi import mlab
s = mlab.mesh(x, y, z,scalars=K)
mlab.show()
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.