| commit (stringlengths 40-40) | subject (stringlengths 1-3.25k) | old_file (stringlengths 4-311) | new_file (stringlengths 4-311) | old_contents (stringlengths 0-26.3k) | lang (stringclasses, 3 values) | proba (float64 0-1) | diff (stringlengths 0-7.82k) |
|---|---|---|---|---|---|---|---|
47fc9784b76f2389ae4392450d67bb4affa96536
|
Fix header comment
|
tests/whitebox/_misc.py
|
tests/whitebox/_misc.py
|
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous methods to support testing.
"""
# isort: STDLIB
import os
import random
import string
import subprocess
import time
import unittest
# isort: THIRDPARTY
import psutil
# isort: LOCAL
from stratis_cli import run
from stratis_cli._error_reporting import handle_error
from stratis_cli._errors import StratisCliActionError
def device_name_list(min_devices=0, max_devices=10):
"""
Return a function that returns a random list of device names based on
parameters.
"""
def the_func():
return [
"/dev/%s"
% "".join(
random.choice(string.ascii_uppercase + string.digits) for _ in range(4)
)
for _ in range(random.randrange(min_devices, max_devices + 1))
]
return the_func
class _Service:
"""
Handle starting and stopping the Rust service.
"""
def setup(self):
"""
Start the stratisd daemon with the simulator.
"""
try:
stratisd_var = os.environ["STRATISD"]
except KeyError:
raise RuntimeError(
"STRATISD environment variable must be set to absolute path of stratisd executable"
)
self._stratisd = subprocess.Popen( # pylint: disable=attribute-defined-outside-init
[os.path.join(stratisd_var), "--sim"]
)
time.sleep(1)
def teardown(self):
"""
Stop the stratisd simulator and daemon.
"""
self._stratisd.terminate()
self._stratisd.wait()
def cleanup(self):
"""
Stop the daemon if it has been started.
"""
if hasattr(self, "_stratisd"):
self.teardown()
class RunTestCase(unittest.TestCase):
"""
Test case for running the program.
"""
def check_error(self, expected_cause, command_line, expected_code):
"""
Check that the expected exception was raised, and that the cause
and exit codes were also as expected, based on the command line
arguments passed to the program.
Precondition: command_line contains the "--propagate" flag, so that
the exception is propagated by the source, and can thus be caught
in the test.
:param expected_cause: the expected exception below the StratisCliActionError
:type expected_cause: Exception
:param command_line: the command line arguments
:type command_line: list
:param expected_code: the expected error code
:type expected_code: int
"""
with self.assertRaises(StratisCliActionError) as context:
RUNNER(command_line)
exception = context.exception
cause = exception.__cause__
self.assertIsInstance(cause, expected_cause)
error_string = str(exception)
self.assertIsInstance(error_string, str)
self.assertNotEqual(error_string, "")
with self.assertRaises(SystemExit) as final_err:
handle_error(exception)
final_code = final_err.exception.code
self.assertEqual(final_code, expected_code)
def check_system_exit(self, command_line, expected_code):
"""
Check that SystemExit exception was raised with the expected error
code as a result of running the program.
:param command_line: the command line arguments
:type command_line: list
:param expected_code: the expected error code
:type expected_code: int
"""
with self.assertRaises(SystemExit) as context:
RUNNER(command_line)
exit_code = context.exception.code
self.assertEqual(exit_code, expected_code)
class SimTestCase(RunTestCase):
"""
A SimTestCase must always start and stop stratisd (simulator version).
"""
@classmethod
def setUpClass(cls):
"""
Assert that there are no other stratisd processes running.
"""
for pid in psutil.pids():
try:
if psutil.Process(pid).name() == "stratisd":
raise RuntimeError(
"Evidently a stratisd process with process id %u is running"
% pid
)
except psutil.NoSuchProcess:
pass
def setUp(self):
"""
Start the stratisd daemon with the simulator.
"""
self._service = _Service()
self.addCleanup(self._service.cleanup)
self._service.setup()
RUNNER = run()
|
Python
| 0
|
@@ -1427,20 +1427,23 @@
the
-Rust service
+stratisd daemon
.%0A
|
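The diff cells in this dataset are URL-escaped: `%0A` is a newline, `%22` a double quote, `%5B`/`%5D` square brackets, `%25` a percent sign. Decoded, this first hunk only rewrites the `_Service` docstring ("the Rust service" becomes "the stratisd daemon"); a sketch of the class header after the commit:

```python
class _Service:
    """
    Handle starting and stopping the stratisd daemon.
    """
```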
1540fb09fbb26430dccd405d685b6ce0feb0e62c
|
Fix deletion of marketplaces by superusers
|
src/wirecloud/platform/markets/views.py
|
src/wirecloud/platform/markets/views.py
|
# -*- coding: utf-8 -*-
# Copyright 2012 Universidad Politécnica de Madrid
# This file is part of Wirecloud.
# Wirecloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Wirecloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Wirecloud. If not, see <http://www.gnu.org/licenses/>.
import json
import urlparse
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.utils.decorators import method_decorator
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden
from django.shortcuts import get_object_or_404
from django.utils import simplejson
from django.utils.translation import ugettext as _
from wirecloud.commons.baseviews import Resource, Service
from wirecloud.commons.utils import downloader
from wirecloud.commons.utils.http import supported_request_mime_types
from wirecloud.commons.utils.transaction import commit_on_http_success
from wirecloud.platform.markets.utils import get_market_managers
from wirecloud.platform.models import Market, PublishedWorkspace
from wirecloud.platform.workspace.mashupTemplateGenerator import build_usdl_from_workspace
class MarketCollection(Resource):
@method_decorator(login_required)
def read(self, request):
result = {}
for market in Market.objects.filter(Q(user=None) | Q(user=request.user)):
market_key = unicode(market)
market_data = simplejson.loads(market.options)
market_data['name'] = market.name
if market.user is not None:
market_data['user'] = market.user.username
else:
market_data['user'] = None
market_data['permissions'] = {
'delete': request.user.is_superuser or market.user == request.user
}
result[market_key] = market_data
return HttpResponse(simplejson.dumps(result), mimetype='application/json; charset=UTF-8')
@method_decorator(login_required)
@supported_request_mime_types(('application/json'))
@commit_on_http_success
def create(self, request):
try:
received_data = json.loads(request.raw_post_data)
except:
return HttpResponseBadRequest(_("Request body is not valid JSON data"), mimetype='text/plain; charset=UTF-8')
user_entry = request.user
if received_data['options'].get('share', '') is True:
user_entry = None
if user_entry is None and not request.user.is_superuser:
return HttpResponseForbidden()
Market.objects.create(user=user_entry, name=received_data['name'], options=json.dumps(received_data['options']))
return HttpResponse(status=201)
class MarketEntry(Resource):
@method_decorator(login_required)
def delete(self, request, market, user=None):
if (user is None and not request.user.is_superuser) or (user is None and market == 'local'):
return HttpResponseForbidden()
if user != request.user.username and not request.user.is_superuser:
return HttpResponseForbidden()
if user != request.user.username:
get_object_or_404(Market, user=request.user, name=market).delete()
else:
get_object_or_404(Market, user__username=user, name=market).delete()
return HttpResponse(status=204)
def update(self, request, market):
pass
class PublishService(Service):
@method_decorator(login_required)
@supported_request_mime_types(('application/json'))
def process(self, request):
data = json.loads(request.raw_post_data)
template_url = data['template_url']
path = request.build_absolute_uri()
login_scheme, login_netloc = urlparse.urlparse(template_url)[:2]
current_scheme, current_netloc = urlparse.urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
pworkspace_id = template_url.split('/')[-2]
published_workspace = PublishedWorkspace.objects.get(id=pworkspace_id)
description = published_workspace.template
else:
description = downloader.download_http_content(template_url, user=request.user)
usdl_info = None
if data.get('usdl', None) is not None:
usdl_info = {
'data': downloader.download_http_content(data['usdl'], user=request.user),
'content_type': 'application/rdf+xml'
}
market_managers = get_market_managers(request.user)
errors = {}
publish_options = json.loads(published_workspace.params)
for market_endpoint in data['marketplaces']:
try:
name = publish_options.get('name').replace(' ', '')
template_location = market_managers[market_endpoint['market']].build_repository_url(market_endpoint, name + 'Mdl')
usdl = build_usdl_from_workspace(publish_options, published_workspace.workspace, request.user, template_location, usdl_info=usdl_info)
market_managers[market_endpoint['market']].publish(market_endpoint, description, name, request.user, usdl=usdl, request=request)
except Exception, e:
errors[market_endpoint['market']] = unicode(e)
if len(errors) == 0:
return HttpResponse(status=204)
elif len(errors) == len(data['marketplaces']):
return HttpResponse(simplejson.dumps(errors), status=502, mimetype='application/json; charset=UTF-8')
else:
return HttpResponse(simplejson.dumps(errors), status=200, mimetype='application/json; charset=UTF-8')
|
Python
| 0.000295
|
@@ -3544,147 +3544,8 @@
()%0A%0A
- if user != request.user.username:%0A get_object_or_404(Market, user=request.user, name=market).delete()%0A else:%0A
|
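Decoded, the hunk is a character-level deletion that strips the if/else so a single lookup keyed on the target user remains. That is what fixes superuser deletion: before, a superuser deleting someone else's marketplace hit the `user != request.user.username` branch and looked the Market up with `user=request.user` - the wrong owner - yielding a 404. A sketch of the resulting method, with the file's own imports pulled in:

```python
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseForbidden
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator

from wirecloud.commons.baseviews import Resource
from wirecloud.platform.models import Market


class MarketEntry(Resource):

    @method_decorator(login_required)
    def delete(self, request, market, user=None):
        # global markets and the local catalogue stay protected
        if (user is None and not request.user.is_superuser) or (user is None and market == 'local'):
            return HttpResponseForbidden()
        # only the owner or a superuser may delete
        if user != request.user.username and not request.user.is_superuser:
            return HttpResponseForbidden()
        # single lookup keyed on the *target* owner, not request.user
        get_object_or_404(Market, user__username=user, name=market).delete()
        return HttpResponse(status=204)
```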
2a40ff77cc16be1c3327a4b12c6dd3a592375346
|
Add bootstrapped theme
|
project/settings/base.py
|
project/settings/base.py
|
import os
import dj_database_url
from django.core.exceptions import ImproperlyConfigured
from django.contrib.messages import constants as message_constants
def get_env_variable(var_name):
"""Get the environment variable or return exception"""
try:
var = os.environ[var_name]
# Replace unix strings with Python Booleans
if var == 'True':
var = True
if var == 'False':
var = False
except KeyError:
error_msg = "Set the {var_name} env var".format(var_name=var_name)
raise ImproperlyConfigured(error_msg)
return var
DEBUG = get_env_variable("DEBUG")
TEMPLATE_DEBUG = DEBUG
# Globals
PROJECT_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
PROJECT_NAME = 'barberscore'
USE_TZ = True
TIME_ZONE = get_env_variable("TZ")
LANGUAGE_CODE = 'en-us'
USE_I18N = False
USE_L10N = True
USE_TZ = True
SECRET_KEY = get_env_variable("SECRET_KEY")
ROOT_URLCONF = 'urls'
WSGI_APPLICATION = 'wsgi.application'
LOGIN_URL = 'website:login'
LOGOUT_URL = 'website:logout'
LOGIN_REDIRECT_URL = 'website:home'
DOMAIN = get_env_variable("DOMAIN")
STATICFILES_DIRS = ()
STATIC_URL = '/static/'
ADMINS = (
(get_env_variable("FULL_NAME"), get_env_variable("USER_EMAIL")),
)
SERVER_EMAIL = get_env_variable('SERVER_EMAIL')
# Database
DATABASE_URL = get_env_variable("DATABASE_URL")
DATABASES = {'default': dj_database_url.config(default=DATABASE_URL)}
# Auth
AUTH_USER_MODEL = "api.User"
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
)
# Middleware
MIDDLEWARE_CLASSES = (
'corsheaders.middleware.CorsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.security.SecurityMiddleware',
# 'django.middleware.transaction.TransactionMiddleware',
)
# Templating
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.request",
)
# Bootstrap overwrite
MESSAGE_TAGS = {
message_constants.ERROR: 'danger',
}
# Phonenumber support
PHONENUMBER_DEFAULT_REGION = 'US'
PHONENUMBER_DEFAULT_FORMAT = 'NATIONAL'
# Rest Framework
REST_FRAMEWORK = {
'DEFAULT_FILTER_BACKENDS': [
'apps.api.filters.CoalesceFilterBackend',
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
],
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly',
],
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 100,
'DEFAULT_RENDERER_CLASSES': [
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
]
}
# CORS Headers
CORS_ORIGIN_ALLOW_ALL = True
# Easy Select2
SELECT2_USE_BUNDLED_JQUERY = False
# Djoser
DJOSER = {
'LOGIN_AFTER_REGISTRATION': True,
'APPEND_USER_DATA': True,
}
# Haystack
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
'URL': 'http://127.0.0.1:9200',
'INDEX_NAME': 'haystack',
},
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Applications
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.humanize',
'haystack',
'timezone_field',
'django_object_actions',
'easy_select2',
'corsheaders',
'rest_framework',
'rest_framework.authtoken',
'apps.api',
'apps.website',
)
|
Python
| 0
|
@@ -4578,24 +4578,57 @@
aticfiles',%0A
+ 'django_admin_bootstrapped',%0A
'django.
|
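Decoded, the hunk inserts a single entry into `INSTALLED_APPS`. For `django_admin_bootstrapped` the position matters: listing it before `django.contrib.admin` lets its templates override the stock admin ones. The surrounding tuple after the commit:

```python
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_admin_bootstrapped',  # added: the bootstrapped admin theme
    'django.contrib.admin',
    'django.contrib.admindocs',
    # ... remaining entries unchanged
)
```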
b4eb149099b64bcfccc8a8f2fd0c5008c74a4fe0
|
move write() out of the scope of an IOError catch clause not meant for it
|
oonib/deck/handlers.py
|
oonib/deck/handlers.py
|
import glob
import json
import os
import re
import yaml
from oonib import errors as e
from oonib.handlers import OONIBHandler
from oonib import log
from oonib.config import config
class DeckDescHandler(OONIBHandler):
def get(self, deckID):
# note:
# we don't have to sanitize deckID, because it's already checked
# against matching a certain pattern in the handler.
bn = os.path.basename(deckID + '.desc')
try:
with open(os.path.join(config.main.deck_dir, bn)) as f:
response = {}
deckDesc = yaml.safe_load(f)
for k in ['name', 'description', 'version', 'author', 'date']:
response[k] = deckDesc[k]
self.write(response)
except IOError:
log.err("Deck %s missing" % deckID)
raise e.MissingDeck
except KeyError:
log.err("Deck %s missing required keys!" % deckID)
raise e.MissingDeckKeys
class DeckListHandler(OONIBHandler):
def get(self):
if not config.main.deck_dir:
self.set_status(501)
raise e.NoDecksConfigured
path = os.path.abspath(config.main.deck_dir) + "/*"
decknames = map(os.path.basename, glob.iglob(path))
decknames = filter(lambda y: re.match("[a-z0-9]{64}.desc", y), decknames)
deckList = []
for deckname in decknames:
with open(os.path.join(config.main.deck_dir, deckname)) as f:
d = yaml.safe_load(f)
deckList.append({
'id': deckname,
'name': d['name'],
'description': d['description']
})
self.write(deckList)
|
Python
| 0
|
@@ -722,42 +722,8 @@
%5Bk%5D%0A
- self.write(response)%0A%0A
@@ -738,24 +738,24 @@
pt IOError:%0A
+
@@ -822,17 +822,16 @@
ingDeck%0A
-%0A
@@ -902,32 +902,32 @@
eys!%22 %25 deckID)%0A
-
rais
@@ -947,16 +947,46 @@
ckKeys%0A%0A
+ self.write(response)%0A%0A
class De
|
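Decoded, the middle three hunks only adjust whitespace around context lines; the substantive change deletes `self.write(response)` from inside the `try` and re-adds it after both `except` clauses. Before, an `IOError` raised by `write()` itself would have been misreported as a missing deck. The resulting handler, as a sketch (imports as in the file above):

```python
class DeckDescHandler(OONIBHandler):
    def get(self, deckID):
        # deckID is already pattern-checked by the route, so basename is safe
        bn = os.path.basename(deckID + '.desc')
        try:
            with open(os.path.join(config.main.deck_dir, bn)) as f:
                response = {}
                deckDesc = yaml.safe_load(f)
                for k in ['name', 'description', 'version', 'author', 'date']:
                    response[k] = deckDesc[k]
        except IOError:
            log.err("Deck %s missing" % deckID)
            raise e.MissingDeck
        except KeyError:
            log.err("Deck %s missing required keys!" % deckID)
            raise e.MissingDeckKeys
        self.write(response)  # now outside the IOError/KeyError guards
```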
6264f58bdbe7c25132a0153713b97749a5809828
|
Version display should work even if git not installed
|
storage_service/administration/views.py
|
storage_service/administration/views.py
|
import subprocess
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import SetPasswordForm
from django.shortcuts import render, redirect, get_object_or_404
from tastypie.models import ApiKey
from common import utils
from storage_service import __version__ as ss_version
from . import forms as settings_forms
########################## ADMIN ##########################
def settings_edit(request):
initial_data = utils.get_all_settings()
common_form = settings_forms.CommonSettingsForm(request.POST or None,
initial=initial_data, prefix='common')
default_location_form = settings_forms.DefaultLocationsForm(
request.POST or None, initial=initial_data, prefix='default_loc')
if common_form.is_valid() and default_location_form.is_valid():
# Save settings
common_form.save()
default_location_form.save()
messages.success(request, "Setting saved.")
return redirect('settings_edit')
return render(request, 'administration/settings_form.html', locals())
########################## VERSION ########################
def get_git_commit():
try:
return subprocess.check_output(['git', 'rev-parse', 'HEAD'])
except subprocess.CalledProcessError:
return None
def version_view(request):
return render(request, 'administration/version.html',
{'version': ss_version,
'git_commit': get_git_commit()})
########################## USERS ##########################
def user_list(request):
users = get_user_model().objects.all()
return render(request, 'administration/user_list.html', locals())
def user_edit(request, id):
action = "Edit"
edit_user = get_object_or_404(get_user_model(), id=id)
user_form = settings_forms.UserChangeForm(request.POST or None, instance=edit_user)
password_form = SetPasswordForm(data=request.POST or None, user=edit_user)
if 'user' in request.POST and user_form.is_valid():
user_form.save()
messages.success(request, "User information saved.")
return redirect('user_list')
elif 'password' in request.POST and password_form.is_valid():
password_form.save()
api_key = ApiKey.objects.get(user=edit_user)
api_key.key = api_key.generate_key()
api_key.save()
messages.success(request, "Password changed.")
return redirect('user_list')
return render(request, 'administration/user_form.html', locals())
def user_create(request):
action = "Create"
user_form = settings_forms.UserCreationForm(request.POST or None)
if user_form.is_valid():
user_form.save()
messages.success(request, "New user {} created.".format(
user_form.cleaned_data['username']))
return redirect('user_list')
return render(request, 'administration/user_form.html', locals())
|
Python
| 0.000001
|
@@ -1258,16 +1258,26 @@
except
+ (OSError,
subproc
@@ -1298,16 +1298,17 @@
essError
+)
:%0A
|
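Decoded, the hunk widens the except clause. `CalledProcessError` only covers git exiting non-zero; when git is not installed at all, `subprocess.check_output` raises `OSError` before any process runs. The fixed helper, runnable as-is:

```python
import subprocess

def get_git_commit():
    try:
        return subprocess.check_output(['git', 'rev-parse', 'HEAD'])
    except (OSError, subprocess.CalledProcessError):
        # git missing entirely (OSError) or not a git checkout (CalledProcessError)
        return None
```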
fd456d55ceb4cd084c9ca754771c7b12235dcb5e
|
reset key
|
s.py
|
s.py
|
#encoding: utf-8
import os
from apscheduler.schedulers.blocking import BlockingScheduler
import requests
MAILGUN_KEY = os.environ.get('MAILGUN_KEY')
sched = BlockingScheduler()
@sched.scheduled_job('cron', day_of_week='mon-fri', hour=12)
def add_egg():
print(send_mail(get_text(get_price())))
@sched.scheduled_job('calc_lost_money', 'interval', minutes=1, id='calc_lost_money')
def calc_lost_money():
price = get_price()
sell = price['sell']
lost = _calc_lose_money(float(sell))
print 'Current lost %s...' % lost
if lost > 10000:
send_mail('Lost > %s' % lost)
if lost < -50000:
send_mail('Win 5w!!!!!!!!')
send_mail('Win 5w!!!!!!!!')
send_mail('Win 5w!!!!!!!!')
def _calc_lose_money(x):
return ((16.72 - x) / 16.72 + 0.0002) * 40000
def get_price():
r = requests.get('https://yunbi.com/api/v2/tickers').json()
eos = r['eoscny']
return eos['ticker']
def get_text(price):
return '''
Cool!
Eos Sumary:
Buy: {buy}
Sell: {sell},
Low: {low},
High: {high},
Last: {last},
Vol: {vol}
Add an egg for your lunch!
'''.format(**price)
def send_mail(text):
api_host = 'https://api.mailgun.net/v3/no-reply.alipay-inc.xyz/messages'
token = MAILGUN_KEY
sender = 'NoReply <no-reply@no-reply.alipay-inc.xyz>'
subject = u'加个蛋'
to = 'Jiatai <liujiatai@gmail.com>'
cc = 'Yuwei <akabyw@gmail.com>'
text = text
r = requests.post(api_host, auth=('api', token), data={
'from': sender,
'to': to,
'cc': cc,
'text': text,
})
return r.status_code, r.content
if __name__ == '__main__':
# sched.start()
calc_lose_money()
|
Python
| 0.000003
|
@@ -472,25 +472,25 @@
= _calc_los
-e
+t
_money(float
@@ -738,25 +738,25 @@
ef _calc_los
-e
+t
_money(x):%0A
|
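The two hunks are a pure rename, `_calc_lose_money` to `_calc_lost_money`, at the definition and its one call site. Note what the hunks do not touch: the `__main__` block still calls `calc_lose_money()` (no leading underscore), a name defined nowhere in the file, so running the script directly would still raise `NameError` after this commit. The renamed pieces:

```python
def _calc_lost_money(x):
    # unchanged formula, only the name differs
    return ((16.72 - x) / 16.72 + 0.0002) * 40000

# inside the scheduled job calc_lost_money():
lost = _calc_lost_money(float(sell))
```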
c81ecba98638666c19659832aa197a4d1f0c79f1
|
Change filter-thread-sanitizer-output.py python script printing settings to print full trace on skip
|
tools/filter-thread-sanitizer-output.py
|
tools/filter-thread-sanitizer-output.py
|
"""
You can pipe clang-thread-sanitizer output to this program, and it'll filter it, removing false positives indicated in the blacklist file below
You run neblio-qt with, as an example:
neblio-qt --noquicksync 2>&1 | python tools/filter-thread-sanitizer-output.py | tee output.txt
"""
import sys
import os
import fnmatch
import copy
separator = '==================' # separator between different messages from sanitizer
sanitizer_blacklist_file = "../sanitizer-blacklist.txt"
############################################################
import datetime
print(datetime.datetime.utcnow())
sanitizer_blacklist_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), sanitizer_blacklist_file)
with open(sanitizer_blacklist_file_path, 'r') as content_file:
blacklist_data = content_file.read()
blacklisted_expressions = []
def parse_blacklist_data(data):
lines = blacklist_data.replace("\r", "").split("\n")
for line in lines:
line = line.split("#")[0] # remove comments
if len(line.replace(" ", "")) == 0:
continue
split_line = line.split(":")
if len(split_line) != 2:
raise ValueError("Lines in the blacklist file are expected to have two entries separated by ':'; this line is invalid: " + line)
blacklisted_expressions.append(split_line[1])
parse_blacklist_data(blacklist_data)
print("Filtering for: " + str(blacklisted_expressions))
READSTATE_in_section = 0
READSTATE_out_of_section = 1
state = READSTATE_out_of_section
# data queue is the data that's received from stdin
data_queue = []
def process_data_queue(data_queue):
prev_len = len(data_queue)
orig_data_queue = copy.deepcopy(data_queue) # since filtering will ruin the list
# print("analyzing...")
matched = False
for blacklist_line in blacklisted_expressions:
data_queue = copy.deepcopy(orig_data_queue)
data_queue = fnmatch.filter(data_queue, blacklist_line)
if len(data_queue) > 0: # filtering will keep anything that matches the pattern provided
print("Skipping after match of '" + blacklist_line + "' in: " + str(data_queue) + "\n")
matched = True
break
if not matched: # if the filter doesn't match anything, then this is not filtered, print it
sys.stdout.write(separator + "\n")
sys.stdout.write("\n".join(orig_data_queue))
sys.stdout.write(separator + "\n")
sys.stdout.flush()
data_queue = []
for line in sys.stdin:
if line.startswith(separator) and state == READSTATE_out_of_section:
# print("out")
state = READSTATE_in_section
elif line.startswith(separator) and state == READSTATE_in_section:
# print("in-separator")
state = READSTATE_out_of_section
process_data_queue(data_queue)
data_queue = []
elif not line.startswith(separator) and state == READSTATE_in_section:
# print("in-section")
data_queue.append(line)
if len(data_queue) > 0: # process anything left
process_data_queue(data_queue)
|
Python
| 0
|
@@ -2085,13 +2085,10 @@
ing
-after
+on
mat
@@ -2130,16 +2130,21 @@
%22 + str(
+orig_
data_que
|
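Decoded, the hunk rewords the message ("after" becomes "on") and, matching the commit subject, swaps the truncated `data_queue` - which at that point holds only the lines that matched the blacklist pattern - for `orig_data_queue`, the full sanitizer section. The changed line:

```python
print("Skipping on match of '" + blacklist_line + "' in: " + str(orig_data_queue) + "\n")
```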
d83e0a34161d3783c12a271608c81780dd6f14dd
|
fix path to 'kmip_server' module
|
kmip/tests/integration/utils/server.py
|
kmip/tests/integration/utils/server.py
|
# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import optparse
import os
import sys
from kmip.core.config_helper import ConfigHelper
from kmip.services.kmip_server import KMIPServer
FILE_PATH = os.path.dirname(os.path.abspath(__file__))
def run_server(host, port, certfile, keyfile, cert_reqs, ssl_version,
ca_certs, do_handshake_on_connect, suppress_ragged_eofs):
logger = logging.getLogger(__name__)
server = KMIPServer(host=host, port=port, keyfile=keyfile,
certfile=certfile, cert_reqs=cert_reqs,
ssl_version=ssl_version, ca_certs=ca_certs,
do_handshake_on_connect=do_handshake_on_connect,
suppress_ragged_eofs=suppress_ragged_eofs)
logger.info('Starting the KMIP server')
try:
server.serve()
except KeyboardInterrupt:
logger.info('KeyboardInterrupt received while serving')
except Exception as e:
logger.info('Exception received while serving: {0}'.format(e))
finally:
server.close()
logger.info('Shutting down KMIP server')
def build_cli_parser():
parser = optparse.OptionParser(usage="%prog [options]",
description="Run KMIP Server")
parser.add_option("-n", "--host", action="store", default='127.0.0.1',
dest="host",
help="Hostname/IP address of platform running the KMIP "
"server (e.g., localhost, 127.0.0.1)")
parser.add_option("-p", "--port", action="store", default=5696,
dest="port", help="Port number for KMIP services")
parser.add_option("-k", "--keyfile", action="store",
default=os.path.normpath(os.path.join(
FILE_PATH, '../utils/certs/server.key')),
dest="keyfile")
parser.add_option("-c", "--certfile", action="store",
default=os.path.normpath(os.path.join(
FILE_PATH, '../utils/certs/server.crt')),
dest="certfile")
parser.add_option("-r", "--cert_reqs", action="store",
default="CERT_NONE", dest="cert_reqs")
parser.add_option("-s", "--ssl_version", action="store",
default='PROTOCOL_SSLv23', dest="ssl_version")
parser.add_option("-a", "--ca_certs", action="store",
default=ConfigHelper.NONE_VALUE, dest="ca_certs")
parser.add_option("-d", "--do_handshake_on_connect", action="store",
default="True", dest="do_handshake_on_connect")
parser.add_option("-e", "--suppress_ragged_eofs", action="store",
default="True", dest="suppress_ragged_eofs")
return parser
if __name__ == '__main__':
parser = build_cli_parser()
opts, args = parser.parse_args(sys.argv[1:])
run_server(host=opts.host,
port=opts.port,
certfile=opts.certfile,
keyfile=opts.keyfile,
cert_reqs=opts.cert_reqs,
ssl_version=opts.ssl_version,
ca_certs=opts.ca_certs,
do_handshake_on_connect=opts.do_handshake_on_connect,
suppress_ragged_eofs=opts.suppress_ragged_eofs)
|
Python
| 0.000007
|
@@ -761,16 +761,23 @@
ervices.
+server.
kmip_ser
|
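Decoded, the hunk inserts `server.` into the module path, so the import tracks the module's new location under `kmip.services.server`:

```python
from kmip.services.server.kmip_server import KMIPServer
```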
0b4bd61d4e89618bd4950aca8fa1f5e154104297
|
Remove unused import
|
cli/main.py
|
cli/main.py
|
#! /usr/bin/python
import sys
import readline
import argparse
from cli.built_in_commands import BuildInCommand
from cli.commands import CommandList
from cli.crypto import KeyPair
from cli.exception import CliException
from cli.network import generateTransaction, sendTx
import cli.file_io as file_io
import cli.completer as completer
from cliff.app import App
from cliff.commandmanager import CommandManager
from cliff.command import Command
BASE_NAME = "iroha-cli"
TARGET = "iroha"
class ChiekuiCli:
class Context:
def __init__(self, filepath):
self.name,\
self.public_key,\
self.private_key,\
address, port = file_io.load_config(filepath)
self.location = "{}:{}".format(address,str(port))
self.key_pair = KeyPair(
raw_private_key=KeyPair.decode(self.private_key),
raw_public_key=KeyPair.decode(self.public_key))
def __init__(self):
self.tx_commands = CommandList().commands
self.built_in_commands = BuildInCommand().commands
self.context =None
# ================================
# Parser
# ================================
self.parser = argparse.ArgumentParser(description='Cli of {}'.format(TARGET))
_sub_parser = self.parser.add_subparsers()
# parse: transaction
parse_tx = _sub_parser.add_parser("tx")
sup_parser_tx = parse_tx.add_subparsers()
for cmd in self.tx_commands:
_parser = sup_parser_tx.add_parser(cmd, help='{} help'.format(cmd))
for name, val in self.tx_commands[cmd]['option'].items():
_parser.add_argument("--{}".format(name), type=val["type"], required=val["required"],
help=val["detail"])
_parser.add_argument("--config", type=str, required=False,help="config.yml's path")
# parse: built in command
for cmd_name, cmd_val in self.built_in_commands.items():
_parser = _sub_parser.add_parser(cmd_name, help='{} help'.format(cmd_name))
for name, val in self.cli_commands[cmd]['option'].items():
_parser.add_argument("--{}".format(name), type=val["type"], required=val["required"],help=val["detail"])
def print_introduction(self):
print(
"----------------\n"
"Iroha-mizuki-cli\n"
"----------------\n\n"
"Current support commands"
)
for cmd in self.tx_commands.keys():
print(" - {}".format(cmd))
print(
"\n"
"Sample:\n\n"
" > python ./cli.py CreateAsset --domain_id japan --precision 0 --asset_name yen\n"
)
sys.exit(0)
def exec_tx(self, cmd, argv):
loader.load(argv.config)
command = self.tx_commands[cmd]["function"](vars(argv))
if command:
tx = generateTransaction(self.context.name, [command], self.context.key_pair)
if not sendTx(self.context.location, tx):
print(
"Transaction is not arrived...\n"
"Could you ckeck this => {}\n".format(self.context.location)
)
return False
else:
print("Err")
def exec_query(self, cmd, argv):
pass
def exec(self, argv):
parsed_argv = self.parser.parse_args(argv[1:])
if len(argv) < 3:
self.print_introduction()
self.context = self.Context(parsed_argv.config)
if argv[1] == 'tx':
self.exec_tx(argv[2], parsed_argv)
elif argv[1] == 'query':
self.exec_query(argv[2], parsed_argv)
if argv[2] in self.built_in_commands:
self.built_in_commands[argv[2]]["functions"](argv)
def main():
cli = ChiekuiCli()
cli.exec(sys.argv)
return
if __name__ == "__main__":
main()
|
Python
| 0.000001
|
@@ -28,25 +28,8 @@
sys%0A
-import readline%0A%0A
impo
@@ -40,16 +40,16 @@
rgparse%0A
+
%0Afrom cl
@@ -162,47 +162,8 @@
air%0A
-from cli.exception import CliException%0A
from
@@ -243,150 +243,8 @@
e_io
-%0Aimport cli.completer as completer%0Afrom cliff.app import App%0Afrom cliff.commandmanager import CommandManager%0Afrom cliff.command import Command
%0A%0ABA
@@ -265,16 +265,16 @@
ha-cli%22%0A
+
TARGET =
@@ -283,17 +283,16 @@
iroha%22%0A%0A
-%0A
class Ch
|
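Decoded, the hunks delete `import readline`, the unused `CliException` import, and the `cli.completer`/`cliff` imports, with some blank-line shuffling around `BASE_NAME` and `TARGET`. A sketch of the resulting header (the exact blank lines are a guess from the whitespace-only hunks):

```python
#! /usr/bin/python
import sys
import argparse

from cli.built_in_commands import BuildInCommand
from cli.commands import CommandList
from cli.crypto import KeyPair
from cli.network import generateTransaction, sendTx
import cli.file_io as file_io

BASE_NAME = "iroha-cli"
TARGET = "iroha"
```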
5687a00e4ec24b78e41b0377bfe524d7f0b13a43
|
Allow three_pid_creds as well as threePidCreds in /account/3pid
|
synapse/rest/client/v2_alpha/account.py
|
synapse/rest/client/v2_alpha/account.py
|
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from synapse.api.constants import LoginType
from synapse.api.errors import LoginError, SynapseError, Codes
from synapse.http.servlet import RestServlet
from synapse.util.async import run_on_reactor
from ._base import client_v2_patterns, parse_json_dict_from_request
import logging
logger = logging.getLogger(__name__)
class PasswordRestServlet(RestServlet):
PATTERNS = client_v2_patterns("/account/password")
def __init__(self, hs):
super(PasswordRestServlet, self).__init__()
self.hs = hs
self.auth = hs.get_auth()
self.auth_handler = hs.get_handlers().auth_handler
@defer.inlineCallbacks
def on_POST(self, request):
yield run_on_reactor()
body = parse_json_dict_from_request(request)
authed, result, params = yield self.auth_handler.check_auth([
[LoginType.PASSWORD],
[LoginType.EMAIL_IDENTITY]
], body, self.hs.get_ip_from_request(request))
if not authed:
defer.returnValue((401, result))
user_id = None
if LoginType.PASSWORD in result:
# if using password, they should also be logged in
requester = yield self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
if user_id != result[LoginType.PASSWORD]:
raise LoginError(400, "", Codes.UNKNOWN)
elif LoginType.EMAIL_IDENTITY in result:
threepid = result[LoginType.EMAIL_IDENTITY]
if 'medium' not in threepid or 'address' not in threepid:
raise SynapseError(500, "Malformed threepid")
# if using email, we must know about the email they're authing with!
threepid_user_id = yield self.hs.get_datastore().get_user_id_by_threepid(
threepid['medium'], threepid['address']
)
if not threepid_user_id:
raise SynapseError(404, "Email address not found", Codes.NOT_FOUND)
user_id = threepid_user_id
else:
logger.error("Auth succeeded but no known type!", result.keys())
raise SynapseError(500, "", Codes.UNKNOWN)
if 'new_password' not in params:
raise SynapseError(400, "", Codes.MISSING_PARAM)
new_password = params['new_password']
yield self.auth_handler.set_password(
user_id, new_password
)
defer.returnValue((200, {}))
def on_OPTIONS(self, _):
return 200, {}
class ThreepidRestServlet(RestServlet):
PATTERNS = client_v2_patterns("/account/3pid")
def __init__(self, hs):
super(ThreepidRestServlet, self).__init__()
self.hs = hs
self.identity_handler = hs.get_handlers().identity_handler
self.auth = hs.get_auth()
self.auth_handler = hs.get_handlers().auth_handler
@defer.inlineCallbacks
def on_GET(self, request):
yield run_on_reactor()
requester = yield self.auth.get_user_by_req(request)
threepids = yield self.hs.get_datastore().user_get_threepids(
requester.user.to_string()
)
defer.returnValue((200, {'threepids': threepids}))
@defer.inlineCallbacks
def on_POST(self, request):
yield run_on_reactor()
body = parse_json_dict_from_request(request)
if 'threePidCreds' not in body:
raise SynapseError(400, "Missing param", Codes.MISSING_PARAM)
threePidCreds = body['threePidCreds']
requester = yield self.auth.get_user_by_req(request)
user_id = requester.user.to_string()
threepid = yield self.identity_handler.threepid_from_creds(threePidCreds)
if not threepid:
raise SynapseError(
400, "Failed to auth 3pid", Codes.THREEPID_AUTH_FAILED
)
for reqd in ['medium', 'address', 'validated_at']:
if reqd not in threepid:
logger.warn("Couldn't add 3pid: invalid response from ID sevrer")
raise SynapseError(500, "Invalid response from ID Server")
yield self.auth_handler.add_threepid(
user_id,
threepid['medium'],
threepid['address'],
threepid['validated_at'],
)
if 'bind' in body and body['bind']:
logger.debug(
"Binding emails %s to %s",
threepid, user_id
)
yield self.identity_handler.bind_threepid(
threePidCreds, user_id
)
defer.returnValue((200, {}))
def register_servlets(hs, http_server):
PasswordRestServlet(hs).register(http_server)
ThreepidRestServlet(hs).register(http_server)
|
Python
| 0.000012
|
@@ -3975,38 +3975,149 @@
-if 'threePidCreds' not in body
+threePidCreds = body.get('threePidCreds')%0A threePidCreds = body.get('three_pid_creds', threePidCreds)%0A if threePidCreds is None
:%0A
@@ -4191,54 +4191,8 @@
RAM)
-%0A threePidCreds = body%5B'threePidCreds'%5D
%0A%0A
|
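Decoded, the hunk replaces the hard `'threePidCreds' not in body` check with two `dict.get` lookups, so either spelling is accepted and `three_pid_creds` takes precedence when both are present; the old unconditional `body['threePidCreds']` access further down is dropped. The resulting lines in `on_POST`:

```python
threePidCreds = body.get('threePidCreds')
threePidCreds = body.get('three_pid_creds', threePidCreds)
if threePidCreds is None:
    raise SynapseError(400, "Missing param", Codes.MISSING_PARAM)
```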
c3fb87846d1f1a38fe2e37521464dea59832ff6c
|
remove unused import from distutils
|
pywt/__init__.py
|
pywt/__init__.py
|
# flake8: noqa
# Copyright (c) 2006-2012 Filip Wasilewski <http://en.ig.ma/>
# Copyright (c) 2012-2016 The PyWavelets Developers
# <https://github.com/PyWavelets/pywt>
# See COPYING for license details.
"""
Discrete forward and inverse wavelet transform, stationary wavelet transform,
wavelet packets signal decomposition and reconstruction module.
"""
from __future__ import division, print_function, absolute_import
from distutils.version import LooseVersion
from ._extensions._pywt import *
from ._functions import *
from ._multilevel import *
from ._multidim import *
from ._thresholding import *
from ._wavelet_packets import *
from ._dwt import *
from ._swt import *
from ._cwt import *
from . import data
__all__ = [s for s in dir() if not s.startswith('_')]
try:
# In Python 2.x the name of the tempvar leaks out of the list
# comprehension. Delete it to not make it show up in the main namespace.
del s
except NameError:
pass
from pywt.version import version as __version__
from ._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
|
Python
| 0
|
@@ -441,51 +441,8 @@
port
-%0Afrom distutils.version import LooseVersion
%0A%0Afr
|
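The single hunk deletes the `LooseVersion` import, which nothing in the module references. The import block afterwards:

```python
from __future__ import division, print_function, absolute_import

from ._extensions._pywt import *
from ._functions import *
```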
009c982fe796a9b31fda226cd9304afb1cdb8c38
|
consolidate update* functions
|
clusters.py
|
clusters.py
|
import numpy as np
import pandas as pd
from astropy.cosmology import Planck13 as cosmo
from astropy import units
import sys
sys.path.insert(1,'/Users/jesford/astrophysics/cofm') #temporary path adjust
from cofm import c_DuttonMaccio
try:
from IPython.display import display
notebook_display = True
except:
notebook_display = False
#default parameters
h = cosmo.H0.value
Om_M = cosmo.Om0
Om_L = 1. - Om_M
class Clusters():
"""Ensemble of galaxy clusters and their properties."""
def __init__(self, redshifts):
if type(redshifts) != np.ndarray:
redshifts = np.array(redshifts)
if redshifts.ndim != 1:
raise ValueError("Input redshift array must have 1 dimension.")
self.describe = "Ensemble of galaxy clusters and their properties."
self.number = redshifts.shape[0]
self.z = redshifts
self._rho_crit = cosmo.critical_density(self.z)
self._massrich_norm = 2.7*10**13
self._massrich_slope = 1.4
self._df = pd.DataFrame(self.z, columns=['z'])
def update_richness(self, richness):
if type(richness) != np.ndarray:
richness = np.array(richness)
if richness.ndim != 1:
raise ValueError("Input richness array must have 1 dimension.")
if richness.shape[0] == self.number:
self.n200 = richness
self._df['n200'] = pd.Series(self.n200, index = self._df.index)
self._update_depends_on_richness()
else:
raise ValueError("Input richness array must be same \
length as current cluster ensemble.")
def _update_depends_on_richness(self):
self._richness_to_mass()
def update_z(self, redshifts):
self.z = redshifts
self._df['z'] = pd.Series(self.z, index = self._df.index)
self._rho_crit = cosmo.critical_density(self.z)
self._update_dependant_variables()
def _update_dependant_variables(self):
self._r200()
self._c200()
self._rs()
#what else depends on z or m or?
def _richness_to_mass(self):
"""Calculate M_200 for simple power-law scaling relation
(with default parameters from arXiv:1409.3571)."""
self.m200 = self._massrich_norm * (self.n200 ** self._massrich_slope)
self._df['m200'] = pd.Series(self.m200, index = self._df.index)
self._update_dependant_variables()
def massrich_parameters(self):
print "\nMass-Richness Power Law: M200 = norm * N200^slope"
print " norm:", self._massrich_norm
print " slope:", self._massrich_slope
def update_massrichrelation(self, norm = None, slope = None):
if norm != None:
self._massrich_norm = norm
if slope != None:
self._massrich_slope = slope
self._richness_to_mass()
def view(self, notebook = notebook_display):
print "\nCluster Ensemble:"
if notebook == True:
display(self._df)
elif notebook == False:
print self._df
self.massrich_parameters()
def _r200(self):
self.r200 = (3.*self.m200 / (800.*np.pi*self._rho_crit))**(1./3.)
self._df['r200'] = pd.Series(self.r200, index = self._df.index)
def _c200(self):
"""Use c(M) from Dutton & Maccio 2014."""
self.c200 = c_DuttonMaccio(self.z,self.m200)
self._df['c200'] = pd.Series(self.c200, index = self._df.index)
def _rs(self):
"""Cluster scale radius."""
self.rs = self.r200 / self.c200
self._df['rs'] = pd.Series(self.rs, index = self._df.index)
|
Python
| 0
|
@@ -1485,32 +1485,22 @@
lf._
-update_depends_on_richne
+richness_to_ma
ss()
@@ -1634,109 +1634,359 @@
.%22)%0A
+%0A
- %0A def _update_depends_on_richness(self):%0A self._richness_to_mass()%0A
+def _richness_to_mass(self):%0A %22%22%22Calculate M_200 for simple power-law scaling relation%0A (with default parameters from arXiv:1409.3571).%22%22%22%0A self.m200 = self._massrich_norm * (self.n200 ** self._massrich_slope)%0A self._df%5B'm200'%5D = pd.Series(self.m200, index = self._df.index)%0A self._update_dependant_variables()%0A
%0A
@@ -2367,364 +2367,13 @@
or?%0A
+
%0A
- def _richness_to_mass(self):%0A %22%22%22Calculate M_200 for simple power-law scaling relation%0A (with default parameters from arXiv:1409.3571).%22%22%22%0A self.m200 = self._massrich_norm * (self.n200 ** self._massrich_slope)%0A self._df%5B'm200'%5D = pd.Series(self.m200, index = self._df.index)%0A self._update_dependant_variables()%0A%0A
|
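Decoded, the hunks make `update_richness` call `_richness_to_mass()` directly, delete the one-line `_update_depends_on_richness` wrapper, and move the `_richness_to_mass` definition up next to its caller; the method bodies themselves are unchanged. A sketch of the consolidated pair:

```python
def update_richness(self, richness):
    # ... input validation as before ...
    self.n200 = richness
    self._df['n200'] = pd.Series(self.n200, index=self._df.index)
    self._richness_to_mass()  # was: self._update_depends_on_richness()

def _richness_to_mass(self):
    """Calculate M_200 for simple power-law scaling relation
    (with default parameters from arXiv:1409.3571)."""
    self.m200 = self._massrich_norm * (self.n200 ** self._massrich_slope)
    self._df['m200'] = pd.Series(self.m200, index=self._df.index)
    self._update_dependant_variables()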
e0f6f8af00e17a29fe43906fd4861e6a4f2d7cec
|
Fix byte code for ifs without elses
|
visitor.py
|
visitor.py
|
from .ast import *
from .invoke import S
from itertools import count
tagger = lambda x=count(): next(x)
def fixtags(t):
return list(fixtags_(t))
def fixtags_(t):
reg = {}
i = 0
for x in t:
if x[0] == 'label':
reg[x[1]] = i
else:
i += 1
for x in t:
fx = x[0]
if fx in {'jump if nil', 'jump'}:
yield fx, reg[x[1]]
elif fx != 'label':
yield x
def flattenbody(b, droplast=False):
b = list(_flattenbody(b))
if not droplast:
if b[-1] == ('drop',): # return last expr
b.pop()
return b
def _flattenbody(b):
statement = True
for s in b:
yield from s
statement = isinstance(s, (Assign, ReturnValue))
if not statement:
yield ('drop',)
if statement:
yield from Nil()
@ReturnValue._method
def __iter__(self):
yield from self.value
yield ('return',)
@Assign._method
def __iter__(self):
yield from self.name.assignto(self.value, binop=self.assign[:-1])
@For._method
def __iter__(self):
start = tagger()
end = tagger()
yield from Nil()
yield from self.iterable
yield ('label', start)
yield ('dup',)
yield ('jump if nil', end)
yield ('swap',)
yield ('drop',)
for i, name in enumerate(self.namelist, start=1):
yield from Name(name).assignto([('dup',), ('get attr raw', i)])
yield from flattenbody(self.body)
yield ('swap',)
yield ('dup',)
yield ('get attr raw', S.arg)
yield ('swap',)
yield ('get attr raw', S.func)
yield ('call',)
yield ('jump', start)
yield ('label', end)
yield ('drop',)
@Do._method
def __iter__(self):
yield ('lambda', fixtags(flattenbody(self.body)))
#@Class._method
#def __iter__(self):
# yield from Do(self.body + [Name(-1)])
@If._method
def __iter__(self):
yield from self.cond
tag = tagger()
yield ('jump if nil', tag)
yield from flattenbody(self.thenbody)
if self.elsebody is not None:
end = tagger()
yield ('jump', end)
yield ('label', tag)
if self.elsebody is not None:
yield from flattenbody(self.elsebody)
yield ('label', end)
else:
yield from Nil()
@BinOp._method
def __iter__(self):
yield from self.left
yield from self.right
yield ('binop', self.op)
@UnOp._method
def __iter__(self):
yield from self.right
yield ('unop', self.op)
@UnOpS._method
def __iter__(self):
yield from self.right.assignto(unops=self.op)
@FuncCall._method
def __iter__(self):
yield from self.arg
if isinstance(self.func, Attr):
yield from AttrGet(coll=self.func.coll, attr=self.func.attr)
else:
yield from self.func
yield ('call',)
@Index._method
def assignto(self, value=None, binop=None, unops=None):
yield from self.coll
yield from self.key
if unops is not None:
yield from UnOp(unops, [('over',), ('over',), ('get index',)])
yield ('dup',)
yield ('rot', -3)
elif binop:
yield from BinOp([('over',), ('over',), ('get index',)], binop, value)
else:
yield from value
yield ('set index',)
@Index._method
def __iter__(self):
yield from self.coll
yield from self.key
yield ('get index',)
@Attr._method
def assignto(self, value=None, binop=None, unops=None):
yield from self.coll
if unops is not None:
yield from UnOp(unops, [('dup',), ('get attr', self.attr)])
yield ('dup',)
yield ('rot', -2)
elif binop:
yield from BinOp([('dup',), ('get attr', self.attr)], binop, value)
else:
yield from value
yield ('set attr', self.attr)
@Attr._method
def __iter__(self):
yield from self.coll
yield ('get attr', self.attr)
@AttrGet._method
def assignto(self, value=None, binop=None, unops=None):
yield from self.coll
if unops is not None:
yield from UnOp(unops, [('dup',), ('get attr raw', self.attr)])
yield ('dup',)
yield ('rot', -2)
elif binop:
yield from BinOp([('dup',), ('get attr raw', self.attr)], binop, value)
else:
yield from value
yield ('set attr raw', self.attr)
@AttrGet._method
def __iter__(self):
yield from self.coll
yield ('get attr raw', self.attr)
@TableLit._method
def __iter__(self):
yield ('new table',)
for key, value in self.value:
yield from Index(coll=[('dup',)], key=key).assignto(value)
@StrLit._method
def __iter__(self):
if len(self.value) > 1:
yield from Nil() # string terminator
for v in self.value:
if isinstance(v, RegFrag):
if v.value:
yield ('lit', v.value)
else:
yield from v
yield ('convert to string',)
yield ('collect string',)
else:
yield ('lit', self.value[0].value)
@Sym._method
def __iter__(self):
yield ('lit', self.value)
@Name._method
def __iter__(self):
yield ('get name', self.value)
@Name._method
def assignto(self, value=None, binop=None, unops=None):
if unops is not None:
yield from UnOp(unops, [('get name', self.value)])
yield ('dup',)
elif binop:
yield from BinOp([('get name', self.value)], binop, value)
else:
yield from value
yield ('set name', self.value)
@Int._method
def __iter__(self):
yield ('lit', self.value)
@Nil._method
def __iter__(self):
yield ('lit', None)
|
Python
| 0.005679
|
@@ -1990,46 +1990,8 @@
dy)%0A
- if self.elsebody is not None:%0A
@@ -2005,20 +2005,16 @@
agger()%0A
-
yiel
@@ -2074,31 +2074,62 @@
elsebody is
-not Non
+None:%0A yield from Nil()%0A els
e:%0A y
@@ -2165,20 +2165,16 @@
sebody)%0A
-
yiel
@@ -2193,43 +2193,8 @@
end)
-%0A else:%0A yield from Nil()
%0A%0A@B
|
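Decoding the five hunks together: the fix makes `end = tagger()` and the final `('label', end)` unconditional and inverts the test. In the old code, an `if` with no `else` never assigned `end`, so `yield ('jump', end)` failed with an unbound-local error, and no end label was ever emitted for the jump to land on. The method after the commit, as reconstructed:

```python
@If._method
def __iter__(self):
    yield from self.cond
    tag = tagger()
    yield ('jump if nil', tag)
    yield from flattenbody(self.thenbody)
    end = tagger()                 # now unconditional
    yield ('jump', end)
    yield ('label', tag)
    if self.elsebody is None:
        yield from Nil()           # else-less ifs still evaluate to nil
    else:
        yield from flattenbody(self.elsebody)
    yield ('label', end)           # always emitted
```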
dafd492d7a91d8001b7ae2a1bddc2f0e18466c2c
|
Allow for choosing quotes
|
quotes/quotes.py
|
quotes/quotes.py
|
import discord
from random import choice as randchoice
from redbot.core import commands, checks, Config, bot
from redbot.core.utils.chat_formatting import box, humanize_list
from redbot.core.utils.menus import menu, DEFAULT_CONTROLS
# https://red-discordbot.readthedocs.io/en/latest/framework_utils.html
# https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/redbot/cogs/economy/economy.py
# Message storage information
"""
quote_list = []
"""
# commands
class Quotes(commands.Cog):
"""My custom counter cog"""
def __init__(self):
self.config = Config.get_conf(self, identifier=108112868100055040420, force_registration=True)
default_guild = {
"quote_list": []
}
self.config.register_guild(**default_guild)
@commands.group(invoke_without_command=True)
async def quote(self, ctx): #Recount group
"""
Prints a random quote
"""
quote_list = await self.config.guild(ctx.guild).quote_list()
if quote_list != []:
quote = randchoice(quote_list)
await ctx.send(box(f"{quote}"))
@checks.admin()
@quote.group(name="add", invoke_without_command=True)
async def quote_add(self, ctx, *, quote : str):
"""
Adds a quote to the quote list
"""
async with self.config.guild(ctx.guild).quote_list() as quote_list:
quote_list.append(quote)
await ctx.send(f'Added "{quote}"')
@checks.admin()
@quote.group(name="del", invoke_without_command=True)
async def quote_del(self, ctx, quote_pos : int):
"""
Deletes a quote from the quote list
"""
# make sure pos starts at 1
if quote_pos > 0:
actual_quote_pos = quote_pos - 1
async with self.config.guild(ctx.guild).quote_list() as quote_list:
if actual_quote_pos < len(quote_list):
removed_quote = quote_list.pop(actual_quote_pos)
await ctx.send(f'Removed "{removed_quote}"')
@quote.group(name="all", invoke_without_command=True)
async def quote_all(self, ctx):
"""
Prints a list of all quotes
"""
quote_list = await self.config.guild(ctx.guild).quote_list()
pos = 1
quote_groups = []
quote_pos_len = len(str(len(quote_list))) # Gets the length of the largest quote number
temp_msg = ""
for quote in quote_list:
temp_msg += (
f"{f'{pos}': <{quote_pos_len+2}}{quote}\n\n"
)
if pos % 5 == 0:
quote_groups.append(box(temp_msg, lang="md"))
temp_msg = ""
pos += 1
if temp_msg != "":
quote_groups.append(box(temp_msg, lang="md"))
await menu(ctx, quote_groups, DEFAULT_CONTROLS)
|
Python
| 0
|
@@ -997,29 +997,320 @@
-if quote_list != %5B%5D:%0A
+arguments = ctx.message.split(%22 %22)%0A%0A if quote_list != %5B%5D:%0A if len(arguments) %3E 1:%0A try:%0A quote_pos = int(arguments%5B1%5D)%0A await ctx.send(box(f%22%7Bquote_list%5Bquote_pos-1%5D%7D%22))%0A except:%0A pass%0A else:%0A
@@ -1348,16 +1348,20 @@
e_list)%0A
+
|
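Decoded, the hunk grows the body of `quote()` so that an optional positional index selects a specific quote, falling back to a random one as before. A sketch of the resulting command (indentation reconstructed from the escaped hunk; note that as committed it calls `.split()` on `ctx.message` itself - discord.py's `Message` object has no `split()`, so the author presumably intended `ctx.message.content.split(" ")`):

```python
@commands.group(invoke_without_command=True)
async def quote(self, ctx):
    """
    Prints a random quote
    """
    quote_list = await self.config.guild(ctx.guild).quote_list()

    arguments = ctx.message.split(" ")

    if quote_list != []:
        if len(arguments) > 1:
            try:
                quote_pos = int(arguments[1])
                await ctx.send(box(f"{quote_list[quote_pos-1]}"))
            except:
                pass
        else:
            quote = randchoice(quote_list)
            await ctx.send(box(f"{quote}"))
```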
730dc123e20a774ea0355400ac9ec0b5d286b7c6
|
Change minimal value for ai time
|
cmd_line.py
|
cmd_line.py
|
#! /usr/bin/env python
# title :non_gui.py
# description :Runs a cmd-line version of Reversi
# author :andresthor
# date :05-02-2017
# usage :python non_gui.py
# python_version :3.5.2
# =============================================================================
from reversi import Reversi
from constants import BLACK, WHITE
from cmd import Cmd
INVALID_INPUT = '\n'.join(['Input should be in the form x#',
'where x is a lowercase letter between a-h',
'and # is a number between 1-8'])
NUMBERS = [1, 2, 3, 4, 5, 6, 7, 8]
LETTERS = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6, 'g': 7, 'h': 8}
LET_LST = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
class ReversiCMD(Cmd):
'''An implementation of the Reversi game using a command line interface
Prints out an ascii version of the game state and accepts input in the
form of "move a1", "move h8" etc.
Player controls Black, Computer controls White
'''
prompt = '$: '
def __init__(self, reversi):
Cmd.__init__(self)
self.reversi = reversi
def cmdloop(self):
# print(intro)
self.print_board()
print('Write help to get information on the available commands')
return Cmd.cmdloop(self)
def postcmd(self, stop, line):
if self.reversi.terminal_test(self.reversi.board,
self.reversi.board.turn):
self.print_end_game()
self.do_quit('')
return Cmd.postcmd(self, stop, line)
def do_quit(self, line):
'''Ends the program'''
return True
def do_move(self, line):
if len(line) != 2:
print(INVALID_INPUT)
return
number = self.contains(line, NUMBERS)
letter = self.contains(line, LETTERS)
if not (letter and number):
print(INVALID_INPUT)
return
if self.reversi.try_move((LETTERS[letter], number)):
self.print_board()
self.print_info((letter, number), BLACK)
self.reversi.update()
self.print_board()
self.print_info(self.get_last_computer_move(), WHITE)
else:
print('Invalid move: {}{}'.format(letter, number))
def help_move(self):
print('move [x#] OR move [#x]\nWhere x is a letter a-h and # a nbr 1-8')
def do_hint(self, line):
'''Gives the player an optimal move hint (alpha-beta search)'''
if not self.reversi.hints:
self.reversi.toggle_hints()
self.reversi.alpha_beta_search()
move = self.reversi.get_optimal_move()
print('Optimal move: {}{}'.format(LET_LST[move[0] - 1], move[1]))
def do_cheat(self, line):
'''Make optimal move'''
if not self.reversi.hints:
self.reversi.toggle_hints()
self.reversi.alpha_beta_search()
move = self.reversi.get_optimal_move()
self.do_move(LET_LST[move[0] - 1] + str(move[1]))
def do_show(self, line):
'''Prints out the current game state'''
self.print_board()
self.print_info(self.get_last_computer_move(), self.reversi.board.turn)
def do_ai(self, line):
'''Set the maximum time (seconds) allowed for the AI to think'''
if not self._is_number(line) or (float(line) < 0.5):
print('Not a valid number.'
' Enter the maximum allowed time for the AI to think.')
else:
self.reversi.cutoff_time = float(line)
def _is_number(self, n):
try:
float(n)
return True
except:
return False
def get_last_computer_move(self):
move = self.reversi.board.last
move = (LET_LST[move[0] - 1], move[1])
return move
def print_board(self):
print('')
self.reversi.board.ascii()
def print_info(self, move, player):
turn_time = 0
if player is BLACK:
print('Your move was {}{}'.format(move[0], move[1]))
turn_time = self.reversi.black_last
elif player is WHITE:
print("The computer's move was {}{}".format(move[0], move[1]))
turn_time = self.reversi.white_last
print('Move took {:.3f}s'.format(turn_time))
score = self.reversi.score
print('Score is (black/white): {}/{}\n'.format(score[BLACK], score[WHITE]))
def contains(self, line, items):
for i in items:
if str(i) in line:
return i
return False
def print_end_game(self):
score = self.reversi.score
winner = 'Nobody - Draw'
if score[BLACK] > score[WHITE]:
winner = 'Black'
elif score[WHITE] > score[BLACK]:
winner = 'White'
print('Game Over!')
print('Time used (black/white): {:.3f}/{:.3f}'
.format(self.reversi.black_time, self.reversi.white_time))
print('Score (black/white): {}/{}'.format(score[BLACK], score[WHITE]))
print('Winner: {}'.format(winner))
if __name__ == '__main__':
reversi = Reversi()
ReversiCMD(reversi).cmdloop()
|
Python
| 0
|
@@ -35,23 +35,24 @@
:
-non_gui
+cmd_line
.py%0A# de
@@ -3388,11 +3388,11 @@
) %3C
-0.5
+1.0
):%0A
|
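Two decoded hunks: the header comment's title line becomes `cmd_line.py`, matching the actual filename, and the AI "think time" floor in `do_ai` rises from 0.5s to 1.0s:

```python
def do_ai(self, line):
    '''Set the maximum time (seconds) allowed for the AI to think'''
    if not self._is_number(line) or (float(line) < 1.0):  # floor raised from 0.5
        print('Not a valid number.'
              ' Enter the maximum allowed time for the AI to think.')
    else:
        self.reversi.cutoff_time = float(line)
```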
e65295f1c9b7ac3ac1fa6583fdbd69fd651c5662
|
Fix ReplaceInstance constructor
|
touchdown/aws/ec2/auto_scaling_group.py
|
touchdown/aws/ec2/auto_scaling_group.py
|
# Copyright 2014-2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.core.action import Action
from touchdown.core.resource import Resource
from touchdown.core.target import Target
from touchdown.core import argument, errors
from ..account import AWS
from .. import serializers
from ..elb import LoadBalancer
from ..vpc import Subnet
from ..common import SimpleApply
from .launch_configuration import LaunchConfiguration
class AutoScalingGroup(Resource):
resource_name = "auto_scaling_group"
""" A name for this AutoScalingGroup. Unique within an AWS account """
name = argument.String(aws_field="AutoScalingGroupName")
""" A launch configuration """
launch_configuration = argument.Resource(LaunchConfiguration, aws_field="LaunchConfigurationName")
""" The minimum number of EC2 instances that must be running """
min_size = argument.Integer(aws_field="MinSize")
""" The maximum number of EC2 instances that can be started by this
AutoScalingGroup """
max_size = argument.Integer(aws_field="MaxSize")
""" The number of EC2 instances that should be running. Must be between
min_size and max_size. """
desired_capacity = argument.Integer(aws_field="DesiredCapacity")
""" The amount of time (in seconds) between scaling activities. """
default_cooldown = argument.Integer(default=300, aws_field="DefaultCooldown")
availability_zones = argument.List(aws_field="AvailabilityZones")
# FIXME: This needs a custom serializer: Instead of a list, botocore expects
# a comma separated string!
subnets = argument.List(
Subnet,
aws_field="VPCZoneIdentifier",
aws_serializer=serializers.CommaSeperatedList(serializers.List()),
)
load_balancers = argument.ResourceList(LoadBalancer, aws_field="LoadBalancerNames", aws_update=False)
""" The kind of health check to use to detect unhealthy instances. By
default if you are using ELB with the ASG it will use the same health
checks as ELB. """
health_check_type = argument.String(
max=32,
default=lambda instance: "ELB" if instance.load_balancers else None,
aws_field="HealthCheckType",
)
health_check_grace_period = argument.String(aws_field="HealthCheckGracePeriod")
placement_group = argument.String(max=255, aws_field="PlacementGroup")
termination_policies = argument.List(aws_field="TerminationPolicies")
replacement_policy = argument.String(choices=['singleton', 'graceful'], default='graceful')
account = argument.Resource(AWS)
class ReplaceInstance(Action):
scaling_processes = [
"AlarmNotification",
"AZRebalance",
"ReplaceUnhealthy",
"ScheduledActions",
]
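# Note (added): suspending these processes keeps the ASG from rebalancing or
# replacing instances on its own while one is being swapped out by hand.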
def __init__(self, runner, target, instance_id):
super(ReplaceInstance, self).__init__(runner, target)
self.instance_id = instance_id
def suspend_processes(self):
self.client.suspend_processes(
AutoScalingGroupName=self.resource.name,
ScalingProcesses=self.scaling_processes,
)
def scale(self):
raise NotImplementedError(self.scale)
# FIXME: If TerminateInstanceInAutoScalingGroup is graceful then we don't
# need to detach from the ASG.
"""
def remove_from_balancer(self):
self.client.detach_instances(
AutoScalingGroupName=self.resource.name,
InstanceIds=[self.instance_id],
ShouldDecrementDesiredCapacity=False,
)
"""
def terminate_instance(self):
self.client.terminate_instance_in_auto_scaling_group(
InstanceId=self.instance_id,
ShouldDecrementDesiredCapacity=False,
)
def wait_for_healthy_asg(self):
# FIXME: Consider the grace period of the ASG + few minutes for booting
# and use that as a timeout for the release process.
while True:
asg = self.target.describe_object()
healthy = [i for i in asg['Instances'] if i['HealthStatus'] == 'Healthy']
if asg['DesiredCapacity'] == len(healthy):
return True
def unscale(self):
raise NotImplementedError(self.unscale)
def resume_processes(self):
self.client.resume_processes(
AutoScalingGroupName=self.resource.name,
ScalingProcesses=self.scaling_processes,
)
def run(self):
self.suspend_processes()
try:
self.scale()
try:
# self.remove_from_balancer()
self.terminate_instance()
if not self.wait_for_healthy_asg():
raise errors.Error("Auto scaling group {} is not returning to a healthy state".format(self.resource.name))
finally:
self.unscale()
finally:
self.resume_processes()
class GracefulReplacement(ReplaceInstance):
@property
def description(self):
yield "Gracefully replace instance {} (by increasing ASG pool and then terminating)".format(self.instance_id)
def scale(self):
desired_capacity = self.target.object['DesiredCapacity']
desired_capacity += 1
max_size = self.resource.max_size
if desired_capacity > max_size:
max_size = desired_capacity
self.client.update_auto_scaling_group(
AutoScalingGroupName=self.resource.name,
MaxSize=max_size,
DesiredCapacity=desired_capacity,
)
def unscale(self):
self.client.update_auto_scaling_group(
AutoScalingGroupName=self.resource.name,
MaxSize=self.resource.max_size,
DesiredCapacity=self.target.object['DesiredCapacity'],
)
class SingletonReplacement(ReplaceInstance):
@property
def description(self):
yield "Replace singleton instance {}".format(self.instance_id)
def scale(self):
pass
def unscale(self):
pass
class Apply(SimpleApply, Target):
resource = AutoScalingGroup
service_name = 'autoscaling'
create_action = "create_auto_scaling_group"
update_action = "update_auto_scaling_group"
describe_action = "describe_auto_scaling_groups"
describe_list_key = "AutoScalingGroups"
key = 'AutoScalingGroupName'
def get_describe_filters(self):
return {"AutoScalingGroupNames": [self.resource.name]}
def update_object(self):
launch_config_name = self.runner.get_target(self.resource.launch_configuration).resource_id
for instance in self.object.get("Instances", []):
if instance['LifecycleState'] in ('Terminating', ):
continue
if instance.get('LaunchConfigurationName', '') != launch_config_name:
klass = {
'graceful': GracefulReplacement,
'singleton': SingletonReplacement,
}[self.resource.replacement_policy]
yield klass(self, instance['InstanceId'])
|
Python
| 0.000001
|
@@ -3281,16 +3281,8 @@
elf,
- runner,
tar
@@ -3350,16 +3350,8 @@
t__(
-runner,
targ
|
a25c0cce29c7e2c05f0d0036cf1b7b1a67d2ccf0
|
Fix : Store one keyword as an array when Image (IPTC metadata) parser is used [SDESK-6566] (#2372)
|
superdesk/io/feed_parsers/image_iptc.py
|
superdesk/io/feed_parsers/image_iptc.py
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013 - 2018 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import arrow
from superdesk.io.feed_parsers import FileFeedParser
from superdesk.io.registry import register_feed_parser
from superdesk.errors import ParserError
from superdesk.media.media_operations import process_file_from_stream
from superdesk.media.image import get_meta_iptc
from superdesk.media.iim_codes import TAG
from superdesk.metadata.item import GUID_TAG, ITEM_TYPE, CONTENT_TYPE
from superdesk.metadata import utils
from superdesk.media.renditions import generate_renditions, get_renditions_spec
from superdesk.upload import url_for_media
from superdesk.utc import utcnow
from superdesk import filemeta
from flask import current_app as app
from eve.utils import config
from datetime import datetime
import mimetypes
import logging
import os.path
logger = logging.getLogger(__name__)
class ImageIPTCFeedParser(FileFeedParser):
"""
Feed Parser which can parse images using IPTC metadata
"""
NAME = "image_iptc"
label = "Image (IPTC metadata)"
ALLOWED_EXT = mimetypes.guess_all_extensions("image/jpeg")
DATETIME_FORMAT = "%Y%m%dT%H%M%S%z"
IPTC_MAPPING = {
TAG.HEADLINE: "headline",
TAG.BY_LINE: "byline",
TAG.OBJECT_NAME: "slugline",
TAG.CAPTION_ABSTRACT: "description_text",
TAG.KEYWORDS: "keywords",
TAG.SPECIAL_INSTRUCTIONS: "ednote",
TAG.COPYRIGHT_NOTICE: "copyrightnotice",
TAG.ORIGINAL_TRANSMISSION_REFERENCE: "assignment_id",
}
def can_parse(self, image_path):
if not isinstance(image_path, str):
return False
return mimetypes.guess_type(image_path)[0] == "image/jpeg"
def parse(self, image_path, provider=None):
try:
item = self.parse_item(image_path)
return item
except Exception as ex:
raise ParserError.parseFileError(exception=ex, provider=provider)
def parse_item(self, image_path):
filename = os.path.basename(image_path)
content_type = mimetypes.guess_type(image_path)[0]
guid = utils.generate_guid(type=GUID_TAG)
item = {
"guid": guid,
"uri": guid,
config.VERSION: 1,
ITEM_TYPE: CONTENT_TYPE.PICTURE,
"mimetype": content_type,
"versioncreated": utcnow(),
}
with open(image_path, "rb") as f:
_, content_type, file_metadata = process_file_from_stream(f, content_type=content_type)
f.seek(0)
file_id = app.media.put(f, filename=filename, content_type=content_type, metadata=file_metadata)
filemeta.set_filemeta(item, file_metadata)
f.seek(0)
metadata = get_meta_iptc(f)
f.seek(0)
self.parse_meta(item, metadata)
rendition_spec = get_renditions_spec(no_custom_crops=True)
renditions = generate_renditions(
f, file_id, [file_id], "image", content_type, rendition_spec, url_for_media
)
item["renditions"] = renditions
return item
def parse_date_time(self, date, time):
if not date or not time:
return
datetime_string = "{}T{}".format(date, time)
try:
return datetime.strptime(datetime_string, self.DATETIME_FORMAT)
except ValueError:
try:
return arrow.get(datetime_string).datetime
except ValueError:
return
def parse_meta(self, item, metadata):
datetime_created = self.parse_date_time(metadata.get(TAG.DATE_CREATED), metadata.get(TAG.TIME_CREATED))
if datetime_created:
item["firstcreated"] = datetime_created
# now we map IPTC metadata to superdesk metadata
for source_key, dest_key in self.IPTC_MAPPING.items():
try:
item[dest_key] = metadata[source_key]
except KeyError:
continue
return item
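# Sketch of the SDESK-6566 fix (reconstructed from the diff below): the IPTC
# KEYWORDS tag decodes to a bare string when only one keyword is present, so
# it has to be normalised to a list after the mapping loop:
#
#     if isinstance(item.get("keywords"), str):
#         item["keywords"] = [item["keywords"]]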
register_feed_parser(ImageIPTCFeedParser.NAME, ImageIPTCFeedParser())
|
Python
| 0
|
@@ -4214,16 +4214,139 @@
ontinue%0A
+%0A # SDESK-6566%0A if isinstance(item.get(%22keywords%22), str):%0A item%5B%22keywords%22%5D = %5Bitem%5B%22keywords%22%5D%5D%0A%0A
|
b82f21ea92aad44ca101744a3f5300280f081524
|
Fix site when logged in
|
sweettooth/review/context_processors.py
|
sweettooth/review/context_processors.py
|
from extensions.models import ExtensionVersion
def n_unreviewed_extensions(request):
if not request.user.has_perm("review.can-review-extensions"):
return dict()
return dict(n_unreviewed_extensions=ExtensionVersion.unreviewed().count())
|
Python
| 0
|
@@ -226,16 +226,24 @@
Version.
+objects.
unreview
|
7d5cc169387ec97d79fa2f2a0c59fed40c888638
|
change the internal name of tests to descriptively show maximum number of retries
|
conftest.py
|
conftest.py
|
from collections import namedtuple, OrderedDict
import glob
import logging
import os
import re
import yaml
import pytest
Scenario = namedtuple("Scenario", ["path", "qmin", "config"])
def ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
"""Make YaML load to OrderedDict.
This is done to ensure compability with Python versions prior to 3.6.
See docs.python.org/3.6/whatsnew/3.6.html#new-dict-implementation for more information.
repr(config) is a part of testcase's name in pytest.
We need to ensure that it is ordered in the same way.
See https://github.com/pytest-dev/pytest/issues/1075.
"""
class OrderedLoader(Loader): # pylint: disable=too-many-ancestors
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
return yaml.load(stream, OrderedLoader)
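# Minimal usage sketch (hypothetical file name), assuming the helper above:
#
#     config_dict = ordered_load(open('deckard.yaml'), yaml.SafeLoader)
#     # keys iterate in file order, so repr(config_dict) -- and therefore the
#     # generated pytest test ids -- stay stable across runs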
def config_sanity_check(config_dict, config_name):
"""Checks if parsed configuration is valid"""
mandatory_keys = {'name', 'binary', 'templates', 'configs', 'additional'}
for cfg in config_dict['programs']:
missing_keys = mandatory_keys - set(cfg.keys())
assert not missing_keys, 'Mandatory fields in configuration are missing: %s' % missing_keys
# sanity check templates vs. configs
assert len(cfg['templates']) == len(cfg['configs']),\
('Number of jinja2 template files is not equal '
'to number of config files to be generated for '
'program "%s" (%s), i.e. len(templates) != len(configs)'
% (cfg['name'], config_name))
for additional in cfg["additional"]:
assert isinstance(additional, str),\
"All additional arguments in yaml should be strings. (%s, %s)"\
% (cfg['name'], config_name)
def get_qmin_config(path):
"""Reads configuration from the *.rpl file and determines query-minimization setting."""
with open(path) as f:
for line in f:
if re.search(r"^CONFIG_END", line) or re.search(r"^SCENARIO_BEGIN", line):
return None
if re.search(r"^\s*query-minimization:\s*(on|yes)", line):
return True
if re.search(r"^\s*query-minimization:\s*(off|no)", line):
return False
return None
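# Example of the scenario lines scanned above (hypothetical .rpl content;
# the setting must appear before CONFIG_END to be picked up):
#
#     query-minimization: on
#     CONFIG_END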
def scenarios(paths, configs):
"""Returns list of *.rpl files from given path and packs them with their minimization setting"""
assert len(paths) == len(configs),\
"Number of --config has to be equal to number of --scenarios arguments."
scenario_list = []
for path, config in zip(paths, configs):
config_dict = ordered_load(open(config), yaml.SafeLoader)
config_sanity_check(config_dict, config)
if os.path.isfile(path):
filelist = [path] # path to single file, accept it
else:
filelist = sorted(glob.glob(os.path.join(path, "*.rpl")))
if not filelist:
raise ValueError('no *.rpl files found in path "{}"'.format(path))
for file in filelist:
scenario_list.append(Scenario(file, get_qmin_config(file), config_dict))
return scenario_list
def rpls(paths):
filelist = []
for path in paths:
if os.path.isfile(path):
filelist.append(path)  # path to single file, accept it
else:
filelist.extend(sorted(glob.glob(os.path.join(path, "*.rpl"))))
return filelist
def pytest_addoption(parser):
parser.addoption("--config", action="append", help="path to Deckard configuration .yaml file")
parser.addoption("--scenarios", action="append", help="directory with .rpl files")
parser.addoption("--retries", action="store", help=("number of retries per"
"test when Deckard is under load"))
def pytest_generate_tests(metafunc):
"""This is pytest weirdness to parametrize the test over all the *.rpl files.
See https://docs.pytest.org/en/latest/parametrize.html#basic-pytest-generate-tests-example
for more info."""
if 'scenario' in metafunc.fixturenames:
if metafunc.config.option.config is None:
configs = []
else:
configs = metafunc.config.option.config
if metafunc.config.option.scenarios is None:
paths = ["sets/resolver"] * len(configs)
else:
paths = metafunc.config.option.scenarios
metafunc.parametrize("scenario", scenarios(paths, configs), ids=str)
if 'rpl_path' in metafunc.fixturenames:
paths = metafunc.config.option.scenarios
metafunc.parametrize("rpl_path", rpls(paths), ids=str)
if 'max_retries' in metafunc.fixturenames:
max_retries = metafunc.config.option.retries
if max_retries is None:
max_retries = 3
metafunc.parametrize("max_retries", [max_retries], ids=str)
def check_log_level_xdist(level):
if level < logging.ERROR:
pytest.exit("Advanced logging not available while running with xdist "
"(try ommiting -n option)")
def pytest_configure(config):
# This means pytest-xdist is installed and enabled
if hasattr(config.option, "dist") and config.option.dist == "load":
log_level = config.option.log_level
if log_level is None:
return
try:
log_level = int(log_level)
except ValueError:
log_level = logging.getLevelName(log_level)
check_log_level_xdist(log_level)
|
Python
| 0
|
@@ -5039,19 +5039,49 @@
s%5D, ids=
-str
+lambda id: %22max-retries-%22+str(id)
)%0A%0A%0Adef
|
cb93088cb6a3f7522838fb74f133f5598c76897f
|
Fix multiline magic
|
tensorflow/tools/compatibility/ipynb.py
|
tensorflow/tools/compatibility/ipynb.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""A module to support operations on ipynb files"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import re
import shutil
import tempfile
CodeLine = collections.namedtuple("CodeLine", ["cell_number", "code"])
def is_python(cell):
"""Checks if the cell consists of Python code."""
return (cell["cell_type"] == "code" # code cells only
and cell["source"] # non-empty cells
and not cell["source"][0].startswith("%%")) # multiline eg: %%bash
def process_file(in_filename, out_filename, upgrader):
"""The function where we inject the support for ipynb upgrade."""
print("Extracting code lines from original notebook")
raw_code, notebook = _get_code(in_filename)
raw_lines = [cl.code for cl in raw_code]
# The function follows the original flow from `upgrader.process_file`
with tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
processed_file, new_file_content, log, process_errors = (
upgrader.update_string_pasta("\n".join(raw_lines), in_filename))
if temp_file and processed_file:
new_notebook = _update_notebook(notebook, raw_code,
new_file_content.split("\n"))
json.dump(new_notebook, temp_file)
else:
raise SyntaxError(
"Was not able to process the file: \n%s\n" % "".join(log))
files_processed = processed_file
report_text = upgrader._format_log(log, in_filename, out_filename)
errors = process_errors
shutil.move(temp_file.name, out_filename)
return files_processed, report_text, errors
def skip_magic(code_line, magic_list):
"""Checks if the cell has magic, that is not Python-based.
Args:
code_line: A line of Python code
magic_list: A list of jupyter "magic" exceptions
Returns:
True if the line is a jupyter "magic" line, not a Python line
>>> skip_magic('!ls -laF', ['%', '!', '?'])
True
"""
for magic in magic_list:
if code_line.startswith(magic):
return True
return False
def check_line_split(code_line):
r"""Checks if a line was split with `\`.
Args:
code_line: A line of Python code
Returns:
Whether the line was split with `\`
>>> bool(check_line_split("!gcloud ml-engine models create ${MODEL} \\\n"))
True
"""
return re.search(r"\\\s*\n$", code_line)
def _get_code(input_file):
"""Loads the ipynb file and returns a list of CodeLines."""
raw_code = []
with open(input_file) as in_file:
notebook = json.load(in_file)
cell_index = 0
for cell in notebook["cells"]:
if is_python(cell):
cell_lines = cell["source"]
is_line_split = False
for line_idx, code_line in enumerate(cell_lines):
# Sometimes a jupyter cell contains more than Python code.
# The idea is to comment those lines out while the upgrade runs.
if skip_magic(code_line, ["%", "!", "?"]) or is_line_split:
# Found a special character, need to "encode"
code_line = "###!!!" + code_line
# if this line ends with `\` -> also mark the next line as special
is_line_split = check_line_split(code_line)
# Sometimes people leave \n at the end of a cell so that only related
# lines are migrated and the diff stays minimal -> another hack
# handles that here
if (line_idx == len(cell_lines) - 1) and code_line.endswith("\n"):
code_line = code_line.replace("\n", "###===")
# sometimes a line starts with `\n` with content after it;
# the replace below handles that case as well
raw_code.append(
CodeLine(cell_index,
code_line.rstrip().replace("\n", "###===")))
cell_index += 1
return raw_code, notebook
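# Round-trip note: the "###!!!" and "###===" markers added above are stripped
# again in _update_notebook() below, restoring magic lines and trailing
# newlines once the upgrader has processed the plain-Python view of each cell.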
def _update_notebook(original_notebook, original_raw_lines, updated_code_lines):
"""Updates notebook, once migration is done."""
new_notebook = copy.deepcopy(original_notebook)
# validate that the number of lines is the same
assert len(original_raw_lines) == len(updated_code_lines), \
("The lengths of input and converted files are not the same: "
"{} vs {}".format(len(original_raw_lines), len(updated_code_lines)))
code_cell_idx = 0
for cell in new_notebook["cells"]:
if cell["cell_type"] != "code":
continue
applicable_lines = [
idx for idx, code_line in enumerate(original_raw_lines)
if code_line.cell_number == code_cell_idx
]
new_code = [updated_code_lines[idx] for idx in applicable_lines]
cell["source"] = "\n".join(new_code).replace("###!!!", "").replace(
"###===", "\n")
code_cell_idx += 1
return new_notebook
|
Python
| 0.000002
|
@@ -5010,35 +5010,27 @@
if
-cell%5B%22cell_type%22%5D != %22code%22
+not is_python(cell)
:%0A
|
898016cbde52392d6e85156f8e7f7084698475f1
|
Fix API change in make_test_data.py
|
tools/make_test_data.py
|
tools/make_test_data.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Author: Doug Hellmann <doug.hellmann@dreamhost.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Command line tool for creating test data for ceilometer.
"""
import argparse
import datetime
import logging
import sys
from ceilometer import counter
from ceilometer import meter
from ceilometer import storage
from ceilometer.openstack.common import cfg
from ceilometer.openstack.common import timeutils
def main():
cfg.CONF([])
parser = argparse.ArgumentParser(
description='generate metering data',
)
parser.add_argument(
'--interval',
default=10,
type=int,
help='the period between events, in minutes',
)
parser.add_argument(
'--start',
default=31,
help='the number of days in the past to start timestamps',
)
parser.add_argument(
'--end',
default=2,
help='the number of days into the future to continue timestamps',
)
parser.add_argument(
'--type',
choices=('gauge', 'cumulative'),
default='gauge',
help='counter type',
)
parser.add_argument(
'--project',
help='project id of owner',
)
parser.add_argument(
'--user',
help='user id of owner',
)
parser.add_argument(
'resource',
help='the resource id for the meter data',
)
parser.add_argument(
'counter',
help='the counter name for the meter data',
)
parser.add_argument(
'volume',
help='the amount to attach to the meter',
type=int,
default=1,
)
args = parser.parse_args()
# Set up logging to use the console
console = logging.StreamHandler(sys.stderr)
console.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(message)s')
console.setFormatter(formatter)
root_logger = logging.getLogger('')
root_logger.addHandler(console)
root_logger.setLevel(logging.DEBUG)
# Connect to the metering database
conn = storage.get_connection(cfg.CONF)
# Find the user and/or project for a real resource
if not (args.user or args.project):
for r in conn.get_resources():
if r['resource_id'] == args.resource:
args.user = r['user_id']
args.project = r['project_id']
break
# Compute start and end timestamps for the
# new data.
timestamp = timeutils.parse_isotime(args.start)
end = timeutils.parse_isotime(args.end)
increment = datetime.timedelta(minutes=args.interval)
# Generate events
n = 0
while timestamp <= end:
c = counter.Counter(source='artificial',
name=args.counter,
type=args.type,
volume=args.volume,
user_id=args.user,
project_id=args.project,
resource_id=args.resource,
timestamp=timestamp,
resource_metadata={},
)
data = meter.meter_message_from_counter(c)
conn.record_metering_data(data)
n += 1
timestamp = timestamp + increment
print 'Added %d new events' % n
return 0
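# After the signed-message API change this commit tracks (see the diff below),
# the conversion call above becomes:
#
#     data = meter.meter_message_from_counter(c, cfg.CONF.metering_secret)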
if __name__ == '__main__':
main()
|
Python
| 0.000008
|
@@ -3785,16 +3785,42 @@
ounter(c
+, cfg.CONF.metering_secret
)%0A
|
885c4e4ae1a0cebc6cc08affeabf7232870307c0
|
Add station and line as optional variables
|
web/app.py
|
web/app.py
|
import os
from flask import Flask
from flask import render_template
from helpers.tube import line_status, station_open
from helpers.metoffice import get_forcast
app = Flask(__name__)
app.debug = True
def get_value():
result = 100
forcast = get_forcast()
print "Feels Like Temperature: %s" % forcast['Feels Like Temperature']
print "Weather Type: %s" % forcast['Weather Type']
print "Wind Speed: %s" % forcast['Wind Speed']
print "Line status: %s" % line_status('Victoria')
#0 remove points for cold
if int(forcast['Feels Like Temperature']) <= 15 and int(forcast['Feels Like Temperature']) > 5:
print "Removing 15 points because temperature > 5 and <= 15"
result = result - 10
if int(forcast['Feels Like Temperature']) <= 5 and int(forcast['Feels Like Temperature']) > 0:
print "Removing 25 points because temperature > 0 and <= 5"
result = result - 15
if int(forcast['Feels Like Temperature']) <= 0:
print "Removing 50 points because temperature < 0"
result = result - 45
# 1) remove points for cycling based on different weather conditions (http://www.metoffice.gov.uk/datapoint/support/code-definitions)
if int(forcast['Weather Type']) in (9, 10, 11, 12):
print "Removing 30 points for weather type"
result = result - 30
if int(forcast['Weather Type']) in (13, 14, 15,):
print "Removing 45 points for weather type"
result = result - 45
if int(forcast['Weather Type']) in (16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30):
print "Removing 75 points for weather type"
result = result - 75
#2) remove points for wind
if int(forcast['Wind Speed']) > 10 and int(forcast['Wind Speed']) <= 15:
result = result - 10
if int(forcast['Wind Speed']) > 15 and int(forcast['Wind Speed']) <= 25:
print "Removing 20 points because wind speed > 15 and < 25"
result = result - 20
if int(forcast['Wind Speed']) > 25:
print "Removing 35 points because wind speed > 25"
result = result - 35
#3) add points for cycling based on status of line
if line_status('Victoria') != 'Good Service':
print "Adding 35 points tube line not running well"
result = result + 35
#4) if station shut, need to get on a bike whatever the weather
if not station_open('Brixton'):
print "Forced 100 as station is shut"
result = 100
#make sure value is between 0 and 100
if result > 100:
result = 100
if result < 0:
result = 0
return result
@app.route("/")
def index():
value = 100 - get_value()
return render_template('index.html', value=value)
@app.route("/api")
def api():
value = get_value()
return render_template('api.html', value=value)
if __name__ == '__main__':
# Bind to PORT if defined, otherwise default to 5000.
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
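# Usage sketch, assuming the query-string parameters added in the diff below
# (station and line fall back to "Brixton" and "Victoria"):
#
#     GET /?station=Brixton&line=Victoria
#     GET /api?station=Brixton&line=Victoria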
|
Python
| 0
|
@@ -61,16 +61,42 @@
emplate%0A
+from flask import request%0A
from hel
@@ -234,16 +234,29 @@
t_value(
+station, line
):%0A r
@@ -297,20 +297,16 @@
rcast()%0A
-
%0A pri
@@ -427,20 +427,16 @@
r Type'%5D
-
%0A pri
@@ -525,24 +525,14 @@
tus(
-'Victoria')%0A
+line)%0A
%0A
@@ -923,24 +923,16 @@
nd %3C= 5%22
-
%0A
@@ -1234,24 +1234,16 @@
tions)%0A%0A
-
%0A if
@@ -1372,24 +1372,16 @@
lt - 30%0A
-
%0A if
@@ -1508,20 +1508,16 @@
lt - 45%0A
-
%0A if
@@ -1661,24 +1661,16 @@
er type%22
-
%0A
@@ -2214,18 +2214,12 @@
tus(
-'Victoria'
+line
) !=
@@ -2421,17 +2421,15 @@
pen(
-'Brixt
+stati
on
-'
):%0A
@@ -2617,20 +2617,16 @@
ult = 0%0A
-
%0A ret
@@ -2662,24 +2662,129 @@
ef index():%0A
+ station = request.args.get('station') or %22Brixton%22%0A line = request.args.get('line') or %22Victoria%22%0A
value =
@@ -2791,32 +2791,45 @@
100 - get_value(
+station, line
)%0A return ren
@@ -2895,24 +2895,129 @@
%0Adef api():%0A
+ station = request.args.get('station') or %22Brixton%22%0A line = request.args.get('line') or %22Victoria%22%0A
value =
@@ -3026,16 +3026,29 @@
t_value(
+station, line
)%0A re
@@ -3098,20 +3098,8 @@
e)%0A%0A
-
%0Aif
@@ -3264,12 +3264,8 @@
t=port)%0A
-
|
c8b051020d2bbab75b0e15bfdca2ad7a20d1af71
|
rename attr TableColumns.cols to .names.
|
db.py
|
db.py
|
import sqlite3
from consts import DB_NAME
from common import AttrDict
from schema import TABLE_SCHEMAS
from verbosity import verbose, set_verbosity
g_conn = None
g_table_info = AttrDict() # {tname: TableColumns()}
class TableColumns(object):
def __init__(self, *args, sep='|'):
self._sep = sep
self._cols = args
def __repr__(self):
return self._sep.join(self.cols)
@property
def sep(self):
return self._sep
@sep.setter
def sep(self, value):
self._sep = value
@property
def cols(self):
return self._extract(1)
def _extract(self, index):
return (col[index] for col in self._cols)
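# Note: rows returned by PRAGMA table_info() are tuples of the form
# (cid, name, type, notnull, dflt_value, pk), so _extract(1) yields the
# column names -- which is what the rename in this commit makes explicit.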
def connect():
global g_conn
if g_conn is None:
g_conn = sqlite3.connect(DB_NAME)
verbose(2, 'connected to', DB_NAME)
def disconnect():
global g_conn
if g_conn is not None:
g_conn.commit()
g_conn.close()
verbose(2, 'closed connection to:', DB_NAME)
g_conn = None
def init(drop=False):
connect()
for tname in ['tasks']:
if drop or not load_table_info(tname):
_drop_create_table(tname)
load_table_info(tname)
def load_table_info(tname):
if tname not in g_table_info:
cols = g_conn.cursor().execute('PRAGMA table_info("{}")'.format(tname)).fetchall()
if cols:
g_table_info[tname] = TableColumns(*cols)
verbose(2, 'loaded info of table:', tname)
else:
return None
return g_table_info[tname]
def _drop_create_table(tname):
cur = g_conn.cursor()
cur.execute('DROP TABLE IF EXISTS ' + tname)
cur.execute('CREATE TABLE {} ({})'.format(tname, str(TABLE_SCHEMAS[tname])))
verbose(1, 'initialized table:', tname)
def create_task(**kwargs):
task = TABLE_SCHEMAS.tasks.new(**kwargs)
sql = 'INSERT INTO tasks ({}) VALUES ({})'.format(*task.for_insert())
g_conn.cursor().execute(sql)
g_conn.commit()
verbose(1, 'added task', repr(task))
def update_task(**kwargs):
read_task(name=kwargs['name'])
task = TABLE_SCHEMAS.tasks.new(**kwargs)
sql = 'UPDATE tasks SET {} WHERE name="{}"'.format(task.for_update(), kwargs['name'])
g_conn.cursor().execute(sql)
g_conn.commit()
verbose(1, 'updated task', repr(task))
def read_task(name):
sql = 'SELECT * FROM tasks WHERE name="{}"'.format(name)
values = g_conn.cursor().execute(sql).fetchone()
if not values:
raise NameError('missing task: ' + name)
task = TABLE_SCHEMAS.tasks.new(**dict(zip(g_table_info.tasks.cols, values)))
verbose(1, 'got task', repr(task))
return task
def delete_task(name):
read_task(name)
sql = 'DELETE FROM tasks WHERE name="{}"'.format(name)
g_conn.cursor().execute(sql)
g_conn.commit()
verbose(1, 'deleted task:', name)
def list_tasks(rowsep='\n', colsep='|'):
sql = 'SELECT * FROM tasks'
return rowsep.join(colsep.join(row) for row in g_conn.cursor().execute(sql).fetchall())
if __name__ == '__main__':
set_verbosity(1)
init(drop=True)
sep = '\n\t\t\t'
verbose(1, 'info tasks:', str(g_table_info.tasks))
create_task(name='task1', schedule='daily')
verbose(1, 'all tasks:', '\t' + list_tasks(sep))
create_task(name='task2', schedule='continuous')
verbose(1, 'all tasks:', '\t' + list_tasks(sep))
update_task(name='task2', state='running')
verbose(1, 'got task', repr(read_task('task2')))
delete_task(name='task1')
verbose(1, 'all tasks:', '\t' + list_tasks(sep))
|
Python
| 0
|
@@ -392,19 +392,20 @@
in(self.
-col
+name
s)%0A%0A
@@ -550,19 +550,20 @@
def
-col
+name
s(self):
@@ -2589,19 +2589,20 @@
o.tasks.
-col
+name
s, value
|
13ab494e0caaca6a460a49528c3aae4c7707042a
|
add a bit more docstring
|
pyecharts/custom/page.py
|
pyecharts/custom/page.py
|
#!/usr/bin/env python
# coding=utf-8
from pyecharts import template
class Page(object):
def __init__(self):
self.__charts = []
def add(self, achart_or_charts):
"""
:param achart_or_charts:
:return:
"""
if isinstance(achart_or_charts, list):
self.__charts.extend(achart_or_charts)
else:
self.__charts.append(achart_or_charts)
def render(self, path="render.html"):
"""
:param path:
:return:
"""
template_name = "multicharts.html"
chart_content = self.render_embed()
tmp = template.JINJA2_ENV.get_template(template_name)
html = tmp.render(multi_chart_content=chart_content)
html = template.freeze_js(html)
template.write_utf8_html_file(path, html)
def render_embed(self):
"""
:return:
"""
chart_content = ""
for chart in self.__charts:
chart_content += chart.render_embed()
return chart_content
|
Python
| 0
|
@@ -177,32 +177,78 @@
ts):%0A %22%22%22
+%0A Append chart(s) to the rendering page
%0A%0A :param
@@ -504,32 +504,79 @@
l%22):%0A %22%22%22
+%0A Produce rendered charts in a html file
%0A%0A :param
@@ -932,32 +932,32 @@
er_embed(self):%0A
-
%22%22%22%0A%0A
@@ -951,16 +951,78 @@
%22%22%22
+%0A Produce rendered charts in html for embedding purpose
%0A%0A
|
353c09e221483929075d5673b6ac3f705754caaf
|
Add --version option
|
pyinstrument/__main__.py
|
pyinstrument/__main__.py
|
import sys, os, codecs, runpy, tempfile
from optparse import OptionParser
from pyinstrument import Profiler
from .six import exec_
def main():
usage = ("usage: pyinstrument [options] scriptfile [arg] ...")
parser = OptionParser(usage=usage)
parser.allow_interspersed_args = False
parser.add_option('-m', '',
dest='module_name', action='store',
help="run library module as a script, like 'python -m module'")
parser.add_option('-r', '--renderer',
dest='renderer', action='store', type='string',
help="how the report should be rendered. One of: 'text', 'html', 'json', or python import path to a renderer class",
default='text')
parser.add_option('', '--html',
dest="output_html", action='store_true',
help="Shortcut for '--renderer=html'", default=False)
parser.add_option('-o', '--outfile',
dest="outfile", action='store',
help="save report to <outfile>", default=None)
parser.add_option('', '--unicode',
dest='unicode', action='store_true',
help='(text renderer only) force unicode text output')
parser.add_option('', '--no-unicode',
dest='unicode', action='store_false',
help='(text renderer only) force ascii text output')
parser.add_option('', '--color',
dest='color', action='store_true',
help='(text renderer only) force ansi color text output')
parser.add_option('', '--no-color',
dest='color', action='store_false',
help='(text renderer only) force no color text output')
if not sys.argv[1:]:
parser.print_help()
sys.exit(2)
options, args = parser.parse_args()
if args == [] and options.module_name is None:
parser.print_help()
sys.exit(2)
if options.module_name is not None:
sys.argv[:] = [options.module_name] + args
code = "run_module(modname, run_name='__main__')"
globs = {
'run_module': runpy.run_module,
'modname': options.module_name
}
else:
sys.argv[:] = args
progname = args[0]
sys.path.insert(0, os.path.dirname(progname))
with open(progname, 'rb') as fp:
code = compile(fp.read(), progname, 'exec')
globs = {
'__file__': progname,
'__name__': '__main__',
'__package__': None,
}
profiler = Profiler()
profiler.start()
try:
exec_(code, globs, None)
except (SystemExit, KeyboardInterrupt):
pass
profiler.stop()
if options.output_html:
options.renderer = 'html'
output_to_temp_file = (options.renderer == 'html'
and not options.outfile
and file_is_a_tty(sys.stdout))
if options.outfile:
f = codecs.open(options.outfile, 'w', 'utf-8')
elif output_to_temp_file:
output_file = tempfile.NamedTemporaryFile(suffix='.html', delete=False)
f = codecs.getwriter('utf-8')(output_file)
output_filename = output_file.name
else:
f = sys.stdout
renderer_kwargs = {}
if options.renderer == 'text':
unicode_override = options.unicode != None
color_override = options.color != None
unicode = options.unicode if unicode_override else file_supports_unicode(f)
color = options.color if color_override else file_supports_color(f)
renderer_kwargs = {'unicode': unicode, 'color': color}
f.write(profiler.output(renderer=options.renderer, **renderer_kwargs))
f.close()
if output_to_temp_file:
print('stdout is a terminal, so saved profile output to %s' % output_filename)
import webbrowser, urllib.parse
url = urllib.parse.urlunparse(('file', '', output_filename, '', '', ''))
webbrowser.open(url)
def file_supports_color(file_obj):
"""
Returns True if the running system's terminal supports color.
Borrowed from Django
https://github.com/django/django/blob/master/django/core/management/color.py
"""
plat = sys.platform
supported_platform = plat != 'Pocket PC' and (plat != 'win32' or
'ANSICON' in os.environ)
is_a_tty = file_is_a_tty(file_obj)
return (supported_platform and is_a_tty)
def file_supports_unicode(file_obj):
encoding = getattr(file_obj, 'encoding', None)
if not encoding:
return False
codec_info = codecs.lookup(encoding)
return ('utf' in codec_info.name)
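# Note: codecs.lookup() normalises encoding aliases, so 'UTF-8', 'utf8' and
# 'U8' all resolve to a CodecInfo whose .name is 'utf-8', which is why the
# simple substring test above is enough.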
def file_is_a_tty(file_obj):
return hasattr(file_obj, 'isatty') and file_obj.isatty()
if __name__ == '__main__':
main()
|
Python
| 0.000002
|
@@ -67,16 +67,36 @@
nParser%0A
+import pyinstrument%0A
from pyi
@@ -225,16 +225,174 @@
%5D ...%22)%0A
+ version_string = 'pyinstrument %7Bv%7D, on Python %7Bpyv%5B0%5D%7D.%7Bpyv%5B1%5D%7D.%7Bpyv%5B2%5D%7D'.format(%0A v=pyinstrument.__version__,%0A pyv=sys.version_info,%0A )%0A
pars
@@ -420,16 +420,40 @@
ge=usage
+, version=version_string
)%0A pa
|
b86d5dbca2e8f772720bcd21297d292edbe04606
|
Update bot.py
|
intelmq/lib/bot.py
|
intelmq/lib/bot.py
|
import re
import sys
import json
import time
import ConfigParser
from intelmq.lib.event import Event
from intelmq.lib.pipeline import Pipeline
from intelmq.lib.utils import decode, log
SYSTEM_CONF_FILE = "/etc/intelmq/system.conf"
PIPELINE_CONF_FILE = "/etc/intelmq/pipeline.conf"
BOTS_CONF_FILE = "/etc/intelmq/runtime.conf"
LOGS_PATH = "/var/log/intelmq/"
class Bot(object):
def __init__(self, bot_id):
self.current_message = None
self.last_message = None
self.message_counter = 0
self.check_bot_id(bot_id)
self.bot_id = bot_id
self.logger = self.load_logger()
self.logger.info('Bot is starting')
self.load_configurations()
self.src_queue, self.dest_queues = self.load_pipeline()
self.parameters.processing_interval = float(self.parameters.processing_interval)
self.init()
def init(self):
pass
def start(self):
self.logger.info('Bot start processing')
self.pipeline = None
while True:
try:
if not self.pipeline:
self.logger.info("Connecting to pipeline queues")
self.pipeline = Pipeline(self.src_queue, self.dest_queues)
self.logger.info("Connected to pipeline queues. Start processing.")
self.process()
self.pipeline.sleep(self.parameters.processing_interval)
except Exception, ex:
retry_delay = 30
self.logger.error("Last Correct Message(event): %r" % self.last_message)
self.logger.error("Current Message(event): %r" % self.current_message)
self.logger.exception("Check the following exception:")
self.logger.error('Pipeline connection failed (%r)' % ex)
self.logger.info('Pipeline will reconnect in %s seconds' % retry_delay)
time.sleep(retry_delay)
#self.pipeline.disconnect() # caused problems
self.pipeline = None
except KeyboardInterrupt as e:
if self.pipeline:
self.pipeline.disconnect()
self.logger.info("Disconnecting from pipeline")
self.logger.info("Bot is shutting down")
break
def stop(self):
try:
self.logger.error("Bot found an error. Exiting")
except:
pass
finally:
print "Bot found an error. Exiting"
exit(-1)
def check_bot_id(self, str):
res = re.search('[^0-9a-zA-Z\-]+', str)
if res:
print "Invalid bot id."
self.stop()
def load_configurations(self):
self.parameters = Parameters()
with open(BOTS_CONF_FILE, 'r') as fpconfig:
config = json.loads(fpconfig.read())
self.logger.debug("Loading configuration in %s section from '%s' file" % (self.bot_id, BOTS_CONF_FILE))
if self.bot_id in config.keys():
for option, value in config[self.bot_id].iteritems():
setattr(self.parameters, option, value)
self.logger.debug("Parameter '%s' loaded with the value '%s'" % (option, value))
def load_logger(self):
with open(SYSTEM_CONF_FILE, 'r') as fpconfig:
config = json.loads(fpconfig.read())
loglevel = config['logging_level']
return log(LOGS_PATH, self.bot_id, loglevel)
def load_pipeline(self):
with open(PIPELINE_CONF_FILE, 'r') as fpconfig:
config = json.loads(fpconfig.read())
self.logger.debug("Loading pipeline queues from '%s' file" % PIPELINE_CONF_FILE)
source_queue = None
destination_queues = None
if self.bot_id in config.keys():
if 'source-queue' in config[self.bot_id].keys():
source_queue = config[self.bot_id]['source-queue']
self.logger.info("Source queue '%s'" % source_queue)
if 'destination-queues' in config[self.bot_id].keys():
destination_queues = config[self.bot_id]['destination-queues']
self.logger.info("Destination queues '%s'" % ", ".join(destination_queues))
return [source_queue, destination_queues]
self.logger.error("Failed to load queues")
self.stop()
def send_message(self, message):
if not message:
self.logger.warning("Empty message found.")
return False
if isinstance(message, Event):
message = unicode(message) # convert Event Object to string (UTF-8)
self.message_counter += 1
if self.message_counter % int(self.parameters.logging_threshold) == 0:
self.logger.info("Processed %s messages." % self.message_counter)
self.pipeline.send(message)
def receive_message(self):
self.current_message = self.pipeline.receive()
if not self.current_message:
return None
message = self.current_message.decode('utf-8')
try: # Event Object
return Event.from_unicode(message)
except: # Report Object
return message
def acknowledge_message(self):
self.last_message = self.current_message
self.pipeline.acknowledge()
class Parameters(object):
pass
|
Python
| 0.000001
|
@@ -4982,46 +4982,11 @@
r %25
-int(self.parameters.logging_threshold)
+500
==
|
423aa887375c071f0a269466e050020f331c84e1
|
Remove print statement
|
copytext.py
|
copytext.py
|
#!/usr/bin/env python
from markupsafe import Markup
from openpyxl.reader.excel import load_workbook
class CopyException(Exception):
pass
class Error(object):
"""
An error object that can mimic the structure of the COPY data, whether the error happens at the Copy, Sheet or Row level. Will print the error whenever it gets repr'ed.
"""
_error = ''
def __init__(self, error):
self._error = error
def __getitem__(self, i):
return self
def __iter__(self):
return iter([self])
def __len__(self):
return 1
def __repr__(self):
return self._error
def __nonzero__(self):
return False
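# Illustrative behaviour (sketch): because __getitem__ returns self, chained
# lookups on a missing sheet keep yielding the same falsy Error, so templates
# render the message instead of raising:
#
#     COPY['nope']           # -> COPY.nope [sheet does not exist]
#     COPY['nope']['key']    # -> same Error, still renders the message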
class Row(object):
"""
Wraps a row of copy for error handling.
"""
_sheet = None
_row = []
_columns = []
_index = 0
def __init__(self, sheet, row, columns, index):
self._sheet = sheet
self._row = row
self._columns = columns
self._index = index
def __getitem__(self, i):
"""
Allow dict-style item access by index (column id), or by column name.
"""
if isinstance(i, int):
if i >= len(self._row):
return Error('COPY.%s.%i.%i [column index outside range]' % (self._sheet.name, self._index, i))
value = self._row[i]
return Markup(value or '')
if i not in self._columns:
return Error('COPY.%s.%i.%s [column does not exist in sheet]' % (self._sheet.name, self._index, i))
value = self._row[self._columns.index(i)]
return Markup(value or '')
def __iter__(self):
return iter(self._row)
def __len__(self):
return len(self._row)
def __unicode__(self):
if 'value' in self._columns:
value = self._row[self._columns.index('value')]
return Markup(value or '')
return Error('COPY.%s.%s [no value column in sheet]' % (self._sheet.name, self._row[self._columns.index('key')]))
def __html__(self):
return self.__unicode__()
def __nonzero__(self):
if 'value' in self._columns:
val = self._row[self._columns.index('value')]
if not val:
return False
return len(val)
return True
class Sheet(object):
"""
Wrap copy text, for a single worksheet, for error handling.
"""
name = None
_sheet = []
_columns = []
def __init__(self, name, data, columns):
self.name = name
self._sheet = [Row(self, [row[c] for c in columns], columns, i) for i, row in enumerate(data)]
self._columns = columns
def __getitem__(self, i):
"""
Allow dict-style item access by index (row id), or by row name ("key" column).
"""
if isinstance(i, int):
if i >= len(self._sheet):
return Error('COPY.%s.%i [row index outside range]' % (self.name, i))
return self._sheet[i]
if 'key' not in self._columns:
return Error('COPY.%s.%s [no key column in sheet]' % (self.name, i))
for row in self._sheet:
if row['key'] == i:
return row
return Error('COPY.%s.%s [key does not exist in sheet]' % (self.name, i))
def __iter__(self):
return iter(self._sheet)
def __len__(self):
return len(self._sheet)
def json(self):
"""
Serialize the sheet as JSON.
"""
import json
obj = {}
if 'key' in self._columns and 'value' in self._columns:
for row in self:
obj[row['key']] = row['value']
elif 'key' in self._columns:
for row in self:
obj[row['key']] = {}
for column in self._columns:
if column == 'key':
continue
value = row[column]
obj[row['key']][column] = value
else:
obj = []
keys = []
for column in self._columns:
keys.append(column)
for row in self:
row_obj = {}
for i, column in enumerate(row):
row_obj[keys[i]] = column
obj.append(row_obj)
print json.dumps(obj)
return json.dumps(obj)
class Copy(object):
"""
Wraps copy text, for multiple worksheets, for error handling.
"""
_filename = ''
_copy = {}
def __init__(self, filename):
self._filename = filename
self.load()
def __getitem__(self, name):
"""
Allow dict-style item access by sheet name.
"""
if name not in self._copy:
return Error('COPY.%s [sheet does not exist]' % name)
return self._copy[name]
def load(self):
"""
Parses the downloaded Excel file and writes it as JSON.
"""
try:
book = load_workbook(self._filename, data_only=True)
except IOError:
raise CopyException('"%s" does not exist. Have you run "fab update_copy"?' % self._filename)
for sheet in book:
columns = []
rows = []
for i, row in enumerate(sheet.rows):
row_data = [c.internal_value for c in row]
if i == 0:
columns = row_data
continue
# If nothing in a row then it doesn't matter
if all([c is None for c in row_data]):
continue
rows.append(dict(zip(columns, row_data)))
self._copy[sheet.title] = Sheet(sheet.title, rows, columns)
def json(self):
"""
Serialize the copy as JSON.
"""
import json
obj = {}
for name, sheet in self._copy.items():
if 'key' in sheet._columns and 'value' in sheet._columns:
obj[name] = {}
for row in sheet:
obj[name][row['key']] = row['value']
elif 'key' in sheet._columns:
obj[name] = {}
for row in sheet:
obj[name][row['key']] = {}
for column in sheet._columns:
if column == 'key':
continue
value = row[column]
obj[name][row['key']][column] = value
else:
obj[name] = []
for row in sheet:
obj[name].append(row._row)
return json.dumps(obj)
|
Python
| 0.007015
|
@@ -4306,47 +4306,8 @@
j)%0A%0A
- print json.dumps(obj)%0A%0A
|
92dc3cd54c0e6ecd934f2e09955cd9b3f315bc33
|
Improve comment in QrOpTest
|
tensorflow/compiler/tests/qr_op_test.py
|
tensorflow/compiler/tests/qr_op_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
@test_util.run_all_without_tensor_float_32(
"It's unknown why this test requires TF32 to be disabled")
# TODO(reedwm): Determine why this test requires TF32 disabled. Debugging is
# difficult due to this test's flakiness
class QrOpTest(xla_test.XLATestCase, parameterized.TestCase):
def AdjustedNorm(self, x):
"""Computes the norm of matrices in 'x', adjusted for dimension and type."""
norm = np.linalg.norm(x, axis=(-2, -1))
return norm / (max(x.shape[-2:]) * np.finfo(x.dtype).eps)
def CompareOrthogonal(self, x, y, rank):
# We only compare the first 'rank' orthogonal vectors since the
# remainder form an arbitrary orthonormal basis for the
# (row- or column-) null space, whose exact value depends on
# implementation details. Notice that since we check that the
# matrices of singular vectors are unitary elsewhere, we do
# implicitly test that the trailing vectors of x and y span the
# same space.
x = x[..., 0:rank]
y = y[..., 0:rank]
# Q is only unique up to sign (complex phase factor for complex matrices),
# so we normalize the sign first.
sum_of_ratios = np.sum(np.divide(y, x), -2, keepdims=True)
phases = np.divide(sum_of_ratios, np.abs(sum_of_ratios))
x *= phases
self.assertTrue(np.all(self.AdjustedNorm(x - y) < 30.0))
def CheckApproximation(self, a, q, r):
# Tests that a ~= q*r.
precision = self.AdjustedNorm(a - np.matmul(q, r))
self.assertTrue(np.all(precision < 10.0))
def CheckUnitary(self, x):
# Tests that x[...,:,:]^H * x[...,:,:] is close to the identity.
xx = math_ops.matmul(x, x, adjoint_a=True)
identity = array_ops.matrix_band_part(array_ops.ones_like(xx), 0, 0)
precision = self.AdjustedNorm(xx.eval() - self.evaluate(identity))
self.assertTrue(np.all(precision < 5.0))
def _test(self, dtype, shape, full_matrices):
np.random.seed(1)
x_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape)).reshape(shape).astype(dtype)
with self.session() as sess:
x_tf = array_ops.placeholder(dtype)
with self.device_scope():
q_tf, r_tf = linalg_ops.qr(x_tf, full_matrices=full_matrices)
q_tf_val, r_tf_val = sess.run([q_tf, r_tf], feed_dict={x_tf: x_np})
q_dims = q_tf_val.shape
np_q = np.ndarray(q_dims, dtype)
np_q_reshape = np.reshape(np_q, (-1, q_dims[-2], q_dims[-1]))
new_first_dim = np_q_reshape.shape[0]
x_reshape = np.reshape(x_np, (-1, x_np.shape[-2], x_np.shape[-1]))
for i in range(new_first_dim):
if full_matrices:
np_q_reshape[i, :, :], _ = np.linalg.qr(
x_reshape[i, :, :], mode="complete")
else:
np_q_reshape[i, :, :], _ = np.linalg.qr(
x_reshape[i, :, :], mode="reduced")
np_q = np.reshape(np_q_reshape, q_dims)
self.CompareOrthogonal(np_q, q_tf_val, min(shape[-2:]))
self.CheckApproximation(x_np, q_tf_val, r_tf_val)
self.CheckUnitary(q_tf_val)
SIZES = [1, 2, 5, 10, 32, 100, 300]
DTYPES = [np.float32]
PARAMS = itertools.product(SIZES, SIZES, DTYPES)
@parameterized.parameters(*PARAMS)
def testQR(self, rows, cols, dtype):
# TODO(b/111317468): Test other types.
for full_matrices in [True, False]:
# Only tests the (3, 2) case for small numbers of rows/columns.
for batch_dims in [(), (3,)] + [(3, 2)] * (max(rows, cols) < 10):
self._test(dtype, batch_dims + (rows, cols), full_matrices)
def testLarge2000x2000(self):
self._test(np.float32, (2000, 2000), full_matrices=True)
if __name__ == "__main__":
test.main()
|
Python
| 0
|
@@ -1253,184 +1253,223 @@
-%22It's unknown why this test requires TF32 to be disabled%22)%0A# TODO(reedwm): Determine why this test requires TF32 disabled. Debugging is%0A# difficult due to this test's flakiness
+'XLA QR op calls matmul. Also, matmul used for verification. Also with '%0A 'TF32, mysterious %22Unable to launch cuBLAS gemm%22 error occasionally occurs')%0A# TODO(b/165435566): Fix %22Unable to launch cuBLAS gemm%22 error
%0Acla
|
1142a0c3eda4b09ead668fb4fac553d1cb59fe62
|
Remove global/local variable optimization hacks
|
whip/db.py
|
whip/db.py
|
"""
Whip database storage module.
All IP ranges with associated information are stored in a LevelDB
database. The key/value layout is as follows:
* The end IP is used as the key. This allows for quick range lookups.
* The begin IP and the actual information is stored in the value. The
most recent infoset for a range is stored in full, encoded as JSON, so
that it can be returned quickly without any decoding and processing
overhead.
To save a lot of space (and hence improve performance), historical
data for an IP range is stored as diffs from the most recent version.
When querying for older versions, the original data is reconstructed
on-demand.
The value is packed as follows:
* IPv4 begin address (4 bytes)
* Length of the JSON data for the most recent information (2 bytes)
* JSON encoded data for the latest version (variable length)
* JSON encoded diffs for older versions (variable length)
"""
import logging
import operator
import struct
import plyvel
import simplejson as json
from whip.util import (
dict_diff,
dict_patch,
ipv4_int_to_bytes,
ipv4_int_to_str,
merge_ranges,
ProgressReporter,
)
SIZE_STRUCT = struct.Struct('>H')
logger = logging.getLogger(__name__)
json_encoder = json.JSONEncoder(
check_circular=False,
separators=(',', ':'), # no whitespace
)
json_decoder = json.JSONDecoder()
def _build_db_record(
begin_ip_int, end_ip_int, infosets,
_extract_datetime=operator.itemgetter('datetime'),
_encode=json_encoder.encode):
"""Create database records for an iterable of merged infosets."""
# Build history structure. The latest version is stored in
# full, ...
infosets.sort(key=_extract_datetime, reverse=True)
latest = infosets[0]
latest_json = _encode(latest)
# ... while older versions are stored as (reverse) diffs to the
# previous (in time) version.
history_json = _encode([
dict_diff(infosets[i + 1], infosets[i])
for i in xrange(len(infosets) - 1)
])
# Build the actual key and value byte strings
key = ipv4_int_to_bytes(end_ip_int)
value = (ipv4_int_to_bytes(begin_ip_int)
+ SIZE_STRUCT.pack(len(latest_json))
+ latest_json
+ history_json)
return key, value
class Database(object):
def __init__(self, database_dir, create_if_missing=False):
logger.debug("Opening database %s", database_dir)
self.db = plyvel.DB(
database_dir,
create_if_missing=create_if_missing,
write_buffer_size=16 * 1024 * 1024,
max_open_files=512,
lru_cache_size=128 * 1024 * 1024)
self._make_iter()
def _make_iter(self):
"""Make an iterator for the current database.
Iterator construction is relatively costly, so reuse it for
performance reasons. The iterator won't see any data written
after its construction, but this is not a problem since the data
set is static.
"""
self.iter = self.db.iterator(include_key=False)
def load(self, *iters):
"""Load data from importer iterables"""
# Merge all iterables to produce unique, non-overlapping IP
# ranges with multiple timestamped infosets.
merged = merge_ranges(*iters)
reporter = ProgressReporter(lambda: logger.info(
"%d database records stored; current position: %s",
n, ipv4_int_to_str(item[0])))
n = 0
for n, item in enumerate(merged, 1):
key, value = _build_db_record(*item)
self.db.put(key, value)
reporter.tick()
reporter.tick(True)
# Refresh iterator so that it sees the new data
self._make_iter()
def lookup(self, ip, dt=None, _unpack=SIZE_STRUCT.unpack,
_decode=json_decoder.decode,
_encode=json_encoder.encode):
"""Lookup a single IP address in the database
This either returns the stored information, or `None` if no
information was found.
"""
# The database key stores the end IP of all ranges, so a simple
# seek positions the iterator at the right key (if found).
self.iter.seek(ip)
try:
value = next(self.iter)
except StopIteration:
# Past any range in the database: no hit
return None
# Check range boundaries. The first 4 bytes store the begin IP.
# If the IP currently being looked up is in a gap, there is no
# hit after all.
if ip < value[:4]:
return None
# The next 2 bytes indicate the length of the JSON string for
# the most recent information
size = _unpack(value[4:6])[0]
infoset_json = value[6:size + 6]
# If the lookup is for the most recent version, we're done
if dt is None:
return infoset_json
# This is a lookup for a specific timestamp. This means we
# actually need to peek into the record.
infoset = _decode(infoset_json)
# The most recent version may be the one asked for.
if infoset['datetime'] <= dt:
# TODO: store latest date somewhere more easily accessible
# (timestamp field after the JSON length field perhaps?) to
# avoid JSON parsing overhead for this case.
return infoset_json
# Too bad, we need to delve deeper into history by iteratively
# applying patches.
history = _decode(value[size + 6:])
for to_delete, to_set in history:
dict_patch(infoset, to_delete, to_set)
if infoset['datetime'] <= dt:
return _encode(infoset)
# Too bad, no result
return None
|
Python
| 0
|
@@ -1403,25 +1403,16 @@
_record(
-%0A
begin_ip
@@ -1441,104 +1441,8 @@
sets
-,%0A _extract_datetime=operator.itemgetter('datetime'),%0A _encode=json_encoder.encode
):%0A
@@ -1616,25 +1616,37 @@
key=
-_extract_
+operator.itemgetter('
datetime
, re
@@ -1641,16 +1641,18 @@
datetime
+')
, revers
@@ -1698,25 +1698,37 @@
test_json =
-_
+json_encoder.
encode(lates
@@ -1852,17 +1852,29 @@
_json =
-_
+json_encoder.
encode(%5B
@@ -3745,124 +3745,8 @@
None
-, _unpack=SIZE_STRUCT.unpack,%0A _decode=json_decoder.decode,%0A _encode=json_encoder.encode
):%0A
@@ -4577,17 +4577,28 @@
size =
-_
+SIZE_STRUCT.
unpack(v
@@ -4910,17 +4910,29 @@
foset =
-_
+json_decoder.
decode(i
@@ -5393,17 +5393,29 @@
story =
-_
+json_decoder.
decode(v
@@ -5589,17 +5589,29 @@
return
-_
+json_encoder.
encode(i
|
96e3d2720a805a08190936a78c91a8c9746daab9
|
Update Keras.py
|
Depthwise-Separable-Convolution/Keras.py
|
Depthwise-Separable-Convolution/Keras.py
|
from keras.datasets import mnist
from keras.initializers import RandomUniform
from keras.layers import Conv2D, Dense, DepthwiseConv2D, Flatten, MaxPooling2D
from keras.models import Sequential
from keras.optimizers import SGD
from keras.utils import to_categorical
# input image dimensions
img_rows, img_cols = 28, 28
batch_size = 128
epochs = 30
learning_rate = 0.05
momentum = 0.9
num_classes = 10
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape([x_train.shape[0], img_rows, img_cols, 1]).astype('float32') / 255
y_train = to_categorical(y_train, num_classes)
x_test = x_test.reshape([x_test.shape[0], img_rows, img_cols, 1]).astype('float32') / 255
y_test = to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(24,
activation='relu',
kernel_initializer='he_normal',
kernel_size=(5, 5),
input_shape=(img_rows, img_cols, 1)))
model.add(MaxPooling2D())
# depthwise
model.add(DepthwiseConv2D(activation='relu',
depthwise_initializer='he_normal',
kernel_size=(5, 5)))
# pointwise
model.add(Conv2D(48,
activation='relu',
kernel_initializer='he_normal',
kernel_size=(1, 1)))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(512,
activation='relu',
kernel_initializer='he_normal'))
model.add(Dense(num_classes,
activation='softmax',
kernel_initializer='glorot_uniform'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=SGD(lr=learning_rate, momentum=momentum, nesterov=True),
metrics=['accuracy'])
history = model.fit(x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
|
Python
| 0
|
@@ -398,26 +398,8 @@
.9%0D%0A
-num_classes = 10%0D%0A
%0D%0A(
@@ -593,24 +593,27 @@
num_classes
+=10
)%0D%0A %0D%0Ax_test
@@ -740,16 +740,19 @@
_classes
+=10
)%0D%0A %0D%0Amo
@@ -1510,27 +1510,18 @@
d(Dense(
-num_classes
+10
,%0D%0A
|
efbb841bb0968abeb2d3bba5a535cb8619131b2b
|
Remove dupe licence header
|
touchdown/config/ini.py
|
touchdown/config/ini.py
|
# Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from six.moves import configparser
from touchdown.core.plan import Plan
from touchdown.core import argument, resource
from touchdown.interfaces import File, FileNotFound
class IniFile(resource.Resource):
resource_name = "ini_file"
name = argument.String()
file = argument.Resource(File)
class Describe(Plan):
resource = IniFile
name = "describe"
def write(self, c):
fp = self.runner.get_plan(self.resource.file)
s = six.StringIO()
c.write(s)
fp.write(s.getvalue())
def read(self):
fp = self.runner.get_plan(self.resource.file)
config = configparser.ConfigParser()
try:
config.readfp(fp.read())
except FileNotFound:
pass
return config
def get_actions(self):
self.object = self.read()
return []
|
Python
| 0
|
@@ -1,581 +1,4 @@
-# Copyright 2015 Isotoma Limited%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A
# Co
|
23359b52f468d3eed62533e7aed3ea9afce998e4
|
fix webdav on windows
|
pyspider/webui/webdav.py
|
pyspider/webui/webdav.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<roy@binux.me>
# http://binux.me
# Created on 2015-6-3 11:29
import os
import re
import time
import base64
from six import BytesIO
from wsgidav.wsgidav_app import DEFAULT_CONFIG, WsgiDAVApp
from wsgidav.dav_provider import DAVProvider, DAVCollection, DAVNonCollection
from wsgidav.dav_error import DAVError, HTTP_NOT_FOUND, HTTP_FORBIDDEN
from pyspider.libs.utils import utf8, text
from .app import app
class ContentIO(BytesIO):
def close(self):
self.content = self.getvalue()
BytesIO.close(self)
class ScriptResource(DAVNonCollection):
def __init__(self, path, environ, app, project=None):
super(ScriptResource, self).__init__(path, environ)
self.app = app
self.new_project = False
self._project = project
self.project_name = self.name
self.writebuffer = None
if self.project_name.endswith('.py'):
self.project_name = self.project_name[:-len('.py')]
@property
def project(self):
if self._project:
return self._project
projectdb = self.app.config['projectdb']
if projectdb:
self._project = projectdb.get(self.project_name)
if not self._project:
if projectdb.verify_project_name(self.project_name) and self.name.endswith('.py'):
self.new_project = True
self._project = {
'name': self.project_name,
'script': '',
'status': 'TODO',
'rate': self.app.config.get('max_rate', 1),
'burst': self.app.config.get('max_burst', 3),
'updatetime': time.time(),
}
else:
raise DAVError(HTTP_FORBIDDEN)
return self._project
@property
def readonly(self):
projectdb = self.app.config['projectdb']
if not projectdb:
return True
if 'lock' in projectdb.split_group(self.project.get('group')) \
and self.app.config.get('webui_username') \
and self.app.config.get('webui_password'):
authheader = self.environ.get("HTTP_AUTHORIZATION")
if not authheader:
return True
authheader = authheader[len("Basic "):]
try:
username, password = text(base64.b64decode(authheader)).split(':', 1)
except Exception as e:
self.app.logger.error('wrong api key: %r, %r', authheader, e)
return True
if username == self.app.config['webui_username'] \
and password == self.app.config['webui_password']:
return False
else:
return True
return False
def getContentLength(self):
return len(utf8(self.project['script']))
def getContentType(self):
return 'text/plain'
def getLastModified(self):
return self.project['updatetime']
def getContent(self):
return BytesIO(utf8(self.project['script']))
def beginWrite(self, contentType=None):
if self.readonly:
self.app.logger.error('webdav.beginWrite readonly')
return super(ScriptResource, self).beginWrite(contentType)
self.writebuffer = ContentIO()
return self.writebuffer
def endWrite(self, withErrors):
if withErrors:
self.app.logger.error('webdav.endWrite error: %r', withErrors)
return super(ScriptResource, self).endWrite(withErrors)
if not self.writebuffer:
return
projectdb = self.app.config['projectdb']
if not projectdb:
return
info = {
'script': text(getattr(self.writebuffer, 'content', ''))
}
if self.project.get('status') in ('DEBUG', 'RUNNING'):
info['status'] = 'CHECKING'
if self.new_project:
self.project.update(info)
self.new_project = False
return projectdb.insert(self.project_name, self.project)
else:
return projectdb.update(self.project_name, info)
class RootCollection(DAVCollection):
def __init__(self, path, environ, app):
super(RootCollection, self).__init__(path, environ)
self.app = app
self.projectdb = self.app.config['projectdb']
def getMemberList(self):
members = []
for project in self.projectdb.get_all():
project_name = utf8(project['name'])
if not project_name.endswith('.py'):
project_name += '.py'
members.append(ScriptResource(
os.path.join(self.path, project_name),
self.environ,
self.app,
project
))
return members
def getMemberNames(self):
members = []
for project in self.projectdb.get_all(fields=['name', ]):
project_name = utf8(project['name'])
if not project_name.endswith('.py'):
project_name += '.py'
members.append(project_name)
return members
class ScriptProvider(DAVProvider):
def __init__(self, app):
super(ScriptProvider, self).__init__()
self.app = app
def __repr__(self):
return "pyspiderScriptProvider"
def getResourceInst(self, path, environ):
path = os.path.normpath(path)
if path in ('/', '.', ''):
return RootCollection(path, environ, self.app)
else:
return ScriptResource(path, environ, self.app)
config = DEFAULT_CONFIG.copy()
config.update({
'mount_path': '/dav',
'provider_mapping': {
'/': ScriptProvider(app)
},
'user_mapping': {},
'verbose': 1 if app.debug else 0,
'dir_browser': {'davmount': False,
'enable': True,
'msmount': False,
'response_trailer': ''},
})
dav_app = WsgiDAVApp(config)
|
Python
| 0.000001
|
@@ -5524,16 +5524,35 @@
th(path)
+.replace('%5C%5C', '/')
%0A
@@ -5575,24 +5575,47 @@
, '.', ''):%0A
+ path = '/'%0A
|
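The diff above works around os.path.normpath on Windows, which rewrites forward slashes to backslashes and so breaks the WsgiDAV path comparisons. A standalone sketch of the patched logic; the helper name normalize_dav_path is made up:

import os

def normalize_dav_path(path):
    # normpath yields backslashes on Windows; fold them back to the
    # forward slashes the DAV provider expects.
    path = os.path.normpath(path).replace('\\', '/')
    if path in ('/', '.', ''):
        path = '/'  # canonical root, as the diff sets explicitly
    return path

assert normalize_dav_path('/') == '/'
assert normalize_dav_path('') == '/'
assert normalize_dav_path('/project.py') == '/project.py'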
701792e4d4ecc1597b2f66fc3e63b38b3482c1bf
|
Fix tf.complex_abs gradient test to not be flaky Change: 117872871
|
tensorflow/python/ops/math_grad_test.py
|
tensorflow/python/ops/math_grad_test.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python ops defined in math_grad.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class SquaredDifferenceOpTest(tf.test.TestCase):
def _testGrad(self, left_shape, right_shape):
if len(left_shape) > len(right_shape):
output_shape = left_shape
else:
output_shape = right_shape
l = np.random.randn(*left_shape)
r = np.random.randn(*right_shape)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
left_tensor = tf.constant(l, shape=left_shape)
right_tensor = tf.constant(r, shape=right_shape)
output = tf.squared_difference(left_tensor, right_tensor)
left_err = tf.test.compute_gradient_error(left_tensor,
left_shape,
output,
output_shape,
x_init_value=l)
right_err = tf.test.compute_gradient_error(right_tensor,
right_shape,
output,
output_shape,
x_init_value=r)
self.assertLess(left_err, 1e-10)
self.assertLess(right_err, 1e-10)
def testGrad(self):
self._testGrad([1, 2, 3, 2], [3, 2])
self._testGrad([2, 4], [3, 2, 4])
class AbsOpTest(tf.test.TestCase):
def _biasedRandN(self, shape, bias=0.1, sigma=1.0):
"""Returns samples from a normal distribution shifted `bias` away from 0."""
value = np.random.randn(*shape) * sigma
return value + np.sign(value) * bias
def _testGrad(self, shape, dtype=None, max_error=None, bias=None, sigma=None):
if dtype == tf.complex64:
value = tf.complex(self._biasedRandN(shape, bias=bias, sigma=sigma),
self._biasedRandN(shape, bias=bias, sigma=sigma))
else:
value = tf.convert_to_tensor(self._biasedRandN(shape, bias=bias),
dtype=dtype)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
if dtype == tf.complex64:
output = tf.complex_abs(value)
else:
output = tf.abs(value)
error = tf.test.compute_gradient_error(
value, shape, output, output.get_shape().as_list())
self.assertLess(error, max_error)
def testComplexAbs(self):
# Bias random test values away from zero to avoid numeric instabilities.
self._testGrad([3, 3], dtype=tf.float32, max_error=2e-5, bias=0.1,
sigma=1.0)
self._testGrad([3, 3], dtype=tf.complex64, max_error=2e-5, bias=0.1,
sigma=1.0)
# Ensure stability near the pole at zero.
self._testGrad([3, 3], dtype=tf.float32, max_error=100.0, bias=0.0,
sigma=0.1)
self._testGrad([3, 3], dtype=tf.complex64, max_error=100.0, bias=0.0,
sigma=0.1)
if __name__ == "__main__":
tf.test.main()
|
Python
| 0.000095
|
@@ -2580,24 +2580,46 @@
igma=None):%0A
+ np.random.seed(7)%0A
if dtype
|
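The one-line fix above pins NumPy's RNG so the biased samples are reproducible; without it, an unlucky draw close to the pole of abs() at zero can inflate the gradient error and fail the check intermittently. A self-contained sketch, where biased_randn mirrors _biasedRandN and is hypothetical:

import numpy as np

def biased_randn(shape, bias=0.1, sigma=1.0, seed=7):
    np.random.seed(seed)  # deterministic draws across test runs
    value = np.random.randn(*shape) * sigma
    return value + np.sign(value) * bias

a = biased_randn((3, 3))
b = biased_randn((3, 3))
assert (a == b).all()  # identical on every run with the same seed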
4a1eeebc753dde28e2d9c29b532730e531a04d77
|
write results after each test to save memory
|
pytest_cagoule/plugin.py
|
pytest_cagoule/plugin.py
|
from coverage import coverage
import six
from .db import get_connection
from .git_parser import get_changes
from .select import get_node_ids
def pytest_addoption(parser):
parser.addoption(
'--cagoule-capture', action='store_true', dest='cagoule_capture',
help='capture coverage info for cagoule',
)
# TODO: better help text
parser.addoption(
'--cagoule-select', metavar='spec', action='store',
dest='cagoule_select', help='run only tests that cover the spec',
)
parser.addoption(
'--cagoule-git', '--diff', nargs='?', dest='cagoule_git', const='HEAD',
help='run only tests that cover files with git changes',
)
# coverage params, at least concurrency
class CagouleCapturePlugin(object):
def __init__(self):
self.cov = coverage(source='.')
self.tracing = False
self.data = {}
def pytest_runtest_setup(self, item):
cov = self.cov
cov.erase()
cov.start()
self.tracing = True
def pytest_runtest_teardown(self, item):
cov = self.cov
if not self.tracing:
return
cov.stop()
self.tracing = False
cov._harvest_data()
data = []
for filename, lines in six.iteritems(cov.data.lines):
for line in lines:
data.append((filename, line))
self.data[item.nodeid] = data
def data_for_insert(self):
for node_id, lines in six.iteritems(self.data):
for filename, line in lines:
yield node_id, filename, line
def write_results(self):
connection = get_connection()
with connection:
connection.execute("DROP TABLE IF EXISTS coverage;")
connection.execute("""
CREATE TABLE coverage (
node_id text,
filename text,
line int,
PRIMARY KEY(node_id, filename, line)
);
""")
connection.executemany(
"INSERT INTO coverage VALUES (?, ?, ?)",
self.data_for_insert()
)
def pytest_sessionfinish(self):
self.write_results()
class CagouleSelectPlugin(object):
def __init__(self, spec=None, git_spec=None):
if spec is not None:
specs = [spec]
elif git_spec is not None:
specs = get_changes(git_spec)
selected = get_node_ids(specs)
self.selected = set(selected)
def pytest_collection_modifyitems(self, session, config, items):
covered = []
uncovered = []
for item in items:
if item.nodeid in self.selected:
covered.append(item)
else:
uncovered.append(item)
items[:] = covered
config.hook.pytest_deselected(items=uncovered)
def pytest_configure(config):
if (
config.getvalue('cagoule_capture') and
not config.pluginmanager.hasplugin('_cagoule_capture')
):
plugin = CagouleCapturePlugin()
config.pluginmanager.register(plugin, '_cagoule_capture')
spec = config.getvalue('cagoule_select')
if (
spec and
not config.pluginmanager.hasplugin('_cagoule_select')
):
plugin = CagouleSelectPlugin(spec=spec)
config.pluginmanager.register(plugin, '_cagoule_select')
git_spec = config.getvalue('cagoule_git')
if (
git_spec and
not config.pluginmanager.hasplugin('_cagoule_select')
):
plugin = CagouleSelectPlugin(git_spec=git_spec)
config.pluginmanager.register(plugin, '_cagoule_select')
|
Python
| 0.000001
|
@@ -876,17 +876,439 @@
elf.
-data = %7B%7D
+setup_db()%0A%0A%0A def setup_db(self):%0A connection = get_connection()%0A with connection:%0A connection.execute(%22DROP TABLE IF EXISTS coverage;%22)%0A connection.execute(%22%22%22%0A CREATE TABLE coverage (%0A node_id text,%0A filename text,%0A line int,%0A PRIMARY KEY(node_id, filename, line)%0A );%0A %22%22%22)
%0A%0A
@@ -1644,175 +1644,27 @@
-data = %5B%5D%0A for filename, lines in six.iteritems(cov.data.lines):%0A for line in lines:%0A data.append((filename, line))%0A self.data%5B
+self.write_results(
item
@@ -1674,16 +1674,19 @@
deid
-%5D =
+, cov.
data
+)
%0A%0A
@@ -1707,24 +1707,43 @@
_insert(self
+, node_id, cov_data
):%0A f
@@ -1745,23 +1745,24 @@
for
-node_id
+filename
, lines
@@ -1778,25 +1778,30 @@
eritems(
-self.data
+cov_data.lines
):%0A
@@ -1806,34 +1806,24 @@
for
- filename,
line in lin
@@ -1895,24 +1895,43 @@
results(self
+, node_id, cov_data
):%0A c
@@ -1988,340 +1988,8 @@
on:%0A
- connection.execute(%22DROP TABLE IF EXISTS coverage;%22)%0A connection.execute(%22%22%22%0A CREATE TABLE coverage (%0A node_id text,%0A filename text,%0A line int,%0A PRIMARY KEY(node_id, filename, line)%0A );%0A %22%22%22)%0A
@@ -2118,88 +2118,39 @@
ert(
-)%0A )%0A%0A def pytest_sessionfinish(self):%0A self.write_results(
+node_id, cov_data)%0A
)%0A%0A%0A
|
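The diff above creates the coverage table once at plugin start-up and flushes each test's rows in teardown, instead of holding every test's line data in memory until session end. A minimal sketch of that per-test write pattern, using an in-memory SQLite stand-in; write_results is a hypothetical distillation:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE coverage ('
             'node_id text, filename text, line int, '
             'PRIMARY KEY(node_id, filename, line))')

def write_results(node_id, lines_by_file):
    # lines_by_file: {filename: [line, ...]} as harvested from coverage
    with conn:
        conn.executemany(
            'INSERT INTO coverage VALUES (?, ?, ?)',
            ((node_id, name, line)
             for name, lines in lines_by_file.items()
             for line in lines))

write_results('tests/test_a.py::test_x', {'pkg/mod.py': [1, 2, 5]})
assert conn.execute('SELECT count(*) FROM coverage').fetchone()[0] == 3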
1e21ba5101fe1e47fec5acacd8ac9329a71fc9bb
|
Change __init__
|
__init__.py
|
__init__.py
|
from .mygmm.mygmm import *
|
Python
| 0.000028
|
@@ -4,22 +4,16 @@
m .mygmm
-.mygmm
import
@@ -13,8 +13,9 @@
import *
+%0A
|
c26c44f044a2e48cc53a0f52adce366807c87e2d
|
Add version number.
|
__init__.py
|
__init__.py
|
from .Averager import Averager
from .Config import Config
from .RateTicker import RateTicker
from .Ring import Ring
from .SocketTalk import SocketTalk
from .SortedList import SortedList
from .String import string2time, time2string, time2levels, time2dir, time2fname
from .Timer import Timer
from .UserInput import user_input
|
Python
| 0
|
@@ -1,12 +1,32 @@
+__version__ = '1.0'%0A
from .Averag
|
629a965cbba1b66d3f5c520b1a90cf26f131a434
|
fix a test for large requests
|
test/alternator/test_manual_requests.py
|
test/alternator/test_manual_requests.py
|
# Copyright 2020 ScyllaDB
#
# This file is part of Scylla.
#
# Scylla is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Scylla is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Scylla. If not, see <http://www.gnu.org/licenses/>.
# Tests for manual requests - not necessarily generated
# by boto3, in order to allow non-validated input to get through
import pytest
import requests
from botocore.exceptions import BotoCoreError, ClientError
def gen_json(n):
return '{"":'*n + '{}' + '}'*n
def get_signed_request(dynamodb, target, payload):
# NOTE: Signing routines use boto3 implementation details and may be prone
# to unexpected changes
class Request:
url=dynamodb.meta.client._endpoint.host
headers={'X-Amz-Target': 'DynamoDB_20120810.' + target, 'Content-Type': 'application/x-amz-json-1.0'}
body=payload.encode(encoding='UTF-8')
method='POST'
context={}
params={}
req = Request()
signer = dynamodb.meta.client._request_signer
signer.get_auth(signer.signing_name, signer.region_name).add_auth(request=req)
return req
# Test that deeply nested objects (e.g. with depth of 200k) are parsed correctly,
# i.e. do not cause stack overflows for the server. It's totally fine for the
# server to refuse these packets with an error message though.
# NOTE: The test uses raw HTTP requests, because it's not easy to send
# a deeply nested object via boto3 - it quickly crashes on 'too deep recursion'
# for objects with depth as low as 150 (with sys.getrecursionlimit() == 3000).
# Hence, a request is manually crafted to contain a deeply nested JSON document.
def test_deeply_nested_put(dynamodb, test_table):
big_json = gen_json(200000)
payload = '{"TableName": "' + test_table.name + '", "Item": {"p": {"S": "x"}, "c": {"S": "x"}, "attribute":' + big_json + '}}'
req = get_signed_request(dynamodb, 'PutItem', payload)
# Check that the request delivery succeeded and the server
# responded with a comprehensible message - it can be either
# a success report or an error - both are acceptable as long as
# the oversized message did not make the server crash.
response = requests.post(req.url, headers=req.headers, data=req.body, verify=False)
print(response, response.text)
# If the PutItem request above failed, the deeply nested item
# was not put into the database, so it's fine for this request
# to receive a response that it was not found. An error informing
# about not being able to process this request is also acceptable,
# as long as the server didn't crash.
item = test_table.get_item(Key={'p': 'x', 'c':'x'})
print(item)
# Test that a too deeply nested object is refused,
# assuming max depth of 32 - and keeping the nested level
# low enough for Python not to choke on it with too deep recursion
def test_exceed_nested_level_a_little(dynamodb, test_table):
p = 'xxx'
c = 'yyy'
nested = dict()
nested_it = nested
for i in range(50):
nested_it['a'] = dict()
nested_it = nested_it['a']
with pytest.raises(ClientError, match='.*Exception.*nested'):
test_table.put_item(Item={'p': p, 'c': c, 'nested': nested})
# Test that we indeed allow the maximum level of 32 nested objects
def test_almost_exceed_nested_level(dynamodb, test_table):
p = 'xxx'
c = 'yyy'
nested = dict()
nested_it = nested
for i in range(30): # 30 added levels + top level + the item itself == 32 total
nested_it['a'] = dict()
nested_it = nested_it['a']
test_table.put_item(Item={'p': p, 'c': c, 'nested': nested})
def test_too_large_request(dynamodb, test_table):
p = 'abc'
c = 'def'
big = 'x' * (16 * 1024 * 1024 + 7)
# The exception type differs due to differences between HTTP servers
# in alternator and DynamoDB. The former returns 413, the latter
# a ClientError explaining that the element size was too large.
with pytest.raises(BotoCoreError):
test_table.put_item(Item={'p': p, 'c': c, 'big': big})
def test_incorrect_json(dynamodb, test_table):
correct_req = '{"TableName": "' + test_table.name + '", "Item": {"p": {"S": "x"}, "c": {"S": "x"}}}'
    # Check all proper prefixes of a correct JSON - none of them are valid JSON themselves.
    # NOTE: DynamoDB returns two kinds of errors on incorrect input - SerializationException
    # or "Page Not Found". Alternator returns "ValidationException" for simplicity.
validate_resp = lambda t: "SerializationException" in t or "ValidationException" in t or "Page Not Found" in t
for i in range(len(correct_req)):
req = get_signed_request(dynamodb, 'PutItem', correct_req[:i])
response = requests.post(req.url, headers=req.headers, data=req.body, verify=False)
assert validate_resp(response.text)
incorrect_reqs = [
'}}}', '}{', 'habababa', '7', '124463gwe', '><#', '????', '"""', '{"""}', '{""}', '{7}',
'{3: }}', '{"2":{}', ',', '{,}', '{{}}', '"a": "b"', '{{{', '{'*10000 + '}'*9999, '{'*10000 + '}'*10007
]
for incorrect_req in incorrect_reqs:
req = get_signed_request(dynamodb, 'PutItem', incorrect_req)
response = requests.post(req.url, headers=req.headers, data=req.body, verify=False)
assert validate_resp(response.text)
|
Python
| 0
|
@@ -4449,16 +4449,33 @@
Error):%0A
+ try:%0A
@@ -4528,16 +4528,78 @@
': big%7D)
+%0A except ClientError:%0A raise BotoCoreError()
%0A%0Adef te
|
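The fix above normalizes the two backends' failure modes: Alternator's HTTP layer rejects the oversized body with a 413, which boto3 surfaces as a BotoCoreError, while DynamoDB answers with a ClientError about item size. Re-raising the latter as the former lets a single pytest.raises(BotoCoreError) cover both. A sketch of the idea against a stub; put_big_item and StubTable are stand-ins:

from botocore.exceptions import BotoCoreError, ClientError

def put_big_item(table, item):
    try:
        table.put_item(Item=item)
    except ClientError:
        # map DynamoDB's refusal onto the exception Alternator produces
        raise BotoCoreError()

class StubTable:
    def put_item(self, Item):
        raise ClientError({'Error': {'Code': 'ValidationException',
                                     'Message': 'Item size too large'}},
                          'PutItem')

try:
    put_big_item(StubTable(), {'p': 'abc'})
except BotoCoreError:
    pass  # both backends now fail the same way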
bf6b49af7540acc56892ee1ed05479b141e18a14
|
Update commands.py
|
commands.py
|
commands.py
|
# Not entirely sure these are sane...
def conditional(statement):
compiledScript = ''
# Compare an array
if isinstance(statement[0], classes.List):
compiledScript += '\tmov ecx,' + utils.getVar(statement[0].value[0]) + '\n\tadd ecx,' + utils.getVar(statement[0].value[1]) + '\n'
# Compare a normal variable
else:
#var = getVar(word[0][1])
compiledScript += '\tmov ecx,[' + utils.getVar(statement[0]).key + ']\n'
# With an array
if isinstance(statement[2], classes.List):
compiledScript += '\tmov ecx,' + utils.getVar(statement[2].value[0]) + '\n\tadd ecx,' + utils.getVar(statement[2].value[1]) + '\n'
# With a normal variable
else:
var2 = utils.getVar(statement[2])
# EQUAL
if statement[1] == '==':
compiledScript += '\tcmp ecx,[' + var2 + ']\n\tje ' + iff + '\n\tint 80h\n'
# NOT EQUAL
elif statement[1] == '!=':
compiledScript += '\tcmp ecx,[' + var2 + ']\n\tjne ' + iff + '\n\tint 80h\n'
# GREATER THAN
elif statement[1] == '>':
compiledScript += '\tcmp ecx,[' + var2 + ']\n\tjg ' + iff + '\n\tint 80h\n'
# LESS THAN
elif statement[1] == '<':
compiledScript += '\tcmp ecx,[' + var2 + ']\n\tjl ' + iff + '\n\tint 80h\n'
# GREATER THAN OR EQUAL TO
elif statement[1] == '>=':
compiledScript += '\tcmp ecx,[' + var2 + ']\n\tjge ' + iff + '\n\tint 80h\n'
# LESS THAN OR EQUAL TO
elif statement[1] == '<=':
compiledScript += '\tcmp ecx,[' + var2 + ']\n\tjle ' + iff + '\n\tint 80h\n'
return compiledScript
def loop(statement):
compiledScript = ''
# Compare an array
    if isinstance(statement[0], classes.List):
        compiledScript += '\tmov ecx,[' + utils.getVar(statement[0].value[0]) + ']\n\tadd ecx,' + utils.getVar(statement[0].value[1]) + '\n'
# Compare a normal variable
else:
compiledScript += '\tmov ecx,[' + utils.getVar(statement[1]) + ']\n'
# With an array
if isinstance(statement[2], classes.List):
compiledScript += '\tmov ecx,' + utils.getVar(statement[2].value[0]) + '\n\tadd ecx,' + utils.getVar(statement[2].value[1]) + '\n'
# With a normal variable
else:
var2 = utils.getVar(statement[2])
# EQUAL
if statement[1] == '==':
compiledScript += '\tcmp ecx,[' + var2 + ']\n\tje ' + whilef + '\n\tint 80h\n'
# NOT EQUAL
elif statement[1] == '!=':
compiledScript += '\tcmp ecx,[' + var2 + ']\n\tjne ' + whilef + '\n\tint 80h\n'
# GREATER THAN
elif statement[1] == '>':
compiledScript += '\tcmp ecx,[' + var2 + ']\n\tjg ' + whilef + '\n\tint 80h\n'
# LESS THAN
elif statement[1] == '<':
compiledScript += '\tcmp ecx,[' + var2 + ']\n\tjl ' + whilef + '\n\tint 80h\n'
# GREATER THAN OR EQUAL
elif statement[1] == '>=':
compiledScript += '\tcmp ecx,[' + var2 + ']\n\tjge ' + whilef + '\n\tint 80h\n'
# LESS THAN OR EQUAL
elif statement[1] == '<=':
compiledScript += '\tcmp ecx,[' + var2 + ']\n\tjle ' + whilef + '\n\tint 80h\n'
return compiledScript
def returnvalue(function, statement):
compiledScript = ''
if function.key == 'main':
compiledScript += '\tmov eax,1\n\tmov ebx,[' + utils.getVar(statement[0]) + ']\n\tint 80h\n'
else:
        if isinstance(utils.getVar(statement[0]), list):
if utils.getVar(statement[0])[0] == 'math':
compiledScript += utils.getVar(statement[0])[1]
else:
compiledScript += '\tmov eax,' + str(utils.getVar(statement[0])) + '\n'
return compiledScript
|
Python
| 0.000001
|
@@ -1,8 +1,1580 @@
+# Copyright (c) 2016, Connor E. Haight %3Cconnor.haight@gmail.com%3E%0A# All rights reserved.%0A#%0A# Redistribution and use in source and binary forms, with or without%0A# modification, are permitted provided that the following conditions are met:%0A# * Redistributions of source code must retain the above copyright%0A# notice, this list of conditions and the following disclaimer.%0A# * Redistributions in binary form must reproduce the above copyright%0A# notice, this list of conditions and the following disclaimer in the%0A# documentation and/or other materials provided with the distribution.%0A# * Neither the name of the VectorOne nor the%0A# names of its contributors may be used to endorse or promote products%0A# derived from this software without specific prior written permission.%0A#%0A# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS %22AS IS%22 AND%0A# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED%0A# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE%0A# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY%0A# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES%0A# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;%0A# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND%0A# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT%0A# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS%0A# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.%0A%0A
# Not en
|
62b1db51b2742ff57e48d692e034cdeb6e648d7c
|
Fix issue where adding multiple subscriptions overwrote the previous record in the GUI.
|
invoicing/views.py
|
invoicing/views.py
|
"""
SynLogistics: Invoicing and subscription views.
"""
#
# Copyright (C) by Wilco Baan Hofman <wilco@baanhofman.nl> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from random import getrandbits
from django.shortcuts import render_to_response
from django.core.context_processors import csrf
from django.template import RequestContext
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.utils import simplejson as json
from django.db import transaction as db_trans
from decimal import Decimal
from datetime import datetime
from main.models import Subscription, Relation, Product
import settings
@login_required
def create(request):
""" Displays the create invoice template """
ctx = RequestContext(request, {
'BASE_URL': settings.BASE_URL,
'uniquestring': str(getrandbits(32)),
})
ctx.update(csrf(request))
return render_to_response('invoicing/create.html', ctx)
@login_required
def subscriptions_view(request):
""" Displays the invoicing/subscriptions template """
ctx = RequestContext(request, {
'BASE_URL': settings.BASE_URL,
'uniquestring': str(getrandbits(32)),
})
ctx.update(csrf(request))
return render_to_response('invoicing/subscriptions.html', ctx)
@login_required
@db_trans.commit_manually
def subscription_data(request):
""" AJAX handler for the subscription data in the sales->subscription view. """
# New subscriptions come in through a POST request
if request.method == "POST":
response = json.loads(request.raw_post_data, parse_float=Decimal)
# Catch phony record creation request.
		if response['product'] == 0 or response['startdate'] is None:
return HttpResponse('')
try:
# insert the subscription
subscription = Subscription()
subscription.product = Product.objects.get(pk=int(response['product']))
subscription.customer = Relation.objects.get(pk=int(response['customer']))
subscription.start_date = datetime.strptime(response['startdate'], '%Y-%m-%dT%H:%M:%S')
if response['enddate']:
subscription.end_date = datetime.strptime(response['enddate'], '%Y-%m-%dT%H:%M:%S')
subscription.discount = Decimal(response['discount'])
subscription.intervals_per_invoice = response['intervals_per_invoice']
subscription.extra_info = response['extra_info']
subscription.active = response['active']
subscription.save()
# Make output parseable
response['customer_display'] = subscription.customer.displayname
response['product_display'] = subscription.product.name
response['startdate'] = subscription.start_date.strftime("%Y-%m-%d")
if response['enddate']:
response['enddate'] = subscription.end_date.strftime("%Y-%m-%d")
# The decimal can't be serialized by json
response['discount'] = str(response['discount'])
except:
db_trans.rollback()
raise
else:
db_trans.commit()
return HttpResponse(json.dumps({ 'success': True, 'data': response }))
# Updates come in as PUT subscriptiondata/id
elif request.method == "PUT":
response = json.loads(request.raw_post_data, parse_float=Decimal)
try:
subscription = Subscription.objects.get(pk=response['id'])
subscription.product = Product.objects.get(pk=int(response['product']))
subscription.customer = Relation.objects.get(pk=int(response['customer']))
subscription.start_date = datetime.strptime(response['startdate'], '%Y-%m-%dT%H:%M:%S')
if response['enddate']:
subscription.end_date = datetime.strptime(response['enddate'], '%Y-%m-%dT%H:%M:%S')
else:
subscription.end_date = None
subscription.discount = Decimal(response['discount'])
subscription.intervals_per_invoice = response['intervals_per_invoice']
subscription.extra_info = response['extra_info']
subscription.active = response['active']
subscription.save()
# Make output parseable
response['customer_display'] = subscription.customer.displayname
response['product_display'] = subscription.product.name
response['startdate'] = subscription.start_date.strftime("%Y-%m-%d")
if response['enddate']:
response['enddate'] = subscription.end_date.strftime("%Y-%m-%d")
# The decimal can't be serialized by json
response['discount'] = str(response['discount'])
except:
db_trans.rollback()
raise
else:
db_trans.commit()
return HttpResponse(json.dumps({ 'success': True, 'data': response }))
# A delete is done via DELETE subscriptiondata/id
elif request.method == "DELETE":
response = json.loads(request.raw_post_data, parse_float=Decimal)
try:
subscription = Subscription.objects.get(pk=response['id'])
subscription.delete()
except:
db_trans.rollback()
raise
else:
db_trans.commit()
return HttpResponse(json.dumps({ 'success': True }))
else:
try:
# TODO: Allow for filtering here!
subscriptions = Subscription.objects.all()
response = []
for subscription in subscriptions:
response.append({
'id': subscription.id,
'product': subscription.product.id,
'product_display': subscription.product.name,
'customer': subscription.customer.id,
'customer_display': subscription.customer.displayname,
'startdate': subscription.start_date.strftime("%Y-%m-%d"),
'enddate': subscription.end_date.strftime("%Y-%m-%d")
if subscription.end_date else None,
'discount': int(subscription.discount*10000)/10000,
'intervals_per_invoice': subscription.intervals_per_invoice,
'extra_info': subscription.extra_info,
'active': subscription.active,
})
			# The response list is built eagerly here because Django's lazy
			# database retrieval would otherwise run the queries late, when
			# the JSON is built outside this transaction.
except:
db_trans.rollback()
raise
else:
db_trans.commit()
return HttpResponse(json.dumps({ 'success': True, 'data': response }))
|
Python
| 0
|
@@ -3404,16 +3404,84 @@
unt'%5D)%0A%0A
+%09%09%09# Give the id to the frontend%0A%09%09%09response%5B'id'%5D = subscription.id
%0A%09%09excep
|
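The one-line fix above echoes the freshly assigned primary key back to the frontend; without it every new grid row keeps the same placeholder id, so the next save targets and overwrites the previous record. A toy model of the round trip, where DummySubscription stands in for the Django model:

import itertools

_pk = itertools.count(1)

class DummySubscription:
    def save(self):
        self.id = next(_pk)  # the ORM assigns the PK on INSERT

def create(response, subscription):
    subscription.save()
    response['id'] = subscription.id  # give the id to the frontend
    return response

r1 = create({'product': 1}, DummySubscription())
r2 = create({'product': 2}, DummySubscription())
assert r1['id'] != r2['id']  # each new row is now addressable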
08da61299b9ae970194c57748741a27fa77bf6f9
|
Fix bug: change the devices property's return type from dict_items to list.
|
uitester/test_manager/device_manager.py
|
uitester/test_manager/device_manager.py
|
from uitester.test_manager import adb
from uitester.test_manager import rpc_server
from threading import Thread
from queue import Queue
class Device:
BLANK = 0x0, 'Default status'
OFFLINE = 0x1, 'Device is offline'
ONLINE = 0x2, 'Device is online'
RUNNING = 0x3, 'Device is running tests'
def __init__(self, device_id):
self.status = self.BLANK
self.id = device_id
self.agent = None
@property
def description(self):
return self.status[1]
class DeviceManager:
"""
DeviceManager:
Create a device instance for every android devices
Use device instance to execute scripts
"""
def __init__(self, context):
self.context = context
self._devices = {}
self.selected_devices = []
self.server = None
self.server_thread = None
self.msg_queue = Queue()
@property
def devices(self):
self.update_devices()
return self._devices.items()
def update_devices(self):
"""
update android devices by adb
:return:
"""
devices_info = adb.devices()
# update device status
for device_id in devices_info:
device_status = devices_info[device_id]
if device_id not in self._devices:
self._devices[device_id] = Device(device_id)
self._update_device_status(self._devices[device_id], device_status)
        # remove devices that no longer exist
        for device_id in list(self._devices):  # iterate a copy: the dict shrinks below
if device_id not in devices_info:
self._devices.pop(device_id)
def _update_device_status(self, device, device_status):
if device_status == 'device':
agent = self.server.get_agent(device.id)
if agent:
device.agent = agent
device.status = Device.ONLINE
else:
device.status = Device.OFFLINE
def start_rpc_server(self):
self.server = rpc_server.get_server(self.context.config.port)
self.server_thread = Thread(target=self.server.serve_forever, daemon=True)
self.server_thread.start()
def restart_rpc_server(self):
"""
restart rpc socket server
:return:
"""
if self.server:
self.server.shutdown()
self.server = None
if self.server_thread:
self.server_thread = None
self.start_rpc_server()
|
Python
| 0.000001
|
@@ -950,24 +950,29 @@
return
+list(
self._device
@@ -977,15 +977,17 @@
ces.
-item
+value
s()
+)
%0A%0A
|
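The fix above matters because dict views are live: handing self._devices.items() to the GUI means the "snapshot" silently changes (or breaks during iteration) whenever update_devices adds or pops a device. A short demonstration of the difference:

devices = {'emulator-5554': 'online'}
view = devices.items()             # live view, tracks the dict
snapshot = list(devices.values())  # stable copy, as the fix returns

devices['emulator-5556'] = 'online'
assert len(view) == 2      # the view grew with the dict
assert len(snapshot) == 1  # the copy is unaffected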
47e483315b7586795bd52ae0a010aa23aa1a9d56
|
Fix bug so that the bot does not comment on non-Python PRs
|
pep8speaks/handlers.py
|
pep8speaks/handlers.py
|
# -*- coding: utf-8 -*-
import json
import os
from flask import Response
import requests
from pep8speaks import helpers
def handle_pull_request(request):
    # A variable which is set to False whenever a criterion is not met.
    # The comment is made only if this is still True at the end.
PERMITTED_TO_COMMENT = True
if request.json["action"] in ["synchronize", "opened", "reopened"]:
# This dictionary is used and updated after making API calls
data = {
"after_commit_hash": request.json["pull_request"]["head"]["sha"],
"repository": request.json["repository"]["full_name"],
"author": request.json["pull_request"]["user"]["login"],
"diff_url": request.json["pull_request"]["diff_url"],
# Dictionary with filename matched with list of results
"results": {},
# Dictionary with filename matched with list of results caused by
# pycodestyle arguments
"extra_results": {},
"pr_number": request.json["number"],
}
# Update users of the integration
helpers.update_users(data["repository"])
# Get the config from .pep8speaks.yml file of the repository
config = helpers.get_config(data["repository"])
# Personalising the messages obtained from the config file
# Replace {name} with name of the author
if "message" in config:
for act in config["message"]:
# can be either "opened" or "updated"
for pos in config["message"][act]:
# can be either "header" or "footer"
msg = config["message"][act][pos]
new_msg = msg.replace("{name}", data["author"])
config["message"][act][pos] = new_msg
# Updates data dictionary with the results
# This function runs the pep8 checker
helpers.run_pycodestyle(data, config)
# Construct the comment
header, body, footer = helpers.prepare_comment(request, data, config)
# If there is nothing in the comment body, no need to make the comment
if len(body) == 0:
PERMITTED_TO_COMMENT = False
# Concatenate comment parts
comment = header + body + footer
        # Do not duplicate a comment the bot has already made on the PR
# Check if asked to keep quiet
PERMITTED_TO_COMMENT = helpers.comment_permission_check(data, comment)
        # Do not run on PRs created by pep8speaks which use autopep8
        # Too noisy otherwise
if data["author"] == "pep8speaks":
PERMITTED_TO_COMMENT = False
# Make the comment
if PERMITTED_TO_COMMENT:
headers = {"Authorization": "token " + os.environ["GITHUB_TOKEN"]}
query = "https://api.github.com/repos/{}/issues/{}/comments"
query = query.format(data["repository"], str(data["pr_number"]))
response = requests.post(query, json={"body": comment}, headers=headers)
data["comment_response"] = response.json()
js = json.dumps(data)
return Response(js, status=200, mimetype='application/json')
def handle_review(request):
# Handle the request when a new review is submitted
data = dict()
data["author"] = request.json["pull_request"]["user"]["login"]
data["reviewer"] = request.json["review"]["user"]["login"]
data["repository"] = request.json["repository"]["full_name"]
data["diff_url"] = request.json["pull_request"]["diff_url"]
data["sha"] = request.json["pull_request"]["head"]["sha"]
data["review_url"] = request.json["review"]["html_url"]
data["pr_number"] = request.json["pull_request"]["number"]
# Get the .pep8speaks.yml config file from the repository
config = helpers.get_config(data["repository"])
condition1 = request.json["action"] == "submitted"
# Mainly the summary of the review matters
## pep8speaks must be mentioned
condition2 = "@pep8speaks" in request.json["review"]["body"]
## Check if asked to pep8ify
condition3 = "pep8ify" in request.json["review"]["body"]
    ## If pep8ify is absent, any other review whose summary mentions
    ## pep8speaks results in a comment with an autopep8 diff gist.
conditions_matched = condition1 and condition2 and condition3
if conditions_matched:
return _pep8ify(request, data, config)
else:
conditions_matched = condition1 and condition2
if conditions_matched:
return _create_diff(request, data, config)
def _pep8ify(request, data, config):
data["target_repo_fullname"] = request.json["pull_request"]["head"]["repo"]["full_name"]
data["target_repo_branch"] = request.json["pull_request"]["head"]["ref"]
data["results"] = {}
# Check if the fork of the target repo exists
# If yes, then delete it
helpers.delete_if_forked(data)
# Fork the target repository
helpers.fork_for_pr(data)
# Update the fork description. This helps in fast deleting it
helpers.update_fork_desc(data)
# Create a new branch for the PR
helpers.create_new_branch(data)
# Fix the errors in the files
helpers.autopep8ify(data, config)
# Commit each change onto the branch
helpers.commit(data)
# Create a PR from the branch to the target repository
helpers.create_pr(data)
js = json.dumps(data)
return Response(js, status=200, mimetype='application/json')
def _create_diff(request, data, config):
# Dictionary with filename matched with a string of diff
data["diff"] = {}
# Process the files and prepare the diff for the gist
helpers.autopep8(data, config)
# Create the gist
helpers.create_gist(data, config)
comment = "Here you go with [the gist]({}) !\n\n" + \
"> You can ask me to create a PR against this branch " + \
"with those fixes. Submit a review comment as " + \
"`@pep8speaks pep8ify`.\n\n"
if data["reviewer"] == data["author"]: # Both are the same person
comment += "@{} "
comment = comment.format(data["gist_url"], data["reviewer"])
else:
comment += "@{} @{} "
comment = comment.format(data["gist_url"], data["reviewer"],
data["author"])
headers = {"Authorization": "token " + os.environ["GITHUB_TOKEN"]}
query = "https://api.github.com/repos/{}/issues/{}/comments"
query = query.format(data["repository"], str(data["pr_number"]))
response = requests.post(query, json={"body": comment}, headers=headers)
data["comment_response"] = response.json()
status_code = 200
if "error" in data.keys():
status_code = 400
js = json.dumps(data)
return Response(js, status=status_code, mimetype='application/json')
def handle_review_comment(request):
# Figure out what does "position" mean in the response
pass
|
Python
| 0
|
@@ -2403,38 +2403,22 @@
-PERMITTED_TO_COMMENT =
+if not
helpers
@@ -2457,16 +2457,58 @@
comment)
+:%0A PERMITTED_TO_COMMENT = False
%0A%0A
|
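The rewrite above makes PERMITTED_TO_COMMENT behave as a sticky flag: later gates may only lower it to False, never reassign it from a helper's return value, which had been overwriting the False set for empty comment bodies on non-Python PRs. A distilled sketch of the pattern; should_comment is a hypothetical condensation of handle_pull_request:

def should_comment(body, permission_ok, author):
    permitted = True
    if len(body) == 0:       # nothing to say, e.g. a non-Python PR
        permitted = False
    if not permission_ok:    # was: permitted = permission_ok (the bug)
        permitted = False
    if author == 'pep8speaks':
        permitted = False
    return permitted

# An empty body stays blocked even though permission would allow it.
assert should_comment('', permission_ok=True, author='x') is False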
bde3b3d1d90338e23e2550bc6fdd317e5e696f0f
|
Add command-line arguments: root, hosts and name with regexp
|
zk-find.py
|
zk-find.py
|
#
# This is a FIND utility for Zookeeper
#
# Author: Aleksandr Vinokurov <aleksandr.vin@gmail.com>
# Url: https://github.com/aleksandr-vin/zk-find
#
import logging
import logging.config
try:
logging.config.fileConfig('logging.conf')
except:
logging.basicConfig()
logger = logging.getLogger('zk-find')
from kazoo.client import KazooClient
from kazoo.client import KazooState
from kazoo.exceptions import NoNodeError
def list_children(parent):
try:
for node in zk.get_children(parent):
path = parent + "/" + node
print path
list_children(path)
except NoNodeError:
pass
from sys import argv
path = ''
hosts = '127.0.0.1:2181'
if len(argv) > 2:
hosts = argv[1]
path = argv[2]
elif len(argv) > 1:
path = argv[1]
def my_listener(state):
if state == KazooState.LOST:
logger.debug('Session lost')
elif state == KazooState.SUSPENDED:
logger.debug('Session suspended')
else:
logger.info('Session connected')
zk = KazooClient(hosts=hosts)
zk.add_listener(my_listener)
zk.start()
list_children(path)
zk.stop()
|
Python
| 0.000002
|
@@ -182,16 +182,32 @@
g.config
+%0Aimport argparse
%0A%0Atry:%0A
@@ -446,16 +446,26 @@
odeError
+%0Aimport re
%0A%0Adef li
@@ -478,24 +478,29 @@
ldren(parent
+,prog
):%0A try:%0A
@@ -583,16 +583,83 @@
+ node%0A
+ if prog:%0A if prog.search(node):%0A
@@ -703,16 +703,21 @@
ren(path
+,prog
)%0A ex
@@ -751,163 +751,72 @@
ass%0A
-%0Afrom sys import argv%0A%0Apath = ''%0Ahosts = '127.0.0.1:2181'%0Aif len(argv) %3E 2:%0A hosts = argv%5B1%5D%0A path = argv%5B2%5D%0Aelif len(argv) %3E 1:%0A path = argv%5B1%5D
+ except ValueError as e:%0A print 'ValueError: %25s' %25 (e)
%0A%0Ade
@@ -1041,16 +1041,1022 @@
cted')%0A%0A
+# defaults%0Adefaults = %7B%0A 'hosts' : '127.0.0.1:2181'%0A ,'root' : ''%0A%7D%0A%0Aparser = argparse.ArgumentParser(epilog='''%0AReport (and track progress on fixing) bugs via the github issues%0Apage at https://github.com/aleksandr-vin/zk-find/issues or,%0Aif you have no web access, by sending email to %3Caleksandr.vin+bug-zk-find@gmail.com%3E.%0A''')%0Aparser.add_argument('root', nargs='?', type=str,%0A help='root of the search', default='%25s' %25 defaults%5B'root'%5D,);%0Aparser.add_argument('--hosts', default='%25s' %25 defaults%5B'hosts'%5D,%0A type=str, metavar='HOST:PORT%5B,HOST:PORT%5D', dest='hosts', required=False,%0A help='comma-separated list of hosts to connect to (default: %25s)' %25 defaults%5B'hosts'%5D)%0Aparser.add_argument('--name',%0A type=str, metavar='REGEXP', dest='name',%0A help='regexp for matching node names')%0A%0Aif __name__ == %22__main__%22:%0A # setting run-time args by the command-line parameters%0A settings = parser.parse_args()%0A
zk = Kaz
@@ -2074,15 +2074,28 @@
sts=
+settings.
hosts)%0A
+
zk.a
@@ -2119,16 +2119,20 @@
stener)%0A
+
zk.start
@@ -2138,28 +2138,147 @@
t()%0A
-list_children(path)%0A
+ global prog%0A prog = None%0A if (settings.name):%0A prog = re.compile(settings.name)%0A list_children(settings.root,prog)%0A
zk.s
|
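The diff above swaps the hand-rolled sys.argv handling for argparse plus an optional --name regexp filter. A condensed, runnable sketch of just the interface it introduces:

import argparse
import re

parser = argparse.ArgumentParser(prog='zk-find')
parser.add_argument('root', nargs='?', default='',
                    help='root of the search')
parser.add_argument('--hosts', default='127.0.0.1:2181',
                    metavar='HOST:PORT[,HOST:PORT]',
                    help='comma-separated list of hosts to connect to')
parser.add_argument('--name', metavar='REGEXP',
                    help='regexp for matching node names')

args = parser.parse_args(['/brokers', '--name', '^ids$'])
prog = re.compile(args.name) if args.name else None
assert args.hosts == '127.0.0.1:2181' and prog.search('ids')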
bf471cdcef8ce8cce973139fa4c4ad3dd4f306a3
|
Disable 'hey Siri' with AirPods
|
conf/mac.py
|
conf/mac.py
|
# type: ignore
# silence linter errors
defaults = defaults
run = run
### trackpad settings ###
for key in (
'com.apple.AppleMultitouchTrackpad',
'com.apple.driver.AppleBluetoothMultitouch.trackpad'
):
trackpad = defaults[key]
trackpad['Clicking'] = True # touch to click
# enable *both* methods of right clicking
trackpad['TrackpadRightClick'] = True # two finger tap
trackpad['TrackpadCornerSecondaryClick'] = 2 # pushing to click in right corner
# disable "smart zoom" because it puts a delay on two-finger-tap right click
trackpad['TrackpadTwoFingerDoubleTapGesture'] = False
trackpad['TrackpadThreeFingerDrag'] = True
# disable dashboard
defaults['com.apple.dashboard']['mcx-disabled'] = True
dock = defaults['com.apple.dock']
dock['autohide'] = False
dock['autohide-delay'] = .05
dock['autohide-time-modifier'] = 0.4
dock['show-recents'] = False
# http://www.defaults-write.com/enable-highlight-hover-effect-for-grid-view-stacks/
dock['mouse-over-hilite-stack'] = True
# Spaces
dock['mru-spaces'] = False # don't reorder spaces based on use
defaults.g['AppleSpacesSwitchOnActivate'] = False # don't switch to another space when alt tabbing
# hot corners
# Possible values:
# 0: no-op
# 2: Mission Control
# 3: Show application windows
# 4: Desktop
# 5: Start screen saver
# 6: Disable screen saver
# 7: Dashboard
# 10: Put display to sleep
# 11: Launchpad
# 12: Notification Center
dock['wvous-bl-corner'] = 10 # bottom left: sleep
dock['wvous-bl-modifier'] = 0
dock['wvous-br-corner'] = 3 # bottom right: application windows
dock['wvous-br-modifier'] = 0
dock['wvous-tl-corner'] = 2 # top left: mission control
dock['wvous-tl-modifier'] = 0
dock['wvous-tr-corner'] = 4 # top right: desktop
dock['wvous-tr-modifier'] = 0
finder = defaults['com.apple.finder']
finder['ShowPathbar'] = True
finder['ShowStatusBar'] = True
# show battery % in menubar
defaults['com.apple.menuextra.battery']['ShowPercent'] = True
# key repeat rate and delay
defaults.g['InitialKeyRepeat'] = 10
defaults.g['KeyRepeat'] = 2
# turn on "shake mouse pointer to locate"
defaults.g['CGDisableCursorLocationMagnification'] = False
defaults['com.apple.screencapture']['show-thumbnail'] = False
# set file-type associations
associations = {
'com.microsoft.vscode': [
# plain-text association also sets default text editor (open -t)
'public.plain-text',
'public.python-script',
],
'org.videolan.vlc': [
'public.mp3',
'public.mpeg-4',
],
'org.libreoffice.script': [
'public.comma-separated-values-text',
],
}
for program, types in associations.items():
for type in types:
run(['duti', '-s', program, type, 'all'])
# make tab move between "All Controls" (System Prefs -> Keyboard -> Shortcuts)
defaults.g['AppleKeyboardUIMode'] = 3
# show the date in the clock
defaults['com.apple.menuextra.clock']['DateFormat'] = "EEE MMM d h:mm a"
# use function keys as function keys
defaults.g['com.apple.keyboard.fnState'] = True
# don't close windows when quitting program (required for iterm2 to restore windows)
defaults.g['NSQuitAlwaysKeepsWindows'] = True
# zoom with ctrl+mouse wheel (System Prefs -> Accessibility -> Zoom)
defaults['com.apple.universalaccess']['closeViewScrollWheelToggle'] = True
flycut = defaults['com.generalarcade.flycut']
# shortcut to ctrl+cmd v
flycut["ShortcutRecorder mainHotkey"] = {'keyCode': 47, 'modifierFlags': 1310720}
flycut['loadOnStartup'] = 1
flycut['pasteMovesToTop'] = 1
flycut['removeDuplicates'] = 1
flycut['savePreference'] = 2 # "after each clip"
iterm = defaults['com.googlecode.iterm2']
iterm['PrefsCustomFolder'] = '~/.config/iterm2'
iterm['LoadPrefsFromCustomFolder'] = True
dash = defaults['com.kapeli.dashdoc']
dash['syncFolderPath'] = "~/Documents/Dash"
dash['snippetSQLPath'] = "~/Documents/Dash/snippets.dash"
caffeine = defaults['com.intelliscapesolutions.caffeine']
caffeine['ActivateOnLaunch'] = False
caffeine['SuppressLaunchMessage'] = True
# startup items - https://apple.stackexchange.com/a/310502/
required_login_apps = {'Flycut', 'SpotMenu', 'Flux', 'iTerm', 'Alfred 4', 'Horo', 'Caffeine'}
current_login_apps = set(
filter(None,
run(['osascript', '-e' 'tell application "System Events" to get the name of every login item'], cap='stdout').strip().split(', ')
)
)
script = 'tell application "System Events" to make login item at end with properties {{path:"/Applications/{app}.app", hidden:false}}'
print(f"Current login apps: {current_login_apps}. Required login apps: {required_login_apps}")
for app in required_login_apps - current_login_apps:
print(f"Setting '{app}' to run on login")
run(['osascript', '-e', script.format(app=app)])
# menubar items
menus = [
'/System/Library/CoreServices/Menu Extras/{}.menu'.format(m)
for m in ['Bluetooth', 'Volume', 'AirPort', 'TextInput', 'Battery', 'Clock', 'User']
]
current_menus = defaults['com.apple.systemuiserver']['menuExtras'].read()
menu_items_to_remove = set(current_menus) - set(menus)
if menu_items_to_remove:
print("Removing:", menu_items_to_remove)
defaults['com.apple.systemuiserver']['menuExtras'] = menus
# change screenshots location
screenshot_dir = '~/Desktop/Screenshots'
run(f"mkdir -p {screenshot_dir}")
defaults['com.apple.screencapture']['location'] = screenshot_dir
|
Python
| 0.999967
|
@@ -5362,8 +5362,145 @@
hot_dir%0A
+%0A# turn off %22hey Siri%22 (on Mac, triggers more by accident than on purpose)%0Adefaults%5B'com.apple.Siri'%5D%5B'VoiceTriggerUserEnabled'%5D = False%0A
|
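For context, the defaults[...] assignment added above presumably expands to a `defaults write` invocation. A hedged sketch of the shell-level equivalent via subprocess, macOS only and assuming the key lives in the com.apple.Siri domain exactly as the diff indicates:

import subprocess

# Equivalent of: defaults['com.apple.Siri']['VoiceTriggerUserEnabled'] = False
subprocess.run(
    ['defaults', 'write', 'com.apple.Siri',
     'VoiceTriggerUserEnabled', '-bool', 'false'],
    check=True)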
04dd322f8e3f040cf9b25d2c0ce77e3aa9fea02c
|
Update .cluster.py
|
.cluster.py
|
.cluster.py
|
from socket import gethostname
import os
defaults = {
'netmask': '255.255.255.0',
'public_key': '~/.ssh/id_rsa.pub',
'private_key': '~/.ssh/id_rsa',
'domain_name': 'local',
'extra_disks': {},
'openstack': {
'flavor': 'm1.small',
'image': 'CC-Ubuntu14.04',
'key_name': gethostname(),
'network': '{}-net'.format(os.getenv('OS_PROJECT_NAME')),
'create_floating_ip': True,
'floating_ip_pool': 'ext-net',
'security_groups': ['default'],
},
'vagrant': {
'provider': 'libvirt',
'box': 'ubuntu/14.04'
},
'provider': 'openstack',
}
zk = lambda i: {
'zk%d' % i: {}
}
master = lambda i: {
'master%d' % i: {}
# 'openstack': {'security_groups': ['default', 'hadoop-status']}
}
data = lambda i: {
'data%d' % i: {}
}
frontend = lambda i: {
'frontend%d' % i: {
# 'extra_disks': {'vdb': {'size': '10G'}},
# 'openstack': {'create_floating_ip': True},
}
}
loadbalancer = lambda i: {
'loadbalancer%d' % i: {
# 'openstack': {'flavor': 'm1.medium',
# 'security_groups': ['default', 'sshlb'],}
}
}
monitor = lambda i: {
'monitor%d' % i: {}
}
gluster = lambda i: {
'gluster%d' % i: {
'ip': '10.0.6.{}'.format(i+10),
'openstack': {'flavor': 'm1.large',}
}
}
from vcl.specification import expand, group, combine, chain
N_MASTER = 3
N_DATA = 3
machines = list(chain(
expand(master, N_MASTER),
# expand(data, N_DATA),
))
_zookeepernodes = [(master, [0,1,2])]
_namenodes = [(master, [0, 1])]
_journalnodes = [(master, [0,1,2])]
_historyservers = [(master, [2])]
_resourcemanagers = [(master, [0,1])]
_datanodes = [(master, xrange(N_DATA))]
_frontends = [(master, [0])]
_monitor = [(master, [2])]
zookeepers = group('zookeepernodes', _zookeepernodes)
namenodes = group('namenodes', _namenodes)
journalnodes = group('journalnodes', _journalnodes)
historyservers = group('historyservernodes', _historyservers)
resourcemanagers = group('resourcemanagernodes', _resourcemanagers)
datanodes = group('datanodes', _datanodes)
frontends = group('frontendnodes', _frontends)
hadoopnodes = combine('hadoopnodes', namenodes, datanodes,
journalnodes, historyservers, frontends)
monitor = group('monitornodes', _monitor)
inventory = [
zookeepers,
namenodes,
journalnodes,
historyservers,
resourcemanagers,
datanodes,
frontends,
hadoopnodes,
monitor,
]
spec = {
'defaults': defaults,
'machines': machines,
'inventory': inventory,
}
######################################################################
# hack to define zookeeper_id for zookeeper nodes automatically
host_vars = 'host_vars'
if not os.path.exists(host_vars):
os.makedirs(host_vars)
import time
zk_id = 0
for grp in zookeepers.itervalues():
for host in grp:
zk_id += 1
host_file = os.path.join(host_vars, host)
if os.path.exists(host_file):
print 'WARNING', host_file, 'already exists'
now = time.time()
bkp = host_file + '.' + str(now)
os.rename(host_file, bkp)
entry = 'zookeeper_id: {}\n'.format(zk_id)
print host_file, 'WROTE', entry
with open(host_file, 'w') as fd:
fd.write(entry)
|
Python
| 0.000001
|
@@ -279,22 +279,23 @@
': '
-CC-
Ubuntu
+-
14.04
+-64
',%0A
@@ -424,19 +424,20 @@
ng_ip':
-Tru
+Fals
e, %0A
|
04ee8994563ee5bdbb9ae48604b2911197c54afd
|
Add environment variable to opt out of log collection (#2176)
|
conftest.py
|
conftest.py
|
""" This file configures python logging for the pytest framework
integration tests
Note: pytest must be invoked with this file in the working directory
E.G. py.test frameworks/<your-frameworks>/tests
"""
import logging
import os
import os.path
import sys
import pytest
import sdk_diag
import sdk_utils
import teamcity
log_level = os.getenv('TEST_LOG_LEVEL', 'INFO').upper()
log_levels = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL', 'EXCEPTION')
assert log_level in log_levels, \
'{} is not a valid log level. Use one of: {}'.format(log_level, ', '.join(log_levels))
# write everything to stdout due to the following circumstances:
# - shakedown uses print() aka stdout
# - teamcity splits out stdout vs stderr into separate outputs, we'd want them combined
logging.basicConfig(
format='[%(asctime)s|%(name)s|%(levelname)s]: %(message)s',
level=log_level,
stream=sys.stdout)
# reduce excessive DEBUG/INFO noise produced by some underlying libraries:
for noise_source in [
'dcos.http',
'dcos.marathon',
'dcos.util',
'paramiko.transport',
'urllib3.connectionpool']:
logging.getLogger(noise_source).setLevel('WARNING')
log = logging.getLogger(__name__)
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item: pytest.Item, call): # _pytest.runner.CallInfo
'''Hook to run after every test, before any other post-test hooks.
See also: https://docs.pytest.org/en/latest/example/simple.html\
#making-test-result-information-available-in-fixtures
'''
# Execute all other hooks to obtain the report object.
outcome = yield
# Handle failures. Must be done here and not in a fixture in order to
# properly handle post-yield fixture teardown failures.
sdk_diag.handle_test_report(item, outcome.get_result())
def pytest_runtest_teardown(item: pytest.Item):
'''Hook to run after every test.'''
# Inject footer at end of test, may be followed by additional teardown.
# Don't do this when running in teamcity, where it's redundant.
if not teamcity.is_running_under_teamcity():
print('''
==========
======= END: {}::{}
=========='''.format(sdk_diag.get_test_suite_name(item), item.name))
def pytest_runtest_setup(item: pytest.Item):
'''Hook to run before every test.'''
# Inject header at start of test, following automatic "path/to/test_file.py::test_name":
# Don't do this when running in teamcity, where it's redundant.
if not teamcity.is_running_under_teamcity():
print('''
==========
======= START: {}::{}
=========='''.format(sdk_diag.get_test_suite_name(item), item.name))
sdk_diag.handle_test_setup(item)
sdk_utils.check_dcos_min_version_mark(item)
|
Python
| 0
|
@@ -1213,16 +1213,267 @@
ame__)%0A%0A
+# The following environment variable allows for log collection to be turned off.%0A# This is useful, for example, in testing.%0AINTEGRATION_TEST_LOG_COLLECTION = str(%0A    os.environ.get('INTEGRATION_TEST_LOG_COLLECTION', %22True%22)%0A).lower() in %5B%22true%22, %221%22%5D%0A%0A
%0A@pytest
@@ -1570,16 +1570,17 @@
, call):
+
# _pyte
@@ -2019,16 +2019,60 @@
ilures.%0A
+ if INTEGRATION_TEST_LOG_COLLECTION:%0A
sdk_
@@ -2123,16 +2123,107 @@
sult())%0A
+ else:%0A print(%22INTEGRATION_TEST_LOG_COLLECTION==False. Skipping log collection%22)%0A
%0A%0Adef py
@@ -3034,16 +3034,60 @@
name))%0A%0A
+ if INTEGRATION_TEST_LOG_COLLECTION:%0A
sdk_
|
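The parsing idiom the diff introduces accepts "true"/"1" in any letter case and defaults to enabled when the variable is unset. Extracted into a hypothetical helper:

import os

def env_flag(name, default='True'):
    return str(os.environ.get(name, default)).lower() in ['true', '1']

os.environ['INTEGRATION_TEST_LOG_COLLECTION'] = '0'
assert env_flag('INTEGRATION_TEST_LOG_COLLECTION') is False
del os.environ['INTEGRATION_TEST_LOG_COLLECTION']
assert env_flag('INTEGRATION_TEST_LOG_COLLECTION') is True  # default on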
15314a94f9c590cfda24f3ae4191c7fcf9997668
|
Revert "Hack code to pass tests"
|
pytmatrix/orientation.py
|
pytmatrix/orientation.py
|
"""
Copyright (C) 2009-2013 Jussi Leinonen
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import numpy as np
from scipy.integrate import quad, dblquad
def gaussian_pdf(std=10.0):
"""Gaussian PDF for orientation averaging.
Args:
std: The standard deviation of the PDF (the mean is always taken to
be 0).
Returns:
pdf(x), a function that returns the value of the spherical Jacobian-
normalized Gaussian PDF with the given STD at x (degrees). It is
normalized for the interval [0, 180].
"""
norm_const = 1.0
def pdf(x):
return norm_const*np.exp(-0.5 * (x/std)**2) * \
np.sin(np.pi/180.0 * x)
norm_dev = quad(pdf, 0.0, 180.0)[0]
# ensure that the integral over the distribution equals 1
norm_const /= norm_dev
return pdf
def uniform_pdf():
"""Uniform PDF for orientation averaging.
Returns:
pdf(x), a function that returns the value of the spherical Jacobian-
normalized uniform PDF. It is normalized for the interval [0, 180].
"""
norm_const = 1.0
def pdf(x):
return norm_const * np.sin(np.pi/180.0 * x)
norm_dev = quad(pdf, 0.0, 180.0)[0]
# ensure that the integral over the distribution equals 1
norm_const /= norm_dev
return pdf
def orient_single(tm):
"""Compute the T-matrix using a single orientation scatterer.
Args:
tm: TMatrix (or descendant) instance
Returns:
The amplitude (S) and phase (Z) matrices.
"""
return tm.get_SZ_single()
def orient_averaged_adaptive(tm):
"""Compute the T-matrix using variable orientation scatterers.
This method uses a very slow adaptive routine and should mainly be used
for reference purposes. Uses the set particle orientation PDF, ignoring
the alpha and beta attributes.
Args:
tm: TMatrix (or descendant) instance
Returns:
The amplitude (S) and phase (Z) matrices.
"""
S = np.zeros((2,2), dtype=complex)
Z = np.zeros((4,4))
def Sfunc(beta, alpha, i, j, real):
(S_ang, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)
s = S_ang[i,j].real if real else S_ang[i,j].imag
return s * tm.or_pdf(beta)
ind = range(2)
for i in ind:
for j in ind:
S.real[i,j] = dblquad(Sfunc, 0.0, 360.0,
lambda x: 0.0, lambda x: 180.0, (i,j,True))[0]/360.0
S.imag[i,j] = dblquad(Sfunc, 0.0, 360.0,
lambda x: 0.0, lambda x: 180.0, (i,j,False))[0]/360.0
def Zfunc(beta, alpha, i, j):
        (S_ang, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)
return Z_ang[i,j] * tm.or_pdf(beta)
ind = range(4)
for i in ind:
for j in ind:
Z[i,j] = dblquad(Zfunc, 0.0, 360.0,
lambda x: 0.0, lambda x: 180.0, (i,j))[0]/360.0
return (S, Z)
def orient_averaged_fixed(tm):
"""Compute the T-matrix using variable orientation scatterers.
This method uses a fast Gaussian quadrature and is suitable
for most use. Uses the set particle orientation PDF, ignoring
the alpha and beta attributes.
Args:
tm: TMatrix (or descendant) instance.
Returns:
The amplitude (S) and phase (Z) matrices.
"""
S = np.zeros((2,2), dtype=complex)
Z = np.zeros((4,4))
ap = np.linspace(0, 360, tm.n_alpha+1)[:-1]
aw = 1.0/tm.n_alpha
import pytmatrix.orientation as orientation
tm.orient = orientation.orient_averaged_fixed
tm._init_orient()
for alpha in ap:
for (beta, w) in zip(tm.beta_p, tm.beta_w):
(S_ang, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)
S += w * S_ang
Z += w * Z_ang
sw = tm.beta_w.sum()
#normalize to get a proper average
S *= aw/sw
Z *= aw/sw
return (S, Z)
|
Python
| 0.000001
|
@@ -4417,128 +4417,8 @@
ha%0A%0A
- import pytmatrix.orientation as orientation%0A tm.orient = orientation.orient_averaged_fixed%0A tm._init_orient()%0A
|
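A minimal sketch (not part of the record) verifying the closure-based normalization used by gaussian_pdf above: pdf reads norm_const at call time, so rescaling it after a single integration makes the PDF integrate to 1 over [0, 180]:

import numpy as np
from scipy.integrate import quad

def gaussian_pdf(std=10.0):
    norm_const = 1.0
    def pdf(x):
        # Spherical-Jacobian-weighted Gaussian, as in the record.
        return norm_const * np.exp(-0.5 * (x / std)**2) * np.sin(np.pi / 180.0 * x)
    # quad() evaluates pdf while norm_const == 1.0; the closure then picks
    # up the rescaled value on every later call.
    norm_const /= quad(pdf, 0.0, 180.0)[0]
    return pdf

pdf = gaussian_pdf(std=10.0)
print(abs(quad(pdf, 0.0, 180.0)[0] - 1.0) < 1e-8)  # True: normalized on [0, 180]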
3a9057996797aade8214a88f34fe2d0bec7bd460
|
Add return value for /init endpoint.
|
perimeter/perimeter.py
|
perimeter/perimeter.py
|
import sys
from cassandra.cluster import Cluster
from flask import Flask, render_template, jsonify
# from kafka import SimpleProducer, KafkaClient
import srvlookup
# This uses mesos-dns to locate our cassandra cluster. Cassandra
# always runs on the same port.
cassandra_cli = Cluster(['cassandra-dcos-node.cassandra.dcos.mesos'],
protocol_version=3)
# Setup cassandra if necessary
def try_setup():
print >> sys.stderr, "verifying and possibly creating cassandra schema"
cc = cassandra_cli.connect()
cc.execute("""
CREATE KEYSPACE IF NOT EXISTS TEMPLATE_CASSANDRA_KEYSPACE
WITH replication = {'class':'SimpleStrategy', 'replication_factor':3}""")
cc.execute("""
CREATE TABLE IF NOT EXISTS TEMPLATE_CASSANDRA_KEYSPACE.spark_results (
x int,
y int,
value int,
PRIMARY KEY (x, y)
)""")
print >> sys.stderr, "cassandra configured"
# Kafka may not always run on the same port, so we need to perform
# an SRV record lookup in order to find it.
# kafka_location = srvlookup.lookup('broker-0', 'tcp', 'kafka.mesos')[0]
# kafka = KafkaClient("%s:%s" % (kafka_location.host, kafka_location.port))
# Real-world Kafka workloads will gain an order of magnitude++
# more throughput when using async mode. The trade-off is your
# requests may have higher latency (the cli will instantly return
# however.) This is the classic throughput-latency trade-off at play.
# producer = SimpleProducer(kafka, async=True)
app = Flask(__name__)
default_ranges = {"intensity": [0, 2000],
"x": [0, 70],
"y": [0, 100]}
sensor_map = {1: [95, 20],
2: [80, 20],
3: [58, 20],
4: [15, 5],
5: [55, 5],
6: [55, 95],
7: [15, 95]}
print >> sys.stderr, "after env setup"
@app.route('/')
@app.route('/test')
def test_endpoint():
return render_template('index.html')
@app.route('/init')
def init_endpoint():
try_setup()
@app.route('/whatever/<value>')
def index(value=None):
# serve code that periodically hits /read to get the
# latest spark results from Cassandra
return render_template('index.html', value=value)
@app.route('/read')
def read():
# read data from cassandra, if it's been populated yet
try:
session = cassandra_cli.connect()
results = session.execute('SELECT x, y, value '
'FROM TEMPLATE_CASSANDRA_KEYSPACE.spark_results',
timeout=5)
rows = [{"x": r.x, "y": r.y, "intensity": r.value} for r in results]
return jsonify({"ranges": default_ranges,
"sources": rows})
    except Exception as e:
print >> sys.stderr, "failed to execute read on cassandra: %s" % e
return e
@app.route('/remove/<sensor_id>')
def remove(sensor_id):
(x, y) = sensor_id.split(',')
# read data from cassandra, if it's been populated yet
try:
session = cassandra_cli.connect()
results = session.execute('DELETE FROM '
'TEMPLATE_CASSANDRA_KEYSPACE.spark_results '
'WHERE x = ' + x + ' AND y = ' + y)
return 'removed data at x=%s, y=%s' % (x, y)
    except Exception as e:
print >> sys.stderr, "failed to execute delete on cassandra: %s" % e
return e
@app.route('/submit/<sensor_id>/<sensor_value>')
def write(sensor_id, sensor_value):
if sensor_id.find(',') >= 0:
(x, y) = sensor_id.split(',')
else:
(x, y) = [str(i) for i in sensor_map[int(sensor_id)]]
value_array = [int(i) for i in sensor_value.split(':')]
average_value = sum(value_array) / len(value_array)
# producer.send_messages(b'TEMPLATE_KAFKA_TOPIC',
# b"%s %d" % (sensor_id, sensor_value))
try:
session = cassandra_cli.connect()
results = session.execute('INSERT INTO '
'TEMPLATE_CASSANDRA_KEYSPACE.spark_results '
'(x, y, value) '
'VALUES (%s, %s, %d)' % (x, y, average_value),
timeout=5)
print >> sys.stderr, "after execute"
return 'sensor %s submitted value %d' % (sensor_id, average_value)
    except Exception as e:
print >> sys.stderr, "failed to execute read on cassandra: %s" % e
# This doesn't actually run in the container (not __main__ when in uWSGI) and
# is for local debugging.
if __name__ == "__main__":
# In a real environment, never run with debug=True
# because it gives you an interactive shell when you
# trigger an unhandled exception.
app.run(host="0.0.0.0", debug=True, port=8080)
|
Python
| 0
|
@@ -907,16 +907,50 @@
figured%22
+%0A return %22cassandra configured%22
%0A%0A# Kafk
|
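The /remove endpoint above splices x and y straight into the CQL string. A hedged sketch of the parameterized form the cassandra-driver supports (remove_point is an illustrative helper; the keyspace and table names are the record's placeholders, and a reachable cluster is assumed):

from cassandra.cluster import Cluster

def remove_point(session, x, y):
    # Bind values instead of concatenating them into the statement;
    # the driver handles quoting and typing.
    session.execute(
        'DELETE FROM TEMPLATE_CASSANDRA_KEYSPACE.spark_results '
        'WHERE x = %s AND y = %s',
        (int(x), int(y)))

session = Cluster(['cassandra-dcos-node.cassandra.dcos.mesos']).connect()
remove_point(session, 10, 20)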
b6da8865c9a12b9ce88d809d2fa4dfb601be01d0
|
make sure same timezone is used when calculating delta
|
reboot_required/check.py
|
reboot_required/check.py
|
# vim: ts=4:sw=4:et
# (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# project
from checks import AgentCheck
from os import stat, utime, remove
from os.path import isfile
from stat import ST_MTIME
from datetime import datetime, timedelta
class RebootRequiredCheck(AgentCheck):
REBOOT_SIGNAL_FILE = '/var/run/reboot-required'
CREATED_AT_FILE = '/var/run/reboot-required.created_at'
def check(self, instance):
status, days_since, msg = self._check(instance)
self.service_check('system.reboot_required', status, message=msg)
def _check(self, instance):
reboot_signal_file = instance.get('reboot_signal_file', self.REBOOT_SIGNAL_FILE)
created_at_file = instance.get('created_at_file', self.CREATED_AT_FILE)
warning_days = int(instance.get('days_warning', 7))
critical_days = int(instance.get('days_critical', 14))
return self._get_status(critical_days, warning_days, self._days_since(reboot_signal_file, created_at_file))
def _days_since(self, reboot_signal_file, created_at_file):
if isfile(reboot_signal_file):
if isfile(created_at_file):
created_at = self._get_created_at(created_at_file)
return datetime.utcnow() - datetime.fromtimestamp(created_at)
else:
self._touch(created_at_file)
elif isfile(created_at_file):
remove(created_at_file)
return timedelta()
def _get_status(self, critical_days, warning_days, deltatime):
if deltatime.days > critical_days:
return AgentCheck.CRITICAL, deltatime.days, "Reboot is critical: security patches applied {0} days ago"\
.format(deltatime.days)
elif deltatime.days > warning_days:
return AgentCheck.WARNING, deltatime.days, "Reboot is necessary; security patches applied {0} days ago"\
.format(deltatime.days)
else:
return AgentCheck.OK, 0, ''
def _get_created_at(self, fname):
file_stat = stat(fname)
created_at = file_stat[ST_MTIME]
return created_at
def _touch(self, fname, times=None):
open(fname, 'a').close()
utime(fname, times)
|
Python
| 0.000003
|
@@ -1311,16 +1311,19 @@
atetime.
+utc
fromtime
|
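The one-character fix above (fromtimestamp -> utcfromtimestamp) matters because st_mtime is an epoch timestamp while utcnow() is UTC wall time; mixing local and UTC skews the delta by the host's UTC offset. A small sketch showing the skew (the example timestamp is arbitrary, not from the record):

from datetime import datetime

mtime = 1500000000  # arbitrary epoch timestamp

skewed = datetime.utcnow() - datetime.fromtimestamp(mtime)      # local vs UTC
correct = datetime.utcnow() - datetime.utcfromtimestamp(mtime)  # UTC vs UTC

# On any host with a nonzero UTC offset the two deltas differ by that offset.
print(skewed - correct)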
7e398bce17bfc95471e49d95b00d396049eae7df
|
Add default postgres connection
|
phrasebook/settings.py
|
phrasebook/settings.py
|
"""
Django settings for phrasebook project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# https://devcenter.heroku.com/articles/django-assets
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!x%0lxmb9%c_3=#yu^&9g_h+7(mx7rm5g!a#+l0xz*486ca#b4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'cards.apps.CardsConfig',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'phrasebook.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'phrasebook.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
|
Python
| 0
|
@@ -2511,16 +2511,221 @@
%7D%0A%7D%0A%0A
+DATABASES = %7B%0A 'default': %7B%0A 'ENGINE': 'django.db.backends.postgresql',%0A 'NAME': 'phrasebook',%0A 'USER': '',%0A 'PASSWORD': '',%0A 'HOST': '',%0A 'PORT': '',%0A %7D%0A%7D%0A%0A
# Update
|
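The settings above define a local default and then let $DATABASE_URL override it. A minimal sketch of that behavior (the example URL is illustrative): dj_database_url.config() returns an empty dict when the variable is unset, so update() is a no-op locally and a full override in deployment:

import os
import dj_database_url

os.environ.setdefault(
    'DATABASE_URL', 'postgres://user:secret@localhost:5432/phrasebook')

DATABASES = {
    'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'db.sqlite3'},
}
DATABASES['default'].update(dj_database_url.config(conn_max_age=500))
# Now points at the postgres backend parsed from the URL above.
print(DATABASES['default']['ENGINE'])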
6c9ab323883160744f9048112079f1b7964c42b7
|
Version bump
|
bespin/__init__.py
|
bespin/__init__.py
|
VERSION="0.5.7.8"
|
Python
| 0.000001
|
@@ -12,7 +12,7 @@
5.7.
-8
+9
%22%0A
|
6f7fc9067df57c4c15204a3208768acf4b76ed85
|
Update version to 0.2.0.dev0
|
bfg9000/version.py
|
bfg9000/version.py
|
version = '0.1.0'
|
Python
| 0.000001
|
@@ -10,9 +10,14 @@
'0.
-1.
+2.0.dev
0'%0A
|
ef156eca331203696f38b2f829314c48eeb5f207
|
Update version to 0.1.0
|
bfg9000/version.py
|
bfg9000/version.py
|
version = '0.1.0-dev'
|
Python
| 0.000001
|
@@ -13,10 +13,6 @@
.1.0
--dev
'%0A
|
02a5d982178d7916d17d2310476d35657780a1e6
|
fix a bug of css
|
bgmi/front/http.py
|
bgmi/front/http.py
|
# encoding: utf-8
from __future__ import print_function, unicode_literals
import os
import json
import datetime
import hashlib
import tornado.ioloop
import tornado.options
import tornado.httpserver
import tornado.web
import tornado.template
from tornado.options import options, define
from collections import OrderedDict
from bgmi.config import BGMI_SAVE_PATH, DB_PATH
from bgmi.models import Download, Bangumi, Followed, STATUS_NORMAL, STATUS_UPDATING, STATUS_END
WEEK = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
define('port', default=8888, help='listen on the port', type=int)
define('address', default='0.0.0.0', help='binding at given address', type=str)
IMAGE_URL = 'http://bangumi.redrock-team.com'
def md5(_, string):
return hashlib.md5(string.encode('utf-8')).hexdigest()
def make_dicts(cursor, row):
return dict((cursor.description[idx][0], value)
for idx, value in enumerate(row))
class BangumiHandler(tornado.web.RequestHandler):
def get(self, _):
self.set_header('Content-Type', 'text/html')
self.write('<h1>BGmi HTTP Service</h1>')
self.write('<pre>Please modify your web server configure file\n'
'to server this path to \'%s\'.\n'
'e.g.\n\n'
'...\n'
'autoindex on;\n'
'location /bangumi {\n'
' alias %s;\n'
'}\n'
'...\n</pre>' % (BGMI_SAVE_PATH, BGMI_SAVE_PATH)
)
self.finish()
class BangumiPlayerHandler(tornado.web.RequestHandler):
def get(self, bangumi_name):
data = Followed(bangumi_name=bangumi_name)
data.select_obj()
if not data:
return self.write_error(404)
episode_list = {}
bangumi_path = os.path.join(BGMI_SAVE_PATH, bangumi_name)
for root, _, files in os.walk(bangumi_path):
if not _ and files:
_ = root.replace(bangumi_path, '').split('/')
base_path = root.replace(BGMI_SAVE_PATH, '')
if len(_) >= 2:
episode_path = root.replace(os.path.join(BGMI_SAVE_PATH, bangumi_name), '')
episode = int(episode_path.split('/')[1])
else:
episode = -1
for bangumi in files:
episode_list[episode] = {'path': os.path.join(base_path, bangumi),
'playable': bangumi.endswith('.mp4')}
break
self.render('templates/dplayer.html', bangumi=episode_list, bangumi_name=bangumi_name)
class ImageCSSHandler(tornado.web.RequestHandler):
def get(self):
data = Followed.get_all_followed()
self.set_header('Content-Type', 'text/css')
self.render('templates/image.css', data=data, image_url=IMAGE_URL)
class RssHandler(tornado.web.RequestHandler):
def get(self):
data = Download.get_all_downloads()
self.set_header('Content-Type', 'text/xml')
self.render('templates/download.xml', data=data)
class MainHandler(tornado.web.RequestHandler):
def get(self):
is_json = self.get_argument('json', False)
is_old = self.get_argument('old', False)
if not os.path.exists(DB_PATH):
self.write('BGmi db file not found.')
self.finish()
return
data = Followed.get_all_followed(STATUS_NORMAL, STATUS_UPDATING if not is_old else STATUS_END,
order='followed.updated_time', desc=True)
calendar = Bangumi.get_all_bangumi()
def shift(seq, n):
n = n % len(seq)
return seq[n:] + seq[:n]
weekday_order = shift(WEEK, datetime.datetime.today().weekday())
cal_ordered = OrderedDict()
for week in weekday_order:
cal_ordered[week] = calendar[week.lower()]
if is_json:
self.write(json.dumps(cal_ordered))
self.finish()
else:
self.render('templates/bangumi.html', data=data, cal=cal_ordered)
def make_app():
settings = {
'static_path': os.path.join(os.path.dirname(__file__), 'static'),
'ui_methods': [{'md5': md5}],
'debug': True,
}
return tornado.web.Application([
(r'/', MainHandler),
(r'^/css/image.css$', ImageCSSHandler),
(r'^/player/(.*)/$', BangumiPlayerHandler),
(r'^/bangumi/(.*)', BangumiHandler),
(r'^/rss$', RssHandler),
], **settings)
def main():
tornado.options.parse_command_line()
print('BGmi HTTP Server listening on %s:%d' % (options.address, options.port))
http_server = tornado.httpserver.HTTPServer(make_app())
http_server.listen(options.port, address=options.address)
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
main()
|
Python
| 0.000032
|
@@ -2726,32 +2726,31 @@
data =
-Followed
+Bangumi
.get_all_fol
@@ -2746,24 +2746,23 @@
get_all_
-followed
+bangumi
()%0A
|
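MainHandler above rotates the weekday calendar so today's entry comes first. The shift helper in isolation (the fixed date is only to make the output deterministic):

import datetime

WEEK = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')

def shift(seq, n):
    # Rotate the sequence left by n, wrapping around.
    n = n % len(seq)
    return seq[n:] + seq[:n]

# 2024-01-03 is a Wednesday, so weekday() == 2.
print(shift(WEEK, datetime.datetime(2024, 1, 3).weekday()))
# ('Wed', 'Thu', 'Fri', 'Sat', 'Sun', 'Mon', 'Tue')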
8099c35b31e67643e14dcd3cd30fa104fcca6fb5
|
Revert accidental change out of version.py
|
scikits/image/version.py
|
scikits/image/version.py
|
# THIS FILE IS GENERATED FROM THE SCIKITS.IMAGE SETUP.PY
version='0.2dev'
|
Python
| 0.000001
|
@@ -1,61 +1,4 @@
-# THIS FILE IS GENERATED FROM THE SCIKITS.IMAGE SETUP.PY%0A
vers
@@ -6,12 +6,18 @@
on='
-0.2
+unbuilt-
dev'%0A
+%0A
|
fc51c36b636d4a396faac02285605dafe0779104
|
Bump version to 18.0.0a5
|
resolwe_bio/__about__.py
|
resolwe_bio/__about__.py
|
"""Central place for package metadata."""
# NOTE: We use __title__ instead of simply __name__ since the latter would
# interfere with a global variable __name__ denoting object's name.
__title__ = 'resolwe-bio'
__summary__ = 'Bioinformatics pipelines for the Resolwe platform'
__url__ = 'https://github.com/genialis/resolwe-bio'
# Semantic versioning is used. For more information see:
# https://packaging.python.org/en/latest/distributing/#semantic-versioning-preferred
__version__ = "18.0.0a4"
__author__ = 'Genialis, Inc.'
__email__ = 'dev-team@genialis.com'
__license__ = 'Apache License (2.0)'
__copyright__ = '2015-2019, ' + __author__
__all__ = (
"__title__", "__summary__", "__url__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
)
|
Python
| 0
|
@@ -498,9 +498,9 @@
0.0a
-4
+5
%22%0A%0A_
|
06b410666b0720a7fbf78d57be0907ce7fd19990
|
version bump
|
picraftzero/version.py
|
picraftzero/version.py
|
version = "0.2.9"
build_string = "beta"
|
Python
| 0.000001
|
@@ -12,9 +12,10 @@
0.2.
-9
+10
%22%0Abu
|
c307dc1ef794f9a3262be67621892adec4aff25f
|
friendly task name
|
dbaas/notification/admin/task_history.py
|
dbaas/notification/admin/task_history.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
import logging
from ..models import TaskHistory
from dbaas import constants
LOG = logging.getLogger(__name__)
class TaskHistoryAdmin(admin.ModelAdmin):
perm_add_database_infra = constants.PERM_ADD_DATABASE_INFRA
actions = None
list_display_basic = ["task_id", "task_name", "task_status", "arguments", "created_at", "ended_at"]
list_display_advanced = list_display_basic + ["user"]
#list_display = ["task_id", "task_name", "task_status", "user", "arguments", "created_at", "ended_at"]
search_fields = ('task_id', "task_name", "task_status")
# list_filter = ("task_status",)
list_filter_basic = ["task_status",]
list_filter_advanced = list_filter_basic + ["user", ]
readonly_fields = ('created_at', 'ended_at', 'task_name', 'task_id', 'task_status', 'user', 'context', 'arguments', 'details')
def queryset(self, request):
qs = None
if request.user.has_perm(self.perm_add_database_infra):
qs = super(TaskHistoryAdmin, self).queryset(request)
return qs
else:
if request.GET.get('user'):
query_dict_copy = request.GET.copy()
del query_dict_copy['user']
request.GET = query_dict_copy
qs = super(TaskHistoryAdmin, self).queryset(request)
return qs.filter(user=request.user.username)
def changelist_view(self, request, extra_context=None):
if request.user.has_perm(self.perm_add_database_infra):
self.list_display = self.list_display_advanced
self.list_filter = self.list_filter_advanced
self.list_display_links = ("task_id",)
else:
self.list_display = self.list_display_basic
self.list_filter = self.list_filter_basic
self.list_display_links = (None,)
return super(TaskHistoryAdmin, self).changelist_view(request, extra_context=extra_context)
|
Python
| 0.998809
|
@@ -431,32 +431,41 @@
= %5B%22task_id%22, %22
+friendly_
task_name%22, %22tas
@@ -576,115 +576,8 @@
r%22%5D%0A
- #list_display = %5B%22task_id%22, %22task_name%22, %22task_status%22, %22user%22, %22arguments%22, %22created_at%22, %22ended_at%22%5D%0A
@@ -636,45 +636,8 @@
s%22)%0A
- # list_filter = (%22task_status%22,)%0A
@@ -863,16 +863,263 @@
ails')%0A%0A
+ def friendly_task_name(self, task_history):%0A if task_history.task_name:%0A return %22%25s%22 %25 task_history.task_name.split('.')%5B::-1%5D%5B0%5D%0A else:%0A return %22N/A%22%0A friendly_task_name.short_description = %22Task Name%22%0A%0A
def
|
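The diff above derives a friendly name by taking the last dotted component of the task path. The same logic in isolation (the sample task path is illustrative):

def friendly_task_name(task_name):
    # split('.')[::-1][0] reverses the dotted path and takes the first
    # element, i.e. the last component; equivalent to split('.')[-1].
    if task_name:
        return "%s" % task_name.split('.')[::-1][0]
    return "N/A"

print(friendly_task_name('notification.tasks.database_notify'))  # database_notify
print(friendly_task_name(None))                                  # N/A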
485c0baaf3a712d6ff2954016ae95bec451eab2f
|
Update postprocess.py
|
postprocessor/postprocess.py
|
postprocessor/postprocess.py
|
import json
import csv
def save_dict_as_csv(data, filename):
node_keys = sorted(data.keys())
time_keys = sorted(data[node_keys[0]].keys()) # add key=int for integer timesteps
header = ['date'] + node_keys
writer = csv.writer(open(filename, 'w'))
writer.writerow(header)
for t in time_keys:
row = [t]
for k in node_keys:
if t in data[k] and data[k][t] is not None:
row.append(data[k][t])
else:
row.append(0.0)
writer.writerow(row)
def dict_get(D, k1, k2, default = 0.0):
if k1 in D and k2 in D[k1]:
return D[k1][k2]
else:
return default
def dict_insert(D, k1, k2, v, collision_rule = None):
if k1 not in D:
D[k1] = {k2: v}
elif k2 not in D[k1]:
D[k1][k2] = v
else:
if collision_rule == 'sum':
D[k1][k2] += v
# elif collision_rule == 'max':
# if v is not None and (D[k1][k2] is None or v > D[k1][k2]):
# D[k1][k2] = v
elif collision_rule == 'first':
pass # do nothing, we already have the first value
elif collision_rule == 'last':
D[k1][k2] = v # replace
else:
raise ValueError('Keys [%s][%s] already exist in dictionary' % (k1,k2))
# start with empty dicts -- this is
# what we want to output (in separate files):
# flows (F), storages (S), duals (D), evap (E), shortage vol (SV) and cost (SC)
F,S,E,SV,SC = {}, {}, {}, {}, {}
D_up,D_lo,D_node = {}, {}, {}
# load network links
with open('links.csv', 'rU') as f:
reader = csv.reader(f)
network = list(reader)
# load network nodes
with open('nodes.csv', 'rU') as f:
reader = csv.reader(f)
network_nodes = list(reader)
# load list of demand nodes to find shortages/costs for
with open('demand_nodes.csv', 'r') as f:
reader = csv.reader(f)
demand_nodes = [row[0] for row in reader]
# results from Pyomo
with open('results.json', 'r') as f:
results = json.load(f)
flows = results['Solution'][1]['Variable']
constraints = results['Solution'][1]['Constraint']
for link in network:
s = ','.join(link[0:3])
if '.' in link[0] and '.' in link[1]:
n1,t1 = link[0].split('.')
n2,t2 = link[1].split('.')
is_storage_node = (n1 == n2)
if is_storage_node:
amplitude = float(link[4])
elif '.' in link[0] and link[1] == 'FINAL': # End-of-period storage for reservoirs
n1,t1 = link[0].split('.')
is_storage_node = True
amplitude = 1
else:
continue
# get values from JSON results. If they don't exist, default is 0.0.
# (sometimes pyomo does not include zero values in the output)
v = dict_get(flows, 'X[%s]' % s, 'Value')
d1 = dict_get(constraints, 'limit_upper[%s]' % s, 'Dual')
d2 = dict_get(constraints, 'limit_lower[%s]' % s, 'Dual')
# sum over piecewise components
if is_storage_node:
key = n1
evap = (1 - amplitude)*float(v)/amplitude
dict_insert(S, key, t1, v, 'sum')
dict_insert(E, key, t1, evap, 'sum')
else:
key = n1 + '-' + n2
dict_insert(F, key, t1, v, 'sum')
# Check for urban or ag demands
if key in demand_nodes:
ub = float(link[6])
unit_cost = float(link[3])
if (ub - v) > 1e-6: # if there is a shortage
dict_insert(SV, key, t1, ub-v, 'sum')
dict_insert(SC, key, t1, -1*unit_cost*(ub-v), 'sum')
else:
dict_insert(SV, key, t1, 0.0, 'sum')
dict_insert(SC, key, t1, 0.0, 'sum')
# open question: what to do about duals on pumping links? Is this handled?
dict_insert(D_up, key, t1, d1, 'last')
dict_insert(D_lo, key, t1, d2, 'first')
# get dual values for nodes (mass balance)
for node in network_nodes:
if '.' in node[0]:
n3,t3 = node[0].split('.')
d3 = dict_get(constraints,'flow[%s]' % node[0], 'Dual')
dict_insert(D_node, n3, t3, d3)
# write the output files
things_to_save = [(F, 'flow'), (S, 'storage'), (D_up, 'dual_upper'),
(D_lo, 'dual_lower'), (D_node, 'dual_node'),
(E,'evaporation'), (SV,'shortage_volume'),
(SC,'shortage_cost')]
for data,name in things_to_save:
save_dict_as_csv(data, name + '.csv')
|
Python
| 0.000001
|
@@ -15,16 +15,36 @@
port csv
+%0Aimport pandas as pd
%0A%0Adef sa
@@ -1450,18 +1450,25 @@
n('links
-.c
+updated.t
sv', 'rU
@@ -1491,32 +1491,47 @@
r = csv.reader(f
+,delimiter='%5Ct'
)%0A network = li
@@ -1575,26 +1575,33 @@
open('nodes
-.c
+updated.t
sv', 'rU') a
@@ -1624,24 +1624,39 @@
csv.reader(f
+,delimiter='%5Ct'
)%0A network_
|
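A short demonstration of the record's dict_insert collision rules, which decide what happens when the same (key, timestep) pair is written twice, e.g. when summing piecewise link components:

def dict_insert(D, k1, k2, v, collision_rule=None):
    if k1 not in D:
        D[k1] = {k2: v}
    elif k2 not in D[k1]:
        D[k1][k2] = v
    elif collision_rule == 'sum':
        D[k1][k2] += v
    elif collision_rule == 'first':
        pass             # keep the value that arrived first
    elif collision_rule == 'last':
        D[k1][k2] = v    # replace with the newest value
    else:
        raise ValueError('Keys [%s][%s] already exist in dictionary' % (k1, k2))

D = {}
for v in (1.0, 2.0, 3.0):
    dict_insert(D, 'node', 't0', v, 'sum')
print(D)  # {'node': {'t0': 6.0}} -- components summed per timestep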
5bdd659768ad5e5a50f85113a2d354ac51653b42
|
Update ds_list_contains_duplicate.py
|
leetcode/ds_list_contains_duplicate.py
|
leetcode/ds_list_contains_duplicate.py
|
# @file Contains Duplicate
# @brief Given an array of numbers find if there are any duplicates
# https://leetcode.com/problems/contains-duplicate/
'''
Given an array of integers, find if the array contains any duplicates.
Your function should return true if any value appears at least twice in the
array, and it should return false if every element is distinct.
'''
# Brute force - 2 loop approach (complexity: time = O(n^2), space = O(1))
# Note: This is not accepted by leetcode because of high time complexity
def containsDuplicate1(self, nums):
if len(nums) < 2:
return False
for i in range(len(nums)-1):
for j in range(i+1, len(nums)):
if nums[i] == nums[j]:
return True
return False
# Sort approach (complexity: time = O(n log n), space = O(1))
def containsDuplicate2(self, nums):
if len(nums) < 2: #if num elements is less than 2, no duplicates
return False
nums.sort()
for i in range(len(nums)-1):
if nums[i] == nums[i+1]:
return True
return False
# Dictionary approach (complexity: time = O(n), space = O(n))
def containsDuplicate3(self, nums):
dict = {}
for elem in nums:
dict[elem] = dict.get(elem, 0) + 1
if dict[elem] > 1:
return True
return False
# Set approach (complexity: time = O(n), space = O(n))
def containsDuplicate4(self, nums):
return len(nums) != len(set(nums))
|
Python
| 0.000003
|
@@ -1084,48 +1084,177 @@
oach
- (complexity: time = O(n), space = O(n))
+%0A#Use a dictionary to store all numbers. If a number is seen 2nd time return immediately%0A#Time Complexity = O(n) since we have a single for-loop to look at all numbers
%0Adef
@@ -1308,19 +1308,18 @@
for
-ele
+nu
m in num
@@ -1333,40 +1333,40 @@
-dict%5Belem%5D = dict.get(elem, 0) +
+if num not in dict: dict%5Bnum%5D =
1%0A
@@ -1376,37 +1376,27 @@
+el
if
-dict%5Belem%5D %3E 1:%0A
+num in dict:
re
|
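The diff above rewrites the dictionary approach to return as soon as a number is seen a second time. That variant in isolation (the function name is illustrative):

def contains_duplicate(nums):
    seen = {}
    for num in nums:
        if num in seen:
            return True   # second sighting: stop early
        seen[num] = 1
    return False

print(contains_duplicate([1, 2, 3, 1]))  # True
print(contains_duplicate([1, 2, 3, 4]))  # False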
e10195f93cb39afcc432ec25466073cec093b2bb
|
remove debug output
|
app/form/validators.py
|
app/form/validators.py
|
from wtforms.validators import ValidationError
from app.pacman import get_pkg
from app.util import multiline_to_list
from pyalpm import vercmp
class ValidPackageName(object):
def __init__(self):
self.message = u'Unknown package.'
def __call__(self, form, field):
versions = get_pkg(field.data)
if not versions:
raise ValidationError(self.message)
class ValidPackageNames(object):
def __init__(self):
self.message = u'Unknown package {}.'
def __call__(self, form, field):
pkgnames = multiline_to_list(field.data)
print(pkgnames)
for pkgname in pkgnames:
print(pkgname)
versions = get_pkg(pkgname)
if not versions:
raise ValidationError(self.message.format(pkgname))
class SamePackageVersions(object):
def __init__(self):
self.message = u'Mismatching version {}.'
def __call__(self, form, field):
pkgnames = multiline_to_list(field.data)
ref_version = None
for pkgname in pkgnames:
versions = get_pkg(pkgname)
ref_version = ref_version if ref_version else versions[0]
if not versions or 0 != vercmp(ref_version.version, versions[0].version):
raise ValidationError(self.message.format(pkgname))
|
Python
| 0
|
@@ -591,83 +591,32 @@
-print(pkgnames)%0A for pkgname in pkgnames:%0A print(pkgname)
+for pkgname in pkgnames:
%0A
|
43b5fdf54442bf1e80afd722e86d51ea029f6996
|
Fix repository-sync crashing on overlapping external projects (RCE-1968)
|
mint/scripts/repository_sync.py
|
mint/scripts/repository_sync.py
|
#
# Copyright (c) 2011 rPath, Inc.
#
import hashlib
import logging
import sys
from conary.dbstore import sqlerrors
from mint.lib.scriptlibrary import GenericScript
from mint.db import database
from mint.db import repository
from rpath_proddef import api1 as proddef
log = logging.getLogger(__name__)
# Bump this to force all branches to be refreshed.
SYNC_VERSION = 3
class Script(GenericScript):
logFileName = 'scripts.log'
newLogger = True
def action(self):
self.loadConfig()
args = sys.argv[1:]
if '--debug' in args:
args.remove('--debug')
self.resetLogging(verbose=True)
sync = SyncTool(self.cfg)
if args:
for fqdn in args:
sync.syncReposByFQDN(fqdn)
else:
sync.syncAll()
class SyncTool(object):
def __init__(self, cfg, db=None):
self.cfg = cfg
if not db:
db = database.Database(cfg)
self.db = db
self.reposManager = repository.RepositoryManager(self.cfg, self.db.db,
bypass=True)
self.client = self.reposManager.getClient(userId=repository.ANY_READER)
self.repos = self.client.getRepos()
self._platformMap = None
def syncAll(self):
for handle in self.reposManager.iterRepositories():
self._syncReposMaybe(handle)
def syncReposByFQDN(self, fqdn):
handle = self.reposManager.getRepositoryFromFQDN(fqdn)
self._syncReposMaybe(handle)
def _syncReposMaybe(self, handle):
self._syncRepos(handle)
self.reposManager.reset()
def _syncRepos(self, handle):
# FIXME: more error handling: missing or inaccessible stuff shouldn't
# crash the whole script or make excessive noise.
for x in range(3):
try:
# Get current branch/stage structure.
cu = self.db.cursor()
cu.execute("""SELECT label, productversionid, cache_key
FROM ProductVersions WHERE projectId = ?""", handle.projectId)
branchMap = dict((x[0], x[1:]) for x in cu)
# Resolve a list of proddef troves on this repository.
name = 'product-definition:source'
result = self.repos.getAllTroveLeaves(handle.fqdn, {name: None})
if name not in result:
return
for version in result[name]:
self._syncBranchMaybe(handle, version, branchMap)
except sqlerrors.ColumnNotUnique:
# Often, creating or modifying a branch through the API results
# in the database being updated directly as well, which
# conflicts here. Just go through the cycle again to make sure
# everything is tidy.
self.db.rollback()
continue
else:
self.db.commit()
break
else:
raise
def _syncBranchMaybe(self, handle, version, branchMap):
"""Compute a hash of the proddef version, then compare to the existing
one in the database to decide whether to update this branch."""
label = str(version.trailingLabel())
cacheKey = '\0'.join((str(SYNC_VERSION), version.freeze()))
cacheKey = hashlib.sha1(cacheKey).hexdigest()
if label in branchMap:
branchId, oldKey = branchMap[label]
if oldKey == cacheKey:
log.debug("Skipping label %s due to matching hash", label)
return
self._syncBranch(handle, version, branchMap, cacheKey)
def _syncBranch(self, handle, version, branchMap, cacheKey):
"""Synchronize the database branch and stages to the ones enumerated in
the product definition."""
cu = self.db.cursor()
label = str(version.trailingLabel())
pd = proddef.ProductDefinition()
pd.setBaseLabel(label)
try:
pd.loadFromRepository(self.client)
except proddef.ProductDefinitionFileNotFoundError:
return
if pd.getProductDefinitionLabel() != label:
# baselabel does not match
log.info("Product definition on label %s has base label %s; not "
"synchronizing into database", label,
pd.getProductDefinitionLabel())
return
platformLabel = pd.getPlatformSourceLabel()
platformId = self.getPlatformMap().get(platformLabel)
sourceGroup = pd.getSourceGroup() or pd.getImageGroup()
fields = {
'projectId': handle.projectId,
'namespace': pd.getConaryNamespace(),
'name': pd.getProductVersion(),
'description': pd.getProductDescription(),
'source_group': sourceGroup,
'platform_id': platformId,
'platform_label': platformLabel,
'cache_key': cacheKey,
}
items = fields.items()
if label in branchMap:
branchId, _ = branchMap[label]
setters = ', '.join('%s = ?' % x[0] for x in items)
values = [x[1] for x in items] + [label]
cu.execute(("UPDATE ProductVersions SET %s WHERE label = ?"
% setters), tuple(values))
log.info("Updated branch information for label %s", label)
else:
items.append(('label', label))
names = ', '.join(x[0] for x in items)
placeholders = ', '.join('?' for x in items)
values = [x[1] for x in items]
cu.execute("INSERT INTO ProductVersions (%s) VALUES (%s)"
% (names, placeholders), tuple(values))
branchId = cu.lastid()
log.info("Created branch information for label %s", label)
cu.execute("""SELECT name, stage_id FROM project_branch_stage
WHERE project_branch_id = ?""", branchId)
sqlStages = dict(cu)
pdStages = [x.name for x in pd.getStages()]
for stage in set(pdStages) - set(sqlStages):
isPromotable = (stage != pdStages[-1] and not handle.isExternal)
cu.execute("""INSERT INTO project_branch_stage (name, label,
project_branch_id, project_id, promotable, created_date)
VALUES (?, ?, ?, ?, ?, now())""",
(stage, pd.getLabelForStage(stage),
branchId, handle.projectId, isPromotable))
log.info("Created stage information for stage %s on label %s",
stage, label)
for stage in set(pdStages) & set(sqlStages):
isPromotable = (stage != pdStages[-1] and not handle.isExternal)
cu.execute("""UPDATE project_branch_stage SET name = ?, label = ?,
project_id = ?, promotable = ? WHERE stage_id = ?""",
(stage, pd.getLabelForStage(stage),
handle.projectId, isPromotable, sqlStages[stage]))
log.info("Updated stage information for stage %s on label %s",
stage, label)
for stage in set(sqlStages) - set(pdStages):
cu.execute("DELETE FROM project_branch_stage WHERE stage_id = ?",
sqlStages[stage])
log.info("Deleted stage information for stage %s on label %s",
stage, label)
cu.execute("""UPDATE querysets_queryset SET tagged_date = NULL
WHERE resource_type = 'project_branch_stage'
OR resource_type = 'project'""")
def getPlatformMap(self):
if self._platformMap is None:
cu = self.db.cursor()
cu.execute("SELECT label, platformId FROM Platforms")
self._platformMap = dict(cu)
return self._platformMap
if __name__ == '__main__':
sys.exit(Script().run())
|
Python
| 0
|
@@ -1247,32 +1247,53 @@
syncAll(self):%0A
+ seen = set()%0A
for hand
@@ -1336,16 +1336,111 @@
ries():%0A
+ if handle.fqdn in seen:%0A continue%0A seen.add(handle.fqdn)%0A
|
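The fix above handles overlapping external projects by deduplicating repositories on FQDN before syncing. The same dedup-by-key pattern in isolation (Handle here is a stand-in class, not the rmake one):

class Handle(object):
    def __init__(self, fqdn):
        self.fqdn = fqdn

def sync_all(handles):
    seen = set()
    for handle in handles:
        if handle.fqdn in seen:
            continue          # already synced this repository
        seen.add(handle.fqdn)
        print('syncing %s' % handle.fqdn)

sync_all([Handle('a.example.com'), Handle('a.example.com'), Handle('b.example.com')])
# syncs a.example.com and b.example.com once each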
b2143f62c79c30b7fd2a365894dddf86a24d39fe
|
fix save_fig
|
prediction/anf_py/remodel.py
|
prediction/anf_py/remodel.py
|
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
# Save figure for 1-dimension data
def save_figures(data, label,
x_label, y_label,
path):
array = np.asarray(data)
x = np.arange(1, array.shape[0] + 1)
plt.plot(x, array, label=label)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.legend()
plt.savefig(path)
# Initialize variables as Premise Parameter
def premise_parameter(para_shape,
min_mu=10.0, max_mu=15.0,
min_sigma=5.0, max_sigma=10.0):
"""
:param para_shape:
:param min_mu:
:param max_mu:
:param min_sigma:
:param max_sigma:
:return:
# """
para_init = {
'mu': tf.Variable(tf.random_uniform(para_shape, minval=min_mu, maxval=max_mu)),
'sigma': tf.Variable(tf.random_uniform(para_shape, minval=min_sigma, maxval=max_sigma))
}
return para_init
# Adding timestamp for writer
def writer(context):
print(f"{datetime.now()}: {context}")
# Initialize variables as Consequence Parameter
def consequence_parameter(para_shape):
para_init = tf.random_normal(para_shape)
return tf.Variable(para_init)
class ANFIS:
def __init__(self,
rule_number=5, window_size=20):
"""
        :param rule_number: Number of rules in the Takagi-Sugeno network model
        :param window_size: Sampling window size for the input
"""
self.rule_number = rule_number
self.window_size = window_size
self.premise_shape = [rule_number, window_size]
self.consequence_shape_weights = [window_size, rule_number]
self.consequence_shape_bias = [1, rule_number]
self.w_fuzz = premise_parameter(self.premise_shape)
self.weights = consequence_parameter(self.consequence_shape_weights)
self.bias = consequence_parameter(self.consequence_shape_bias)
def output(self, x: np.ndarray):
"""
Show list of outputs from list of inputs
:param x:
:return output:
"""
# Reshape
with tf.name_scope("reshape"):
x_input = tf.tile(x, [1, self.rule_number, 1])
# Fuzzification Layer
with tf.name_scope('layer_1'):
fuzzy_sets = tf.exp(- tf.divide(tf.square(tf.subtract(x_input, self.w_fuzz['mu']) / 2.0),
tf.square(self.w_fuzz['sigma'])))
# Rule-set Layer
with tf.name_scope('layer_2'):
fuzzy_rules = tf.reduce_prod(fuzzy_sets, axis=2)
# Normalization Layer
with tf.name_scope('layer_3'):
sum_fuzzy_rules = tf.reduce_sum(fuzzy_rules, axis=1)
normalized_fuzzy_rules = tf.divide(fuzzy_rules, tf.reshape(sum_fuzzy_rules, (-1, 1)))
# Defuzzification Layer and Output Layer
with tf.name_scope('layer_4_5'):
f = tf.add(tf.matmul(tf.reshape(x, (-1, self.window_size)), self.weights), self.bias)
output = tf.reduce_sum(tf.multiply(normalized_fuzzy_rules, f), axis=1)
return tf.reshape(output, (-1, 1))
    # Training ANFIS model
def train(self,
x_train, y_train,
epoch=10000, rate=1e-2,
tracking_loss=False,
load_path=None, save_path=None
):
"""
:param x_train: Inputs to train
:param y_train: Labels to train
:param epoch: Number epoch to train
:param rate: Learning rate to train
        :param tracking_loss: Flag to enable tracking of loss values
        :param load_path: Path to load a previously saved model from, if any
        :param save_path: Specified path to save the model to
:return:
"""
# Creating Placeholder
writer("Creating Placeholder ... ")
x = tf.placeholder(dtype=tf.float32, shape=[None, 1, self.window_size])
y = tf.placeholder(dtype=tf.float32, shape=[None, 1])
# Creating cost and optimizer
writer("Creating cost and optimizer")
cost = tf.reduce_mean(tf.squared_difference(self.output(x), y))
optimizer = tf.train.AdamOptimizer(learning_rate=rate).minimize(cost)
saver = tf.train.Saver()
# Check tracking_loss flags
track_list = np.empty((0,))
# Initializing session
with tf.Session() as sess:
# Check Model path Loading
if load_path is not None:
saver.restore(sess, load_path)
# Start training
sess.run(tf.global_variables_initializer())
writer("Starting train ... ")
for e in range(1, epoch + 1):
sess.run(optimizer, feed_dict={x: x_train, y: y_train})
c = sess.run(cost, feed_dict={x: x_train, y: y_train})
writer(f"{e}: {c}")
                # Append new loss value to track_list
if tracking_loss:
track_list = np.append(track_list, c)
# Check save_path
if save_path is not None:
saver.save(sess, save_path)
# Saving figures
fig_path = f"{save_path}_tracking.png"
writer(f"Saving tracking figures to {fig_path} ")
save_figures(data=track_list, label="Loss_function",
x_label='epoch', y_label='loss value',
path=fig_path)
# Compute loss from input data and compare output with labels
def loss(self,
x_test, y_test,
load_path=None):
"""
        :param x_test: Input to execute
        :param y_test: Label to compare against
:param load_path: Load model from this path
:return: loss function
"""
x = tf.placeholder(dtype=tf.float32, shape=[None, 1, self.window_size])
y = tf.placeholder(dtype=tf.float32, shape=[None, 1])
cost = tf.reduce_mean(tf.squared_difference(self.output(x), y))
saver = tf.train.Saver()
with tf.Session() as sess:
# Check Model path Loading
sess.run(tf.global_variables_initializer())
if load_path is not None:
saver.restore(sess, load_path)
# op = sess.run(self.output(x_), feed_dict={x_: x})
mse = sess.run(cost, feed_dict={x: x_test, y: y_test})
writer(f"mse: {mse}")
return mse
|
Python
| 0.000001
|
@@ -100,16 +100,43 @@
as tf%0A%0A
+plt.switch_backend('agg')%0A%0A
%0A# Save
|
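The fuzzification layer above computes Gaussian memberships per rule and multiplies them along the window. A plain-numpy sketch of the same computation so the shapes are easy to inspect (it mirrors the record's formula as written, including the division by 2 inside the square):

import numpy as np

rule_number, window_size = 5, 20
x = np.random.rand(1, window_size)                             # one input sample
mu = np.random.uniform(10.0, 15.0, (rule_number, window_size))
sigma = np.random.uniform(5.0, 10.0, (rule_number, window_size))

x_tiled = np.tile(x, (rule_number, 1))                         # the "reshape" layer
fuzzy_sets = np.exp(-np.square((x_tiled - mu) / 2.0) / np.square(sigma))
fuzzy_rules = fuzzy_sets.prod(axis=1)                          # firing strength per rule
print(fuzzy_rules.shape)                                       # (5,)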
85ece96a2dd566b19f88686b54ea903d9850abd6
|
Introduce MapOptions and MapPlot abstract base classes
|
bokeh/models/map_plots.py
|
bokeh/models/map_plots.py
|
""" Models for displaying maps in Bokeh plots.
"""
from __future__ import absolute_import
from ..properties import HasProps
from ..properties import Enum, Float, Instance, Int, JSON
from ..enums import MapType
from ..validation.warnings import MISSING_RENDERERS, NO_GLYPH_RENDERERS
from ..validation.errors import REQUIRED_RANGE
from .. import validation
from .plots import Plot
class GMapOptions(HasProps):
""" Options for GMapPlot objects.
"""
lat = Float(help="""
The latitude where the map should be centered.
""")
lng = Float(help="""
The longitude where the map should be centered.
""")
zoom = Int(12, help="""
The initial zoom level to use when displaying the GMapPlot.
""")
map_type = Enum(MapType, help="""
The `map type`_ to use for the GMapPlot.
.. _map type: https://developers.google.com/maps/documentation/javascript/reference#MapTypeId
""")
styles = JSON(help="""
A JSON array of `map styles`_ to use for the GMapPlot. Many example styles can
`be found here`_.
.. _map styles: https://developers.google.com/maps/documentation/javascript/reference#MapTypeStyle
.. _be found here: https://snazzymaps.com
""")
class GMapPlot(Plot):
""" A Bokeh Plot with a `Google Map`_ displayed underneath.
.. _Google Map: https://www.google.com/maps/
"""
# TODO (bev) map plot might not have these
@validation.error(REQUIRED_RANGE)
def _check_required_range(self):
pass
@validation.warning(MISSING_RENDERERS)
def _check_missing_renderers(self):
pass
@validation.warning(NO_GLYPH_RENDERERS)
def _check_no_glyph_renderers(self):
pass
map_options = Instance(GMapOptions, help="""
Options for displaying the plot.
""")
class GeoJSOptions(HasProps):
""" Options for GeoJSPlot objects.
"""
lat = Float(help="""
The latitude where the map should be centered.
""")
lng = Float(help="""
The longitude where the map should be centered.
""")
zoom = Int(12, help="""
The initial zoom level to use when displaying the GeoJSPlot.
""")
class GeoJSPlot(Plot):
""" A Bokeh Plot with a `GeoJS Map`_ displayed underneath.
.. warning::
GeoJSPlot support should be considered experimental, a subject
to revision or removal.
.. _GeoJS Map: https://github.com/OpenGeoscience/geojs
"""
# TODO (bev) map plot might not have these
@validation.error(REQUIRED_RANGE)
def _check_required_range(self):
pass
@validation.warning(MISSING_RENDERERS)
def _check_missing_renderers(self):
pass
@validation.warning(NO_GLYPH_RENDERERS)
def _check_no_glyph_renderers(self):
pass
map_options = Instance(GeoJSOptions, help="""
Options for displaying the plot.
""")
|
Python
| 0
|
@@ -118,16 +118,26 @@
HasProps
+, abstract
%0Afrom ..
@@ -386,23 +386,32 @@
t Plot%0A%0A
+@abstract%0A
class
-G
MapOptio
@@ -436,35 +436,50 @@
%22%22%22
-Options for GMapPlot object
+Abstract base class for map options' model
s.%0A%0A
@@ -746,25 +746,285 @@
the
-GMapPlot.
+map.%0A %22%22%22)%0A%0A@abstract%0Aclass MapPlot(Plot):%0A %22%22%22 Abstract base class for map plot models.%0A%0A %22%22%22%0A%0A map_options = Instance(MapOptions, help=%22%22%22%0A Options for displaying the plot.%0A %22%22%22)%0A%0Aclass GMapOptions(MapOptions):%0A %22%22%22 Options for GMapPlot objects.%0A
%0A %22%22%22
)%0A%0A
@@ -1015,25 +1015,24 @@
ts.%0A%0A %22%22%22
-)
%0A%0A map_ty
@@ -1514,32 +1514,35 @@
%0Aclass GMapPlot(
+Map
Plot):%0A %22%22%22 A
@@ -2093,39 +2093,41 @@
ss GeoJSOptions(
-HasProp
+MapOption
s):%0A %22%22%22 Opti
@@ -2167,284 +2167,8 @@
%22%22%0A%0A
- lat = Float(help=%22%22%22%0A The latitude where the map should be centered.%0A %22%22%22)%0A%0A lng = Float(help=%22%22%22%0A The longitude where the map should be centered.%0A %22%22%22)%0A%0A zoom = Int(12, help=%22%22%22%0A The initial zoom level to use when displaying the GeoJSPlot.%0A %22%22%22)%0A%0A
clas
@@ -2175,24 +2175,27 @@
s GeoJSPlot(
+Map
Plot):%0A %22
|
a76aed1fd8410e4dee300b7c7c5665885c8e61e0
|
Add a sanity check if all websocket connections were closed
|
bokeh/server/wsmanager.py
|
bokeh/server/wsmanager.py
|
import uuid
import logging
from .. import protocol
from flask import request
log = logging.getLogger(__name__)
class MultiDictionary(object):
def __init__(self):
self.dict = {}
def add(self, k, v):
self.dict.setdefault(k, set()).add(v)
def remove_val(self, k, v):
self.dict.setdefault(k, set()).remove(v)
if len(self.dict[k]) == 0:
self.remove(k)
def remove(self, k):
del self.dict[k]
def get(self, *args):
return self.dict.get(*args)
class WebSocketManager(object):
def __init__(self):
self.sockets = {}
self.topic_clientid_map = MultiDictionary()
self.clientid_topic_map = MultiDictionary()
self.auth_functions = {}
def remove_clientid(self, clientid):
topics = self.clientid_topic_map.get(clientid, [])
for topic in topics:
self.topic_clientid_map.remove_val(topic, clientid)
def remove_topic(self, topic):
clientids = self.topic_clientid_map.get(topic)
for clientid in clientids:
self.clientid_topic_map.remove_val(clientid, topic)
def subscribe_socket(self, socket, topic, clientid=None):
if clientid is None :
clientid = str(uuid.uuid4())
self.subscribe(clientid, topic)
self.add_socket(socket, clientid)
def can_subscribe(self, clientid, topic):
#auth goes here
return True
def register_auth(self, authtype, func):
self.auth_functions[authtype] = func
def auth(self, authtoken, topic):
#authtoken - some string, whatever you want it to be
#topic - string topic, of syntax type:value.
#topic type maps to auth function
authtype, topic = topic.split(":", 1)
if self.auth_functions.get(authtype):
return self.auth_functions[authtype](authtoken, topic)
else:
return True
def subscribe(self, clientid, topic):
if self.can_subscribe(clientid, topic):
self.topic_clientid_map.add(topic, clientid)
self.clientid_topic_map.add(clientid, topic)
def add_socket(self, socket, clientid):
self.sockets[clientid] = socket
def remove_socket(self, clientid):
del self.sockets[clientid]
def send(self, topic, msg, exclude=None):
if exclude is None:
exclude = set()
for clientid in tuple(self.topic_clientid_map.get(topic, [])):
if clientid in exclude:
continue
socket = self.sockets[clientid]
try:
socket.send(topic + ":" + msg)
except Exception as e: #what exception is this?if a client disconnects
log.exception(e)
self.remove_socket(clientid)
self.remove_clientid(clientid)
def run_socket(socket, manager, clientid=None):
clientid = clientid if clientid is not None else str(uuid.uuid4())
log.debug("CLIENTID: %s" % clientid)
manager.add_socket(socket, clientid)
while True:
msg = socket.receive()
if msg is None:
manager.remove_socket(clientid)
manager.remove_clientid(clientid)
break
msgobj = protocol.deserialize_web(msg)
msgtype = msgobj.get('msgtype')
if msgtype == 'subscribe':
auth = msgobj['auth']
topic = msgobj['topic']
if manager.auth(auth, topic):
manager.subscribe(clientid, topic)
msg = protocol.serialize_web(protocol.status_obj(['subscribesuccess', topic, clientid]))
socket.send(topic + ":" + msg)
else:
                msg = protocol.serialize_web(protocol.error_obj('unauthorized'))
socket.send(topic + ":" + msg)
break
def pub_from_redis(redisconn, wsmanager):
ps = redisconn.pubsub()
ps.psubscribe("*")
for message in ps.listen():
wsmanager.send(message['channel'], message['data'])
|
Python
| 0.000003
|
@@ -5,16 +5,30 @@
rt uuid%0A
+import atexit%0A
import l
@@ -757,16 +757,183 @@
+atexit.register(self._atexit)%0A%0A def _atexit(self):%0A if len(self.sockets) != 0:%0A log.warning(%22Not all websocket connections were closed properly%22)%0A
%0A def
|
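The fix above registers the sanity check with atexit so it fires on interpreter shutdown. The mechanism in isolation (the leftover sockets dict is illustrative):

import atexit

sockets = {'client-1': object()}  # a connection that was never removed

def _atexit():
    if len(sockets) != 0:
        print("Not all websocket connections were closed properly")

atexit.register(_atexit)
# The warning prints when this process exits, since sockets is non-empty.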
f8a8e936b2f6f3edcbc68b69056271975626631c
|
Terminate the build log recorder if the parent builder disappears
|
rmake/worker/recorder.py
|
rmake/worker/recorder.py
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncore
import os
import socket
from conary.lib import util
from rmake.lib import procutil
from rmake.lib import server
class BuildLogRecorder(asyncore.dispatcher, server.Server):
def __init__(self, key=None):
server.Server.__init__(self)
self.host = procutil.getNetName()
self.port = None
self.logPath = None
self.logFd = None
self.key = key
def _exit(self, rc=0):
return os._exit(rc)
def closeOtherFds(self):
for fd in range(3,256):
if fd not in (self.logFd, self._fileno):
try:
os.close(fd)
except OSError, e:
pass
def attach(self, trove, map=None):
asyncore.dispatcher.__init__(self, None, map)
self.trove = trove
self.openSocket()
self.openLogFile()
def handleRequestIfReady(self, sleepTime=0.1):
asyncore.poll2(timeout=sleepTime, map=self._map)
def getPort(self):
return self.port
def getHost(self):
return self.host
def getLogPath(self):
return self.logPath
def openLogFile(self):
util.mkdirChain(os.path.dirname(self.trove.logPath))
        fd = os.open(self.trove.logPath, os.O_WRONLY | os.O_CREAT | os.O_APPEND)
self.logPath = self.trove.logPath
self.logFd = fd
def openSocket(self):
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.bind(('', 0))
self.listen(1)
self.port = self.socket.getsockname()[1]
def handle_accept(self):
csock, caddr = self.accept()
if self.key:
key = csock.recv(len(self.key) + 1)
if key != (self.key + '\n'):
csock.close()
csock.send('OK\n')
csock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
# we only need to accept one request.
self.del_channel()
self.set_socket(csock)
self.accepting = False
self.connected = True
def close(self):
asyncore.dispatcher.close(self)
if self.logFd:
os.close(self.logFd)
self.logFd = None
self._halt = True
def handle_read(self):
rv = self.socket.recv(4096)
if not rv:
self.connected = False
self.close()
else:
os.write(self.logFd, rv)
def _signalHandler(self, sigNum, frame):
server.Server._signalHandler(self, sigNum, frame)
# we got a signal, but have not finished reading yet.
if self.connected and self.logFd:
# keep reading until the socket is closed
# or until we're killed again.
self._halt = False
def writable(self):
return False
|
Python
| 0.000002
|
@@ -771,16 +771,17 @@
erver):%0A
+%0A
def
@@ -986,16 +986,41 @@
ey = key
+%0A self.ppid = None
%0A%0A de
@@ -1296,16 +1296,49 @@
pass
+%0A self.ppid = os.getppid()
%0A%0A de
@@ -1554,11 +1554,9 @@
ime=
-0.1
+2
):%0A
@@ -1610,16 +1610,103 @@
lf._map)
+%0A if os.getppid() != self.ppid:%0A # Orphaned!%0A self.close()
%0A%0A de
|
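The fix above detects an orphaned recorder by comparing the current parent PID against the one captured at attach time; when the parent dies, the child is reparented (typically to PID 1) and getppid() changes. The check in isolation (OrphanWatch is an illustrative name):

import os

class OrphanWatch(object):
    def __init__(self):
        self.ppid = os.getppid()   # remember the parent at setup time

    def orphaned(self):
        # True once the original parent is gone and we were reparented.
        return os.getppid() != self.ppid

watch = OrphanWatch()
print(watch.orphaned())  # False while the parent process is still alive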
b20853aa4c41535ff5277d347105e9ec55bc473d
|
Fix work on MySQL backend
|
mistral/db/sqlalchemy/models.py
|
mistral/db/sqlalchemy/models.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2013 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sqlalchemy as sa
import uuid
from mistral.db.sqlalchemy import model_base as mb
from mistral.db.sqlalchemy import types as st
## Helpers
def _generate_unicode_uuid():
return unicode(str(uuid.uuid4()))
def _id_column():
return sa.Column(sa.String(36),
primary_key=True,
default=_generate_unicode_uuid)
class Trigger(mb.MistralBase):
"""Contains all info about trigger."""
__tablename__ = 'triggers'
__table_args__ = (
sa.UniqueConstraint('name'),
)
id = _id_column()
name = sa.Column(sa.String(80), nullable=False)
pattern = sa.Column(sa.String(20), nullable=False)
next_execution_time = sa.Column(sa.DateTime, nullable=False)
workbook_name = sa.Column(sa.String(80), nullable=False)
class WorkflowExecution(mb.MistralBase):
"""Contains info about particular workflow execution."""
__tablename__ = 'workflow_executions'
id = _id_column()
workbook_name = sa.Column(sa.String(80))
task = sa.Column(sa.String(80))
state = sa.Column(sa.String(20))
context = sa.Column(st.JsonDictType())
class Workbook(mb.MistralBase):
"""Contains info about workbook (including definition in Mistral DSL)."""
__tablename__ = 'workbooks'
__table_args__ = (
sa.UniqueConstraint('name'),
)
id = _id_column()
name = sa.Column(sa.String(80), primary_key=True)
definition = sa.Column(sa.String(), nullable=True)
description = sa.Column(sa.String())
tags = sa.Column(st.JsonListType())
scope = sa.Column(sa.String())
project_id = sa.Column(sa.String())
trust_id = sa.Column(sa.String())
class Task(mb.MistralBase):
"""Contains info about particular task."""
__tablename__ = 'tasks'
id = _id_column()
name = sa.Column(sa.String(80))
requires = sa.Column(st.JsonDictType())
workbook_name = sa.Column(sa.String(80))
execution_id = sa.Column(sa.String(36))
description = sa.Column(sa.String())
task_spec = sa.Column(st.JsonDictType())
action_spec = sa.Column(st.JsonDictType())
state = sa.Column(sa.String(20))
tags = sa.Column(st.JsonListType())
# Data Flow properties.
in_context = sa.Column(st.JsonDictType())
parameters = sa.Column(st.JsonDictType())
output = sa.Column(st.JsonDictType())
# Runtime context like iteration_no of a repeater.
# Effectively internal engine properties which will be used to determine
# execution of a task.
task_runtime_context = sa.Column(st.JsonDictType())
|
Python
| 0.000001
|
@@ -2072,22 +2072,20 @@
lumn(sa.
-String
+Text
(), null
@@ -2125,32 +2125,35 @@
olumn(sa.String(
+200
))%0A tags = sa
@@ -2207,24 +2207,26 @@
n(sa.String(
+80
))%0A proje
@@ -2245,32 +2245,34 @@
olumn(sa.String(
+80
))%0A trust_id
@@ -2293,16 +2293,18 @@
.String(
+80
))%0A%0A%0Acla
@@ -2634,16 +2634,19 @@
.String(
+200
))%0A t
|
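The diff above works because MySQL's VARCHAR requires an explicit length, so sa.String() with no argument cannot be rendered as DDL on that dialect, while sa.Text maps to an unbounded TEXT column. A hedged sketch compiling an equivalent table for the MySQL dialect (the table shape is trimmed down from the record):

import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from sqlalchemy.schema import CreateTable

metadata = sa.MetaData()
workbooks = sa.Table(
    'workbooks', metadata,
    sa.Column('id', sa.String(36), primary_key=True),
    sa.Column('definition', sa.Text(), nullable=True),   # unbounded TEXT
    sa.Column('description', sa.String(200)),            # VARCHAR(200)
)
# Compiles cleanly for MySQL; a length-less sa.String() column would raise
# a compile-time error here instead.
print(CreateTable(workbooks).compile(dialect=mysql.dialect()))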
54d9c6a9c21e5e4856c8bd890add8dfa4c992ced
|
Add from _peak_finding import * to signal.__init__
|
scipy/signal/__init__.py
|
scipy/signal/__init__.py
|
"""
=======================================
Signal processing (:mod:`scipy.signal`)
=======================================
.. module:: scipy.signal
Convolution
===========
.. autosummary::
:toctree: generated/
convolve -- N-dimensional convolution.
correlate -- N-dimensional correlation.
fftconvolve -- N-dimensional convolution using the FFT.
convolve2d -- 2-dimensional convolution (more options).
correlate2d -- 2-dimensional correlation (more options).
sepfir2d -- Convolve with a 2-D separable FIR filter.
B-splines
=========
.. autosummary::
:toctree: generated/
bspline -- B-spline basis function of order n.
gauss_spline -- Gaussian approximation to the B-spline basis function.
cspline1d -- Coefficients for 1-D cubic (3rd order) B-spline.
qspline1d -- Coefficients for 1-D quadratic (2nd order) B-spline.
cspline2d -- Coefficients for 2-D cubic (3rd order) B-spline.
qspline2d -- Coefficients for 2-D quadratic (2nd order) B-spline.
spline_filter -- Smoothing spline (cubic) filtering of a rank-2 array.
Filtering
=========
.. autosummary::
:toctree: generated/
order_filter -- N-dimensional order filter.
medfilt -- N-dimensional median filter.
medfilt2d -- 2-dimensional median filter (faster).
wiener -- N-dimensional wiener filter.
symiirorder1 -- 2nd-order IIR filter (cascade of first-order systems).
symiirorder2 -- 4th-order IIR filter (cascade of second-order systems).
lfilter -- 1-dimensional FIR and IIR digital linear filtering.
lfiltic -- Construct initial conditions for `lfilter`.
lfilter_zi -- Compute an initial state zi for the lfilter function that
-- corresponds to the steady state of the step response.
filtfilt -- A forward-backward filter.
deconvolve -- 1-d deconvolution using lfilter.
hilbert -- Compute the analytic signal of a 1-d signal.
get_window -- Create FIR window.
decimate -- Downsample a signal.
detrend -- Remove linear and/or constant trends from data.
resample -- Resample using Fourier method.
Filter design
=============
.. autosummary::
:toctree: generated/
bilinear -- Digital filter from an analog filter using
-- the bilinear transform.
firwin -- Windowed FIR filter design, with frequency response
-- defined as pass and stop bands.
firwin2 -- Windowed FIR filter design, with arbitrary frequency
-- response.
freqs -- Analog filter frequency response.
freqz -- Digital filter frequency response.
iirdesign -- IIR filter design given bands and gains.
iirfilter -- IIR filter design given order and critical frequencies.
kaiser_atten -- Compute the attenuation of a Kaiser FIR filter, given
-- the number of taps and the transition width at
-- discontinuities in the frequency response.
kaiser_beta -- Compute the Kaiser parameter beta, given the desired
-- FIR filter attenuation.
kaiserord -- Design a Kaiser window to limit ripple and width of
-- transition region.
remez -- Optimal FIR filter design.
unique_roots -- Unique roots and their multiplicities.
residue -- Partial fraction expansion of b(s) / a(s).
residuez -- Partial fraction expansion of b(z) / a(z).
invres -- Inverse partial fraction expansion.
Matlab-style IIR filter design
==============================
.. autosummary::
:toctree: generated/
butter -- Butterworth
buttord
cheby1 -- Chebyshev Type I
cheb1ord
cheby2 -- Chebyshev Type II
cheb2ord
ellip -- Elliptic (Cauer)
ellipord
bessel -- Bessel (no order selection available -- try buttord)
Continuous-Time Linear Systems
==============================
.. autosummary::
:toctree: generated/
lti -- linear time invariant system object.
lsim -- continuous-time simulation of output to linear system.
lsim2 -- like lsim, but `scipy.integrate.odeint` is used.
impulse -- impulse response of linear, time-invariant (LTI) system.
impulse2 -- like impulse, but `scipy.integrate.odeint` is used.
step -- step response of continuous-time LTI system.
step2 -- like step, but `scipy.integrate.odeint` is used.
Discrete-Time Linear Systems
============================
dlsim -- simulation of output to a discrete-time linear system.
dimpulse -- impulse response of a discrete-time LTI system.
dstep -- step response of a discrete-time LTI system.
LTI Representations
===================
.. autosummary::
:toctree: generated/
tf2zpk -- transfer function to zero-pole-gain.
zpk2tf -- zero-pole-gain to transfer function.
tf2ss -- transfer function to state-space.
ss2tf -- state-space to transfer function.
zpk2ss -- zero-pole-gain to state-space.
ss2zpk -- state-space to zero-pole-gain.
cont2discrete -- continuous-time to discrete-time LTI conversion.
Waveforms
=========
.. autosummary::
:toctree: generated/
chirp -- Frequency swept cosine signal, with several freq functions.
gausspulse -- Gaussian modulated sinusoid
sawtooth -- Periodic sawtooth
square -- Square wave
sweep_poly -- Frequency swept cosine signal; freq is arbitrary polynomial
Window functions
================
.. autosummary::
:toctree: generated/
get_window -- Return a window of a given length and type.
barthann -- Bartlett-Hann window
bartlett -- Bartlett window
blackman -- Blackman window
blackmanharris -- Minimum 4-term Blackman-Harris window
bohman -- Bohman window
boxcar -- Boxcar window
chebwin -- Dolph-Chebyshev window
flattop -- Flat top window
gaussian -- Gaussian window
general_gaussian -- Generalized Gaussian window
hamming -- Hamming window
hann -- Hann window
kaiser -- Kaiser window
nuttall -- Nuttall's minimum 4-term Blackman-Harris window
parzen -- Parzen window
slepian -- Slepian window
triang -- Triangular window
Wavelets
========
.. autosummary::
:toctree: generated/
cascade -- compute scaling function and wavelet from coefficients
daub -- return low-pass
morlet -- Complex Morlet wavelet.
qmf -- return quadrature mirror filter from low-pass
"""
import sigtools
from waveforms import *
# The spline module (a C extension) provides:
# cspline2d, qspline2d, sepfir2d, symiirord1, symiirord2
from spline import *
from bsplines import *
from cont2discrete import *
from dltisys import *
from filter_design import *
from fir_filter_design import *
from ltisys import *
from windows import *
from signaltools import *
from spectral import *
from wavelets import *
__all__ = filter(lambda s: not s.startswith('_'), dir())
from numpy.testing import Tester
test = Tester().test
|
Python
| 0
|
@@ -6665,16 +6665,105 @@
low-pass
+%0A ricker -- return ricker wavelet%0A cwt -- perform continuous wavelet transform
%0A%0A%22%22%22%0A%0Ai
@@ -7178,16 +7178,44 @@
import *
+%0Afrom _peak_finding import *
%0A%0A__all_
|
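Decoded from the hunks above: the Wavelets docstring section gains two entries and the import block gains the new submodule. A sketch of the module's tail after the commit (keeping the file's Python 2 implicit relative imports):

    cascade -- compute scaling function and wavelet from coefficients
    daub -- return low-pass
    ricker -- return ricker wavelet
    cwt -- perform continuous wavelet transform

...

from spectral import *
from wavelets import *
from _peak_finding import *

__all__ = filter(lambda s: not s.startswith('_'), dir())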
b2ca3cf7e6610bf0f3c6ac5aa79c3322c276c439
|
set the exit code to non-zero when contracts fail
|
scrapy/commands/check.py
|
scrapy/commands/check.py
|
from __future__ import print_function
from collections import defaultdict
from functools import wraps
from unittest import TextTestRunner
from scrapy.command import ScrapyCommand
from scrapy.contracts import ContractsManager
from scrapy.utils.misc import load_object
from scrapy.utils.spider import iterate_spider_output
from scrapy.utils.conf import build_component_list
def _generate(cb):
""" create a callback which does not return anything """
@wraps(cb)
def wrapper(response):
output = cb(response)
output = list(iterate_spider_output(output))
return wrapper
class Command(ScrapyCommand):
requires_project = True
default_settings = {'LOG_ENABLED': False}
def syntax(self):
return "[options] <spider>"
def short_desc(self):
return "Check spider contracts"
def add_options(self, parser):
ScrapyCommand.add_options(self, parser)
parser.add_option("-l", "--list", dest="list", action="store_true",
help="only list contracts, without checking them")
parser.add_option("-v", "--verbose", dest="verbose", default=1, action="count",
help="print all contract hooks")
def run(self, args, opts):
# load contracts
contracts = build_component_list(
self.settings['SPIDER_CONTRACTS_BASE'],
self.settings['SPIDER_CONTRACTS'],
)
self.conman = ContractsManager([load_object(c) for c in contracts])
self.results = TextTestRunner(verbosity=opts.verbose)._makeResult()
# contract requests
contract_reqs = defaultdict(list)
spman_cls = load_object(self.settings['SPIDER_MANAGER_CLASS'])
spiders = spman_cls.from_settings(self.settings)
for spider in args or spiders.list():
spider = spiders.create(spider)
requests = self.get_requests(spider)
if opts.list:
for req in requests:
contract_reqs[spider.name].append(req.callback.__name__)
elif requests:
crawler = self.crawler_process.create_crawler(spider.name)
crawler.crawl(spider, requests)
# start checks
if opts.list:
for spider, methods in sorted(contract_reqs.iteritems()):
print(spider)
for method in sorted(methods):
print(' * %s' % method)
else:
self.crawler_process.start()
self.results.printErrors()
def get_requests(self, spider):
requests = []
for key, value in vars(type(spider)).items():
if callable(value) and value.__doc__:
bound_method = value.__get__(spider, type(spider))
request = self.conman.from_method(bound_method, self.results)
if request:
request.callback = _generate(request.callback)
requests.append(request)
return requests
|
Python
| 0.000113
|
@@ -2502,16 +2502,85 @@
Errors()
+%0A self.exitcode = 0 if self.results.wasSuccessful() else 1
%0A%0A de
|
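Decoded from the hunk above, run() now reports contract failures through the process exit status. A sketch of the method's tail after the commit:

        else:
            self.crawler_process.start()
            self.results.printErrors()
            self.exitcode = 0 if self.results.wasSuccessful() else 1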
f131fc524d557312da010b22379d75d52637ebd1
|
missing comma
|
semantics.py
|
semantics.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Semantics, module for Odoo, Open Source Management Solution
# Copyright (C) 2014 InsPyration EURL (<http://www.inspyration.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
__author__ = "Sébastien CHAZALLET & Alicia FLOREZ"
__copyright__ = "Copyright 2014"
__credits__ = ["Sébastien CHAZALLET", "Alicia FLOREZ", "www.insPyration.fr", "www.formation-python.com"]
__license__ = "AGPL"
__version__ = "1.0"
__maintainer__ = "Alicia FLOREZ"
__email__ = "contact@inspyration.fr"
__status__ = "Production"
import openerp
from openerp.osv import fields, osv
#==============================================================================#
# class Field #
#==============================================================================#
class Field(osv.Model):
_name = 'semantics.field'
_description = "Field"
def _get_signifier_items(self, cr, uid, ids, field, arg, context=None):
result = {}
for field in self.browse(cr, uid, ids, context=context):
res = []
for signifier in field.signifier_ids:
res.append({'id': signifier.id, 'name': signifier.name})
result[field.id] = res
return result
_columns = {
'name': fields.char(
'Name',
size=256,
required=True,
select=True,
unique=True,
),
'signifier_ids': fields.one2many(
'semantics.signifier',
'field_id',
string="Signifiers"
),
'signifier_items': fields.function(
_get_signifier_items,
type="text",
string="Signifier Items",
),
'active': fields.boolean(
string="Active",
),
}
_defaults = {
'active': True,
}
#==============================================================================#
# class Signifier #
#==============================================================================#
class Signifier(osv.Model):
_name = 'semantics.signifier'
_description = "Signifier"
_columns = {
'name': fields.char(
'Name',
size=256,
required=True,
select=True,
unique=True,
),
'field_id': fields.many2one(
'semantics.field',
string="Field"
required=True,
),
'active': fields.boolean(
string="Active",
),
}
_defaults = {
'active': True,
}
|
Python
| 0.999998
|
@@ -3313,24 +3313,25 @@
ring=%22Field%22
+,
%0A
|
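Decoded from the hunk above, the fix restores the comma whose absence made the field_id declaration a syntax error. The corrected block:

        'field_id': fields.many2one(
            'semantics.field',
            string="Field",
            required=True,
        ),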
0e60e7b04d71149c39e8fe3a047de777cd381638
|
enable shell in subprocess
|
breakseq2/breakseq_top.py
|
breakseq2/breakseq_top.py
|
#!/usr/bin/env python
import logging
import os
import subprocess
import pysam
import preprocess_and_align
import breakseq_core
import breakseq_post
import compute_zygosity
import gen_vcf
import breakseq_index
from _version import __version__
def add_options(main_parser):
preprocess_and_align.add_options(main_parser)
breakseq_core.add_options(main_parser)
breakseq_post.add_options(main_parser)
compute_zygosity.add_options(main_parser)
gen_vcf.add_options(main_parser)
breakseq_index.add_options(main_parser)
main_parser.add_argument("--nthreads", help="Number of processes to use for parallelism", type=int, default=1)
main_parser.add_argument("--bams", help="Alignment BAMs", nargs="+", required=True, default=[])
main_parser.add_argument("--work", help="Working directory", default="work")
main_parser.add_argument("--chromosomes", nargs="+", help="List of chromosomes to process", default=[])
main_parser.add_argument("--reference", help="Reference FASTA", required=True)
main_parser.add_argument("--sample", help="Sample name. Leave unspecified to infer sample name from BAMs.")
main_parser.add_argument("--keep_temp", help="Keep temporary files", action="store_true")
main_parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
def infer_sample(bam):
samfile = pysam.Samfile(bam, "rb")
if "RG" not in samfile.header:
raise Exception("Unable to infer sample name from %s since RG is missing" % bam)
samples = list(set([item["SM"] for item in samfile.header["RG"]]))
if len(samples) > 1:
raise Exception("Multiple samples found: %s" % (", ".join(samples)))
samfile.close()
return samples[0]
def breakseq2_workflow(sample=None, bplib=None, bplib_gff=None, bwa=None, samtools=None, bams=[], work="work", chromosomes=[],
nthreads=1, min_span=breakseq_core.DEFAULT_MIN_SPAN,
min_overlap=compute_zygosity.DEFAULT_MIN_OVERLAP, reference=None, keep_temp=False, window=compute_zygosity.DEFAULT_WINDOW, junction_length=breakseq_index.DEFAULT_JUNCTION_LENGTH):
func_logger = logging.getLogger(breakseq2_workflow.__name__)
bams = [os.path.abspath(bam) for bam in bams]
if not bams:
func_logger.error("No BAMs specified so nothing to do")
return
if not sample:
sample = infer_sample(bams[0])
if not os.path.isdir(work):
func_logger.info("Created working directory %s" % work)
os.makedirs(work)
if bplib_gff:
bplib = os.path.join(work, "bplib.fa")
func_logger.info("Generating breakpoint-library using %s" % bplib_gff)
breakseq_index.generate_bplib(bplib_gff, reference, bplib, junction_length)
# Index the bplib
index_cmd = "{bwa} index {bplib}".format(bwa=bwa, bplib=bplib)
func_logger.info("Indexing {bplib} using {index_cmd}".format(bplib=bplib, index_cmd=index_cmd))
with open(os.path.join(work, "index.log"), "w") as index_log_fd:
subprocess.check_call(index_cmd, stderr=index_log_fd)
aligned_bams = preprocess_and_align.parallel_preprocess_and_align(bplib, bwa, samtools, bams, work, chromosomes,
nthreads, keep_temp)
if not aligned_bams:
func_logger.warn("Read-extraction and alignment generated nothing")
return
breakseq_core.breakseq_core(aligned_bams, "%s/breakseq.out" % work, min_span=min_span)
breakseq_post.generate_final_gff(["%s/breakseq.out" % work], "%s/breakseq.gff" % work)
compute_zygosity.compute_zygosity(bams, window, "%s/breakseq.gff" % work, "%s/breakseq_genotyped.gff" % work,
min_overlap)
gen_vcf.gff_to_vcf(reference, "%s/breakseq_genotyped.gff" % work, sample, "%s/breakseq.vcf" % work)
|
Python
| 0
|
@@ -3073,16 +3073,28 @@
dex_cmd,
+ shell=True,
stderr=
|
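Decoded from the hunk above. Because index_cmd is built as a single shell string rather than an argument list, subprocess.check_call needs shell=True to execute it:

    with open(os.path.join(work, "index.log"), "w") as index_log_fd:
        subprocess.check_call(index_cmd, shell=True, stderr=index_log_fd)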
e8b4f7d6917647d4158ed39991cd00cfe45a0264
|
Add some support for bigg.ucsd.edu/api/v2
|
cameo/webmodels.py
|
cameo/webmodels.py
|
# Copyright 2014 Novo Nordisk Foundation Center for Biosustainability, DTU.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
WebModels API
-------------
A high-level API for retrieving models from the
http://darwin.di.uminho.pt/models database
"""
from __future__ import absolute_import, print_function
import json
import requests
from pandas import DataFrame
import tempfile
import logging
logger = logging.getLogger(__name__)
class NotFoundException(Exception):
def __init__(self, type, index, *args, **kwargs):
message = "Could not retrieve %s for entry with index %i" % (type, index)
Exception.__init__(self, message, *args, **kwargs)
def index_models(host="http://darwin.di.uminho.pt/models"):
"""
Retrieves a summary of all models in the database.
Parameters
----------
host: the service host (optional, default: http://darwin.di.uminho.pt/models)
Returns
-------
pandas.DataFrame
summary of the models in the database
"""
uri = host + "/models.json"
try:
response = requests.get(uri)
except requests.ConnectionError as e:
logger.error("Cannot reach %s. Are you sure that you are connected to the internet?" % host)
raise e
if response.status_code == 200:
response = json.loads(response.text)
return DataFrame(response, columns=["id", "name", "doi", "author", "year", "formats", "organism", "taxonomy"])
else:
raise Exception("Could not index available models. %s returned status code %d" % (host, response.status_code))
def get_sbml_file(index, host="http://darwin.di.uminho.pt/models"):
temp = tempfile.NamedTemporaryFile()
uri = host + "/models/%i.sbml" % index
response = requests.get(uri)
if response.status_code == 200:
temp.write(response.text.encode('utf-8'))
temp.flush()
return temp
raise NotFoundException("sbml", index)
if __name__ == "__main__":
print(index_models())
from cameo import load_model
model = load_model(get_sbml_file(2))
print(model.objective)
|
Python
| 0
|
@@ -2423,16 +2423,669 @@
index)%0A%0A
+def index_models_bigg():%0A response = requests.get('http://bigg.ucsd.edu/api/v2/models')%0A if response.ok:%0A return DataFrame.from_dict(response.json()%5B'results'%5D)%0A else:%0A raise Exception(%22Could not index available models. bigg.ucsd.edu returned status code %7B%7D%22.format(response.status_code))%0A%0Adef get_model_from_bigg(id):%0A response = requests.get('http://bigg.ucsd.edu/api/v2/models/%7B%7D/download'.format(id))%0A if response.ok:%0A return DataFrame.from_dict(response.json()%5B'results'%5D)%0A else:%0A raise Exception(%22Could not download model %7B%7D. bigg.ucsd.edu returned status code %7B%7D%22.format(id, response.status_code))%0A%0A%0A
if __nam
|
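Decoded from the hunk above, the two helpers inserted before the __main__ block (note that get_model_from_bigg reuses the DataFrame handling verbatim, exactly as committed):

def index_models_bigg():
    response = requests.get('http://bigg.ucsd.edu/api/v2/models')
    if response.ok:
        return DataFrame.from_dict(response.json()['results'])
    else:
        raise Exception("Could not index available models. bigg.ucsd.edu returned status code {}".format(response.status_code))

def get_model_from_bigg(id):
    response = requests.get('http://bigg.ucsd.edu/api/v2/models/{}/download'.format(id))
    if response.ok:
        return DataFrame.from_dict(response.json()['results'])
    else:
        raise Exception("Could not download model {}. bigg.ucsd.edu returned status code {}".format(id, response.status_code))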
b60664cebec29dff0917fb13af270d3b47404256
|
set settings default
|
royalerant/royalerant.py
|
royalerant/royalerant.py
|
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2017 SML
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import os
from collections import defaultdict
import discord
import peony
from __main__ import send_cmd_help
from cogs.utils import checks
from cogs.utils.dataIO import dataIO
from discord.ext import commands
PATH = os.path.join("data", "royalerant")
JSON = os.path.join(PATH, "settings.json")
def nested_dict():
"""Recursively nested defaultdict."""
return defaultdict(nested_dict)
class RoyaleRant:
"""RoyaleRant Twitter client.
Users type !royalerant <message>, which gets broadcast to @RoyaleRant.
"""
def __init__(self, bot):
"""Init."""
self.bot = bot
self.settings = nested_dict()
self.settings.update(dataIO.load_json(JSON))
@property
def peony_client(self):
"""Return Twitter API instance."""
return peony.PeonyClient(**self.settings['twitter_api'])
@commands.group(pass_context=True)
async def royalerantset(self, ctx):
"""Settings."""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@checks.is_owner()
@royalerantset.command(name="twitterapi", pass_context=True)
async def royalerantset_twitterapi(self,
ctx, consumer_key=None, consumer_secret=None,
access_token=None, access_token_secret=None):
"""Twitter API settings"""
if not any([consumer_key, consumer_secret, access_token, access_token_secret]):
await send_cmd_help(ctx)
em = discord.Embed(title="RoyaleRant Settings")
for k, v in self.settings['twitter_api'].items():
em.add_field(name=k, value=v)
await self.bot.send_message(ctx.message.author, embed=em)
return
self.settings.update({
"twitter_api": {
"consumer_key": consumer_key,
"consumer_secret": consumer_secret,
"access_token": access_token,
"access_token_secret": access_token_secret
}
})
dataIO.save_json(JSON, self.settings)
await self.bot.say("Settings updated")
await self.bot.delete_message(ctx.message)
@commands.has_any_role("Rant")
@commands.command(aliases=['rrant'], pass_context=True, no_pm=True)
async def royalerant(self, ctx, *, msg):
"""Post a Tweet from @RoyaleRant."""
try:
resp = await self.peony_client.api.statuses.update.post(status=msg)
except peony.exceptions.PeonyException as e:
await self.bot.say("Error tweeting: {}".format(e.response))
return
url = "https://twitter.com/{0[user][screen_name]}/status/{0[id_str]}".format(resp)
await self.bot.say("Tweeted: <{}>".format(url))
def check_folder():
"""Check folder."""
os.makedirs(PATH, exist_ok=True)
def check_file():
"""Check files."""
if not dataIO.is_valid_json(JSON):
dataIO.save_json(JSON, {})
def setup(bot):
"""Setup."""
check_folder()
check_file()
n = RoyaleRant(bot)
bot.add_cog(n)
|
Python
| 0.000001
|
@@ -1792,16 +1792,340 @@
JSON))%0A%0A
+ if self.settings.get(%22twitter_api%22) is None:%0A self.settings%5B%22twitter_api%22%5D = %7B%0A %22consumer_key%22: None,%0A %22consumer_secret%22: None,%0A %22access_token%22: None,%0A %22access_token_secret%22: None%0A %7D%0A dataIO.save_json(JSON, self.settings)%0A%0A
@pro
|
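Decoded from the hunk above, __init__ now seeds a default twitter_api mapping so a fresh settings file cannot break later attribute access. A sketch of the constructor's tail:

        self.settings.update(dataIO.load_json(JSON))

        if self.settings.get("twitter_api") is None:
            self.settings["twitter_api"] = {
                "consumer_key": None,
                "consumer_secret": None,
                "access_token": None,
                "access_token_secret": None
            }
            dataIO.save_json(JSON, self.settings)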
2d51b0ee6c6b4b34863d745b86963d5af9e55dee
|
Use self.fail() in MiGdbSetShowTestCase.test_lldbmi_gdb_set_target_async_off (MI)
|
test/tools/lldb-mi/TestMiGdbSetShow.py
|
test/tools/lldb-mi/TestMiGdbSetShow.py
|
"""
Test lldb-mi -gdb-set and -gdb-show commands.
"""
import lldbmi_testcase
from lldbtest import *
import unittest2
class MiGdbSetShowTestCase(lldbmi_testcase.MiTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
@lldbmi_test
@expectedFailureWindows("llvm.org/pr22274: need a pexpect replacement for windows")
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
def test_lldbmi_gdb_set_target_async_default(self):
"""Test that 'lldb-mi --interpreter' switches to async mode by default."""
self.spawnLldbMi(args = None)
# Switch to sync mode
self.runCmd("-gdb-set target-async off")
self.expect("\^done")
self.runCmd("-gdb-show target-async")
self.expect("\^done,value=\"off\"")
# Test that -gdb-set switches to async by default
self.runCmd("-gdb-set target-async")
self.expect("\^done")
self.runCmd("-gdb-show target-async")
self.expect("\^done,value=\"on\"")
@lldbmi_test
@expectedFailureWindows("llvm.org/pr22274: need a pexpect replacement for windows")
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
def test_lldbmi_gdb_set_target_async_on(self):
"""Test that 'lldb-mi --interpreter' can execute commands in async mode."""
self.spawnLldbMi(args = None)
# Switch to sync mode
self.runCmd("-gdb-set target-async off")
self.expect("\^done")
self.runCmd("-gdb-show target-async")
self.expect("\^done,value=\"off\"")
# Test that -gdb-set can switch to async mode
self.runCmd("-gdb-set target-async on")
self.expect("\^done")
self.runCmd("-gdb-show target-async")
self.expect("\^done,value=\"on\"")
# Load executable
self.runCmd("-file-exec-and-symbols %s" % self.myexe)
self.expect("\^done")
# Test that program is executed in async mode
self.runCmd("-exec-run")
self.expect("\*running")
self.expect("@\"argc=1")
@lldbmi_test
@expectedFailureWindows("llvm.org/pr22274: need a pexpect replacement for windows")
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
def test_lldbmi_gdb_set_target_async_off(self):
"""Test that 'lldb-mi --interpreter' can execute commands in sync mode."""
self.spawnLldbMi(args = None)
# Test that -gdb-set can switch to sync mode
self.runCmd("-gdb-set target-async off")
self.expect("\^done")
self.runCmd("-gdb-show target-async")
self.expect("\^done,value=\"off\"")
# Load executable
self.runCmd("-file-exec-and-symbols %s" % self.myexe)
self.expect("\^done")
# Test that program is executed in sync mode
self.runCmd("-exec-run")
unexpected = [ "\*running" ] # "\*running" is async notification
it = self.expect(unexpected + [ "@\"argc=1\\\\r\\\\n" ])
if it < len(unexpected):
# generate error if it's not "@\"argc=1\\\\r\\\\n"
self.expect("$UNEXPECTED FOUND: %s\.^" % unexpected[it], timeout = 0)
@lldbmi_test
@expectedFailureWindows("llvm.org/pr22274: need a pexpect replacement for windows")
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
def test_lldbmi_gdb_show_target_async(self):
"""Test that 'lldb-mi --interpreter' in async mode by default."""
self.spawnLldbMi(args = None)
# Test that default target-async value is "on"
self.runCmd("-gdb-show target-async")
self.expect("\^done,value=\"on\"")
@lldbmi_test
@expectedFailureWindows("llvm.org/pr22274: need a pexpect replacement for windows")
@unittest2.expectedFailure("-gdb-set ignores unknown properties")
def test_lldbmi_gdb_set_unknown(self):
"""Test that 'lldb-mi --interpreter' fails when setting an unknown property."""
self.spawnLldbMi(args = None)
# Test that -gdb-set fails if property is unknown
self.runCmd("-gdb-set unknown some_value")
self.expect("\^error")
@lldbmi_test
@expectedFailureWindows("llvm.org/pr22274: need a pexpect replacement for windows")
@unittest2.expectedFailure("-gdb-show ignores unknown properties")
def test_lldbmi_gdb_show_unknown(self):
"""Test that 'lldb-mi --interpreter' fails when showing an unknown property."""
self.spawnLldbMi(args = None)
# Test that -gdb-show fails if property is unknown
self.runCmd("-gdb-show unknown")
self.expect("\^error")
@lldbmi_test
@expectedFailureWindows("llvm.org/pr22274: need a pexpect replacement for windows")
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
@skipIfLinux # llvm.org/pr22841: lldb-mi tests fail on all Linux buildbots
def test_lldbmi_gdb_set_ouptut_radix(self):
"""Test that 'lldb-mi --interpreter' works for -gdb-set output-radix."""
self.spawnLldbMi(args = None)
# Load executable
self.runCmd("-file-exec-and-symbols %s" % self.myexe)
self.expect("\^done")
# Run to BP_printf
line = line_number('main.cpp', '// BP_printf')
self.runCmd("-break-insert main.cpp:%d" % line)
self.expect("\^done,bkpt={number=\"1\"")
self.runCmd("-exec-run")
self.expect("\^running");
self.expect("\*stopped,reason=\"breakpoint-hit\"")
# Setup variable
self.runCmd("-var-create var_a * a");
self.expect("\^done,name=\"var_a\",numchild=\"0\",value=\"10\",type=\"int\",thread-id=\"1\",has_more=\"0\"")
# Test default output
self.runCmd("-var-evaluate-expression var_a");
self.expect("\^done,value=\"10\"");
# Test hex output
self.runCmd("-gdb-set output-radix 16");
self.expect("\^done");
self.runCmd("-var-evaluate-expression var_a");
self.expect("\^done,value=\"0xa\"");
# Test octal output
self.runCmd("-gdb-set output-radix 8");
self.expect("\^done");
self.runCmd("-var-evaluate-expression var_a");
self.expect("\^done,value=\"012\"");
# Test decimal output
self.runCmd("-gdb-set output-radix 10");
self.expect("\^done");
self.runCmd("-var-evaluate-expression var_a");
self.expect("\^done,value=\"10\"");
if __name__ == '__main__':
unittest2.main()
|
Python
| 0.00001
|
@@ -3049,108 +3049,39 @@
-# generate error if it's not %22@%5C%22argc=1%5C%5C%5C%5Cr%5C%5C%5C%5Cn%22%0A self.expect(%22$UNEXPECTED FOUND
+self.fail(%22unexpected found
: %25s
-%5C.%5E
%22 %25
@@ -3098,21 +3098,8 @@
%5Bit%5D
-, timeout = 0
)%0A%0A
|
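Decoded from the hunks above, the contrived expect() on a never-matching pattern is replaced by a plain unittest failure. A sketch of the test's tail:

        it = self.expect(unexpected + [ "@\"argc=1\\\\r\\\\n" ])
        if it < len(unexpected):
            self.fail("unexpected found: %s" % unexpected[it])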
57eb5eb910eeedca757526fe104b3ed2ee104413
|
Add note that -F option (file storage) is experimental
|
sacredboard/bootstrap.py
|
sacredboard/bootstrap.py
|
# coding=utf-8
"""
Bootstrap module parses command line arguments and initializes the app.
Configures the database connection and starts the web application.
"""
import locale
import sys
import click
from flask import Flask
from gevent.pywsgi import WSGIServer
from sacredboard.app.config import jinja_filters
from sacredboard.app.data.filestorage import FileStorage
from sacredboard.app.data.mongodb import PyMongoDataAccess
from sacredboard.app.webapi import routes
locale.setlocale(locale.LC_ALL, '')
app = Flask(__name__)
@click.command()
@click.option("-m", default=None, metavar="HOST:PORT:DATABASE",
help="Connect to MongoDB using the format"
" host:port:database_name or just the database_name. "
"Default: sacred"
" Mutually exclusive with -mu")
@click.option("-mu", default=(None, None),
metavar="CONNECTION_STRING DATABASE", type=(str, str),
help="Connect to MongoDB using mongodb://..."
" and specify the database name."
" Mutually exclusive with -m")
@click.option("-mc", default="runs", metavar="COLLECTION",
help="The collection containing the Sacred's list of runs. "
"You might need it if you use a custom collection name "
"or Sacred v0.6 (which used default.runs). "
"Default: runs")
@click.option("-F", default="",
help="Path to directory containing experiment results of the"
"File Storage observer.")
@click.option("--no-browser", is_flag=True, default=False,
help="Do not open web browser automatically.")
@click.option("--debug", is_flag=True, default=False,
help="Run the application in Flask debug mode "
"(for development).")
@click.version_option()
def run(debug, no_browser, m, mu, mc, f):
"""
Sacredboard.
\b
Sacredboard is a monitoring dashboard for Sacred.
Homepage: http://github.com/chovanecm/sacredboard
Example usage:
\b
sacredboard -m sacred
Starts Sacredboard on default port (5000) and connects to
a local MongoDB database called 'sacred'. Opens web browser.
Note: MongoDB must be listening on localhost.
\b
sacredboard -m 192.168.1.1:27017:sacred
Starts Sacredboard on default port (5000) and connects to
a MongoDB database running on 192.168.1.1 on port 27017
to a database called 'sacred'. Opens web browser.
\b
sacredboard -mu mongodb://user:pwd@host/admin?authMechanism=SCRAM-SHA-1 sacred
Starts Sacredboard on default port (5000) and connects to
a MongoDB database running on localhost on port 27017
to a database called 'sacred'. Opens web browser.
\b
sacredboard -m sacred -mc default.runs
Starts Sacredboard on default port (5000) and connects to
a local MongoDB database called 'sacred' and uses the Sacred's 0.6
default collection 'default.runs' to search the runs in.
Opens web browser.
Note: MongoDB must be listening on localhost.
"""
if m or mu != (None, None):
add_mongo_config(app, m, mu, mc)
app.config["data"].connect()
elif f:
app.config["data"] = FileStorage(f)
else:
print("Must specify either a mongodb instance or \
a path to a file storage.")
app.config['DEBUG'] = debug
app.debug = debug
jinja_filters.setup_filters(app)
routes.setup_routes(app)
if debug:
app.run(host="0.0.0.0", debug=True)
else:
for port in range(5000, 5050):
http_server = WSGIServer(('0.0.0.0', port), app)
try:
http_server.start()
except OSError as e:
# try next port
continue
print("Starting sacredboard on port %d" % port)
if not no_browser:
click.launch("http://127.0.0.1:%d" % port)
http_server.serve_forever()
break
def add_mongo_config(app, simple_connection_string,
mongo_uri, collection_name):
"""
Configure the application to use MongoDB.
:param app: Flask application
:param simple_connection_string:
Expects host:port:database_name or database_name
Mutually exclusive with mongo_uri
:param mongo_uri: Expects mongodb://... as defined
in https://docs.mongodb.com/manual/reference/connection-string/
Mutually exclusive with simple_connection_string (must be None)
:param collection_name: The collection containing Sacred's runs
:return:
"""
if mongo_uri != (None, None):
add_mongo_config_with_uri(app, mongo_uri[0], mongo_uri[1],
collection_name)
if simple_connection_string is not None:
print("Ignoring the -m option. Overridden by "
"a more specific option (-mu).", file=sys.stderr)
else:
# Use the default value 'sacred' when not specified
if simple_connection_string is None:
simple_connection_string = "sacred"
add_mongo_config_simple(app, simple_connection_string, collection_name)
def add_mongo_config_simple(app, connection_string, collection_name):
"""
Configure the app to use MongoDB.
:param app: Flask Application
:type app: Flask
:param connection_string: in format host:port:database or database
(default: sacred)
:type connection_string: str
:param collection_name: Name of the collection
:type collection_name: str
"""
split_string = connection_string.split(":")
config = {"host": "localhost", "port": 27017, "db": "sacred"}
if len(split_string) > 0 and len(split_string[-1]) > 0:
config["db"] = split_string[-1]
if len(split_string) > 1:
config["port"] = int(split_string[-2])
if len(split_string) > 2:
config["host"] = split_string[-3]
app.config["data"] = PyMongoDataAccess.build_data_access(
config["host"], config["port"], config["db"], collection_name)
def add_mongo_config_with_uri(app, connection_string_uri,
database_name, collection_name):
"""
Configure PyMongo with a MongoDB connection string.
:param app: Flask application
:param connection_string_uri: MongoDB connection string
:param database_name: Sacred database name
:param collection_name: Sacred's collection with runs
:return:
"""
app.config["data"] = PyMongoDataAccess.build_data_access_with_uri(
connection_string_uri, database_name, collection_name
)
if __name__ == '__main__':
run()
|
Python
| 0.000003
|
@@ -1560,16 +1560,31 @@
bserver.
+ (experimental)
%22)%0A@clic
|
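Decoded from the hunk above, the -F help text simply gains an experimental marker:

@click.option("-F", default="",
              help="Path to directory containing experiment results of the "
                   "File Storage observer. (experimental)")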
81328ba4de673786029f9a52bdde716e5395272e
|
add delete test
|
sacrud/tests/__init__.py
|
sacrud/tests/__init__.py
|
# -*- coding: utf-8 -*-
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
import unittest
from sacrud.tests.test_models import User, Profile, PHOTO_PATH
from sacrud.action import get_relations, delete_fileobj, read, update
from sacrud.action import get_pk, index, create
from pyramid.testing import DummyRequest
from StringIO import StringIO
import glob
import os
class MockCGIFieldStorage(object):
pass
class SacrudTests(unittest.TestCase):
def setUp(self):
engine = create_engine('sqlite:///:memory:')
Session = sessionmaker(bind=engine)
session = Session()
# You probably need to create some tables and
# load some test data, do so here.
self.session = session
# To create tables, you typically do:
#User.metadata.create_all(engine)
User.metadata.create_all(engine)
Profile.metadata.create_all(engine)
def tearDown(self):
def clear_files():
for filename in glob.glob("%s/*.html" % (PHOTO_PATH, )):
os.remove(os.path.join(PHOTO_PATH, filename))
clear_files()
def test_relations(self):
user = User(u'Vasya', u'Pupkin', u"123")
self.session.add(user)
self.session.commit()
profile = Profile(user=user)
self.session.add(profile)
self.session.commit()
profile = self.session.query(Profile).get(1)
self.assertEqual(get_relations(user), [('profile',
[profile, ])])
self.session.delete(profile)
self.session.delete(user)
def test_get_pk(self):
pk = get_pk(User)
self.assertEqual("id", pk)
def test_index(self):
user = User(u'Vasya', u'Pupkin', u"123")
self.session.add(user)
self.session.commit()
result = index(self.session, User)
self.assertEqual(result['pk'], 'id')
self.assertEqual(result["prefix"], "crud")
self.assertEqual(result["table"], User)
self.assertEqual(result["row"], [user, ])
self.session.delete(user)
def test_create(self):
request = DummyRequest()
request['name'] = ["Vasya", ]
request['fullname'] = ["Vasya Pupkin", ]
request['password'] = ["123", ]
create(self.session, User, request)
user = self.session.query(User).get(1)
self.assertEqual(user.name, "Vasya")
self.assertEqual(user.fullname, "Vasya Pupkin")
self.assertEqual(user.password, "123")
request = DummyRequest()
request['phone'] = ["213123123", ]
request['cv'] = ["Vasya Pupkin was born in Moscow", ]
request['married'] = ["true", ]
request["salary"] = ["23.0", ]
request["user_id"] = ["1", ]
create(self.session, Profile, request)
profile = self.session.query(Profile).get(1)
self.assertEqual(profile.phone, "213123123")
self.assertEqual(profile.cv, "Vasya Pupkin was born in Moscow")
self.assertEqual(profile.married, True)
self.assertEqual(profile.salary, float(23))
self.assertEqual(profile.user.id, 1)
self.session.delete(profile)
self.session.delete(user)
def test_read(self):
user = User(u'Vasya', u'Pupkin', u"123")
self.session.add(user)
self.session.commit()
result = read(self.session, User, 1)
self.assertEqual(result['obj'].id, 1)
self.assertEqual(result['pk'], "id")
self.assertEqual(result['prefix'], "crud")
self.assertEqual(result['table'], User)
self.assertEqual(result['rel'], [('profile', [])])
def test_update(self):
user = User(u'Vasya', u'Pupkin', u"123")
self.session.add(user)
self.session.commit()
profile = Profile(user=user, salary="25.7")
self.session.add(profile)
self.session.commit()
user = User(u'Vasya', u'Pupkin', u"123")
self.session.add(user)
self.session.commit()
profile = self.session.query(Profile).get(1)
request = DummyRequest()
request['phone'] = ["213123123", ]
request['cv'] = ["Vasya Pupkin was born in Moscow", ]
request['married'] = ["true", ]
request["salary"] = ["23.0", ]
request["user_id"] = ["2", ]
upload = MockCGIFieldStorage()
upload.file = StringIO('foo')
upload.filename = 'foo.html'
request["photo"] = [upload, ]
update(self.session, Profile, 1, request)
self.assertEqual(profile.phone, "213123123")
self.assertEqual(profile.cv, "Vasya Pupkin was born in Moscow")
self.assertEqual(profile.married, True)
self.assertEqual(profile.user.id, 2)
self.assertEqual(profile.salary, float(23))
|
Python
| 0.000001
|
@@ -243,16 +243,24 @@
, update
+, delete
%0Afrom sa
@@ -3821,24 +3821,16 @@
ommit()%0A
-
%0A
@@ -4834,8 +4834,430 @@
t(23))%0A%0A
+ def test_delete(self):%0A%0A user = User(u'Vasya', u'Pupkin', u%22123%22)%0A self.session.add(user)%0A self.session.commit()%0A%0A profile = Profile(user=user, salary=%2225.7%22)%0A%0A self.session.add(profile)%0A self.session.commit()%0A delete(self.session, Profile, 1)%0A self.session.commit()%0A profile = self.session.query(Profile).get(1)%0A self.assertEqual(profile, None)%0A%0A
|
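Decoded from the hunks above: the action import gains delete, and the new test checks that a deleted Profile can no longer be fetched. A sketch of the additions:

from sacrud.action import get_relations, delete_fileobj, read, update, delete

    def test_delete(self):
        user = User(u'Vasya', u'Pupkin', u"123")
        self.session.add(user)
        self.session.commit()

        profile = Profile(user=user, salary="25.7")

        self.session.add(profile)
        self.session.commit()
        delete(self.session, Profile, 1)
        self.session.commit()
        profile = self.session.query(Profile).get(1)
        self.assertEqual(profile, None)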
49bff5b61336b9fbec79078ff0a8f6a1344b86fa
|
Remove unnecessary bar comments
|
skylines/model/search.py
|
skylines/model/search.py
|
import shlex
from sqlalchemy import literal_column, cast, desc, Unicode
from sqlalchemy.dialects.postgresql import array
from skylines.model import db
PATTERNS = [
('{}', 5), # Matches token exactly
('{}%', 3), # Begins with token
('% {}%', 2), # Has token at word start
('%{}%', 1), # Has token
]
##############################
def search_query(cls, tokens,
weight_func=None, include_misses=False, ordered=True):
# Read the searchable columns from the table (strings)
columns = cls.__searchable_columns__
# Convert the columns from strings into column objects
columns = [getattr(cls, c) for c in columns]
# The model name that can be used to match search result to model
cls_name = literal_column('\'{}\''.format(cls.__name__))
# Filter out id: tokens for later
ids, tokens = process_id_option(tokens)
# If there are still tokens left after id: token filtering
if tokens:
# Generate the search weight expression from the
# searchable columns, tokens and patterns
if not weight_func:
weight_func = weight_expression
weight = weight_func(columns, tokens)
# If the search expression only included "special" tokens like id:
else:
weight = literal_column(str(1))
# Create an array of stringified detail columns
details = getattr(cls, '__search_detail_columns__', None)
if details:
details = [cast(getattr(cls, d), Unicode) for d in details]
else:
details = [literal_column('NULL')]
# Create a query object
query = db.session.query(
cls_name.label('model'), cls.id.label('id'),
cls.name.label('name'), array(details).label('details'),
weight.label('weight'))
# Filter out specific ids (optional)
if ids:
query = query.filter(cls.id.in_(ids))
# Filter out results that don't match the patterns at all (optional)
if not include_misses:
query = query.filter(weight > 0)
# Order by weight (optional)
if ordered:
query = query.order_by(desc(weight))
return query
def combined_search_query(models, tokens, include_misses=False, ordered=True):
models, tokens = process_type_option(models, tokens)
# Build sub search queries
queries = [model.search_query(
tokens, include_misses=include_misses, ordered=False)
for model in models]
# Build combined search query
query = queries[0]
if len(queries) > 1:
query = query.union(*queries[1:])
# Order by weight (optional)
if ordered:
query = query.order_by(desc('weight'))
return query
##############################
def process_type_option(models, tokens):
"""
This function looks for "type:<type>" in the tokens and filters the
searchable models for the requested types.
Returns the filtered list of models.
"""
# Filter for type: and types: tokens
types, new_tokens = __filter_prefixed_tokens('type', tokens)
# Filter the list of models according to the type filter
def in_types_list(model):
return model.__name__.lower() in types
new_models = filter(in_types_list, models)
# Return original models list if there are no matching models
if len(new_models) == 0:
return models, new_tokens
# Return filtered models and tokens
return new_models, new_tokens
def process_id_option(tokens):
"""
This function looks for "id:<id>" in the tokens, removes them from the
token list and returns a list of ids.
"""
# Filter for id: and ids: tokens
ids, new_tokens = __filter_prefixed_tokens('id', tokens)
# Convert ids to integers
def int_or_none(value):
try:
return int(value)
except ValueError:
return None
ids = filter(None, map(int_or_none, ids))
# Return ids and tokens
return ids, new_tokens
def __filter_prefixed_tokens(prefix, tokens):
len_prefix = len(prefix)
# The original tokens without the prefixed tokens
new_tokens = []
# The contents that were found after the prefixed tokens
contents = []
# Iterate through original tokens to find prefixed tokens
for token in tokens:
_token = token.lower()
if _token.startswith(prefix + ':'):
contents.append(_token[(len_prefix + 1):])
elif _token.startswith(prefix + 's:'):
contents.extend(_token[(len_prefix + 2):].split(','))
else:
new_tokens.append(token)
# Strip whitespace from the types
contents = map(str.strip, contents)
return contents, new_tokens
##############################
def text_to_tokens(search_text):
try:
return shlex.split(search_text.encode('utf-8'))
except ValueError:
return search_text.split(' ')
def escape_tokens(tokens):
# Escape % and _ properly
tokens = [t.replace('%', '\\%').replace('_', '\\_') for t in tokens]
# Use * as wildcard character
tokens = [t.replace('*', '%') for t in tokens]
return tokens
##############################
def weight_expression(columns, tokens):
expressions = []
# Use entire search string as additional token
if len(tokens) > 1:
tokens = tokens + [' '.join(tokens)]
for column in columns:
for token in tokens:
len_token = len(token)
for pattern, weight in PATTERNS:
# Inject the token in the search pattern
token_pattern = pattern.format(token)
# Adjust the weight for the length of the token
# (the longer the matched token, the greater the weight)
weight *= len_token
# Create the weighted ILIKE expression
expression = column.weighted_ilike(token_pattern, weight)
# Add the expression to list
expressions.append(expression)
return sum(expressions)
##############################
def process_result_details(models, results):
models = {m.__name__: m for m in models}
for result in results:
model = models.get(result.model, None)
if not model:
continue
details = getattr(model, '__search_detail_columns__', [None])
if len(details) != len(result.details):
continue
for key, value in zip(details, result.details):
if isinstance(key, str):
setattr(result, key, value)
|
Python
| 0
|
@@ -326,40 +326,8 @@
%0A%5D%0A%0A
-##############################%0A%0A
%0Adef
@@ -2634,40 +2634,8 @@
ry%0A%0A
-##############################%0A%0A
%0Adef
@@ -4603,40 +4603,8 @@
ns%0A%0A
-##############################%0A%0A
%0Adef
@@ -5001,40 +5001,8 @@
ns%0A%0A
-##############################%0A%0A
%0Adef
@@ -5821,24 +5821,24 @@
xpression)%0A%0A
+
return s
@@ -5858,40 +5858,8 @@
s)%0A%0A
-##############################%0A%0A
%0Adef
|
6fd689f64d6de0e719935f607be49abf223117aa
|
correct key in validation
|
slimta/app/validation.py
|
slimta/app/validation.py
|
# Copyright (c) 2013 Ian C. Good
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import absolute_import
from config import Mapping, Sequence
class ConfigValidationError(Exception):
def __init__(self, msg, stack):
final = msg + ' in config '+self._repr_stack(stack)
super(ConfigValidationError, self).__init__(final)
def _repr_stack(self, stack):
ret = []
for item in stack:
if isinstance(item, int):
ret[-1] += '[{0}]'.format(item)
else:
ret.append(item)
return '.'.join(ret)
class ConfigValidation(object):
def __init__(self, cfg):
self.cfg = cfg
def _check_ref(self, path, name):
try:
resolved_path = self.cfg.getByPath(path)
return name in resolved_path
except AttributeError:
return False
def _check_keys(self, opts, keydict, stack, only_keys=False):
for k, v in opts.iteritems():
if k not in keydict:
if only_keys:
msg = "Unexpected key '{0}'".format(k)
raise ConfigValidationError(msg, stack)
else:
continue
if not isinstance(v, keydict[k][0]):
type_name = keydict[k][0].__name__.lower()
msg = "Expected key '{0}' to be {1}".format(k, type_name)
raise ConfigValidationError(msg, stack)
del keydict[k]
for k, v in keydict.iteritems():
if v[1]:
msg = "Missing required key '{0}'".format(k)
raise ConfigValidationError(msg, stack)
def _check_process(self, opts, stack):
if stack[-1] not in ('slimta', 'worker'):
msg = "Unexpected process type '{0}'".format(stack[-1])
raise ConfigValidationError(msg, stack[:-1])
keydict = {'daemon': (bool, False),
'user': (basestring, False),
'group': (basestring, False),
'stdout': (basestring, False),
'stderr': (basestring, False),
'logging': (Mapping, False)}
self._check_keys(opts, keydict, stack, True)
def _check_edge(self, opts, stack):
keydict = {'type': (basestring, True),
'queue': (basestring, True),
'factory': (basestring, False),
'listener': (Mapping, False),
'hostname': (basestring, False),
'max_size': (int, False),
'tls': (Mapping, False),
'tls_immediately': (bool, False),
'rules': (Mapping, False)}
self._check_keys(opts, keydict, stack)
if not self._check_ref('queue', opts.queue):
msg = "No match for reference key 'queue'"
raise ConfigValidationError(msg, stack)
if opts.type == 'custom' and not opts.get('factory'):
msg = "The 'factory' key must be given when using 'custom' type"
raise ConfigValidationError(msg, stack)
if 'listener' in opts:
listener_keydict = {'interface': (basestring, False),
'port': (int, False)}
self._check_keys(opts.listener, listener_keydict,
stack+['listener'], True)
if 'tls' in opts:
tls_keydict = {'certfile': (basestring, True),
'keyfile': (basestring, True)}
self._check_keys(opts.tls, tls_keydict, stack+['tls'])
if 'rules' in opts:
rules_keydict = {'banner': (basestring, False),
'dnsbl': (basestring, False),
'reject_spf': (Sequence, False),
'only_senders': (Sequence, False),
'only_recipients': (Sequence, False),
'require_credentials': (Mapping, False),
'scan': (Mapping, False)}
self._check_keys(opts.rules, rules_keydict, stack+['rules'], True)
def _check_queue(self, opts, stack):
keydict = {'type': (basestring, True),
'relay': (basestring, False),
'factory': (basestring, False),
'policies': (Sequence, False)}
self._check_keys(opts, keydict, stack)
if 'relay' in opts and not self._check_ref('relay', opts.relay):
msg = "No match for reference key 'relay'"
raise ConfigValidationError(msg, stack)
if opts.type == 'custom' and not opts.get('factory'):
msg = "The 'factory' key must be given when using 'custom' type"
raise ConfigValidationError(msg, stack)
policies = opts.get('policies', [])
for i, p in enumerate(policies):
mystack = stack + ['policies', i]
if not isinstance(p, Mapping):
msg = 'Expected dictionary'
raise ConfigValidationError(msg, mystack)
self._check_keys(p, {'type': (basestring, True)}, mystack)
if 'retry' in opts:
retry_keydict = {'maximum': (int, False),
'delay': (basestring, False)}
self._check_keys(opts.retry, retry_keydict, stack+['retry'], True)
def _check_relay(self, opts, stack):
keydict = {'type': (basestring, True),
'factory': (basestring, False),
'ehlo_as': (basestring, False),
'credentials': (Mapping, False)}
self._check_keys(opts, keydict, stack)
if opts.type == 'custom' and not opts.get('factory'):
msg = "The 'factory' key must be given when using 'custom' type"
raise ConfigValidationError(msg, stack)
if 'credentials' in opts:
creds_keydict = {'username': (basestring, True),
'password': (basestring, True)}
self._check_keys(opts.credentials, creds_keydict,
stack+['credentials'], True)
def _check_toplevel(self, stack, program):
keydict = {'process': (Mapping, True),
'edge': (Mapping, False),
'relay': (Mapping, False),
'queue': (Mapping, True),
'celery_app': (Mapping, False)}
self._check_keys(self.cfg, keydict, stack)
for process, opts in self.cfg.process.iteritems():
self._check_process(opts, stack+['process', process])
if 'edge' in self.cfg:
for edge, opts in self.cfg.edge.iteritems():
self._check_edge(opts, stack+['edge', edge])
for queue, opts in self.cfg.queue.iteritems():
self._check_queue(opts, stack+['queue', queue])
if 'relay' in self.cfg:
for relay, opts in self.cfg.relay.iteritems():
self._check_relay(opts, stack+['relay', relay])
if program not in self.cfg.process:
msg = "Missing required key '{0}'".format(program)
raise ConfigValidationError(msg, stack+['process'])
@classmethod
def check(cls, cfg, program):
return cls(cfg)._check_toplevel(['root'], program)
# vim:et:fdm=marker:sts=4:sw=4:ts=4
|
Python
| 0.000031
|
@@ -5035,12 +5035,19 @@
'
-scan
+reject_spam
': (
|
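Decoded from the hunk above, the validated rules key changes from 'scan' to 'reject_spam'. A sketch of the corrected keydict:

            rules_keydict = {'banner': (basestring, False),
                             'dnsbl': (basestring, False),
                             'reject_spf': (Sequence, False),
                             'only_senders': (Sequence, False),
                             'only_recipients': (Sequence, False),
                             'require_credentials': (Mapping, False),
                             'reject_spam': (Mapping, False)}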
0fe6e79f9bc201b3c63ad6f1ce400c9d79bd484a
|
Fix path issues in example script
|
bin/idiotscript.py
|
bin/idiotscript.py
|
#!/usr/bin/python3
import os, sys
if len(sys.argv) < 2 or len(sys.argv) > 3:
print("Invalid number of arguments.")
sys.exit()
if os.path.isfile(sys.argv[1]) == False:
print("IdiotScript program does not exist.")
sys.exit()
import io
import idiotscript
from idiotscript import InstructionSet, Collector, ScriptParser, ScriptRunner, InputContainer
from idiotscript import formatters
def get_input():
if len(sys.argv) == 3:
# We've been passed a filename for the input content
with open(sys.argv[2], "r", encoding = "utf-8") as input_file:
return input_file.read()
else:
# Assume we're receiving data from stdin
from io import StringIO
try:
stdin_file = sys.stdin.buffer.read()
except AttributeError:
stdin_file = sys.stdin.read()
io_obj = StringIO(stdin_file.decode("utf-8"))
return io_obj.read()
# Prepare the default instruction set.
my_iset = idiotscript.load_default_instruction_set(InstructionSet())
# Initialise the script parser with the default instruction set.
# We need to pass it an instruction list factory, as it's going to
# be creating lots of them.
parser = ScriptParser(my_iset, idiotscript.ilist_factory)
# Load the IdiotScript program into memory
with open(sys.argv[1]) as program_file:
program = program_file.read()
my_ilist = parser.parse(program)
inputtext = get_input()
my_collector = Collector()
runner = ScriptRunner(InputContainer(inputtext))
runner.run(my_ilist, my_collector)
nl_formatter = formatters.NewlineFormatter()
print(nl_formatter.format(my_collector))
|
Python
| 0.000001
|
@@ -29,16 +29,235 @@
s, sys%0A%0A
+def alter_path():%0A script_path = os.path.dirname(os.path.realpath(__file__))%0A try:%0A path_index = sys.path.index(script_path)%0A except ValueError:%0A return%0A sys.path.pop(path_index)%0Aalter_path()%0A%0A
if len(s
|
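Decoded from the hunk above, an alter_path() helper runs right after the imports; judging by the subject line, the intent is that the script's own bin/ directory no longer shadows the idiotscript package on sys.path (that rationale is an inference; the code is verbatim from the diff):

import os, sys

def alter_path():
    script_path = os.path.dirname(os.path.realpath(__file__))
    try:
        path_index = sys.path.index(script_path)
    except ValueError:
        return
    sys.path.pop(path_index)
alter_path()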
d926f321a26fe7c6b72513f88fe60bc4f3c899e4
|
Update model cnes bed
|
app/models/cnes_bed.py
|
app/models/cnes_bed.py
|
from sqlalchemy import Column, Integer, String, func
from app import db
class CnesBed(db.Model):
__tablename__ = 'cnes_bed'
year = Column(Integer, primary_key=True)
region = Column(String(1), primary_key=True)
mesoregion = Column(String(4), primary_key=True)
microregion = Column(String(5), primary_key=True)
state = Column(String(2), primary_key=True)
municipality = Column(String(7), primary_key=True)
cnes = Column(String(7), primary_key=True)
bed_type = Column(String(7), primary_key=True)
@classmethod
def dimensions(cls):
return [
'year',
'region',
'mesoregion',
'microregion',
'state',
'municipality',
'cnes',
'bed_type',
]
@classmethod
def aggregate(cls, value):
return {
'beds': func.count()
}[value]
@classmethod
def values(cls):
return ['beds']
|
Python
| 0
|
@@ -108,16 +108,46 @@
lename__
+
= 'cnes
@@ -162,32 +162,62 @@
year
+
= Column(Integer
@@ -246,32 +246,62 @@
region
+
= Column(String(
@@ -336,24 +336,54 @@
soregion
+
= Column(Str
@@ -426,16 +426,46 @@
egion
+
= Column
@@ -504,32 +504,62 @@
state
+
= Column(String(
@@ -598,16 +598,46 @@
pality
+
= Column
@@ -670,20 +670,50 @@
ue)%0A
-cn
es
+tablishment
@@ -760,39 +760,663 @@
-bed_type = Column(String(7
+unit_type = Column(String(2), primary_key=True)%0A bed_type = Column(String(1), primary_key=True)%0A bed_type_per_specialty = Column(String(2), primary_key=True)%0A number_existing_bed = Column(Integer, primary_key=True)%0A number_existing_contract = Column(Integer, primary_key=True)%0A number_sus_bed = Column(Integer, primary_key=True)%0A number_non_sus_bed = Column(Integer, primary_key=True)%0A health_region = Column(String(5
), p
@@ -1653,20 +1653,29 @@
'
-cn
es
+tablishment
',%0A
@@ -1689,24 +1689,90 @@
'bed_type',%0A
+ 'bed_type_per_specialty',%0A 'health_region'%0A
%5D%0A%0A
|
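Decoded from the hunks above (most of which only re-align whitespace): cnes is renamed to establishment, the single bed_type column is split into a richer set, and dimensions() grows accordingly. A sketch of the changed pieces:

    establishment = Column(String(7), primary_key=True)  # renamed from cnes
    unit_type = Column(String(2), primary_key=True)
    bed_type = Column(String(1), primary_key=True)
    bed_type_per_specialty = Column(String(2), primary_key=True)
    number_existing_bed = Column(Integer, primary_key=True)
    number_existing_contract = Column(Integer, primary_key=True)
    number_sus_bed = Column(Integer, primary_key=True)
    number_non_sus_bed = Column(Integer, primary_key=True)
    health_region = Column(String(5), primary_key=True)

    # ... and in dimensions():
            'establishment',
            'bed_type',
            'bed_type_per_specialty',
            'health_region'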
3974bcaf151c491f4608b17860741d2388d15002
|
convert bed to bigBed
|
data_formats/converters.py
|
data_formats/converters.py
|
#
# Author : Manuel Bernal Llinares
# Project : trackhub-creator
# Timestamp : 28-09-2017 14:20
# ---
# © 2017 Manuel Bernal Llinares <mbdebian@gmail.com>
# All rights reserved.
#
"""
This module offers converters between different formats
"""
import abc
# App imports
import config_manager
import ensembl.service as ensembl_service
from . import config_manager as module_config_manager
from parallel.models import ParallelRunner, CommandLineRunnerFactory
from .exceptions import DataFormatConversionNotFinished
# Factories
class DataFormatConverterFactory:
pass
# Possible base class for data format converters
class DataFormatConverter(ParallelRunner):
def __init__(self):
super().__init__()
self.conversion_status_error = False
self._stdout = []
self._stderr = []
def get_conversion_output(self):
if not self.is_done():
raise DataFormatConversionNotFinished("{} - NOT FINISHED YET".format(self._get_conversion_details()))
return "\n".join(self._stdout)
def get_conversion_output_error(self):
if not self.is_done():
raise DataFormatConversionNotFinished("{} - NOT FINISHED YET".format(self._get_conversion_details()))
return "\n".join(self._stderr)
@abc.abstractmethod
def _get_conversion_details(self):
...
def is_conversion_ok(self):
if not self.is_done():
raise DataFormatConversionNotFinished("{} - NOT FINISHED YET".format(self._get_conversion_details()))
        return not (self.conversion_status_error or super().is_error())
class FileDataFormatConverter(DataFormatConverter):
def __init__(self):
super().__init__()
self.file_path_source = ''
self.file_path_destination = ''
class BedToBigBedConverter(FileDataFormatConverter):
def __init__(self):
super().__init__()
self.taxonomy_id = ''
@staticmethod
def get_bed_to_bigbed_conversion_command(self, input_file_path, chromosome_sizes_file_path, output_file_path):
return "time {} {} {} {}" \
.format(
module_config_manager.get_configuration_service().get_file_path_binary_bed_to_bigbed_conversion_tool(),
input_file_path,
chromosome_sizes_file_path,
output_file_path)
@abc.abstractmethod
def _get_command_line_runner(self):
...
def _sort_bed_file(self, bed_file_path, sorted_bed_file_path):
runner = self._get_command_line_runner()
runner.command = "time sort -k1,1 -k2,2n {} > {}".format(bed_file_path, sorted_bed_file_path)
runner.start()
return runner
def _fetch_and_dump_chromosome_sizes(self, taxonomy_id, chromosome_sizes_file_path):
chromosome_sizes = ensembl_service.get_service().get_ucsc_chromosome_sizes_for_taxonomy(taxonomy_id)
with open(chromosome_sizes_file_path, 'w') as wf:
for chromosome, size in chromosome_sizes.items():
wf.write("{}\t{}".format(chromosome, size))
return chromosome_sizes
def _run(self):
file_path_sorted_bed = "{}_sorted.bed".format(self.file_path_source[:self.file_path_source.rfind('.')])
file_path_chromosome_sizes = "chromosome_sizes_{}.txt".format(self.taxonomy_id)
# TODO - Conversion algorithm goes here -
# Sort the .bed file
runner_sort = self._sort_bed_file(self.file_path_source, file_path_sorted_bed)
# Fetch chromosome sizes for this .bed file
chromosome_sizes = self._fetch_and_dump_chromosome_sizes(self.taxonomy_id, file_path_chromosome_sizes)
runner_sort.wait()
self._stdout.append(runner_sort.get_stdout())
self._stderr.append(runner_sort.get_stderr())
if not runner_sort.command_success:
self.conversion_status_error = True
return False
# TODO - Use bedToBigBed utility to create the .bb (bigBed) file
pass
# Leaves - actual implementations
class BedToBigBedMultithreadedConverter(BedToBigBedConverter):
def __init__(self):
super().__init__()
self._logger = config_manager.get_app_config_manager() \
.get_logger_for("{}.{}".format(__name__, type(self).__name__))
def _get_conversion_details(self):
# TODO
pass
def _get_command_line_runner(self):
return CommandLineRunnerFactory.get_multithread_command_line_runner()
class BedToBigBedHpcConverter(BedToBigBedConverter):
pass
if __name__ == '__main__':
print("ERROR: This script is part of a pipeline collection and it is not meant to be run in stand alone mode")
|
Python
| 0.999999
|
@@ -1957,22 +1957,16 @@
command(
-self,
input_fi
@@ -3872,23 +3872,16 @@
#
- TODO -
Use bed
@@ -3933,28 +3933,718 @@
ile%0A
-pass
+runner_conversion = self._get_command_line_runner()%0A runner_conversion.command = self.get_bed_to_bigbed_conversion_command(self.file_path_source,%0A file_path_chromosome_sizes,%0A self.file_path_destination)%0A runner_conversion.start()%0A runner_conversion.wait()%0A self._stdout.append(runner_conversion.get_stdout())%0A self._stderr.append(runner_conversion.get_stderr())%0A if not runner_conversion.command_success:%0A self.conversion_status_error = True%0A return False%0A return True
%0A%0A%0A# Leaves
|
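The diff above fills in the bedToBigBed step of _run(). A minimal standalone sketch of the same sort-then-convert sequence, assuming the UCSC bedToBigBed binary is on PATH and the chromosome-sizes file already exists:

import subprocess

def bed_to_bigbed(bed_path, chrom_sizes_path, bigbed_path):
    sorted_path = bed_path.rsplit('.', 1)[0] + '_sorted.bed'
    # bedToBigBed requires input sorted by chromosome, then by start position.
    with open(sorted_path, 'w') as out:
        subprocess.check_call(['sort', '-k1,1', '-k2,2n', bed_path], stdout=out)
    subprocess.check_call(
        ['bedToBigBed', sorted_path, chrom_sizes_path, bigbed_path])
    return bigbed_path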
824d78322e36d97daaf3a61a6ef503ee45d94e99
|
Print table name in the preview
|
databaker/jupybakeutils.py
|
databaker/jupybakeutils.py
|
# HTML preview of the dimensions and table (will be moved to a function in databakersolo)
from IPython.display import display
from IPython.core.display import HTML
import databaker.constants
OBS = databaker.constants.OBS
# copied out again
def create_colourlist():
# Function to dynamically assign colours to dimensions for preview
"https://github.com/python-excel/xlwt/blob/master/xlwt/Style.py#L309"
colours = ["lavender", "violet", "gray25", "sea_green",
"pale_blue", "blue", "gray25", "rose", "tan", "light_yellow", "light_green", "light_turquoise",
"light_blue", "sky_blue", "plum", "gold", "lime", "coral", "periwinkle", "ice_blue", "aqua"]
numbers = []
for i in range(len(databaker.constants.template.dimension_names)-1, \
-(len(colours) - len(databaker.constants.template.dimension_names)), -1):
numbers.append(-i)
colourlist = dict(list(zip(numbers, colours)))
return colourlist
colourlist = create_colourlist()
colchange = {"rose":"misty_rose", "ice_blue":"cornflower_blue", "periwinkle":"burly_wood", "pale_blue":"deep_sky_blue", "gray25":"light_gray", "light_turquoise":"pale_turquoise"}
def tsubsets(headers, segment):
tsubs = [ ]
if segment:
tsubs.append((OBS, "OBS", segment))
for i, header in headers.items():
if header.direction is not None: # filter out TempValue headers
label = header.Dlabel
if isinstance(label, int) and label < 0:
label = databaker.constants.template.dimension_names[len(databaker.constants.template.dimension_names)-1+label]
tsubs.append((i, label, header.bag))
return tsubs
def dsubsets(dimensions, segment):
tsubs = [ ]
if segment:
tsubs.append((OBS, "OBS", segment))
for i, (header_bag, label, strict, direction) in enumerate(dimensions):
if direction is not None: # filter out TempValue headers
if isinstance(label, int) and label < 0:
label = databaker.constants.template.dimension_names[len(databaker.constants.template.dimension_names)-1+label]
tsubs.append((i, label, header_bag))
return tsubs
def displaytable(tab, tsubs):
key = [ ]
key.append('<table class="ex">\n')
key.append('<tr>')
ixyheaderlookup = { }
for i, label, bag in tsubs:
for h in bag:
ixyheaderlookup[(h.x, h.y)] = i
key.append('<td class="exc%d">' % i)
key.append(label)
key.append('</td>')
key.append('</tr>')
key.append('</table>\n')
sty = [ ]
sty.append("<style>\n")
sty.append("table.ex td, table.ex tr { border: none }\n")
sty.append("td.exbold { font-weight: bold }\n")
sty.append("td.exnumber { color: green }\n")
sty.append("td.exdate { color: purple }\n")
for i, col in colourlist.items():
sty.append("td.exc%d { background-color: %s }\n" % (i, "".join(lv.capitalize() for lv in colchange.get(col, col).split("_"))))
sty.append("</style>\n\n")
htm = [ ]
htm.append('<table class="ex">\n')
for row in tab.rows():
htm.append("<tr>")
assert len(row) == tab._max_x + 1
rrow = sorted(row, key=lambda X: X.x)
for c in rrow:
cs = [ ]
ih = ixyheaderlookup.get((c.x, c.y))
if ih is not None: cs.append("exc%d" % ih)
if c.properties.get_bold(): cs.append("exbold")
if c.is_date(): cs.append("exdate")
if c.is_number(): cs.append("exnumber")
htm.append('<td class="%s">' % " ".join(cs))
htm.append(str(c.value))
htm.append("</td>")
htm.append("</tr>\n")
htm.append("</table>\n")
display(HTML("".join(sty)))
display(HTML("".join(key)))
display(HTML("".join(htm)))
|
Python
| 0
|
@@ -2225,24 +2225,123 @@
key = %5B %5D%0A
+ key.append('Table: ')%0A key.append('%3Cb%3E')%0A key.append(tab.name); %0A key.append('%3C/b%3E ')%0A
key.appe
|
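The diff prepends the table name, in bold, to the colour key built by displaytable(). A small sketch of that fragment in isolation; the escape() call is an extra precaution assumed here, not in the original commit:

from xml.sax.saxutils import escape

def table_title_fragment(tab_name):
    # Matches the key header the commit adds: Table: <b>name</b>
    return 'Table: <b>%s</b> ' % escape(str(tab_name))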
5e225294c133a4b90c81461d38ed582819638004
|
Replace Unicode decoding processing
|
dataproperty/_converter.py
|
dataproperty/_converter.py
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <gogogo.vm@gmail.com>
"""
from __future__ import absolute_import
from __future__ import division
import abc
import re
from ._error import TypeConversionError
class ValueConverterInterface(object):
@abc.abstractmethod
def convert(self): # pragma: no cover
pass
@abc.abstractmethod
def try_convert(self): # pragma: no cover
pass
class ValueConverter(ValueConverterInterface):
__slots__ = ("_value")
def __init__(self, value):
self._value = value
def __repr__(self):
try:
string = str(self.convert())
except TypeConversionError:
string = "[ValueConverter ERROR] failed to convert"
return string
def try_convert(self):
try:
return self.convert()
except TypeConversionError:
return None
class NopConverter(ValueConverter):
def convert(self):
return self._value
class StringConverter(ValueConverter):
def convert(self):
try:
return str(self._value)
except UnicodeEncodeError:
return self._value
class IntegerConverter(ValueConverter):
def convert(self):
try:
return int(self._value)
except (TypeError, ValueError, OverflowError):
try:
raise TypeConversionError(
"failed to convert: {}".format(self._value))
except UnicodeEncodeError:
raise TypeConversionError("failed to convert to integer")
class FloatConverter(ValueConverter):
def convert(self):
import decimal
if isinstance(self._value, float):
return self._value
try:
return decimal.Decimal(self._value)
except (TypeError, ValueError, decimal.InvalidOperation):
try:
raise TypeConversionError(
"failed to convert: {}".format(self._value))
except UnicodeEncodeError:
raise TypeConversionError("failed to convert to float")
class BoolConverter(ValueConverter):
def convert(self):
try:
return self.__strict_strtobool(self._value)
except ValueError:
try:
raise TypeConversionError(
"failed to convert: {}".format(self._value))
except UnicodeEncodeError:
raise TypeConversionError("failed to convert to bool")
@staticmethod
def __strict_strtobool(value):
from distutils.util import strtobool
if isinstance(value, bool):
return value
try:
lower_text = value.lower()
except AttributeError:
raise ValueError("invalid value '{}'".format(str(value)))
binary_value = strtobool(lower_text)
if lower_text not in ["true", "false"]:
raise ValueError("invalid value '{}'".format(str(value)))
return bool(binary_value)
class DateTimeConverter(ValueConverter):
__DAYS_TO_SECONDS_COEF = 60 ** 2 * 24
__MICROSECONDS_TO_SECONDS_COEF = 1000 ** 2
__COMMON_DST_TIMEZONE_TABLE = {
-36000: "America/Adak", # -1000
-32400: "US/Alaska", # -0900
-28800: "US/Pacific", # -0800
-25200: "US/Mountain", # -0700
-21600: "US/Central", # -0600
-18000: "US/Eastern", # -0500
-14400: "Canada/Atlantic", # -0400
-12600: "America/St_Johns", # -0330
-10800: "America/Miquelon", # -0300
7200: "Africa/Tripoli", # 0200
}
__RE_VERSION_STR = re.compile("\d+\.\d+\.\d")
def __init__(self, value):
super(DateTimeConverter, self).__init__(value)
self.__datetime = None
def convert(self):
import datetime
import dateutil.parser
import pytz
if isinstance(self._value, datetime.datetime):
self.__datetime = self._value
return self.__datetime
self.__validate_datetime_string()
try:
self.__datetime = dateutil.parser.parse(self._value)
except (AttributeError, ValueError, OverflowError):
try:
raise TypeConversionError(
"failed to parse as a datetime: {}".format(self._value))
except UnicodeEncodeError:
raise TypeConversionError("failed to parse as a datetime")
try:
dst_timezone_name = self.__get_dst_timezone_name(
self.__get_timedelta_sec())
except (AttributeError, KeyError):
return self.__datetime
pytz_timezone = pytz.timezone(dst_timezone_name)
self.__datetime = self.__datetime.replace(tzinfo=None)
self.__datetime = pytz_timezone.localize(self.__datetime)
return self.__datetime
def __get_timedelta_sec(self):
dt = self.__datetime.utcoffset()
return int(
(
dt.days *
self.__DAYS_TO_SECONDS_COEF +
float(dt.seconds)
) +
dt.microseconds / self.__MICROSECONDS_TO_SECONDS_COEF
)
def __get_dst_timezone_name(self, offset):
return self.__COMMON_DST_TIMEZONE_TABLE[offset]
def __validate_datetime_string(self):
"""
        Version strings (such as "3.3.5") must be rejected explicitly,
        because a version string could otherwise be converted to a
        datetime value.
"""
try:
if self.__RE_VERSION_STR.search(self._value) is not None:
raise TypeConversionError(
"invalid datetime string: version string found " +
self._value)
except TypeError:
try:
raise TypeConversionError(
"invalid datetime string: {}".format(self._value))
except UnicodeEncodeError:
raise TypeConversionError(
"invalid datetime string")
class DictionaryConverter(ValueConverter):
def convert(self):
try:
return dict(self._value)
except (TypeError, ValueError):
try:
raise TypeConversionError(
"failed to convert: {}".format(self._value))
except UnicodeEncodeError:
raise TypeConversionError("failed to convert to bool")
|
Python
| 0.015828
|
@@ -210,16 +210,50 @@
onError%0A
+from ._function import to_unicode%0A
%0A%0Aclass
@@ -1090,103 +1090,26 @@
-try:%0A return str(self._value)%0A except UnicodeEncodeError:%0A return
+return to_unicode(
self
@@ -1107,32 +1107,33 @@
code(self._value
+)
%0A%0A%0Aclass Integer
|
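The commit replaces the try/except str() dance with a to_unicode() helper imported from ._function, which is not shown in this record. A plausible Python 2/3-compatible sketch of such a helper (an assumption, not the library's actual code):

import sys

def to_unicode(value):
    # Hypothetical reconstruction of dataproperty._function.to_unicode
    if sys.version_info[0] >= 3:
        return str(value)
    try:
        return unicode(value)  # noqa: F821 -- Python 2 only
    except UnicodeDecodeError:
        return value.decode('utf-8', errors='replace')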
93d53e4dc47d31e1bca817ce4718032154814c80
|
Clean up debug code.
|
datawire/views/entities.py
|
datawire/views/entities.py
|
from flask import Blueprint, request, url_for
from sqlalchemy.sql.functions import count
from sqlalchemy.sql.expression import or_, and_, func
from sqlalchemy.orm import aliased
from datawire.core import db, elastic, elastic_index
from datawire.auth import require
from datawire.model import Entity, Category, Match, Frame
from datawire.processing.queue import publish, entity_queue
from datawire.views.util import jsonify, obj_or_404
from datawire.views.pager import query_pager
entities = Blueprint('entities', __name__)
@entities.route('/categories')
def category_index():
categories = Category.all()
return jsonify({
'results': categories,
'count': len(categories)
})
@entities.route('/categories/<key>')
def category_get(key):
category = obj_or_404(Category.by_key(key))
return jsonify(category)
@entities.route('/users/<int:id>/entities')
def user_index(id):
require.user_id(id)
q = Entity.all().filter(Entity.user_id == id)
if 'category' in request.args:
q = q.filter(Entity.category == request.args.get('category'))
all_entities = [{"term": {"entities": e.id}} for e in q]
# esq['query']['filtered']['filter']['or'].append(fq)
esq = {
"query": {
"filtered": {
"query": {"match_all": {}}, "filter": {}
}
},
"size": 0,
"facets": {
"entities": {
"terms": {"field": "entities"}
},
"global": {
"terms": {"field": "entities"},
"global": True,
"facet_filter": {"or": all_entities}
}
}
}
filters = request.args.getlist('entity')
if len(filters):
esq['query']['filtered']['filter']['and'] = []
for entity_id in filters:
fq = {"term": {"entities": entity_id}}
esq['query']['filtered']['filter']['and'].append(fq)
else:
esq['query']['filtered']['filter']['or'] = all_entities
#esq['facets']['global']['facet_filter'] = esq['query']['filtered']['filter'].copy()
res = elastic.search_raw(esq, elastic_index, 'frame')
from pprint import pprint
pprint(res)
filtered_counts = res['facets']['entities']['terms']
filtered_counts = dict([(int(c['term']), c['count']) for c in filtered_counts])
total_counts = res['facets']['global']['terms']
total_counts = dict([(int(c['term']), c['count']) for c in total_counts])
q = Entity.all().filter(Entity.user_id == id)
if 'category' in request.args:
q = q.filter(Entity.category == request.args.get('category'))
def transform_result(entity):
data = entity.to_ref()
data['filtered_count'] = filtered_counts.get(entity.id, 0)
data['total_count'] = total_counts.get(entity.id, 0)
return data
return query_pager(q, 'entities.user_index', transform=transform_result, id=id)
@entities.route('/entities/<id>')
def get(id):
require.logged_in()
entity = obj_or_404(Entity.by_user_and_id(request.user, id))
return jsonify(entity)
@entities.route('/entities', methods=['POST'])
def create():
require.logged_in()
entity = Entity.create(request.form, request.user)
db.session.commit()
publish(entity_queue, 'entity.create', entity)
return jsonify(entity)
@entities.route('/entities/<int:id>', methods=['POST'])
def update(id):
require.logged_in()
entity = obj_or_404(Entity.by_user_and_id(request.user, id))
data = {'old': entity.to_dict()}
entity.update(request.form)
db.session.commit()
data['new'] = entity
publish(entity_queue, 'entity.update', data)
return jsonify(entity)
@entities.route('/entities/<int:id>', methods=['DELETE'])
def delete(id):
require.logged_in()
entity = obj_or_404(Entity.by_user_and_id(request.user, id))
publish(entity_queue, 'entity.delete', entity)
entity.delete()
db.session.commit()
return jsonify({'status': 'gone'}, status=410)
|
Python
| 0.000005
|
@@ -1078,25 +1078,24 @@
category'))%0A
-%0A
all_enti
@@ -1146,69 +1146,8 @@
n q%5D
-%0A # esq%5B'query'%5D%5B'filtered'%5D%5B'filter'%5D%5B'or'%5D.append(fq)
%0A%0A
@@ -1948,98 +1948,8 @@
es%0A%0A
- #esq%5B'facets'%5D%5B'global'%5D%5B'facet_filter'%5D = esq%5B'query'%5D%5B'filtered'%5D%5B'filter'%5D.copy()%0A%0A
@@ -2006,55 +2006,8 @@
e')%0A
- from pprint import pprint%0A pprint(res)%0A%0A
|
74e9ab11dcaef919cba198aef6b8124dcfc3007c
|
handle leaving a room and entering again right after (fixes #17)
|
socketio/base_manager.py
|
socketio/base_manager.py
|
import itertools
import six
class BaseManager(object):
"""Manage client connections.
This class keeps track of all the clients and the rooms they are in, to
support the broadcasting of messages. The data used by this class is
stored in a memory structure, making it appropriate only for single process
services. More sophisticated storage backends can be implemented by
subclasses.
"""
def __init__(self):
self.server = None
self.rooms = {}
self.pending_removals = []
self.callbacks = {}
def initialize(self, server):
self.server = server
def get_namespaces(self):
"""Return an iterable with the active namespace names."""
return six.iterkeys(self.rooms)
def get_participants(self, namespace, room):
"""Return an iterable with the active participants in a room."""
for sid, active in six.iteritems(self.rooms[namespace][room]):
if active:
yield sid
self._clean_rooms()
def connect(self, sid, namespace):
"""Register a client connection to a namespace."""
self.enter_room(sid, namespace, None)
self.enter_room(sid, namespace, sid)
def is_connected(self, sid, namespace):
return sid in self.rooms[namespace][None] and \
self.rooms[namespace][None][sid]
def disconnect(self, sid, namespace):
"""Register a client disconnect from a namespace."""
rooms = []
for room_name, room in six.iteritems(self.rooms[namespace]):
if sid in room:
rooms.append(room_name)
for room in rooms:
self.leave_room(sid, namespace, room)
if sid in self.callbacks and namespace in self.callbacks[sid]:
del self.callbacks[sid][namespace]
if len(self.callbacks[sid]) == 0:
del self.callbacks[sid]
def enter_room(self, sid, namespace, room):
"""Add a client to a room."""
if namespace not in self.rooms:
self.rooms[namespace] = {}
if room not in self.rooms[namespace]:
self.rooms[namespace][room] = {}
self.rooms[namespace][room][sid] = True
def leave_room(self, sid, namespace, room):
"""Remove a client from a room."""
try:
# do not delete immediately, just mark the client as inactive
# _clean_rooms() will do the clean up when it is safe to do so
self.rooms[namespace][room][sid] = False
self.pending_removals.append((namespace, room, sid))
except KeyError:
pass
def close_room(self, room, namespace):
"""Remove all participants from a room."""
try:
for sid in self.get_participants(namespace, room):
self.leave_room(sid, namespace, room)
except KeyError:
pass
def get_rooms(self, sid, namespace):
"""Return the rooms a client is in."""
r = []
for room_name, room in six.iteritems(self.rooms[namespace]):
if room_name is not None and sid in room and room[sid]:
r.append(room_name)
return r
def emit(self, event, data, namespace, room=None, skip_sid=None,
callback=None):
"""Emit a message to a single client, a room, or all the clients
connected to the namespace."""
if namespace not in self.rooms or room not in self.rooms[namespace]:
return
for sid in self.get_participants(namespace, room):
if sid != skip_sid:
if callback is not None:
id = self._generate_ack_id(sid, namespace, callback)
else:
id = None
self.server._emit_internal(sid, event, data, namespace, id)
def trigger_callback(self, sid, namespace, id, data):
"""Invoke an application callback."""
callback = None
try:
callback = self.callbacks[sid][namespace][id]
except KeyError:
# if we get an unknown callback we just ignore it
self.server.logger.warning('Unknown callback received, ignoring.')
else:
del self.callbacks[sid][namespace][id]
if callback is not None:
callback(*data)
def _generate_ack_id(self, sid, namespace, callback):
"""Generate a unique identifier for an ACK packet."""
namespace = namespace or '/'
if sid not in self.callbacks:
self.callbacks[sid] = {}
if namespace not in self.callbacks[sid]:
self.callbacks[sid][namespace] = {0: itertools.count(1)}
id = six.next(self.callbacks[sid][namespace][0])
self.callbacks[sid][namespace][id] = callback
return id
def _clean_rooms(self):
"""Remove all the inactive room participants."""
for namespace, room, sid in self.pending_removals:
try:
del self.rooms[namespace][room][sid]
except KeyError:
# failures here could mean there were duplicates so we ignore
continue
if len(self.rooms[namespace][room]) == 0:
del self.rooms[namespace][room]
if len(self.rooms[namespace]) == 0:
del self.rooms[namespace]
self.pending_removals = []
|
Python
| 0
|
@@ -1977,32 +1977,101 @@
t to a room.%22%22%22%0A
+ self._clean_rooms() # ensure our rooms are up to date first%0A
if names
|
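The one-line fix flushes pending removals before a client re-enters a room, so a leave immediately followed by a re-enter is not wiped out by a later cleanup pass. A small repro of the sequence it guards against, assuming the BaseManager shown above is importable:

mgr = BaseManager()
mgr.enter_room('sid1', '/', 'chat')
mgr.leave_room('sid1', '/', 'chat')      # marks inactive, queues a removal
mgr.enter_room('sid1', '/', 'chat')      # with the fix: queued removal flushed first
list(mgr.get_participants('/', 'chat'))  # triggers _clean_rooms()
assert 'sid1' in mgr.rooms['/']['chat']  # without the fix this raises KeyError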
ec7b52b457e749b4b4a1e9110ede221f2f0d5fe9
|
Fix fatal error in data processing.
|
data/propaganda2mongo.py
|
data/propaganda2mongo.py
|
import bson.json_util
from bson.objectid import ObjectId
import json
import sys
def main():
node_table = {}
while True:
line = sys.stdin.readline()
if not line:
break
record = json.loads(line)
ident = str(record["twitter_id"])
aoid = node_table.get(ident)
if aoid is None:
node_table[ident] = aoid = ObjectId()
print bson.json_util.dumps({"_id": aoid,
"type": "node",
"data": {"twitter_id": ident,
"type": "audience",
"propaganda_urls_exposed_to": record["propaganda_urls_exposed_to"],
"geos": record["geos"],
"timestamps_of_propaganda": record["timestamps_of_propaganda"]}})
for p in record["propagandists_followed"]:
oid = node_table.get(p)
if oid is None:
node_table[ident] = oid = ObjectId()
print bson.json_util.dumps({"_id": oid,
"type": "node",
"data": {"twitter_id": p,
"type": "propagandist"}})
print bson.json_util.dumps({"_id": ObjectId(),
"type": "link",
"source": aoid,
"target": oid,
"data": {}})
if __name__ == "__main__":
sys.exit(main())
|
Python
| 0.000005
|
@@ -1081,29 +1081,25 @@
node_table%5B
-ident
+p
%5D = oid = Ob
|
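The one-character fix: the propagandist loop cached the fresh ObjectId under the audience's ident instead of p, so a propagandist followed by many audience members was assigned duplicate node ids. The corrected memoization pattern in isolation (the helper name is illustrative):

from bson.objectid import ObjectId

def get_or_create_oid(node_table, twitter_id):
    oid = node_table.get(twitter_id)
    if oid is None:
        # Key the cache by the id being looked up, not the caller's id.
        node_table[twitter_id] = oid = ObjectId()
    return oid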
8fb80540499d0f303d68150304fd896367313f94
|
remove incorrect isinstance check in _children_changed
|
IPython/html/widgets/widget_container.py
|
IPython/html/widgets/widget_container.py
|
"""ContainerWidget class.
Represents a container that can be used to group other widgets.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from .widget import DOMWidget
from IPython.utils.traitlets import Unicode, Tuple, Instance, TraitError
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class TupleOfDOMWidgets(Tuple):
"""Like Tuple(Instance(DOMWidget)), but without checking length."""
def validate_elements(self, obj, value):
for v in value:
if not isinstance(v, DOMWidget):
raise TraitError("Container.children must be DOMWidgets, not %r" % v)
return value
class ContainerWidget(DOMWidget):
_view_name = Unicode('ContainerView', sync=True)
# Keys, all private and managed by helper methods. Flexible box model
# classes...
children = TupleOfDOMWidgets()
_children = TupleOfDOMWidgets(sync=True)
def _children_changed(self, name, old, new):
"""Validate children list.
Makes sure only one instance of any given model can exist in the
children list.
An excellent post on uniqifiers is available at
http://www.peterbe.com/plog/uniqifiers-benchmark
which provides the inspiration for using this implementation. Below
I've implemented the `f5` algorithm using Python comprehensions."""
if new is not None and isinstance(new, list):
seen = {}
def add_item(i):
seen[i.model_id] = True
return i
self._children = [add_item(i) for i in new if not i.model_id in seen]
class PopupWidget(ContainerWidget):
_view_name = Unicode('PopupView', sync=True)
description = Unicode(sync=True)
button_text = Unicode(sync=True)
|
Python
| 0.000062
|
@@ -1954,34 +1954,8 @@
None
- and isinstance(new, list)
:%0A
|
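The removed guard was wrong because children is a Tuple trait, so the new value arrives as a tuple and isinstance(new, list) silently skipped deduplication. A standalone version of the order-preserving f5-style uniquifier the docstring cites, keyed on model_id as in _children_changed:

def unique_by_model_id(widgets):
    seen = set()
    deduped = []
    for widget in widgets:  # works for tuples and lists alike
        if widget.model_id not in seen:
            seen.add(widget.model_id)
            deduped.append(widget)
    return deduped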
d24f3a1f008e4d2bef262a0f1253da04071a180d
|
move dataset crop into generic as it is used in several datasets
|
data_provider/dataset.py
|
data_provider/dataset.py
|
class AbstractDataset(object):
def __init__(self, games):
self.games = games
def get_data(self, indices=list()):
if len(indices) > 0:
return [self.games[i] for i in indices]
else:
return self.games
def n_examples(self):
return len(self.games)
class DatasetMerger(AbstractDataset):
def __init__(self, datasets):
games = []
for d in datasets:
games += d.get_data()
super(DatasetMerger, self).__init__(games)
|
Python
| 0.000001
|
@@ -1,12 +1,26 @@
+import copy%0A%0A%0A
class Abstra
@@ -328,29 +328,27 @@
%0A%0Aclass
+Crop
Dataset
-Merger
(Abstrac
@@ -366,46 +366,181 @@
-def __init__(self, datasets):%0A
+%22%22%22%0A Each game contains no question/answers but a new object%0A %22%22%22%0A%0A def __init__(self, dataset, expand_objects):%0A old_games = dataset.get_data()%0A new_
game
@@ -546,16 +546,17 @@
es = %5B%5D%0A
+%0A
@@ -563,19 +563,51 @@
for
-d
+g
in
-datase
+old_games:%0A if expand_objec
ts:%0A
@@ -618,16 +618,24 @@
+ new_
games +=
@@ -639,20 +639,88 @@
+=
-d.get_data()
+self.split(g)%0A else:%0A new_games += self.update_ref(g)%0A
%0A
@@ -734,41 +734,867 @@
per(
+Crop
Dataset
-Merger, self).__init__(games)%0A
+, self).__init__(new_games)%0A%0A @staticmethod%0A def load(dataset_cls, expand_objects, **kwargs):%0A return CropDataset(dataset_cls(**kwargs), expand_objects=expand_objects)%0A%0A def split(self, game):%0A games = %5B%5D%0A for obj in game.objects:%0A new_game = copy.copy(game)%0A%0A # select new object%0A new_game.object = obj%0A new_game.object_id = obj.id%0A%0A # Hack the image id to differentiate objects%0A new_game.image = copy.copy(game.image)%0A new_game.image.id = obj.id%0A%0A games.append(new_game)%0A%0A return games%0A%0A def update_ref(self, game):%0A%0A new_game = copy.copy(game)%0A%0A # Hack the image id to differentiate objects%0A new_game.image = copy.copy(game.image)%0A new_game.image.id = game.object_id%0A%0A return %5Bnew_game%5D
|
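A hedged usage sketch for the CropDataset the diff introduces; SomeGameDataset and its constructor arguments are hypothetical stand-ins for whatever AbstractDataset subclass is being wrapped:

base = SomeGameDataset(split='train')           # any AbstractDataset subclass
crops = CropDataset(base, expand_objects=True)  # one game per object crop
print(crops.n_examples())                       # >= base.n_examples()

# Or via the convenience constructor added in the same diff:
crops = CropDataset.load(SomeGameDataset, expand_objects=False, split='train')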
7907aeb6a006655ad96d5d3995b5fdbd4bf00d16
|
fix usage of get_all_enabled_projects
|
scripts/pipeline_main.py
|
scripts/pipeline_main.py
|
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the the Apache License v2.0 with LLVM Exceptions (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script runs in checked out llvm-project directory.
import os
from typing import Dict
from steps import generic_linux, generic_windows, from_shell_output, extend_steps_env, bazel
from sync_fork import sync_fork
import git
import yaml
from choose_projects import ChooseProjects
steps_generators = [
'${BUILDKITE_BUILD_CHECKOUT_PATH}/libcxx/utils/ci/buildkite-pipeline-snapshot.sh',
]
if __name__ == '__main__':
scripts_refspec = os.getenv("ph_scripts_refspec", "main")
no_cache = os.getenv('ph_no_cache') is not None
log_level = os.getenv('ph_log_level', 'WARNING')
notify_emails = list(filter(None, os.getenv('ph_notify_emails', '').split(',')))
# Syncing LLVM fork so any pipelines started from upstream llvm-project
# but then triggered a build on fork will observe the commit.
sync_fork(os.path.join(os.getenv('BUILDKITE_BUILD_PATH', ''), 'llvm-project-fork'), [os.getenv('BUILDKITE_BRANCH'), 'main'])
steps = []
env: Dict[str, str] = {}
for e in os.environ:
if e.startswith('ph_'):
env[e] = os.getenv(e, '')
repo = git.Repo('.')
cp = ChooseProjects(None)
linux_projects, _ = cp.get_all_enabled_projects('linux')
steps.extend(generic_linux(os.getenv('ph_projects', ';'.join(linux_projects)), check_diff=False))
windows_projects, _ = cp.get_all_enabled_projects('windows')
steps.extend(generic_windows(os.getenv('ph_projects', ';'.join(windows_projects))))
steps.extend(bazel([], force=True))
if os.getenv('ph_skip_generated') is None:
env = os.environ.copy()
# BUILDKITE_COMMIT might be an alias, e.g. "HEAD". Resolve it to make the build hermetic.
if ('BUILDKITE_COMMIT' not in env) or (env['BUILDKITE_COMMIT'] == "HEAD"):
env['BUILDKITE_COMMIT'] = repo.head.commit.hexsha
for gen in steps_generators:
steps.extend(from_shell_output(gen, env=env))
notify = []
for e in notify_emails:
notify.append({'email': e})
extend_steps_env(steps, env)
print(yaml.dump({'steps': steps, 'notify': notify}))
|
Python
| 0.000002
|
@@ -1741,35 +1741,32 @@
linux_projects
-, _
= cp.get_all_en
@@ -1915,11 +1915,8 @@
ects
-, _
= c
|
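The fix drops the `, _` unpacking because get_all_enabled_projects() now returns just the project list rather than a pair. A defensive wrapper that tolerates both shapes (a hypothetical helper, not in the commit):

def enabled_projects(cp, platform):
    result = cp.get_all_enabled_projects(platform)
    if isinstance(result, tuple):  # old shape: (projects, something_else)
        result = result[0]
    return list(result)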
c38ad4f02d6f036f23ef6d3c1e033e9843fb068a
|
comment unused tests
|
functional_tests/test_admins.py
|
functional_tests/test_admins.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from unittest import skip
from django.contrib.admin.templatetags.admin_urls import admin_urlname
from .base import FunctionalTest
from nowait.tests.factories import AdminF
from nowait.models import BookingType
@skip
class AdminTest(FunctionalTest):
def setUp(self):
super(AdminTest, self).setUp()
self.admin = AdminF()
self.create_pre_authenticated_session(self.admin)
def test_add_booking_type(self):
self.browser.get(
self.get_url(admin_urlname(BookingType._meta, 'changelist')))
|
Python
| 0
|
@@ -17,16 +17,18 @@
f-8 -*-%0A
+#
from __f
@@ -80,35 +80,12 @@
ort%0A
-%0Afrom unittest import skip%0A
+#%0A#
from
@@ -151,17 +151,20 @@
urlname%0A
-%0A
+#%0A#
from .ba
@@ -188,16 +188,18 @@
nalTest%0A
+#
from now
@@ -232,16 +232,18 @@
AdminF%0A
+#
from now
@@ -276,16 +276,14 @@
ype%0A
-%0A%0A@skip%0A
+#%0A#%0A#
clas
@@ -311,16 +311,18 @@
lTest):%0A
+#
def
@@ -326,32 +326,34 @@
ef setUp(self):%0A
+#
super(Ad
@@ -375,16 +375,18 @@
setUp()%0A
+#
@@ -407,16 +407,18 @@
dminF()%0A
+#
@@ -467,17 +467,20 @@
.admin)%0A
-%0A
+#%0A#
def
@@ -508,16 +508,18 @@
(self):%0A
+#
@@ -536,16 +536,18 @@
er.get(%0A
+#
|
18bc54f964a2925005543df8b4989271ad4464be
|
Fix inheritance in soc.models.base module. FieldsProxy inherited from DbModelForm, which was deleted in previous commits; replace it with BaseForm.
|
app/soc/models/base.py
|
app/soc/models/base.py
|
#!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing enhanced db.Model classes.
The classes in this module are intended to serve as base classes for all
Melange Datastore Models.
"""
__authors__ = [
'"Todd Larsen" <tlarsen@google.com>',
]
from google.appengine.ext import db
from soc.views.helper import forms as forms_helper
class ModelWithFieldAttributes(db.Model):
"""A db.Model extension that provides access to Model properties attributes.
Due to the way the Property class in Google App Engine implements __get__()
and __set__(), it is not possible to access attributes of Model properties,
such as verbose_name, from within a Django template. This class works
around that limitation by creating an inner Form class per Model class,
since an unbound Form object contains (most of?) the property attributes
attached to each corresponding Form field.
  Some attributes are renamed during the conversion from a Model Property
  to a Form field; for example, verbose_name becomes label.  This is
  tolerable because any actual Form code refers to these new names, so
  they should be familiar to view creators.
"""
_fields_cache = None
@classmethod
def fields(cls):
"""Called by the Django template engine during template instantiation.
Since the attribute names use the Form fields naming instead of the
Property attribute naming, accessing, for example:
{{ entity.property.verbose_name }}
is accomplished using:
{{ entity.fields.property.label }}
Args:
cls: Model class, so that each Model class can create its own
unbound Form the first time fields() is called by the Django
template engine.
Returns:
A (created-on-first-use) unbound Form object that can be used to
access Property attributes that are not accessible from the
Property itself via the Model entity.
"""
if not cls._fields_cache or (cls != cls._fields_cache.__class__.Meta.model):
class FieldsProxy(forms_helper.DbModelForm):
"""Form used as a proxy to access User model properties attributes.
"""
class Meta:
"""Inner Meta class that pairs the User Model with this "form".
"""
#: db.Model subclass for which to access model properties attributes
model = cls
cls._fields_cache = FieldsProxy()
return cls._fields_cache
|
Python
| 0.000001
|
@@ -2608,15 +2608,12 @@
per.
-DbModel
+Base
Form
|
b4acc5d4c5f6b3e94225ca5926d06a50c511173c
|
add json exporter
|
furnito_crawler/json_manager.py
|
furnito_crawler/json_manager.py
|
class Json_Manager:
def __init__(self):
pass
|
Python
| 0
|
@@ -1,8 +1,21 @@
+import json%0A%0A
class Js
@@ -63,8 +63,419 @@
pass%0A
+%0A    def export_json(self, path, json_content):%0A        '''%0A        @usage: export dict to json and store on local storage%0A        @arg: path, path to store json, string e.g. 'downloads/1.json'%0A        @arg: json_content, the content to export, dictionary format%0A        '''%0A        with open(path, 'w') as json_file:%0A            json.dump(json_content, json_file, ensure_ascii = True, indent = 2)%0A    %0A
|
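A usage sketch for the export_json() method the diff adds; the path and payload are illustrative:

manager = Json_Manager()
manager.export_json('downloads/1.json', {'title': 'sofa', 'price': 199})
# ensure_ascii=True escapes non-ASCII characters in the output; pass
# ensure_ascii=False to json.dump if readable UTF-8 output is preferred.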
7c230d442f758e9fd27c581e43f9e0f1a03d0ee9
|
fix print
|
scripts/relay_control.py
|
scripts/relay_control.py
|
#!/usr/bin/python
import RPi.GPIO as GPIO
import argparse
import sys
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
# GPIO/BOARD | Relay IN | Rotors | Zone
# 22/15 | R2 IN2 | 1 | B
# 18/12 | R1 IN2 | 2 | A
# 24/18 | R1 IN3 | 3 | D
# 17/11 | R1 IN4 | 4 | C
# 27/13 | R2 IN1 | 5 | E
class RelayControl(object):
def set(self):
parser = argparse.ArgumentParser(
description='Set relay state high=1 or low=0')
parser.add_argument('--relay', help='Set relay 1/2/3/4/5 or *', required=False)
parser.add_argument('--state',help='Set state high=1 or low=0', required=False)
args = parser.parse_args(sys.argv[2:])
if args.relay == "*":
print 'Set all relay to state=%s' % args.state
setAll(args.state)
else:
			print 'Set relay=%s to state=%s' % (args.relay, args.state)
GPIO.setup(self.relayIO[args.relay], GPIO.OUT)
GPIO.output(self.relayIO[args.relay], int(args.state))
GPIO.cleanup()
def toggle(self):
parser = argparse.ArgumentParser(
description='Toggle relay value')
parser.add_argument('--relay', help='Set relay 1/2/3/4/5', required=False)
args = parser.parse_args(sys.argv[2:])
print 'Toggle relay=%s' % args.relay
GPIO.setup(self.relayIO[args.relay], GPIO.OUT)
GPIO.output(self.relayIO[args.relay], not GPIO.input(self.relayIO[args.relay]))
GPIO.cleanup()
def get(self):
parser = argparse.ArgumentParser(
description='Set relay state high=1 or low=0')
parser.add_argument('--relay', help='Set relay 1/2/3/4/5 or *', required=False)
args = parser.parse_args(sys.argv[2:])
if args.relay == "*":
print 'Get all relay state'
state = getAll()
else:
print 'Get relay=%s' % args.relay
GPIO.setup(self.relayIO[args.relay], GPIO.OUT)
state = GPIO.input(int(self.relayIO[args.relay]))
GPIO.cleanup()
return state
def setAll(self, state):
chan_list = []
for relay in self.relayIO:
chan_list.append(self.relayIO[relay])
GPIO.setup(chan_list, GPIO.OUT)
GPIO.output(chan_list, int(state))
GPIO.cleanup()
def getAll(self):
chan_list = []
state_list = []
for relay in self.relayIO:
chan_list.append(self.relayIO[relay])
GPIO.setup(chan_list, GPIO.OUT)
for relay in self.relayIO:
state_list.append(GPIO.input(int(self.relayIO[relay])))
GPIO.cleanup()
return state_list
def __init__(self):
self.relayIO = { "1": 15, "2": 12, "3": 18, "4": 11, "5": 13}
parser = argparse.ArgumentParser(
description='Relay control',
usage='''relay <command> [<args>]
The most commonly used relay commands are:
set Set relay value high or low
get Get relay value high or low
toggle Toggle relay value
''')
parser.add_argument('command', help='Subcommand to run')
# parse_args defaults to [1:] for args, but you need to
# exclude the rest of the args too, or validation will fail
args = parser.parse_args(sys.argv[1:2])
if not hasattr(self, args.command):
print 'Unrecognized command'
parser.print_help()
exit(1)
# use dispatch pattern to invoke method with same name
getattr(self, args.command)()
if __name__ == '__main__':
RelayControl()
|
Python
| 0.000575
|
@@ -1673,23 +1673,33 @@
ate'%0A%09%09%09
-state =
+print 'states=' +
getAll(
@@ -1802,15 +1802,24 @@
%0A%09%09%09
+print '
state
-
=
+' +
GPI
@@ -1874,31 +1874,16 @@
leanup()
-%0A%09%09return state
%0A%0A%09def s
|
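RelayControl uses the two-stage argparse dispatch pattern: __init__ parses only sys.argv[1:2] for the subcommand name, then invokes the method of the same name, and each method re-parses sys.argv[2:] for its own flags. The pattern trimmed to its core, written here for Python 3:

import argparse
import sys

class Dispatcher(object):
    def __init__(self):
        parser = argparse.ArgumentParser(usage='prog <command> [<args>]')
        parser.add_argument('command', help='Subcommand to run')
        args = parser.parse_args(sys.argv[1:2])  # only the subcommand name
        if not hasattr(self, args.command):
            parser.print_help()
            sys.exit(1)
        getattr(self, args.command)()            # e.g. 'set' -> self.set()

    def set(self):
        sub = argparse.ArgumentParser()
        sub.add_argument('--relay')
        sub.add_argument('--state')
        opts = sub.parse_args(sys.argv[2:])      # the subcommand's own flags
        print('set relay=%s state=%s' % (opts.relay, opts.state))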
d7e197c8837f8482c29484dbc36a5d7b751cc076
|
Add create method to ModelForm
|
app/soc/views/forms.py
|
app/soc/views/forms.py
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the boiler plate required to construct templates
"""
__authors__ = [
'"Daniel Hans" <daniel.m.hans@gmail.com>',
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from google.appengine.ext.db import djangoforms
from django.forms import forms
from django.forms import widgets
from django.template import loader
from django.utils.safestring import mark_safe
def choiceWidget(field):
"""Returns a Select widget for the specified field.
"""
label = field.verbose_name
choices = []
choices.append(('', label))
for choice in field.choices:
choices.append((str(choice), unicode(choice)))
return widgets.Select(choices=choices)
def choiceWidgets(model, fields):
"""Returns a dictionary of Select widgets for the specified fields.
"""
return dict((i, choiceWidget(getattr(model, i))) for i in fields)
class ModelForm(djangoforms.ModelForm):
"""Django ModelForm class which uses our implementation of BoundField.
"""
def __iter__(self):
for name, field in self.fields.items():
yield BoundField(self, field, name)
class Form(object):
"""Form class that facilitates the rendering of forms.
"""
class Form(ModelForm):
"""Django Form associated with the class.
"""
pass
def render(self):
"""Renders the template to a string.
Uses the context method to retrieve the appropriate context, uses the
self.templatePath() method to retrieve the template that should be used.
"""
context = self.context()
template_path = 'v2/modules/gsoc/_form.html'
rendered = loader.render_to_string(template_path, dictionary=context)
return rendered
def context(self):
"""Returns the context for the current template.
"""
return {}
def getForm(self):
"""Returns the Django form object associated with the class.
The specialized forms should be defined in subclasses.
"""
return self.Form
class BoundField(forms.BoundField):
"""
"""
def is_required(self):
return self.field.required
def render(self):
attrs = {
'id': self.name
}
widget = self.field.widget
if isinstance(widget, widgets.TextInput):
return self.renderTextInput()
elif isinstance(widget, widgets.DateInput):
return self.renderTextInput()
elif isinstance(widget, widgets.Select):
return self.renderSelect()
elif isinstance(widget, widgets.CheckboxInput):
return self.renderCheckboxInput()
return self.as_widget(attrs=attrs)
def renderCheckboxInput(self):
attrs = {
'id': self.name,
'style': 'opacity: 100;',
}
return mark_safe(
'<label>%s%s%s</label>' % (
self.as_widget(attrs=attrs),
self.field.label,
self._render_is_required()
))
def renderTextInput(self):
attrs = {
'id': self.name,
'class': 'text',
}
return mark_safe('%s%s' % (
self._render_label(), self.as_widget(attrs=attrs)))
def renderSelect(self):
attrs = {
'id': self.name,
'style': 'opacity: 100;',
}
return mark_safe(('%s%s') % (
self.as_widget(attrs=attrs),
self._render_is_required()))
def _render_label(self):
return '<label>%s%s</label>' % (
self.field.label,
self._render_is_required())
def _render_is_required(self):
if self.field.required:
return '<span class="req">*</span>'
else:
return ''
|
Python
| 0.000001
|
@@ -1271,16 +1271,17 @@
oices)%0A%0A
+%0A
def choi
@@ -1451,16 +1451,17 @@
ields)%0A%0A
+%0A
class Mo
@@ -1638,16 +1638,16 @@
tems():%0A
-
yi
@@ -1681,16 +1681,1462 @@
name)%0A%0A
+ def create(self, commit=True, key_name=None, parent=None):%0A %22%22%22Save this form's cleaned data into a new model instance.%0A%0A Args:%0A commit: optional bool, default True; if true, the model instance%0A is also saved to the datastore.%0A key_name: the key_name of the new model instance, default None%0A parent: the parent of the new model instance, default None%0A%0A Returns:%0A The model instance created by this call.%0A Raises:%0A ValueError if the data couldn't be validated.%0A %22%22%22%0A if not self.is_bound:%0A raise ValueError('Cannot save an unbound form')%0A opts = self._meta%0A instance = self.instance%0A if self.instance:%0A raise ValueError('Cannot create a saved form')%0A if self.errors:%0A raise ValueError(%22The %25s could not be created because the data didn't %22%0A 'validate.' %25 opts.model.kind())%0A cleaned_data = self._cleaned_data()%0A converted_data = %7B%7D%0A for name, prop in opts.model.properties().iteritems():%0A value = cleaned_data.get(name)%0A if value is not None:%0A converted_data%5Bname%5D = prop.make_value_from_form(value)%0A try:%0A instance = opts.model(key_name=key_name, parent=parent, **converted_data)%0A self.instance = instance%0A except db.BadValueError, err:%0A raise ValueError('The %25s could not be created (%25s)' %25%0A (opts.model.kind(), err))%0A if commit:%0A instance.put()%0A return instance%0A%0A
%0Aclass F
|
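A hedged usage sketch for the create() method the diff adds; SomeModelForm and its field are hypothetical. Note that the added code catches db.BadValueError, which needs `from google.appengine.ext import db` alongside the existing djangoforms import:

form = SomeModelForm(data={'title': 'My project'})
if form.is_valid():
    # Builds and puts a brand-new entity; raises ValueError on a bound
    # instance or invalid data, per the method's docstring.
    entity = form.create(commit=True, key_name='project_1')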
5bde6ca1fd62277463156875e874c4c6843923fd
|
Use the correct variable for the test
|
pytest-{{cookiecutter.plugin_name}}/tests/test_{{cookiecutter.plugin_name}}.py
|
pytest-{{cookiecutter.plugin_name}}/tests/test_{{cookiecutter.plugin_name}}.py
|
# -*- coding: utf-8 -*-
def test_bar_fixture(testdir):
"""Make sure that pytest accepts our fixture."""
# create a temporary pytest test module
testdir.makepyfile("""
def test_sth(bar):
assert bar == "europython2015"
""")
# run pytest with the following cmd args
result = testdir.runpytest(
'--foo=something',
'-v'
)
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines([
        '*::test_sth PASSED',
])
# make sure that that we get a '0' exit code for the testsuite
assert result.ret == 0
def test_help_message(testdir):
result = testdir.runpytest(
'--help',
)
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines([
'cat:',
'*--foo=DEST_FOO*Set the value for the fixture "bar".',
])
|
Python
| 0.009051
|
@@ -591,20 +591,16 @@
et == 0%0A
-
%0A%0Adef te
@@ -777,11 +777,36 @@
'
-cat
+%7B%7Bcookiecutter.plugin_name%7D%7D
:',%0A
|
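The templated plugin module this test exercises is not shown in the record; a plausible minimal sketch, inferred from the assertions above (a --foo option with dest DEST_FOO feeding the bar fixture), is:

import pytest

def pytest_addoption(parser):
    group = parser.getgroup('{{cookiecutter.plugin_name}}')
    group.addoption('--foo', action='store', dest='dest_foo',
                    help='Set the value for the fixture "bar".')

@pytest.fixture
def bar(request):
    return request.config.option.dest_foo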
13db02c23cac6ba0eb9116f1429b8d4a8001db6e
|
Update sequences.py
|
appdaemon/sequences.py
|
appdaemon/sequences.py
|
import uuid
import asyncio
from appdaemon.appdaemon import AppDaemon
class Sequences:
def __init__(self, ad: AppDaemon):
self.AD = ad
self.logger = ad.logging.get_child("_sequences")
async def run_sequence_service(self, namespace, domain, service, kwargs):
if "entity_id" not in kwargs:
self.logger.warning("entity_id not given in service call, so will not be executing %s", service)
return
# await self.run_sequence("_services", namespace, kwargs["entity_id"])
self.AD.thread_async.call_async_no_wait(self.run_sequence, "_services", namespace, kwargs["entity_id"])
async def add_sequences(self, sequences):
for sequence in sequences:
await self.AD.state.add_entity(
"rules",
"sequence.{}".format(sequence),
"idle",
attributes={
"friendly_name": sequences[sequence].get("name", sequence),
"loop": sequences[sequence].get("loop", False),
"steps": sequences[sequence]["steps"],
},
)
async def remove_sequences(self, sequences):
if not isinstance(sequences, list):
sequences = [sequences]
for sequence in sequences:
await self.AD.state.remove_entity("rules", "sequence.{}".format(sequence))
async def run_sequence(self, _name, namespace, sequence):
ephemeral_entity = False
loop = False
if isinstance(sequence, str):
entity_id = sequence
if await self.AD.state.entity_exists("rules", entity_id) is False:
self.logger.warning('Unknown sequence "%s" in run_sequence()', sequence)
return None
entity = await self.AD.state.get_state("_services", "rules", sequence, attribute="all")
seq = entity["attributes"]["steps"]
loop = entity["attributes"]["loop"]
else:
#
# Assume it's a list with the actual commands in it
#
entity_id = "sequence.{}".format(uuid.uuid4().hex)
# Create an ephemeral entity for it
ephemeral_entity = True
await self.AD.state.add_entity("rules", entity_id, "idle", attributes={"steps": sequence})
seq = sequence
#
# OK, lets run it
#
coro = self.do_steps(namespace, entity_id, seq, ephemeral_entity, loop)
future = asyncio.ensure_future(coro)
self.AD.futures.add_future(_name, future)
return future
@staticmethod
async def cancel_sequence(_name, future):
future.cancel()
async def do_steps(self, namespace, entity_id, seq, ephemeral_entity, loop):
await self.AD.state.set_state("_sequences", "rules", entity_id, state="active")
try:
while True:
for step in seq:
for command, parameters in step.items():
if command == "sleep":
await asyncio.sleep(float(parameters))
else:
domain, service = str.split(command, "/")
if "namespace" in parameters:
ns = parameters["namespace"]
del parameters["namespace"]
else:
ns = namespace
await self.AD.services.call_service(ns, domain, service, parameters)
if loop is not True:
break
finally:
await self.AD.state.set_state("_sequences", "rules", entity_id, state="idle")
if ephemeral_entity is True:
await self.AD.state.remove_entity("rules", entity_id)
|
Python
| 0.000001
|
@@ -735,80 +735,16 @@
-await self.AD.state.add_entity(%0A %22rules%22,%0A
+entity =
%22se
@@ -774,38 +774,9 @@
nce)
-,
%0A
- %22idle%22,%0A
@@ -797,13 +797,13 @@
utes
-=%7B%0A
+ = %7B%0A
@@ -814,18 +814,16 @@
-
%22friendl
@@ -890,20 +890,16 @@
-
%22loop%22:
@@ -954,20 +954,16 @@
-
-
%22steps%22:
@@ -1001,24 +1001,31 @@
+ %7D%0A%0A
%7D,%0A
@@ -1020,11 +1020,420 @@
-%7D,%0A
+ if not await self.AD.state.entity_exists(%22rules%22, entity):%0A # it doesn't exist so add it%0A await self.AD.state.add_entity(%0A %22rules%22, entity, %22idle%22, attributes=attributes,%0A )%0A else:%0A await self.AD.state.set_state(%0A %22_sequences%22, %22rules%22, entity, state=%22idle%22, attributes=attributes, replace=True%0A
|
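The diff turns add_sequences() into an upsert: create the sequence entity on first sight, otherwise overwrite its attributes in place with replace=True so reloaded sequence definitions take effect. The pattern in isolation (the state handle is assumed to expose the same async API as self.AD.state):

async def upsert_rule_entity(state, entity, attributes):
    if not await state.entity_exists("rules", entity):
        # First sighting: create it idle with the given attributes.
        await state.add_entity("rules", entity, "idle", attributes=attributes)
    else:
        # Already known: replace the attributes wholesale.
        await state.set_state("_sequences", "rules", entity,
                              state="idle", attributes=attributes, replace=True)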
7f6430a7af1cee7f786849b9955179a813925c53
|
Update Skylib version to 0.8.0
|
apple/repositories.bzl
|
apple/repositories.bzl
|
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definitions for handling Bazel repositories used by the Apple rules."""
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_file")
def _colorize(text, color):
"""Applies ANSI color codes around the given text."""
return "\033[1;{color}m{text}{reset}".format(
color = color,
reset = "\033[0m",
text = text,
)
def _green(text):
return _colorize(text, "32")
def _yellow(text):
return _colorize(text, "33")
def _warn(msg):
"""Outputs a warning message."""
print("\n{prefix} {msg}\n".format(
msg = msg,
prefix = _yellow("WARNING:"),
))
def _maybe(repo_rule, name, ignore_version_differences, **kwargs):
"""Executes the given repository rule if it hasn't been executed already.
Args:
repo_rule: The repository rule to be executed (e.g.,
`native.git_repository`.)
name: The name of the repository to be defined by the rule.
ignore_version_differences: If `True`, warnings about potentially
incompatible versions of depended-upon repositories will be silenced.
**kwargs: Additional arguments passed directly to the repository rule.
"""
if name in native.existing_rules():
if not ignore_version_differences:
# Verify that the repository is being loaded from the same URL and tag
# that we asked for, and warn if they differ.
# TODO(allevato): This isn't perfect, because the user could load from the
# same commit SHA as the tag, or load from an HTTP archive instead of a
# Git repository, but this is a good first step toward validating.
# Long-term, we should extend this function to support dependencies other
# than Git.
existing_repo = native.existing_rule(name)
if (existing_repo.get("remote") != kwargs.get("remote") or
existing_repo.get("tag") != kwargs.get("tag")):
expected = "{url} (tag {tag})".format(
tag = kwargs.get("tag"),
url = kwargs.get("remote"),
)
existing = "{url} (tag {tag})".format(
tag = existing_repo.get("tag"),
url = existing_repo.get("remote"),
)
_warn("""\
`build_bazel_rules_apple` depends on `{repo}` loaded from {expected}, but we \
have detected it already loaded into your workspace from {existing}. You may \
run into compatibility issues. To silence this warning, pass \
`ignore_version_differences = True` to `apple_rules_dependencies()`.
""".format(
existing = _yellow(existing),
expected = _green(expected),
repo = name,
))
return
repo_rule(name = name, **kwargs)
def apple_rules_dependencies(ignore_version_differences = False):
"""Fetches repositories that are dependencies of the `rules_apple` workspace.
Users should call this macro in their `WORKSPACE` to ensure that all of the
dependencies of the Swift rules are downloaded and that they are isolated from
changes to those dependencies.
Args:
ignore_version_differences: If `True`, warnings about potentially
incompatible versions of depended-upon repositories will be silenced.
"""
_maybe(
git_repository,
name = "bazel_skylib",
remote = "https://github.com/bazelbuild/bazel-skylib.git",
tag = "0.7.0",
ignore_version_differences = ignore_version_differences,
)
_maybe(
git_repository,
name = "build_bazel_apple_support",
remote = "https://github.com/bazelbuild/apple_support.git",
tag = "0.5.0",
ignore_version_differences = ignore_version_differences,
)
_maybe(
git_repository,
name = "build_bazel_rules_swift",
remote = "https://github.com/bazelbuild/rules_swift.git",
tag = "0.7.0",
ignore_version_differences = ignore_version_differences,
)
_maybe(
http_file,
name = "xctestrunner",
executable = 1,
sha256 = "15fc7d09315a230f3d8ee2913eef8699456366e44b37a9266e36b28517003628",
urls = ["https://github.com/google/xctestrunner/releases/download/0.2.6/ios_test_runner.par"],
ignore_version_differences = ignore_version_differences,
)
|
Python
| 0.000003
|
@@ -794,16 +794,32 @@
tp.bzl%22,
+ %22http_archive%22,
%22http_f
@@ -4044,38 +4044,36 @@
be(%0A
-git_repository
+http_archive
,%0A na
@@ -4097,32 +4097,44 @@
b%22,%0A
-remote =
+urls = %5B%0A
%22https://gi
@@ -4169,35 +4169,154 @@
ylib
-.git%22,%0A tag = %220.7.0
+/releases/download/0.8.0/bazel-skylib.0.8.0.tar.gz%22,%0A %5D,%0A sha256 = %222ef429f5d7ce7111263289644d233707dba35e39696377ebab8b0bc701f7818e
%22,%0A
|
44754da4076cdc5c39b24496da7566af61aa719b
|
Update apple_support and rules_swift dependencies.
|
apple/repositories.bzl
|
apple/repositories.bzl
|
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definitions for handling Bazel repositories used by the Apple rules."""
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
def _colorize(text, color):
"""Applies ANSI color codes around the given text."""
return "\033[1;{color}m{text}{reset}".format(
color = color,
reset = "\033[0m",
text = text,
)
def _green(text):
return _colorize(text, "32")
def _yellow(text):
return _colorize(text, "33")
def _warn(msg):
"""Outputs a warning message."""
print("\n{prefix} {msg}\n".format(
msg = msg,
prefix = _yellow("WARNING:"),
))
def _maybe(repo_rule, name, ignore_version_differences, **kwargs):
"""Executes the given repository rule if it hasn't been executed already.
Args:
repo_rule: The repository rule to be executed (e.g.,
`native.git_repository`.)
name: The name of the repository to be defined by the rule.
ignore_version_differences: If `True`, warnings about potentially
incompatible versions of depended-upon repositories will be silenced.
**kwargs: Additional arguments passed directly to the repository rule.
"""
if name in native.existing_rules():
if not ignore_version_differences:
# Verify that the repository is being loaded from the same URL and tag
# that we asked for, and warn if they differ.
# TODO(allevato): This isn't perfect, because the user could load from the
# same commit SHA as the tag, or load from an HTTP archive instead of a
# Git repository, but this is a good first step toward validating.
# Long-term, we should extend this function to support dependencies other
# than Git.
existing_repo = native.existing_rule(name)
if (existing_repo.get("remote") != kwargs.get("remote") or
existing_repo.get("tag") != kwargs.get("tag")):
expected = "{url} (tag {tag})".format(
tag = kwargs.get("tag"),
url = kwargs.get("remote"),
)
existing = "{url} (tag {tag})".format(
tag = existing_repo.get("tag"),
url = existing_repo.get("remote"),
)
_warn("""\
`build_bazel_rules_apple` depends on `{repo}` loaded from {expected}, but we \
have detected it already loaded into your workspace from {existing}. You may \
run into compatibility issues. To silence this warning, pass \
`ignore_version_differences = True` to `apple_rules_dependencies()`.
""".format(
existing = _yellow(existing),
expected = _green(expected),
repo = name,
))
return
repo_rule(name = name, **kwargs)
def apple_rules_dependencies(ignore_version_differences = False):
"""Fetches repositories that are dependencies of the `rules_apple` workspace.
Users should call this macro in their `WORKSPACE` to ensure that all of the
dependencies of the Swift rules are downloaded and that they are isolated from
changes to those dependencies.
Args:
ignore_version_differences: If `True`, warnings about potentially
incompatible versions of depended-upon repositories will be silenced.
"""
_maybe(
git_repository,
name = "bazel_skylib",
remote = "https://github.com/bazelbuild/bazel-skylib.git",
tag = "0.7.0",
ignore_version_differences = ignore_version_differences,
)
_maybe(
git_repository,
name = "build_bazel_apple_support",
remote = "https://github.com/bazelbuild/apple_support.git",
tag = "0.4.0",
ignore_version_differences = ignore_version_differences,
)
_maybe(
git_repository,
name = "build_bazel_rules_swift",
remote = "https://github.com/bazelbuild/rules_swift.git",
tag = "0.6.0",
ignore_version_differences = ignore_version_differences,
)
|
Python
| 0.000152
|
@@ -4344,9 +4344,9 @@
%220.
-4
+5
.0%22,
@@ -4583,9 +4583,9 @@
%220.
-6
+7
.0%22,
|
2ca7b67983e518bf684e4c5561c91d9f8946204d
|
Update xctestrunner dependency.
|
apple/repositories.bzl
|
apple/repositories.bzl
|
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definitions for handling Bazel repositories used by the Apple rules."""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file")
def _colorize(text, color):
"""Applies ANSI color codes around the given text."""
return "\033[1;{color}m{text}{reset}".format(
color = color,
reset = "\033[0m",
text = text,
)
def _green(text):
return _colorize(text, "32")
def _yellow(text):
return _colorize(text, "33")
def _warn(msg):
"""Outputs a warning message."""
print("\n{prefix} {msg}\n".format(
msg = msg,
prefix = _yellow("WARNING:"),
))
def _maybe(repo_rule, name, ignore_version_differences, **kwargs):
"""Executes the given repository rule if it hasn't been executed already.
Args:
repo_rule: The repository rule to be executed (e.g.,
`http_archive`.)
name: The name of the repository to be defined by the rule.
ignore_version_differences: If `True`, warnings about potentially
incompatible versions of depended-upon repositories will be silenced.
**kwargs: Additional arguments passed directly to the repository rule.
"""
if native.existing_rule(name):
if not ignore_version_differences:
# Verify that the repository is being loaded from the same URL and tag
# that we asked for, and warn if they differ.
# TODO(allevato): This isn't perfect, because the user could load from the
# same commit SHA as the tag, or load from an HTTP archive instead of a
# Git repository, but this is a good first step toward validating.
# Long-term, we should extend this function to support dependencies other
# than Git.
existing_repo = native.existing_rule(name)
if (existing_repo.get("remote") != kwargs.get("remote") or
existing_repo.get("tag") != kwargs.get("tag")):
expected = "{url} (tag {tag})".format(
tag = kwargs.get("tag"),
url = kwargs.get("remote"),
)
existing = "{url} (tag {tag})".format(
tag = existing_repo.get("tag"),
url = existing_repo.get("remote"),
)
_warn("""\
`build_bazel_rules_apple` depends on `{repo}` loaded from {expected}, but we \
have detected it already loaded into your workspace from {existing}. You may \
run into compatibility issues. To silence this warning, pass \
`ignore_version_differences = True` to `apple_rules_dependencies()`.
""".format(
existing = _yellow(existing),
expected = _green(expected),
repo = name,
))
return
repo_rule(name = name, **kwargs)
def apple_rules_dependencies(ignore_version_differences = False):
"""Fetches repositories that are dependencies of the `rules_apple` workspace.
Users should call this macro in their `WORKSPACE` to ensure that all of the
    dependencies of the Apple rules are downloaded and that they are isolated from
changes to those dependencies.
Args:
ignore_version_differences: If `True`, warnings about potentially
incompatible versions of depended-upon repositories will be silenced.
"""
_maybe(
http_archive,
name = "bazel_skylib",
urls = [
"https://github.com/bazelbuild/bazel-skylib/releases/download/0.9.0/bazel_skylib-0.9.0.tar.gz",
],
sha256 = "1dde365491125a3db70731e25658dfdd3bc5dbdfd11b840b3e987ecf043c7ca0",
ignore_version_differences = ignore_version_differences,
)
_maybe(
http_archive,
name = "build_bazel_apple_support",
urls = [
"https://github.com/bazelbuild/apple_support/releases/download/0.7.1/apple_support.0.7.1.tar.gz",
],
sha256 = "122ebf7fe7d1c8e938af6aeaee0efe788a3a2449ece5a8d6a428cb18d6f88033",
ignore_version_differences = ignore_version_differences,
)
_maybe(
http_archive,
name = "build_bazel_rules_swift",
urls = [
"https://github.com/bazelbuild/rules_swift/releases/download/0.12.1/rules_swift.0.12.1.tar.gz",
],
sha256 = "18cd4df4e410b0439a4935f9ca035bd979993d42372ba79e7f2d4fafe9596ef0",
ignore_version_differences = ignore_version_differences,
)
_maybe(
http_file,
name = "xctestrunner",
executable = 1,
sha256 = "7c088842ebd4f47297a167e3f3df77eab54a8651f6b6d87e2c275a9e6c8adfe5",
urls = ["https://github.com/google/xctestrunner/releases/download/0.2.9/ios_test_runner.par"],
ignore_version_differences = ignore_version_differences,
)
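# --- Editor's sketch (not part of the original file) ---
# Switching from git_repository tags to http_archive with a sha256 pins each
# dependency by content rather than by a movable Git ref. The hypothetical
# helper below illustrates the integrity check Bazel performs internally on
# the downloaded archive; it is a sketch, not Bazel's actual implementation.
import hashlib

def verify_archive(path, expected_sha256):
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).hexdigest()
    if digest != expected_sha256:
        raise ValueError("checksum mismatch for %s: got %s, expected %s"
                         % (path, digest, expected_sha256))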
|
Python
| 0.000089
|
@@ -5152,72 +5152,72 @@
= %22
-7c088842ebd4f47297a167e3f3df77eab54a8651f6b6d87e2c275a9e6c8adfe5
+9e46d5782a9dc7d40bc93c99377c091886c180b8c4ffb9f79a19a58e234cdb09
%22,%0A
@@ -5293,17 +5293,18 @@
oad/0.2.
-9
+10
/ios_tes
|
6ea62e02a8d966fa78889bca184d61e718c80f9c
|
add a request buffer in order to avoid duplicate queries at the same time; (3)
|
lib/rome/utils/MemoizationDecorator.py
|
lib/rome/utils/MemoizationDecorator.py
|
__author__ = 'jonathan'
import threading
import Queue
class MemoizationDecorator(object):
def __init__(self, decorated):
self.decorated = decorated
self.conditions_objects_dict = {}
self.memory = {}
self.insertion_lock = threading.Lock()
def __getattr__(self, attribute_name):
decorated_attribute = getattr(self.decorated, attribute_name)
if hasattr(decorated_attribute, "__call__"):
callable_object = self.FunctionWrapper(decorated_attribute, method_name=attribute_name, memory=self.memory, insertion_lock=self.insertion_lock)
return callable_object
return decorated_attribute
class FunctionWrapper:
"""Class that is used to "simulate" to delay call to decorated's method."""
def __init__(self, callable_object, method_name, memory, insertion_lock):
self.callable_object = callable_object
self.method_name = method_name
self.memory = memory
self.insertion_lock = insertion_lock
def compute_hash(self, method_name, *args, **kwargs):
return hash("%s_%s_%s" % (method_name, args, kwargs))
def __call__(self, *args, **kwargs):
call_hash = self.compute_hash(self.method_name, args, kwargs)
if call_hash in self.memory:
                # Safely increment the number of threads waiting for the expected value
item = self.memory[call_hash]
should_retry = True
item["modification_lock"].acquire()
if not item["closed"]:
item["waiting_threads_count"] += 1
should_retry = False
item["modification_lock"].release()
if should_retry:
                    # The memory entry has been destroyed by a master call; abort and retry the method.
return self.__call__(*args, **kwargs)
# Wait for the expected value
# item["event"].wait()
result = item["result_queue"].get()
                # Once the expected value has been collected, safely decrement the number of remaining slave calls.
item["modification_lock"].acquire()
# result = self.memory[call_hash]["result"]
self.memory[call_hash]["waiting_threads_count"] -= 1
item["modification_lock"].release()
else:
# try insertion
should_retry = True
self.insertion_lock.acquire()
if not call_hash in self.memory:
self.memory[call_hash] = {
"modification_lock": threading.Lock(),
"result_queue": Queue.Queue(),
"result": None,
"waiting_threads_count": 0,
"closed": False
}
should_retry = False
self.insertion_lock.release()
if should_retry:
                    # The memory entry has been initialised by a faster concurrent call; abort and retry as a slave.
return self.__call__(*args, **kwargs)
                # Compute the expected value and store it in the shared memory.
result = self.callable_object(*args, **kwargs)
self.memory[call_hash]["result"] = result
                # Safely close the memory item
self.memory[call_hash]["modification_lock"].acquire()
self.memory[call_hash]["closed"] = True
self.memory[call_hash]["modification_lock"].release()
# notify paused concurrent calls that the expected value is ready to be used.
# self.memory[call_hash]["event"].set()
# delete the memory item
item = self.memory[call_hash]
                # Send the result to concurrent slave calls until they are all satisfied.
while item["waiting_threads_count"] > 0:
                    # Safely reload the item
item["result_queue"].put(result)
item["modification_lock"].acquire()
item = self.memory[call_hash]
item["modification_lock"].release()
# Once there are no more slave calls, the item can be destroyed
item["modification_lock"].acquire()
self.insertion_lock.acquire()
del self.memory[call_hash]
item["modification_lock"].release()
self.insertion_lock.release()
# result = item["result"]
return result
def memoization_decorator(func):
def wrapper(*args, **kwargs):
return MemoizationDecorator(func(*args, **kwargs))
return wrapper
if __name__ == '__main__':
import time
class Foo(object):
def get_magical_value(self, cpt):
print("starting")
time.sleep(7)
print("ending")
return cpt
# obj1 = Foo()
obj1 = MemoizationDecorator(Foo())
def do_request():
value = obj1.get_magical_value(42)
print(value)
for n in range(2):
thread = threading.Thread(target=do_request)
thread.start()
time.sleep(1)
time.sleep(3)
for n in range(3):
thread = threading.Thread(target=do_request)
thread.start()
time.sleep(1)
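# --- Editor's sketch (not part of the original file) ---
# A simplified, hypothetical version of the request coalescing implemented
# above, using threading.Event instead of a Queue plus waiting-thread counts:
# the first caller ("master") computes, concurrent callers ("slaves") block on
# the event and reuse the result. Cleanup on exceptions in fn is deliberately
# omitted; it relies on the `import threading` at the top of this module.
class Coalescer(object):
    def __init__(self, fn):
        self.fn = fn
        self.lock = threading.Lock()
        self.inflight = {}

    def __call__(self, *args):
        with self.lock:
            entry = self.inflight.get(args)
            master = entry is None
            if master:
                entry = {"event": threading.Event(), "result": None}
                self.inflight[args] = entry
        if master:
            # Master: compute once, publish the result, then retire the entry.
            entry["result"] = self.fn(*args)
            entry["event"].set()
            with self.lock:
                del self.inflight[args]
        else:
            # Slave: wait for the master to publish, then reuse its result.
            entry["event"].wait()
        return entry["result"]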
|
Python
| 0.000002
|
@@ -387,24 +387,26 @@
ame)%0A
+ #
if hasattr(
@@ -438,32 +438,34 @@
all__%22):%0A
+ #
callable_ob
@@ -596,32 +596,34 @@
on_lock)%0A
+ #
return call
|
fcdac4f394afed0225ae6411f9e4c4c32d0fae0f
|
add all possible ids. Make factory classes
|
geetools/collection/sentinel.py
|
geetools/collection/sentinel.py
|
# coding=utf-8
""" Google Earth Engine Sentinel Collections """
from . import Collection, TODAY, Band
from .. import bitreader, cloud_mask, tools
from .. import algorithms as module_alg
import ee
NUNBERS = [1, 2]
PROCESSES = ['TOA', 'SR']
class Sentinel2(Collection):
""" Sentinel 2 Collection """
def __init__(self, process='TOA'):
super(Sentinel2, self).__init__()
if process not in PROCESSES:
msg = '{} is not a valid process'
raise ValueError(msg.format(process))
self._bands = None
if process == 'TOA':
self.id = 'COPERNICUS/S2'
else:
self.id = 'COPERNICUS/S2_SR'
self.number = 2
self.spacecraft = 'SENTINEL'
self.process = process
self.start_date = '2015-06-23'
self.end_date = TODAY
self.cloud_cover = 'CLOUD_COVERAGE_ASSESSMENT'
self.algorithms = {}
if self.process == 'SR':
self.algorithms['scl_masks'] = self.SCL_masks
@property
def bands(self):
if not self._bands:
band = [None]*30
common = {'min':0, 'max': 10000, 'precision': 'uint16',
'reference': 'optical'}
band[0] = Band('B1', 'aerosol', scale=60, **common)
band[1] = Band('B2', 'blue', scale=10, **common)
band[2] = Band('B3', 'green', scale=10, **common)
band[3] = Band('B4', 'red', scale=10, **common)
band[4] = Band('B5', 'red_edge_1', scale=20, **common)
band[5] = Band('B6', 'red_edge_2', scale=20, **common)
band[6] = Band('B7', 'red_edge_3', scale=20, **common)
band[7] = Band('B8', 'nir', scale=10, **common)
band[8] = Band('B8A', 'red_edge_4', scale=20, **common)
band[9] = Band('B9', 'water_vapor', scale=60, **common)
swir = Band('B11', 'swir', scale=20, **common)
swir2 = Band('B12', 'swir2', scale=20, **common)
qa10 = Band('QA10', 'qa10', scale=10, reference='bits')
qa20 = Band('QA20', 'qa20', scale=20, reference='bits')
qa60 = Band('QA60', 'qa60', scale=60, reference='bits',
bits={'10':{1:'cloud'}, '11':{1:'cirrus'}})
if self.process in ['TOA']:
band[10] = Band('B10', 'cirrus', scale=60, **common)
band[11] = swir
band[12] = swir2
band[13] = qa10
band[14] = qa20
band[15] = qa60
if self.process in ['SR']:
band[10] = swir
band[11] = swir2
band[12] = Band('AOT', 'aerosol_thickness', 'uint16', 10,
0, 65535, 'optical')
band[13] = Band('WVP', 'water_vapor_pressure', 'uint16', 10,
0, 65535, 'optical')
band[14] = Band('SCL', 'scene_classification_map', 'uint8', 20,
1, 11, 'classification')
self._bands = [b for b in band if b]
return self._bands
def SCL_masks(self, image):
""" Decodify the SCL bands and create a mask for each category """
if self.process == 'SR':
scl = image.select('SCL')
data = ee.Dictionary(self.SCL_data)
def wrap(band_value, name):
band_value = ee.Number.parse(band_value)
name = ee.String(name)
mask = scl.eq(band_value).rename(name)
return mask
newbands = ee.Dictionary(data.map(wrap))
bandslist = tools.dictionary.extractList(newbands,
[str(i) for i in range(1, 12)])
image = tools.image.addMultiBands(ee.Image(bandslist.get(0)),
ee.List(bandslist.slice(1)))
return image
@property
def SCL_data(self):
data = None
if self.process == 'SR':
data = {
1: 'saturated',
2: 'dark',
3: 'shadow',
4: 'vegetation',
5: 'bare_soil',
6: 'water',
7: 'cloud_low',
8: 'cloud_medium',
9: 'cloud_high',
10: 'cirrus',
11: 'snow'
}
return data
@staticmethod
def fromId(id):
""" Create a Sentinel2 class from a GEE ID """
if id == 'COPERNICUS/S2':
return Sentinel2()
elif id == 'COPERNICUS/S2_SR':
return Sentinel2('SR')
else:
msg = '{} not recognized as a Sentinel 2 ID'
raise ValueError(msg.format(id))
Sentinel2TOA = Sentinel2()
Sentinel2SR = Sentinel2('SR')
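# --- Editor's sketch (not part of the original file) ---
# Hypothetical usage of the fromId factory above; assumes this module is
# importable and that ee.Initialize() has been called where server-side
# objects are actually evaluated.
toa = Sentinel2.fromId('COPERNICUS/S2')      # TOA collection
sr = Sentinel2.fromId('COPERNICUS/S2_SR')    # surface reflectance collection
assert toa.process == 'TOA' and sr.process == 'SR'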
|
Python
| 0.00013
|
@@ -114,129 +114,120 @@
ort
-bitreader, cloud_mask, tools%0Afrom .. import algorithms as module_alg%0Aimport ee%0A%0ANUNBERS = %5B1, 2%5D%0APROCESSES = %5B'TOA', 'SR'
+tools%0Aimport ee%0A%0ANUNBERS = %5B1, 2%5D%0APROCESSES = %5B'TOA', 'SR'%5D%0AIDS = %5B%0A 'COPERNICUS/S2',%0A 'COPERNICUS/S2_SR'%0A
%5D%0A%0A%0A
@@ -4738,59 +4738,154 @@
))%0A%0A
-%0ASentinel2TOA = Sentinel2()%0A
+ # FACTORY%0A @classmethod%0A def Sentinel2TOA(cls):%0A return cls('TOA')%0A%0A @classmethod%0A def
Senti
-n
e
+n
l2SR
- = Sentinel2
+(cls):%0A return cls
('SR
|