| commit (stringlengths 40-40) | subject (stringlengths 1-3.25k) | old_file (stringlengths 4-311) | new_file (stringlengths 4-311) | old_contents (stringlengths 0-26.3k) | lang (stringclasses 3 values) | proba (float64 0-1) | diff (stringlengths 0-7.82k) |
|---|---|---|---|---|---|---|---|
832391803816ac39766fa5dad929d6b0a9e56561
|
Fix POST calls with new requests
|
taiga/requestmaker.py
|
taiga/requestmaker.py
|
import json
import requests
import time
from . import exceptions, utils
from requests.exceptions import RequestException
class RequestCacheException(Exception):
pass
class RequestCacheMissingException(RequestCacheException):
pass
class RequestCacheInvalidException(RequestCacheException):
pass
class RequestCache(object):
def __init__(self, valid_time=60):
self._valid_time = valid_time
self._cache = {}
def put(self, key, value):
self._cache[key] = {
'time': time.time(),
'value': value
}
def remove(self, key):
if key in self._cache:
del self._cache[key]
def get(self, key):
if key not in self._cache:
raise RequestCacheMissingException()
if time.time() > self._cache[key]['time'] + self._valid_time:
self.remove(key)
raise RequestCacheInvalidException()
return self._cache[key]['value']
class RequestMakerException(Exception):
pass
class RequestMaker(object):
def __init__(self, api_path, host, token, token_type='Bearer'):
self.api_path = api_path
self.host = host
self.token = token
self.token_type = token_type
self._cache = RequestCache()
@property
def cache(self):
return self._cache
def is_bad_response(self, response):
return 400 <= response.status_code <= 500
def headers(self):
headers = {
'Content-type': 'application/json',
'Authorization': '{0} {1}'.format(self.token_type, self.token),
'x-disable-pagination': 'True'
}
return headers
def urljoin(self, *parts):
return utils.urljoin(*parts)
def get_full_url(self, uri, query={}, **parameters):
full_url = self.urljoin(
self.host, self.api_path,
uri.format(**parameters)
)
return full_url
def get(self, uri, query={}, cache=False, **parameters):
try:
full_url = self.urljoin(
self.host, self.api_path,
uri.format(**parameters)
)
result = None
if cache:
try:
result = self._cache.get(full_url)
except RequestCacheException:
pass
if not result:
result = requests.get(
full_url,
headers=self.headers(),
params=query
)
if cache:
self._cache.put(full_url, result)
except RequestException:
raise exceptions.TaigaRestException(
full_url, 400,
'Network error!', 'GET'
)
if not self.is_bad_response(result):
return result
else:
raise exceptions.TaigaRestException(
full_url, result.status_code,
result.text, 'GET'
)
def post(self, uri, payload=None, query={}, files={}, **parameters):
if files:
headers = {
'Authorization': '{0} {1}'.format(self.token_type, self.token),
'x-disable-pagination': True
}
data = payload
else:
headers = self.headers()
data = json.dumps(payload)
try:
full_url = self.urljoin(
self.host, self.api_path,
uri.format(**parameters)
)
result = requests.post(
full_url,
headers=headers,
data=data,
params=query,
files=files
)
except RequestException:
raise exceptions.TaigaRestException(
full_url, 400,
'Network error!', 'POST'
)
if not self.is_bad_response(result):
return result
else:
raise exceptions.TaigaRestException(
full_url, result.status_code,
result.text, 'POST'
)
def delete(self, uri, query={}, **parameters):
try:
full_url = self.urljoin(
self.host, self.api_path,
uri.format(**parameters)
)
result = requests.delete(
full_url,
headers=self.headers(),
params=query
)
except RequestException:
raise exceptions.TaigaRestException(
full_url, 400,
'Network error!', 'DELETE'
)
if not self.is_bad_response(result):
return result
else:
raise exceptions.TaigaRestException(
full_url, result.status_code,
result.text, 'DELETE'
)
def put(self, uri, payload=None, query={}, **parameters):
try:
full_url = self.urljoin(
self.host, self.api_path,
uri.format(**parameters)
)
result = requests.put(
full_url,
headers=self.headers(),
data=json.dumps(payload),
params=query
)
except RequestException:
raise exceptions.TaigaRestException(
full_url, 400,
'Network error!', 'PUT'
)
if not self.is_bad_response(result):
return result
else:
raise exceptions.TaigaRestException(
full_url, result.status_code,
result.text, 'PUT'
)
def patch(self, uri, payload=None, query={}, **parameters):
try:
full_url = self.urljoin(
self.host, self.api_path,
uri.format(**parameters)
)
result = requests.patch(
full_url,
headers=self.headers(),
data=json.dumps(payload),
params=query
)
except RequestException:
raise exceptions.TaigaRestException(
full_url, 400,
'Network error!', 'PATCH'
)
if not self.is_bad_response(result):
return result
else:
raise exceptions.TaigaRestException(
full_url, result.status_code,
result.text, 'PATCH'
)
|
Python
| 0
|
@@ -3238,20 +3238,22 @@
 ation': 
+'
 True
+'
 \n
|
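The fix above quotes the `x-disable-pagination` value in the multipart branch of `post()`. As context, a minimal sketch of why this matters, assuming only the behaviour of the `requests` library: newer releases validate header values and reject non-string types instead of silently coercing them, so the bare boolean broke POST calls with file uploads.

```python
import requests  # noqa: F401 - shown for context only

# Hypothetical reproduction (not the library's code): a bare boolean header
# value is rejected by newer versions of requests, so the flag must be the
# string 'True'.
headers = {
    'Authorization': 'Bearer not-a-real-token',  # placeholder token
    'x-disable-pagination': 'True',              # was the bool True before the fix
}
# requests.post(url, headers=headers, files=files)  # now succeeds
```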
1dd9295c0e6269ed60eb10e5f6cb3570233f63ac
|
Increase default job size.
|
lobster/cmssw/job.py
|
lobster/cmssw/job.py
|
import imp
import os
import pickle
import shutil
import sqlite3
import time
import lobster.job
import sandbox
from dataset import DASInterface, FileInterface
from jobit import SQLInterface as JobitStore
from ProdCommon.CMSConfigTools.ConfigAPI.CfgInterface import CfgInterface
class JobProvider(lobster.job.JobProvider):
def __init__(self, config):
self.__config = config
self.__workdir = config['workdir']
self.__stageout = config['stageout location']
self.__sandbox = os.path.join(self.__workdir, 'sandbox')
self.__datasets = {}
self.__configs = {}
self.__args = {}
self.__jobdirs = {}
self.__stageoutdirs = {}
self.__outputs = {}
if 'files' in repr(config):
ds_interface = FileInterface(config)
else:
ds_interface = DASInterface(config)
create = not os.path.exists(self.__workdir)
if create:
os.makedirs(self.__sandbox)
for fn in ['job.py']:
shutil.copy(os.path.join(os.path.dirname(__file__), 'data', fn),
os.path.join(self.__sandbox, fn))
blacklist = config.get('sandbox blacklist', [])
sandbox.package(os.environ['LOCALRT'], self.__sandbox, blacklist)
for cfg in config['tasks']:
label = cfg['dataset label']
cms_config = cfg['cmssw config']
self.__datasets[label] = cfg['dataset']
self.__configs[label] = os.path.basename(cms_config)
self.__args[label] = cfg.get('parameters', [])
self.__outputs[label] = []
if cfg.has_key('outputs'):
self.__outputs[label].extend(cfg['outputs'])
else:
with open(cms_config, 'r') as f:
source = imp.load_source('cms_config_source', cms_config, f)
cfg_interface = CfgInterface(source.process)
for m in cfg_interface.data.outputModules:
self.__outputs[label].append(getattr(cfg_interface.data, m).fileName._value)
taskdir = os.path.join(self.__workdir, label)
stageoutdir = os.path.join(self.__stageout, taskdir)
if create:
for dir in [taskdir, stageoutdir]:
if not os.path.exists(dir):
os.makedirs(dir)
shutil.copy(cms_config, os.path.join(taskdir, os.path.basename(cms_config)))
elif os.path.exists(os.path.join(taskdir, 'running')):
for d in os.listdir(os.path.join(taskdir, 'running')):
shutil.move(os.path.join(taskdir, 'running', d), os.path.join(taskdir, 'failed'))
self.__store = JobitStore(config)
if create:
self.__store.register_jobits(ds_interface)
else:
self.__store.reset_jobits()
def obtain(self, num=1):
res = self.retry(self.__store.pop_jobits, ([10] * num,), {})
if not res:
return None
tasks = []
ids = []
for (id, label, files, lumis) in res:
ids.append(id)
config = self.__configs[label]
args = self.__args[label]
inputs = [(os.path.join(self.__workdir, label, config), config),
(self.__sandbox + ".tar.bz2", "sandbox.tar.bz2"),
(os.path.join(os.path.dirname(__file__), 'data', 'wrapper.sh'), 'wrapper.sh')]
sdir = os.path.join(self.__stageout, label)
jdir = os.path.join(self.__workdir, label, 'running', id)
if not os.path.isdir(jdir):
os.makedirs(jdir)
with open(os.path.join(jdir, 'parameters.pkl'), 'wb') as f:
pickle.dump((args, files, lumis), f, pickle.HIGHEST_PROTOCOL)
inputs.append((os.path.join(jdir, 'parameters.pkl'), 'parameters.pkl'))
self.__jobdirs[id] = jdir
outputs = [(os.path.join(sdir, f.replace('.root', '_%s.root' % id)), f) for f in self.__outputs[label]]
outputs.extend([(os.path.join(jdir, f), f) for f in ['report.xml.gz', 'cmssw.log.gz']])
cmd = './wrapper.sh python job.py {0} parameters.pkl'.format(config)
tasks.append((id, cmd, inputs, outputs))
print "Creating job(s) {0}".format(", ".join(ids))
return tasks
def release(self, id, return_code, output):
print "Job", id, "returned with exit code", return_code
failed = (return_code != 0)
self.retry(self.__store.update_jobits, (id, failed), {})
jdir = self.__jobdirs[id]
with open(os.path.join(jdir, 'job.log'), 'w') as f:
f.write(output)
if failed:
shutil.move(jdir, jdir.replace('running', 'failed'))
else:
shutil.move(jdir, jdir.replace('running', 'successful'))
def done(self):
return self.__store.unfinished_jobits() == 0
def work_left(self):
return self.__store.unfinished_jobits()
def retry(self, fct, args, kwargs, attempts=10):
while attempts > 0:
attempts -= 1
try:
return fct(*args, **kwargs)
except sqlite3.OperationalError:
print "Failed to perform SQL operation. {0} attempts remaining.".format(attempts)
if attempts <= 0:
raise
time.sleep(1)
|
Python
| 0
|
@@ -2981,17 +2981,17 @@
 bits, ([
-1
+3
 0] * num
|
0c9f2f51778b26bb126eccfbef0b098da3db2877
|
normalize version numbers
|
asynchronous_batch_mailings/__openerp__.py
|
asynchronous_batch_mailings/__openerp__.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of asynchronous_batch_mailings, an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# asynchronous_batch_mailings is free software:
# you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# asynchronous_batch_mailings is distributed in the hope
# that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the
# GNU Affero General Public License
# along with asynchronous_batch_mailings.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Asynchronous Batch Mailings',
'version': '1.0',
'author': 'ACSONE SA/NV',
'maintainer': 'ACSONE SA/NV',
'website': 'http://www.acsone.eu',
'category': 'Marketing',
'depends': [
'mail',
'connector',
],
'description': """
Asynchronous Batch Mailings
===========================
This module allows to send emails by an asynchronous way.
Moreover it provides a way to split huge mailing.
Two parameters are available:
* the mailing size from which the mailing must become asynchronous
* the batch size
""",
'images': [
],
'data': [
'data/ir_config_parameter_data.xml',
],
'qweb': [
],
'demo': [
],
'test': [
],
'license': 'AGPL-3',
'installable': True,
'auto_install': False,
}
|
Python
| 0.000673
|
@@ -1138,17 +1138,23 @@
 sion': '
-1
+8.0.1.0
 .0',\n
|
dafc54e782c5ee9bda3cf1817df92ae16ed26979
|
fix website url in manifest
|
attachment_base_synchronize/__openerp__.py
|
attachment_base_synchronize/__openerp__.py
|
# coding: utf-8
# @ 2015 Florian DA COSTA @ Akretion
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': 'Attachment Base Synchronize',
'version': '9.0.1.0.0',
'author': 'Akretion,Odoo Community Association (OCA)',
'website': 'www.akretion.com',
'license': 'AGPL-3',
'category': 'Generic Modules',
'depends': [
'base',
'mail',
],
'data': [
'views/attachment_view.xml',
'security/ir.model.access.csv',
'data/cron.xml',
],
'demo': [
'demo/attachment_metadata_demo.xml'
],
'installable': True,
'application': False,
'images': [],
}
|
Python
| 0.000001
|
@@ -264,16 +264,23 @@
 site': '
+http://
 www.akre
@@ -287,16 +287,17 @@
 tion.com
+/
 ',\n    '
|
1c652fa17df8b1e10b7faf815992c0c7956afd8f
|
Use the right path for the Raven Django app
|
normandy/settings.py
|
normandy/settings.py
|
import os
from configurations import Configuration, values
class Core(Configuration):
"""Settings that will never change per-environment."""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Application definition
INSTALLED_APPS = [
'normandy.classifier',
'normandy.recipes',
'normandy.selfrepair',
'adminsortable',
'product_details',
'rest_framework',
'rest_framework.authtoken',
'storages',
'raven',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'normandy.classifier.middleware.RequestReceivedAtMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'normandy.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.static',
],
},
},
]
WSGI_APPLICATION = 'normandy.wsgi.application'
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = False
USE_L10N = False
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# User-uploaded Media
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
class Base(Core):
"""Settings that may change per-environment, some with defaults."""
SECRET_KEY = values.SecretValue()
DEBUG = values.BooleanValue(False)
ALLOWED_HOSTS = values.ListValue([])
DATABASES = values.DatabaseURLValue('postgres://postgres@localhost/normandy')
ADMINS = values.SingleNestedListValue([])
STATICFILES_STORAGE = values.Value('whitenoise.django.GzipManifestStaticFilesStorage')
EMAIL_HOST_USER = values.Value()
EMAIL_HOST = values.Value()
EMAIL_PORT = values.IntegerValue(587)
EMAIL_USE_TLS = values.BooleanValue(True)
EMAIL_HOST_PASSWORD = values.Value()
EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend')
# Overwrite old files when uploading media.
DEFAULT_FILE_STORAGE = values.Value('storages.backends.overwrite.OverwriteStorage')
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
GEOIP2_DATABASE = values.Value(os.path.join(Core.BASE_DIR, 'GeoLite2-Country.mmdb'))
# Product-details
PROD_DETAILS_STORAGE = values.Value('normandy.recipes.storage.ProductDetailsRelationalStorage')
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication'
),
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
}
CAN_EDIT_ACTIONS_IN_USE = values.BooleanValue(False)
ADMIN_ENABLED = values.BooleanValue(True)
USE_X_FORWARDED_HOST = values.BooleanValue(True)
RAVEN_CONFIG = {
'dsn': values.URLValue(None, environ_name='DJANGO_RAVEN_CONFIG_DSN'),
}
class Development(Base):
"""Settings for local development."""
DOTENV_EXISTS = os.path.exists(os.path.join(Core.BASE_DIR, '.env'))
DOTENV = '.env' if DOTENV_EXISTS else None
SECRET_KEY = values.Value('not a secret')
DEBUG = values.BooleanValue(True)
AUTH_PASSWORD_VALIDATORS = values.ListValue([])
INSTALLED_APPS = Base.INSTALLED_APPS + ['sslserver']
EMAIL_BACKEND = values.Value('django.core.mail.backends.console.EmailBackend')
CAN_EDIT_ACTIONS_IN_USE = values.BooleanValue(True)
class Production(Base):
"""Settings for the production environment."""
class Build(Production):
"""Settings for building the Docker image for production."""
SECRET_KEY = values.Value('not a secret')
class Test(Base):
SECRET_KEY = values.Value('not a secret')
DEFAULT_FILE_STORAGE = 'inmemorystorage.InMemoryStorage'
|
Python
| 0.000001
|
@@ -584,16 +584,44 @@
 'raven
+.contrib.django.raven_compat
 ',\n\n
|
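For reference, the corrected `INSTALLED_APPS` entry (taken from the diff above) points at Raven's Django integration submodule rather than the top-level package, which is not itself a Django app:

```python
INSTALLED_APPS = [
    # ... unrelated apps elided ...
    'raven.contrib.django.raven_compat',  # bare 'raven' lacks the Django app config
    # ... unrelated apps elided ...
]
```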
a007f80dc2182787eca521c84f37aeedc307645a
|
Remove base64 padding
|
encrypted_id/__init__.py
|
encrypted_id/__init__.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
try:
basestring
except NameError:
basestring = str
from Crypto.Cipher import AES
import base64
import binascii
import struct
from django.conf import settings
from django.db.models import Model
from django.http import Http404
from django.shortcuts import get_object_or_404 as go4
__version__ = "0.1.2"
__license__ = "BSD"
__author__ = "Amit Upadhyay"
__email__ = "upadhyay@gmail.com"
__url__ = "http://amitu.com/encrypted-id/"
__source__ = "https://github.com/amitu/django-encrypted-id"
__docformat__ = "html"
def encode(the_id):
assert 0 <= the_id < 2 ** 64
crc = binascii.crc32(bytes(the_id)) & 0xffffffff
message = struct.pack(b"<IQxxxx", crc, the_id)
assert len(message) == 16
cypher = AES.new(
settings.SECRET_KEY[:24], AES.MODE_CBC,
settings.SECRET_KEY[-16:]
)
return base64.urlsafe_b64encode(cypher.encrypt(message)).replace(b"=", b".")
def decode(e):
if isinstance(e, basestring):
e = bytes(e.encode("ascii"))
try:
e = base64.urlsafe_b64decode(e.replace(b".", b"="))
except (TypeError, AttributeError):
raise ValueError("Failed to decrypt, invalid input.")
for skey in getattr(settings, "SECRET_KEYS", [settings.SECRET_KEY]):
cypher = AES.new(skey[:24], AES.MODE_CBC, skey[-16:])
msg = cypher.decrypt(e)
crc, the_id = struct.unpack("<IQxxxx", msg)
if crc != binascii.crc32(bytes(the_id)) & 0xffffffff:
continue
return the_id
raise ValueError("Failed to decrypt, CRC never matched.")
def get_object_or_404(m, ekey, *arg, **kw):
try:
pk = decode(ekey)
except ValueError:
raise Http404
return go4(m, id=pk, *arg, **kw)
def ekey(instance):
assert isinstance(instance, Model)
return encode(instance.id)
|
Python
| 0.000001
|
@@ -1069,17 +1069,16 @@
 b"=", b"
-.
 ")\n\n\ndef
@@ -1162,32 +1162,74 @@
 ii"))\n\n    try:\n
+        padding = (3 - len(e) % 3) * b"="
         e = base
@@ -1254,28 +1254,18 @@
 de(e
-.replace(b".", b"=")
+ + padding
 )\n
|
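The pattern in this commit, stripping base64 padding on encode and restoring it before decode, is usually written with a mod-4 rule, since base64 text length is always a multiple of 4. A standalone sketch (not the module's code) follows; the commit's own `(3 - len(e) % 3)` arithmetic happens to give the same answer for the 22-character tokens this module produces from its fixed 16-byte AES blocks.

```python
import base64

def encode_no_pad(raw: bytes) -> bytes:
    # URL-safe base64 with the trailing '=' padding stripped off.
    return base64.urlsafe_b64encode(raw).rstrip(b"=")

def decode_no_pad(token: bytes) -> bytes:
    # Restore however many '=' characters bring the length back to a
    # multiple of 4, then decode normally.
    return base64.urlsafe_b64decode(token + b"=" * (-len(token) % 4))

assert decode_no_pad(encode_no_pad(b"0123456789abcdef")) == b"0123456789abcdef"
```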
7ca1448e0d4afe0b3abf91ba2cd895df681ae3ba
|
Add initial migrations
|
apps/careers/migrations/0001_initial.py
|
apps/careers/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-20 14:46
from __future__ import unicode_literals
import cms.apps.media.models
import cms.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('pages', '0006_auto_20151002_1655'),
('media', '0003_file_alt_text'),
]
operations = [
migrations.CreateModel(
name='Career',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_online', models.BooleanField(default=True, help_text="Uncheck this box to remove the page from the public website. Logged-in admin users will still be able to view this page by clicking the 'view on site' button.", verbose_name='online')),
('browser_title', models.CharField(blank=True, help_text="The heading to use in the user's web browser. Leave blank to use the page title. Search engines pay particular attention to this attribute.", max_length=1000)),
('meta_description', models.TextField(blank=True, help_text='A brief description of the contents of this page.', verbose_name='description')),
('sitemap_priority', models.FloatField(blank=True, choices=[(1.0, 'Very high'), (0.8, 'High'), (0.5, 'Medium'), (0.3, 'Low'), (0.0, 'Very low')], default=None, help_text='The relative importance of this content on your site. Search engines use this as a hint when ranking the pages within your site.', null=True, verbose_name='priority')),
('sitemap_changefreq', models.IntegerField(blank=True, choices=[(1, 'Always'), (2, 'Hourly'), (3, 'Daily'), (4, 'Weekly'), (5, 'Monthly'), (6, 'Yearly'), (7, 'Never')], default=None, help_text='How frequently you expect this content to be updated. Search engines use this as a hint when scanning your site for updates.', null=True, verbose_name='change frequency')),
('robots_index', models.BooleanField(default=True, help_text='Uncheck to prevent search engines from indexing this page. Do this only if the page contains information which you do not wish to show up in search results.', verbose_name='allow indexing')),
('robots_follow', models.BooleanField(default=True, help_text='Uncheck to prevent search engines from following any links they find in this page. Do this only if the page contains links to other sites that you do not wish to publicise.', verbose_name='follow links')),
('robots_archive', models.BooleanField(default=True, help_text='Uncheck this to prevent search engines from archiving this page. Do this this only if the page is likely to change on a very regular basis. ', verbose_name='allow archiving')),
('og_title', models.CharField(blank=True, help_text='Title that will appear on Facebook posts. This is limited to 100 characters, but Facebook will truncate the title to 88 characters.', max_length=100, verbose_name='title')),
('og_description', models.TextField(blank=True, help_text='Description that will appear on Facebook posts. It is limited to 300 characters, but it is recommended that you do not use anything over 200.', max_length=300, verbose_name='description')),
('twitter_card', models.IntegerField(blank=True, choices=[(0, 'Summary'), (1, 'Photo'), (2, 'Video'), (3, 'Product'), (4, 'App'), (5, 'Gallery'), (6, 'Large Summary')], default=None, help_text='The type of content on the page. Most of the time "Summary" will suffice. Before you can benefit from any of these fields make sure to go to https://dev.twitter.com/docs/cards/validation/validator and get approved.', null=True, verbose_name='card')),
('twitter_title', models.CharField(blank=True, help_text='The title that appears on the Twitter card, it is limited to 70 characters.', max_length=70, verbose_name='title')),
('twitter_description', models.TextField(blank=True, help_text="Description that will appear on Twitter cards. It is limited to 200 characters. This does'nt effect SEO, so focus on copy that complements the tweet and title rather than on keywords.", max_length=200, verbose_name='description')),
('short_title', models.CharField(blank=True, help_text='A shorter version of the title that will be used in site navigation. Leave blank to use the full-length title.', max_length=200)),
('title', models.CharField(max_length=256)),
('slug', models.CharField(max_length=256, unique=True)),
('location', models.CharField(blank=True, max_length=256, null=True)),
('summary', models.TextField(blank=True, null=True)),
('description', cms.models.fields.HtmlField()),
('email_address', models.EmailField(max_length=254)),
('order', models.PositiveIntegerField(default=0)),
('og_image', cms.apps.media.models.ImageRefField(blank=True, help_text='The recommended image size is 1200x627 (1.91:1 ratio); this gives you a big stand out thumbnail. Using an image smaller than 400x209 will give you a small thumbnail and will splits posts into 2 columns. If you have text on the image make sure it is centered.', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='+', to='media.File', verbose_name='image')),
],
options={
'ordering': ['order'],
},
),
migrations.CreateModel(
name='Careers',
fields=[
('page', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='+', serialize=False, to='pages.Page')),
('per_page', models.PositiveIntegerField(blank=True, default=10, null=True, verbose_name='careers per page')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='career',
name='page',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='careers.Careers'),
),
migrations.AddField(
model_name='career',
name='twitter_image',
field=cms.apps.media.models.ImageRefField(blank=True, help_text='The minimum size it needs to be is 280x150. If you want to use a larger imagemake sure the card type is set to "Large Summary".', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='+', to='media.File', verbose_name='image'),
),
]
|
Python
| 0
|
@@ -62,15 +62,15 @@
 06-2
-0 14:46
+2 08:42
 \nfro
@@ -332,40 +332,35 @@
 ('
-pages
+media
 ', '000
-6_auto_20151002_1655
+3_file_alt_text
 '),\n
@@ -373,35 +373,40 @@
 ('
-media
+pages
 ', '000
-3_file_alt_text
+6_auto_20151002_1655
 '),\n
@@ -4296,32 +4296,171 @@
 description')),\n
+                ('slug', models.SlugField(help_text='A user friendly URL')),
+                ('title', models.CharField(max_length=1000)),
@@ -4650,142 +4650,8 @@
 )),\n
-                ('title', models.CharField(max_length=256)),
-                ('slug', models.CharField(max_length=256, unique=True)),
|
038c256a3bb4d2b2b56c1bc8b60cd6c2149a1a22
|
fix wrong type_id for challenge
|
novaideo/content/interface.py
|
novaideo/content/interface.py
|
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# avalaible on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
from zope.interface import Interface
from dace.interfaces import (
Attribute, IUser, IEntity as IEntityO, IApplication,
IMachine)
from pontus.interfaces import IVisualisableElement, IImage as SourceIImage
from novaideo.utilities.data_manager import (
interface_config,
IMAGETYPE,
FILETYPE,
file_deserializer,
interface,
sub_object_serialize)
def get_subinterfaces(interface):
result = list(getattr(interface, '__sub_interfaces__', []))
for sub_interface in list(result):
if getattr(sub_interface, 'is_abstract', False):
result.extend(get_subinterfaces(sub_interface))
result.append(interface)
return list(set(result))
@interface(True)
class IEntity(IEntityO):
pass
@interface(True)
class IIdeaSource(IEntityO):
pass
@interface()
@interface_config(type_id='creation_culturelle_image',
deserializer=file_deserializer,
serializer=sub_object_serialize)
class IImage(SourceIImage):
pass
@interface()
class INewsletter(IVisualisableElement, IEntity):
pass
@interface(True)
class INode(IEntity):
pass
@interface(True)
class IEmojiable(IEntity):
pass
@interface()
class ICorrection(IEntity):
pass
@interface(True)
class IPresentableEntity(IEntity):
pass
@interface()
class IVote(IEntity):
pass
@interface()
class IBallotType(IEntity):
pass
@interface()
class IReport(IEntity):
pass
@interface()
class IBallot(IEntity):
pass
@interface()
class IBallotBox(IEntity):
pass
@interface(True)
class ICorrelableEntity(IEntity):
pass
@interface(True)
class ISearchableEntity(IEntity):
name = Attribute('name')
title = Attribute('title')
description = Attribute('description')
keywords = Attribute('keywords')
author = Attribute('author', type='person')
@interface(True)
class IVersionableEntity(IEntity):
pass
@interface(True)
class IDuplicableEntity(IEntity):
pass
@interface(True)
class ICommentable(IEntity):
pass
@interface(True)
class IDebatable(IEntity):
pass
@interface(True)
class ISignalableEntity(IEntity):
pass
@interface(True)
class ISustainable(IEntity):
pass
@interface(True)
class ITokenable(IEntity):
pass
@interface()
class ISReport(IEntity):
pass
@interface()
@interface_config(type_id='idea')
class IChallenge(ISearchableEntity,
ICorrelableEntity,
IPresentableEntity,
INode,
ISignalableEntity,
IDebatable):
pass
@interface()
class IComment(ICommentable, IIdeaSource, ISignalableEntity):
pass
@interface()
class IChannel(ICommentable):
pass
@interface()
class IPrivateChannel(IChannel):
pass
@interface()
@interface_config(type_id='amendment')
class IAmendment(ICorrelableEntity,
IPresentableEntity,
IDuplicableEntity,
ISearchableEntity,
IDebatable):
text = Attribute('text')
@interface()
@interface_config(type_id='idea')
class Iidea(IDuplicableEntity,
IVersionableEntity,
ISearchableEntity,
ICorrelableEntity,
IPresentableEntity,
INode,
ISignalableEntity,
IDebatable,
ITokenable):
text = Attribute('text')
attached_files = Attribute('attached_files', type=FILETYPE, multiplicity='*')
@interface()
@interface_config(type_id='question')
class IQuestion(IDuplicableEntity,
IVersionableEntity,
ISearchableEntity,
ICorrelableEntity,
IPresentableEntity,
INode,
ISignalableEntity,
ISustainable,
IDebatable):
question = Attribute('question')
text = Attribute('text')
attached_files = Attribute('attached_files', type=FILETYPE, multiplicity='*')
@interface()
@interface_config(type_id='answer')
class IAnswer(ICorrelableEntity,
IPresentableEntity,
INode,
IIdeaSource,
ISignalableEntity,
ISustainable,
IDebatable):
comment = Attribute('comment')
attached_files = Attribute('attached_files', type=FILETYPE, multiplicity='*')
@interface()
class IFile(ISearchableEntity):
pass
@interface()
class ICorrelation(IEntity, IDebatable):
pass
@interface()
class IInvitation(IEntity):
pass
@interface()
class IKeyword(IEntity):
pass
@interface()
class ICandidacy(IEntity):
pass
@interface()
class IToken(IEntity):
pass
@interface(True)
class IBaseUser(IEntity):
first_name = Attribute('first_name')
last_name = Attribute('last_name')
user_title = Attribute('user_title')
organization = Attribute('organization', type='organization')
@interface()
@interface_config(type_id='person')
class IPerson(IVisualisableElement,
ISearchableEntity,
ICorrelableEntity,
IBaseUser,
IUser,
IDebatable):
picture = Attribute('picture', type=IMAGETYPE)
@interface()
@interface_config(type_id='bot')
class IBot(IMachine):
picture = Attribute('picture', type=IMAGETYPE)
@interface()
@interface_config(type_id='preregistration')
class IPreregistration(IBaseUser):
pass
@interface()
@interface_config(type_id='proposal')
class IProposal(ISearchableEntity,
ICorrelableEntity,
IDuplicableEntity,
IPresentableEntity,
INode,
ISignalableEntity,
IDebatable,
ITokenable):
text = Attribute('text')
attached_files = Attribute('attached_files', type=FILETYPE, multiplicity='*')
workspace = Attribute('workspace', type='workspace')
working_group = Attribute('working_group', type='workinggroup')
authors = Attribute('authors', type='person', multiplicity='*')
related_ideas = Attribute('related_ideas', type='idea', multiplicity='*')
@interface()
@interface_config(type_id='workinggroup')
class IWorkingGroup(IEntity):
pass
@interface()
@interface_config(type_id='workspace')
class IWorkspace(IVisualisableElement,
IEntity):
files = Attribute('files', type=FILETYPE, multiplicity='*')
@interface()
@interface_config(type_id='organization')
class IOrganization(IEntity):
pass
@interface()
class INovaIdeoApplication(IEntity, IApplication, IIdeaSource, IDebatable):
pass
@interface()
@interface_config(type_id='alert')
class IAlert(IVisualisableElement,
IEntity):
pass
@interface(True)
class IAdvertising(IVisualisableElement, ISearchableEntity):
dates = Attribute('dates')
@interface()
@interface_config(type_id='web_advertising')
class IWebAdvertising(IAdvertising):
picture = Attribute('picture', type=FILETYPE)
html_content = Attribute('html_content')
advertisting_url = Attribute('advertisting_url')
@interface()
@interface_config(type_id='smartfolder')
class ISmartFolder(IVisualisableElement, IEntity):
view_type = Attribute('view_type')
children = Attribute('children', type='smartfolder', multiplicity='*')
style = Attribute('style')
|
Python
| 0.000002
|
@@ -2495,36 +2495,41 @@
 config(type_id='
-idea
+challenge
 ')\nclass IChalle
|
887b0aa2263366fb18689b2ac92acb75be57c0b3
|
Fix output of export-csv.py
|
export-csv.py
|
export-csv.py
|
#!/usr/bin/env python3
#
# Program for exporting patient records from PhenoTips in CSV format
#
# Copyright 2016 University of Utah
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
import csv
import sys
import time
from datetime import timedelta
from getopt import getopt
from getpass import getpass
from phenotipsbot import PhenoTipsBot
from sys import stderr
from sys import stdout
def export_patients(bot, patient_ids, study, out_file, progress_callback):
start_time = time.time()
count = 0
n_exported = 0
prop_names = bot.list_patient_class_properties()
writer = csv.writer(out_file)
writer.writerow(prop_names)
for patient_id in patient_ids:
progress_callback(count)
count += 1
if study != None:
patient_study = bot.get_study(patient_id)
if patient_study != study and not (study == '' and patient_study == None):
continue
patient = bot.get(patient_id)
patient['identifier'] = 'P' + patient['identifier'].zfill(7)
row = []
for prop_name in prop_names:
row.append(patient[prop_name])
writer.writerow(row)
n_exported += 1
return n_exported, timedelta(seconds=time.time() - start_time)
if __name__ == '__main__':
#parse arguments
base_url = None
username = None
password = None
study = None
optlist, args = getopt(sys.argv[1:], '-y', ['base-url=', 'username=', 'password=', 'study='])
for name, value in optlist:
if name == '--base-url':
base_url = value
elif name == '--username':
username = value
elif name == '--password':
password = value
elif name == '--study':
study = value
#get any missing arguments and initialize the bot
if not base_url:
sys.stderr.write('Input the URL (blank for http://localhost:8080): ')
base_url = input()
if not base_url:
base_url = 'http://localhost:8080'
if not base_url.startswith('http://') and not base_url.startswith('https://'):
base_url = 'http://' + base_url
base_url = base_url.rstrip('/')
if not username:
sys.stderr.write('Input your username (blank for Admin): ')
username = input()
if not username:
username = 'Admin'
if not password:
password = getpass('Input your password (blank for admin): ', sys.stderr)
if not password:
password = 'admin'
bot = PhenoTipsBot(base_url, username, password)
if study == None:
studies = bot.list_studies()
if len(studies):
print('Available studies:')
print('* ' + '\n* '.join(studies))
sys.stderr.write('Are you exporting from a particular study (blank for no)? ')
study = input()
if study and study[0] == 'y':
sys.stderr.write('Input the study to export from (blank for default): ')
study = input()
else:
study = None
elif study == 'None':
study = None
#begin export
patient_ids = bot.list()
stderr.write('Looking through ' + str(len(patient_ids)) + ' patient records...\n')
stderr.write('\n')
n_exported, elapsed_time = export_patients(bot, patient_ids, study, stdout, lambda count: stderr.write(str(count) + '\r'))
stderr.write('\n')
stderr.write('Exported ' + n_exported + ' patients.')
stderr.write('Elapsed time ' + str(timedelta(seconds=time.time() - start_time)) + '\n')
|
Python
| 0.999999
|
@@ -3298,95 +3298,8 @@
 s):\n
-            print('Available studies:')
-            print('* ' + '\n* '.join(studies))
@@ -3451,24 +3451,119 @@
 [0] == 'y':\n
+                print('Available studies:')
+                print('* ' + '\n* '.join(studies))
@@ -4108,16 +4108,20 @@
 ted ' + 
+str(
 n_export
@@ -4122,16 +4122,17 @@
 exported
+)
  + ' pat
@@ -4137,16 +4137,18 @@
 atients.
+\n
 ')\n    s
@@ -4185,52 +4185,21 @@
 str(
-timedelta(seconds=time.time() - start
+elapsed
 _time)
-)
  + '
|
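Two details drive the output fix visible in the diff: `n_exported` is an `int`, so concatenating it to a string with `+` raises `TypeError`, and `start_time` is local to `export_patients`, so the final line raised `NameError` even though the function already returns the elapsed time. The corrected write-out, with names from the script above:

```python
# n_exported (int) and elapsed_time (timedelta) both need str() before
# concatenation; elapsed_time is the value export_patients() returns.
stderr.write('Exported ' + str(n_exported) + ' patients.\n')
stderr.write('Elapsed time ' + str(elapsed_time) + '\n')
```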
9be80df72954c05193fc6ded0998b28de182a699
|
Add _validate method to Client.auth interface.
|
objectrocket/auth.py
|
objectrocket/auth.py
|
"""Authentication operations."""
import logging
import requests
from objectrocket import bases
from objectrocket import errors
logger = logging.getLogger(__name__)
class Auth(bases.BaseAuthLayer):
"""Authentication operations.
:param objectrocket.client.Client base_client: An objectrocket.client.Client instance.
"""
def __init__(self, base_client):
self.__username = None
self.__password = None
self.__token = None
super(Auth, self).__init__(base_client=base_client)
#####################
# Public interface. #
#####################
def authenticate(self, username, password):
"""Authenticate against the ObjectRocket API.
:param str username: The username to perform basic authentication against the API with.
:param str password: The password to perform basic authentication against the API with.
:returns: A token used for authentication against token protected resources.
:rtype: str
"""
# Update the username and password bound to this instance for re-authentication needs.
self._username = username
self._password = password
# Attempt to authenticate.
resp = requests.get(
self._url,
auth=(username, password),
**self._default_request_kwargs
)
# Attempt to extract authentication data.
try:
if resp.status_code == 200:
json_data = resp.json()
token = json_data['data']['token']
elif resp.status_code == 401:
raise errors.AuthFailure(resp.json().get('message', 'Authentication Failure.'))
else:
raise errors.AuthFailure(
"Unknown exception while authenticating: '{}'".format(resp.text)
)
except errors.AuthFailure:
raise
except Exception as ex:
logging.exception(ex)
raise errors.AuthFailure('{}: {}'.format(ex.__class__.__name__, ex))
# Update the token bound to this instance for use by other client operations layers.
self._token = token
logger.info('New API token received: "{}".'.format(token))
return token
######################
# Private interface. #
######################
@property
def _default_request_kwargs(self):
"""The default request keyword arguments to be passed to the requests library."""
return super(Auth, self)._default_request_kwargs
@property
def _password(self):
"""The password currently being used for authentication."""
return self.__password
@_password.setter
def _password(self, new_password):
"""Update the password to be used for authentication."""
self.__password = new_password
def _refresh(self):
"""Refresh the API token using the currently bound credentials.
This is simply a convenience method to be invoked automatically if authentication fails
during normal client use.
"""
# Request and set a new API token.
new_token = self.authenticate(self._username, self._password)
self._token = new_token
logger.info('New API token received: "{}".'.format(new_token))
return self._token
@property
def _token(self):
"""The API token this instance is currently using."""
return self.__token
@_token.setter
def _token(self, new_token):
"""Update the API token which this instance is to use."""
self.__token = new_token
return self.__token
@property
def _url(self):
"""The base URL for authentication operations."""
return self._client._url + 'tokens/'
@property
def _username(self):
"""The username currently being used for authentication."""
return self.__username
@_username.setter
def _username(self, new_username):
"""Update the username to be used for authentication."""
self.__username = new_username
|
Python
| 0
|
@@ -4069,16 +4069,591 @@
 = new_username\n
+
+    def _verify(self, token):
+        """Verify that the given token is valid.
+
+        :param str token: The API token to verify.
+        :returns: The token's corresponding user model as a dict, or None if invalid.
+        :rtype: dict
+        """
+        # Attempt to authenticate.
+        url = '{}{}'.format(self._url, 'verify')
+        resp = requests.post(
+            url,
+            json={'token': token},
+            **self._default_request_kwargs
+        )
+
+        if resp.status_code == 200:
+            return resp.json().get('data', None)
+        return None
|
a20a275ef5cae1b38ead1c3191dd96a042eb2070
|
Fix AttributeError in the project instance detail view
|
openstack_dashboard/api/_nova.py
|
openstack_dashboard/api/_nova.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This module is a special module to define functions or other resources
which need to be imported outside of openstack_dashboard.api.nova
(like cinder.py) to avoid cyclic imports.
"""
from django.conf import settings
from glanceclient import exc as glance_exceptions
from novaclient import api_versions
from novaclient import client as nova_client
from horizon import exceptions as horizon_exceptions
from horizon.utils import memoized
from openstack_dashboard.api import base
from openstack_dashboard.api import glance
from openstack_dashboard.api import microversions
from openstack_dashboard.contrib.developer.profiler import api as profiler
# Supported compute versions
VERSIONS = base.APIVersionManager("compute", preferred_version=2)
VERSIONS.load_supported_version(1.1, {"client": nova_client, "version": 1.1})
VERSIONS.load_supported_version(2, {"client": nova_client, "version": 2})
INSECURE = settings.OPENSTACK_SSL_NO_VERIFY
CACERT = settings.OPENSTACK_SSL_CACERT
class Server(base.APIResourceWrapper):
"""Simple wrapper around novaclient.server.Server.
Preserves the request info so image name can later be retrieved.
"""
_attrs = ['addresses', 'attrs', 'id', 'image', 'links', 'description',
'metadata', 'name', 'private_ip', 'public_ip', 'status', 'uuid',
'image_name', 'VirtualInterfaces', 'flavor', 'key_name', 'fault',
'tenant_id', 'user_id', 'created', 'locked',
'OS-EXT-STS:power_state', 'OS-EXT-STS:task_state',
'OS-EXT-SRV-ATTR:instance_name', 'OS-EXT-SRV-ATTR:host',
'OS-EXT-SRV-ATTR:hostname', 'OS-EXT-SRV-ATTR:kernel_id',
'OS-EXT-SRV-ATTR:ramdisk_id', 'OS-EXT-SRV-ATTR:root_device_name',
'OS-EXT-SRV-ATTR:root_device_name', 'OS-EXT-SRV-ATTR:user_data',
'OS-EXT-SRV-ATTR:reservation_id', 'OS-EXT-SRV-ATTR:launch_index',
'OS-EXT-AZ:availability_zone', 'OS-DCF:diskConfig']
def __init__(self, apiresource, request):
super().__init__(apiresource)
self.request = request
# TODO(gabriel): deprecate making a call to Glance as a fallback.
@property
def image_name(self):
if not self.image:
return None
if hasattr(self.image, 'name'):
return self.image.name
if 'name' in self.image:
return self.image['name']
try:
image = glance.image_get(self.request, self.image['id'])
self.image['name'] = image.name
return image.name
except (glance_exceptions.ClientException,
horizon_exceptions.ServiceCatalogException):
self.image['name'] = None
return None
@property
def availability_zone(self):
return getattr(self, 'OS-EXT-AZ:availability_zone', "")
@property
def has_extended_attrs(self):
return any(getattr(self, attr) for attr in [
'OS-EXT-SRV-ATTR:instance_name', 'OS-EXT-SRV-ATTR:host',
'OS-EXT-SRV-ATTR:hostname', 'OS-EXT-SRV-ATTR:kernel_id',
'OS-EXT-SRV-ATTR:ramdisk_id', 'OS-EXT-SRV-ATTR:root_device_name',
'OS-EXT-SRV-ATTR:root_device_name', 'OS-EXT-SRV-ATTR:user_data',
'OS-EXT-SRV-ATTR:reservation_id', 'OS-EXT-SRV-ATTR:launch_index',
])
@property
def internal_name(self):
return getattr(self, 'OS-EXT-SRV-ATTR:instance_name', "")
@property
def host_server(self):
return getattr(self, 'OS-EXT-SRV-ATTR:host', "")
@property
def instance_name(self):
return getattr(self, 'OS-EXT-SRV-ATTR:instance_name', "")
@property
def reservation_id(self):
return getattr(self, 'OS-EXT-SRV-ATTR:reservation_id', "")
@property
def launch_index(self):
return getattr(self, 'OS-EXT-SRV-ATTR:launch_index', "")
@property
def hostname(self):
return getattr(self, 'OS-EXT-SRV-ATTR:hostname', "")
@property
def kernel_id(self):
return getattr(self, 'OS-EXT-SRV-ATTR:kernel_id', "")
@property
def ramdisk_id(self):
return getattr(self, 'OS-EXT-SRV-ATTR:ramdisk_id', "")
@property
def root_device_name(self):
return getattr(self, 'OS-EXT-SRV-ATTR:root_device_name', "")
@property
def user_data(self):
return getattr(self, 'OS-EXT-SRV-ATTR:user_data', "")
@memoized.memoized
def get_microversion(request, features):
client = novaclient(request)
min_ver, max_ver = api_versions._get_server_version_range(client)
return (microversions.get_microversion_for_features(
'nova', features, api_versions.APIVersion, min_ver, max_ver))
def get_auth_params_from_request(request):
"""Extracts properties needed by novaclient call from the request object.
These will be used to memoize the calls to novaclient.
"""
return (
request.user.username,
request.user.token.id,
request.user.tenant_id,
request.user.token.project.get('domain_id'),
base.url_for(request, 'compute'),
base.url_for(request, 'identity')
)
@memoized.memoized
def cached_novaclient(request, version=None):
(
username,
token_id,
project_id,
project_domain_id,
nova_url,
auth_url
) = get_auth_params_from_request(request)
if version is None:
version = VERSIONS.get_active_version()['version']
c = nova_client.Client(version,
username,
token_id,
project_id=project_id,
project_domain_id=project_domain_id,
auth_url=auth_url,
insecure=INSECURE,
cacert=CACERT,
http_log_debug=settings.DEBUG,
auth_token=token_id,
endpoint_override=nova_url)
return c
def novaclient(request, version=None):
if isinstance(version, api_versions.APIVersion):
version = version.get_string()
return cached_novaclient(request, version)
def get_novaclient_with_instance_desc(request):
microversion = get_microversion(request, "instance_description")
return novaclient(request, version=microversion)
@profiler.trace
def server_get(request, instance_id):
return Server(get_novaclient_with_instance_desc(request).servers.get(
instance_id), request)
|
Python
| 0.000074
|
@@ -3481,16 +3481,22 @@
 lf, attr
+, None
 ) for at
|
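The one-token fix supplies a default to `getattr`: without it, any server object missing one of the `OS-EXT-SRV-ATTR` keys raises `AttributeError` inside `has_extended_attrs`. A minimal reproduction of the pattern, independent of OpenStack:

```python
class FakeServer:
    host = 'compute-1'  # only one extended attribute present

attrs = ['host', 'kernel_id', 'ramdisk_id']
server = FakeServer()

# getattr(server, 'kernel_id') would raise AttributeError; the None default
# lets any() simply treat missing attributes as falsy.
print(any(getattr(server, attr, None) for attr in attrs))  # True
```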
d88e03ccb99a21566f18a75147c121ecaef010be
|
use timezone instead of naive
|
oauth2_provider/views/base.py
|
oauth2_provider/views/base.py
|
import logging
import datetime
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic import View, FormView
from oauthlib.oauth2 import Server
from braces.views import LoginRequiredMixin, CsrfExemptMixin
from ..settings import oauth2_settings
from ..exceptions import OAuthToolkitError
from ..forms import AllowForm
from ..models import get_application_model
from .mixins import OAuthLibMixin
Application = get_application_model()
log = logging.getLogger('oauth2_provider')
class BaseAuthorizationView(LoginRequiredMixin, OAuthLibMixin, View):
"""
Implements a generic endpoint to handle *Authorization Requests* as in :rfc:`4.1.1`. The view does not implement
any strategy to determine *authorize/do not authorize* logic.
The endpoint is used in the following flows:
* Authorization code
* Implicit grant
"""
def dispatch(self, request, *args, **kwargs):
self.oauth2_data = {}
return super(BaseAuthorizationView, self).dispatch(request, *args, **kwargs)
def error_response(self, error, **kwargs):
"""
Handle errors either by redirecting to redirect_uri with a json in the body containing error details or
providing an error response
"""
redirect, error_response = super(BaseAuthorizationView, self).error_response(error, **kwargs)
if redirect:
return HttpResponseRedirect(error_response['url'])
status = error_response['error'].status_code
return self.render_to_response(error_response, status=status)
class AuthorizationView(BaseAuthorizationView, FormView):
"""
Implements and endpoint to handle *Authorization Requests* as in :rfc:`4.1.1` and prompting the user with a form
to determine if she authorizes the client application to access her data. This endpoint is reached two times during
the authorization process:
* first receive a ``GET`` request from user asking authorization for a certain client application, a form is served
possibly showing some useful info and prompting for *authorize/do not authorize*.
* then receive a ``POST`` request possibly after user authorized the access
Some informations contained in the ``GET`` request and needed to create a Grant token during the ``POST`` request
would be lost between the two steps above, so they are temporary stored in hidden fields on the form.
A possible alternative could be keeping such informations in the session.
The endpoint is used in the followin flows:
* Authorization code
* Implicit grant
"""
template_name = 'oauth2_provider/authorize.html'
form_class = AllowForm
server_class = Server
validator_class = oauth2_settings.OAUTH2_VALIDATOR_CLASS
def get_initial(self):
# TODO: move this scopes conversion from and to string into a utils function
scopes = self.oauth2_data.get('scopes', [])
initial_data = {
'redirect_uri': self.oauth2_data.get('redirect_uri', None),
'scopes': ' '.join(scopes),
'client_id': self.oauth2_data.get('client_id', None),
'state': self.oauth2_data.get('state', None),
'response_type': self.oauth2_data.get('response_type', None),
}
return initial_data
def form_valid(self, form):
try:
credentials = {
'client_id': form.cleaned_data.get('client_id'),
'redirect_uri': form.cleaned_data.get('redirect_uri'),
'response_type': form.cleaned_data.get('response_type', None),
'state': form.cleaned_data.get('state', None),
}
scopes = form.cleaned_data.get('scopes')
allow = form.cleaned_data.get('allow')
uri, headers, body, status = self.create_authorization_response(
request=self.request, scopes=scopes, credentials=credentials, allow=allow)
self.success_url = uri
log.debug("Success url for the request: {0}".format(self.success_url))
return super(AuthorizationView, self).form_valid(form)
except OAuthToolkitError as error:
return self.error_response(error)
def get(self, request, *args, **kwargs):
try:
scopes, credentials = self.validate_authorization_request(request)
kwargs['scopes_descriptions'] = [oauth2_settings.SCOPES[scope] for scope in scopes]
kwargs['scopes'] = scopes
# at this point we know an Application instance with such client_id exists in the database
kwargs['application'] = Application.objects.get(client_id=credentials['client_id']) # TODO: cache it!
kwargs.update(credentials)
self.oauth2_data = kwargs
# following two loc are here only because of https://code.djangoproject.com/ticket/17795
form = self.get_form(self.get_form_class())
kwargs['form'] = form
# Check to see if the user has already granted access and return
# a successful response
require_approval = request.GET.get('approval_prompt', 'auto')
if require_approval == 'auto' and request.user.accesstoken_set.filter(
application=kwargs['application'],
expires__gt=datetime.datetime.now()).count():
uri, headers, body, status = self.create_authorization_response(
request=self.request, scopes=" ".join(scopes),
credentials=credentials, allow=True)
self.success_url = uri
return HttpResponseRedirect(self.success_url)
return self.render_to_response(self.get_context_data(**kwargs))
except OAuthToolkitError as error:
return self.error_response(error)
class TokenView(CsrfExemptMixin, OAuthLibMixin, View):
"""
Implements an endpoint to provide access tokens
The endpoint is used in the followin flows:
* Authorization code
* Password
* Client credentials
"""
server_class = Server
validator_class = oauth2_settings.OAUTH2_VALIDATOR_CLASS
def post(self, request, *args, **kwargs):
url, headers, body, status = self.create_token_response(request)
response = HttpResponse(content=body, status=status)
for k, v in headers.items():
response[k] = v
return response
|
Python
| 0.000055
|
@@ -11,24 +11,8 @@
 ging
-\nimport datetime
 \n\nfr
@@ -115,16 +115,50 @@
 FormView
+\nfrom django.utils import timezone
 \n\nfrom o
@@ -5359,24 +5359,15 @@
 _gt=
-datetime.datetim
+timezon
 e.no
|
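When Django's `USE_TZ` is enabled, `DateTimeField` values come back timezone-aware, and filtering them against the naive `datetime.datetime.now()` triggers Django's naive-datetime warning and can compare incorrectly across timezones; `django.utils.timezone.now()` returns an aware datetime instead. A sketch of the corrected check, assuming a standard Django setup:

```python
from django.utils import timezone

def has_valid_token(user, application):
    # expires is timezone-aware under USE_TZ = True, so the comparison
    # value must be aware as well.
    return user.accesstoken_set.filter(
        application=application,
        expires__gt=timezone.now(),
    ).exists()
```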
269747cff17e7f509d16044859f1e51a1d02ccf7
|
Split responsibility into the three endpoints
|
oauthlib/oauth2/ext/django.py
|
oauthlib/oauth2/ext/django.py
|
from __future__ import unicode_literals
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseForbidden
from django.views.decorators.csrf import csrf_exempt
import functools
import logging
from oauthlib.common import urlencode
from oauthlib.oauth2.draft25 import errors
log = logging.getLogger('oauthlib')
class OAuth2ProviderDecorator(object):
def __init__(self, server, error_uri):
self._server = server
self._error_uri = error_uri
def _extract_params(self, request):
log.debug('Extracting parameters from request.')
uri = request.build_absolute_uri()
http_method = request.method
headers = request.META
del headers['wsgi.input']
del headers['wsgi.errors']
if 'HTTP_AUTHORIZATION' in headers:
headers['Authorization'] = headers['HTTP_AUTHORIZATION']
body = urlencode(request.POST.items())
return uri, http_method, body, headers
def pre_authorization_view(self, f):
@functools.wraps(f)
def wrapper(request, *args, **kwargs):
uri, http_method, body, headers = self._extract_params(request)
redirect_uri = request.GET.get('redirect_uri', None)
log.debug('Found redirect uri %s.', redirect_uri)
try:
scopes, credentials = self._server.validate_authorization_request(
uri, http_method, body, headers)
log.debug('Saving credentials to session, %r.', credentials)
request.session['oauth2_credentials'] = credentials
kwargs['scopes'] = scopes
log.debug('Invoking view method, %r.', f)
return f(request, *args, **kwargs)
except errors.FatalClientError as e:
log.debug('Fatal client error, redirecting to error page.')
return HttpResponseRedirect(e.in_uri(self._error_uri))
except errors.OAuth2Error as e:
log.debug('Client error, redirecting back to client.')
return HttpResponseRedirect(e.in_uri(redirect_uri))
return wrapper
def post_authorization_view(self, f):
@functools.wraps(f)
def wrapper(request, *args, **kwargs):
uri, http_method, body, headers = self._extract_params(request)
scopes, credentials = f(request, *args, **kwargs)
log.debug('Fetched credentials view, %r.', credentials)
credentials.update(request.session.get('oauth2_credentials', {}))
log.debug('Fetched credentials from session, %r.', credentials)
redirect_uri = credentials.get('redirect_uri')
log.debug('Found redirect uri %s.', redirect_uri)
try:
url, headers, body, status = self._server.create_authorization_response(
uri, http_method, body, headers, scopes, credentials)
log.debug('Authorization successful, redirecting to client.')
return HttpResponseRedirect(url)
except errors.FatalClientError as e:
log.debug('Fatal client error, redirecting to error page.')
return HttpResponseRedirect(e.in_uri(self._error_uri))
except errors.OAuth2Error as e:
log.debug('Client error, redirecting back to client.')
return HttpResponseRedirect(e.in_uri(redirect_uri))
return wrapper
def access_token_view(self, f):
@csrf_exempt
@functools.wraps(f)
def wrapper(request, *args, **kwargs):
uri, http_method, body, headers = self._extract_params(request)
credentials = f(request, *args, **kwargs)
log.debug('Fetched credentials view, %r.', credentials)
url, headers, body, status = self._server.create_token_response(
uri, http_method, body, headers, credentials)
response = HttpResponse(content=body, status=status)
for k, v in headers:
response[k] = v
response['Content-Type'] = 'application/json;charset=UTF-8'
response['Cache-Control'] = 'no-store'
response['Pragma'] = 'no-cache'
return response
return wrapper
def protected_resource_view(self, scopes=None):
def decorator(f):
@csrf_exempt
@functools.wraps(f)
def wrapper(request, *args, **kwargs):
uri, http_method, body, headers = self._extract_params(request)
valid, r = self._server.verify_request(uri, http_method, body, headers, scopes)
kwargs.update({
'client': r.client,
'resource_owner': r.resource_owner,
'scopes': r.scopes
})
if valid:
return f(request, *args, **kwargs)
else:
return HttpResponseForbidden()
return wrapper
return decorator
|
Python
| 0.999997
|
@@ -387,50 +387,301 @@
 lf, 
-server, error_uri):\n        self._server =
+error_uri, server=None, authorization_endpoint=None,\n                 token_endpoint=None, resource_endpoint=None):\n        self._authorization_endpoint = authorization_endpoint or server\n        self._token_endpoint = token_endpoint or server\n        self._resource_endpoint = resource_endpoint or
 ser
@@ -1586,22 +1586,38 @@
 = self._
-server
+authorization_endpoint
 .validat
@@ -3056,38 +3056,54 @@
 status = self._
-server
+authorization_endpoint
 .create_authoriz
@@ -4104,22 +4104,30 @@
 = self._
-server
+token_endpoint
 .create_
@@ -4860,30 +4860,66 @@
 lf._
-server.verify_request(
+resource_endpoint.verify_request(\n
 uri,
|
c42ffe540b30da5dbf29d557a01503ecad246afb
|
Fix changelog for newer OSes
|
etc/scripts/changelog.py
|
etc/scripts/changelog.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Compiler Explorer Authors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import subprocess
html_escape_table = {
"&": "&",
'"': """,
"'": "'",
">": ">",
"<": "<",
}
commit_template = ' <div class="row commit-entry">\n' \
' <div class="col-sm-12">\n' \
' <a href="{}commit/{}" rel="noreferrer noopener" target="_blank">{}</a>\n' \
' </div>\n' \
' </div>\n'
def html_escape(text):
return "".join(html_escape_table.get(c, c) for c in text)
def format_commit(url, commit):
# Input format is "<hash> <description>", so split only on the first space and escape the commit message
grouped_commit = commit.split(' ', 1)
print(grouped_commit)
try:
return commit_template.format(url, grouped_commit[0], html_escape(grouped_commit[1]))
except Exception as e:
print('There was an error in changelog.py: {}'.format(e))
return ''
def get_commits(repo):
coms = subprocess.check_output(['git', 'log', '--date=local', '--after="3 months ago"', '--grep=(#[0-9]*)', '--oneline'])
with open('static/changelog.html', 'w') as f:
f.write('<div class="commits-list">\n')
for commit in coms.splitlines():
f.write(format_commit(repo, commit))
f.write('</div>\n')
if __name__ == '__main__':
get_commits('https://github.com/compiler-explorer/compiler-explorer/')
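As a quick, self-contained illustration of the helpers above (not part of the original script; the hash and message are invented), format_commit splits off the hash and HTML-escapes the message:
line = 'abc1234 Support <chrono> formatting (#42)'
html = format_commit('https://github.com/compiler-explorer/compiler-explorer/', line)
# html is a .commit-entry <div> linking to .../commit/abc1234, with the
# message escaped so '<chrono>' becomes '&lt;chrono&gt;'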
|
Python
| 0.000001
|
@@ -1541,18 +1541,18 @@
late = '
-
+''
%3Cdiv c
@@ -1579,89 +1579,37 @@
ry%22%3E
-%5Cn' %5C%0A ' %3Cdiv class=%22col-sm-12%22%3E%5Cn' %5C%0A '
+%0A %3Cdiv class=%22col-sm-12%22%3E%0A
@@ -1684,80 +1684,28 @@
%3C/a%3E
-%5Cn' %5C%0A ' %3C/div%3E%5Cn' %5C%0A '
+%0A %3C/div%3E%0A
%3C/div%3E
%5Cn'%0A
@@ -1700,18 +1700,19 @@
%3C/div%3E
-%5Cn
+%0A''
'%0A%0A%0Adef
@@ -2367,16 +2367,32 @@
eline'%5D)
+.decode('utf-8')
%0A wit
|
391d69f4ce485ff02a3844b4cf5a54f23125c477
|
test presab
|
partBreaker.py
|
partBreaker.py
|
#!/usr/bin/env python
import argparse
import Get_fasta_from_Ref as GFR
import re
from sys import argv
import os
def Subsetfromto(FastaDict, outFile, start,end):
"""Writes a subsect multifast file, boud at sequence indeces start and end, form sequence stored in a dictioanry"""
with open(outFile, 'w') as out:
for seqID in FastaDict.iterkeys():
seq=FastaDict[seqID][start:end]
out.write(">%s\n%s\n" %(seqID,seq))
def main(matrix, partfile, outdir):
Smatrix=GFR.Fasta_to_Dict(matrix)
if not os.path.exists(outdir):
os.makedirs(outdir)
else:
print 'The output dir already exists!'
with open(partfile, 'r') as P:
for pline in P:
outN=pline.split(',')[0]
outf="%s/%s" %(outdir,outN)
start=int(pline.split(',')[1].split('-')[0]) -1
end=int(pline.split(',')[1].split('-')[1])
Subsetfromto(Smatrix, outf, start, end)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='This is a simple script for breaking supermatrices into individual MSAs based on a partition file. The required partition file is a two-column, comma-separated text file where the first column gives the name of the partition, reused as the name of the output file, and the second column is the interval of positions in the supermatrix, separated only by "-". This script deals only with consecutive data blocks. Codon partitioning is not implemented... yet.')
parser.add_argument('-in', dest = 'matrix', type = str, help = 'Input alignments in fasta format')
parser.add_argument('-p', dest = 'partitions', type =str, help = 'Input partition definition file: a comma-separated text file with two columns')
parser.add_argument('-o', dest = 'outdir', help='Specify directory where to write partitions')
# parser.add_argument('-c', help="")
args = parser.parse_args()
main(args.matrix, args.partitions, args.outdir)
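To make the expected partition-file format concrete, here is a small end-to-end sketch (all file names and coordinates are invented; intervals are read as 1-based and inclusive, matching the start/end arithmetic above):
# Hypothetical run, equivalent to:
#   python partBreaker.py -in supermatrix.fasta -p parts.txt -o parts_out
with open('parts.txt', 'w') as p:
    p.write('gene1.fasta,1-300\n')
    p.write('gene2.fasta,301-750\n')
main('supermatrix.fasta', 'parts.txt', 'parts_out')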
|
Python
| 0.000001
|
@@ -108,16 +108,26 @@
mport os
+%0Apresab=%7B%7D
%0A%0Adef Su
@@ -368,16 +368,45 @@
keys():%0A
+ presab%5BseqID%5D=%5B%5D%0A
@@ -489,17 +489,156 @@
D,seq))%0A
-%0A
+ if set(seq) in set('-','?'):%0A presab%5BseqId%5D.append(0)%0A else: %0A presab%5BseqId%5D.append(1)
%0A%0Adef ma
@@ -1135,28 +1135,29 @@
-
+print presab%0A
%0Aif __na
@@ -1814,16 +1814,16 @@
ormat')%0A
+
pars
@@ -1895,17 +1895,16 @@
ut parti
-o
tion def
|
7399dfa45c9b5a563798f504e9eb4054faf2aa30
|
print a more meaningful description of EventAct
|
open_municipio/events/models.py
|
open_municipio/events/models.py
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from open_municipio.acts.models import Act
from open_municipio.events.managers import EventManager
from open_municipio.people.models import Institution
from datetime import datetime, date
class Event(models.Model):
"""
This class allows OpenMunicipio site to keep track of upcoming
events.
Fields:
* A datefield, no time is needed
* A foreign key to the ``Institution`` that will "host" the event;
eg: council or city government
* A foreign key to the involved ``Act``
* A textfield for some description
Since we will always be interested in future events (with regard
to current date), a custom model manager is provided that allows
``Event.future.all()``.
"""
date = models.DateField(_("Event date"), help_text=_("The day when the event is going to be held"))
event_time = models.TimeField(_("Event time"), blank=True, null=True, help_text=_("The time of the event"))
institution = models.ForeignKey(Institution, verbose_name=_("Institution"), help_text=_("The institution that's going to meet during the event"))
acts = models.ManyToManyField(Act, verbose_name=_("Acts"), blank=True, null=True, help_text=_("Acts the discussion is linked to, if any"),through="EventAct")
title = models.CharField(_("Title"), max_length=128, blank=True, null=True, help_text=_("A short title for this event"))
description = models.TextField(_("Description"), blank=True, null=True, help_text=_("A description, containing the list of things that will be discussed during this event"))
address = models.CharField(_("Address"), max_length=128, blank=True, null=True, help_text=_("The physical address where the meeting is going to be held") )
# The default manager
objects = models.Manager()
# Future events will be retrieved using ``Event.future.all()``
future = EventManager()
class Meta:
verbose_name = _('event')
verbose_name_plural = _('events')
def __unicode__(self):
uc = u'%s %s - %s' % (self.date, self.event_time, self.title)
return uc
@property
def is_past_due(self):
if date.today() > self.date:
return True
return False
class EventAct(models.Model):
"""
Through model linking an Act to an Event, ordered within the event.
"""
act = models.ForeignKey(Act)
event = models.ForeignKey(Event)
order = models.IntegerField(blank=False,null=False)
class Meta:
ordering = ('order',)
# the constraint below would be helpful, but it make the interface validation
# hard to manage -FS
# unique_together = ('order','event'),('act','event')
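A brief, hypothetical usage sketch of the pieces the docstring describes (assumes a configured Django project; EventManager is assumed to filter on the event date):
upcoming = Event.future.all()       # custom manager: upcoming events only
past = [e for e in Event.objects.all() if e.is_past_due]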
|
Python
| 0.999999
|
@@ -2666,16 +2666,105 @@
('act','event')%0A
+%0A def __unicode__(self):%0A return %22%25s (%25s)%22 %25 (self.act.title, self.event.date)%0A
|
a9373c3e4c65160bc04e56edbc356e086d2dae71
|
Tweak division display
|
opencivicdata/admin/division.py
|
opencivicdata/admin/division.py
|
from django.contrib import admin
from opencivicdata.models import division as models
@admin.register(models.Division)
class DivisionAdmin(admin.ModelAdmin):
pass
|
Python
| 0
|
@@ -160,10 +160,76 @@
-pass
+list_display = ('display_name', 'id')%0A search_fields = list_display
%0A%0A
|
21c2daf95e7352932346dec2c570cfefce867ed1
|
Add some convenience properties to Node
|
odlclient/v2/node.py
|
odlclient/v2/node.py
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from odlclient.openstack.common.apiclient import base
from odlclient.v2.base import Manager
class Node(base.Resource):
@property
def id(self):
return self.node['id']
@property
def type(self):
return self.node['type']
class NodeConnector(base.Resource):
pass
class NodeManager(Manager):
base = 'controller/nb/v2/switchmanager'
has_container = True
resource_class = Node
def list(self, container=None):
url = self._url('nodes', container=container)
return self._list(url, response_key='nodeProperties')
def save(self, container=None):
url = self._url('nodes', container=container)
self._post(url)
def list_connectors(self, node_type, node_id, container=None):
url = self._url('node', node_type, node_id, container=container)
return self._list(url, response_key='nodeConnectorProperties',
obj_class=NodeConnector)
def create_property(self, node_type, node_id, name, value, container=None):
url = self._url(
'node', node_type, node_id, 'property', name, value,
container=container)
self._put(url, value)
def delete_property(self, node_type, node_id, name, value, container=None):
url = self._url(
'node', node_type, node_id, 'property', name, value,
container=container)
self._delete(url)
def create_connector_property(self, node_type, node_id, connector_type,
connector_name, name, value, container=None):
url = self._url('nodeconnector', node_type, node_id, connector_name,
'property', name, value, container=container)
self._put(url, value)
def delete_connector_property(self, node_type, node_id, connector_type,
connector_name, name, value, container=None):
url = self._url('nodeconnector', node_type, node_id, connector_name,
'property', name, value, container=container)
self._delete(url, value)
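For reference, the Node wrapper can be exercised without a live controller. The payload shape below ({'node': {'id': ..., 'type': ...}}) is an assumption inferred from the two properties above; base.Resource copies each top-level key of the dict onto the instance, so the 'node' key backs both properties.
n = Node(None, {'node': {'id': '00:00:00:00:00:00:00:01', 'type': 'OF'}})
print(n.id, n.type)  # -> 00:00:00:00:00:00:00:01 OF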
|
Python
| 0
|
@@ -648,16 +648,59 @@
icense.%0A
+from datetime import datetime%0Aimport time%0A%0A
from odl
@@ -944,16 +944,355 @@
type'%5D%0A%0A
+ @property%0A def description(self):%0A data = self._info%5B'properties'%5D%5B'description'%5D%5B'value'%5D%0A return None if data == 'None' else data%0A%0A @property%0A def connected_since(self):%0A data = self._info%5B'properties'%5D%5B'timeStamp'%5D%5B'value'%5D%0A return datetime.fromtimestamp(time.mktime(time.gmtime(data / 1000)))%0A%0A
%0Aclass N
|
b6f698f5fd6faf90b36bbb560ba4df13192cff42
|
Update _settings.py
|
templates/root/appfiles/_settings.py
|
templates/root/appfiles/_settings.py
|
"""
Django settings for template project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<%= secret_key %>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'<%= appName %>',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = '<%= projectName %>.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '<%= projectName %>.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
|
Python
| 0
|
@@ -1023,24 +1023,46 @@
aticfiles',%0A
+ 'rest_framework',%0A
'%3C%25= app
|
281eda574c6ed3d0d9b333b67f53a13ea3c17398
|
Remove `tfds.core.builder_from_directory` alias
|
tensorflow_datasets/core/__init__.py
|
tensorflow_datasets/core/__init__.py
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API to define datasets."""
# Ensure TensorFlow is importable and its version is sufficiently recent. This
# needs to happen before anything else, since the imports below will try to
# import tensorflow, too.
from tensorflow_datasets.core import tf_compat
tf_compat.ensure_tf_install()
# pylint:disable=g-import-not-at-top
from tensorflow_datasets.core import community # pylint: disable=g-bad-import-order
from tensorflow_datasets.core.dataset_builder import BeamBasedBuilder
from tensorflow_datasets.core.dataset_builder import BuilderConfig
from tensorflow_datasets.core.dataset_builder import DatasetBuilder
from tensorflow_datasets.core.dataset_builder import GeneratorBasedBuilder
from tensorflow_datasets.core.dataset_info import BeamMetadataDict
from tensorflow_datasets.core.dataset_info import DatasetInfo
from tensorflow_datasets.core.dataset_info import Metadata
from tensorflow_datasets.core.dataset_info import MetadataDict
from tensorflow_datasets.core.file_adapters import FileFormat
from tensorflow_datasets.core.lazy_imports_lib import lazy_imports
from tensorflow_datasets.core.naming import ShardedFileTemplate
from tensorflow_datasets.core.read_only_builder import builder_from_directory
from tensorflow_datasets.core.registered import DatasetNotFoundError
from tensorflow_datasets.core.split_builder import SplitGeneratorLegacy as SplitGenerator
from tensorflow_datasets.core.splits import ReadInstruction
from tensorflow_datasets.core.splits import Split
from tensorflow_datasets.core.splits import SplitDict
from tensorflow_datasets.core.splits import SplitInfo
from tensorflow_datasets.core.splits import SubSplitInfo
from tensorflow_datasets.core.utils import Experiment
from tensorflow_datasets.core.utils import gcs_path
from tensorflow_datasets.core.utils import tfds_path
from tensorflow_datasets.core.utils import Version
from tensorflow_datasets.core.utils.benchmark import BenchmarkResult
from tensorflow_datasets.core.utils.file_utils import add_data_dir
from tensorflow_datasets.core.utils.generic_path import as_path
from tensorflow_datasets.core.utils.type_utils import PathLike
from tensorflow_datasets.core.utils.type_utils import ReadOnlyPath
from tensorflow_datasets.core.utils.type_utils import ReadWritePath
def benchmark(*args, **kwargs):
raise DeprecationWarning(
"`tfds.core.benchmark` has been renamed to `tfds.benchmark`")
__all__ = [
"add_data_dir",
"as_path",
"BenchmarkResult",
"BeamBasedBuilder",
"BeamMetadataDict",
"BuilderConfig",
"builder_from_directory",
"DatasetBuilder",
"DatasetInfo",
"DatasetNotFoundError",
"Experiment",
"FileFormat",
"GeneratorBasedBuilder",
"gcs_path",
"lazy_imports",
"Metadata",
"MetadataDict",
"PathLike",
"ReadInstruction",
"ReadOnlyPath",
"ReadWritePath",
"ShardedFileTemplate",
"SplitDict",
"SplitGenerator",
"SplitInfo",
"tfds_path",
"Version",
]
|
Python
| 0
|
@@ -1751,87 +1751,8 @@
te%0A%0A
-from tensorflow_datasets.core.read_only_builder import builder_from_directory%0A%0A
from
@@ -3067,38 +3067,8 @@
g%22,%0A
- %22builder_from_directory%22,%0A
|
a39cbaf22401c466f02e5b12e3ebdd46fa8eef0c
|
Fix issue refs in test_numpy_piecewise_regression
|
sympy/printing/tests/test_numpy.py
|
sympy/printing/tests/test_numpy.py
|
from sympy import Piecewise
from sympy.abc import x
from sympy.printing.lambdarepr import NumPyPrinter
def test_numpy_piecewise_regression():
"""
NumPyPrinter needs to print Piecewise()'s choicelist as a list to avoid
breaking compatibility with numpy 1.8. This is not necessary in numpy 1.9+.
See gh-9747 and gh-9749 for details.
"""
p = Piecewise((1, x < 0), (0, True))
assert NumPyPrinter().doprint(p) == 'select([x < 0,True], [1,0], default=nan)'
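For comparison, a companion sketch (not part of the original test) showing the same printer on a three-branch Piecewise; the output follows the select(...) pattern asserted above:
p2 = Piecewise((x, x < 0), (2, x < 1), (0, True))
print(NumPyPrinter().doprint(p2))
# expected shape: select([x < 0,x < 1,True], [x,2,0], default=nan)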
|
Python
| 0
|
@@ -313,23 +313,41 @@
See
-gh-9747 and gh-
+sympy/sympy#9747 and sympy/sympy#
9749
|
1addeefdf51713d562788018ebfb6549b215f55b
|
Fix C typo error in a test
|
test/option/tree-lib.py
|
test/option/tree-lib.py
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Make sure that --tree=derived output with a library dependency shows
the dependency on the library. (On earlier versions of the Microsoft
toolchain this wouldn't show up unless the library already existed
on disk.)
Issue 1363: http://scons.tigris.org/issues/show_bug.cgi?id=1363
"""
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """
env = Environment(LIBPREFIX='',
LIBSUFFIX='.lib',
OBJSUFFIX='.obj',
EXESUFFIX='.exe')
env.AppendENVPath('PATH', '.')
l = env.Library( 'util.lib', 'util.c' )
p = env.Program( 'test_tree_lib.exe', 'main.c', LIBS=l )
env.Command( 'foo.h', p, '$SOURCE > $TARGET')
""")
test.write('main.c', """\
#include <stdlib.h>
#include <stdio.h>
int
main(int argc, char *argv)
{
printf("#define FOO_H \\"foo.h\\"\\n");
return (0);
}
""")
test.write('util.c', """\
void
util(void)
{
;
}
""")
expect = """
+-test_tree_lib.exe
+-main.obj
+-util.lib
+-util.obj
"""
test.run(arguments = '--tree=derived foo.h')
test.must_contain_all_lines(test.stdout(), [expect])
test.up_to_date(arguments = 'foo.h')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
Python
| 0.000144
|
@@ -1935,16 +1935,17 @@
c, char
+*
*argv)%0A%7B
|
53cc757e02b55d7689b9fd9e6331ac0e1a466643
|
Fix hanging highlight in window
|
tekka/lib/general_output_buffer.py
|
tekka/lib/general_output_buffer.py
|
# coding: UTF-8
"""
Copyright (c) 2009 Marian Tietz
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
import gtk
import pango
from gettext import gettext as _
from . import htmlbuffer
from .. import config
from .. import gui
def build_handler_menu(tag, widget, event, iter, attrs):
def hide_message_cb(item, tab, msgtype):
r_tuple = build_tuple(msgtype, tab)
config.append_list("general_output", "filter", str(r_tuple))
def build_tuple(msgtype, tab):
if tab.is_channel() or tab.is_query():
r_tuple = (str(msgtype), str(tab.server.name), str(tab.name))
else:
r_tuple = (str(msgtype), str(tab.name))
return r_tuple
tab = gui.tabs.get_tab_by_path(eval(attrs["path"]))
if not tab:
raise ValueError, "tab could not be retrieved (%s)" % (
attrs["path"])
items = []
items.append(gtk.MenuItem(label = tab.name))
items.append(gtk.SeparatorMenuItem())
items.append(gtk.ImageMenuItem(gtk.STOCK_ZOOM_OUT))
filter = config.get_list("general_output", "filter", [])
label_s = _("Hide '%s' messages")
items[-1].set_label(label_s % (attrs["type"]))
items[-1].connect("activate", hide_message_cb,
tab, attrs["type"])
menu = gtk.Menu()
for item in items:
menu.add(item)
menu.show_all()
return menu
def go_handler(tag, widget, event, iter, attrs):
def switch_highlight(tag, switch):
""" switch highlighting of given tag """
if switch:
tag.set_property("weight", pango.WEIGHT_BOLD)
else:
self.tag.set_property("weight", pango.WEIGHT_NORMAL)
self = go_handler
# check for previous tag and unhighlight it
if hasattr(self, "tag"):
if self.tag != tag:
switch_highlight(tag, False)
# initialize (new) attributes
self.tag = tag
self.widget = widget
self.event = event
self.iter = iter
self.path_string = attrs["path"]
# __init__
if not hasattr(self, "c_init"):
self.c_init = True
def outer_cb(*x):
switch_highlight(self.tag, False)
# FIXME: this does not cover all exits
widget.connect("motion-notify-event", outer_cb)
widget.parent.connect("motion-notify-event", outer_cb)
widget.parent.parent.connect("motion-notify-event", outer_cb)
# abort event handling on <a> tags
for itag in iter.get_tags():
try:
itag.s_attribute["a"]
except KeyError:
pass
else:
return False
# event handling
if event.type == gtk.gdk.MOTION_NOTIFY:
if event.state & gtk.gdk.BUTTON1_MASK:
return False
switch_highlight(tag, True)
return True
if event.type == gtk.gdk.BUTTON_PRESS:
if event.button == 3:
# right mbtn
menu = build_handler_menu(tag, widget, event, iter, attrs)
menu.popup(None, None, None, event.button, event.time)
return True
if event.type == gtk.gdk.BUTTON_RELEASE:
# left mbtn
if (event.button == 1
and not widget.get_buffer().get_has_selection()):
path = eval(self.path_string)
gui.tabs.switch_to_path(path)
class GOHTMLHandler(htmlbuffer.HTMLHandler):
def __init__(self, textbuffer, GOHandler, URLhandler):
htmlbuffer.HTMLHandler.__init__(self, textbuffer, URLhandler)
self.go_handler = GOHandler
def characters(self, text):
htmlbuffer.HTMLHandler.characters(self, text)
def startElement(self, name, attrs):
if name == "goref":
if self.go_handler:
tag = self.textbuffer.create_tag(None)
tag.s_attribute = {"goref":True}
tag.connect("event", self.go_handler, attrs)
self.elms.append(name)
self.tags.append(tag)
htmlbuffer.HTMLHandler.startElement(self, name, attrs)
def endElement(self, name):
htmlbuffer.HTMLHandler.endElement(self, name)
def endDocument(self):
htmlbuffer.HTMLHandler.endDocument(self)
class GOHTMLBuffer(htmlbuffer.HTMLBuffer):
__gtype_name__ = "GOHTMLBuffer"
def __init__(self, go_handler=go_handler, handler=None,
tagtable=None):
htmlbuffer.HTMLBuffer.__init__(self, handler, tagtable)
contentHandler = GOHTMLHandler(self, go_handler, self.URLHandler)
self.parser.setContentHandler(contentHandler)
self.go_handler = go_handler
def go_insert(self, iter, text, tab, type):
self.insert_html(iter, "<goref type='%s' path='%s'>%s</goref>" % (
type, tab.path, text))
|
Python
| 0.000001
|
@@ -3106,209 +3106,80 @@
%0A%0A%09%09
-# FIXME: this does not cover all exists%0A%09%09widget.connect(%22motion-notify-event%22, outer_cb)%0A%09%09widget.parent.connect(%22motion-notify-event%22, outer_cb)%0A%09%09widget.parent.parent.connect(%22motion-notify-event%22,
+gui.widgets.get_object(%22main_window%22).connect(%22motion-notify-event%22,%0A%09%09%09
oute
|
25dc03c3db7e224463f11e513f94fb9cb15ed250
|
Fix check_service_client_function doc typo
|
tempest/tests/lib/services/base.py
|
tempest/tests/lib/services/base.py
|
# Copyright 2015 Deutsche Telekom AG. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from oslo_serialization import jsonutils as json
from tempest.tests import base
from tempest.tests.lib import fake_http
class BaseServiceTest(base.TestCase):
def create_response(self, body, to_utf=False, status=200, headers=None):
json_body = {}
if body:
json_body = json.dumps(body)
if to_utf:
json_body = json_body.encode('utf-8')
resp = fake_http.fake_http_response(headers, status=status), json_body
return resp
def check_service_client_function(self, function, function2mock,
body, to_utf=False, status=200,
headers=None, mock_args=None,
**kwargs):
"""Mock a service client function for unit testing.
:param function: The service client function to call.
:param function2mock: The REST call to mock inside the service client
function.
:param body: Expected response body returned by the service client
function.
:param to_utf: Whether to use UTF-8 encoding for the response.
:param status: Expected response status returned by the service client
function.
:param headers: Expected headers returned by the service client
function.
:param mock_args: List/dict/value of expected args/kwargs called by
function2mock. For example:
* If mock_args=['foo'] then ``assert_called_once_with('foo')``
is called.
* If mock_args={'foo': 'bar'} then
``assert_called_once_with(foo='bar')`` is called.
* If mock_args='foo' then ``assert_called_once_with('foo')``
is called.
:param kwargs: kwargs that are passed to function.
"""
mocked_response = self.create_response(body, to_utf, status, headers)
fixture = self.useFixture(fixtures.MockPatch(
function2mock, return_value=mocked_response))
if kwargs:
resp = function(**kwargs)
else:
resp = function()
self.assertEqual(body, resp)
if isinstance(mock_args, list):
fixture.mock.assert_called_once_with(*mock_args)
elif isinstance(mock_args, dict):
fixture.mock.assert_called_once_with(**mock_args)
elif mock_args is not None:
fixture.mock.assert_called_once_with(mock_args)
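A minimal, hypothetical test built on this helper might look like the sketch below; the service client, the fake body, and the mocked RestClient path are illustrative assumptions, not tempest fixtures:
class TestShowThing(BaseServiceTest):
    FAKE_THING = {'thing': {'id': '1'}}

    def test_show_thing(self):
        # self.client would be a real service client built in setUp().
        self.check_service_client_function(
            self.client.show_thing,
            'tempest.lib.common.rest_client.RestClient.get',
            self.FAKE_THING,
            status=200,
            thing_id='1')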
|
Python
| 0.000001
|
@@ -1785,13 +1785,14 @@
r re
-quest
+sponse
.%0A
|
df9ff4f13fc7da111bc11cf5f390efe94352b6e6
|
Fix Setting class
|
src/wikicurses/__init__.py
|
src/wikicurses/__init__.py
|
import os
import json
import pkgutil
from enum import Enum
_data = pkgutil.get_data('wikicurses', 'interwiki.list').decode()
wikis = dict([i.split('|')[0:2] for i in _data.splitlines() if i[0]!='#'])
default_configdir = os.environ['HOME'] + '/.config'
configpath = os.environ.get('XDG_CONFIG_HOME', default_configdir) + '/wikicurses'
class Settings:
def __init__(self, name):
self.file = configpath + '/' + name
def __iter__(self):
if not os.path.exists(self.file):
yield from ()
with open(self.file) as file:
yield from json.load(file)
def _save(self, bookmarks):
if not os.path.exists(configpath):
os.mkdir(configpath)
with open(self.file, 'w') as file:
json.dump(bookmarks, file)
def add(self, bmark):
bookmarks = set(self)
bookmarks.add(bmark)
self._save(list(bookmarks))
def discard(self, bmark):
bookmarks = set(self)
bookmarks.discard(bmark)
self._save(list(bookmarks))
bmarks = Settings('bookmarks')
class BitEnum(int, Enum):
def __new__(cls, *args):
value = 1 << len(cls.__members__)
return int.__new__(cls, value)
formats = BitEnum("formats", "i b blockquote")
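A quick illustration of BitEnum: members are assigned 1 << position in declaration order, so they combine into an int bitmask.
assert (formats.i, formats.b, formats.blockquote) == (1, 2, 4)
mask = formats.i | formats.blockquote  # == 5
assert mask & formats.b == 0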
|
Python
| 0.000001
|
@@ -513,16 +513,35 @@
from ()%0A
+ return%0A
|
afedc41fd4e573f4db38f2fde38b2286d623b4c4
|
Remove obsolete property
|
src/zeit/campus/article.py
|
src/zeit/campus/article.py
|
import zope.interface
import zeit.cms.content.reference
import zeit.campus.interfaces
class TopicpageLink(zeit.cms.related.related.RelatedBase):
zope.interface.implements(zeit.campus.interfaces.ITopicpageLink)
topicpagelink = zeit.cms.content.reference.SingleResource(
'.head.topicpagelink', 'related')
topicpagelink_label = zeit.cms.content.dav.mapProperties(
zeit.campus.interfaces.ITopicpageLink,
zeit.cms.interfaces.DOCUMENT_SCHEMA_NS,
('topicpagelink_label',))
topicpagelink_label = zeit.cms.content.property.ObjectPathProperty(
'.head.topicpagelink.label',
zeit.campus.interfaces.ITopicpageLink['topicpagelink_label'])
|
Python
| 0.000216
|
@@ -323,200 +323,8 @@
')%0A%0A
- topicpagelink_label = zeit.cms.content.dav.mapProperties(%0A zeit.campus.interfaces.ITopicpageLink,%0A zeit.cms.interfaces.DOCUMENT_SCHEMA_NS,%0A ('topicpagelink_label',))%0A%0A
|
e60a05886c52574227b1a73fe02575ede81ffa5e
|
mark out-of-date tests with a @skip
|
staff/tests/tests_views.py
|
staff/tests/tests_views.py
|
from django.core import mail
from django.test import Client, TestCase
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django.utils.http import urlencode
class StaffAddView(TestCase):
fixtures = ['ophasebase.json', 'staff.json', 'students.json']
def test_redirect(self):
"""Test for Redirect to SSO Login page"""
c = Client()
suffix = urlencode({"next": reverse('staff:registration')})
redirect_url = "{}?{}".format(reverse('pyTUID:login'), suffix)
response = c.get(reverse('staff:registration'))
self.assertRedirects(response, redirect_url, target_status_code=302)
def test_send_email(self):
"""Sending an email after successfull register"""
pass
# TODO Use fake SSO in test
"""
c = Client()
register_view = reverse('staff:registration')
self.assertEqual(len(mail.outbox), 0)
testdata = {'prename': 'Leah',
'name': 'Bayer',
'email': 'leah.bayer@example.com',
'phone': '016031368212',
'matriculated_since': 'today',
'degree_course': 'Bachelor',
'experience_ophase': 'Nothing until now',
'is_helper': True,
'helper_jobs': 1,}
# sending an incomplete form should not send an email
response = c.post(register_view, testdata, follow=True)
self.assertEqual(response.status_code, 200)
self.assertFormError(response, 'form', 'why_participate', _('This field is required.'))
self.assertEqual(len(mail.outbox), 0)
# a complete form should send one email
testdata['why_participate'] = 'You need testdata'
response = c.post(register_view, testdata, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.redirect_chain, [(reverse('staff:registration_success'), 302)])
self.assertEqual(len(mail.outbox), 1)
smail = mail.outbox[0]
self.assertEqual(len(smail.to), 1)
self.assertEqual(smail.to[0], 'Leah Bayer <leah.bayer@example.com>')
"""
|
Python
| 0.000004
|
@@ -1,20 +1,47 @@
+from unittest import skip%0A%0A
from django.core imp
@@ -708,16 +708,26 @@
e=302)%0A%0A
+ @skip%0A
def
@@ -812,22 +812,8 @@
%22%22%0A%0A
- pass%0A%0A
@@ -845,28 +845,16 @@
n test%0A%0A
- %22%22%22%0A
@@ -2251,17 +2251,4 @@
m%3E')
-%0A %22%22%22%0A
|
42ec06aa5e2034266f817dc6465cd8bf4fea6ead
|
fix migration
|
corehq/apps/linked_domain/migrations/0005_migrate_linked_app_toggle.py
|
corehq/apps/linked_domain/migrations/0005_migrate_linked_app_toggle.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-02-01 15:00
from __future__ import unicode_literals
from couchdbkit import ResourceNotFound
from django.db import migrations
from corehq.toggles import LINKED_DOMAINS
from toggle.models import Toggle
def _migrate_linked_apps_toggle(apps, schema_editor):
try:
linked_apps_toggle = Toggle.get('linked_apps')
except ResourceNotFound:
pass
else:
linked_domains_toggle = Toggle(
slug=LINKED_DOMAINS.slug, enabled_users=linked_apps_toggle.enabled_users
)
linked_domains_toggle.save()
def noop(*args, **kwargs):
pass
class Migration(migrations.Migration):
initial = True
dependencies = [
('linked_domain', '0004_domainlinkhistory'),
]
operations = [
migrations.RunPython(_migrate_linked_apps_toggle, noop)
]
|
Python
| 0.000001
|
@@ -428,16 +428,110 @@
else:%0A
+ try:%0A Toggle.get(LINKED_DOMAINS.slug)%0A except ResourceNotFound:%0A
@@ -562,16 +562,20 @@
Toggle(%0A
+
@@ -659,18 +659,26 @@
-)%0A
+ )%0A
|
7c62ae1c337348086cb14f1ec8c1d97a23b33f30
|
remove debug case
|
custom/icds_reports/management/commands/stale_data_in_household_ucr.py
|
custom/icds_reports/management/commands/stale_data_in_household_ucr.py
|
import inspect
import dateutil
from django.core.management.base import BaseCommand, CommandError
from datetime import datetime
from corehq.apps.hqadmin.management.commands.stale_data_in_es import RunConfig, get_sql_case_data_for_db
from corehq.apps.userreports.models import StaticDataSourceConfiguration, get_datasource_config
from corehq.apps.userreports.util import get_table_name
from corehq.form_processor.utils import should_use_sql_backend
from corehq.sql_db.util import get_db_aliases_for_partitioned_query
from custom.icds_reports.models.aggregate import get_cursor, AggAwc
from dimagi.utils.chunked import chunked
class Command(BaseCommand):
"""
Returns list of (doc_id, doc_type, doc_subtype, ucr_insert_on, modified_on)
tuples for all household cases that are not found in the static-household_cases UCR.
Can be used in conjunction with republish_doc_changes
1. Generate tuples not updated in the UCR, with extra debug columns
$ ./manage.py stale_data_in_household_ucr <DOMAIN> --start 2019-09-19 --end 2019-09-28 > stale_ids.txt
2. Republish case changes
$ ./manage.py republish_doc_changes <DOMAIN> stale_ids.txt
"""
help = inspect.cleandoc(__doc__).split('\n')[0]
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument(
'--start',
action='store',
help='Only include data modified after this date',
)
parser.add_argument(
'--end',
action='store',
help='Only include data modified before this date',
)
def handle(self, domain, **options):
start = dateutil.parser.parse(options['start']) if options['start'] else datetime(2010, 1, 1)
end = dateutil.parser.parse(options['end']) if options['end'] else datetime.utcnow()
run_config = RunConfig(domain, start, end, 'household')
if not should_use_sql_backend(run_config.domain):
raise CommandError('This command only supports SQL domains.')
for case_id, case_type, ucr_date, primary_date in _get_stale_data(run_config):
print(f"{case_id},CommCareCase,{case_type},{ucr_date},{primary_date}")
def _get_stale_data(run_config):
for db in get_db_aliases_for_partitioned_query():
matching_records_for_db = get_sql_case_data_for_db(db, run_config)
chunk_size = 1000
for chunk in chunked(matching_records_for_db, chunk_size):
case_ids = [val[0] for val in chunk]
ucr_insertion_dates = _get_ucr_insertion_dates(run_config.domain, case_ids)
for case_id, case_type, sql_modified_on in chunk:
ucr_insert_date = ucr_insertion_dates.get(case_id)
if not ucr_insert_date or (ucr_insert_date < sql_modified_on):
ucr_date_string = ucr_insert_date.isoformat() if ucr_insert_date else ''
yield (case_id, case_type, ucr_date_string, sql_modified_on.isoformat())
def _get_ucr_insertion_dates(domain, case_ids):
ucr_id = StaticDataSourceConfiguration.get_doc_id(domain, 'static-household_cases')
case_ids += ['4adf255e-5200-4189-8952-852c57ecd493']
config, _ = get_datasource_config(ucr_id, domain)
table_name = get_table_name(domain, config.table_id)
with get_cursor(AggAwc) as cursor:
query = f'''
SELECT
doc_id,
inserted_at
FROM "{table_name}"
WHERE doc_id = ANY(%(case_ids)s);
'''
cursor.execute(query, {'case_ids': case_ids})
return dict(cursor.fetchall())
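For readers without the dimagi helpers installed, this is the behaviour chunked() is assumed to have above; a self-contained stand-in, not the real import:
from itertools import islice

def chunked_sketch(iterable, size):
    # Yield successive lists of at most `size` items from any iterable.
    it = iter(iterable)
    while True:
        block = list(islice(it, size))
        if not block:
            break
        yield block

assert list(chunked_sketch(range(5), 2)) == [[0, 1], [2, 3], [4]]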
|
Python
| 0.000287
|
@@ -3139,65 +3139,8 @@
s')%0A
- case_ids += %5B'4adf255e-5200-4189-8952-852c57ecd493'%5D%0A
|
ee0100f0a0f79491216cabe3fb2089c33238f57d
|
Set active device number
|
chainer/functions/theano/theano_function.py
|
chainer/functions/theano/theano_function.py
|
import collections
import numpy
import six
try:
import theano
import theano.sandbox.cuda as theano_cuda
_available = True
except ImportError:
_available = False
from chainer import cuda
from chainer import function
from chainer.utils import type_check
def _to_var_tuple(vs):
msg = ('inputs and outputs must be a TensorVariable, a list '
'of TensorVariable or a tuple of TensorVariable')
if isinstance(vs, theano.tensor.TensorVariable):
return vs,
elif isinstance(vs, collections.Iterable):
vs = tuple(vs)
if not all(isinstance(v, theano.tensor.TensorVariable) for v in vs):
raise TypeError(msg)
return vs
else:
raise TypeError(msg)
class TheanoFunction(function.Function):
def __init__(self, inputs, outputs, gpu=True):
if not _available:
msg = '''theano is not installed on your environment.
Please install theano to activate theano function.
$ pip install theano'''
raise RuntimeError(msg)
inputs = _to_var_tuple(inputs)
outputs = _to_var_tuple(outputs)
if gpu:
outs = tuple(theano.sandbox.cuda.basic_ops.gpu_from_host(o)
if o.dtype == 'float32' else o for o in outputs)
else:
outs = outputs
self.func = theano.function(inputs=inputs, outputs=outs)
gs = tuple(o.type('g_{}'.format(i)) for i, o in enumerate(outputs))
known_grads = dict(zip(outputs, gs))
grad = theano.tensor.grad(
cost=None, wrt=inputs, known_grads=known_grads,
disconnected_inputs='ignore')
if gpu:
grad = tuple(theano.sandbox.cuda.basic_ops.gpu_from_host(g)
if g.dtype == 'float32' else g for g in grad)
self.grad = theano.function(
inputs=inputs + gs,
outputs=grad,
on_unused_input='ignore')
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == len(self.func.input_storage))
for actual_type, storage in six.moves.zip(
in_types, self.func.input_storage):
expect_type = storage.type
# Theano cannot check shapes of variables
type_check.expect(
actual_type.ndim == expect_type.ndim,
actual_type.dtype == expect_type.numpy_dtype,
)
def forward(self, inputs):
gpu = cuda.get_array_module(*inputs) is not numpy
if gpu:
inputs = [_cupy_array_to_theano_input(x) for x in inputs]
outputs = self.func(*inputs)
if gpu:
outputs = [_theano_output_to_cupy_array(x) for x in outputs]
return tuple(outputs)
def backward(self, inputs, grads):
args = inputs + grads
gpu = cuda.get_array_module(*args) is not numpy
if gpu:
args = [_cupy_array_to_theano_input(x) for x in args]
outs = self.grad(*args)
assert len(outs) == len(inputs)
if gpu:
outs = [_theano_output_to_cupy_array(x) for x in outs]
outputs = []
for o, i in zip(outs, inputs):
if i.dtype.kind != 'f':
o = None
elif o.dtype != i.dtype:
o = o.astype(i.dtype)
outputs.append(o)
return tuple(outputs)
def _cupy_array_to_theano_input(x):
# CudaNdarray only supports float32
if isinstance(x, cuda.cupy.ndarray) and x.dtype == numpy.float32:
return _cupy_array_to_theano_array(x)
else:
return cuda.to_cpu(x)
def _cupy_array_to_theano_array(x):
if six.PY2:
ptr = long(x.data.ptr) # NOQA
else:
ptr = int(x.data.ptr)
# CuPy strides are in bytes, but CudaNdarray strides count float32 elements (4 bytes each)
strides = [s // 4 for s in x.strides]
return theano_cuda.from_gpu_pointer(ptr, x.shape, strides, x)
class CudaNdarrayMemory(object):
def __init__(self, array):
self._array = array
self.device = cuda.cupy.cuda.Device()
self.ptr = array.gpudata
def _theano_array_to_cupy_array(x):
mem = CudaNdarrayMemory(x)
memptr = cuda.cupy.cuda.MemoryPointer(mem, 0)
# Theano's CudaNdarray is always float32
return cuda.cupy.ndarray(x.shape, dtype=numpy.float32, memptr=memptr)
def _theano_output_to_cupy_array(x):
if x is None:
return None
elif isinstance(x, theano_cuda.CudaNdarray):
return _theano_array_to_cupy_array(x)
else:
return cuda.to_gpu(x)
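A hypothetical, CPU-only usage sketch (shapes and values invented; assumes Theano is installed and a Chainer version whose Function.__call__ accepts raw arrays):
import numpy as np
import theano.tensor as T

a = T.fmatrix('a')
b = T.fmatrix('b')
f = TheanoFunction(inputs=[a, b], outputs=[a + b, a * b], gpu=False)

x = np.ones((2, 3), dtype=np.float32)
y = np.full((2, 3), 2, dtype=np.float32)
s, p = f(x, y)  # two output Variables holding x + y and x * y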
|
Python
| 0.000003
|
@@ -2632,32 +2632,96 @@
if gpu:%0A
+ device = theano.sandbox.cuda.active_device_number()%0A
outp
@@ -2749,32 +2749,40 @@
_to_cupy_array(x
+, device
) for x in outpu
@@ -3105,32 +3105,96 @@
if gpu:%0A
+ device = theano.sandbox.cuda.active_device_number()%0A
outs
@@ -3227,16 +3227,24 @@
_array(x
+, device
) for x
@@ -4111,24 +4111,32 @@
(self, array
+, device
):%0A s
@@ -4176,26 +4176,16 @@
evice =
-cuda.cupy.
cuda.Dev
@@ -4188,16 +4188,22 @@
.Device(
+device
)%0A
@@ -4260,24 +4260,32 @@
cupy_array(x
+, device
):%0A mem =
@@ -4304,16 +4304,24 @@
Memory(x
+, device
)%0A me
@@ -4511,32 +4511,40 @@
_to_cupy_array(x
+, device
):%0A if x is N
@@ -4653,32 +4653,40 @@
_to_cupy_array(x
+, device
)%0A else:%0A
|
7545ce166cbad8fcd2fe628fcac236355b345714
|
add new config option for using predefined networks (from graph-tool); fix for graph tool GTK command (deprecated option?); increase callback time during simulation run (slows down simulation, but necessary to buy time for img export for simulations with large groups of speakers)
|
pdiffsim_gt.py
|
pdiffsim_gt.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
author: Luzius Thöny
lucius.antonius@gmail.com
"""
import sys, os, os.path, math
from numpy.random import *
from distutils.dir_util import mkpath
import configparser
from graph_tool.all import *
# We need some Gtk and gobject functions
from gi.repository import Gtk, Gdk, GdkPixbuf, GObject
from simmanager_gt import SimManager
if len(sys.argv) == 1 :
print("No config file specified.\nUsage: pdiffsym_gt.py [Config File]")
sys.exit(0)
simManager = SimManager()
logFileName = str(sys.argv[1])[:-4] + ".log"
simManager.setLogFileName(logFileName)
# Read config file
config = configparser.ConfigParser()
config.read(sys.argv[1])
speakers = config.getint('simulation', 'speakers')
simManager.setSpeakers(speakers)
simManager.setLambdaValue(config.getfloat('simulation', 'lambdaValue'))
simManager.setMemorySize(config.getint('simulation', 'memorySize'))
simManager.setAlphaBias(config.getfloat('simulation', 'alphaBias'))
simManager.setErrorRate(config.getfloat('simulation', 'errorRate'))
simManager.setUtteranceLength(config.getint('simulation', 'utteranceLength'))
simManager.setDiscreteProduction(config.getboolean('simulation', 'discreteProduction'))
snapshotInterval = config.getint('simulation', 'snapshotInterval')
gui = config.getboolean('simulation', 'gui')
runs = config.getint('simulation', 'runs')
myGraph, pos = triangulation(random((speakers, 2)) * 4, type="delaunay") # will create a more or less rectangular layout
simManager.initSim(myGraph, pos)
# This creates a GTK+ window with the initial graph layout
if not gui:
win = Gtk.OffscreenWindow()
win.set_default_size(500, 400)
win.graph = GraphWidget(simManager.myGraph, simManager.pos,
edge_color=[0.6, 0.6, 0.6, 1],
vertex_fill_color=simManager.colors)
win.add(win.graph)
else:
win = GraphWindow(simManager.myGraph, simManager.pos, geometry=(500, 400),
edge_color=[0.6, 0.6, 0.6, 1],
vertex_fill_color=simManager.colors)
win.graph.regenerate_surface(lazy=False)
win.graph.queue_draw()
dirName = logFileName + '/'
mkpath("./" + dirName)
graphDirty = True
# This function will be called repeatedly by the GTK+ main loop
def update_state_gui():
simManager.stepSim()
win.graph.regenerate_surface(lazy=False)
win.graph.queue_draw()
#~ gtk.Widget.get_snapshot()
return True
# works, but slow
def update_state_nogui():
global graphDirty
if simManager.tick >= runs - 1:
simManager.exportData()
print("done running the simulation. exported data.")
sys.exit(0)
# run through all of the simulation
simManager.stepSim()
graphDirty = True
win.graph.regenerate_surface(lazy=False)
win.graph.queue_draw()
return True
def saveSnapshot(s, e):
global graphDirty
if graphDirty and (simManager.tick % snapshotInterval == 0):
pixbuf = win.get_pixbuf()
pixbuf.savev(logFileName + '/frame' + str(simManager.tick).zfill(4) + '.png', 'png', [], [])
graphDirty = False
#~ print("saving snapshot " + str(simManager.tick))
# Bind the function above as an 'idle' callback.
if not gui:
cid = GObject.timeout_add(100, update_state_nogui) # time in milliseconds [if we go too low there won't be enough time to update the offscreen window and save snapshots]
else:
cid = GObject.idle_add(update_state_gui)
#~ win.connect_after("draw", saveScreenshot)
win.connect_after("damage-event", saveSnapshot)
# We will give the user the ability to stop the program by closing the window.
win.connect("delete_event", Gtk.main_quit)
# Actually show the window, and start the main loop.
win.show_all()
Gtk.main()
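For reference, the config file consumed above needs a [simulation] section with the options read via configparser; every value below is invented:
[simulation]
speakers = 200
lambdaValue = 0.02
memorySize = 10
alphaBias = 0.5
errorRate = 0.001
utteranceLength = 5
discreteProduction = false
snapshotInterval = 10
gui = false
runs = 1000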
|
Python
| 0
|
@@ -736,16 +736,62 @@
akers')%0A
+network = config.get('simulation', 'network')%0A
simManag
@@ -1414,16 +1414,41 @@
uns')%0A%0A%0A
+if network == %22random%22:%0A%09
myGraph,
@@ -1560,16 +1560,118 @@
layout%0A
+else:%0A%09myGraph = collection.data%5Bnetwork%5D #~ %22netscience%22, %22dolphins%22, ...%0A%09pos = myGraph.vp%5B%22pos%22%5D%0A%09%0A
simManag
@@ -2207,26 +2207,16 @@
surface(
-lazy=False
)%0Awin.gr
@@ -2442,34 +2442,24 @@
ate_surface(
-lazy=False
)%0A%09win.graph
@@ -2830,18 +2830,8 @@
ace(
-lazy=False
)%0A%09w
@@ -3259,17 +3259,17 @@
out_add(
-1
+5
00, upda
@@ -3403,16 +3403,157 @@
pshots%5D%0A
+%09# for a small network of 200, a small value of 200 should be enough%0A%09# for a large network of 2000 agents, this value should be 500 or more%0A
else:%0A%09c
|
d282e1e6dbaa596f4d78a503ed4c23ac993e3554
|
change to non-test
|
astropy/coordinates/tests/test_sites.py
|
astropy/coordinates/tests/test_sites.py
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ...tests.helper import pytest, assert_quantity_allclose, remote_data, quantity_allclose
from ... import units as u
from .. import Longitude, Latitude, EarthLocation
from ..sites import get_builtin_sites, get_downloaded_sites, SiteRegistry
def test_builtin_sites():
reg = get_builtin_sites()
keck = reg['keck']
lon, lat, el = keck.to_geodetic()
assert_quantity_allclose(lon, -1*Longitude('155:28.7', unit=u.deg),
atol=0.001*u.deg)
assert_quantity_allclose(lat, Latitude('19:49.7', unit=u.deg),
atol=0.001*u.deg)
assert_quantity_allclose(el, 4160*u.m, atol=1*u.m)
keck = reg['ctio']
lon, lat, el = keck.to_geodetic()
assert_quantity_allclose(lon, -1*Longitude('70.815', unit=u.deg),
atol=0.001*u.deg)
assert_quantity_allclose(lat, Latitude('-30.16527778', unit=u.deg),
atol=0.001*u.deg)
assert_quantity_allclose(el, 2215*u.m, atol=1*u.m)
names = reg.names
assert 'keck' in names
assert 'ctio' in names
with pytest.raises(KeyError):
reg['nonexistent site']
@remote_data
def test_online_sites():
reg = get_downloaded_sites()
keck = reg['keck']
lon, lat, el = keck.to_geodetic()
assert_quantity_allclose(lon, -1*Longitude('155:28.7', unit=u.deg),
atol=0.001*u.deg)
assert_quantity_allclose(lat, Latitude('19:49.7', unit=u.deg),
atol=0.001*u.deg)
assert_quantity_allclose(el, 4160*u.m, atol=1*u.m)
names = reg.names
assert 'keck' in names
assert 'ctio' in names
with pytest.raises(KeyError):
reg['nonexistent site']
@remote_data
# this will *try* the online so we have to make it remote_data, even though it
# falls back on the non-remote version
def test_EarthLocation_basic():
keckel = EarthLocation.of_site('keck')
lon, lat, el = keckel.to_geodetic()
assert_quantity_allclose(lon, -1*Longitude('155:28.7', unit=u.deg),
atol=0.001*u.deg)
assert_quantity_allclose(lat, Latitude('19:49.7', unit=u.deg),
atol=0.001*u.deg)
assert_quantity_allclose(el, 4160*u.m, atol=1*u.m)
names = EarthLocation.get_site_names()
assert 'keck' in names
assert 'ctio' in names
with pytest.raises(KeyError):
EarthLocation.of_site('nonexistent site')
def test_EarthLocation_state_offline():
EarthLocation._site_registry = None
EarthLocation._get_site_registry(force_builtin=True)
assert EarthLocation._site_registry is not None
oldreg = EarthLocation._site_registry
newreg = EarthLocation._get_site_registry()
assert oldreg is newreg
newreg = EarthLocation._get_site_registry(force_builtin=True)
assert oldreg is not newreg
@remote_data
def test_EarthLocation_state_online():
EarthLocation._site_registry = None
EarthLocation._get_site_registry(force_download=True)
assert EarthLocation._site_registry is not None
oldreg = EarthLocation._site_registry
newreg = EarthLocation._get_site_registry()
assert oldreg is newreg
newreg = EarthLocation._get_site_registry(force_download=True)
assert oldreg is not newreg
def test_registry():
reg = SiteRegistry()
assert len(reg.names) == 0
names = ['sitea', 'site A']
loc = EarthLocation.from_geodetic(lat=1*u.deg, lon=2*u.deg,height=3*u.km)
reg.add_site(names, loc)
assert len(reg.names) == 2
loc1 = reg['SIteA']
assert loc1 is loc
loc2 = reg['sIte a']
assert loc2 is loc
def test_non_EarthLocation():
"""
A regression test for a typo bug pointed out at the bottom of
https://github.com/astropy/astropy/pull/4042
"""
class EarthLocation2(EarthLocation):
pass
# This keeps us from needing to use remote_data
# note that this does *not* mess up the registry for EarthLocation because
# registry is cached on a per-class basis
EarthLocation2._get_site_registry(force_builtin=True)
el2 = EarthLocation2.of_site('keck')
assert type(el2) is EarthLocation2
assert el2.info.name == 'W. M. Keck Observatory'
@remote_data
def test_builtin_matches_remote(download_url=True):
builtin_registry = EarthLocation._get_site_registry(force_builtin=True)
dl_registry = EarthLocation._get_site_registry(force_download=download_url)
in_dl = {}
matches = {}
for name in builtin_registry.names:
in_dl[name] = name in dl_registry
if in_dl[name]:
matches[name] = quantity_allclose(builtin_registry[name], dl_registry[name])
else:
matches[name] = False
if not all(matches.values()):
# this makes sure we actually see which don't match
print("In builtin registry but not in download:")
for name in in_dl:
if not in_dl[name]:
print(' ', name)
print("In both but not the same value:")
for name in matches:
if not matches[name] and in_dl[name]:
print(' ', name, 'builtin:', builtin_registry[name], 'download:', dl_registry[name])
assert False, "Builtin and download registry aren't consistent - failures printed to stdout"
|
Python
| 0.999326
|
@@ -4305,37 +4305,25 @@
atory'%0A%0A
-@remote_data%0Adef test
+def check
_builtin
@@ -4358,16 +4358,368 @@
=True):%0A
+ %22%22%22%0A This function checks that the builtin sites registry is consistent with the%0A remote registry (or a registry at some other location). %0A%0A Note that current this is *not* run by the testing suite (because it %0A doesn't start with %22test%22, and is instead meant to be used as a check %0A before merging changes in astropy-data)%0A %22%22%22%0A
buil
|
d3d25e127592356d6b678dc8d013f83f53803f67
|
update mordred.tests to check hidden modules
|
mordred/tests/__main__.py
|
mordred/tests/__main__.py
|
import os
import nose
def main():
base = os.path.dirname(os.path.dirname(__file__))
tests = [base, os.path.join(base, "_base")]
os.environ["NOSE_WITH_DOCTEST"] = "1"
nose.main(
defaultTest=",".join(tests),
)
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -84,16 +84,162 @@
ile__))%0A
+ hidden = %5B%0A os.path.join(base, n)%0A for n in os.listdir(base)%0A if n%5B:1%5D == %22_%22 and os.path.splitext(n)%5B1%5D == %22.py%22%0A %5D%0A%0A
test
@@ -277,16 +277,25 @@
_base%22)%5D
+ + hidden
%0A%0A os
|
e61b1f40ff9e39d38257e9883b4cb51f6f1c9d2a
|
Clarify docstrings on TimeSeries
|
bokeh/charts/builder/timeseries_builder.py
|
bokeh/charts/builder/timeseries_builder.py
|
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the TimeSeries class, which lets you build your TimeSeries charts by just
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from six import string_types
try:
import pandas as pd
except ImportError:
pd = None
from ..utils import chunk, cycle_colors
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, DataRange1d, GlyphRenderer, Range1d
from ...models.glyphs import Line
from ...properties import Any
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def TimeSeries(values, index=None, xscale='datetime', **kws):
""" Create a timeseries chart using
:class:`TimeSeriesBuilder <bokeh.charts.builder.timeseries_builder.TimeSeriesBuilder>`
to render the lines from values and index.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
index (str|1d iterable, optional): can be used to specify a common custom
index for all data series: either a **1d iterable** of any sort that will be used as
the common index of the series, or a **string** that corresponds to the key of the
mapping to be used as the index (and not as a data series) if
the chart values are a mapping (like a dict, an OrderedDict
or a pandas DataFrame)
In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from collections import OrderedDict
import datetime
from bokeh.charts import TimeSeries, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
now = datetime.datetime.now()
delta = datetime.timedelta(minutes=1)
dts = [now + delta*i for i in range(5)]
xyvalues = OrderedDict({'Date': dts})
y_python = xyvalues['python'] = [2, 3, 7, 5, 26]
y_pypy = xyvalues['pypy'] = [12, 33, 47, 15, 126]
y_jython = xyvalues['jython'] = [22, 43, 10, 25, 26]
ts = TimeSeries(xyvalues, index='Date', title="TimeSeries", legend="top_left",
ylabel='Languages')
output_file('timeseries.html')
show(ts)
"""
return create_and_build(
TimeSeriesBuilder, values, index=index, xscale=xscale, **kws
)
class TimeSeriesBuilder(Builder):
"""This is the TimeSeries class and it is in charge of plotting
TimeSeries charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed lines taking the references from the source.
"""
index = Any(help="""
An index to be used for all data series as follows:
- A 1d iterable of any sort that will be used as
series common index
- As a string that corresponds to the key of the
mapping to be used as index (and not as data
series) if area.values is a mapping (like a dict,
an OrderedDict or a pandas DataFrame)
""")
def _process_data(self):
"""Take the x/y data from the timeseries values.
        It calculates the chart properties accordingly, then builds a dict
containing references to all the points to be used by
the line glyph inside the ``_yield_renderers`` method.
"""
self._data = dict()
# list to save all the attributes we are going to create
self._attr = []
        # necessary to make all formats and encoders happy with array, blaze, ...
        xs = [x for x in self._values_index]
for col, values in self._values.items():
if isinstance(self.index, string_types) \
and col == self.index:
continue
            # save all the groups available in the incoming input
self._groups.append(col)
self.set_and_get("x_", col, xs)
self.set_and_get("y_", col, values)
def _set_sources(self):
"""Push the TimeSeries data into the ColumnDataSource and
calculate the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = DataRange1d()
y_names = self._attr[1::2]
endy = max(max(self._data[i]) for i in y_names)
starty = min(min(self._data[i]) for i in y_names)
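        # Pad the y range by 10% of the data span on each side so the
        # plotted lines do not sit flush against the chart border.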
self.y_range = Range1d(
start=starty - 0.1 * (endy - starty),
end=endy + 0.1 * (endy - starty)
)
def _yield_renderers(self):
"""Use the line glyphs to connect the xy points in the time series.
Takes reference points from the data loaded at the ColumnDataSource.
"""
self._duplet = list(chunk(self._attr, 2))
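        # self._attr holds alternating column names ("x_<col>", "y_<col>", ...)
        # appended by set_and_get, so chunking by 2 yields one (x, y) pair
        # of source column names per series.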
colors = cycle_colors(self._duplet, self.palette)
for i, (x, y) in enumerate(self._duplet, start=1):
glyph = Line(x=x, y=y, line_color=colors[i - 1])
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i-1], [renderer]))
yield renderer
|
Python
| 0.00001
|
@@ -1611,16 +1611,21 @@
erable):
+ a 2d
iterabl
@@ -1630,66 +1630,215 @@
ble
-2d representing the data series%0A values matrix.
+containing the values. Can be anything that %0A can be converted to a 2d array, and which is the x (time) axis is determined%0A by %60%60index%60%60, while the others are interpreted as y values.
%0A
|
d1e9a1ed54cae9b0b10ab89c06d6d7f9b53af3a1
|
Update forward compatibility horizon to 2018-09-21
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2018, 9, 20)
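# Rolling horizon date: forward_compatible(year, month, day) below returns
# True once this date is strictly later than the caller-supplied date.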
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
  # To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018).
month: A month (1 <= month <= 12) in year.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_HORIZON > datetime.date(year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=08, date=01):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 08, 02):
# Test that generate_graph_with_new_features() has an effect
```
  Args:
    year: A year (e.g., 2018).
month: A month (1 <= month <= 12) in year.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.
Yields:
Nothing.
"""
global _FORWARD_COMPATIBILITY_HORIZON
try:
old_compat_date = _FORWARD_COMPATIBILITY_HORIZON
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day)
yield
finally:
_FORWARD_COMPATIBILITY_HORIZON = old_compat_date
|
Python
| 0
|
@@ -1138,17 +1138,17 @@
18, 9, 2
-0
+1
)%0A%0A%0A@tf_
|
b7cc99565fc692e42fd9bf209b1addbe31b3d583
|
Update forward compatibility horizon to 2022-03-24
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
import datetime
import os
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2022, 3, 23)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
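  # Pack a date into a single integer that compares in calendar order:
  # day occupies bits 0-4 (day <= 31), month bits 5-8 (month <= 12), and
  # the year the remaining high bits.
  # Example: 2022-03-24 -> (2022 << 9) | (3 << 5) | 24 == 1035384.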
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
"""Update the base date to compare in forward_compatible function."""
global _FORWARD_COMPATIBILITY_DATE_NUMBER
if date_to_override:
date = date_to_override
else:
date = _FORWARD_COMPATIBILITY_HORIZON
delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
if delta_days:
date += datetime.timedelta(days=int(delta_days))
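    # For example, running tests with TF_FORWARD_COMPATIBILITY_DELTA_DAYS=7
    # shifts the effective horizon one week past the baked-in date above.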
if date < _FORWARD_COMPATIBILITY_HORIZON:
logging.warning("Trying to set the forward compatibility date to the past"
" date %s. This will be ignored by TensorFlow." % (date))
return
_FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
date.year, date.month, date.day)
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=08, date=01):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 08, 02):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Yields:
Nothing.
"""
try:
_update_forward_compatibility_date_number(datetime.date(year, month, day))
yield
finally:
_update_forward_compatibility_date_number()
|
Python
| 0
|
@@ -1335,17 +1335,17 @@
22, 3, 2
-3
+4
)%0A_FORWA
|
3b061fce8b9a1c867f2798d51b5375ea3a03b385
|
Update forward compatibility horizon to 2018-08-11
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See
@{$guide/version_compat#backward_and_partial_forward_compatibility}
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2018, 8, 10)
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See @{$guide/version_compat#backward_and_partial_forward_compatibility}.
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
  # To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018).
month: A month (1 <= month <= 12) in year.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_HORIZON > datetime.date(year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See @{$guide/version_compat#backward_and_partial_forward_compatibility}.
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=08, date=01):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 08, 02):
# Test that generate_graph_with_new_features() has an effect
```
  Args:
    year: A year (e.g., 2018).
month: A month (1 <= month <= 12) in year.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.
Yields:
Nothing.
"""
global _FORWARD_COMPATIBILITY_HORIZON
try:
old_compat_date = _FORWARD_COMPATIBILITY_HORIZON
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day)
yield
finally:
_FORWARD_COMPATIBILITY_HORIZON = old_compat_date
|
Python
| 0
|
@@ -1120,17 +1120,17 @@
18, 8, 1
-0
+1
)%0A%0A%0A@tf_
|
5f5b7f0bf60b58b91c501bc44dd6803c1892bc05
|
Update forward compatibility horizon to 2019-03-20
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2019, 3, 19)
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
  # To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018).
month: A month (1 <= month <= 12) in year.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_HORIZON > datetime.date(year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=08, date=01):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 08, 02):
# Test that generate_graph_with_new_features() has an effect
```
  Args:
    year: A year (e.g., 2018).
month: A month (1 <= month <= 12) in year.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month.
Yields:
Nothing.
"""
global _FORWARD_COMPATIBILITY_HORIZON
try:
old_compat_date = _FORWARD_COMPATIBILITY_HORIZON
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day)
yield
finally:
_FORWARD_COMPATIBILITY_HORIZON = old_compat_date
|
Python
| 0
|
@@ -1138,18 +1138,18 @@
019, 3,
-19
+20
)%0A%0A%0A@tf_
|
cff5052f94738942bfd18d745660a975a170ca4b
|
Fix build on non-git source
|
build-tools/code_generator/utils/common.py
|
build-tools/code_generator/utils/common.py
|
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import difflib
import io
import os
import re
import six
import subprocess
def which(name):
    exec_suffix = '.exe' if os.name == 'nt' else ''
for p in os.environ['PATH'].split(os.pathsep):
        if os.name == 'nt':
p = p.replace('"', '')
f = os.path.join(p, name + exec_suffix)
if os.path.isfile(f):
return f
return None
def check_update(filename, generated, force=False):
original = ''
if os.path.exists(filename):
with io.open(filename, 'rt', encoding='utf_8_sig') as f:
original = six.text_type(f.read())
s = difflib.SequenceMatcher(None, original, generated)
    if (force or not os.path.exists(filename)) and s.ratio() < 1.0:
with open(filename, 'wb') as f:
print('Updating {}.'.format(filename))
write_content = generated.encode('utf_8')
write_content = write_content.replace(b'\r\n', b'\n')
write_content = write_content.replace(b'\r', b'\n')
f.write(write_content)
def get_version(dir):
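    # Intended behavior: derive (version, nearest_tag) from `git describe`
    # inside a git checkout, and fall back to default_version otherwise.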
os.chdir(dir)
default_version = '0.9.7'
if os.path.exists('.git'):
try:
nearest_tag = re.sub(r'^v', '', subprocess.check_output(['git', 'describe', '--abbrev=0', '--tags']).strip().decode('utf-8'))
nearest_tag = nearest_tag.replace('/', '_').lower()
version = nearest_tag
vv = subprocess.check_output(['git', 'describe', '--tags']).strip().decode('utf-8').split('-')
if len(vv) > 1:
cid = vv.pop()
version = '-'.join(vv) + '+' + cid
version = version.replace('/', '_').lower()
except:
nearest_tag = default_version
version = default_version
return version.replace('/', '_').lower(), nearest_tag.replace('/', '_').lower()
|
Python
| 0
|
@@ -1673,16 +1673,26 @@
dir)%0A
+ version =
default
|
784012b11b6c11584b0c8696874137eebe7260c0
|
Update forward compatibility horizon to 2021-02-08
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2021, 2, 7)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
"""Update the base date to compare in forward_compatible function."""
global _FORWARD_COMPATIBILITY_DATE_NUMBER
if date_to_override:
date = date_to_override
else:
date = _FORWARD_COMPATIBILITY_HORIZON
delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
if delta_days:
date += datetime.timedelta(days=int(delta_days))
if date < _FORWARD_COMPATIBILITY_HORIZON:
logging.warning("Trying to set the forward compatibility date to the past"
" date %s. This will be ignored by TensorFlow." % (date))
return
_FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
date.year, date.month, date.day)
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=08, date=01):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 08, 02):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Yields:
Nothing.
"""
try:
_update_forward_compatibility_date_number(datetime.date(year, month, day))
yield
finally:
_update_forward_compatibility_date_number()
|
Python
| 0
|
@@ -1448,9 +1448,9 @@
2,
-7
+8
)%0A_F
|
12bb4e28862bed103ff6c8a2262e014bb2ae3915
|
Add more unit tests [WAL-1666]
|
src/waldur_mastermind/invoices/tests/test_downtime.py
|
src/waldur_mastermind/invoices/tests/test_downtime.py
|
from django.core.exceptions import ValidationError
from dateutil import parser
from django.utils.timezone import get_current_timezone
from freezegun import freeze_time
from rest_framework import test
from waldur_mastermind.packages.tests import fixtures as packages_fixtures
from .. import models
def parse_datetime(timestr):
return parser.parse(timestr).replace(tzinfo=get_current_timezone())
@freeze_time('2018-11-01')
class DowntimeValidationTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = packages_fixtures.PackageFixture()
self.package = self.fixture.openstack_package
self.downtime = models.ServiceDowntime.objects.create(
package=self.package,
start=parse_datetime('2018-10-05'),
end=parse_datetime('2018-10-15'),
)
def test_positive(self):
downtime = models.ServiceDowntime(
package=self.package,
start=parse_datetime('2018-10-17'),
end=parse_datetime('2018-10-20'),
)
# It is expected that validation error is not raised in this case
downtime.clean()
def test_validate_offset(self):
downtime = models.ServiceDowntime(
package=self.package,
start=parse_datetime('2018-11-10'),
end=parse_datetime('2018-11-20'),
)
self.assertRaises(ValidationError, downtime.clean)
def test_validate_duration(self):
downtime = models.ServiceDowntime(
package=self.package,
start=parse_datetime('2018-10-16'),
end=parse_datetime('2018-12-20'),
)
self.assertRaises(ValidationError, downtime.clean)
def test_validate_intersection_outside(self):
downtime = models.ServiceDowntime(
package=self.package,
start=parse_datetime('2018-10-01'),
end=parse_datetime('2018-10-20'),
)
self.assertRaises(ValidationError, downtime.clean)
def test_validate_intersection_inside(self):
downtime = models.ServiceDowntime(
package=self.package,
start=parse_datetime('2018-10-07'),
end=parse_datetime('2018-10-10'),
)
self.assertRaises(ValidationError, downtime.clean)
def test_validate_intersection_left(self):
downtime = models.ServiceDowntime(
package=self.package,
start=parse_datetime('2018-10-01'),
end=parse_datetime('2018-10-10'),
)
self.assertRaises(ValidationError, downtime.clean)
def test_validate_intersection_right(self):
downtime = models.ServiceDowntime(
package=self.package,
start=parse_datetime('2018-10-10'),
end=parse_datetime('2018-10-20'),
)
self.assertRaises(ValidationError, downtime.clean)
@freeze_time('2018-11-01')
class OpenStackDowntimeAdjustmentTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = packages_fixtures.PackageFixture()
self.package = self.fixture.openstack_package
self.item = models.OpenStackItem.objects.get(package=self.package)
self.item.start = parse_datetime('2018-10-11')
self.item.end = parse_datetime('2018-10-15')
self.item.save()
def test_downtime_outside_of_invoice_item_billing_period(self):
models.ServiceDowntime.objects.create(
package=self.package,
start=parse_datetime('2018-10-01'),
end=parse_datetime('2018-10-20'),
)
self.assertTrue(models.GenericInvoiceItem.objects.filter(
start=self.item.start, end=self.item.end).exists())
def test_downtime_inside_of_invoice_item_billing_period(self):
downtime = models.ServiceDowntime.objects.create(
package=self.package,
start=parse_datetime('2018-10-12'),
end=parse_datetime('2018-10-14'),
)
self.assertTrue(models.GenericInvoiceItem.objects.filter(
start=downtime.start, end=downtime.end).exists())
def test_downtime_at_the_start_of_invoice_item_billing_period(self):
downtime = models.ServiceDowntime.objects.create(
package=self.package,
start=parse_datetime('2018-10-01'),
end=parse_datetime('2018-10-12'),
)
self.assertTrue(models.GenericInvoiceItem.objects.filter(
start=self.item.start, end=downtime.end).exists())
def test_downtime_at_the_end_of_invoice_item_billing_period(self):
downtime = models.ServiceDowntime.objects.create(
package=self.package,
start=parse_datetime('2018-10-12'),
end=parse_datetime('2018-10-20'),
)
self.assertTrue(models.GenericInvoiceItem.objects.filter(
start=downtime.start, end=self.item.end).exists())
def test_compensation_is_not_created_if_downtime_and_item_do_not_intersect(self):
models.ServiceDowntime.objects.create(
package=self.package,
start=parse_datetime('2018-10-01'),
end=parse_datetime('2018-10-07'),
)
self.assertFalse(models.GenericInvoiceItem.objects.filter(scope__isnull=True).exists())
def test_compensation_is_not_created_if_item_does_not_have_package(self):
self.item.package = None
self.item.save()
models.ServiceDowntime.objects.create(
package=self.package,
start=parse_datetime('2018-10-01'),
end=parse_datetime('2018-10-20'),
)
self.assertFalse(models.GenericInvoiceItem.objects.filter(scope__isnull=True).exists())
|
Python
| 0
|
@@ -3541,32 +3541,31 @@
-self.assertTrue(
+compensation =
models.G
@@ -3648,32 +3648,240 @@
f.item.end).
-exists()
+get()%0A self.assertEqual(compensation.price, -1 * self.item.price)%0A self.assertEqual(compensation.details%5B'name'%5D,%0A 'Compensation for downtime. Resource name: %25s' %25 self.item.name
)%0A%0A def t
|
f4cb18d5e204ba44b75bebece69e8a7322cabb49
|
fix mailgun test
|
muckrock/mailgun/tests.py
|
muckrock/mailgun/tests.py
|
"""
Tests for mailgun
"""
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase
import hashlib
import hmac
import nose.tools
import os
import time
from foia.models import FOIARequest
from settings import MAILGUN_ACCESS_KEY
# allow methods that could be functions and too many public methods in tests
# pylint: disable=R0201
# pylint: disable=R0904
class TestMailgunViews(TestCase):
"""Tests for Mailgun views"""
fixtures = ['test_users.json', 'test_profiles.json', 'jurisdictions.json',
'test_foiarequests.json']
def setUp(self):
"""Set up tests"""
# pylint: disable=C0103
mail.outbox = []
def sign(self, data):
"""Add mailgun signature to data"""
token = 'token'
timestamp = int(time.time())
signature = hmac.new(key=MAILGUN_ACCESS_KEY,
msg='%s%s' % (timestamp, token),
digestmod=hashlib.sha256).hexdigest()
data['token'] = token
data['timestamp'] = timestamp
data['signature'] = signature
def test_normal(self):
"""Test a normal succesful response"""
foia = FOIARequest.objects.get(pk=1)
data = {
'from': 'test@agency.gov',
'From': 'test@agency.gov',
'To': '%s@requests.muckrock.com, other@agency.gov' % foia.get_mail_id(),
'subject': 'Test subject',
'stripped-text': 'Test normal.',
'body-plain': 'Test normal.',
}
self.sign(data)
response = self.client.post(reverse('mailgun-request',
kwargs={'mail_id': foia.get_mail_id()}), data)
nose.tools.eq_(response.status_code, 200)
nose.tools.eq_(len(mail.outbox), 3)
nose.tools.eq_(mail.outbox[0].body, 'Test normal.')
nose.tools.ok_(mail.outbox[1].subject.startswith('[RESPONSE]'))
nose.tools.eq_(mail.outbox[2].to, [foia.user.email])
foia = FOIARequest.objects.get(pk=1)
nose.tools.eq_(foia.email, 'test@agency.gov')
nose.tools.eq_(foia.other_emails, 'other@agency.gov')
def test_bad_sender(self):
"""Test a normal succesful response"""
foia = FOIARequest.objects.get(pk=1)
data = {
'from': 'test@example.com',
'From': 'test@example.com',
'To': '%s@requests.muckrock.com' % foia.get_mail_id(),
'subject': 'Test subject',
'stripped-text': 'Test bad sender.',
'body-plain': 'Test bad sender.',
}
self.sign(data)
response = self.client.post(reverse('mailgun-request',
kwargs={'mail_id': foia.get_mail_id()}), data)
nose.tools.eq_(response.status_code, 200)
nose.tools.eq_(len(mail.outbox), 1)
nose.tools.ok_(mail.outbox[0].subject.startswith('Bad Sender'))
def test_bad_addr(self):
"""Test sending to a non existent FOIA request"""
data = {
'from': 'test@agency.gov',
'From': 'test@agency.gov',
'To': '123-12345678@requests.muckrock.com',
'subject': 'Test subject',
'stripped-text': 'Test bad address.',
'body-plain': 'Test bad address.',
}
self.sign(data)
response = self.client.post(reverse('mailgun-request',
kwargs={'mail_id': '123-12345678'}), data)
nose.tools.eq_(response.status_code, 200)
nose.tools.eq_(len(mail.outbox), 1)
nose.tools.ok_(mail.outbox[0].subject.startswith('Invalid Address'))
def test_attachments(self):
"""Test a message with an attachment"""
try:
foia = FOIARequest.objects.get(pk=1)
with open('data.xls', 'w') as file_:
file_.write('abc123')
data = {
'from': 'test@agency.gov',
'From': 'test@agency.gov',
'To': '%s@requests.muckrock.com' % foia.get_mail_id(),
'subject': 'Test subject',
'stripped-text': 'Test attachment.',
'body-plain': 'Test attachment.',
'attachment-1': open('data.xls'),
}
self.sign(data)
response = self.client.post(reverse('mailgun-request',
kwargs={'mail_id': foia.get_mail_id()}), data)
nose.tools.eq_(response.status_code, 200)
foia = FOIARequest.objects.get(pk=1)
nose.tools.eq_(foia.files.all()[0].ffile.name, 'foia_files/data.xls')
finally:
foia.files.all()[0].delete()
os.remove('data.xls')
if os.path.exists('static/foia_files/data.xls'):
os.remove('static/foia_files/data.xls')
def test_fax(self):
"""Test a fax confirmation"""
data = {
'from': 'test@agency.gov',
'From': 'test@agency.gov',
'To': 'fax@requests.muckrock.com',
'subject': 'Test subject',
'stripped-text': 'Test fax.',
'body-plain': 'Test fax.',
}
self.sign(data)
response = self.client.post(reverse('mailgun-fax'), data)
nose.tools.eq_(response.status_code, 200)
nose.tools.eq_(len(mail.outbox), 1)
nose.tools.eq_(mail.outbox[0].body, 'Test fax.')
|
Python
| 0.000001
|
@@ -4795,32 +4795,38 @@
.exists('static/
+media/
foia_files/data.
@@ -4866,16 +4866,22 @@
'static/
+media/
foia_fil
|
f1ad74fa852de191787e7c7467675e390647fdcf
|
Add a missing "import glob".
|
build/config/win/get_visual_studio_path.py
|
build/config/win/get_visual_studio_path.py
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import errno
import os
import re
import subprocess
import sys
"""
This script searches for Visual Studio versions on the current system.
Pass in the preferred VS version on the command line, or pass "auto" for
autodetect.
This script prints a string containing the VS root directory. On failure it
prints an empty string.
"""
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to be
python neutral, so for instance cygwin python lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up and available on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail if using 64-bit python due to it being a
virtual directory and System32 will work correctly in the first place.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure.
"""
text = None
try:
text = _RegistryQueryBase('Sysnative', key, value)
except OSError, e:
if e.errno == errno.ENOENT:
text = _RegistryQueryBase('System32', key, value)
else:
raise
return text
def _RegistryGetValue(key, value):
"""Use reg.exe to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure.
"""
text = _RegistryQuery(key, value)
if not text:
return None
# Extract value.
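  # reg.exe output resembles:
  #   InstallDir    REG_SZ    C:\Program Files\...\IDE\
  # so capture everything between the REG_* type token and the line ending.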
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
if not match:
return None
return match.group(1)
def _DetectVisualStudioVersion(versions_to_check, force_express):
"""Gets the path of the preferred Visual Studio version.
Returns:
The base path of Visual Studio based on the registry and a quick check if
devenv.exe exists.
Possibilities are:
2010(e) - Visual Studio 2010 (10)
2012(e) - Visual Studio 2012 (11)
2013(e) - Visual Studio 2013 (12)
Where (e) is e for express editions of MSVS and blank otherwise.
"""
versions = []
for version in versions_to_check:
# Old method of searching for which VS version is installed
# We don't use the 2010-encouraged-way because we also want to get the
# path to the binaries, which it doesn't offer.
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Microsoft\VCExpress\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], 'InstallDir')
if not path:
continue
path = _ConvertToCygpath(path)
# Check for full.
full_path = os.path.join(path, 'devenv.exe')
express_path = os.path.join(path, '*express.exe')
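      # NOTE: glob.glob below relies on the glob module being imported at the
      # top of this file; os.path.exists cannot expand the wildcard pattern.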
if not force_express and os.path.exists(full_path):
return os.path.normpath(os.path.join(path, '..', '..'))
# Check for express.
elif glob.glob(express_path):
return os.path.normpath(os.path.join(path, '..', '..'))
# The old method above does not work when only SDK is installed.
keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], version)
if not path:
continue
path = _ConvertToCygpath(path)
return os.path.normpath(os.path.join(path, '..'))
return None
if len(sys.argv) != 2:
print 'Usage: get_visual_studio_path.py <version>'
print 'Use "auto" for the version to autodetect.'
sys.exit(2)
version_map = {
'auto': ('10.0', '12.0', '11.0'),
'2010': ('10.0',),
'2010e': ('10.0',),
'2012': ('11.0',),
'2012e': ('11.0',),
'2013': ('12.0',),
'2013e': ('12.0',),
}
requested_version = sys.argv[1]
vs_path = _DetectVisualStudioVersion(version_map[requested_version],
'e' in requested_version)
if not vs_path:
# No Visual Studio version detected.
print '""' # Return empty string to .gn file.
  sys.exit(1)
# Return Visual Studio path to the .gn file.
print '"%s"' % vs_path
|
Python
| 0.999205
|
@@ -173,16 +173,28 @@
t errno%0A
+import glob%0A
import o
|
7ce597b5e8467cb662c9ae9a968e1ff10741f734
|
Update forward compatibility horizon to 2021-05-25
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2021, 5, 24)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
"""Update the base date to compare in forward_compatible function."""
global _FORWARD_COMPATIBILITY_DATE_NUMBER
if date_to_override:
date = date_to_override
else:
date = _FORWARD_COMPATIBILITY_HORIZON
delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
if delta_days:
date += datetime.timedelta(days=int(delta_days))
if date < _FORWARD_COMPATIBILITY_HORIZON:
logging.warning("Trying to set the forward compatibility date to the past"
" date %s. This will be ignored by TensorFlow." % (date))
return
_FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
date.year, date.month, date.day)
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=08, date=01):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 08, 02):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Yields:
Nothing.
"""
try:
_update_forward_compatibility_date_number(datetime.date(year, month, day))
yield
finally:
_update_forward_compatibility_date_number()
|
Python
| 0
|
@@ -1449,9 +1449,9 @@
5, 2
-4
+5
)%0A_F
|
9e4ad9a04ad911cc9375b3ceeb2428b034758518
|
Update forward compatibility horizon to 2020-02-14
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2020, 2, 13)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
"""Update the base date to compare in forward_compatible function."""
global _FORWARD_COMPATIBILITY_DATE_NUMBER
if date_to_override:
date = date_to_override
else:
date = _FORWARD_COMPATIBILITY_HORIZON
delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
if delta_days:
date += datetime.timedelta(days=int(delta_days))
_FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
date.year, date.month, date.day)
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
year, month, day)
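# e.g. with the default horizon above (2020-02-13) and no delta set,
# forward_compatible(2020, 2, 12) is True and
# forward_compatible(2020, 2, 13) is False.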
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=8, day=1):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
  with compat.forward_compatibility_horizon(2018, 8, 2):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
    day: A day (1 <= day <= 31, depending on the month) in month. Must be an
      `int`.
Yields:
Nothing.
"""
try:
_update_forward_compatibility_date_number(datetime.date(year, month, day))
yield
finally:
_update_forward_compatibility_date_number()
|
Python
| 0
|
@@ -1383,17 +1383,17 @@
20, 2, 1
-3
+4
)%0A_FORWA
|
8b0d9378a1e48c010fb028395811d3f3720af3e9
|
Translate language names from settings.LANGUAGES.
|
multilingual/languages.py
|
multilingual/languages.py
|
"""
Django-multilingual: language-related settings and functions.
"""
# Note: this file did become a mess and will have to be refactored
# after the configuration changes get in place.
# Retrieve language settings from settings.py
from django.conf import settings
LANGUAGES = settings.LANGUAGES
from multilingual.exceptions import LanguageDoesNotExist
try:
from threading import local
except ImportError:
from django.utils._threading_local import local
thread_locals = local()
def get_language_count():
return len(LANGUAGES)
def get_language_code(language_id):
return LANGUAGES[(int(language_id or get_default_language())) - 1][0]
def get_language_name(language_id):
return LANGUAGES[(int(language_id or get_default_language())) - 1][1]
def get_language_id_list():
return range(1, get_language_count() + 1)
def get_language_code_list():
return [lang[0] for lang in LANGUAGES]
def get_language_choices():
return [(language_id, get_language_code(language_id))
for language_id in get_language_id_list()]
def get_language_id_from_id_or_code(language_id_or_code, use_default=True):
if language_id_or_code is None:
if use_default:
return get_default_language()
else:
return None
if isinstance(language_id_or_code, int):
return language_id_or_code
i = 0
for (code, desc) in LANGUAGES:
i += 1
if code == language_id_or_code:
return i
raise LanguageDoesNotExist()
def get_language_idx(language_id_or_code):
# to do: optimize
language_id = get_language_id_from_id_or_code(language_id_or_code)
return get_language_id_list().index(language_id)
def set_default_language(language_id_or_code):
"""
Set the default language for the whole translation mechanism.
Accepts language codes or IDs.
"""
language_id = get_language_id_from_id_or_code(language_id_or_code)
thread_locals.DEFAULT_LANGUAGE = language_id
def get_default_language():
"""
Return the language ID set by set_default_language.
"""
return getattr(thread_locals, 'DEFAULT_LANGUAGE',
settings.DEFAULT_LANGUAGE)
def get_default_language_code():
"""
    Return the language code of the language ID set by set_default_language.
"""
language_id = get_language_id_from_id_or_code(get_default_language())
return get_language_code(language_id)
def _to_db_identifier(name):
"""
Convert name to something that is usable as a field name or table
alias in SQL.
For the time being assume that the only possible problem with name
is the presence of dashes.
"""
return name.replace('-', '_')
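# e.g. (illustrative) _to_db_identifier('pt-br') == 'pt_br', so language
# codes become safe SQL identifiers for the aliases built below.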
def get_translation_table_alias(translation_table_name, language_id):
"""
Return an alias for the translation table for a given language_id.
Used in SQL queries.
"""
return (translation_table_name
+ '_'
+ _to_db_identifier(get_language_code(language_id)))
def get_translated_field_alias(field_name, language_id=None):
"""
Return an alias for field_name field for a given language_id.
Used in SQL queries.
"""
return ('_trans_'
+ field_name
+ '_' + _to_db_identifier(get_language_code(language_id)))
|
Python
| 0.999999
|
@@ -290,16 +290,72 @@
GUAGES%0A%0A
+from django.utils.translation import ugettext_lazy as _%0A
from mul
@@ -742,32 +742,34 @@
id):%0A return
+_(
LANGUAGES%5B(int(l
@@ -814,16 +814,17 @@
- 1%5D%5B1%5D
+)
%0A%0Adef ge
|
a68bb0d268861d30c26647523991ed215853cdfe
|
add Reeve post
|
ca_ab_grande_prairie_county_no_1/people.py
|
ca_ab_grande_prairie_county_no_1/people.py
|
from pupa.scrape import Scraper
from utils import lxmlize, CanadianLegislator as Legislator
import re
COUNCIL_PAGE = 'http://www.countygp.ab.ca/EN/main/government/council.html'
class GrandePrairieCountyNo1PersonScraper(Scraper):
# @todo The Reeve is also a Councillor.
def get_people(self):
page = lxmlize(COUNCIL_PAGE)
councillors = page.xpath('//table[@class="table-plain"]/tbody/tr/td[2]')
for councillor in councillors:
name = councillor.xpath('./h2')[0].text_content().split('Division')[0]
district = re.findall(r'(Division [0-9])', councillor.xpath('./h2')[0].text_content())[0]
p = Legislator(name=name, post_id=district, role='Councillor')
p.add_source(COUNCIL_PAGE)
image = councillor.xpath('./preceding-sibling::td//img/@src')[0]
p.image = image
address = councillor.xpath('./p[1]')[0].text_content()
email = councillor.xpath('.//a[contains(@href, "mailto:")]')[0].text_content()
p.add_contact('address', address, 'legislature')
p.add_contact('email', email, None)
numbers = councillor.xpath('./p[2]')[0].text_content().replace('Email: ', '').replace(email, '').split(':')
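            # Illustrative input: 'Fax: 780.555.0100 Cell: 780.555.0101';
            # after split(':'), each middle chunk carries a number followed
            # by the label of the *next* number, which the loop below pairs
            # back up. The regex uses an unescaped '.', so it tolerates
            # '.', '-' or ' ' between number groups.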
for index, number in enumerate(numbers):
if index == 0:
continue
contact_type = re.findall(r'[A-Za-z]+', numbers[index - 1])[0]
number = re.findall(r'[0-9]{3}.[0-9]{3}.[0-9]{4}', number)[0].replace('.', '-')
if contact_type == 'Fax':
p.add_contact('fax', number, 'legislature')
elif contact_type == 'Cell':
p.add_contact('cell', number, 'legislature')
elif contact_type == 'Hm':
p.add_contact('voice', number, 'residence')
else:
raise Exception('Unrecognized contact type %s' % contact_type)
yield p
|
Python
| 0.000001
|
@@ -172,16 +172,102 @@
l.html'%0A
+REEVE_URL = 'http://www.countygp.ab.ca/EN/main/government/council/reeve-message.html'%0A
%0A%0Aclass
@@ -379,16 +379,116 @@
(self):%0A
+ reeve_page = lxmlize(REEVE_URL)%0A reeve_name = reeve_page.xpath('string(//b)').split(',')%5B0%5D%0A%0A
page
@@ -687,16 +687,27 @@
).split(
+%0A
'Divisio
@@ -708,24 +708,32 @@
ivision')%5B0%5D
+.strip()
%0A distr
@@ -814,24 +814,24 @@
tent())%5B0%5D%0A%0A
-
p = Le
@@ -887,16 +887,125 @@
illor')%0A
+ if name == reeve_name:%0A p.add_committee_membership('Grande Prairie County No. 1', role='Reeve')%0A
p.
|
efa53a61d067d42d3dd6f9f3e5d08f89b836f92b
|
Test split_paragraphs functions.
|
landlab/core/tests/test_messager.py
|
landlab/core/tests/test_messager.py
|
#! /usr/bin/env python
import os
from nose.tools import assert_equal, assert_raises
from landlab.core.utils import (format_message, error_message,
warning_message, assert_or_print)
LOREM_IPSUM = """
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
Pharetra pharetra massa massa ultricies mi quis hendrerit.
Dictumst vestibulum rhoncus est pellentesque. Sed viverra tellus in hac habitasse platea dictumst vestibulum rhoncus.
"""
def test_empty_message():
"""Test formatting an empty string."""
assert_equal(format_message(''), '')
def test_one_line():
"""Test a single line message."""
assert_equal(format_message('lorem ipsum'), 'lorem ipsum')
def test_leading_whitespace():
"""Test a single line message."""
assert_equal(format_message(' lorem ipsum'), 'lorem ipsum')
def test_one_long_line():
"""Test a line that needs to be wrapped."""
msg = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua."
assert_equal(format_message(msg),
"""
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do
eiusmod tempor incididunt ut labore et dolore magna aliqua.""".strip())
def test_multiline():
msg = """lorem
ipsum
"""
assert_equal(format_message(msg), 'lorem ipsum')
def test_multiple_paragraphs():
assert_equal(format_message(LOREM_IPSUM), """
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do
eiusmod tempor incididunt ut labore et dolore magna aliqua.
Pharetra pharetra massa massa ultricies mi quis hendrerit.
Dictumst vestibulum rhoncus est pellentesque. Sed viverra tellus in
hac habitasse platea dictumst vestibulum rhoncus.
""".strip())
def test_warning_message():
msg = "Pharetra pharetra massa massa ultricies mi quis hendrerit."
    assert_equal(warning_message(msg), """
WARNING
=======
Pharetra pharetra massa massa ultricies mi quis hendrerit.
""".strip())
def test_error_message():
msg = "Pharetra pharetra massa massa ultricies mi quis hendrerit."
    assert_equal(error_message(msg), """
ERROR
=====
Pharetra pharetra massa massa ultricies mi quis hendrerit.
""".strip())
def test_warning_message_is_none():
assert_equal(warning_message(), os.linesep.join(["WARNING", "======="]))
def test_error_message_is_none():
assert_equal(error_message(), os.linesep.join(["ERROR", "====="]))
def test_assert_or_pass():
assert_or_print(True, onerror='pass')
assert_or_print(False, onerror='pass')
def test_assert_or_warn():
assert_or_print(True, onerror='warn')
assert_or_print(False, onerror='warn')
def test_assert_or_raise():
assert_or_print(True, onerror='raise')
with assert_raises(AssertionError):
assert_or_print(False, onerror='raise')
|
Python
| 0
|
@@ -206,16 +206,66 @@
or_print
+,%0A split_paragraphs
)%0A%0A%0ALORE
@@ -586,16 +586,1261 @@
s.%0A%22%22%22%0A%0A
+%0Adef test_split_paragraphs_cr():%0A %22%22%22Test splitting paragraphs with carriage returns.%22%22%22%0A text = %22%22%22%0APharetra pharetra massa massa ultricies mi quis hendrerit.%5Cr%5CrDictumst vestibulum rhoncus est pellentesque.%0A %22%22%22%0A assert_equal(split_paragraphs(text, linesep='%5Cr'), %5B%0A %22Pharetra pharetra massa massa ultricies mi quis hendrerit.%22,%0A %22Dictumst vestibulum rhoncus est pellentesque.%22%5D)%0A%0A%0Adef test_split_paragraphs_lf():%0A %22%22%22Test splitting paragraphs with line feeds.%22%22%22%0A text = %22%22%22%0APharetra pharetra massa massa ultricies mi quis hendrerit.%5Cn%5CnDictumst vestibulum rhoncus est pellentesque.%0A %22%22%22%0A assert_equal(split_paragraphs(text, linesep='%5Cn'), %5B%0A %22Pharetra pharetra massa massa ultricies mi quis hendrerit.%22,%0A %22Dictumst vestibulum rhoncus est pellentesque.%22%5D)%0A%0A%0Adef test_split_paragraphs_crlf():%0A %22%22%22Test splitting paragraphs with carriage returns and line feeds.%22%22%22%0A text = %22%22%22%0APharetra pharetra massa massa ultricies mi quis hendrerit.%5Cr%5Cn%5Cr%5CnDictumst vestibulum rhoncus est pellentesque.%0A %22%22%22%0A assert_equal(split_paragraphs(text, linesep='%5Cr%5Cn'), %5B%0A %22Pharetra pharetra massa massa ultricies mi quis hendrerit.%22,%0A %22Dictumst vestibulum rhoncus est pellentesque.%22%5D)%0A%0A%0A
def test
|
a6b41e855b84ed9971096090a2c38783526ac087
|
check if null for mode of payments (#12264)
|
erpnext/accounts/report/sales_payment_summary/sales_payment_summary.py
|
erpnext/accounts/report/sales_payment_summary/sales_payment_summary.py
|
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import cstr
def execute(filters=None):
	columns = get_columns()
	data = get_sales_payment_data(filters, columns)
return columns, data
def get_columns():
return [
_("Date") + ":Date:80",
_("Owner") + "::150",
_("Payment Mode") + "::140",
_("Sales and Returns") + ":Currency/currency:120",
_("Taxes") + ":Currency/currency:120",
_("Payments") + ":Currency/currency:120",
_("Outstanding Amount") + ":Currency/currency:150",
]
def get_sales_payment_data(filters, columns):
sales_invoice_data = get_sales_invoice_data(filters)
data = []
mode_of_payments = get_mode_of_payments(filters)
for inv in sales_invoice_data:
mode_of_payment = inv["owner"]+cstr(inv["posting_date"])
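		# despite the name, this is a composite lookup key (owner + posting
		# date), matching the keys built in get_mode_of_payments() below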
row = [inv.posting_date, inv.owner,", ".join(mode_of_payments.get(mode_of_payment, [])),
inv.net_total,
inv.total_taxes, (inv.net_total + inv.total_taxes - inv.outstanding_amount),
inv.outstanding_amount]
data.append(row)
return data
def get_conditions(filters):
conditions = ""
if filters.get("from_date"): conditions += "a.posting_date >= %(from_date)s"
if filters.get("to_date"): conditions += " and a.posting_date <= %(to_date)s"
if filters.get("company"): conditions += " and a.company=%(company)s"
if filters.get("customer"): conditions += " and a.customer = %(customer)s"
if filters.get("owner"): conditions += " and a.owner = %(owner)s"
if filters.get("is_pos"): conditions += " and a.is_pos = %(is_pos)s"
return conditions
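# Illustrative result with from_date and company set:
#   "a.posting_date >= %(from_date)s and a.company=%(company)s"
# Callers prepend "where a.docstatus = 1 and", so the fragment assumes
# from_date is always supplied first.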
def get_sales_invoice_data(filters):
conditions = get_conditions(filters)
return frappe.db.sql("""
select
a.posting_date, a.owner,
sum(a.net_total) as "net_total",
sum(a.total_taxes_and_charges) as "total_taxes",
sum(a.base_paid_amount) as "paid_amount",
sum(a.outstanding_amount) as "outstanding_amount"
from `tabSales Invoice` a
where a.docstatus = 1
and {conditions}
group by
a.owner, a.posting_date
""".format(conditions=conditions), filters, as_dict=1)
def get_mode_of_payments(filters):
mode_of_payments = {}
invoice_list = get_invoices(filters)
invoice_list_names = ",".join(['"' + invoice['name'] + '"' for invoice in invoice_list])
if invoice_list:
inv_mop = frappe.db.sql("""select a.owner,a.posting_date,b.mode_of_payment
from `tabSales Invoice` a, `tabSales Invoice Payment` b
where a.name = b.parent
and a.name in ({invoice_list_names})
union
select a.owner,a.posting_date,b.mode_of_payment
from `tabSales Invoice` a, `tabPayment Entry` b,`tabPayment Entry Reference` c
where a.name = c.reference_name
and b.name = c.parent
and a.name in ({invoice_list_names})
""".format(invoice_list_names=invoice_list_names), as_dict=1)
for d in inv_mop:
mode_of_payments.setdefault(d["owner"]+cstr(d["posting_date"]), []).append(d.mode_of_payment)
return mode_of_payments
def get_invoices(filters):
conditions = get_conditions(filters)
return frappe.db.sql("""select a.name
from `tabSales Invoice` a
where a.docstatus = 1 and {conditions}""".format(conditions=conditions),
filters, as_dict=1)
|
Python
| 0
|
@@ -2420,32 +2420,40 @@
,a.posting_date,
+ ifnull(
b.mode_of_paymen
@@ -2445,32 +2445,37 @@
.mode_of_payment
+, '')
%0A%09%09%09from %60tabSal
@@ -2627,16 +2627,24 @@
ng_date,
+ ifnull(
b.mode_o
@@ -2652,16 +2652,21 @@
_payment
+, '')
%0A%09%09%09from
|
883ef42a4a02a98bbbec7a2a3c20938853805fb0
|
Fix update_host API response schema
|
tempest/lib/api_schema/response/compute/v2_1/hosts.py
|
tempest/lib/api_schema/response/compute/v2_1/hosts.py
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
list_hosts = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'hosts': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'host_name': {'type': 'string'},
'service': {'type': 'string'},
'zone': {'type': 'string'}
},
'additionalProperties': False,
'required': ['host_name', 'service', 'zone']
}
}
},
'additionalProperties': False,
'required': ['hosts']
}
}
get_host_detail = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'host': {
'type': 'array',
                'items': {
'type': 'object',
'properties': {
'resource': {
'type': 'object',
'properties': {
'cpu': {'type': 'integer'},
'disk_gb': {'type': 'integer'},
'host': {'type': 'string'},
'memory_mb': {'type': 'integer'},
'project': {'type': 'string'}
},
'additionalProperties': False,
'required': ['cpu', 'disk_gb', 'host',
'memory_mb', 'project']
}
},
'additionalProperties': False,
'required': ['resource']
}
}
},
'additionalProperties': False,
'required': ['host']
}
}
startup_host = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'host': {'type': 'string'},
'power_action': {'enum': ['startup']}
},
'additionalProperties': False,
'required': ['host', 'power_action']
}
}
# The 'power_action' attribute of 'shutdown_host' API is 'shutdown'
shutdown_host = copy.deepcopy(startup_host)
shutdown_host['response_body']['properties']['power_action'] = {
'enum': ['shutdown']
}
# The 'power_action' attribute of 'reboot_host' API is 'reboot'
reboot_host = copy.deepcopy(startup_host)
reboot_host['response_body']['properties']['power_action'] = {
'enum': ['reboot']
}
update_host = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'host': {'type': 'string'},
'maintenance_mode': {'enum': ['on_maintenance',
'off_maintenance']},
'status': {'enum': ['enabled', 'disabled']}
},
'additionalProperties': False,
'required': ['host', 'maintenance_mode', 'status']
}
}
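# Illustrative body accepted by update_host above:
#   {'host': 'host1', 'maintenance_mode': 'on_maintenance',
#    'status': 'enabled'}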
|
Python
| 0.000269
|
@@ -3629,32 +3629,102 @@
False,%0A
+'anyOf': %5B%0A %7B'required': %5B'host', 'status'%5D%7D,%0A %7B
'required': %5B'ho
@@ -3746,26 +3746,27 @@
ce_mode'
-, 'status'
+%5D%7D%0A
%5D%0A %7D%0A
|
1e28b43c8a02bd3e506fbd33012b4dcd7b193433
|
Fix string-bools not translated
|
compose-v2/galaxy-configurator/customize.py
|
compose-v2/galaxy-configurator/customize.py
|
import os
def j2_environment_params():
""" Extra parameters for the Jinja2 Environment
Add AnsibleCoreFiltersExtension for filters known in Ansible
like `to_nice_yaml`
"""
return dict(
extensions=('jinja2_ansible_filters.AnsibleCoreFiltersExtension',),
)
def alter_context(context):
"""
    Translate env variables that start with a known prefix and combine
    them into nested dicts (e.g. all GALAXY_CONFIG_* variables are
    stored under galaxy.*).
    Variables supplied via an input file overwrite values taken from
    the environment.
TODO: Unit test
"""
new_context = dict(os.environ)
translations = {
"GALAXY_CONFIG_": "galaxy",
"GALAXY_UWSGI_CONFIG_": "galaxy_uwsgi",
"GALAXY_JOB_METRICS_": "galaxy_job_metrics",
"NGINX_CONFIG_": "nginx",
"SLURM_CONFIG_": "slurm",
"HTCONDOR_GALAXY_": "htcondor_galaxy",
"HTCONDOR_MASTER_": "htcondor_master",
"HTCONDOR_EXECUTOR_": "htcondor_executor",
"PULSAR_CONFIG_": "pulsar"
}
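    # e.g. (illustrative) GALAXY_CONFIG_ADMIN_USERS=admin@example.org ends
    # up as new_context['galaxy']['admin_users'] once the loop below runs.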
# Add values from possible input file if existent
if context is not None and len(context) > 0:
new_context.update(context)
# Translate string-boolean to Python boolean
for key, value in new_context.items():
if not isinstance(value, str):
continue
if value.lower() == "true":
new_context[key] = True
elif value.lower() == "false":
new_context[key] = False
for to in translations.values():
if to not in new_context:
new_context[to] = {}
for key, value in os.environ.items():
for frm, to in translations.items():
if key.startswith(frm):
                # Format the key depending on whether it is uppercase
                # (to cope with different naming formats: compare Slurm
                # with Galaxy)
key = key[len(frm):]
if key.isupper():
key = key.lower()
new_context[to][key] = value
context = new_context
# Set HOST_EXPORT_DIR depending on EXPORT_DIR being absolute or relative
if "HOST_EXPORT_DIR" not in context and "EXPORT_DIR" in context \
and "HOST_PWD" in context:
if context["EXPORT_DIR"].startswith("./"):
context["HOST_EXPORT_DIR"] = context["HOST_PWD"] \
+ context["EXPORT_DIR"][1:]
else:
context["HOST_EXPORT_DIR"] = context["EXPORT_DIR"]
return context
|
Python
| 0.000001
|
@@ -1638,26 +1638,27 @@
alue in
-os.environ
+new_context
.items()
|
9c7dda9f55369109831eb53f4ed1da5fe82cfc7b
|
Fix test for observation_aggregator
|
tests/chainermn_tests/extensions_tests/test_observation_aggregator.py
|
tests/chainermn_tests/extensions_tests/test_observation_aggregator.py
|
import unittest
import numpy as np
import chainer
import chainer.testing
from chainer.training import extension
import chainermn
from chainermn.extensions.observation_aggregator import observation_aggregator
class DummyChain(chainer.Chain):
def __init__(self):
super(DummyChain, self).__init__()
def forward(self, x):
return chainer.Variable(x, grad=np.array([0]))
class TestObservationAggregator(unittest.TestCase):
def setUp(self):
self.communicator = chainermn.create_communicator('naive')
def test_observation_aggregator(self):
model = DummyChain()
comm = self.communicator
optimizer = chainermn.create_multi_node_optimizer(
chainer.optimizers.Adam(), self.communicator)
optimizer.setup(model)
train = np.random.rand(10, 1)
train_iter = chainer.iterators.SerialIterator(train,
batch_size=1,
repeat=True,
shuffle=True)
updater = chainer.training.StandardUpdater(train_iter, optimizer)
trainer = chainer.training.Trainer(updater, (1, 'epoch'))
@extension.make_extension(
trigger=(2, 'iteration'), priority=extension.PRIORITY_WRITER)
def rank_reporter(trainer):
trainer.observation['rank'] = comm.rank
@extension.make_extension(
trigger=(2, 'iteration'), priority=extension.PRIORITY_READER)
def aggregated_rank_checker(trainer):
actual = trainer.observation['rank-aggregated']
expected = (comm.size - 1) / 2
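            # ranks are 0 .. comm.size - 1, so their mean is (size - 1) / 2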
chainer.testing.assert_allclose(actual,
expected)
trainer.extend(rank_reporter)
trainer.extend(observation_aggregator(comm, 'rank', 'rank-aggregated'))
trainer.extend(aggregated_rank_checker)
trainer.run()
|
Python
| 0.000002
|
@@ -1687,16 +1687,18 @@
- 1) / 2
+.0
%0A
|
88be8370e6ede34cb01240a6621923b1ddea370f
|
remove retval before starting the completion service job if it exists
|
studio/completion_service/completion_service_client.py
|
studio/completion_service/completion_service_client.py
|
import importlib
import shutil
import pickle
import os
import sys
import six
from studio import fs_tracker, model, logs
logger = logs.getLogger('completion_service_client')
try:
logger.setLevel(model.parse_verbosity(sys.argv[1]))
except BaseException:
logger.setLevel(10)
def main():
logger.debug('copying and importing client module')
logger.debug('getting file mappings')
artifacts = fs_tracker.get_artifacts()
files = {}
logger.debug("Artifacts = {}".format(artifacts))
for tag, path in six.iteritems(artifacts):
if tag not in {'workspace', 'modeldir', 'tb', '_runner'}:
if os.path.isfile(path):
files[tag] = path
elif os.path.isdir(path):
dirlist = os.listdir(path)
if any(dirlist):
files[tag] = os.path.join(
path,
dirlist[0]
)
logger.debug("Files = {}".format(files))
script_path = files['clientscript']
retval_path = fs_tracker.get_artifact('retval')
shutil.rmtree(retval_path)
# script_name = os.path.basename(script_path)
new_script_path = os.path.join(os.getcwd(), '_clientscript.py')
shutil.copy(script_path, new_script_path)
script_path = new_script_path
logger.debug("script path: " + script_path)
mypath = os.path.dirname(script_path)
sys.path.append(mypath)
# os.path.splitext(os.path.basename(script_path))[0]
module_name = '_clientscript'
client_module = importlib.import_module(module_name)
logger.debug('loading args')
args_path = files['args']
with open(args_path, 'rb') as f:
args = pickle.loads(f.read())
logger.debug('calling client function')
retval = client_module.clientFunction(args, files)
logger.debug('saving the return value')
if os.path.isdir(fs_tracker.get_artifact('clientscript')):
# on go runner:
logger.debug("Running in a go runner, creating {} for retval"
.format(retval_path))
try:
os.mkdir(retval_path)
except OSError:
logger.debug('retval dir present')
retval_path = os.path.join(retval_path, 'retval')
logger.debug("New retval_path is {}".format(retval_path))
logger.debug('Saving retval')
with open(retval_path, 'wb') as f:
f.write(pickle.dumps(retval, protocol=2))
logger.debug('Done')
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -1066,24 +1066,64 @@
t('retval')%0A
+ if os.path.exists(retval_path):%0A
shutil.r
|
cdc75e576aaa3054ed9cc4bde07e5e85d68c8d9f
|
MANAGE on TestSuite inherits from MANAGE on problem.
|
satori.core/satori/core/entities/TestSuite.py
|
satori.core/satori/core/entities/TestSuite.py
|
# vim:ts=4:sts=4:sw=4:expandtab
from django.db import models
from satori.core.dbev import Events
from satori.core.models import Entity
@ExportModel
class TestSuite(Entity):
"""Model. A group of tests, with dispatch and aggregation algorithm.
"""
parent_entity = models.OneToOneField(Entity, parent_link=True, related_name='cast_testsuite')
problem = models.ForeignKey('Problem', related_name='test_suites')
name = models.CharField(max_length=50)
description = models.TextField(blank=True, default="")
tests = models.ManyToManyField('Test', through='TestMapping', related_name='test_suites')
dispatcher = models.CharField(max_length=128)
reporter = models.CharField(max_length=128)
accumulators = models.CharField(max_length=1024)
params = AttributeGroupField(PCArg('self', 'MANAGE'), PCDeny(), '')
class Meta: # pylint: disable-msg=C0111
unique_together = (('problem', 'name'),)
class ExportMeta(object):
fields = [('problem', 'VIEW'), ('name', 'VIEW'), ('description', 'VIEW'), ('dispatcher', 'VIEW'), ('accumulators', 'VIEW'), ('reporter', 'VIEW')]
@classmethod
def inherit_rights(cls):
inherits = super(TestSuite, cls).inherit_rights()
cls._inherit_add(inherits, 'MANAGE', 'problem', 'EDIT')
return inherits
def save(self, *args, **kwargs):
self.fixup_params()
from satori.core.checking.dispatchers import dispatchers
from satori.core.checking.accumulators import accumulators
from satori.core.checking.reporters import reporters
if not self.dispatcher in dispatchers:
raise ValueError('Dispatcher '+self.dispatcher+' is not allowed')
if not self.reporter in reporters:
raise ValueError('Reporter '+self.reporter+' is not allowed')
if self.accumulators:
for accumulator in self.accumulators.split(','):
if not accumulator in accumulators:
raise ValueError('Accumulator '+accumulator+' is not allowed')
super(TestSuite,self).save(*args, **kwargs)
@ExportMethod(DjangoStruct('TestSuite'), [DjangoStruct('TestSuite'), TypedMap(unicode, AnonymousAttribute), DjangoIdList('Test'), TypedList(TypedMap(unicode, AnonymousAttribute))],
PCAnd(PCArgField('fields', 'problem', 'MANAGE'), PCEachValue('params', PCRawBlob('item')), PCEach('test_params', PCEachValue('item', PCRawBlob('item')))),
[CannotSetField])
@staticmethod
def create(fields, params, test_list, test_params):
if len(test_list) != len(test_params):
raise RuntimeError('Bad test_params length.')
test_suite = TestSuite()
test_suite.forbid_fields(fields, ['id'])
test_suite.update_fields(fields, ['problem', 'name', 'description', 'dispatcher', 'reporter', 'accumulators'])
test_suite.save()
test_suite.params_set_map(params)
count = 0
for test in test_list:
t = TestMapping(suite=test_suite, test=test, order=count)
t.save()
t.params_set_map(test_params[count])
count += 1
return test_suite
@ExportMethod(DjangoStruct('TestSuite'), [DjangoId('TestSuite'), DjangoStruct('TestSuite')], PCArg('self', 'MANAGE'), [CannotSetField])
def modify(self, fields):
self.forbid_fields(fields, ['id', 'problem', 'dispatcher', 'reporter', 'accumulators'])
self.update_fields(fields, ['name', 'description'])
return self
@ExportMethod(DjangoStruct('TestSuite'), [DjangoId('TestSuite'), DjangoStruct('TestSuite'),
TypedMap(unicode, AnonymousAttribute), DjangoIdList('Test'), TypedList(TypedMap(unicode, AnonymousAttribute))],
PCAnd(PCArg('self', 'MANAGE'), PCEachValue('params', PCRawBlob('item')), PCEach('test_params', PCEachValue('item', PCRawBlob('item')))),
[CannotSetField])
def modify_full(self, fields, params, test_list, test_params):
if len(test_list) != len(test_params):
raise RuntimeError('Bad test_params length.')
self.forbid_fields(fields, ['id', 'problem'])
self.update_fields(fields, ['name', 'description', 'dispatcher', 'reporter', 'accumulators'])
self.save()
self.params_set_map(params)
TestMapping.objects.filter(suite=self).delete()
count = 0
for test in test_list:
t = TestMapping(suite=self, test=test, order=count)
t.save()
t.params_set_map(test_params[count])
count += 1
self.rejudge()
return self
@ExportMethod(DjangoStructList('Test'), [DjangoId('TestSuite')], PCArg('self', 'MANAGE'))
def get_tests(self):
return self.tests.all().extra(order_by=['core_testmapping.order'])
@ExportMethod(TypedList(TypedMap(unicode, AnonymousAttribute)), [DjangoId('TestSuite')], PCArg('self', 'MANAGE'))
def get_test_params(self):
return [x.params_get_map() for x in self.tests.all().extra(order_by=['core_testmapping.order'])]
@ExportMethod(NoneType, [DjangoId('TestSuite')], PCArg('self', 'MANAGE'))
def rejudge(self):
RawEvent().send(Event(type='checking_rejudge_test_suite', id=self.id))
class TestSuiteEvents(Events):
model = TestSuite
on_insert = on_update = ['owner', 'problem', 'name']
on_delete = []
|
Python
| 0
|
@@ -1379,12 +1379,14 @@
', '
-EDIT
+MANAGE
')%0A
|
c3a251588868ace81e8e4e0bbe29828495d759d9
|
fix command line arguments
|
ThingThree/Code/Dotstar/strandtest.py
|
ThingThree/Code/Dotstar/strandtest.py
|
#!/usr/bin/python
import time, math, sys
from dotstar import Adafruit_DotStar
numPixels = 24
dataPin = 17
clockPin = 27
strip = Adafruit_DotStar(numPixels, dataPin, clockPin)
strip.begin()
strip.setBrightness(255)
def scale(color, brightness):
str_hex = hex(color)[2:].zfill(6)
r,g,b = (int(str_hex[2*x:2*x+2],16)*(brightness/255.0) for x in xrange(3))
return (int(r) << 8) + (int(g) << 16) + int(b)
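# Note: scale() packs the scaled channels as 0xGGRRBB (green in the high
# byte), which appears to match this strip's expected channel order.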
def pulseFade(color):
for brightness in range(0,255):
for i in range(0,numPixels):
strip.setPixelColor(i, scale(color,brightness))
strip.show()
time.sleep(0.01)
for brightness in range(255,0,-1):
for i in range(0,numPixels):
strip.setPixelColor(i, scale(color,brightness))
strip.show()
time.sleep(0.001)
def pulseFromMiddle(color):
for i in range(0,numPixels/2):
strip.setPixelColor(numPixels/2 + i, color);
strip.setPixelColor(numPixels/2 - i, color);
strip.show();
time.sleep(0.02);
for i in range(0,numPixels/2):
strip.setPixelColor(i, 0);
strip.setPixelColor(numPixels-i, 0);
strip.show();
time.sleep(0.02);
def cycle(color=-1):
head = 0
tail = -10
curColor = 0xFF0000 if (color == -1) else color
while True:
strip.setPixelColor(head,curColor)
strip.setPixelColor(tail,0)
strip.show()
time.sleep(0.02)
head += 1
if (head >= numPixels):
head = 0
if (color == -1):
curColor >>= 8
if (curColor == 0): curColor = 0xFF0000
tail += 1
if (tail >= numPixels): tail = 0
def pulseCycle(color, cycles):
head = 0
tail = -10
iters = 0
while iters < cycles:
strip.setPixelColor(head,color)
strip.setPixelColor(tail,0)
strip.show()
time.sleep(0.02)
head += 1
if (head >= numPixels):
head = 0
iters += 1
tail += 1
if (tail >= numPixels): tail = 0
while tail <= numPixels:
strip.setPixelColor(tail,0)
strip.show()
time.sleep(0.02)
tail += 1
def breathe(color):
while True:
millis = int(round(time.time() * 1000))
brightness = (math.exp(math.sin(millis/2000.0*math.pi)) - 0.36787944)*108.0;
for i in range(0,numPixels):
strip.setPixelColor(i, scale(color,brightness))
strip.show()
time.sleep(0.02)
pulseCycle(int(sys.argv[0],0),int(sys.argv[1]))
|
Python
| 0.000803
|
@@ -2166,17 +2166,17 @@
ys.argv%5B
-0
+1
%5D,0),int
@@ -2185,13 +2185,13 @@
ys.argv%5B
-1
+2
%5D))%0A
|
cfe9c0f3d4155924bd330378ab846da014c4d2cb
|
Remove fixed TODO comment
|
beetsplug/absubmit.py
|
beetsplug/absubmit.py
|
"""Calculate acoustic information and submit to AcousticBrainz.
"""
from __future__ import division, absolute_import, print_function
import hashlib
import json
import os
import subprocess
import tempfile
import distutils
import requests
from beets import plugins
from beets import util
from beets import ui
class ABSubmitError(Exception):
"""Raised when failing to analyse file with extractor."""
def call(args):
"""Execute the command and return its output.
    Raise an ABSubmitError on failure.
"""
try:
return util.command_output(args)
except subprocess.CalledProcessError as e:
raise ABSubmitError(
u'{0} exited with status {1}'.format(args[0], e.returncode)
)
class AcousticBrainzSubmitPlugin(plugins.BeetsPlugin):
def __init__(self):
super(AcousticBrainzSubmitPlugin, self).__init__()
self.config.add({'extractor': u''})
self.extractor = self.config['extractor'].as_str()
if self.extractor:
self.extractor = util.normpath(self.extractor)
            # Explicit path to extractor
if not os.path.isfile(self.extractor):
raise ui.UserError(
u'Extractor command does not exist: {0}.'.
format(self.extractor)
)
else:
# Implicit path to extractor, search for it in path
# TODO how to check for on Windows?
self.extractor = 'streaming_extractor_music'
try:
call([self.extractor])
except OSError:
raise ui.UserError(
u'No extractor command found: please install the '
u'extractor binary from http://acousticbrainz.org/download'
)
except ABSubmitError:
# Extractor found, will exit with an error if not called with
# the correct amount of arguments.
pass
# Get the executable location on the system,
# needed to calculate the sha1 hash.
self.extractor = distutils.spawn.find_executable(self.extractor)
# Calculate extractor hash.
self.extractor_sha = hashlib.sha1()
with open(self.extractor, 'rb') as extractor:
self.extractor_sha.update(extractor.read())
self.extractor_sha = self.extractor_sha.hexdigest()
supported_formats = {'mp3', 'ogg', 'oga', 'flac', 'mp4', 'm4a', 'm4r',
'm4b', 'm4p', 'aac', 'wma', 'asf', 'mpc', 'wv',
'spx', 'tta', '3g2', 'aif', 'aiff', 'ape'}
base_url = 'https://acousticbrainz.org/api/v1/{mbid}/low-level'
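    # e.g. base_url.format(mbid='4e0d8649-1f89-44f5-ae5f-4f8f4d0c2b5e')
    # (illustrative MBID) yields the per-recording low-level endpoint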
def commands(self):
cmd = ui.Subcommand(
'absubmit',
help=u'calculate and submit AcousticBrainz analysis'
)
cmd.func = self.command
return [cmd]
def command(self, lib, opts, args):
# Get items from arguments
items = lib.items(ui.decargs(args))
for item in items:
analysis = self._get_analysis(item)
if analysis:
self._submit_data(item, analysis)
def _get_analysis(self, item):
mbid = item['mb_trackid']
# If file has no mbid skip it.
if not mbid:
self._log.info(u'Not analysing {}, missing '
u'musicbrainz track id.', item)
return None
# If file format is not supported skip it.
if item['format'].lower() not in self.supported_formats:
self._log.info(u'Not analysing {}, file not in '
u'supported format.', item)
return None
# Temporary file to save extractor output to, extractor only works
# if an output file is given. Here we use a temporary file to copy
# the data into a python object and then remove the file from the
# system.
tmp_file, filename = tempfile.mkstemp(suffix='.json')
try:
# Close the file, so the extractor can overwrite it.
try:
call([self.extractor, util.syspath(item.path), filename])
except ABSubmitError as e:
self._log.error(
u'Failed to analyse {item} for AcousticBrainz: {error}',
item=item, error=e
)
return None
with open(filename) as tmp_file:
analysis = json.loads(tmp_file.read())
# Add the hash to the output.
analysis['metadata']['version']['essentia_build_sha'] = \
self.extractor_sha
return analysis
finally:
try:
os.remove(filename)
except OSError as e:
# errno 2 means file does not exist, just ignore this error.
if e.errno != 2:
raise
def _submit_data(self, item, data):
mbid = item['mb_trackid']
headers = {'Content-Type': 'application/json'}
response = requests.post(self.base_url.format(mbid=mbid),
json=data, headers=headers)
# Test that request was successful and raise an error on failure.
if response.status_code != 200:
try:
message = response.json()['message']
except (ValueError, KeyError) as e:
message = u'unable to get error message: {}'.format(e)
self._log.error(
u'Failed to submit AcousticBrainz analysis of {item}: '
                u'{message}.', item=item, message=message
)
else:
self._log.debug(u'Successfully submitted AcousticBrainz analysis '
u'for {}.', item)
|
Python
| 0
|
@@ -1396,56 +1396,8 @@
ath%0A
- # TODO how to check for on Windows?%0A
|
a171595f029b43af27d14a125e68647e2206c6d5
|
Update __init__.py
|
tendrl/commons/objects/node_alert_counters/__init__.py
|
tendrl/commons/objects/node_alert_counters/__init__.py
|
from tendrl.commons import objects
class NodeAlertCounters(objects.BaseObject):
def __init__(
self,
warn_count=0,
node_id=None,
*args,
**kwargs
):
super(NodeAlertCounters, self).__init__(*args, **kwargs)
self.warning_count = warn_count
self.node_id = node_id
self.value = '/nodes/{0}/alert_counters'
def render(self):
self.value = self.value.format(self.node_id or NS.node_context.node_id)
return super(NodeAlertCounters, self).render()
def save(self, *args, **kwargs):
NS.tendrl.objects.ClusterNodeAlertCounters(warn_count=self.warning_count,
node_id=self.node_id).save()
super(NodeAlertCounters, self).save(*args, **kwargs)
|
Python
| 0.000072
|
@@ -724,16 +724,116 @@
.node_id
+,%0A integration_id=NS.tendrl_context.integration_id
).save()
|
dd7e0d18a15195cf67af44af8c15918a5cf068e4
|
add header information
|
douban_book_api.py
|
douban_book_api.py
|
from douban_client.api.error import DoubanAPIError
import requests
import simplejson
from douban_client import DoubanClient
__author__ = 'owen2785'
baseurl = 'https://api.douban.com/v2/book/isbn/'
def getbyisbn_without_auth(isbn):
r = requests.get(baseurl+str(isbn))
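    # e.g. GET https://api.douban.com/v2/book/isbn/9780132350884
    # (illustrative ISBN) returns the book record, decoded from JSON below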
return r.json()
|
Python
| 0
|
@@ -194,18 +194,16 @@
/isbn/'%0A
-%0A%0A
def getb
@@ -266,17 +266,81 @@
tr(isbn)
-)
+,headers=headers)%0A print r.headers%0A print r.request.headers
%0A ret
@@ -351,8 +351,9 @@
r.json()
+%0A
|
72eeb2ddb1f1be2e0c475f730461c3f195759fea
|
add task start time to loghub
|
dpark/utils/log.py
|
dpark/utils/log.py
|
import os.path
import sys
import logging
import re
from datetime import datetime
LOG_FORMAT = '{GREEN}%(asctime)-15s{RESET}' \
' [%(levelname)s] [%(threadName)s] [%(name)-9s:%(lineno)d] %(message)s'
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
RESET = "\033[0m"
BOLD = "\033[1m"
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = [
"\033[1;%dm" % i for i in range(30, 38)
]
PALLETE = {
'RESET': RESET,
'BOLD': BOLD,
'BLACK': BLACK,
'RED': RED,
'GREEN': GREEN,
'YELLOW': YELLOW,
'BLUE': BLUE,
'MAGENTA': MAGENTA,
'CYAN': CYAN,
'WHITE': WHITE,
}
COLORS = {
'WARNING': YELLOW,
'INFO': WHITE,
'DEBUG': BLUE,
'CRITICAL': YELLOW,
'ERROR': RED
}
FORMAT_PATTERN = re.compile('|'.join('{%s}' % k for k in PALLETE))
def formatter_message(message, use_color=True):
if use_color:
return FORMAT_PATTERN.sub(
lambda m: PALLETE[m.group(0)[1:-1]],
message
)
return FORMAT_PATTERN.sub('', message)
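# e.g. formatter_message('{RED}err{RESET}') == '\x1b[1;31merr\x1b[0m';
# with use_color=False the colour markers are stripped instead.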
class ColoredFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None, use_color=True):
if fmt:
fmt = formatter_message(fmt, use_color)
logging.Formatter.__init__(self, fmt=fmt, datefmt=datefmt)
self.use_color = use_color
def format(self, record):
record = logging.makeLogRecord(record.__dict__)
levelname = record.levelname
if self.use_color and levelname in COLORS:
levelname_color = COLORS[levelname] + levelname + RESET
record.levelname = levelname_color
record.msg = formatter_message(record.msg, self.use_color)
return logging.Formatter.format(self, record)
USE_UTF8 = getattr(sys.stderr, 'encoding', None) == 'UTF-8'
ASCII_BAR = ('[ ', ' ]', '#', '-', '-\\|/-\\|')
UNICODE_BAR = (u'[ ', u' ]', u'\u2589', u'-',
u'-\u258F\u258E\u258D\u258C\u258B\u258A')
def make_progress_bar(ratio, size=14):
if USE_UTF8:
L, R, B, E, F = UNICODE_BAR
else:
L, R, B, E, F = ASCII_BAR
if size > 4:
n = size - 4
with_border = True
else:
n = size
with_border = False
p = n * ratio
blocks = int(p)
if p > blocks:
frac = int((p - blocks) * 7)
blanks = n - blocks - 1
C = F[frac]
else:
blanks = n - blocks
C = ''
if with_border:
return ''.join([L, B * blocks, C, E * blanks, R])
else:
return ''.join([B * blocks, C, E * blanks])
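# e.g. make_progress_bar(0.5, size=14) renders as '[ #####----- ]' on an
# ASCII terminal (the UTF-8 variant uses block characters instead).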
def init_dpark_logger(log_level, use_color=None):
logger = get_logger('dpark')
logger.propagate = False
handler = logging.StreamHandler(stream=sys.stderr)
handler.setFormatter(ColoredFormatter(LOG_FORMAT, DATE_FORMAT, use_color))
handler.setLevel(max(log_level, logger.level))
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
def get_logger(name):
""" Always use logging.Logger class.
The user code may change the loggerClass (e.g. pyinotify),
and will cause exception when format log message.
"""
old_class = logging.getLoggerClass()
logging.setLoggerClass(logging.Logger)
logger = logging.getLogger(name)
logging.setLoggerClass(old_class)
return logger
def add_loghub(framework_id):
logger = get_logger('dpark')
try:
import dpark
from dpark.conf import LOGHUB
from dpark.utils import getuser
date_str = datetime.now().strftime("%Y/%m/%d/%H")
date_dir_path = os.path.join(LOGHUB, date_str)
if not os.path.exists(date_dir_path):
logger.error("loghub dir not ready: %s", date_dir_path)
return
dir_path = os.path.join(date_dir_path, framework_id)
os.mkdir(dir_path)
dpark_mtime = datetime.fromtimestamp(os.stat(dpark.__file__).st_mtime).strftime('%Y-%m-%dT%H:%M:%S')
infos = [
("CMD", ' '.join(sys.argv)),
("USER", getuser()),
("PWD", os.getcwd()),
("DPARK", dpark.__file__),
("DPARK_MTIME", dpark_mtime),
("PYTHONPATH", os.environ.get("PYTHONPATH", ""))
]
log_path = os.path.join(dir_path, "log")
try:
with open(log_path, "a") as f:
for i in infos:
f.write("DPARK_{} = {}\n".format(i[0], i[1]))
f.write("\n")
except IOError:
logger.exception("fail to write loghub: %s", log_path)
return
file_handler = logging.FileHandler(filename=log_path)
file_handler.setFormatter(ColoredFormatter(LOG_FORMAT, DATE_FORMAT, True))
file_handler.setLevel(logging.INFO)
logger.addHandler(file_handler)
logger.info("logging/prof to %s", dir_path)
return file_handler, dir_path
except Exception:
logger.exception("add_loghub fail")
def create_logger(stream, handler=None):
logger = get_logger('dpark.' + str(stream.fileno()))
logger.propagate = False
stream_handler = logging.StreamHandler(stream=stream)
stream_handler.setFormatter(logging.Formatter())
stream_handler.setLevel(logging.INFO)
logger.addHandler(stream_handler)
if handler:
logger.addHandler(handler)
return logger
|
Python
| 0.000001
|
@@ -2,21 +2,16 @@
mport os
-.path
%0Aimport
@@ -3750,118 +3750,8 @@
h)%0A%0A
- dpark_mtime = datetime.fromtimestamp(os.stat(dpark.__file__).st_mtime).strftime('%25Y-%25m-%25dT%25H:%25M:%25S')%0A%0A
@@ -3890,30 +3890,70 @@
(%22
-DPARK%22, dpark.__file__
+CTIME%22, datetime.strftime(datetime.now(), %22%25Y-%25m-%25d %25H:%25M:%25S%22)
),%0A
@@ -3974,22 +3974,16 @@
PARK
-_MTIME
%22, dpark
_mti
@@ -3982,14 +3982,17 @@
park
-_mtime
+.__file__
),%0A
|
08a51723539dbeba08cd23d8942d393ae3f8535c
|
Add test for have_term and get_term methods
|
tests/grammar_term-nonterm_test/TerminalAddingTest.py
|
tests/grammar_term-nonterm_test/TerminalAddingTest.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase
from grammpy import Grammar
from grammpy.Terminal import Terminal
class TempClass:
pass
class TerminalAddingTest(TestCase):
def test_correctAddOne(self):
gr = Grammar()
self.assertEqual(gr.terms_count(), 0)
self.assertFalse(gr.have_term(0))
self.assertIsNone(gr.get_term(0))
self.assertIsNone(gr.term(0))
gr.add_term(0)
self.assertEqual(gr.terms_count(), 1)
self.assertIsNotNone(gr.get_term(0))
self.assertIsNotNone(gr.term(0))
self.assertTrue(isinstance(gr.term(0), Terminal))
self.assertEqual(gr.term(0).symbol(), 0)
def test_correctAddTwo(self):
gr = Grammar()
self.assertEqual(gr.terms_count(), 0)
self.assertIsNone(gr.get_term(0))
self.assertIsNone(gr.term(0))
self.assertIsNone(gr.get_term('asdf'))
self.assertIsNone(gr.term('asdf'))
gr.add_term(0)
self.assertEqual(gr.terms_count(), 1)
self.assertIsNotNone(gr.get_term(0))
self.assertIsNotNone(gr.term(0))
self.assertTrue(isinstance(gr.term(0), Terminal))
self.assertEqual(gr.term(0).s, 0)
self.assertIsNone(gr.get_term('asdf'))
self.assertIsNone(gr.term('asdf'))
gr.add_term('asdf')
self.assertEqual(gr.terms_count(), 2)
self.assertIsNotNone(gr.get_term(0))
self.assertIsNotNone(gr.term(0))
self.assertTrue(isinstance(gr.term(0), Terminal))
self.assertEqual(gr.term(0).s, 0)
self.assertIsNotNone(gr.get_term('asdf'))
self.assertIsNotNone(gr.term('asdf'))
self.assertTrue(isinstance(gr.term('asdf'), Terminal))
self.assertEqual(gr.term('asdf').s, 'asdf')
def test_addInArray(self):
gr = Grammar()
gr.add_term([0, 'asdf', TempClass])
self.assertEqual(gr.terms_count(), 3)
self.assertIsNotNone(gr.get_term(0))
self.assertIsNotNone(gr.term(0))
self.assertTrue(isinstance(gr.term(0), Terminal))
self.assertEqual(gr.term(0).s, 0)
self.assertIsNotNone(gr.get_term('asdf'))
self.assertIsNotNone(gr.term('asdf'))
self.assertTrue(isinstance(gr.term('asdf'), Terminal))
self.assertEqual(gr.term('asdf').s, 'asdf')
self.assertIsNotNone(gr.get_term(TempClass))
self.assertIsNotNone(gr.term(TempClass))
self.assertTrue(isinstance(gr.term(TempClass), Terminal))
self.assertEqual(gr.term(TempClass).s, TempClass)
def test_oneSeparateTwoTuple(self):
gr = Grammar()
gr.add_term(0)
self.assertEqual(gr.terms_count(), 1)
self.assertIsNotNone(gr.get_term(0))
self.assertIsNotNone(gr.term(0))
self.assertTrue(isinstance(gr.term(0), Terminal))
self.assertEqual(gr.term(0).s, 0)
gr.add_term(('asdf', TempClass))
self.assertEqual(gr.terms_count(), 3)
self.assertIsNotNone(gr.get_term(0))
self.assertIsNotNone(gr.term(0))
self.assertTrue(isinstance(gr.term(0), Terminal))
self.assertEqual(gr.term(0).s, 0)
self.assertIsNotNone(gr.get_term('asdf'))
self.assertIsNotNone(gr.term('asdf'))
self.assertTrue(isinstance(gr.term('asdf'), Terminal))
self.assertEqual(gr.term('asdf').s, 'asdf')
self.assertIsNotNone(gr.get_term(TempClass))
self.assertIsNotNone(gr.term(TempClass))
self.assertTrue(isinstance(gr.term(TempClass), Terminal))
self.assertEqual(gr.term(TempClass).s, TempClass)
|
Python
| 0.000001
|
@@ -267,16 +267,410 @@
tCase):%0A
+%0A def test_haveTermEmpty(self):%0A gr = Grammar()%0A self.assertFalse(gr.have_term(TempClass))%0A self.assertFalse(gr.have_term(1))%0A self.assertFalse(gr.have_term('asdf'))%0A%0A def test_getTermEmpty(self):%0A gr = Grammar()%0A self.assertIsNone(gr.get_term(TempClass))%0A self.assertIsNone(gr.get_term(1))%0A self.assertIsNone(gr.get_term('asdf'))%0A%0A
def
|
396ab20874a0c3492482a8ae03fd7d61980917a5
|
Update closest match adapter docstring.
|
chatterbot/adapters/logic/closest_match.py
|
chatterbot/adapters/logic/closest_match.py
|
# -*- coding: utf-8 -*-
from fuzzywuzzy import fuzz
from .base_match import BaseMatchAdapter
class ClosestMatchAdapter(BaseMatchAdapter):
"""
The ClosestMatchAdapter logic adapter creates a response by
using fuzzywuzzy's process class to extract the most similar
response to the input. This adapter selects a response to an
input statement by selecting the closest known matching
statement based on the Levenshtein Distance between the text
of each statement.
"""
def get(self, input_statement):
"""
Takes a statement string and a list of statement strings.
Returns the closest matching statement from the list.
"""
statement_list = self.context.storage.get_response_statements()
if not statement_list:
if self.has_storage_context:
# Use a randomly picked statement
self.logger.info(
u'No statements have known responses. ' +
u'Choosing a random response to return.'
)
return 0, self.context.storage.get_random()
else:
raise self.EmptyDatasetException()
confidence = -1
closest_match = input_statement
# Find the closest matching known statement
for statement in statement_list:
ratio = fuzz.ratio(input_statement.text.lower(), statement.text.lower())
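            # fuzz.ratio returns an integer in [0, 100]; e.g.
            # fuzz.ratio('hello', 'helo') is 89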
if ratio > confidence:
confidence = ratio
closest_match = statement
        # Convert the percentage (0-100) into a confidence between 0 and 1
confidence /= 100.0
return confidence, closest_match
|
Python
| 0
|
@@ -45,17 +45,16 @@
rt fuzz%0A
-%0A
from .ba
@@ -187,17 +187,23 @@
ter
-creates a
+selects a known
res
@@ -211,215 +211,98 @@
onse
- by
%0A
-using fuzzywuzzy's process class to extract the most similar%0A response to the input. This adapter selects a response to an%0A input statement by selecting the closest known matching%0A statemen
+to an input by searching for a known statement that most closely%0A matches the inpu
t ba
|
2947fe97d466872de05ada289d9172f41895969c
|
Update GOV.UK Frontend/Jinja lib test
|
tests/templates/components/test_radios_with_images.py
|
tests/templates/components/test_radios_with_images.py
|
import json
def test_govuk_frontend_jinja_overrides_on_design_system_v3():
with open("package.json") as package_file:
package_json = json.load(package_file)
assert package_json["dependencies"]["govuk-frontend"].startswith("3."), (
"After upgrading the Design System, manually validate that "
"`app/templates/govuk_frontend_jinja_overrides/templates/components/*/template.html`"
"are all structurally-correct and up-to-date macros. If not, update the macros or retire them and update the "
"rendering process."
)
|
Python
| 0
|
@@ -5,16 +5,86 @@
rt json%0A
+from importlib import metadata%0A%0Afrom packaging.version import Version%0A
%0A%0Adef te
@@ -238,20 +238,49 @@
le)%0A
-%0A assert
+ govuk_frontend_version = Version(
pack
@@ -325,25 +325,615 @@
nd%22%5D
-.startswith(%223.%22)
+)%0A%0A govuk_frontend_jinja_version = Version(metadata.version(%22govuk-frontend-jinja%22))%0A%0A # This should be checking govuk_frontend_version == 3.14.x, but we're not there yet. Update this when we are.%0A # Compatibility between these two libs is defined at https://github.com/LandRegistry/govuk-frontend-jinja/%0A correct_govuk_frontend_version = Version(%223.0.0%22) %3C= govuk_frontend_version %3C Version(%224.0.0%22)%0A correct_govuk_frontend_jinja_version = Version(%221.5.0%22) %3C= govuk_frontend_jinja_version %3C Version(%221.6.0%22)%0A%0A assert correct_govuk_frontend_version and correct_govuk_frontend_jinja_version
, (%0A
@@ -957,16 +957,26 @@
grading
+either of
the Desi
@@ -988,18 +988,27 @@
stem
-, manually
+ packages, you must
val
|
f4a80c720d0164eb8a942e3ad1b5244d30800e5a
|
Add --allow-nacl-socket-api for the chromoting functional test.
|
chrome/test/functional/chromoting_basic.py
|
chrome/test/functional/chromoting_basic.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import pyauto_functional # Must come before chromoting and pyauto.
import chromoting
import pyauto
class ChromotingBasic(chromoting.ChromotingMixIn, pyauto.PyUITest):
"""Basic tests for Chromoting."""
def setUp(self):
"""Set up test for Chromoting on both local and remote machines.
Installs the Chromoting app, launches it, and authenticates
using the default Chromoting test account.
"""
super(ChromotingBasic, self).setUp()
self._app = self.InstallExtension(self.GetWebappPath())
self.LaunchApp(self._app)
account = self.GetPrivateInfo()['test_chromoting_account']
self.Authenticate(account['username'], account['password'])
def testChromoting(self):
"""Verify that we can start and disconnect from a Chromoting session."""
    client_local = (self.remote is None)
host = self
client = self if client_local else self.remote
client_tab_index = 2 if client_local else 1
access_code = host.Share()
self.assertTrue(access_code,
msg='Host attempted to share, but it failed. '
'No access code was found.')
if client_local:
client.LaunchApp(self._app)
self.assertTrue(client.Connect(access_code, client_tab_index),
msg='The client attempted to connect to the host, '
'but the chromoting session did not start.')
host.CancelShare()
client.Disconnect(client_tab_index)
if __name__ == '__main__':
pyauto_functional.Main()
|
Python
| 0.000047
|
@@ -400,16 +400,361 @@
ng.%22%22%22%0A%0A
+ _EXTRA_CHROME_FLAGS = %5B%0A '--allow-nacl-socket-api=*',%0A %5D%0A%0A def ExtraChromeFlags(self):%0A %22%22%22Ensures Chrome is launched with some custom flags.%0A%0A Overrides the default list of extra flags passed to Chrome. See%0A ExtraChromeFlags() in pyauto.py.%0A %22%22%22%0A return pyauto.PyUITest.ExtraChromeFlags(self) + self._EXTRA_CHROME_FLAGS%0A%0A
def se
|
0b366a3f4c23b644f885ed649edc577242ae90ee
|
Fix genreflex rootmap files to not contain stray spaces after "string". Corresponds to v5-22-00-patches r27408
|
cint/reflex/python/genreflex/genrootmap.py
|
cint/reflex/python/genreflex/genrootmap.py
|
# Copyright CERN, CH-1211 Geneva 23, 2004-2006, All rights reserved.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose is hereby granted without fee, provided that this copyright and
# permissions notice appear in all copies and derivatives.
#
# This software is provided "as is" without express or implied warranty.
import os, sys, string, re
model = """
# This file has been generated by genreflex with the --rootmap option
#--Final End
"""
#----------------------------------------------------------------------------------
def isRootmapVetoed(c) :
if c.has_key('extra') and 'rootmap' in c['extra'] :
rootmapsel = c['extra']['rootmap'].lower()
return (rootmapsel == 'false' or rootmapsel == '0')
return False
#----------------------------------------------------------------------------------
def genRootMap(mapfile, dicfile, libfile, cnames, classes) :
startmark = '#--Begin ' + dicfile + '\n'
endmark = '#--End ' + dicfile + '\n'
finalmark = '#--Final End\n'
    transtable = string.maketrans(': ', '@-')
for c in classes :
c['fullname'] = c.get('fullname', c['name'])
# filter out classes that were de-selected by rootmap attribute
cveto = filter( lambda c: isRootmapVetoed(c),classes)
for cv in cveto :
cvname = cv['fullname']
        # not all cvnames have to be in cnames; a cname could have been excluded
if cvname in cnames:
cnames.remove(cvname)
new_lines = []
if libfile.rfind('/') != -1 : libfile = libfile[libfile.rfind('/')+1:]
for c in cnames :
nc = string.translate(str(c), transtable)
nc = re.sub(r"\bstd@@basic_string<char>", 'string', nc)
nc = re.sub(r"\bstd@@", '', nc)
nc = nc.replace(' ','')
new_lines += '%-45s %s\n' % ('Library.' + nc + ':', libfile )
if not os.path.exists(mapfile) :
lines = [ line+'\n' for line in model.split('\n')]
else :
f = open(mapfile,'r')
lines = [ line for line in f.readlines()]
f.close()
if startmark in lines and endmark in lines :
lines[lines.index(startmark)+1 : lines.index(endmark)] = new_lines
else :
lines[lines.index(finalmark):lines.index(finalmark)] = [startmark]+new_lines+[endmark]
f = open(mapfile,'w')
f.writelines(lines)
f.close()
|
Python
| 0.000014
|
@@ -1632,16 +1632,107 @@
stable)%0A
+ # also remove possible seperator ' ', or set%3Cbasic_string%3Cchar%3E %3E becomes set%3Cstring %3E%0A
nc =
@@ -1766,16 +1766,18 @@
ng%3Cchar%3E
+-?
%22, 'stri
|
b2e6a7a8df1ede0118838ce494e1679eea0eb578
|
Decrease cert expiration alerting threshold from 2 years to 1 year. (#1002)
|
scripts/check-bundled-ca-certs-expirations.py
|
scripts/check-bundled-ca-certs-expirations.py
|
#!/usr/bin/env python
# Copyright 2014-2020 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script which errors out if any of the bundled certs will expire in 24 months or sooner.
"""
from __future__ import absolute_import
from __future__ import print_function
if False:
from typing import List
import os
import glob
import datetime
from io import open
from cryptography import x509
from cryptography.hazmat.backends import default_backend
# By default we fail if any of the bundled cert expires in 2 years or sooner
DEFAULT_EXPIRE_THRESHOLD_TIMEDELTA = datetime.timedelta(days=(12 * 30 * 2))
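# 12 * 30 * 2 = 720 days, i.e. roughly two years with every month
# approximated as 30 days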
def fail_if_cert_expires_in_timedelta(cert_path, expire_in_threshold_timedelta):
# type: (str, datetime.timedelta) -> None
"""
Fail and throw an exception if the provided certificate expires in the provided timedelta period
or sooner.
"""
with open(cert_path, "rb") as fp:
content = fp.read()
cert = x509.load_pem_x509_certificate(content, default_backend())
now_dt = datetime.datetime.utcnow()
expire_in_days = (cert.not_valid_after - now_dt).days
if now_dt + expire_in_threshold_timedelta >= cert.not_valid_after:
raise Exception(
(
"Certificate %s will expire in %s days (%s), please update!"
% (cert_path, expire_in_days, cert.not_valid_after)
)
)
else:
print(
"OK - certificate %s will expire in %s days (%s)"
% (cert_path, expire_in_days, cert.not_valid_after)
)
def get_bundled_cert_paths():
# type: () -> List[str]
"""
Return full absolute paths for all the bundled certs.
"""
cwd = os.path.abspath(os.getcwd())
result = []
for file_name in glob.glob("certs/*"):
file_path = os.path.join(cwd, file_name)
result.append(file_path)
return result
def main():
cert_paths = get_bundled_cert_paths()
for cert_path in cert_paths:
fail_if_cert_expires_in_timedelta(
cert_path, expire_in_threshold_timedelta=DEFAULT_EXPIRE_THRESHOLD_TIMEDELTA
)
if __name__ == "__main__":
main()
|
Python
| 0.000007
|
@@ -1018,15 +1018,14 @@
in
-2
+1
year
-s
or
@@ -1103,17 +1103,17 @@
* 30 *
-2
+1
))%0A%0A%0Adef
|
686f0e21de510a12ee3d6af410448eb405d3e7b6
|
add 1.4.0 release and 1.5 stable branch (#16261)
|
var/spack/repos/builtin/packages/libunwind/package.py
|
var/spack/repos/builtin/packages/libunwind/package.py
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libunwind(AutotoolsPackage):
"""A portable and efficient C programming interface (API) to determine
the call-chain of a program."""
homepage = "http://www.nongnu.org/libunwind/"
url = "http://download.savannah.gnu.org/releases/libunwind/libunwind-1.1.tar.gz"
git = "https://github.com/libunwind/libunwind"
maintainers = ['mwkrentel']
version('master', branch='master')
version('1.4-head', branch='v1.4-stable')
version('1.4-rc1', sha256='1928459139f048f9b4aca4bb5010540cb7718d44220835a2980b85429007fa9f')
version('1.3.1', sha256='43997a3939b6ccdf2f669b50fdb8a4d3205374728c2923ddc2354c65260214f8', preferred=True)
version('1.2.1', sha256='3f3ecb90e28cbe53fba7a4a27ccce7aad188d3210bb1964a923a731a27a75acb')
version('1.1', sha256='9dfe0fcae2a866de9d3942c66995e4b460230446887dbdab302d41a8aee8d09a')
variant('xz', default=False,
description='Support xz (lzma) compressed symbol tables.')
variant('zlib', default=False,
description='Support zlib compressed symbol tables (master '
'branch only).')
# The libunwind releases contain the autotools generated files,
# but the git repo snapshots do not.
depends_on('autoconf', type='build', when='@master,1.4-head')
depends_on('automake', type='build', when='@master,1.4-head')
depends_on('libtool', type='build', when='@master,1.4-head')
depends_on('m4', type='build', when='@master,1.4-head')
depends_on('xz', type='link', when='+xz')
depends_on('zlib', type='link', when='+zlib')
conflicts('platform=darwin',
msg='Non-GNU libunwind needs ELF libraries Darwin does not have')
provides('unwind')
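    # pass compiler flags to the build via CFLAGS-style variables (Spack's build_system_flags handler)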
flag_handler = AutotoolsPackage.build_system_flags
def configure_args(self):
spec = self.spec
args = []
if '+xz' in spec:
args.append('--enable-minidebuginfo')
else:
args.append('--disable-minidebuginfo')
# zlib support is only in the master branch (for now).
if spec.satisfies('@master'):
if '+zlib' in spec:
args.append('--enable-zlibdebuginfo')
else:
args.append('--disable-zlibdebuginfo')
return args
|
Python
| 0
|
@@ -650,37 +650,295 @@
('1.
-4-head', branch='v1.4-stable'
+5-head', branch='v1.5-stable')%0A version('1.5-rc1', sha256='3e0cbc6dee326592097ef06e97cf76ef597987eddd0df8bea49b0594e587627a')%0A version('1.4-head', branch='v1.4-stable')%0A version('1.4.0', sha256='df59c931bd4d7ebfd83ee481c943edf015138089b8e50abed8d9c57ba9338435', preferred=True
)%0A
@@ -1127,32 +1127,16 @@
60214f8'
-, preferred=True
)%0A ve
@@ -1527,16 +1527,8 @@
les
-(master
'%0A
@@ -1542,19 +1542,22 @@
'
-branch only
+(1.5 and later
).')
@@ -1722,32 +1722,41 @@
@master,1.4-head
+,1.5-head
')%0A depends_o
@@ -1797,32 +1797,41 @@
@master,1.4-head
+,1.5-head
')%0A depends_o
@@ -1876,24 +1876,33 @@
ter,1.4-head
+,1.5-head
')%0A depen
@@ -1955,16 +1955,25 @@
1.4-head
+,1.5-head
')%0A%0A
@@ -2505,44 +2505,36 @@
is
-only in the master branch (for now).
+available in 1.5.x and later
%0A
@@ -2558,22 +2558,20 @@
sfies('@
-master
+1.5:
'):%0A
|
1be539f68019435b2d09b1a46e4786a09e59edf2
|
Allow for multiple SEPA payment methods with different versions (#493) (#496)
|
account_banking_pain_base/models/account_payment_method.py
|
account_banking_pain_base/models/account_payment_method.py
|
# -*- coding: utf-8 -*-
# © 2016 Akretion (Alexis de Lattre <alexis.delattre@akretion.com>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import models, fields, api, _
from odoo.exceptions import UserError
class AccountPaymentMethod(models.Model):
_inherit = 'account.payment.method'
pain_version = fields.Selection([], string='PAIN Version')
convert_to_ascii = fields.Boolean(
string='Convert to ASCII', default=True,
help="If active, Odoo will convert each accented character to "
"the corresponding unaccented character, so that only ASCII "
"characters are used in the generated PAIN file.")
@api.multi
def get_xsd_file_path(self):
"""This method is designed to be inherited in the SEPA modules"""
self.ensure_one()
raise UserError(_(
"No XSD file path found for payment method '%s'") % self.name)
|
Python
| 0
|
@@ -921,8 +921,294 @@
f.name)%0A
+%0A _sql_constraints = %5B(%0A # Extending this constraint from account_payment_mode%0A 'code_payment_type_unique',%0A 'unique(code, payment_type, pain_version)',%0A 'A payment method of the same type already exists with this code'%0A ' and PAIN version'%0A )%5D%0A
|
4697bb9bb7a3708f1c35b795c02db329d3142703
|
Add script to collect metrics in samples of a case into a single vector
|
src/rgbd_benchmark_tools/h5_collectSamples.py
|
src/rgbd_benchmark_tools/h5_collectSamples.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 17 09:02:31 2015
@author: jesus
"""
import argparse
import numpy as np
import h5py
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='''
This script collects the metrics and results from several samples of an experiment into its parent group.
''')
parser.add_argument('h5file', help='HDF5 file in which the metrics are stored in the group eval for each sample')
parser.add_argument('group', help='H5 path of the main group containing sample minor groups')
parser.add_argument('delta_unit', help='delta_unit of the metrics to collect')
args = parser.parse_args()
h5f = h5py.File(args.h5file,'a')
unit = args.delta_unit
# Save the evaluation metric values in the samples' parent group
main_group = h5f[args.group]
# Check if eval group already exists in the main group
if 'eval' in main_group:
print "Removing existing eval group in" + main_group.name
del main_group['eval']
numOfSamples = len(main_group)
# Create new eval group in the main group
samples = main_group.keys()
eval_group = main_group.require_group('eval/'+args.delta_unit)
names = ['rmse','median','mean','max']
for name in names:
# Preallocate arrays
t_arr = np.empty(numOfSamples)
r_arr = np.empty(numOfSamples)
# Store metrics in sample in an array
for i, sample in enumerate(samples):
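            # [()] reads the full value of a scalar HDF5 dataset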
t_arr[i] = main_group[sample+'/eval/'+unit+'/t_'+name][()]
r_arr[i] = main_group[sample+'/eval/'+unit+'/r_'+name][()]
# Check if dataset already exists in the group
if 't_'+name in eval_group:
print "Removing existing trans dataset in " + eval_group.name
del eval_group['t_'+name]
if 'r_'+name in eval_group:
print "Removing existing rot dataset in " + eval_group.name
del eval_group['r_'+name]
# Save as a new dataset in the main group
eval_group.create_dataset('t_'+name, data=t_arr)
eval_group.create_dataset('r_'+name, data=r_arr)
|
Python
| 0
|
@@ -926,17 +926,23 @@
if 'eval
-'
+/'+unit
in main
@@ -982,24 +982,35 @@
xisting eval
+/%22+unit + %22
group in%22 +
@@ -1054,17 +1054,23 @@
up%5B'eval
-'
+/'+unit
%5D%0A nu
@@ -1177,24 +1177,74 @@
roup.keys()%0A
+ samples = %5Bx for x in samples if x != 'eval'%5D%0A
eval_gro
@@ -1281,27 +1281,16 @@
'eval/'+
-args.delta_
unit)%0A
|
0f295d0ee8c29361bd4f80dbc947da65dd7fbbe6
|
move raindrops
|
Exercism/python/raindrops/raindrops.py
|
Exercism/python/raindrops/raindrops.py
|
raindrops = ((3, "Pling"), (5, "Plang"), (7, "Plong"))
def convert(number):
raindrop_result = [raindrop[1] for raindrop in raindrops if number % raindrop[0] == 0]
return "".join(raindrop_result) or str(number)
|
Python
| 0.000759
|
@@ -1,16 +1,41 @@
+def convert(number):%0A
raindrops = ((3,
@@ -73,36 +73,20 @@
long%22))%0A
-def convert(number):
+
%0A rai
|
15ae458f7cf1a8257967b2b3b0ceb812547c4766
|
Test more edge cases of the highlighting parser
|
IPython/utils/tests/test_pycolorize.py
|
IPython/utils/tests/test_pycolorize.py
|
"""Test suite for our color utilities.
Authors
-------
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# third party
import nose.tools as nt
# our own
from IPython.utils.PyColorize import Parser
#-----------------------------------------------------------------------------
# Test functions
#-----------------------------------------------------------------------------
def test_unicode_colorize():
p = Parser()
f1 = p.format('1/0', 'str')
f2 = p.format(u'1/0', 'str')
nt.assert_equal(f1, f2)
|
Python
| 0
|
@@ -1,8 +1,24 @@
+# coding: utf-8%0A
%22%22%22Test
@@ -691,16 +691,26 @@
t Parser
+%0Aimport io
%0A%0A#-----
@@ -879,16 +879,442 @@
------%0A%0A
+sample = u%22%22%22%0Adef function(arg, *args, kwarg=True, **kwargs):%0A '''%0A this is docs%0A '''%0A pass is True%0A False == None%0A%0A with io.open(ru'unicode'):%0A raise ValueError(%22%5Cn escape %5Cr sequence%22)%0A%0A print(%22w%C4%9Bird %C3%BCnico%C3%B0e%22)%0A%0Aclass Bar(Super):%0A%0A def __init__(self):%0A super(Bar, self).__init__(1**2, 3%5E4, 5 or 6)%0A%22%22%22%0A%0Adef test_loop_colors():%0A%0A for scheme in ('Linux', 'NoColor','LightBG'):%0A%0A
def test
@@ -1334,16 +1334,24 @@
rize():%0A
+
p =
@@ -1359,16 +1359,24 @@
arser()%0A
+
f1 =
@@ -1401,68 +1401,684 @@
str'
-)%0A f2 = p.format(u'1/0', 'str')%0A nt.assert_equal(f1, f2)%0A
+, scheme=scheme)%0A f2 = p.format(u'1/0', 'str', scheme=scheme)%0A nt.assert_equal(f1, f2)%0A%0A def test_parse_sample():%0A %22%22%22and test writing to a buffer%22%22%22%0A buf = io.StringIO()%0A p = Parser()%0A p.format(sample, buf, scheme=scheme)%0A buf.seek(0)%0A f1 = buf.read()%0A%0A nt.assert_not_in('ERROR', f1)%0A%0A def test_parse_error():%0A p = Parser()%0A f1 = p.format(')', 'str', scheme=scheme)%0A if scheme != 'NoColor':%0A nt.assert_in('ERROR', f1)%0A%0A yield test_unicode_colorize%0A yield test_parse_sample%0A yield test_parse_error
%0A
|
1eee9dfa6f7ea359f0dc4d0bf7450b3c96d3731d
|
Remove unnecessary var
|
reunition/apps/reunions/management/commands/setalumniusersfromrsvps.py
|
reunition/apps/reunions/management/commands/setalumniusersfromrsvps.py
|
from django.core.management.base import NoArgsCommand
from django.db.models.fields import related
from reunition.apps.alumni import models as alumni_m
from reunition.apps.reunions import models as reunions_m
class Command(NoArgsCommand):
help = 'Associate reunions.Rsvp.created_by to alumni.Person.user when not yet set'
def handle_noargs(self, **options):
for rsvp in reunions_m.Rsvp.objects.all():
user = rsvp.created_by
try:
user.person
except alumni_m.Person.DoesNotExist, e:
first_alumni_added = rsvp.rsvpalumniattendee_set.order_by('created').first()
if first_alumni_added:
person = first_alumni_added.person
print 'Associating user', user, 'with person', person
person.user = user
person.save()
|
Python
| 0.000007
|
@@ -545,11 +545,8 @@
xist
-, e
:%0A
|
4308b5f351aef2339a2166a12b02ecc74e58d1d7
|
Remove too far away points (filtering by time)
|
server/lib/python/cartodb_services/cartodb_services/mapbox/isolines.py
|
server/lib/python/cartodb_services/cartodb_services/mapbox/isolines.py
|
'''
Python implementation for Mapbox services based isolines.
Uses the Mapbox Time Matrix service.
'''
import json
from cartodb_services.tools import Coordinate
from cartodb_services.tools.spherical import (get_angles,
calculate_dest_location)
from cartodb_services.mapbox.matrix_client import (validate_profile,
DEFAULT_PROFILE,
PROFILE_WALKING,
PROFILE_DRIVING,
PROFILE_CYCLING,
ENTRY_DURATIONS,
ENTRY_DESTINATIONS,
ENTRY_LOCATION)
MAX_SPEEDS = {
PROFILE_WALKING: 3.3333333, # In m/s, assuming 12km/h walking speed
PROFILE_CYCLING: 16.67, # In m/s, assuming 60km/h max speed
PROFILE_DRIVING: 41.67 # In m/s, assuming 140km/h max speed
}
DEFAULT_NUM_ANGLES = 24
DEFAULT_MAX_ITERS = 5
DEFAULT_TOLERANCE = 0.1
MATRIX_NUM_ANGLES = DEFAULT_NUM_ANGLES
MATRIX_MAX_ITERS = DEFAULT_MAX_ITERS
MATRIX_TOLERANCE = DEFAULT_TOLERANCE
UNIT_FACTOR_ISOCHRONE = 1.0
UNIT_FACTOR_ISODISTANCE = 1000.0
DEFAULT_UNIT_FACTOR = UNIT_FACTOR_ISOCHRONE
class MapboxIsolines():
'''
Python wrapper for Mapbox services based isolines.
'''
def __init__(self, matrix_client, logger, service_params=None):
service_params = service_params or {}
self._matrix_client = matrix_client
self._logger = logger
def _calculate_matrix_cost(self, origin, targets, isorange,
profile=DEFAULT_PROFILE,
unit_factor=UNIT_FACTOR_ISOCHRONE,
number_of_angles=MATRIX_NUM_ANGLES):
response = self._matrix_client.matrix([origin] + targets,
profile)
json_response = json.loads(response)
if not json_response:
return []
costs = [None] * number_of_angles
destinations = [None] * number_of_angles
for idx, cost in enumerate(json_response[ENTRY_DURATIONS][0][1:]):
if cost:
costs[idx] = cost * unit_factor
else:
costs[idx] = isorange
for idx, destination in enumerate(json_response[ENTRY_DESTINATIONS][1:]):
destinations[idx] = Coordinate(destination[ENTRY_LOCATION][0],
destination[ENTRY_LOCATION][1])
return costs, destinations
def calculate_isochrone(self, origin, time_ranges,
profile=DEFAULT_PROFILE):
validate_profile(profile)
max_speed = MAX_SPEEDS[profile]
isochrones = []
for time_range in time_ranges:
upper_rmax = max_speed * time_range # an upper bound for the radius
coordinates = self.calculate_isoline(origin=origin,
isorange=time_range,
upper_rmax=upper_rmax,
cost_method=self._calculate_matrix_cost,
profile=profile,
unit_factor=UNIT_FACTOR_ISOCHRONE,
number_of_angles=MATRIX_NUM_ANGLES,
max_iterations=MATRIX_MAX_ITERS,
tolerance=MATRIX_TOLERANCE)
isochrones.append(MapboxIsochronesResponse(coordinates,
time_range))
return isochrones
def calculate_isodistance(self, origin, distance_range,
profile=DEFAULT_PROFILE):
validate_profile(profile)
max_speed = MAX_SPEEDS[profile]
time_range = distance_range / max_speed
return self.calculate_isochrone(origin=origin,
time_ranges=[time_range],
profile=profile)[0].coordinates
def calculate_isoline(self, origin, isorange, upper_rmax,
cost_method=_calculate_matrix_cost,
profile=DEFAULT_PROFILE,
unit_factor=DEFAULT_UNIT_FACTOR,
number_of_angles=DEFAULT_NUM_ANGLES,
max_iterations=DEFAULT_MAX_ITERS,
tolerance=DEFAULT_TOLERANCE):
# Formally, a solution is an array of {angle, radius, lat, lon, cost}
# with cardinality number_of_angles
# we're looking for a solution in which
# abs(cost - isorange) / isorange <= TOLERANCE
# Initial setup
angles = get_angles(number_of_angles)
rmax = [upper_rmax] * number_of_angles
rmin = [0.0] * number_of_angles
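        # rmin/rmax bracket the candidate radius for each angle; the binary search below narrows them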
location_estimates = [calculate_dest_location(origin, a,
upper_rmax / 2.0)
for a in angles]
# Iterate to refine the first solution
for i in xrange(0, max_iterations):
# Calculate the "actual" cost for each location estimate.
# NOTE: sometimes it cannot calculate the cost and returns None.
# Just assume isorange and stop the calculations there
costs, destinations = cost_method(origin=origin,
targets=location_estimates,
isorange=isorange,
profile=profile,
unit_factor=unit_factor,
number_of_angles=number_of_angles)
if not costs:
continue
errors = [(cost - isorange) / float(isorange) for cost in costs]
max_abs_error = max([abs(e) for e in errors])
if max_abs_error <= tolerance:
# good enough, stop there
break
# let's refine the solution, binary search
for j in xrange(0, number_of_angles):
if abs(errors[j]) > tolerance:
if errors[j] > 0:
rmax[j] = (rmax[j] + rmin[j]) / 2.0
else:
rmin[j] = (rmax[j] + rmin[j]) / 2.0
location_estimates[j] = calculate_dest_location(origin,
angles[j],
(rmax[j] + rmin[j]) / 2.0)
# delete points that got None
location_estimates_filtered = []
for i, c in enumerate(costs):
if c != isorange:
location_estimates_filtered.append(destinations[i])
return location_estimates_filtered
class MapboxIsochronesResponse:
def __init__(self, coordinates, duration):
self._coordinates = coordinates
self._duration = duration
@property
def coordinates(self):
return self._coordinates
@property
def duration(self):
return self._duration
|
Python
| 0.000001
|
@@ -7009,10 +7009,9 @@
f c
-!=
+%3C
iso
@@ -7015,16 +7015,34 @@
isorange
+ * (1 + tolerance)
:%0A
|
5fc54a2120fbc9151073c9b247e3fd7e8e79a9fa
|
Remove premature attribute from migration script (Fixes #283)
|
src/adhocracy/migration/versions/054_add_hierachical_categorybadges.py
|
src/adhocracy/migration/versions/054_add_hierachical_categorybadges.py
|
from datetime import datetime
from sqlalchemy import Column, ForeignKey, MetaData, Table
from sqlalchemy import Boolean, Integer, DateTime, String, Unicode, LargeBinary
metadata = MetaData()
#table to update
badge_table = Table(
'badge', metadata,
#common attributes
Column('id', Integer, primary_key=True),
Column('type', String(40), nullable=False),
Column('create_time', DateTime, default=datetime.utcnow),
Column('title', Unicode(40), nullable=False),
Column('color', Unicode(7), nullable=False),
Column('description', Unicode(255), default=u'', nullable=False),
Column('instance_id', Integer, ForeignKey('instance.id',
ondelete="CASCADE",),
nullable=True),
# attributes for UserBadges
Column('group_id', Integer, ForeignKey('group.id', ondelete="CASCADE")),
Column('display_group', Boolean, default=False),
Column('visible', Boolean, default=True),
# attributes for ThumbnailBadges
Column('thumbnail', LargeBinary, default=None, nullable=True)
)
def upgrade(migrate_engine):
#use sqlalchemy-migrate database connection
metadata.bind = migrate_engine
#autoload needed tables
instance_table = Table('instance', metadata, autoload=True)
#add hierachical columns to the table
select_child_desc = Column('select_child_description', Unicode(255), default=u'', nullable=True)
parent = Column('parent_id', Integer, ForeignKey('badge.id', ondelete="CASCADE"),
nullable=True)
#create/recreate the table
select_child_desc.create(badge_table)
select_child_desc.alter(nullable=False)
parent.create(badge_table)
def downgrade(migrate_engine):
raise NotImplementedError()
|
Python
| 0
|
@@ -962,111 +962,8 @@
e),%0A
- # attributes for ThumbnailBadges%0A Column('thumbnail', LargeBinary, default=None, nullable=True)%0A
)%0A%0A%0A
|
a1bf5aaf3866eea7370c1a401a5e3d5791f97539
|
Add exception for inline encoded images.
|
better_figures_and_images/better_figures_and_images.py
|
better_figures_and_images/better_figures_and_images.py
|
"""
Better Figures & Images
------------------------
This plugin:
- Adds a style="width: ???px; height: auto;" to each image in the content
- Also adds the width of the contained image to any parent div.figures.
- If RESPONSIVE_IMAGES == True, also adds style="max-width: 100%;"
- Corrects alt text: if alt == image filename, set alt = ''
TODO: Need to add a test.py for this plugin.
"""
from __future__ import unicode_literals
from os import path, access, R_OK
from pelican import signals
from bs4 import BeautifulSoup
from PIL import Image
import logging
logger = logging.getLogger(__name__)
def content_object_init(instance):
if instance._content is not None:
content = instance._content
soup = BeautifulSoup(content, 'html.parser')
if 'img' in content:
for img in soup('img'):
logger.debug('Better Fig. PATH: %s', instance.settings['PATH'])
logger.debug('Better Fig. img.src: %s', img['src'])
img_path, img_filename = path.split(img['src'])
logger.debug('Better Fig. img_path: %s', img_path)
logger.debug('Better Fig. img_fname: %s', img_filename)
# Strip off {filename}, |filename| or /static
if img_path.startswith(('{filename}', '|filename|')):
img_path = img_path[10:]
elif img_path.startswith('/static'):
img_path = img_path[7:]
elif img_path.startswith('data:image'):
# Image is encoded in-line (not a file).
break
else:
logger.warning('Better Fig. Error: img_path should start with either {filename}, |filename| or /static')
# Build the source image filename
src = instance.settings['PATH'] + img_path + '/' + img_filename
logger.debug('Better Fig. src: %s', src)
if not (path.isfile(src) and access(src, R_OK)):
logger.error('Better Fig. Error: image not found: %s', src)
# Open the source image and query dimensions; build style string
im = Image.open(src)
extra_style = 'width: {}px; height: auto;'.format(im.size[0])
if 'RESPONSIVE_IMAGES' in instance.settings and instance.settings['RESPONSIVE_IMAGES']:
extra_style += ' max-width: 100%;'
if img.get('style'):
img['style'] += extra_style
else:
img['style'] = extra_style
if img['alt'] == img['src']:
img['alt'] = ''
fig = img.find_parent('div', 'figure')
if fig:
if fig.get('style'):
fig['style'] += extra_style
else:
fig['style'] = extra_style
instance._content = soup.decode()
def register():
signals.content_object_init.connect(content_object_init)
|
Python
| 0
|
@@ -1598,13 +1598,16 @@
-break
+continue
%0A
|
b06687b1e78645a055a314be4b1af693e2c3be05
|
remove obsolete arguments
|
RatS/filmaffinity/filmaffinity_site.py
|
RatS/filmaffinity/filmaffinity_site.py
|
import time
from RatS.base.base_site import Site
from selenium.webdriver.common.by import By
class FilmAffinity(Site):
def __init__(self, args):
login_form_selector = "//form[@id='login-form']"
self.LOGIN_USERNAME_SELECTOR = login_form_selector + "//input[@name='username']"
self.LOGIN_PASSWORD_SELECTOR = login_form_selector + "//input[@name='password']"
self.LOGIN_BUTTON_SELECTOR = login_form_selector + "//input[@type='submit']"
super(FilmAffinity, self).__init__(args)
self.MY_RATINGS_URL = "https://www.filmaffinity.com/en/myvotes.php"
def _get_login_page_url(self):
return "https://www.filmaffinity.com/en/login.php"
def _handle_cookie_notice_if_present(self):
cookie_notices = self.browser.find_elements(By.ID, "qc-cmp2-container")
if len(cookie_notices) == 0:
return
cookie_notice = cookie_notices[0]
if cookie_notice is not None:
# agree
cookie_accept_button = cookie_notice.find_elements(
By.CSS_SELECTOR, By.CSS_SELECTOR, "div.qc-cmp2-summary-buttons button"
)
if cookie_accept_button is not None and len(cookie_accept_button) > 1:
cookie_accept_button[1].click()
time.sleep(2)
# agree all
cookie_accept_button = cookie_notice.find_elements(
By.CSS_SELECTOR,
By.CSS_SELECTOR,
"div.qc-cmp2-buttons-desktop button",
)
if cookie_accept_button is not None and len(cookie_accept_button) > 1:
cookie_accept_button[1].click()
time.sleep(2)
|
Python
| 0.005914
|
@@ -1066,33 +1066,16 @@
ELECTOR,
- By.CSS_SELECTOR,
%22div.qc
@@ -1416,45 +1416,8 @@
OR,%0A
- By.CSS_SELECTOR,%0A
|
cd17eba08cbb898b1cf6d0bb622315d851b4eeec
|
The main parameter object is a list
|
ocradmin/ocr/tools/manager.py
|
ocradmin/ocr/tools/manager.py
|
"""
Plugin manager.
"""
import os
import sys
class PluginManager(object):
"""
Class for managing OCR tool plugins.
"""
def __init__(self):
pass
@classmethod
def get_plugins(cls):
"""
List available OCR plugins.
"""
engines = []
plugdir = os.path.join(os.path.dirname(__file__), "plugins")
for fname in os.listdir(plugdir):
if not fname.endswith("wrapper.py"):
continue
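            # e.g. "foo_wrapper.py" -> module name "foo_wrapper"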
modname = os.path.splitext(os.path.basename(fname))[0]
pm = __import__(
modname,
fromlist=["main_class"])
if not hasattr(pm, "main_class"):
continue
mod = pm.main_class()
engines.append(dict(
name=mod.name,
type="object",
description=mod.description,
parameters=True,
))
return engines
@classmethod
def get_provider(cls, provides=None):
"""
Get a list of available OCR engines.
"""
engines = []
plugdir = os.path.join(os.path.dirname(__file__), "plugins")
for fname in os.listdir(plugdir):
if fname.endswith("wrapper.py"):
modname = os.path.splitext(os.path.basename(fname))[0]
pm = __import__(
modname,
fromlist=["main_class"])
if not hasattr(pm, "main_class"):
continue
mod = pm.main_class()
if provides is None:
engines.append(mod.name)
elif isinstance(provides, str) and provides in mod.capabilities:
engines.append(mod.name)
elif isinstance(provides, tuple):
for cap in provides:
if cap in mod.capabilities:
engines.append(mod.name)
break
return engines
@classmethod
def get(cls, name, *args, **kwargs):
"""
Get a plugin directly.
"""
return cls._main_class(name)
@classmethod
def get_info(cls, name, *args, **kwargs):
"""
Get info about a plugin.
"""
mc = cls._main_class(name)
if mc is not None:
return mc.get_info(*args, **kwargs)
@classmethod
def get_parameters(cls, name, *args, **kwargs):
"""
Get general options for an engine.
"""
print "Getting options: " + name
mc = cls._main_class(name)
if mc is not None:
return mc.get_parameters(*args, **kwargs)
@classmethod
def get_trainer(cls, name, *args, **kwargs):
"""
Fetch a given trainer class. Currently this is the
same as the converter.
"""
return cls.get_converter(name, *args, **kwargs)
@classmethod
def get_converter(cls, name, *args, **kwargs):
"""
Get a converter with a given name.
"""
mc = cls._main_class(name)
if mc is not None:
return mc(*args, **kwargs)
@classmethod
def get_components(cls, name, *args, **kwargs):
"""
Get available components of the given type for given plugin.
"""
mc = cls._main_class(name)
if mc is not None:
return mc.get_components(*args, **kwargs)
@classmethod
def _main_class(cls, name):
"""
Get the plugin class with a given name.
"""
plugdir = os.path.join(os.path.dirname(__file__), "plugins")
for fname in os.listdir(plugdir):
modname = "%s_wrapper.py" % name
if fname == modname:
# FIXME: Hard-coded module import path needs to change
# TODO: Generally find a better way of doing this...
pm = __import__("%s_wrapper" % name, fromlist=["main_class"])
return pm.main_class()
|
Python
| 0.999856
|
@@ -836,21 +836,19 @@
type=%22
-objec
+lis
t%22,%0A
|
c79c3b7f920f4bcf5fb69cf74b224e6ff37a709b
|
test triggering travis
|
fabre_test.py
|
fabre_test.py
|
#!/usr/bin/env python
# coding=UTF-8
import pytest
import sys
sys.exit(0)
|
Python
| 0.000001
|
@@ -61,16 +61,115 @@
s%0A%0A%0A
-sys.exit(0
+# content of test_assert1.py%0Adef f():%0A return 3%0A%0Adef test_function():%0A assert f() == 4%0A%0A%0Atest_function(
)%0A
|
0fb800cd42f1545e8d5e744af1ff81922c930448
|
Add Google analytics ID
|
pelicanconf.py
|
pelicanconf.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
from datetime import datetime
import os
import sys
BASE_DIR = os.path.dirname(__file__)
# Clone the official plugin repo to the `official_plugins` dir
# (https://github.com/getpelican/pelican-plugins)
sys.path.append(os.path.join(BASE_DIR, "official_plugins"))
AUTHOR = u'Leonardo Zhou'
SITENAME = u'翼图南'
SITE_DESCRIPTION = u'故九萬里,則風斯在下矣,而後乃今培風;背負青天而莫之夭閼者,而後乃今將圖南'
SITEURL = ''
TIMEZONE = 'Asia/Shanghai'
DEFAULT_LANG = u'zh'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
# Social widget
SOCIAL = (
('twitter', 'https://twitter.com/glasslion'),
('envelope', 'mailto:glasslion@gmail.com'),
('github', 'https://github.com/glasslion'),
('stack-overflow', 'http://stackoverflow.com/users/1093020/leonardo-z'),
)
LOCALE = ('usa', 'en_US.utf8')
DEFAULT_DATE_FORMAT = '%b %d, %Y'
# DIRECT_TEMPLATES = ('index', 'tags', 'categories', 'archives')
# PAGINATED_DIRECT_TEMPLATES = (('blog',))
PLUGINS = ['summary', 'assets', 'neighbors']
# Assets
ASSET_BUNDLES = ()
ASSET_CONFIG = (('sass_bin', 'sass'), )
SUMMARY_MAX_LENGTH = 20
DEFAULT_PAGINATION = 5
# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = True
# Static content
STATIC_PATHS = ['images', 'extra/CNAME',]
EXTRA_PATH_METADATA = {'extra/CNAME': {'path': 'CNAME'},}
# Url
ARTICLE_URL = '{slug}/'
ARTICLE_SAVE_AS = '{slug}/index.html'
# Archive
YEAR_ARCHIVE_SAVE_AS = 'archives/{date:%Y}/index.html'
MONTH_ARCHIVE_SAVE_AS = 'archives/{date:%Y}/{date:%m}/index.html'
# Custom theme
THEME = '../pelican-zha'
CURRENT_DATETIME = datetime.now()
QINIU_BUCKET_URL = 'http://wing2south.qiniudn.com'
CDN_URL = SITEURL
|
Python
| 0.000002
|
@@ -901,16 +901,51 @@
z'),%0A)%0A%0A
+GOOGLE_ANALYTICS = %22UA-42951023-1%22%0A
%0ALOCALE
|
df40b12248bfac18d6b0c2c247b67d101f4a0635
|
revise last_day
|
muxiwebsite/book/views.py
|
muxiwebsite/book/views.py
|
# coding: utf-8
"""
views.py
~~~~~~~~
    Muxi Books view functions
    url                 func
    /book               home page; shows: 1. recently added (6), 2. recently borrowed
    /book/logout        log out
    /bookin             add new books (admins only)
    /search             site search (two modes supported)
    /search_results     search results page (with borrow form); about borrow status
    /admin              admin backend (admins only)
    /rter               registration endpoint (admins only)
    /<current_user>     profile page (recent borrowings) (due within 3 days)
    Overdue books trigger a flash message
    Flash messages with a personal touch
"""
from . import books
from .. import db, app
# from ..auth._decorate import auth_login
from werkzeug import secure_filename
from muxiwebsite.models import User, Book
from forms import BookForm, GetForm, LoginForm, RterForm
from flask import render_template, redirect, url_for, flash, request, session
from flask.ext.login import login_user, logout_user, login_required, current_user
from urllib2 import urlopen
import json
import datetime
import os
"""
|
/------------/\-------------\
/ \
    |          Muxi team rocks          |
"""
# ------------------------------------------------------
# """ 我们在路上 前方不会太远 """
# ------------------------------------------------------
# File extensions allowed for upload
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
def allowed_file(filename):
"""检查文件扩展名函数"""
return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
# Visible to all visitors
@books.route('/', methods=["POST", "GET"])
# @auth_login(redirect_url='user')
def home():
"""
    Home page view
    1. recently added
    2. recently borrowed
    new_book_list: list of recently added books (6 by default, ordered by time [id])
"""
form = LoginForm()
new_book_list = Book.query.order_by('-id').all()[:9]
get_book_list = Book.query.filter_by(status=True).order_by('start desc').all()[:2]
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user)
return redirect(url_for('books.home', id=current_user.id))
flash('用户名或密码错误!')
range_book_count = range(len(new_book_list)/6 + 1)
return render_template('/pages/home.html', new_book_list=new_book_list,
get_book_list=get_book_list, form=form,
range_book_count=range_book_count)
# Visible to all visitors
@books.route('/search/', methods=["POST", "GET"])
def search():
"""
    Search view
    1. search form
    2. display the search result list (at most 5 results loaded)
    Search modes:
    1. by title (the title must be exact)
    2. by category (returns books in that category: backend, frontend, design, internet, other)
"""
    if request.method == 'POST':
        """The frontend input tag's action does the redirect;
        submission is handled by search_results"""
pass
# Visible to all visitors
@books.route('/search_results/')
def search_results():
"""
    Search results page
    Provides the book borrowing form
"""
search = request.args.get('search')
page = int(request.args.get('page') or 1)
book_all = Book.query.all()
book_search = {}
book_result = []
get_book_list = []
for book in book_all:
book_search.setdefault(str(book.name)+str(book.bid)+str(book.tag)+str(book.author), book)
for key in book_search.keys():
if search in key:
book_result.append(book_search[key])
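    # number of pages needed to display every match, MAX_SEARCH_RESULTS per page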
last_page = len(book_result)/app.config['MAX_SEARCH_RESULTS']+1
for each_book in book_result[(page-1)*app.config['MAX_SEARCH_RESULTS']:(page*app.config['MAX_SEARCH_RESULTS'])]:
get_book_list.append(each_book)
return render_template('/pages/search_results.html',
get_book_list=get_book_list, page=page, last_page=last_page,
search=search
)
# Visible to all visitors, but only logged-in users may borrow (HTML change)
@books.route('/info/<int:id>/', methods=["POST", "GET"])
def info(id):
form = GetForm()
book = Book.query.get_or_404(id)
if form.validate_on_submit():
formday = str(form.day.data)
day = formday[0:4] + formday[5:7] + formday[8:10]
start = str(datetime.date.today().strftime('%Y%m%d'))
dminuss = int(day)-int(start)
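        # dminuss >= 0 means the requested due date is today or later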
if dminuss >= 0:
book.start = start
book.user_id = current_user.id
            book.status = True  # borrowed
book.end = day
return redirect(url_for('profile.user_profile', id=current_user.id))
else:
flash('光阴似箭、岁月如梭,时间-你不能篡改她,更不能逆转她!')
return redirect(url_for('books.info', id=id))
return render_template('/pages/info.html', book=book, form=form)
# Admins only
@books.route('/bookin/', methods=["POST", "GET"])
@login_required
def bookin():
"""
    Book entry
    Given a book's title, store its
    title, cover, and summary in the database
"""
if current_user.role_id == 2:
form = BookForm()
if form.validate_on_submit():
bookname = form.bookname.data
get_url = "https://api.douban.com/v2/book/search?q=%s" % bookname
resp_1 = json.loads(urlopen(get_url).read().decode('utf-8'))
book_id = resp_1['books'][0]['id']
url = "https://api.douban.com/v2/book/%s" % book_id
resp_2 = json.loads(urlopen(url).read().decode('utf-8'))
book = Book(url=url, name=resp_2['title'], author=resp_2['author'][0], \
tag=form.tag.data, summary=resp_2['summary'], \
image=resp_2['images'].get('large'), user_id=None, end=None, \
status=False)
db.session.add(book)
db.session.commit()
flash('书籍已录入!')
return redirect(url_for('books.bookin'))
return render_template('/pages/bookin.html', form=form)
else:
return redirect(url_for('books.home'))
# Visible to all logged-in users
@books.route('/logout/')
@login_required
def logout():
"""退出视图函数"""
logout_user()
return redirect(url_for('home'))
|
Python
| 0.000325
|
@@ -3493,16 +3493,17 @@
_page =
+(
len(book
@@ -3510,16 +3510,19 @@
_result)
+-1)
/app.con
@@ -3549,16 +3549,17 @@
LTS'%5D+1%0A
+%0A
for
|
94edba9f203d11b9ede8cf32eced2b409f9ce491
|
Fix bug with *
|
rs.py
|
rs.py
|
#!/usr/bin/env python
from __future__ import print_function
import re, string, sys
# XXX: this sucks
import sre_parse
orig_expand_template = sre_parse.expand_template
def wrap_expand_template(template, match):
class MatchWrapper(object):
def __init__(self):
self.string = match.string
def group(self, n):
return match.group(n) or self.string[:0]
return orig_expand_template(template, MatchWrapper())
sre_parse.expand_template = wrap_expand_template
rep = re.compile(r'(\([^\(\)]+\))\^\^(\([^\)]+\))')
ln = re.compile(r'\(\^\^([^\(\)]+)\)')
def get_delim(cmd, delim):
esc = False
conv = False
flags = 0
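    # a leading '+' enables convergence mode; a leading '*' requests case-insensitive matching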
if cmd.startswith('+'):
cmd = cmd[1:]
conv = True
if cmd.startswith('*'):
cmd = cmd[1:]
flags = re.IGNORE
for i, c in enumerate(cmd):
        if esc: esc = False
        elif c == '\\': esc = True
elif cmd.startswith(delim, i):
return cmd[:i], cmd[i+len(delim):], conv, flags
return '^', cmd, conv, flags
def expand(debug, line):
while True:
m = ln.search(line)
if m is None:
if debug: print('nothing more to get length of')
break
line = line[:m.start()] + str(len(m.group(1))) + line[m.end():]
if debug: print('result with length expanded: %s' %
line.encode('string-escape'))
while True:
m = rep.search(line)
if m is None:
            if debug: print('no more repetitions to expand')
break
l, r = m.group(1), m.group(2)
        if debug: print('found repetition %s^^%s' % (l, r))
l = l[1:-1]
line = line[:m.start()] + l*int(r[1:-1]) + line[m.end():]
        if debug: print('result with repetition expanded: %s' %
line.encode('string-escape'))
return line
def run(delim, cmds, debug):
patterns = []
macros = {}
for cmd in cmds:
if debug: print('reading command %s' % cmd)
if cmd.startswith('$$'):
if string is None:
sys.exit('macros can only be used if there is a string module')
try:
i = cmd.index('=')
except:
sys.exit('invalid macro definition: ' + cmd)
if debug: print("setting macro '%s' to '%s'" % (cmd[2:i], cmd[i+1:]))
macros[cmd[2:i]] = cmd[i+1:]
else:
pat, repl, conv, flags = get_delim(cmd, delim)
if debug:
print('got pattern %s' % pat)
print('got replacement %s' % repl)
if string is not None:
pat = string.Template(pat).safe_substitute(macros)
repl = string.Template(repl).safe_substitute(macros)
if debug:
print('substituting macros into pattern resulted in %s' % pat)
print('substituting macros into replacement resulted in %s' %
repl)
patterns.append((re.compile(pat, flags), repl, conv))
if debug: print('processing input!')
for line in sys.stdin:
line = line.rstrip('\n').replace('^^', r'^\^')
if debug: print('current line: %s' % line)
for find, replace, conv in patterns:
if debug: print('applying pattern %s with replacement %s' % (
find.pattern, replace))
if conv:
orig = line
while True:
line = expand(debug, find.sub(replace, line))
if line == orig: break
if debug:
print('converged %s to %s' % (
orig.encode('string-escape'),
line.encode('string-escape')))
orig = line
else:
line = expand(debug, find.sub(replace, line))
if debug:
print('result of application is %s' %
line.encode('string-escape'))
print(line.replace(r'^\^', '^^'))
def usage():
print('usage: %s [-h] [-g] [-f] <script/file>' % sys.argv[0], file=sys.stderr)
def main():
if '-h' in sys.argv:
usage()
sys.exit(0)
use_file = False
cmds = []
delim = '/'
debug = False
for arg in sys.argv[1:]:
if arg == '-f':
if use_file:
usage()
sys.exit(1)
use_file = True
elif arg.startswith('-d'):
delim = arg[2:]
elif arg == '-g':
debug = True
else:
cmds.append(arg)
if not cmds:
print('no commands given', file=sys.stderr)
usage()
sys.exit(1)
if use_file:
files = cmds
cmds = []
for fn in files:
with open(fn, 'r') as f:
for line in f:
line = line.rstrip('\n')
if not line or line.startswith('\\#'): continue
cmds.append(line)
run(delim, cmds, debug)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -804,16 +804,20 @@
e.IGNORE
+CASE
%0A for
|
24ea0a43ef21a33c3f3f2f526530d23ad3ff7d90
|
fix code style
|
tensorflow_text/python/ops/hub_module_splitter_test.py
|
tensorflow_text/python/ops/hub_module_splitter_test.py
|
# coding=utf-8
# Copyright 2020 TF.Text Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# encoding=utf-8
"""Tests for HubModuleSplitter."""
from absl.testing import parameterized
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import save
from tensorflow_text.python.ops import hub_module_splitter
def _Utf8(char):
return char.encode("utf-8")
@test_util.run_all_in_graph_and_eager_modes
class HubModuleSplitterTest(parameterized.TestCase, test.TestCase):
@parameterized.parameters([
# Test scalar input.
dict(
text_input=_Utf8(u"新华社北京"),
expected_pieces=[_Utf8(u"新华社"), _Utf8(u"北京")],
expected_starts=[0, 9],
expected_ends=[9, 15]
),
# Test rank 1 input.
dict(
text_input=[_Utf8(u"新华社北京"), _Utf8(u"中文测试")],
expected_pieces=[[_Utf8(u"新华社"), _Utf8(u"北京")],
[_Utf8(u"中文"), _Utf8(u"测试")]],
expected_starts=[[0, 9], [0, 6]],
expected_ends=[[9, 15], [6, 12]]
),
# Test rank 2 ragged input.
dict(
text_input=ragged_factory_ops.constant_value(
[[_Utf8(u"新华社北京"), _Utf8(u"中文测试")],
[_Utf8(u"新华社上海")]]),
expected_pieces=[[[_Utf8(u"新华社"), _Utf8(u"北京")],
[_Utf8(u"中文"), _Utf8(u"测试")]],
[[_Utf8(u"新华社"), _Utf8(u"上海")]]],
expected_starts=[[[0, 9], [0, 6]], [[0, 9]]],
expected_ends=[[[9, 15], [6, 12]], [[9, 15]]]
),
# Test rank 2 dense input.
dict(
text_input=ragged_factory_ops.constant_value(
[[_Utf8(u"新华社北京"), _Utf8(u"中文测试")],
[_Utf8(u"新华社上海"), _Utf8(u"英国交通")]]),
expected_pieces=[[[_Utf8(u"新华社"), _Utf8(u"北京")],
[_Utf8(u"中文"), _Utf8(u"测试")]],
[[_Utf8(u"新华社"), _Utf8(u"上海")],
[_Utf8(u"英国"), _Utf8(u"交通")]]],
expected_starts=[[[0, 9], [0, 6]], [[0, 9], [0, 6]]],
expected_ends=[[[9, 15], [6, 12]], [[9, 15], [6, 12]]]
),
# Test ragged input with rank higher than 2.
dict(
text_input=ragged_factory_ops.constant_value(
[
[[_Utf8(u"新华社北京")], [_Utf8(u"中文测试")]],
[[_Utf8(u"新华社上海")]]
]),
expected_pieces=[
[[[_Utf8(u"新华社"), _Utf8(u"北京")]],
[[_Utf8(u"中文"), _Utf8(u"测试")]]],
[[[_Utf8(u"新华社"), _Utf8(u"上海")]]]],
expected_starts=[
[[[0, 9]], [[0, 6]]],
[[[0, 9]]]],
expected_ends=[
[[[9, 15]], [[6, 12]]],
[[[9, 15]]]]
)
])
def testSplit(self,
text_input,
expected_pieces,
expected_starts,
expected_ends):
hub_module_handle = ("tensorflow_text/python/ops/test_data/"
"segmenter_hub_module")
splitter = hub_module_splitter.HubModuleSplitter(hub_module_handle)
pieces, starts, ends = splitter.split_with_offsets(text_input)
pieces_no_offset = splitter.split(text_input)
self.evaluate(lookup_ops.tables_initializer())
self.evaluate(variables_lib.global_variables_initializer())
self.assertAllEqual(expected_pieces, pieces)
self.assertAllEqual(expected_starts, starts)
self.assertAllEqual(expected_ends, ends)
self.assertAllEqual(expected_pieces, pieces_no_offset)
def exportSavedModel(self):
hub_module_handle = ("tensorflow_text/python/ops/test_data/"
"segmenter_hub_module")
splitter = hub_module_splitter.HubModuleSplitter(hub_module_handle)
save.save(splitter, 'ram://saved_model')
self.assertEqual(file_io.file_exists_v2('ram://saved_model'), True)
if __name__ == "__main__":
test.main()
|
Python
| 0.000022
|
@@ -4495,17 +4495,17 @@
litter,
-'
+%22
ram://sa
@@ -4513,17 +4513,17 @@
ed_model
-'
+%22
)%0A se
@@ -4560,17 +4560,17 @@
ists_v2(
-'
+%22
ram://sa
@@ -4582,9 +4582,9 @@
odel
-'
+%22
), T
|
6a1176d547694b535bc581d5a0af87230d533caf
|
set to version 3.305.533
|
base/pythonvideoannotator/pythonvideoannotator/__init__.py
|
base/pythonvideoannotator/pythonvideoannotator/__init__.py
|
# !/usr/bin/python3
# -*- coding: utf-8 -*-
__version__ = "3.305.532"
__author__ = ["Ricardo Ribeiro", "Carlos Mao de Ferro", "Hugo Cachitas"]
__credits__ = ["Ricardo Ribeiro", "Carlos Mao de Ferro", "Hugo Cachitas"]
__license__ = "Attribution-NonCommercial-ShareAlike 4.0 International"
__maintainer__ = ["Ricardo Ribeiro", "Carlos Mao de Ferro"]
__email__ = ["ricardojvr at gmail.com", "cajomferro at gmail.com"]
__status__ = "Development"
from confapp import conf; conf += 'pythonvideoannotator.settings'
import logging
logger = logging.getLogger(__name__)
logger.setLevel(conf.APP_LOG_HANDLER_LEVEL)
if conf.APP_LOG_HANDLER_FILE:
logger = logging.getLogger()
loggers_formatter = logging.Formatter(conf.PYFORMS_LOG_FORMAT)
fh = logging.FileHandler(conf.APP_LOG_HANDLER_FILE)
fh.setLevel(conf.APP_LOG_HANDLER_FILE_LEVEL)
fh.setFormatter(loggers_formatter)
logger.addHandler(fh)
|
Python
| 0.000001
|
@@ -65,9 +65,9 @@
5.53
-2
+3
%22%0A__
|
4af80f4a72618482135f388c3bc424fa12e1ccc4
|
refactor filter structure
|
shot_detector/filters/dsl/dsl_filter_mixin.py
|
shot_detector/filters/dsl/dsl_filter_mixin.py
|
# -*- coding: utf8 -*-
"""
This is part of shot detector.
Produced by w495 at 2017.05.04 04:18:27
"""
from __future__ import absolute_import, division, print_function
import collections
import logging
from shot_detector.utils.dsl import BaseDslOperatorMixin
from shot_detector.utils.dsl.dsl_kwargs import dsl_kwargs_decorator
class DslFilterMixin(BaseDslOperatorMixin):
"""
Basic filter mixin to build Filter-DSL
"""
__logger = logging.getLogger(__name__)
@staticmethod
def dsl_kwargs_decorator(*args, **kwargs):
return dsl_kwargs_decorator(args, kwargs)
def __or__(self, other):
"""
:param Filter other:
:return:
"""
return self.sequential(other)
def __ror__(self, other):
"""
:param Filter other:
:return:
"""
return self.sequential(other)
def sequential(self, other):
"""
:param other:
:return:
"""
from .filter_cast_features import FilterCastFeatures
from .filter_sequence import FilterSequence
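        # wrap plain callables so they can be chained like any other filter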
if not isinstance(other, DslFilterMixin):
other = FilterCastFeatures(
cast=other,
)
return FilterSequence(
sequential_filters=[
self, other
],
)
def apply_operator(self,
op_func=None,
others=None,
mode=None,
**kwargs):
"""
        :param op_func:
        :param others:
        :param mode:
:return:
"""
from .filter_operator import FilterOperator as Fo
other = others[0]
if not isinstance(other, DslFilterMixin):
other = self.scalar_to_filter(
value=other,
)
mode = Fo.LEFT
if mode is self.OPERATOR_RIGHT:
mode = Fo.RIGHT
return Fo(
parallel_filters=[self, other],
op_func=op_func,
mode=mode,
**kwargs
)
def to_filter(self, value):
"""
:param value:
:return:
"""
if isinstance(value, collections.Iterable):
return self.seq_to_filter(value)
return self.scalar_to_filter(value)
@staticmethod
def seq_to_filter(value):
"""
:param value:
:return:
"""
from .filter_cast_seq_value import FilterCastSeqValue
return FilterCastSeqValue(seq=value)
@staticmethod
def scalar_to_filter(value):
"""
:param value:
:return:
"""
from .filter_cast_scalar_value import FilterCastScalarValue
return FilterCastScalarValue(value=value)
def __contains__(self, item):
"""
:param Filter item:
:return:
"""
return self.intersect(item)
def i(self, *args, **kwargs):
"""
:param args:
:param kwargs:
:return:
"""
return self.intersect(*args, **kwargs)
def intersect(self, other, threshold=0):
"""
:param other:
:param threshold:
:return:
"""
from .filter_intersection import FilterIntersection
return FilterIntersection(
parallel_filters=[self, other],
threshold=threshold
)
|
Python
| 0.000001
|
@@ -530,32 +530,105 @@
orator(*
-args, **kwargs):
+dsl_rules):%0A %22%22%22%0A %0A :param dsl_rules: %0A :return: %0A %22%22%22
%0A
@@ -660,19 +660,17 @@
tor(
-args, kwarg
+*dsl_rule
s)%0A%0A
|
6e2997ba8a551e1a93b086b7eabc7b73f0d7aeb0
|
Remove extra blank line
|
test/data/observatory/repository/test_category_repo.py
|
test/data/observatory/repository/test_category_repo.py
|
import unittest
from cartoframes.data.observatory.category import Category
from cartoframes.exceptions import DiscoveryException
from cartoframes.data.observatory.entity import CatalogList
from cartoframes.data.observatory.repository.category_repo import CategoryRepository
from cartoframes.data.observatory.repository.repo_client import RepoClient
from ..examples import test_category1, test_categories, db_category1, db_category2
try:
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
class TestCategoryRepo(unittest.TestCase):
@patch.object(RepoClient, 'get_categories')
def test_get_all(self, mocked_repo):
# Given
mocked_repo.return_value = [db_category1, db_category2]
repo = CategoryRepository()
# When
categories = repo.get_all()
# Then
mocked_repo.assert_called_once_with(None)
assert isinstance(categories, CatalogList)
assert categories == test_categories
@patch.object(RepoClient, 'get_categories')
def test_get_all_when_empty(self, mocked_repo):
# Given
mocked_repo.return_value = []
repo = CategoryRepository()
# When
categories = repo.get_all()
# Then
mocked_repo.assert_called_once_with(None)
assert categories is None
@patch.object(RepoClient, 'get_categories_joined_datasets')
def test_get_all_only_uses_allowed_filters(self, mocked_repo):
# Given
mocked_repo.return_value = [db_category1, db_category2]
repo = CategoryRepository()
filters = {
'country_id': 'usa',
'dataset_id': 'carto-do.project.census2011',
'variable_id': 'population',
'geography_id': 'census-geo',
'variable_group_id': 'var-group',
'provider_id': 'open_data',
'fake_field_id': 'fake_value'
}
# When
categories = repo.get_all(filters)
# Then
mocked_repo.assert_called_once_with({
'country_id': 'usa'
})
assert categories == test_categories
@patch.object(RepoClient, 'get_categories')
def test_get_by_id(self, mocked_repo):
# Given
mocked_repo.return_value = [db_category1, db_category2]
requested_id = db_category1['id']
repo = CategoryRepository()
# When
category = repo.get_by_id(requested_id)
# Then
mocked_repo.assert_called_once_with({'id': requested_id})
assert category == test_category1
@patch.object(RepoClient, 'get_categories')
def test_get_by_id_unknown_fails(self, mocked_repo):
# Given
mocked_repo.return_value = []
requested_id = 'unknown_id'
repo = CategoryRepository()
# Then
with self.assertRaises(DiscoveryException):
repo.get_by_id(requested_id)
@patch.object(RepoClient, 'get_categories')
def test_get_by_id_list(self, mocked_repo):
# Given
mocked_repo.return_value = [db_category1, db_category2]
repo = CategoryRepository()
# When
categories = repo.get_by_id_list([db_category1['id'], db_category2['id']])
# Then
mocked_repo.assert_called_once_with({'id': [db_category1['id'], db_category2['id']]})
assert isinstance(categories, CatalogList)
assert categories == test_categories
@patch.object(RepoClient, '_run_query')
def test_get_by_country(self, mocked_repo):
# Given
mocked_repo.return_value = [db_category1, db_category2]
country_code = 'esp'
repo = CategoryRepository()
# When
categories = repo.get_all({'country_id': country_code})
# Then
query = 'SELECT DISTINCT c.* FROM categories_public c, datasets_public t'
mocked_repo.assert_called_once_with(query, {'country_id': country_code}, ['c.id = t.category_id'])
assert isinstance(categories, CatalogList)
assert categories == test_categories
@patch.object(RepoClient, 'get_categories')
def test_missing_fields_are_mapped_as_None(self, mocked_repo):
# Given
mocked_repo.return_value = [{'id': 'cat1'}]
repo = CategoryRepository()
expected_categories = CatalogList([Category({
'id': 'cat1',
'name': None
})])
# When
categories = repo.get_all()
# Then
assert categories == expected_categories
|
Python
| 0.99854
|
@@ -2125,17 +2125,16 @@
gories%0A%0A
-%0A
@pat
|
d82c37a85e3522f7cf7e26a220eb5946aec66ffe
|
Create docs from numpy
|
test/test_data_utils.py
|
test/test_data_utils.py
|
from cStringIO import StringIO
from nose.tools import raises
from microscopes.lda import utils
def test_docs_from_document_term_matrix():
dtm = [[2, 1], [3, 2]]
docs = [[0, 0, 1], [0, 0, 0, 1, 1]]
assert utils.docs_from_document_term_matrix(dtm) == docs
def test_docs_from_ldac_simple():
stream = StringIO()
stream.write("2 0:2 1:1\n2 0:3 1:2")
stream.seek(0) # rewind stream
docs = [[0, 0, 1], [0, 0, 0, 1, 1]]
assert utils.docs_from_ldac(stream) == docs
stream = StringIO()
stream.write("2 1:1 0:2\n3 2:1 0:3 1:1")
stream.seek(0) # rewind stream
docs = [[1, 0, 0], [2, 0, 0, 0, 1]]
assert utils.docs_from_ldac(stream) == docs
@raises(AssertionError)
def test_bad_ldac_data():
stream = StringIO()
stream.write("2 0:1")
stream.seek(0) # rewind stream
utils.docs_from_ldac(stream)
|
Python
| 0
|
@@ -1,12 +1,32 @@
+import numpy as np%0A%0A
from cString
@@ -279,24 +279,196 @@
) == docs%0A%0A%0A
+def test_docs_from_numpy_dtp():%0A dtm = np.array(%5B%5B2, 1%5D, %5B3, 2%5D%5D)%0A docs = %5B%5B0, 0, 1%5D, %5B0, 0, 0, 1, 1%5D%5D%0A assert utils.docs_from_document_term_matrix(dtm) == docs%0A%0A%0A
def test_doc
|
4cc3fe30e676c31cc6af9cb3a75de10b47ff2adc
|
Add % to date format
|
door/views.py
|
door/views.py
|
from django.shortcuts import render
from django.http import HttpResponse
from .models import DoorStatus, OpenData
from django.views.decorators.csrf import csrf_exempt
from django.utils import timezone
from website import settings
from datetime import datetime
import json
# Create your views here.
@csrf_exempt
def door_post(request):
if request.method == 'POST':
unico = request.body.decode('utf-8')
data = json.loads(unico)
if 'key' in data:
if data['key'] == settings.DOOR_KEY:
if 'status' in data:
status = data['status']
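                    # fetch the singleton status row for the hackerspace door, creating it on first use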
if DoorStatus.objects.filter(name='hackerspace').count():
door_status_object = DoorStatus.objects.get(name='hackerspace')
else:
door_status_object = DoorStatus(name='hackerspace')
if status == True:
door_status_object.status = status
door_status_object.save()
if 'timeStart' in data and 'dateStart' in data:
timeStart = data['timeStart']
dateStart = data['dateStart']
opened = datetime.strptime(dateStart+"."+timeStart,"Y-m-d.H:i:s")
door_status_object.datetime = opened
door_status_object.save()
elif status == False:
door_status_object.status = status
door_status_object.save()
if 'timeStart' in data and 'dateStart' in data and 'timeEnd' in data and 'dateEnd' in data and 'timeTotal' in data:
timeStart = data['timeStart']
dateStart = data['dateStart']
timeEnd = data['timeEnd']
dateEnd = data['dateEnd']
total = data['timeTotal']
opened = datetime.strptime(dateStart+"."+timeStart,"Y-m-d.H:i:s")
closed = datetime.strptime(dateEnd+"."+timeEnd,"Y-m-d.H:i:s")
openData = OpenData(opened=opened, closed=closed, total=total)
openData.save()
door_status_object.datetime = closed
door_status_object.save()
return HttpResponse(" ")
@csrf_exempt
def get_status(request):
if DoorStatus.objects.filter(name='hackerspace').count():
status = DoorStatus.objects.get(name='hackerspace').status
else:
status = True
return HttpResponse(status)
def door_data(request):
opendata_list = OpenData.objects.all()
if DoorStatus.objects.filter(name='hackerspace').count():
status = DoorStatus.objects.get(name='hackerspace')
else:
status = DoorStatus(name='hackerspace')
context = {
'opendata_list': opendata_list,
'status': status,
}
return render(request, 'door_data.html', context)
|
Python
| 0.000007
|
@@ -1279,34 +1279,40 @@
+timeStart,%22
+%25
Y-
+%25
m-
+%25
d.
+%25
H:
+%25
i:
+%25
s%22)%0A
@@ -2071,26 +2071,32 @@
eStart,%22
+%25
Y-
+%25
m-
+%25
d.
+%25
H:
+%25
i:
+%25
s%22)%0A
@@ -2171,18 +2171,24 @@
nd,%22
+%25
Y-
+%25
m-
+%25
d.
+%25
H:
+%25
i:
+%25
s%22)%0A
|
36ed44e94916d6abe3458645c957dd9715cbc532
|
set STATIC_ROOT
|
myproj/myproj/settings.py
|
myproj/myproj/settings.py
|
"""
Django settings for myproj project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'unsecret_key'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'myproj.urls'
WSGI_APPLICATION = 'myproj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'myproj', 'templates'),
)
try:
from local_settings import *
except ImportError:
pass
|
Python
| 0.000008
|
@@ -2011,16 +2011,71 @@
atic/'%0A%0A
+STATIC_ROOT = os.path.join(BASE_DIR, 'web', 'static')%0A%0A
TEMPLATE
|
825c1ba04bb2374020025de18e1cb17b5479ee7f
|
Fix document
|
chainer/functions/activation/log_softmax.py
|
chainer/functions/activation/log_softmax.py
|
import numpy
from chainer import cuda
from chainer import function
from chainer.utils import type_check
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
libcudnn = cudnn.cudnn
_algorithm = libcudnn.CUDNN_SOFTMAX_LOG
_mode = libcudnn.CUDNN_SOFTMAX_MODE_CHANNEL
_cudnn_version = libcudnn.getVersion()
def logsumexp(x):
xp = cuda.get_array_module(x)
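    # subtract the per-row max before exp() to avoid overflow (log-sum-exp trick)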
m = x.max(axis=1, keepdims=True)
y = x - m
xp.exp(y, out=y)
return xp.log(y.sum(axis=1, keepdims=True)) + m
class LogSoftmax(function.Function):
"""Log-softmax activation function."""
def __init__(self, use_cudnn=True):
self.use_cudnn = use_cudnn
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
x_type, = in_types
type_check.expect(
x_type.dtype == numpy.float32,
x_type.ndim > 1,
)
def forward(self, xs):
x = xs[0]
xp = cuda.get_array_module(x)
if xp != numpy and cuda.cudnn_enabled and self.use_cudnn \
and _cudnn_version >= 3000:
dtype = x.dtype
one = numpy.array(1, dtype=dtype).ctypes
zero = numpy.array(0, dtype=dtype).ctypes
handle = cudnn.get_handle()
x_cube = x.reshape(x.shape[:2] + (-1, 1))
desc = cudnn.create_tensor_descriptor(x_cube)
self.y = xp.empty_like(x)
libcudnn.softmaxForward(
handle, _algorithm, _mode, one.data, desc.value,
x_cube.data.ptr, zero.data, desc.value,
self.y.data.ptr)
return self.y,
else:
log_z = logsumexp(x)
self.y = x - log_z
return self.y,
def backward(self, x, gy):
xp = cuda.get_array_module(*x)
if xp != numpy and cuda.cudnn_enabled and self.use_cudnn \
and _cudnn_version >= 3000:
dtype = x[0].dtype
one = numpy.array(1, dtype=dtype).ctypes
zero = numpy.array(0, dtype=dtype).ctypes
handle = cudnn.get_handle()
gx = xp.empty_like(x[0])
gx_cube = gx.reshape(gx.shape[:2] + (-1, 1))
desc = cudnn.create_tensor_descriptor(gx_cube)
libcudnn.softmaxBackward(
handle, _algorithm, _mode, one.data, desc.value,
self.y.data.ptr, desc.value, gy[0].data.ptr, zero.data,
desc.value, gx.data.ptr)
else:
gx = gy[0] - xp.exp(self.y) * gy[0].sum(axis=1, keepdims=True)
return gx,
def log_softmax(x, use_cudnn=True):
"""Channelwise log-softmax function.
This function computes its logarithm of softmax along the second axis. Let
:math:`x = (x_1, x_2, \\dots, x_d)^{\\top}` be the d dimensional index
array and :math:`f(x)` be the d dimensional input array. For each index
:math:`x` of the input array :math:`f(x)`, it computes the probability
:math:`\log p(x)` defined as
:math:`p(x) = {\\exp(f(x)) \\over \\sum_{x_2} \\exp(f(x))}`.
Note that `log(softmax(x))` may cause underflow when ``x`` is too small,
because ``softmax(x)`` may returns 0.
`log_softmax` method is more stable.
Args:
x (~chainer.Variable): Input variable.
use_cudnn (bool): If True and cuDNN is enabled, then this function uses
cuDNN as the core implementation.
Returns:
~chainer.Variable: Output variable.
"""
return LogSoftmax(use_cudnn)(x)
|
Python
| 0.000004
|
@@ -2919,16 +2919,37 @@
tes the
+logarithm of%0A the
probabil
@@ -2951,20 +2951,16 @@
bability
-%0A
:math:%60
@@ -2981,27 +2981,39 @@
ined as%0A
+%0A
-:
+..
math:
-%60
+:%0A
p(x) = %7B
@@ -3039,18 +3039,17 @@
%5C%5Csum_%7Bx
-_2
+'
%7D %5C%5Cexp(
@@ -3055,29 +3055,38 @@
(f(x
+'
))%7D
-%60
.%0A%0A
-Note that
+.. note::%0A %60
%60log
@@ -3098,16 +3098,17 @@
max(x))%60
+%60
may cau
@@ -3149,16 +3149,20 @@
ll,%0A
+
+
because
@@ -3192,15 +3192,24 @@
rns
-0
+%60%600%60%60
.%0A
+ %60
%60log
@@ -3217,16 +3217,17 @@
softmax%60
+%60
method
@@ -3333,20 +3333,17 @@
If True
- and
+,
cuDNN i
@@ -3351,16 +3351,62 @@
enabled
+ and cuDNN ver. 3 or later%0A is used
, then t
@@ -3422,28 +3422,16 @@
ion uses
-%0A
cuDNN a
@@ -3512,24 +3512,77 @@
variable.%0A%0A
+ .. seealso:: :func:%60~chainer.functions.softmax%60%0A%0A
%22%22%22%0A
|
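Unescaping the hunks above, the commit rewrites the docstring to use reST math and note directives and expands the cuDNN condition; the affected region of the log_softmax docstring would read roughly as below (indentation is reconstructed and therefore an assumption; the "may returns" wording is kept as it appears in the diff):

    :math:`x` of the input array :math:`f(x)`, it computes the logarithm of
    the probability :math:`\\log p(x)` defined as

    .. math::
        p(x) = {\\exp(f(x)) \\over \\sum_{x'} \\exp(f(x'))}.

    .. note::
        ``log(softmax(x))`` may cause underflow when ``x`` is too small,
        because ``softmax(x)`` may returns ``0``.
        ``log_softmax`` method is more stable.

    Args:
        x (~chainer.Variable): Input variable.
        use_cudnn (bool): If True, cuDNN is enabled and cuDNN ver. 3 or later
            is used, then this function uses cuDNN as the core implementation.

    Returns:
        ~chainer.Variable: Output variable.

    .. seealso:: :func:`~chainer.functions.softmax`
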
c929cd9ba2718175e79906f836780e47d4793d75
|
enable test for SQL
|
corehq/apps/commtrack/tests/test_rebuild.py
|
corehq/apps/commtrack/tests/test_rebuild.py
|
from django.test import TestCase
from casexml.apps.case.cleanup import rebuild_case_from_forms
from casexml.apps.case.mock import CaseFactory
from casexml.apps.case.models import CommCareCase
from corehq.apps.commtrack.helpers import make_product
from corehq.apps.commtrack.processing import rebuild_stock_state
from corehq.apps.commtrack.tests.util import get_single_balance_block
from corehq.apps.hqcase.utils import submit_case_blocks
from corehq.form_processor.interfaces.dbaccessors import LedgerAccessors, CaseAccessors
from corehq.form_processor.interfaces.processor import FormProcessorInterface
from corehq.form_processor.models import RebuildWithReason
from corehq.form_processor.parsers.ledgers.helpers import UniqueLedgerReference
from corehq.form_processor.tests.utils import run_with_all_backends
LEDGER_BLOCKS_SIMPLE = """
<transfer xmlns="http://commcarehq.org/ledger/v1" dest="{case_id}" date="2000-01-02" section-id="stock">
<entry id="{product_id}" quantity="100" />
</transfer >
<balance xmlns="http://commcarehq.org/ledger/v1" entity-id="{case_id}" date="2000-01-01" section-id="stock">
<entry id="{product_id}" quantity="100" />
</balance>
"""
LEDGER_BLOCKS_INFERRED = """
<transfer xmlns="http://commcarehq.org/ledger/v1" dest="{case_id}" date="2000-01-02" section-id="stock">
<entry id="{product_id}" quantity="50" />
</transfer >
<balance xmlns="http://commcarehq.org/ledger/v1" entity-id="{case_id}" date="2000-01-01" section-id="stock">
<entry id="{product_id}" quantity="100" />
</balance>
"""
class RebuildStockStateTest(TestCase):
def setUp(self):
self.domain = 'asldkjf-domain'
self.case = CaseFactory(domain=self.domain).create_case()
self.product = make_product(self.domain, 'Product Name', 'prodcode')
self._stock_state_key = dict(
section_id='stock',
case_id=self.case.case_id,
product_id=self.product.get_id
)
self.unique_reference = UniqueLedgerReference(
case_id=self.case.case_id, section_id='stock', entry_id=self.product.get_id
)
self.ledger_processor = FormProcessorInterface(self.domain).ledger_processor
def _assert_stats(self, epxected_tx_count, expected_stock_state_balance, expected_tx_balance):
ledger_value = LedgerAccessors(self.domain).get_ledger_value(**self.unique_reference._asdict())
latest_txn = LedgerAccessors(self.domain).get_latest_transaction(**self.unique_reference._asdict())
all_txns = LedgerAccessors(self.domain).get_ledger_transactions_for_case(**self.unique_reference._asdict())
self.assertEqual(epxected_tx_count, len(all_txns))
self.assertEqual(expected_stock_state_balance, ledger_value.stock_on_hand)
self.assertEqual(expected_tx_balance, latest_txn.stock_on_hand)
def _submit_ledgers(self, ledger_blocks):
return submit_case_blocks(
ledger_blocks.format(**self._stock_state_key), self.domain)
@run_with_all_backends
def test_simple(self):
self._submit_ledgers(LEDGER_BLOCKS_SIMPLE)
self._assert_stats(2, 100, 100)
self.ledger_processor.rebuild_ledger_state(**self.unique_reference._asdict())
self._assert_stats(2, 200, 200)
def test_inferred(self):
self._submit_ledgers(LEDGER_BLOCKS_INFERRED)
# this is weird behavior:
# it just doesn't process the second one
# even though knowing yesterday's certainly changes the meaning
# of today's transfer
# UPDATE SK 2016-03-14: this happens because the transactions are received out of order
# (they appear out of order in the form XML) and hence saved out of order.
# When the older transaction is saved it will only look back in time
# to create inferred transactions and not ahead.
self._assert_stats(2, 50, 50)
rebuild_stock_state(**self._stock_state_key)
self._assert_stats(2, 150, 150)
def test_case_actions(self):
"""
make sure that when a case is rebuilt (using rebuild_case)
stock transactions show up as well
"""
form_id = self._submit_ledgers(LEDGER_BLOCKS_SIMPLE)
case_id = self.case.case_id
rebuild_case_from_forms(self.domain, case_id, RebuildWithReason(reason='test'))
case = CaseAccessors(self.domain).get_case(self.case.case_id)
self.assertEqual(case.xform_ids[1:], [form_id])
self.assertEqual(case.actions[1].xform_id, form_id)
def test_edit_submissions_simple(self):
initial_quantity = 100
form_id = submit_case_blocks(
case_blocks=get_single_balance_block(quantity=initial_quantity, **self._stock_state_key),
domain=self.domain,
)
self._assert_stats(1, initial_quantity, initial_quantity)
case = CaseAccessors(self.domain).get_case(self.case.case_id)
self.assertEqual(2, len(case.actions))
self.assertEqual([form_id], case.xform_ids[1:])
# change the value to 50
edit_quantity = 50
submit_case_blocks(
case_blocks=get_single_balance_block(quantity=edit_quantity, **self._stock_state_key),
domain=self.domain,
form_id=form_id,
)
case = CommCareCase.get(id=self.case.case_id)
self.assertEqual(2, len(case.actions))
self._assert_stats(1, edit_quantity, edit_quantity)
self.assertEqual([form_id], case.xform_ids[1:])
|
Python
| 0.000001
|
@@ -4501,24 +4501,51 @@
, form_id)%0A%0A
+ @run_with_all_backends%0A
def test
@@ -4568,32 +4568,32 @@
s_simple(self):%0A
-
initial_
@@ -4860,32 +4860,42 @@
y)%0A%0A case
+_accessors
= CaseAccessors
@@ -4899,32 +4899,62 @@
ors(self.domain)
+%0A case = case_accessors
.get_case(self.c
@@ -5347,28 +5347,32 @@
e =
-CommCareCase.get(id=
+case_accessors.get_case(
self
|
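Decoded, the hunks above re-enable test_edit_submissions_simple on all backends and route the later case reads through a shared CaseAccessors instance; the test would end up roughly as follows (unchanged lines elided with ...):

    @run_with_all_backends
    def test_edit_submissions_simple(self):
        initial_quantity = 100
        ...
        case_accessors = CaseAccessors(self.domain)
        case = case_accessors.get_case(self.case.case_id)
        ...
        case = case_accessors.get_case(self.case.case_id)
        self.assertEqual(2, len(case.actions))
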
566ceb81a14685c201f3c92668dc0530a1a91176
|
fix path
|
organization/projects/management/commands/project_inject_content.py
|
organization/projects/management/commands/project_inject_content.py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016-2017 Ircam
# Copyright (c) 2016-2017 Guillaume Pellerin
# Copyright (c) 2016-2017 Emilie Zawadzki
# This file is part of mezzanine-organization.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import requests
import json
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from organization.projects.models import *
from django.utils.text import slugify
from django.contrib.sites.models import Site
from copy import deepcopy
class Command(BaseCommand):
help = """Retrieve content_fr of old mode Project
from database Tue Feb 5 14:26:55 2019 +0100
"""
def handle(self, *args, **options):
old_projects = self.read_json('projects.json')
project_pages = ProjectPage.objects.all()
for project_page in project_pages:
print(project_page.site_id)
for old_project in old_projects:
if old_project['pk'] == project_page.project_id:
# inject _fr in _en (because _fr became _en)
if not project_page.content_en:
project_page.content_en = project_page.content_fr
project_page.content_fr = old_project['fields']['content_fr']
project_page.save()
def read_file(self, path):
file = open(path, "r")
data = file.read()
file.close()
return data
def read_json(self, path):
return json.loads(self.read_file(path))
|
Python
| 0.000001
|
@@ -1338,16 +1338,126 @@
ions):%0A%0A
+ json_path = '/srv/lib/mezzanine-organization/organization/projects/management/commands/projects.json'%0A
@@ -1490,23 +1490,17 @@
son(
-'projects.json'
+json_path
)%0A%0A
|
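Unescaped, the hunk above hardcodes an absolute path to the fixture instead of the bare filename; handle() would begin roughly as:

    def handle(self, *args, **options):

        json_path = '/srv/lib/mezzanine-organization/organization/projects/management/commands/projects.json'
        old_projects = self.read_json(json_path)
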
898dbbbda66b842b9477d473788e929bd18f8572
|
Add --pdb flag and fix --no-discover flag
|
os_testr/os_testr.py
|
os_testr/os_testr.py
|
#!/usr/bin/env python2
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
import subprocess
import sys
import argparse
def parse_args():
parser = argparse.ArgumentParser(
description='Tool to run openstack tests')
parser.add_argument('--blacklist_file', '-b',
help='Path to a blacklist file, this file contains a'
' separate regex exclude on each newline')
parser.add_argument('--regex', '-r',
help='A normal testr selection regex. If a blacklist '
'file is specified, the regex will be appended '
'to the end of the generated regex from that '
'file')
parser.add_argument('--pretty', '-p', default=True,
help='Print pretty output from subunit-trace. This is '
'mutually exclusive with --subunit')
parser.add_argument('--subunit', '-s', action='store_true',
help='output the raw subunit v2 from the test run '
'this is mutuall exclusive with --pretty')
parser.add_argument('--list', '-l', action='store_true',
help='List all the tests which will be run.')
parser.add_argument('--no-discover', '-n',
help="Takes in a single test to bypasses test "
"discover and just excute the test specified")
parser.add_argument('--slowest', default=True,
help="after the test run print the slowest tests")
opts = parser.parse_args()
return opts
def construct_regex(blacklist_file, regex):
if not blacklist_file:
exclude_regex = ''
else:
black_file = open(blacklist_file, 'r')
exclude_regex = ''
for line in black_file:
regex = line.strip()
exclude_regex = '|'.join([regex, exclude_regex])
if exclude_regex:
exclude_regex = "'(?!.*" + exclude_regex + ")"
if regex:
exclude_regex += regex
return exclude_regex
def call_testr(regex, subunit, pretty, list_tests, slowest):
cmd = ['testr', 'run', '--parallel']
if list_tests:
cmd = ['testr', 'list-tests']
elif subunit or pretty:
cmd.append('--subunit')
cmd.append(regex)
env = copy.deepcopy(os.environ)
if pretty and not list_tests:
ps = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE)
proc = subprocess.Popen(['subunit-trace', '--no-failure-debug', '-f'],
env=env, stdin=ps.stdout)
ps.stdout.close()
else:
proc = subprocess.Popen(cmd, env=env)
return_code = proc.communicate()[0]
if slowest and not list_tests:
print("\nSlowest Tests:\n")
slow_proc = subprocess.Popen(['testr', 'slowest'], env=env)
slow_proc.communicate()
return return_code
def call_subunit_run(test_id, pretty):
cmd = ['python', '-m', 'subunit.run', test_id]
env = copy.deepcopy(os.environ)
if pretty:
ps = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE)
proc = subprocess.Popen(['subunit-trace', '--no-failure-debug', '-f'],
env=env, stdin=ps.stdout)
ps.stdout.close()
else:
proc = subprocess.Popen(cmd, env=env)
proc = subprocess.Popen(cmd)
return_code = proc.communicate()[0]
return return_code
def main():
opts = parse_args()
if opts.pretty and opts.subunit:
msg = ('Subunit output and pretty output cannot be specified at the '
'same time')
print(msg)
exit(2)
if opts.list and opts.no_discover:
msg = ('you can not list tests when you are bypassing discovery to '
'run a single test')
print(msg)
exit(3)
exclude_regex = construct_regex(opts.blacklist_file, opts.regex)
if not os.path.isdir('.testrepository'):
subprocess.call('testr init')
if not opts.no_discover:
exit(call_testr(exclude_regex, opts.subunit, opts.pretty, opts.list,
opts.slowest))
else:
exit(call_subunit_run(opts.no_discover, opts.pretty))
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -2177,16 +2177,125 @@
tests%22)%0A
+ parser.add_argument('--pdb',%0A help='Run a single test that has pdb traces added')%0A
opts
@@ -4059,24 +4059,210 @@
d, env=env)%0A
+ return_code = proc.communicate()%5B0%5D%0A return return_code%0A%0Adef call_testtools_run(test_id):%0A cmd = %5B'python', '-m', 'testtools.run', test_id%5D%0A env = copy.deepcopy(os.environ)%0A
proc = s
@@ -4280,16 +4280,25 @@
open(cmd
+, env=env
)%0A re
@@ -4351,25 +4351,24 @@
eturn_code%0A%0A
-%0A
def main():%0A
@@ -4927,32 +4927,49 @@
opts.no_discover
+ and not opts.pdb
:%0A exit(c
@@ -5070,16 +5070,78 @@
owest))%0A
+ elif opts.pdb:%0A exit(call_testtools_run(opts.pdb))%0A
else
|
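Putting the escaped hunks together: the commit adds a --pdb option, turns the stray trailing Popen lines of call_subunit_run into a new call_testtools_run() helper (now passing env=env), and adds a branch in main(). Decoded, with indentation reconstructed, the new pieces read roughly:

    parser.add_argument('--pdb',
                        help='Run a single test that has pdb traces added')

def call_testtools_run(test_id):
    cmd = ['python', '-m', 'testtools.run', test_id]
    env = copy.deepcopy(os.environ)
    proc = subprocess.Popen(cmd, env=env)
    return_code = proc.communicate()[0]
    return return_code

    if not opts.no_discover and not opts.pdb:
        exit(call_testr(exclude_regex, opts.subunit, opts.pretty, opts.list,
                        opts.slowest))
    elif opts.pdb:
        exit(call_testtools_run(opts.pdb))
    else:
        exit(call_subunit_run(opts.no_discover, opts.pretty))
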
05f00ab66df27e56921b30dde0691a176ba829fa
|
delete comments
|
chainer/functions/normalization/layer_normalization.py
|
chainer/functions/normalization/layer_normalization.py
|
from chainer import cuda
from chainer import function
from chainer.utils import type_check
def _broadcast_to(xp, x, shape):
if hasattr(xp, 'broadcast_to'):
return xp.broadcast_to(x, shape)
else:
# numpy 1.9 doesn't support broadcast_to method
dummy = xp.empty(shape)
bx, _ = xp.broadcast_arrays(x, dummy)
return bx
class LayerNormalization(function.Function):
"""Layer normalization"""
def __init__(self, eps=1e-5):
self.eps = eps
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, gamma_type, beta_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim == 2,
gamma_type.ndim == 1,
beta_type.ndim == 1,
gamma_type.dtype == x_type.dtype,
beta_type.dtype == x_type.dtype,
gamma_type.shape == beta_type.shape,
)
def forward(self, inputs):
xp = cuda.get_array_module(*inputs)
x, gamma, beta = inputs
mu = xp.mean(x, axis=1, keepdims=True)
self.x_mu = x - mu
self.squ_x_mu = xp.square(self.x_mu)
self.var = xp.mean(self.squ_x_mu, axis=1, keepdims=True)
std = xp.sqrt(self.var + self.eps)
self.inv_std = 1. / std
self.x_hat = self.x_mu * self.inv_std
scaled_x = self.x_hat * gamma[None, ]
shifted_x = scaled_x + beta[None, ]
return shifted_x,
def backward(self, inputs, gy):
xp = cuda.get_array_module(*inputs)
x, gamma, beta = inputs
gy = gy[0]
g_beta = gy.sum(axis=0)
g_scaled_x = gy
g_gamma = xp.sum(g_scaled_x * self.x_hat, axis=0)
g_x_hat = g_scaled_x * gamma[None, ]
g_inv_std = xp.sum(g_x_hat * self.x_mu, axis=1, keepdims=True)
g_x_mu_1 = g_x_hat * self.inv_std
g_std = g_inv_std * (- 1. / self.var)
# = g_inv_std * (- 1. / (self.std ** 2))
g_var = g_std * 0.5 * self.inv_std
# = g_std * 0.5 * 1. / xp.sqrt(self.var + self.eps)
n_units = x.shape[1]
g_squ_x_mu = _broadcast_to(xp, g_var * 1. / n_units, x.shape)
g_x_mu_2 = g_squ_x_mu * 2 * self.x_mu
g_x_1 = g_x_mu_1 + g_x_mu_2
g_mu = xp.sum(g_x_1, axis=1, keepdims=True) * (- 1.)
# = xp.sum(g_x_mu_1 + g_x_mu_2, axis=1, keepdims=True) * (- 1.)
g_x_2 = _broadcast_to(xp, g_mu * 1. / n_units, x.shape)
g_x = g_x_1 + g_x_2
return g_x, g_gamma, g_beta,
def layer_normalization(x, gamma, beta, eps=1e-5):
"""Layer normalization.
This function implements a "layer normalization"
which normalizes the input units by statistics
that are computed along the second axis,
scales and shifts them.
Args:
x (~chainer.Variable): Batch vectors.
Shape of this value must be `(batch_size, unit_size)`,
e.g., the output of :func:`~chainer.functions.linear`.
gamma (~chainer.Variable): Scaling vectors.
beta (~chainer.Variable): Shifting vectors.
Returns:
~chainer.Variable: The output variable which has the same shape
as :math:`x`.
See: `Layer Normalization <https://arxiv.org/abs/1607.06450>`_
"""
return LayerNormalization(eps)(x, gamma, beta)
|
Python
| 0
|
@@ -1944,152 +1944,42 @@
-# = g_inv_std * (- 1. / (self.std ** 2))%0A%0A g_var = g_std * 0.5 * self.inv_std%0A # = g_std * 0.5 * 1. / xp.sqrt(self.var + self.eps)
+g_var = g_std * 0.5 * self.inv_std
%0A%0A
@@ -2158,16 +2158,16 @@
_x_mu_2%0A
+
@@ -2222,80 +2222,8 @@
1.)
-%0A # = xp.sum(g_x_mu_1 + g_x_mu_2, axis=1, keepdims=True) * (- 1.)
%0A%0A
|
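Decoded, the hunks above only delete the commented-out derivation notes; the surrounding backward-pass arithmetic is untouched and ends up reading:

        g_std = g_inv_std * (- 1. / self.var)

        g_var = g_std * 0.5 * self.inv_std

        n_units = x.shape[1]
        g_squ_x_mu = _broadcast_to(xp, g_var * 1. / n_units, x.shape)
        g_x_mu_2 = g_squ_x_mu * 2 * self.x_mu

        g_x_1 = g_x_mu_1 + g_x_mu_2
        g_mu = xp.sum(g_x_1, axis=1, keepdims=True) * (- 1.)
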
a1e4cfba6c184c76986d8bb0a11e0a5f1ea07ad1
|
Fix test
|
mapentity/tests/test_attachments.py
|
mapentity/tests/test_attachments.py
|
import mock
from django.test import TestCase, RequestFactory
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.core.files.uploadedfile import SimpleUploadedFile
from paperclip.models import Attachment, FileType
from paperclip.views import add_url_for_obj
from mapentity.views.generic import MapEntityDetail
from .models import DummyModel
User = get_user_model()
class EntityAttachmentTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user('howard', 'h@w.com', 'booh')
def user_perms(p):
return {'add_attachment': False}.get(p, True)
self.user.is_anonymous = mock.MagicMock(return_value=False)
self.user.has_perm = mock.MagicMock(side_effect=user_perms)
self.object = DummyModel.objects.create()
def createRequest(self):
request = RequestFactory().get('/dummy')
request.session = {}
request.user = self.user
return request
def createAttachment(self, obj):
uploaded = SimpleUploadedFile('file.odt',
'*' * 128,
content_type='application/vnd.oasis.opendocument.text')
kwargs = {
'content_type': ContentType.objects.get_for_model(obj),
'object_id': obj.pk,
'filetype': FileType.objects.create(),
'creator': self.user,
'title': "Attachment title",
'legend': "Attachment legend",
'attachment_file': uploaded
}
return Attachment.objects.create(**kwargs)
def test_list_attachments_in_details(self):
self.createAttachment(self.object)
request = self.createRequest()
view = MapEntityDetail.as_view(model=DummyModel,
template_name="mapentity/mapentity_detail.html")
response = view(request, pk=self.object.pk)
html = unicode(response.render())
self.assertTemplateUsed(response, template_name='paperclip/details.html')
self.assertEqual(1, len(Attachment.objects.attachments_for_object(self.object)))
self.assertFalse("Upload attachment" in html)
for attachment in Attachment.objects.attachments_for_object(self.object):
self.assertTrue(attachment.legend in html)
self.assertTrue(attachment.title in html)
self.assertTrue(attachment.attachment_file.url in html)
self.assertTrue('paperclip/fileicons/odt.png')
def test_upload_form_in_details_if_perms(self):
self.user.has_perm = mock.MagicMock(return_value=True)
view = MapEntityDetail.as_view(model=DummyModel,
template_name="mapentity/mapentity_detail.html")
request = self.createRequest()
response = view(request, pk=self.object.pk)
html = unicode(response.render())
self.assertTrue("Upload attachment" in html)
self.assertTrue("""<form method="post" enctype="multipart/form-data"
action="/paperclip/add-for/tests/dummymodel/1/""" in html)
class UploadAttachmentTestCase(TestCase):
def setUp(self):
self.object = DummyModel.objects.create()
user = User.objects.create_user('aah', 'email@corp.com', 'booh')
user.is_superuser = True
user.save()
success = self.client.login(username=user.username, password='booh')
self.assertTrue(success)
def attachmentPostData(self):
filetype = FileType.objects.create()
uploaded = SimpleUploadedFile('face.jpg',
'*' * 128,
content_type='image/jpeg')
data = {
'filetype': filetype.pk,
'title': 'A title',
'legend': 'A legend',
'attachment_file': uploaded,
'next': self.object.get_detail_url()
}
return data
def test_upload_redirects_to_dummy_detail_url(self):
response = self.client.post(add_url_for_obj(self.object),
data=self.attachmentPostData())
self.assertEqual(response.status_code, 302)
self.assertEqual(response['location'],
'http://testserver/dummymodel/%s/' % self.object.pk)
def test_upload_creates_attachment(self):
data = self.attachmentPostData()
self.client.post(add_url_for_obj(self.object), data=data)
att = Attachment.objects.attachments_for_object(self.object).get()
self.assertEqual(att.title, data['title'])
self.assertEqual(att.legend, data['legend'])
self.assertEqual(att.filetype.pk, data['filetype'])
def test_title_gives_name_to_file(self):
data = self.attachmentPostData()
self.client.post(add_url_for_obj(self.object), data=data)
att = Attachment.objects.attachments_for_object(self.object).get()
self.assertTrue('a-title' in att.attachment_file.name)
def test_filename_is_used_if_no_title(self):
data = self.attachmentPostData()
data['title'] = ''
self.client.post(add_url_for_obj(self.object), data=data)
att = Attachment.objects.attachments_for_object(self.object).get()
self.assertTrue('face' in att.attachment_file.name)
|
Python
| 0.000004
|
@@ -615,16 +615,26 @@
eturn %7B'
+paperclip.
add_atta
|
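Decoded, the one-line hunk prefixes the permission key with the app label; the mocked permission map becomes:

        def user_perms(p):
            return {'paperclip.add_attachment': False}.get(p, True)
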
2e4ec0fea35722fbdbab36ce326e664249e3eaf7
|
Add support jinja2
|
nacho/controllers/base.py
|
nacho/controllers/base.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from cyclone.web import RequestHandler
class ApplicationController(RequestHandler):
pass
|
Python
| 0
|
@@ -48,15 +48,15 @@
rom
-cyclone
+tornado
.web
@@ -83,59 +83,970 @@
er%0A%0A
-%0Aclass ApplicationController(RequestHandler):%0A pass
+from jinja2 import Environment, FileSystemLoader, TemplateNotFound%0A%0A%0Aclass ApplicationController(RequestHandler):%0A def render(self, template_name, **kwargs):%0A kwargs.update(%7B%0A 'settings': self.settings,%0A 'STATIC_URL': self.settings.get('static_url_prefix', '/static/'),%0A 'request': self.request,%0A 'xsrf_token': self.xsrf_token,%0A 'xsrf_form_html': self.xsrf_form_html,%0A %7D)%0A self.write(self.render_template(template_name, **kwargs))%0A%0A def render_template(self, template_name, **kwargs):%0A template_dirs = %5B%5D%0A if self.settings.get('template_path', ''):%0A template_dirs.append(self.settings%5B%22template_path%22%5D)%0A env = Environment(loader=FileSystemLoader(template_dirs))%0A try:%0A template = env.get_template(template_name)%0A except TemplateNotFound:%0A raise TemplateNotFound(template_name)%0A return template.render(kwargs)
%0A
|
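Unescaping the two hunks yields the whole new module: the import switches from cyclone to tornado, and the bare class gains jinja2-backed render helpers. Reconstructed (blank lines assumed), the file after the commit reads roughly:

#!/usr/bin/env python
# -*- coding: utf-8 -*-
from tornado.web import RequestHandler

from jinja2 import Environment, FileSystemLoader, TemplateNotFound


class ApplicationController(RequestHandler):
    def render(self, template_name, **kwargs):
        kwargs.update({
            'settings': self.settings,
            'STATIC_URL': self.settings.get('static_url_prefix', '/static/'),
            'request': self.request,
            'xsrf_token': self.xsrf_token,
            'xsrf_form_html': self.xsrf_form_html,
        })
        self.write(self.render_template(template_name, **kwargs))

    def render_template(self, template_name, **kwargs):
        template_dirs = []
        if self.settings.get('template_path', ''):
            template_dirs.append(self.settings["template_path"])
        env = Environment(loader=FileSystemLoader(template_dirs))
        try:
            template = env.get_template(template_name)
        except TemplateNotFound:
            raise TemplateNotFound(template_name)
        return template.render(kwargs)
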
fde4cf7b025255bdc5c63f1d2c84a1f45dc6068c
|
Make tests more robust
|
mailme/core/tests.py
|
mailme/core/tests.py
|
from uuid import uuid4
from datetime import datetime
import requests
from django.test import TestCase
from django.utils import unittest
from django.utils.timezone import utc
import pytz
from mailme.core.models import (
Feed,
Post,
Category,
Enclosure,
FEED_GENERIC_ERROR,
FEED_TIMEDOUT_ERROR,
FEED_NOT_FOUND_ERROR
)
from mailme.utils.dates import naturaldate
def gen_unique_id():
return str(uuid4())
class TestCategory(unittest.TestCase):
def test__str__(self):
cat = Category(name="foo", domain="bar")
self.assertIn("foo", str(cat))
self.assertIn("bar", str(cat))
cat = Category(name="foo")
self.assertIn("foo", str(cat))
class TestEnclosure(unittest.TestCase):
def test__str__(self):
en = Enclosure(url="requests.codes.//e.com/media/i.jpg",
type="image/jpeg", length=376851)
self.assertIn("requests.codes.//e.com/media/i.jpg", str(en))
self.assertIn("image/jpeg", str(en))
self.assertIn("376851", str(en))
class TestPost(unittest.TestCase):
def setUp(self):
self.feed = Feed.objects.create(name="testfeed",
feed_url=gen_unique_id())
def test__str__(self):
post = Post(feed=self.feed, title="foo")
self.assertIn("foo", str(post))
def test_auto_guid(self):
p1 = Post(feed=self.feed, title="foo")
p2 = Post(feed=self.feed, title="bar")
self.assertNotEqual(p1.auto_guid(), p2.auto_guid())
def test_date_published_naturaldate(self):
now = datetime.now(pytz.utc)
day = datetime(now.year, now.month, now.day, tzinfo=utc)
post = Post(feed=self.feed, title="baz", date_published=now)
self.assertEqual(post.date_published_naturaldate, naturaldate(day))
def test_date_updated_naturaldate(self):
now = datetime.now(pytz.utc)
post = Post(feed=self.feed, title="baz", date_updated=now)
self.assertEqual(post.date_updated_naturaldate, naturaldate(now))
class TestFeed(unittest.TestCase):
def test__str__(self):
f = Feed(name="foo", feed_url="requests.codes.//example.com")
self.assertIn("foo", str(f))
self.assertIn("(requests.codes.//example.com)", str(f))
def test_error_for_status(self):
f = Feed(name="foo", feed_url="requests.codes.//example.com")
self.assertEqual(f.error_for_status(requests.codes.NOT_FOUND),
FEED_NOT_FOUND_ERROR)
self.assertEqual(f.error_for_status(requests.codes.INTERNAL_SERVER_ERROR),
FEED_GENERIC_ERROR)
self.assertIsNone(f.error_for_status(requests.codes.OK))
def test_save_generic_error(self):
f = Feed(name="foo1", feed_url="requests.codes.//example.com/t1.rss")
f.save_generic_error()
indb = Feed.objects.get(feed_url="requests.codes.//example.com/t1.rss")
self.assertEqual(indb.last_error, FEED_GENERIC_ERROR)
def test_set_error_status(self):
f = Feed(name="foo3", feed_url="requests.codes.//example.com/t3.rss")
f.set_error_status(requests.codes.INTERNAL_SERVER_ERROR)
indb = Feed.objects.get(feed_url="requests.codes.//example.com/t3.rss")
self.assertEqual(indb.last_error, FEED_GENERIC_ERROR)
def test_save_timeout_error(self):
f = Feed(name="foo2", feed_url="requests.codes.//example.com/t2.rss")
f.save_timeout_error()
indb = Feed.objects.get(feed_url="requests.codes.//example.com/t2.rss")
self.assertEqual(indb.last_error, FEED_TIMEDOUT_ERROR)
def test_date_last_refresh_naturaldate(self):
now = datetime.now(pytz.utc)
f = Feed(name="foo2", feed_url="requests.codes.//example.com/t2.rss",
date_last_refresh=now)
self.assertEqual(f.date_last_refresh_naturaldate, naturaldate(now))
|
Python
| 0.001098
|
@@ -100,42 +100,8 @@
ase%0A
-from django.utils import unittest%0A
from
@@ -134,16 +134,16 @@
ort utc%0A
+
%0Aimport
@@ -416,25 +416,16 @@
ategory(
-unittest.
TestCase
@@ -679,25 +679,16 @@
closure(
-unittest.
TestCase
@@ -986,24 +986,24 @@
str(en))%0A%0A%0A
+
class TestPo
@@ -1005,25 +1005,16 @@
estPost(
-unittest.
TestCase
@@ -2004,17 +2004,8 @@
eed(
-unittest.
Test
|
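Decoded, the hunks drop the deprecated django.utils.unittest import and rebase every test class on Django's TestCase (bodies elided with ...):

from django.test import TestCase

class TestCategory(TestCase):
    ...

class TestEnclosure(TestCase):
    ...

class TestPost(TestCase):
    ...

class TestFeed(TestCase):
    ...
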
caddce9b9c77a09b8ea6e3af5f4815afe8e7b908
|
Use correct file handle for writing ancestor simulated sequences to fasta.
|
test/simulations/sample_genomes.py
|
test/simulations/sample_genomes.py
|
"""
Create genome alignment by random sampling columns from INDELible simulation outputs.
"""
from collections import OrderedDict
import Utility
import csv
import sys
import os
import random
import ctypes
from collections import deque
BASES_PER_CODON = 3
scaling_factors = sys.argv[1].split(",")
output_dir = sys.argv[2]
output_filename_prefix = sys.argv[3]
seed = int(sys.argv[4])
total_codon_sites = int(sys.argv[5])
indelible_output_dir = sys.argv[6]
indelible_filename_prefix = sys.argv[7]
if not os.path.exists(output_dir):
os.makedirs(output_dir)
ratefile = open(output_dir + os.sep + output_filename_prefix + ".rates.csv", 'w') # keep track of each codon site omega
ratefile.write('Site,Scaling_factor,Rate_class,Omega\n')
randomizer = random.Random(seed)
random_codons = range(0,total_codon_sites)
randomizer.shuffle(random_codons) # we randomize which codons are assigned which mutation rate
random_codons_queue = deque(random_codons)
new_fasta = {}
new_anc_fasta = {}
for scaling_factor in scaling_factors:
if total_codon_sites % len(scaling_factors) != 0:
print("ERROR: The number of scaling factors should divide evenly into the number of codon sites")
raise ValueError("The number of scaling factors should divide evenly into the number of codon sites")
num_codons_per_scaling = total_codon_sites / len(scaling_factors)
scaling_factor = scaling_factor.lstrip().rstrip()
# read INDELible tree sequence FASTA
# <output_filename_prefix>_<scaling_factor>_TRUE.fasta are fastas containing the INDELible tip sequences of a phylogenetic tree
# The tree mutation rate is scaled by <scaling_factor>.
infile = open(indelible_output_dir+ os.sep + scaling_factor + os.sep + indelible_filename_prefix + "." +scaling_factor+'_TRUE.fasta', 'rU')
indelible_fasta = Utility.convert_fasta(infile.readlines()) # returns a List of (header, sequence) tuples
infile.close()
# read INDELible ancestor FASTA
# <output_filename_prefix>_<scaling_factor>_ANCESTRAL.fasta are fastas containing the INDELible inner node sequences of a phylogenetic tree
fh_in_anc_fasta_ = open(indelible_output_dir+ os.sep + scaling_factor + os.sep + indelible_filename_prefix + "." +scaling_factor+'_TRUE.fasta', 'rU')
indelible_anc_fasta = Utility.convert_fasta(fh_in_anc_fasta_.readlines()) # returns a List of (header, sequence) tuples
fh_in_anc_fasta_.close()
# if this is first time, transfer header
if len(new_fasta) == 0:
new_fasta = OrderedDict()
for h, s in indelible_fasta:
new_fasta[h] = ctypes.create_string_buffer(s)
for h, s in indelible_anc_fasta:
new_anc_fasta[h] = ctypes.create_string_buffer(s)
# Randomly select mutation scaling rate for each codon site
# Take codons from the sequences from the INDELible population matching that mutation scaling rate
random_codon_sites_1based = []
for i in range(0, num_codons_per_scaling):
random_codon_site = random_codons_queue.popleft() #0-based random codon sites for this mutation rate
random_codon_sites_1based.append(random_codon_site+1)
for h, s in indelible_fasta:
new_fasta[h][BASES_PER_CODON*random_codon_site : BASES_PER_CODON*(random_codon_site+1)] = s[BASES_PER_CODON*random_codon_site : BASES_PER_CODON*(random_codon_site+1)]
for h, s in indelible_anc_fasta:
new_anc_fasta[h][BASES_PER_CODON*random_codon_site : BASES_PER_CODON*(random_codon_site+1)] = s[BASES_PER_CODON*random_codon_site : BASES_PER_CODON*(random_codon_site+1)]
indelible_rates_csv = indelible_output_dir + os.sep + scaling_factor + os.sep + indelible_filename_prefix + "." + scaling_factor+'_RATES.csv'
with open(indelible_rates_csv, 'rU') as fh_rates_in:
reader = csv.DictReader(fh_rates_in) # Columns: Site Class Partition Inserted? Omega
for line in reader:
site_1based = int(line["Site"])
if site_1based in random_codon_sites_1based:
ratefile.write('%d,%s,%s,%s\n' % (site_1based, scaling_factor, line["Class"], line["Omega"]))
# output
out_fasta_fname = output_dir+ os.sep + output_filename_prefix + ".fasta"
out_anc_fasta_fname = output_dir+ os.sep + output_filename_prefix + ".anc.fasta"
with open(out_fasta_fname, 'w') as fh_out_fasta:
for h in new_fasta.iterkeys():
fh_out_fasta.write('>%s\n%s\n' % (h, new_fasta[h].value.rstrip())) # Need rstrip since extra whitespace added to .value
with open(out_anc_fasta_fname, 'w') as fh_out_anc_fasta:
for h in new_anc_fasta.iterkeys():
fh_out_fasta.write('>%s\n%s\n' % (h, new_anc_fasta[h].value.rstrip())) # Need rstrip since extra whitespace added to .value
# output consensus
Utility.write_consensus_from_msa(out_fasta_fname, out_fasta_fname.replace(".fasta", ".consensus.fasta"))
|
Python
| 0
|
@@ -4719,32 +4719,36 @@
%0A fh_out_
+anc_
fasta.write('%3E%25s
|
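Decoded, the four-character hunk points the ancestor loop at its own file handle instead of the tip-sequence handle, so the second with-block becomes:

with open(out_anc_fasta_fname, 'w') as fh_out_anc_fasta:
    for h in new_anc_fasta.iterkeys():
        fh_out_anc_fasta.write('>%s\n%s\n' % (h, new_anc_fasta[h].value.rstrip()))  # Need rstrip since extra whitespace added to .value
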
d2683b12f10eea662ae5bbad9e8a49857bcab599
|
version bump to 0.4
|
luigi_swf/__init__.py
|
luigi_swf/__init__.py
|
# Exports
from .executor import LuigiSwfExecutor
from .tasks import SwfHeartbeatCancel
__version__ = '0.3'
|
Python
| 0
|
@@ -104,7 +104,7 @@
'0.
-3
+4
'%0A
|
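Decoded, the hunk is the one-character version bump:

__version__ = '0.4'
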
7f3a93dea0eb683bf2d35110fbe921b88646c579
|
debug spacy init time
|
nalaf/features/parsing.py
|
nalaf/features/parsing.py
|
from textblob import TextBlob
from textblob.en.taggers import NLTKTagger
from textblob.en.np_extractors import FastNPExtractor
from nalaf.features import FeatureGenerator
from spacy.en import English
#import time
class SpacyPosTagger(FeatureGenerator):
"""
POS-tag a dataset using the Spacy Pos Tagger
"""
def __init__(self):
self.nlp = English()
def generate(self, dataset):
"""
:type dataset: nalaf.structures.data.Dataset
"""
for part in dataset.parts():
for sentence in part.sentences:
text_tokens = list(map(lambda x: x.word, sentence))
spacy_doc = self.nlp.tokenizer.tokens_from_list(text_tokens)
self.nlp.tagger(spacy_doc)
for token, spacy_token in zip(sentence, spacy_doc):
token.features['pos'] = spacy_token.pos_
token.features['tag'] = spacy_token.tag_
class NLKTPosTagger(FeatureGenerator):
"""
POS-tag a dataset using the NLTK Pos Tagger
See: https://textblob.readthedocs.org/en/dev/_modules/textblob/en/taggers.html#NLTKTagger
"""
def __init__(self):
self.tagger = NLTKTagger()
def generate(self, dataset):
"""
:type dataset: nalaf.structures.data.Dataset
"""
for part in dataset.parts():
for sentence in part.sentences:
text_tokens = list(map(lambda x : x.word, sentence))
tags = self.tagger.tag(text_tokens, tokenize=False)
for token, tag in zip(sentence, tags):
token.features['tag'] = tag[1]
class PosTagFeatureGenerator(FeatureGenerator):
"""
"""
def __init__(self):
self.punctuation = ['.', ',', ':', ';', '[', ']', '(', ')', '{', '}', '”', '“', '–', '"', '#', '?', '-']
def generate(self, dataset):
"""
:type dataset: nalaf.structures.data.Dataset
"""
for part in dataset.parts():
tags = TextBlob(part.text).tags
tag_index = 0
for sentence in part.sentences:
for token in sentence:
if token.word in self.punctuation:
token.features['tag'] = 'PUN'
else:
remember_index = tag_index
for word, tag in tags[tag_index:]:
if token.word in word:
token.features['tag'] = tag
tag_index += 1
break
tag_index = remember_index
class NounPhraseFeatureGenerator(FeatureGenerator):
"""
"""
def __init__(self):
self.extractor = FastNPExtractor()
def generate(self, dataset):
"""
:type dataset: nalaf.structures.data.Dataset
"""
for part in dataset.parts():
for sentence in part:
# get the chunk of text representing the sentence
joined_sentence = part.text[sentence[0].start:sentence[-1].start + len(sentence[-1].word)]
phrases = self.extractor.extract(joined_sentence)
for phrase in phrases:
# only consider real noun phrases that have more than 1 word
if ' ' in phrase:
# find the phrase offset in part text
phrase_start = part.text.find(phrase, sentence[0].start)
phrase_end = phrase_start + len(phrase)
# mark the tokens that are part of that phrase
for token in sentence:
if phrase_start <= token.start < token.end <= phrase_end:
token.features['is_nn'] = 1
|
Python
| 0
|
@@ -193,16 +193,46 @@
English%0A
+from nalaf import print_debug%0A
#import
@@ -379,27 +379,125 @@
-self.nlp = English(
+print_debug(%22SpacyPosTagger: INIT START%22)%0A self.nlp = English()%0A print_debug(%22SpacyPosTagger: INIT END%22
)%0A%0A
|
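Decoded, the hunks import nalaf's print_debug helper and bracket the English() construction so the tagger's startup cost shows up in the debug log:

from nalaf import print_debug

    def __init__(self):
        print_debug("SpacyPosTagger: INIT START")
        self.nlp = English()
        print_debug("SpacyPosTagger: INIT END")
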
beec55986440c5c7a4afdd556c743dd0d6bc3aa9
|
fix citation in random_expand
|
chainercv/transforms/image/random_expand.py
|
chainercv/transforms/image/random_expand.py
|
import numpy as np
import random
def random_expand(img, max_ratio=4, fill=0, return_param=False):
"""Expand an image randomly.
This method randomly place the input image on a larger canvas. The size of
the canvas is :math:`(rW, rH)`, where :math:`(W, H)` is the size of the
input image and :math:`r` is a random ratio drawn from
:math:`[1, max\_ratio]`. The canvas is filled by a value :obj:`fill`
except for the region where the original image is placed.
This data augmentation trick is used to create "zoom out" effect [1].
.. [1] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, \
Scott Reed, Cheng-Yang Fu, Alexander C. Berg. \
SSD: Single Shot MultiBox Detector. ECCV 2016.
Args:
img (~numpy.ndarray): An image array to be augmented. This is in
CHW format.
max_ratio (float): The maximum ratio of expansion. In the original
paper, this value is 4.
fill (float, tuple or ~numpy.ndarray): The value of padded pixels.
In the original paper, this value is the mean of ImageNet.
return_param (bool): Returns random parameters.
Returns:
~numpy.ndarray or (~numpy.ndarray, dict):
If :obj:`return_param = False`,
returns an array :obj:`out_img` that is the result of expansion.
If :obj:`return_param = True`,
returns a tuple whose elements are :obj:`out_img, param`.
:obj:`param` is a dictionary of intermediate parameters whose
contents are listed below with key, value-type and the description
of the value.
* **ratio** (*float*): The sampled value used to make the canvas.
* **x_offset** (*int*): The x coordinate of the top left corner\
of the image after placing on the canvas.
* **y_offset** (*int*): The y coodinate of the top left corner of\
the image after placing on the canvas.
"""
if max_ratio <= 1:
if return_param:
return img, {'ratio': 1, 'x_offset': 0, 'y_offset': 0}
else:
return img
C, H, W = img.shape
ratio = random.uniform(1, max_ratio)
out_H, out_W = int(H * ratio), int(W * ratio)
x_offset = random.randint(0, out_W - W)
y_offset = random.randint(0, out_H - H)
out_img = np.empty((C, out_H, out_W), dtype=img.dtype)
out_img[:] = np.array(fill).reshape(-1, 1, 1)
out_img[:, y_offset:y_offset + H, x_offset:x_offset + W] = img
if return_param:
param = {'ratio': ratio, 'x_offset': x_offset, 'y_offset': y_offset}
return out_img, param
else:
return out_img
|
Python
| 0.001224
|
@@ -547,18 +547,19 @@
effect %5B
-1%5D
+#%5D_
.%0A%0A .
@@ -561,17 +561,17 @@
.. %5B
-1
+#
%5D Wei Li
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.