| commit (string, 40 chars) | subject (string, 1–3.25k chars) | old_file (string, 4–311 chars) | new_file (string, 4–311 chars) | old_contents (string, 0–26.3k chars) | lang (string, 3 classes) | proba (float64, 0–1) | diff (string, 0–7.82k chars) |
|---|---|---|---|---|---|---|---|
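A note on reading the `diff` column: the hunk headers such as `@@ -192,16 +192,61 @@` appear to give character offsets rather than line numbers, and the changed text is percent-encoded (`%0A` = newline, `%22` = `"`, `%25` = `%`, `%5B`/`%5D` = `[`/`]`, `%7B`/`%7D` = `{`/`}`), consistent with the patch-text format used by diff-match-patch. A minimal way to make a hunk payload readable, using only the standard library:

```python
from urllib.parse import unquote

# first added line of the first record's diff, decoded
print(unquote("+from django.core.urlresolvers import reverse%0A"))
# -> +from django.core.urlresolvers import reverse
```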
d1b50f2f1d4ea3bf4b55c8997cba9fe47c1aa377
|
fix saving
|
api/models.py
|
api/models.py
|
"""
GRT Models
"""
from __future__ import unicode_literals
import os
import uuid as pyuuid
from django.conf import settings
from django.db import models
from django.contrib.auth.models import User
class GalaxyInstance(models.Model):
"""A single galaxy site. Corresponds to a single galaxy.ini"""
# Optional
url = models.URLField(null=True, help_text="Instance URL")
title = models.CharField(max_length=256, null=True, help_text="The name / title of the instance. E.g. GalaxyP")
description = models.TextField(null=True, help_text="Any extra description you wish to add.")
# The instance's information should be published. This will include a
# logo/domain name page for each instance.
users_recent = models.IntegerField(default=0)
users_total = models.IntegerField(default=0)
jobs_run = models.IntegerField(default=0)
jobs_total = models.IntegerField(default=0)
# Owner of this Galaxy instance
owners = models.ManyToManyField(User)
# API key for submitting results regarding this Galaxy instance
api_key = models.UUIDField(default=pyuuid.uuid4, editable=False)
# We import data on a cron job, we use this to track which was the most
# recent data file that we imported.
last_import = models.FloatField(default=-1)
@property
def report_dir(self):
instance_report_dir = os.path.join(settings.GRT_UPLOAD_DIRECTORY, str(self.id))
if not os.path.exists(instance_report_dir):
os.makedirs(instance_report_dir)
return instance_report_dir
def uploaded_reports(self):
"""Get the set of reports that have previously been uploaded to GRT"""
return [path.strip('.json') for path in os.listdir(self.report_dir) if path.endswith('.json')]
def new_reports(self):
"""Get reports that have not yet been imported."""
return [path for path in self.uploaded_reports() if float(path) > self.last_import]
def __str__(self):
return '%s <%s>' % (self.title, self.url)
class Job(models.Model):
"""
A single galaxy job
"""
# Galaxy Instance
instance = models.ForeignKey(GalaxyInstance)
# We store the job ID from their database in order to ensure that we do not
# create duplicate records.
external_job_id = models.IntegerField(default=-1)
# Other attributes
tool_id = models.CharField(max_length=255)
tool_version = models.TextField()
state = models.CharField(max_length=16)
create_time = models.DateTimeField(null=True, blank=True)
# Ensure that the combination of instance + external_job_id is unique. We
# don't want duplicate jobs skewing our results.
class Meta:
unique_together = (('instance', 'external_job_id'),)
class JobParam(models.Model):
"""
A given parameter within a job. For non-repeats, these are simple
(some_param, 10), for repeats and other more complex ones, this comes as a
giant JSON struct.
"""
instance = models.ForeignKey(GalaxyInstance)
external_job_id = models.IntegerField(default=-1)
name = models.CharField(max_length=256)
value = models.TextField()
class MetricNumeric(models.Model):
"""
Tuple of (name, type, value).
"""
instance = models.ForeignKey(GalaxyInstance)
external_job_id = models.IntegerField(default=-1)
plugin = models.CharField(max_length=256)
name = models.CharField(max_length=256)
value = models.DecimalField(max_digits=22, decimal_places=7)
class MetricText(models.Model):
"""
Tuple of (name, type, value).
"""
instance = models.ForeignKey(GalaxyInstance)
external_job_id = models.IntegerField(default=-1)
plugin = models.CharField(max_length=256)
name = models.CharField(max_length=256)
value = models.CharField(max_length=256)
|
Python
| 0.000001
|
@@ -192,16 +192,61 @@
rt User%0A
+from django.core.urlresolvers import reverse%0A
%0A%0Aclass
@@ -1988,16 +1988,124 @@
mport%5D%0A%0A
+ def get_absolute_url(self):%0A return reverse('galaxy-instance-detail', kwargs=%7B'slug': self.pk%7D)%0A%0A
def
|
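Decoded and re-indented (the whitespace is reconstructed, not in the hunk), the patch imports `reverse` (its pre-Django-2.0 location) and gives `GalaxyInstance` a `get_absolute_url` method:

```python
from django.core.urlresolvers import reverse

class GalaxyInstance(models.Model):
    ...
    def get_absolute_url(self):
        return reverse('galaxy-instance-detail', kwargs={'slug': self.pk})
```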
5768d1ebcfec46e564c8b420773d911c243327ff
|
Fix non-threadsafe failure in serializer - now using thread local serializer instance.
|
dddp/msg.py
|
dddp/msg.py
|
"""Django DDP utils for DDP messaging."""
import collections
from django.core.serializers import get_serializer
_SERIALIZER = None
def obj_change_as_msg(obj, msg):
"""Generate a DDP msg for obj with specified msg type."""
global _SERIALIZER
if _SERIALIZER is None:
_SERIALIZER = get_serializer('ddp')()
data = _SERIALIZER.serialize([obj])[0]
name = data['model']
# cast ID as string
if not isinstance(data['pk'], basestring):
data['pk'] = '%d' % data['pk']
payload = {
'msg': msg,
'collection': name,
'id': data['pk'],
}
if msg != 'removed':
payload['fields'] = data['fields']
return (name, payload)
|
Python
| 0
|
@@ -39,25 +39,44 @@
%22%22%22%0A
-import collection
+from dddp import THREAD_LOCAL as thi
s%0Afr
@@ -129,26 +129,107 @@
er%0A%0A
-_SERIALIZER = None
+%0Adef serializer_factory():%0A %22%22%22Make a new DDP serializer.%22%22%22%0A return get_serializer('ddp')()%0A
%0A%0Ade
@@ -329,81 +329,31 @@
-global _SERIALIZER%0A if _SERIALIZER is None:%0A _SERIALIZER = get_
+serializer = this.get('
seri
@@ -358,24 +358,37 @@
rializer
-('ddp')(
+', serializer_factory
)%0A da
@@ -396,19 +396,18 @@
a =
-_SERIALIZER
+serializer
.ser
|
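Decoded, the patch drops the shared module-global serializer, which is unsafe when several threads serialize concurrently, in favour of one serializer per thread fetched lazily from dddp's `THREAD_LOCAL` (the factory-on-miss behaviour of `this.get` is inferred from the hunk):

```python
from dddp import THREAD_LOCAL as this
from django.core.serializers import get_serializer

def serializer_factory():
    """Make a new DDP serializer."""
    return get_serializer('ddp')()

def obj_change_as_msg(obj, msg):
    # one serializer per thread instead of a shared module global
    serializer = this.get('serializer', serializer_factory)
    data = serializer.serialize([obj])[0]
    ...
```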
3676809c9d44abf8b48810b58488ba519c21254d
|
use new API for thawing deps
|
repository/netrepos/instructionsets.py
|
repository/netrepos/instructionsets.py
|
#
# Copyright (c) 2004 Specifix, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/cpl.php.
#
# This program is distributed in the hope that it will be useful, but
# without any warranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#
import deps.deps
class InstructionSets:
def __init__(self, db):
self.db = db
cu = self.db.cursor()
cu.execute("SELECT tbl_name FROM sqlite_master WHERE type='table'")
tables = [ x[0] for x in cu ]
if 'InstructionSets' not in tables:
cu.execute("CREATE TABLE InstructionSets(isnSetId integer primary key, base str, flags str)")
def _freezeIsd(self, isd):
frozen = isd.freeze()
split = frozen.split(' ', 1)
if len(split) > 1:
base, flags = split
# sort the flags
# XXX - beware of i18n changes in sort order
flags = flags.split(' ')
flags.sort()
flags = ' '.join(flags)
else:
base = split[0]
flags = None
return base, flags
def _thawIsd(self, base, flags):
if flags is not None:
frozen = " ".join((base, flags))
else:
frozen = base
return deps.deps.ThawDependency(frozen)
def addId(self, isd):
cu = self.db.cursor()
assert(isinstance(isd, deps.deps.Dependency))
base, flags = self._freezeIsd(isd)
cu.execute("INSERT INTO InstructionSets VALUES (NULL, ?, ?)",
(base, flags))
def delId(self, theId):
assert(type(theId) is int)
cu = self.db.cursor()
cu.execute("DELETE FROM InstructionSets WHERE isnSetId=?", (theId,))
def __delitem__(self, isd):
assert(isinstance(isd, deps.deps.Dependency))
base, flags = self._freezeIsd(isd)
cu = self.db.cursor()
query = "DELETE FROM InstructionSets WHERE base=? "
if flags is None:
query += "AND flags is NULL"
cu.execute(query, (base))
else:
query += "AND flags=?"
cu.execute(query, (base, flags))
def __getitem__(self, isd):
assert(isinstance(isd, deps.deps.Dependency))
base, flags = self._freezeIsd(isd)
cu = self.db.cursor()
query = "SELECT isnSetId from InstructionSets WHERE base=? AND "
if flags is None:
query += "flags IS NULL"
cu.execute(query, (base,))
else:
query += "flags=?"
cu.execute(query, (base, flags))
row = cu.fetchone()
if row is None:
raise KeyError, isd
return row[0]
def iterkeys(self):
cu = self.db.cursor()
cu.execute("SELECT base, flags from InstructionSets")
for row in cu:
yield self._thawIsd(row[0], row[1])
def itervalues(self):
cu = self.db.cursor()
cu.execute("SELECT isnSetId from InstructionSets")
for row in cu:
yield row[0]
def iteritems(self):
cu = self.db.cursor()
cu.execute("SELECT isnSetId, base, flags from InstructionSets")
for row in cu:
yield (self._thawIsd(row[1], row[2]), row[0])
def keys(self):
return [ x for x in self.iterkeys() ]
def values(self):
return [ x for x in self.itervalues() ]
def items(self):
return [ x for x in self.iteritems() ]
|
Python
| 0
|
@@ -1568,17 +1568,42 @@
ps.deps.
-T
+InstructionSetDependency.t
hawDepen
@@ -1615,24 +1615,16 @@
(frozen)
-
%0A %0A
|
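The hunk swaps the module-level `ThawDependency` call for the class-based API (plus a trailing-whitespace cleanup), leaving the rest of `_thawIsd` unchanged:

```python
def _thawIsd(self, base, flags):
    if flags is not None:
        frozen = " ".join((base, flags))
    else:
        frozen = base
    # old API: deps.deps.ThawDependency(frozen)
    return deps.deps.InstructionSetDependency.thawDependency(frozen)
```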
0a42ec9eeccc5969bf1eb8a92cd7d66ade4daf76
|
Make executable
|
ddgquery.py
|
ddgquery.py
|
import os, time
# use python3

while True:
    os.system("espeak -v en-us 'What would you like to know about?'")
    #time.sleep(4)
    query = input("What would you like to know about?\n")
    if query == "help":
        print("Add -u to get a helpful URL\nAdd -l to launch the URL in your browser\nAdd -s to get a DuckDuckGo search\nType 'about' to learn more.")
    elif query == "about":
        print("This uses the Duck Duck Go Zero-Click Info API. This program is written in Python by Merlin04.")
    elif query == "quit":
        break
    else:
        os.system('ddg ' + query + ' | espeak -v en-us')
|
Python
| 0.999996
|
@@ -1,8 +1,32 @@
+#! /usr/bin/env python%0A%0A
import o
|
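The hunk simply prepends a shebang so the script can be run directly; actually marking the file executable (e.g. `chmod +x ddgquery.py`) is implied by the subject, not part of the diff:

```python
#! /usr/bin/env python

import os, time
```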
40dca64ee4307669b80f8a2e4328176c50f179f2
|
use trailing slash for non-URL crumbs so sequential non-URL crumbs get a separator...this means there's always an ending slash (may fix that later)
|
repository/templatetags/breadcrumbs.py
|
repository/templatetags/breadcrumbs.py
|
# Based on http://djangosnippets.org/snippets/1289/
# with modifications to use bootstrap breadcrumb styles
from django import template
from django.template import Node, Variable
from django.utils.encoding import smart_unicode
from django.template.defaulttags import url
from django.template import VariableDoesNotExist
register = template.Library()
@register.tag
def breadcrumb(parser, token):
"""
Renders the breadcrumb.
Examples:
{% breadcrumb "Title of breadcrumb" url_var %}
{% breadcrumb context_var url_var %}
{% breadcrumb "Just the title" %}
{% breadcrumb just_context_var %}
Parameters:
- 1st parameter - title of the crumb,
- 2nd parameter(optional) - url to link to, produced by url tag, i.e.:
{% url person_detail object.id as person_url %}
then:
{% breadcrumb person.name person_url %}
@author Andriy Drozdyuk
"""
return BreadcrumbNode(token.split_contents()[1:])
@register.tag
def breadcrumb_url(parser, token):
"""
Same as breadcrumb
but instead of url context variable takes in all the
arguments URL tag takes.
{% breadcrumb "Title of breadcrumb" person_detail person.id %}
{% breadcrumb person.name person_detail person.id %}
"""
bits = token.split_contents()
if len(bits) == 2:
return breadcrumb(token)
# Extract our extra title parameter
title = bits.pop(1)
token.contents = ' '.join(bits)
url_node = url(parser, token)
return UrlBreadcrumbNode(title, url_node)
class BreadcrumbNode(Node):
def __init__(self, variables):
"""
First var is title, second var is url context variable
"""
self.vars = map(Variable, variables)
def render(self, context):
title = self.vars[0]
if title.var.find("'") == -1 and title.var.find('"') == -1:
try:
title = title.resolve(context)
except:
title = ''
else:
title = title.var.strip("'").strip('"')
title = smart_unicode(title)
url = None
if len(self.vars) > 1:
val = self.vars[1]
try:
url = val.resolve(context)
except VariableDoesNotExist:
print 'URL does not exist', val
url = None
return create_crumb(title, url)
class UrlBreadcrumbNode(Node):
def __init__(self, title, url_node):
self.title = Variable(title)
self.url_node = url_node
def render(self, context):
title = self.title.var
if title.find("'") == -1 and title.find('"') == -1:
try:
val = self.title
title = val.resolve(context)
except:
title = ''
else:
title = title.strip("'").strip('"')
title = smart_unicode(title)
url = self.url_node.render(context)
return create_crumb(title, url)
def create_crumb(title, url=None):
"""
Helper function
"""
if url:
crumb = "<li><a href='%s'>%s</a> <span class=\"divider\">/</span></li>" % (url, title)
else:
crumb = "<li>%s</li>" % (title)
return crumb
|
Python
| 0
|
@@ -3213,16 +3213,54 @@
%22%3Cli%3E%25s
+ %3Cspan class=%5C%22divider%5C%22%3E/%3C/span%3E
%3C/li%3E%22 %25
|
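Decoded, the change gives URL-less crumbs the same trailing separator that linked crumbs already had, so two non-URL crumbs in a row no longer run together:

```python
def create_crumb(title, url=None):
    if url:
        crumb = "<li><a href='%s'>%s</a> <span class=\"divider\">/</span></li>" % (url, title)
    else:
        # non-URL crumbs now also end with a separator
        crumb = "<li>%s <span class=\"divider\">/</span></li>" % (title)
    return crumb
```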
6c1e5e312996f83be3ed52a3e046cc135a3fd888
|
Add vhost_root to Bootstrap#upload_config_files
|
__init__.py
|
__init__.py
|
from fabric.api import local, run, sudo, prefix, cd, env
from fabric.contrib import django
from fabric.contrib.files import upload_template, append
from fabric.contrib.console import confirm
from fabric import tasks
from fabric.utils import abort, puts
from django.conf import settings
django.settings_module('settings.development')
if not hasattr(settings, 'FABRIC_DOMAIN'):
abort('You must set FABRIC_DOMAIN in your settings file')
env.domain = settings.FABRIC_DOMAIN
env.path = local('pwd')
env.user = 'cahoona'
env.hosts = ['92.63.136.213']
env.password = 'v-Fj9P@8'
# Allow fabric to restart apache2
env.always_use_pty = False
# @link https://bitbucket.org/dhellmann/virtualenvwrapper/issue/62/hooklog-permissions#comment-229798
# env.shell = '/bin/bash --noprofile -l -c'
class BaseTask(tasks.Task):
def run(self):
if not hasattr(env, 'name'):
abort('You must specify an environment to deploy to\n\n'\
'e.g. fab production prepare_environment')
def get_local_settings(self):
django.settings_module('settings.development')
return settings
def select_env(self, name='staging'):
env.name = name
env.repo = env.domain.replace('.', '_')
env.virtual_env = '$WORKON_HOME/%(domain)s' % env
django.settings_module('settings.%(name)s' % env)
class Production(BaseTask):
"""
Run tasks in production environment
"""
name = 'production'
def run(self):
return self.select_env('production')
class Staging(BaseTask):
"""
Run tasks in staging environment
"""
name = 'staging'
def run(self):
env.domain = 'staging.%(domain)s' % env
return self.select_env('staging')
class Bootstrap(BaseTask):
"""
Prepare a virtualenv with apache2 and nginx config files
"""
name = 'bootstrap'
def run(self):
super(Bootstrap, self).run()
self.create_virtualenv()
self.clone_git_repo()
self.create_folders()
self.upload_config_files()
def create_virtualenv(self):
sudo('chmod 777 $WORKON_HOME/hook.log')
run('mkvirtualenv %(domain)s' % env)
# append env vars to virtualenv activate file
append('%(virtual_env)s/bin/activate' % env, ['',
'export DJANGO_SETTINGS_MODULE=settings.%(name)s' % env,
'export PYTHONPATH=$VIRTUAL_ENV/project',
''])
def clone_git_repo(self):
# Checkout git repo from cahoona VM-3
with cd(env.virtual_env):
run('git clone git@92.63.136.209:%(repo)s.git project' % env)
def create_folders(self):
# Create dirs
vhost_root = '/var/www/vhosts/%(domain)s' % env
for folder in ['media', 'static', 'apache', 'log']:
run('mkdir -p %(vhost_root)s/%(folder)s' % locals())
# Permissions
for folder in ['media', 'log']:
run('chmod 777 %(vhost_root)s/%(folder)s' % locals())
def upload_config_files(self):
kwargs = dict(context=env, use_sudo=True, backup=False)
# wsgi
upload_template('%(real_fabfile)s/conf/wsgi.conf' % env,\
vhost_root + '/apache/%(name)s.wsgi' % env, **kwargs)
# apache
upload_template('%(real_fabfile)s/conf/apache.conf' % env,\
'/etc/apache2/sites-available/%(domain)s' % env, **kwargs)
sudo('a2ensite %(domain)s' % env)
sudo('service apache2 restart')
# nginx
upload_template('%(real_fabfile)s/conf/nginx.conf' % env,\
'/etc/nginx/sites-available/%(domain)s' % env, **kwargs)
sudo('ln -sf /etc/nginx/sites-available/%(domain)s '\
'/etc/nginx/sites-enabled/%(domain)s' % env)
sudo('service nginx restart')
class Deploy(BaseTask):
"""
Deploy project to remote server
"""
name = 'deploy'
def run(self, update_requirements=False, migrate=False, static=False):
super(Deploy, self).run()
sudo('chmod 777 $WORKON_HOME/hook.log')
with cd('%(virtual_env)s/project' % env):
run('git pull')
with prefix('source ../bin/activate'):
if update_requirements:
run('pip install -r REQUIREMENTS')
if migrate:
run('django-admin.py migrate --all')
if static:
run('django-admin.py collectstatic')
sudo('service apache2 graceful')
class Test(tasks.Task):
"""
Run tests locally
"""
name = 'test'
def run(self):
with cd(env.path):
local('django-admin.py validate')
local('django-admin.py test')
class CreateDatabase(BaseTask):
"""
Create and populate remote database (with syncdb)
"""
name = 'create_database'
def run(self, run_migrations=False):
remote_db_settings = settings.DATABASES['default']
sudo('mysql -u%(USER)s -p%(PASSWORD)s -e '\
'"CREATE DATABASE %(NAME)s"' % remote_db_settings)
with prefix('workon %(domain)s' % env):
migs = ' --migrate' if run_migrations else ''
run('django-admin.py syncdb --noinput %s' % migs)
class SyncDatabase(BaseTask):
"""
Sync local MYSQL database with remote database
"""
name = 'sync_database'
def run(self):
self.remote_db_settings = settings.DATABASES['default']
if not self.remote_db_settings['ENGINE'].endswith('mysql'):
abort('Command only possible with MySQL databases')
self.export_db()
self.import_db()
def export_db(self):
# Export remote database
run('mysqldump -u%(USER)s -p%(PASSWORD)s --databases %(NAME)s '\
'> /tmp/%(NAME)s.sql' % self.remote_db_settings)
local('scp %s@%s:/tmp/%s.sql /tmp/' % (
env.user, env.hosts[0],
self.remote_db_settings['NAME']))
def import_db(self):
# Import remote database to local
local_settings = self.get_local_settings()
local('mysql -p -h localhost %s < /tmp/%(NAME)s.sql' % (
local_settings.DATABASES['default']['NAME'],\
self.remote_db_settings['NAME']))
class VirtualenvPermission(tasks.Task):
name = 'virtualenv_permission'
def run(self):
sudo('chmod 777 $WORKON_HOME/hook.log')
virtualenv_permission = VirtualenvPermission()
production = Production()
staging = Staging()
bootstrap = Bootstrap()
deploy = Deploy()
test = Test()
create_database = CreateDatabase()
sync_database = SyncDatabase()
|
Python
| 0.000001
|
@@ -2997,32 +2997,88 @@
ig_files(self):%0A
+ vhost_root = '/var/www/vhosts/%25(domain)s' %25 env%0A
kwargs =
|
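In the old file `vhost_root` was a local variable of `create_folders`, so `upload_config_files` raised `NameError` when it built the wsgi path. The hunk defines it locally (indentation reconstructed):

```python
def upload_config_files(self):
    vhost_root = '/var/www/vhosts/%(domain)s' % env
    kwargs = dict(context=env, use_sudo=True, backup=False)
    ...
```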
f6b91953a5595b1f5a29a5bb69b3c9bc61e1b885
|
I am an idiot
|
api/result.py
|
api/result.py
|
"""
ApiResult class
"""
from datetime import datetime
from typing import Dict
from discord import Embed
from api import datetime_input_format, datetime_output_format, trim_string
class ApiResult(object):
"""
API Result object
"""
#taken from https://rpcs3.net/compatibility
STATUS_NOTHING = 0x455556
STATUS_LOADABLE = 0xe74c3c
STATUS_INTRO = 0xe08a1e
STATUS_INGAME = 0xf9b32f
STATUS_PLAYABLE = 0x1ebc61
STATUS_UNKNOWN = 0x3198ff
status_map = dict({
"Nothing": STATUS_NOTHING,
"Loadable": STATUS_LOADABLE,
"Intro": STATUS_INTRO,
"Ingame": STATUS_INGAME,
"Playable": STATUS_PLAYABLE
})
def __init__(self, game_id: str, data: Dict) -> None:
self.game_id = game_id
self.title = data["title"] if "title" in data else None
self.status = data["status"] if "status" in data else None
self.date = datetime.strptime(data["date"], datetime_input_format) if "date" in data else None
self.thread = data["thread"] if "thread" in data else None
self.commit = data["commit"] if "commit" in data else None
self.pr = data["pr"] if "pr" in data and data["pr"] is not 0 else """???"""
def to_string(self) -> str:
"""
Makes a string representation of the object.
:return: string representation of the object
"""
if self.status in self.status_map:
return ("ID:{:9s} Title:{:40s} PR:{:4s} Status:{:8s} Updated:{:10s}".format(
self.game_id,
trim_string(self.title, 40),
self.pr,
self.status,
datetime.strftime(self.date, datetime_output_format)
))
else:
return "Product code {} was not found in compatibility database, possibly untested!".format(self.game_id)
def to_embed(self, infoInFooter = True) -> Embed:
"""
Makes an Embed representation of the object.
:return: Embed representation of the object
"""
if self.status in self.status_map:
desc = "Status: {}, PR: {}, Updated: {}".format(
self.status,
self.pr if self.pr != "???" else """¯\_(ツ)_/¯""",
datetime.strftime(self.date, datetime_output_format))
result = Embed(
title="[{}] {}".format(self.game_id, trim_string(self.title, 200)),
url="https://forums.rpcs3.net/thread-{}.html".format(self.thread),
description = desc if not infoInFooter else None,
color=self.status_map[self.status])
if infoInFooter:
return result.set_footer(text=desc)
else:
return result
else:
desc = "No product id was found, log might be corrupted or tempered with"
if self.game_id is not None:
"Product code {} was not found in compatibility database, possibly untested!".format(self.game_id)
return Embed(
description=desc,
color=self.STATUS_UNKNOWN
)
|
Python
| 0.998675
|
@@ -2916,16 +2916,23 @@
+ desc =
%22Produc
|
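The one-word hunk explains the self-deprecating subject: in `to_embed`, the message for a known but untested `game_id` was formatted and then discarded, so the generic description was always shown. The patch assigns it:

```python
desc = "No product id was found, log might be corrupted or tampered with"
if self.game_id is not None:
    # previously this string was built and thrown away
    desc = "Product code {} was not found in compatibility database, possibly untested!".format(self.game_id)
```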
338e2ba155df0759113c65ced6be6714092b9aaf
|
Use Alex's awesome new version of the GtkQuartz theme engine
|
packages/gtk-quartz-engine.py
|
packages/gtk-quartz-engine.py
|
Package ('gtk-quartz-engine', 'master',
sources = [ 'git://github.com/jralls/gtk-quartz-engine.git' ],
override_properties = { 'configure':
'libtoolize --force --copy && '
'aclocal && '
'autoheader && '
'automake --add-missing && '
'autoconf && '
'./configure --prefix=%{prefix}'
}
)
|
Python
| 0
|
@@ -68,14 +68,16 @@
com/
-jralls
+nirvanai
/gtk
|
12130cef6c9b08e0928ed856972ace3c2000e6f8
|
Fix error accessing class variable
|
mooc_aggregator_restful_api/udacity.py
|
mooc_aggregator_restful_api/udacity.py
|
'''
This module retrieves the course catalog and overviews of the Udacity API
Link to Documentation:
https://s3.amazonaws.com/content.udacity-data.com/techdocs/UdacityCourseCatalogAPIDocumentation-v0.pdf
'''
import json
import requests
class UdacityAPI(object):
'''
This class defines attributes and methods for the Udacity API
'''
UDACITY_API_ENDPOINT = 'https://udacity.com/public-api/v0/courses'
def __init__(self):
self.response = requests.get(UDACITY_API_ENDPOINT)
self.courses = self.response.json()['courses']
self.tracks = self.response.json()['tracks']
def status_code(self):
'''
Return status code of response object
'''
return self.response.status_code
def get_courses(self):
'''
Return list of course objects for all courses offered by Udacity
'''
return self.courses
def get_tracks(self):
'''
Return list of tracks offered by Udacity
'''
return self.tracks
if __name__ == '__main__':
udacity_object = UdacityAPI()
print len(udacity_object.get_courses())
print udacity_object.get_courses()[0].keys()
|
Python
| 0.000004
|
@@ -472,16 +472,27 @@
sts.get(
+UdacityAPI.
UDACITY_
|
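Class attributes are not in a method's local or global scope in Python, so the bare name raised `NameError`; the patch qualifies it with the class (qualifying with `self.` would work equally well):

```python
class UdacityAPI(object):
    UDACITY_API_ENDPOINT = 'https://udacity.com/public-api/v0/courses'

    def __init__(self):
        self.response = requests.get(UdacityAPI.UDACITY_API_ENDPOINT)
```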
ecf685c157ac3b59641f0c4327d147b6c14ba17b
|
Remove debugging message.
|
djangofm/gobbler/models.py
|
djangofm/gobbler/models.py
|
# Libre.fm -- a free network service for sharing your music listening habits
#
# Copyright (C) 2009 Libre.fm Project
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import md5
from django.contrib import admin
from django.contrib.auth.models import get_hexdigest, User
from django.contrib.auth.admin import UserAdmin
from django.template.defaultfilters import slugify
from django.db import models
RATING_CHOICES = (
('L', 'Love'),
('B', 'Ban'),
('S', 'Skip')
)
SOURCE_CHOICES = (
('P', 'Chosen by the user'),
('R', 'Non-personalised broadcast'),
('E', 'Personalised recommendation except Last.fm'),
('L', 'Last.fm'),
('U', 'Unknown')
)
class GobblerUser(User):
def set_password(self, raw_password):
print "set password"
import random
algo = 'sha1'
salt = get_hexdigest(algo, str(random.random()), str(random.random()))[:5]
hsh = get_hexdigest(algo, salt, raw_password)
self.password = '%s$%s$%s' % (algo, salt, hsh)
pwd,created = Md5Password.objects.get_or_create(user=self)
pwd.password = md5.new(raw_password).hexdigest()
pwd.save()
def get_md5(self):
pwd = Md5Password.objects.get(user=self)
return pwd.password
class Meta:
proxy = True
admin.site.register(GobblerUser, UserAdmin)
class Artist(models.Model):
name = models.CharField(max_length=255, unique=True)
slug = models.SlugField(unique=True, editable=False)
def get_absolute_url(self):
return "/music/%s/" % (self.slug,)
def save(self, force_insert=False, force_update=False):
self.slug = slugify(self.name)
super(Artist, self).save(force_insert, force_update)
def __unicode__(self):
return self.name
class Album(models.Model):
name = models.CharField(max_length=256)
artist = models.ForeignKey(Artist)
slug = models.SlugField(editable=False)
def get_absolute_url(self):
return "%s%s/" % (self.artist.get_absolute_url(), self.slug)
def save(self, force_insert=False, force_update=False):
self.slug = slugify(self.name)
super(Album, self).save(force_insert, force_update)
def __unicode__(self):
return "%s by %s" % (self.name, self.artist)
class Meta:
unique_together = (('artist', 'name'), ('artist', 'slug'))
class Track(models.Model):
name = models.CharField(max_length=256)
track_number = models.PositiveSmallIntegerField(blank=True, null=True)
length = models.PositiveSmallIntegerField(blank=True)
album = models.ForeignKey(Album, null=True)
mbid = models.CharField(max_length=256, blank=True)
slug = models.SlugField(editable=False)
def get_absolute_url(self):
return "%s%s/" % (self.album.get_absolute_url(), self.name)
def save(self, force_insert=False, force_update=False):
self.slug = slugify(self.name)
super(Track, self).save(force_insert, force_update)
def __unicode__(self):
return "%s - %s" % (self.album.artist, self.name)
class Meta:
unique_together = ('album', 'slug')
class Session(models.Model):
user = models.OneToOneField(GobblerUser, primary_key=True)
key = models.CharField(max_length=32)
class Md5Password(models.Model):
user = models.OneToOneField(GobblerUser, primary_key=True)
password = models.CharField(max_length=32)
class NowPlaying(models.Model):
user = models.OneToOneField(GobblerUser, primary_key=True)
track = models.ForeignKey(Track)
def __unicode__(self):
return "%s is playing %s" % (self.user, self.track)
class Gobble(models.Model):
user = models.ForeignKey(GobblerUser)
track = models.ForeignKey(Track)
time = models.DateTimeField()
source = models.CharField(choices=SOURCE_CHOICES, max_length=1)
rating = models.CharField(choices=RATING_CHOICES, max_length=1,
blank=True)
length = models.PositiveSmallIntegerField()
@property
def artist(self):
return self.track.album.artist
def __unicode__(self):
return "%s at %s" % (self.user, self.time)
admin.site.register(Gobble)
|
Python
| 0.000001
|
@@ -1349,37 +1349,8 @@
d):%0A
- print %22set password%22%0A
|
8d8a2fb7f1ec5a14a329e8aaaa9dd1815981cf40
|
fix rounding error
|
motmot/realtime_image_analysis/slow.py
|
motmot/realtime_image_analysis/slow.py
|
import numpy
def do_bg_maint( running_mean_im,
hw_roi_frame,
max_frame_size,
ALPHA,
running_mean8u_im,
fastframef32_tmp,
running_sumsqf,
mean2,
std2,
running_stdframe,
n_sigma,
compareframe8u,
bright_non_gaussian_cutoff,
noisy_pixels_mask,
bright_non_gaussian_replacement,
bench=0):
"""
= Arguments =
FastImage.FastImage32f running_mean_im IO - current estimate of mean of x
FastImage.FastImage8u hw_roi_frame Input - current image
FastImage.Size max_frame_size Input - size of all images
float ALPHA Input
FastImage.FastImage8u running_mean8u_im Output
FastImage.FastImage32f fastframef32_tmp Output (temp/scratch)
FastImage.FastImage32f running_sumsqf IO - current estimate of mean of x^2
FastImage.FastImage32f mean2 Output - running_mean_im^2
FastImage.FastImage32f std2 Output - running_sumsqf-mean2
FastImage.FastImage32f running_stdframe Output - sqrt(std2)
float n_sigma Input
FastImage.FastImage8u compareframe8u Output
int bright_non_gaussian_cutoff Input
FastImage.FastImage8u noisy_pixels_mask Input
int bright_non_gaussian_replacement Input
int bench Input
= Returns =
Benchmarking information if bench != 0
"""
hw_roi_frame = numpy.asarray( hw_roi_frame )
running_mean_im = numpy.asarray( running_mean_im )
running_mean8u_im = numpy.asarray( running_mean8u_im )
fastframef32_tmp = numpy.asarray( fastframef32_tmp )
running_sumsqf = numpy.asarray( running_sumsqf )
mean2 = numpy.asarray( mean2 )
std2 = numpy.asarray( std2 )
running_stdframe = numpy.asarray( running_stdframe )
compareframe8u = numpy.asarray( compareframe8u )
# maintain running average
# <x>
running_mean_im[:,:] = (1-ALPHA)*running_mean_im + ALPHA*hw_roi_frame
running_mean8u_im[:,:] = running_mean_im.astype( numpy.uint8 )
# standard deviation calculation
fastframef32_tmp[:,:] = hw_roi_frame
# x^2
fastframef32_tmp[:,:] = fastframef32_tmp**2
# <x^2>
running_sumsqf[:,:] = (1-ALPHA)*running_sumsqf + ALPHA*fastframef32_tmp
### GETS SLOWER
# <x>^2
mean2[:,:] = running_mean_im**2
# <x^2> - <x>^2
std2[:,:] = running_sumsqf - mean2
# sqrt( |<x^2> - <x>^2| )
# clip
running_stdframe[:,:] = abs(std2)
running_stdframe[:,:] = numpy.sqrt( running_stdframe )
# now create frame for comparison
if n_sigma != 1.0:
running_stdframe[:,:] = n_sigma*running_stdframe
# XXX TODO: currently this ignores mask and non_gaussian stuff
compareframe8u[:,:] = running_stdframe.round()
|
Python
| 0.000765
|
@@ -2208,32 +2208,44 @@
ean8u_im%5B:,:%5D =
+numpy.round(
running_mean_im.
@@ -2247,29 +2247,8 @@
n_im
-.astype( numpy.uint8
)%0A%0A
|
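`astype(numpy.uint8)` truncates toward zero, biasing the 8-bit running mean low by up to one gray level; the patch rounds instead, and the cast to `uint8` still happens on assignment into the preallocated array:

```python
import numpy
x = numpy.array([1.2, 1.9])
x.astype(numpy.uint8)   # array([1, 1], dtype=uint8) -- truncation
numpy.round(x)          # array([1., 2.])            -- what the patch uses

running_mean8u_im[:, :] = numpy.round(running_mean_im)
```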
ac63fca5b1e688fb465431fd1760db6b1c766fea
|
Bump to version 0.14
|
openspending/_version.py
|
openspending/_version.py
|
__version__ = '0.13.1'
|
Python
| 0
|
@@ -15,9 +15,9 @@
'0.1
-3.1
+4.0
'%0A
|
b7b23f9840af377f37617f3bbb79556342d74133
|
replace prints with calls to logger
|
__main__.py
|
__main__.py
|
#/usr/bin/env python2

import web
import ConfigParser

_config_file_list = ['./jim.cfg', '/etc/jim.cfg']
_config_file_parser = ConfigParser.RawConfigParser()

_config_ok = True
try:
    _config_file_list = _config_file_parser.read(_config_file_list)
except:
    print("cannot parse configuration file(s)")
    _config_ok = False
if len(_config_file_list) == 0:
    print("no configuration file found")
    _config_ok = False
else:
    print("using configuration file {}".format(_config_file_list[0]))

if _config_ok:
    print("starting server")
    web.run_server()
    print("server exited")
|
Python
| 0
|
@@ -27,16 +27,27 @@
ort web%0A
+import log%0A
import C
@@ -59,16 +59,43 @@
Parser%0A%0A
+_log = log.TrivialLogger()%0A
_config_
@@ -292,21 +292,26 @@
pt:%0A
-print
+_log.error
(%22cannot
@@ -400,21 +400,26 @@
0:%0A
-print
+_log.error
(%22no con
@@ -475,21 +475,25 @@
se:%0A
-print
+_log.info
(%22using
@@ -546,16 +546,17 @@
t%5B0%5D))%0A%0A
+%0A
if _conf
@@ -566,21 +566,25 @@
ok:%0A
-print
+_log.info
(%22starti
@@ -624,13 +624,17 @@
-print
+_log.info
(%22se
|
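Decoded, the patch routes the messages through the project's own `log.TrivialLogger` (a module local to this codebase, per the hunk), mapping failures to `error` and progress messages to `info`:

```python
import web
import log
import ConfigParser

_log = log.TrivialLogger()
...
_log.error("cannot parse configuration file(s)")
_log.error("no configuration file found")
_log.info("using configuration file {}".format(_config_file_list[0]))
_log.info("starting server")
```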
ba97f70c2d213f92ce73d1e13921ff7c6bf35991
|
fix /compare/ link
|
__main__.py
|
__main__.py
|
import os
import subprocess
import sys
GIT_PATH = '/opt/'
def _open_pipe(cmd, cwd=None):
return subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=cwd)
def git_path(repo):
return os.path.join(GIT_PATH, repo)
def git(path, args):
stdout, stderr = _open_pipe(['git'] + args.split(' '),
cwd=path).communicate()
if stderr:
print stderr
return stderr or stdout
def github_url(team, repo, url=None):
return 'http://github.com/{team}/{repo}{url}'.format(
team=team, repo=repo, url=url or '')
def pbcopy(data):
"""Copy to clipboard on Mac OS X."""
print data
pb = _open_pipe(['pbcopy'])
pb.stdin.write(data)
pb.stdin.close()
return pb.wait()
def usage(args):
print 'Usage: {program} [command] [tag] [sha]? [repo]?\n'.format(
program=args[0])
print 'Commands:'
print '- append: Cherrypick a [sha] to [tag]'
print '- create: Create a new [tag]'
print '- delete: Delete an existing [tag]'
sys.exit(1)
if __name__ == '__main__':
args = [x.strip() for x in sys.argv]
if len(args) < 3:
usage(args)
cmd, tag = args[1:3]
try:
sha = args[3]
except IndexError:
if cmd == 'append':
usage(args)
repos = [x.strip() for x in open('repos').readlines()
if not x.strip().startswith('#')]
for repo in repos:
team, repo = repo.split('/')
if cmd == 'create':
path = git_path(repo)
# Fetch the latest tags
# (where origin is the upstream remote repo).
git(path, 'fetch --tags')
# Identify the latest tag.
tag_prev = git(path,
'for-each-ref refs/tags --sort=-authordate '
'--format="%(refname)" --count=1')
tag_prev = tag_prev.replace('refs/tags/', '').strip('\'"\n')
# Tag master.
git(path, 'tag %s' % tag)
# Point to the releases permalink page.
print '* Tagged {team}/{repo}: {tag}'.format(
team=team, repo=repo, tag=tag)
print github_url(team, repo, '/releases/{tag}'.format(tag=tag))
# Point to the tag comparison page.
pbcopy(github_url(team, repo,
'/compare/{previous_tag}/{tag}'.format(
team=team,
repo=repo,
previous_tag=tag_prev,
tag=tag
)
))
git(path, 'push --tags')
|
Python
| 0
|
@@ -2434,17 +2434,19 @@
ous_tag%7D
-/
+...
%7Btag%7D'.f
@@ -2456,98 +2456,8 @@
at(%0A
- team=team,%0A repo=repo,%0A
|
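GitHub's compare view separates refs with `...` (or `..`), not `/`, so the generated link 404ed; the patch also drops the `team`/`repo` kwargs that the format string never used:

```python
pbcopy(github_url(team, repo,
                  '/compare/{previous_tag}...{tag}'.format(
                      previous_tag=tag_prev,
                      tag=tag,
                  )))
```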
6a5a7a1e1eafa91543d8e274e63d258332149a29
|
Update __version__.py
|
orcidfind/__version__.py
|
orcidfind/__version__.py
|
# Single source of metadata about the project that's used by setup.py and
# docs/conf.py
# Some segments of public version identifer (PEP 440)
VERSION_RELEASE = "0.1"
VERSION_PRE_RELEASE = "a4" # e.g., "a4", "b1", "rc3" or "" (final release)
VERSION_POST_RELEASE = "" # e.g., ".post1"
VERSION = VERSION_RELEASE + VERSION_PRE_RELEASE + VERSION_POST_RELEASE
|
Python
| 0.000002
|
@@ -185,17 +185,17 @@
ASE = %22a
-4
+5
%22 # e
@@ -335,28 +335,29 @@
LEASE + VERSION_POST_RELEASE
+%0A
|
37899ad1e6120a14c053e09b3233a97fdfb9bb6d
|
add trash option
|
organize/actions/copy.py
|
organize/actions/copy.py
|
import logging
import os
import shutil
from organize.utils import Mapping, find_unused_filename, fullpath
from .action import Action
from .trash import Trash
logger = logging.getLogger(__name__)
CONFLICT_OPTIONS = ("rename_new", "rename_old", "skip", "overwrite")
class Copy(Action):
"""
Copy a file to a new location.
If the specified path does not exist it will be created.
:param str dest:
The destination where the file should be copied to.
If `dest` ends with a slash / backslash, the file will be copied into
this folder and keep its original name.
:param bool overwrite:
specifies whether existing files should be overwritten.
Otherwise it will start enumerating files (append a counter to the
filename) to resolve naming conflicts. [Default: False]
:param str counter_separator:
specifies the separator between filename and the appended counter.
Only relevant if **overwrite** is disabled. [Default: ``\' \'``]
Examples:
- Copy all pdfs into `~/Desktop/somefolder/` and keep filenames
.. code-block:: yaml
:caption: config.yaml
rules:
- folders: ~/Desktop
filters:
- extension: pdf
actions:
- copy: '~/Desktop/somefolder/'
- Use a placeholder to copy all .pdf files into a "PDF" folder and all .jpg
files into a "JPG" folder. Existing files will be overwritten.
.. code-block:: yaml
:caption: config.yaml
rules:
- folders: ~/Desktop
filters:
- extension:
- pdf
- jpg
actions:
- copy:
dest: '~/Desktop/{extension.upper}/'
overwrite: true
- Copy into the folder `Invoices`. Keep the filename but do not
overwrite existing files. To prevent overwriting files, an index is
added to the filename, so `somefile.jpg` becomes `somefile 2.jpg`.
The counter separator is `' '` by default, but can be changed using
the `counter_separator` property.
.. code-block:: yaml
:caption: config.yaml
rules:
- folders: ~/Desktop/Invoices
filters:
- extension:
- pdf
actions:
- copy:
dest: '~/Documents/Invoices/'
overwrite: false
counter_separator: '_'
"""
def __init__(
self, dest: str, on_conflict="rename_new", counter_separator=" "
) -> None:
if on_conflict not in CONFLICT_OPTIONS:
raise ValueError(
"on_conflict must be one of %s" % ", ".join(CONFLICT_OPTIONS)
)
self.dest = dest
self.on_conflict = on_conflict
self.counter_separator = counter_separator
def pipeline(self, args: Mapping) -> None:
path = args["path"]
simulate = args["simulate"]
expanded_dest = self.fill_template_tags(self.dest, args)
# if only a folder path is given we append the filename to have the full
# path. We use os.path for that because pathlib removes trailing slashes
if expanded_dest.endswith(("\\", "/")):
expanded_dest = os.path.join(expanded_dest, path.name)
new_path = fullpath(expanded_dest)
if new_path.exists() and not new_path.samefile(path):
if self.overwrite:
self.print("File already exists")
Trash().run(path=new_path, simulate=simulate)
else:
new_path = find_unused_filename(
path=new_path, separator=self.counter_separator
)
self.print('Copy to "%s"' % new_path)
if not simulate:
logger.info("Creating folder if not exists: %s", new_path.parent)
new_path.parent.mkdir(parents=True, exist_ok=True)
logger.info('Copying "%s" to "%s"', path, new_path)
shutil.copy2(src=str(path), dst=str(new_path))
# the next actions should handle the original file
return None
def __str__(self) -> str:
return "Copy(dest=%s, overwrite=%s)" % (self.dest, self.overwrite)
|
Python
| 0.000001
|
@@ -248,16 +248,25 @@
%22skip%22,
+ %22trash%22,
%22overwr
@@ -4388,24 +4388,26 @@
st=%25s, o
-verwrite
+n_conflict
=%25s)%22 %25
@@ -4424,18 +4424,20 @@
, self.o
-verwrite
+n_conflict
)%0A
|
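Besides adding `"trash"` to the accepted conflict modes, the hunk fixes `__str__`, which referenced a `self.overwrite` attribute that `__init__` never sets:

```python
CONFLICT_OPTIONS = ("rename_new", "rename_old", "skip", "trash", "overwrite")

def __str__(self) -> str:
    return "Copy(dest=%s, on_conflict=%s)" % (self.dest, self.on_conflict)
```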
9ffe02ac6f35b12952392585ab92cabf60234c50
|
fix potential issue with mutation
|
treeano/sandbox/sensitivity_analysis.py
|
treeano/sandbox/sensitivity_analysis.py
|
"""
from "Deep Inside Convolutional Networks: Visualising Image Classification
Models and Saliency Maps"
http://arxiv.org/abs/1312.6034
"""
import theano
import theano.tensor as T
import treeano
import canopy
class SensitivityAnalysisOutput(canopy.handlers.NetworkHandlerImpl):
"""
adds a new input and output to the network
- the input is an int that is an index into the logit
- the output is a tensor of the same shape as the input representing
the result of the sensitivity analysis
idx_input_key: key of the index
output_key: key to put the sensitivity analysis in the results
input_name: node name of the input in the network
logit_name: node name of the logit in the network
"""
def __init__(self, idx_input_key, output_key, input_name, logit_name):
self.idx_input_key = idx_input_key
self.output_key = output_key
self.input_name = input_name
self.logit_name = logit_name
def transform_compile_function_kwargs(self, state, **kwargs):
assert self.idx_input_key not in kwargs["inputs"]
assert self.output_key not in kwargs["outputs"]
network = state.network
input_var = network[self.input_name].get_vw("default").variable
logit_var = network[self.logit_name].get_vw("default").variable
idx_var = T.iscalar()
target_var = logit_var[:, idx_var].sum()
sensitivity_var = T.grad(target_var, input_var)
kwargs["inputs"][self.idx_input_key] = idx_var
kwargs["outputs"][self.output_key] = sensitivity_var
return kwargs
def sensitivity_analysis_fn(input_name,
logit_name,
network,
handlers,
inputs=None,
**kwargs):
"""
returns a function from input to sensitivity analysis heatmap
"""
handlers = [
SensitivityAnalysisOutput(idx_input_key="idx",
output_key="outputs",
input_name=input_name,
logit_name=logit_name),
canopy.handlers.override_hyperparameters(deterministic=True)
] + handlers
fn = canopy.handled_fn(network,
handlers=handlers,
inputs={"input": input_name},
outputs={})
def inner(in_val, idx_val):
return fn({"input": in_val, "idx": idx_val})["outputs"]
return inner
def customizable_sensitivity_analysis_fn(input_name,
logit_name,
network,
handlers,
inputs,
outputs=None,
*args,
**kwargs):
"""
returns a function from input to sensitivity analysis heatmap
takes in additional keys for "input" and "idx"
"""
if outputs is None:
outputs = {}
assert "outputs" not in outputs
handlers = [
SensitivityAnalysisOutput(idx_input_key="idx",
output_key="outputs",
input_name=input_name,
logit_name=logit_name),
canopy.handlers.override_hyperparameters(deterministic=True)
] + handlers
assert "input" not in inputs
assert "idx" not in inputs
inputs["input"] = input_name
fn = canopy.handled_fn(network,
handlers=handlers,
inputs=inputs,
outputs=outputs)
return fn
|
Python
| 0.000041
|
@@ -3552,24 +3552,100 @@
in inputs%0A%0A
+ # make a copy of inputs so that we can mutate%0A inputs = dict(inputs)%0A
inputs%5B%22
|
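`customizable_sensitivity_analysis_fn` wrote `inputs["input"] = input_name` into the dictionary the caller passed in; the patch copies it first so the caller's mapping is left untouched:

```python
assert "input" not in inputs
assert "idx" not in inputs
# make a copy of inputs so that we can mutate
inputs = dict(inputs)
inputs["input"] = input_name
```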
66d6bed7f79cb10f699b5fc72f2ab7011379e5c1
|
Fix this docstring
|
owapi/blizz_interface.py
|
owapi/blizz_interface.py
|
"""
Interface that uses Blizzard's pages as the source.
"""
import functools
import logging
import asyncio
import traceback
from lxml import etree
import aiohttp
from kyoukai.context import HTTPRequestContext
from owapi import util
B_BASE_URL = "https://playoverwatch.com/en-us/"
B_PAGE_URL = B_BASE_URL + "career/{platform}{region}/{btag}"
# The currently available specific regions.
AVAILABLE_REGIONS = ["/eu", "/us", "/kr"]
logger = logging.getLogger("OWAPI")
async def get_page_body(ctx: HTTPRequestContext, url: str, cache_time=300, cache_404=False) -> str:
"""
Downloads page body from MasterOverwatch and caches it.
"""
session = aiohttp.ClientSession(headers={"User-Agent": "OWAPI Scraper/1.0.1"})
async def _real_get_body(_, url: str):
# Real function.
logger.info("GET => {}".format(url))
async with session.get(url) as req:
assert isinstance(req, aiohttp.ClientResponse)
logger.info("GET => {} => {}".format(url, req.status))
if req.status != 200:
return None
return (await req.read()).decode()
result = await util.with_cache(ctx, _real_get_body, url, expires=cache_time, cache_404=cache_404)
session.close()
return result
def _parse_page(content: str) -> etree._Element:
"""
Internal function to parse a page and return the data.
"""
if content and content.lower() != 'none':
data = etree.HTML(content)
return data
async def get_user_page(ctx: HTTPRequestContext, battletag: str, platform: str = "pc", region: str = "us",
cache_time=300, cache_404=False) -> etree._Element:
"""
Downloads the BZ page for a user, and parses it.
"""
if platform != "pc":
region = ""
built_url = B_PAGE_URL.format(
region=region, btag=battletag.replace("#", "-"), platform=platform)
page_body = await get_page_body(ctx, built_url, cache_time=cache_time, cache_404=cache_404)
if not page_body:
return None
# parse the page
parse_partial = functools.partial(_parse_page, page_body)
loop = asyncio.get_event_loop()
parsed = await loop.run_in_executor(None, parse_partial)
return parsed
async def fetch_all_user_pages(ctx: HTTPRequestContext, battletag: str, *,
platform="pc"):
"""
Fetches all user pages for a specified user.
Returns a dictionary in the format of `{region: etree._Element | None}`.
"""
if platform != "pc":
coro = get_user_page(ctx, battletag, region="", platform=platform, cache_404=True)
result = await coro
if isinstance(result, etree._Element):
return {"any": result,
"eu": None, "us": None, "kr": None}
else:
return {"any": None,
"eu": None, "us": None, "kr": None}
futures = []
for region in AVAILABLE_REGIONS:
# Add the get_user_page coroutine.
coro = get_user_page(ctx, battletag, region=region, platform=platform, cache_404=True)
futures.append(coro)
# Gather all the futures to download in parallel.
results = await asyncio.gather(*futures, return_exceptions=True)
d = {"any": None}
for region, result in zip(AVAILABLE_REGIONS, results):
# Remove the `/` from the front of the region.
# This is used internally to make building the URL simpler.
region = region[1:]
# Make sure it's either a None or an element.
if isinstance(result, etree._Element):
d[region] = result
elif isinstance(result, Exception):
logger.error("Failed to fetch user page!\n{}".format(
traceback.format_exception(type(result), result, result.__traceback__)
))
d[region] = None
else:
d[region] = None
return d
async def region_helper_v2(ctx: HTTPRequestContext, battletag: str, platform="pc", region=None, extra=""):
"""
Downloads the correct page for a user in the right region.
This will return either (etree._Element, region) or (None, None).
"""
if region is None:
reg_l = ["/eu", "/us", "/kr"]
else:
if not region.startswith("/"):
# ugh
region = "/" + region
reg_l = [region]
for reg in reg_l:
# Get the user page.
page = await get_user_page(ctx, battletag, platform, reg, extra)
# Check if the page was returned successfully.
# If it was, return it.
if page is not None:
return page, reg[1:]
else:
# Since we continued without returning, give back the None, None.
return None, None
|
Python
| 0.999999
|
@@ -606,14 +606,12 @@
rom
-Master
+Play
Over
|
c212c2e19f325a3f7074db1993c4a737d62d2761
|
Add itertools.product as a filter (#32991)
|
lib/ansible/plugins/filter/mathstuff.py
|
lib/ansible/plugins/filter/mathstuff.py
|
# (c) 2014, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
import itertools
import math
from ansible import errors
from ansible.module_utils import basic
from ansible.module_utils.six.moves import zip, zip_longest
def unique(a):
if isinstance(a, collections.Hashable):
c = set(a)
else:
c = []
for x in a:
if x not in c:
c.append(x)
return c
def intersect(a, b):
if isinstance(a, collections.Hashable) and isinstance(b, collections.Hashable):
c = set(a) & set(b)
else:
c = unique([x for x in a if x in b])
return c
def difference(a, b):
if isinstance(a, collections.Hashable) and isinstance(b, collections.Hashable):
c = set(a) - set(b)
else:
c = unique([x for x in a if x not in b])
return c
def symmetric_difference(a, b):
if isinstance(a, collections.Hashable) and isinstance(b, collections.Hashable):
c = set(a) ^ set(b)
else:
c = unique([x for x in union(a, b) if x not in intersect(a, b)])
return c
def union(a, b):
if isinstance(a, collections.Hashable) and isinstance(b, collections.Hashable):
c = set(a) | set(b)
else:
c = unique(a + b)
return c
def min(a):
_min = __builtins__.get('min')
return _min(a)
def max(a):
_max = __builtins__.get('max')
return _max(a)
def logarithm(x, base=math.e):
try:
if base == 10:
return math.log10(x)
else:
return math.log(x, base)
except TypeError as e:
raise errors.AnsibleFilterError('log() can only be used on numbers: %s' % str(e))
def power(x, y):
try:
return math.pow(x, y)
except TypeError as e:
raise errors.AnsibleFilterError('pow() can only be used on numbers: %s' % str(e))
def inversepower(x, base=2):
try:
if base == 2:
return math.sqrt(x)
else:
return math.pow(x, 1.0 / float(base))
except (ValueError, TypeError) as e:
raise errors.AnsibleFilterError('root() can only be used on numbers: %s' % str(e))
def human_readable(size, isbits=False, unit=None):
''' Return a human readable string '''
try:
return basic.bytes_to_human(size, isbits, unit)
except:
raise errors.AnsibleFilterError("human_readable() can't interpret following string: %s" % size)
def human_to_bytes(size, default_unit=None, isbits=False):
''' Return bytes count from a human readable string '''
try:
return basic.human_to_bytes(size, default_unit, isbits)
except:
raise errors.AnsibleFilterError("human_to_bytes() can't interpret following string: %s" % size)
class FilterModule(object):
''' Ansible math jinja2 filters '''
def filters(self):
filters = {
# general math
'min': min,
'max': max,
# exponents and logarithms
'log': logarithm,
'pow': power,
'root': inversepower,
# set theory
'unique': unique,
'intersect': intersect,
'difference': difference,
'symmetric_difference': symmetric_difference,
'union': union,
# combinatorial
'permutations': itertools.permutations,
'combinations': itertools.combinations,
# computer theory
'human_readable': human_readable,
'human_to_bytes': human_to_bytes,
# zip
'zip': zip,
'zip_longest': zip_longest,
}
return filters
|
Python
| 0
|
@@ -4031,16 +4031,58 @@
atorial%0A
+ 'product': itertools.product,%0A
|
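The new filter maps straight onto `itertools.product`, alongside the existing combinatorial filters, so in a template it should be usable like `{{ ['a', 'b'] | product([1, 2]) | list }}` (that usage is illustrative, not from the diff):

```python
# combinatorial
'permutations': itertools.permutations,
'combinations': itertools.combinations,
'product': itertools.product,
```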
d032d2597525e02fd71a524c5a9619c09c640365
|
Bump version number.
|
coffin/__init__.py
|
coffin/__init__.py
|
"""
Coffin
~~~~~~
`Coffin <http://www.github.com/dcramer/coffin>` is a package that resolves the
impedance mismatch between `Django <http://www.djangoproject.com/>` and `Jinja2
<http://jinja.pocoo.org/2/>` through various adapters. The aim is to use Coffin
as a drop-in replacement for Django's template system to whatever extent is
reasonable.
:copyright: 2008 by Christopher D. Leary
:license: BSD, see LICENSE for more details.
"""
__all__ = ('__version__', '__build__', '__docformat__', 'get_revision')
__version__ = (0, 3, '6', 'dev')
__docformat__ = 'restructuredtext en'
import os
def _get_git_revision(path):
revision_file = os.path.join(path, 'refs', 'heads', 'master')
if not os.path.exists(revision_file):
return None
fh = open(revision_file, 'r')
try:
return fh.read()
finally:
fh.close()
def get_revision():
"""
:returns: Revision number of this branch/checkout, if available. None if
no revision number can be determined.
"""
package_dir = os.path.dirname(__file__)
checkout_dir = os.path.normpath(os.path.join(package_dir, '..'))
path = os.path.join(checkout_dir, '.git')
if os.path.exists(path):
return _get_git_revision(path)
return None
__build__ = get_revision()
|
Python
| 0
|
@@ -532,15 +532,8 @@
'6'
-, 'dev'
)%0A__
|
5261e7b75718b866f95285bd03171c861175dccc
|
Move question into random_questions function
|
collection/srvy.py
|
collection/srvy.py
|
#!/usr/bin/python
import sys
import time
from time import sleep
from datetime import datetime
import random
import sqlite3
import csv
from configparser import ConfigParser
try:
from gpiozero import Button
except ImportError:
print("gpiozero is not installed.")
pass
try:
import pygame
except ImportError:
print("pygame is not installed.")
pass
# VARIABLES
question_csv_location = '../archive/questions.csv'
sqlite_file = '../archive/srvy.db'
# FUNCTIONS
def module_installed(module):
if module in sys.modules:
return True
else:
return False
def get_current_questions(file_location):
"""Add each question from a text file to a list. Questions should be separated by newlines."""
with open(file_location, 'r') as csv_file:
readCSV = csv.reader(csv_file, delimiter=',', quotechar='"')
questions = []
for row in readCSV:
if row:
question = row[0]
questions.append(question)
return questions
def random_questions():
"""pulls returns a random question into main loop."""
return random.choice(question)
def add_response_to_database(question, opinion):
"""Add response to SQLite 3 database"""
conn = sqlite3.connect(sqlite_file)
c = conn.cursor()
current_date = datetime.now()
current_unix_time = time.time()
try:
c.execute('''INSERT INTO responses (pythonDateTime, unixTime, question, opinion) VALUES (?,?,?,?)''',
(current_date, current_unix_time, question, opinion))
print("Successfully added response to database.")
print("Thank you!")
except Exception as e:
print(e)
conn.commit()
conn.close()
main()
def main():
qs = random_questions() # calls questions function that returns random question.
print(qs)
while True:
opinion = input("Opinion [y/n]: ")
if opinion == "y":
sleep(.5)
opinion = 1
add_response_to_database(qs, opinion)
elif opinion == "n":
sleep(.5)
opinion = -1
add_response_to_database(qs, opinion)
question = get_current_questions(question_csv_location)
main()
|
Python
| 0.999999
|
@@ -1101,24 +1101,84 @@
in loop.%22%22%22%0A
+ question = get_current_questions(question_csv_location)%0A
return r
@@ -2231,64 +2231,8 @@
)%0A%0A%0A
-question = get_current_questions(question_csv_location)%0A
main
|
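Decoded, the patch moves the CSV load inside `random_questions`, so the question list is read where it is used instead of through a module-level global defined at the bottom of the file:

```python
def random_questions():
    """Returns a random question to the main loop."""
    question = get_current_questions(question_csv_location)
    return random.choice(question)
```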
93e8e63c3cf8d360af018b6ce3abe224b8ad374c
|
Add further testinfra tests
|
molecule/default/tests/test_default.py
|
molecule/default/tests/test_default.py
|
def test_apt_preferences_docker_compose_file(host):
    f = host.file("/etc/apt/preferences.d/docker-compose")
    assert f.exists
    assert f.is_file


def test_apt_preferences_docker_file(host):
    f = host.file("/etc/apt/preferences.d/docker")
    assert f.exists
    assert f.is_file
|
Python
| 0
|
@@ -263,28 +263,306 @@
exists%0A assert f.is_file%0A
+%0A%0Adef test_systemd_overlay_file(host):%0A f = host.file(%22/etc/systemd/system/docker.service.d/overlay.conf%22)%0A assert f.exists%0A assert f.is_file%0A%0A%0Adef test_limits_file(host):%0A f = host.file(%22/etc/security/limits.d/docker.conf%22)%0A assert f.exists%0A assert f.is_file%0A
|
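The added hunk is self-contained; decoded, it contributes two more testinfra checks in the same pattern as the existing ones:

```python
def test_systemd_overlay_file(host):
    f = host.file("/etc/systemd/system/docker.service.d/overlay.conf")
    assert f.exists
    assert f.is_file


def test_limits_file(host):
    f = host.file("/etc/security/limits.d/docker.conf")
    assert f.exists
    assert f.is_file
```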
e4442eb72a2a937c8f5bca9f6b2c479f9f15bb51
|
Fix #3826, sort all file lists in moz.build (#3827)
|
bin/update_mozbuild.py
|
bin/update_mozbuild.py
|
#!.venv/bin/python
import argparse
import os
import io
import re
skipFiles = [
"manifest.json.template"
]
def getFullFileList(outputLoc, dirName):
result = {dirName: []}
for entry in os.listdir(outputLoc):
if os.path.isdir(os.path.join(outputLoc, entry)):
result.update(getFullFileList(os.path.join(outputLoc, entry), os.path.join(dirName, entry)))
elif entry not in skipFiles:
if dirName:
result[dirName].append(os.path.join(dirName, entry))
else:
result[dirName].append(entry)
return result
def rewriteMozBuild(outputLoc, fileList):
mozBuildFile = os.path.join(outputLoc, "moz.build")
print "Rewriting %s" % mozBuildFile
with io.open(mozBuildFile, "r+", encoding="UTF-8") as buildFile:
contents = buildFile.read()
insertion_text = ''
for dir in sorted(fileList.keys()):
if not fileList[dir]:
continue
mozBuildPathName = '["' + '"]["'.join(dir.split(os.sep)) + '"]'
insertion_text += \
"FINAL_TARGET_FILES.features['screenshots@mozilla.org']%s += [\n" % mozBuildPathName + \
" '" + \
"',\n '".join(fileList[dir]) + "'\n]\n\n"
new_contents = re.sub(
'# AUTOMATIC INSERTION START(.*)# AUTOMATIC INSERTION END',
"# AUTOMATIC INSERTION START\n" +
insertion_text +
"# AUTOMATIC INSERTION END",
contents, 1, re.M | re.S)
buildFile.seek(0)
buildFile.truncate(0)
buildFile.write(new_contents)
def main(mcRepoPath, mcSubDir):
outputLoc = os.path.join(mcRepoPath, mcSubDir)
fileList = getFullFileList(os.path.join(outputLoc, "webextension"), "webextension")
rewriteMozBuild(outputLoc, fileList)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Screenshots script for managing update of moz.build in mozilla-central")
parser.add_argument("--mozilla-central-repo",
default=os.environ["EXPORT_MC_LOCATION"],
metavar="../gecko-dev",
help="A gecko directory reference to mozilla-central, can also "
"be specified via EXPORT_MC_LOCATION environment variable")
parser.add_argument("--mozilla-central-subdir",
default="browser/extensions/screenshots/",
help="Where the extension is located in mozilla-central.")
args = parser.parse_args()
main(mcRepoPath=args.mozilla_central_repo, mcSubDir=args.mozilla_central_subdir)
|
Python
| 0.000001
|
@@ -1242,16 +1242,23 @@
'%22.join(
+sorted(
fileList
@@ -1263,16 +1263,17 @@
st%5Bdir%5D)
+)
+ %22'%5Cn%5D
|
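`os.listdir` returns entries in arbitrary order, so the generated moz.build lists could churn between runs; the patch sorts each list at the point where it is joined (indentation reconstructed):

```python
insertion_text += \
    "FINAL_TARGET_FILES.features['screenshots@mozilla.org']%s += [\n" % mozBuildPathName + \
    "    '" + \
    "',\n    '".join(sorted(fileList[dir])) + "'\n]\n\n"
```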
574105cc7f383a107ff30a87e259be49aa0b5d21
|
Fix a misplaced line
|
msoffcrypto/method/ecma376_standard.py
|
msoffcrypto/method/ecma376_standard.py
|
import logging
import io
from hashlib import sha1
from struct import pack, unpack
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class ECMA376Standard:
def __init__(self):
pass
@staticmethod
def decrypt(key, ibuf):
r'''
Return decrypted data.
'''
obuf = io.BytesIO()
totalSize = unpack('<I', ibuf.read(4))[0]
logger.debug("totalSize: {}".format(totalSize))
ibuf.seek(8)
aes = Cipher(algorithms.AES(key), modes.ECB(), backend=default_backend())
decryptor = aes.decryptor()
x = ibuf.read()
dec = decryptor.update(x) + decryptor.finalize()
obuf.write(dec[:totalSize])
return obuf.getvalue() # return obuf.getbuffer()
@staticmethod
def verifykey(key, encryptedVerifier, encryptedVerifierHash):
r'''
Return True if the given intermediate key is valid.
>>> key = b'@\xb1:q\xf9\x0b\x96n7T\x08\xf2\xd1\x81\xa1\xaa'
>>> encryptedVerifier = b'Qos.\x96o\xac\x17\xb1\xc5\xd7\xd8\xcc6\xc9('
>>> encryptedVerifierHash = b'+ah\xda\xbe)\x11\xad+\xd3|\x17Ft\\\x14\xd3\xcf\x1b\xb1@\xa4\x8fNo=#\x88\x08r\xb1j'
>>> ECMA376Standard.verifykey(key, encryptedVerifier, encryptedVerifierHash)
True
'''
logger.debug([key, encryptedVerifier, encryptedVerifierHash])
# https://msdn.microsoft.com/en-us/library/dd926426(v=office.12).aspx
aes = Cipher(algorithms.AES(key), modes.ECB(), backend=default_backend())
decryptor = aes.decryptor()
verifier = decryptor.update(encryptedVerifier)
hash = sha1(verifier).digest()
decryptor = aes.decryptor()
verifierHash = decryptor.update(encryptedVerifierHash)[:sha1().digest_size]
        logger.debug([hash, verifierHash])
return hash == verifierHash
@staticmethod
def makekey_from_password(password, algId, algIdHash, providerType, keySize, saltSize, salt):
logger.debug([password, hex(algId), hex(algIdHash), hex(providerType), keySize, saltSize, salt])
r'''
Generate intermediate key from given password.
>>> password = 'Password1234_'
>>> algId = 0x660e
>>> algIdHash = 0x8004
>>> providerType = 0x18
>>> keySize = 128
>>> saltSize = 16
>>> salt = b'\xe8\x82fI\x0c[\xd1\xee\xbd+C\x94\xe3\xf80\xef'
>>> expected = b'@\xb1:q\xf9\x0b\x96n7T\x08\xf2\xd1\x81\xa1\xaa'
>>> ECMA376Agile.makekey_from_password(password, algId, algIdHash, providerType, keySize, saltSize, salt) == expected
True
'''
xor_bytes = lambda a, b: bytearray([p ^ q for p, q in zip(bytearray(a), bytearray(b))]) # bytearray() for Python 2 compat.
# https://msdn.microsoft.com/en-us/library/dd925430(v=office.12).aspx
ITER_COUNT = 50000
password = password.encode("UTF-16LE")
h = sha1(salt + password).digest()
for i in range(ITER_COUNT):
ibytes = pack("<I", i)
h = sha1(ibytes + h).digest()
block = 0
blockbytes = pack("<I", block)
hfinal = sha1(h + blockbytes).digest()
cbRequiredKeyLength = keySize // 8
cbHash = sha1().digest_size
buf1 = b"\x36" * 64
buf1 = xor_bytes(hfinal, buf1[:cbHash]) + buf1[cbHash:]
x1 = sha1(buf1).digest()
buf2 = b"\x5c" * 64
buf2 = xor_bytes(hfinal, buf2[:cbHash]) + buf2[cbHash:]
x2 = sha1(buf2).digest() # In spec but unused
x3 = x1 + x2
keyDerived = x3[:cbRequiredKeyLength]
logger.debug(keyDerived)
return keyDerived
|
Python
| 1
|
@@ -2141,113 +2141,8 @@
t):%0A
- logger.debug(%5Bpassword, hex(algId), hex(algIdHash), hex(providerType), keySize, saltSize, salt%5D)%0A
@@ -2588,13 +2588,16 @@
A376
-Agile
+Standard
.mak
@@ -2715,32 +2715,137 @@
rue%0A '''%0A
+ logger.debug(%5Bpassword, hex(algId), hex(algIdHash), hex(providerType), keySize, saltSize, salt%5D)%0A
xor_byte
|
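The misplaced line fixed above matters more than it looks: a string literal only becomes a function's docstring when it is the first statement in the body, so the logger.debug() call sitting above the triple-quoted string left __doc__ empty and hid the doctest from the test runner (the same commit also corrects the doctest to call ECMA376Standard rather than ECMA376Agile). A minimal self-contained illustration, not the library's code:
def f():
    x = 1  # any statement before the string literal...
    '''...turns this into a discarded expression, not a docstring'''
    return x
assert f.__doc__ is None  # so doctest collects nothing from f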
4992cda20695b79b6d238f2a448ba8aa713dcb2f
|
Fix error with package tags type
|
murano/packages/application_package.py
|
murano/packages/application_package.py
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imghdr
import io
import os
import sys
import zipfile
import murano.packages.exceptions as e
class PackageTypes(object):
Library = 'Library'
Application = 'Application'
ALL = [Library, Application]
class ApplicationPackage(object):
def __init__(self, source_directory, manifest, loader):
self.yaml_loader = loader
self._source_directory = source_directory
self._full_name = None
self._package_type = None
self._display_name = None
self._description = None
self._author = None
self._supplier = {}
self._tags = None
self._logo = None
self._format = manifest.get('Format')
self._logo_cache = None
self._supplier_logo_cache = None
self._blob_cache = None
@property
def full_name(self):
return self._full_name
@property
def package_type(self):
return self._package_type
@property
def display_name(self):
return self._display_name
@property
def description(self):
return self._description
@property
def author(self):
return self._author
@property
def supplier(self):
return self._supplier
@property
def tags(self):
return tuple(self._tags)
@property
def logo(self):
if not self._logo_cache:
self._load_logo(False)
return self._logo_cache
@property
def supplier_logo(self):
if not self._supplier_logo_cache:
self._load_supplier_logo(False)
return self._supplier_logo_cache
@property
def blob(self):
if not self._blob_cache:
self._blob_cache = _pack_dir(self._source_directory)
return self._blob_cache
def get_resource(self, name):
resources_dir = os.path.join(self._source_directory, 'Resources')
if not os.path.exists(resources_dir):
os.makedirs(resources_dir)
return os.path.join(resources_dir, name)
def validate(self):
self._load_logo(True)
self._load_supplier_logo(True)
def _load_logo(self, validate=False):
logo_file = self._logo or 'logo.png'
full_path = os.path.join(self._source_directory, logo_file)
if not os.path.isfile(full_path) and logo_file == 'logo.png':
self._logo_cache = None
return
try:
if validate:
if imghdr.what(full_path) != 'png':
raise e.PackageLoadError("Logo is not in PNG format")
with open(full_path) as stream:
self._logo_cache = stream.read()
except Exception as ex:
trace = sys.exc_info()[2]
raise e.PackageLoadError(
"Unable to load logo: " + str(ex)), None, trace
def _load_supplier_logo(self, validate=False):
if 'Logo' not in self._supplier:
self._supplier['Logo'] = None
logo_file = self._supplier['Logo'] or 'supplier_logo.png'
full_path = os.path.join(self._source_directory, logo_file)
if not os.path.isfile(full_path) and logo_file == 'supplier_logo.png':
del self._supplier['Logo']
return
try:
if validate:
if imghdr.what(full_path) != 'png':
raise e.PackageLoadError(
"Supplier Logo is not in PNG format")
with open(full_path) as stream:
self._supplier_logo_cache = stream.read()
except Exception as ex:
trace = sys.exc_info()[2]
raise e.PackageLoadError(
"Unable to load supplier logo: " + str(ex)), None, trace
def _zipdir(path, zipf):
for root, dirs, files in os.walk(path):
for f in files:
abspath = os.path.join(root, f)
relpath = os.path.relpath(abspath, path)
zipf.write(abspath, relpath)
def _pack_dir(source_directory):
blob = io.BytesIO()
zipf = zipfile.ZipFile(blob, mode='w')
_zipdir(source_directory, zipf)
zipf.close()
return blob.getvalue()
|
Python
| 0.000154
|
@@ -1854,13 +1854,12 @@
urn
-tuple
+list
(sel
|
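Decoded, the one-word fix changes the tags property to return a list instead of a tuple:
@property
def tags(self):
    return list(self._tags)
A plausible motivation, though the commit message does not spell it out, is that callers serializing the package expect a JSON-style list rather than a tuple.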
06883c488182ef23a84b75cbad47e05e63a8ceb6
|
Use agent 8 signature (#9516)
|
activemq_xml/datadog_checks/activemq_xml/activemq_xml.py
|
activemq_xml/datadog_checks/activemq_xml/activemq_xml.py
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from xml.etree import ElementTree
import requests
from six import iteritems
from datadog_checks.base import AgentCheck
from datadog_checks.base.config import _is_affirmative
QUEUE_URL = "/admin/xml/queues.jsp"
TOPIC_URL = "/admin/xml/topics.jsp"
SUBSCRIBER_URL = "/admin/xml/subscribers.jsp"
TOPIC_QUEUE_METRICS = {
"consumerCount": "consumer_count",
"dequeueCount": "dequeue_count",
"enqueueCount": "enqueue_count",
"size": "size",
}
SUBSCRIBER_TAGS = ["connectionId", "subscriptionName", "destinationName", "selector", "active"]
MAX_ELEMENTS = 300
class ActiveMQXML(AgentCheck):
def check(self, instance):
url = instance.get("url")
custom_tags = instance.get('tags', [])
max_queues = int(instance.get("max_queues", MAX_ELEMENTS))
max_topics = int(instance.get("max_topics", MAX_ELEMENTS))
max_subscribers = int(instance.get("max_subscribers", MAX_ELEMENTS))
detailed_queues = instance.get("detailed_queues", [])
detailed_topics = instance.get("detailed_topics", [])
detailed_subscribers = instance.get("detailed_subscribers", [])
suppress_errors = _is_affirmative(instance.get("suppress_errors", False))
tags = custom_tags + ["url:{0}".format(url)]
self.log.debug("Processing ActiveMQ data for %s", url)
data = self._fetch_data(url, QUEUE_URL, suppress_errors)
if data:
self._process_data(data, "queue", tags, max_queues, detailed_queues)
data = self._fetch_data(url, TOPIC_URL, suppress_errors)
if data:
self._process_data(data, "topic", tags, max_topics, detailed_topics)
data = self._fetch_data(url, SUBSCRIBER_URL, suppress_errors)
if data:
self._process_subscriber_data(data, tags, max_subscribers, detailed_subscribers)
def _fetch_data(self, base_url, xml_url, suppress_errors):
url = "%s%s" % (base_url, xml_url)
self.log.debug("ActiveMQ Fetching queue data from: %s", url)
try:
r = self.http.get(url)
r.raise_for_status()
except requests.exceptions.ConnectionError:
if suppress_errors:
self.log.warning("ActiveMQ not contactable, but suppressing the error due to configuration.")
return False
else:
raise
return r.text
def _process_data(self, data, el_type, tags, max_elements, detailed_elements):
root = ElementTree.fromstring(data)
# if list provided in config, only send those metrics
if detailed_elements:
elements = [e for e in root.findall(el_type) if e.get('name') in detailed_elements]
else:
elements = [e for e in root.findall(el_type) if e.get('name')]
count = len(elements)
if count > max_elements:
if not detailed_elements:
self.warning(
"Number of %s is too high (%s > %s). "
"Please use the detailed_%ss parameter"
" to list the %s you want to monitor.",
el_type,
count,
max_elements,
el_type,
el_type,
)
for el in elements[:max_elements]:
name = el.get("name")
stats = el.find("stats")
if stats is None:
continue
el_tags = tags + ["{0}:{1}".format(el_type, name)]
for attr_name, alias in iteritems(TOPIC_QUEUE_METRICS):
metric_name = "activemq.{0}.{1}".format(el_type, alias)
value = stats.get(attr_name, 0)
self.gauge(metric_name, value, tags=el_tags)
self.log.debug("ActiveMQ %s count: %s", el_type, count)
self.gauge("activemq.{0}.count".format(el_type), count, tags=tags)
def _process_subscriber_data(self, data, tags, max_subscribers, detailed_subscribers):
root = ElementTree.fromstring(data)
# if subscribers list provided in config, only send those metrics
if detailed_subscribers:
subscribers = [s for s in root.findall("subscriber") if s.get("clientId") in detailed_subscribers]
else:
subscribers = [s for s in root.findall("subscriber") if s.get("clientId")]
count = len(subscribers)
if count > max_subscribers:
if not detailed_subscribers:
                self.warning(
                    "Number of subscribers is too high (%s > %s). "
                    "Please use the detailed_subscribers parameter "
                    "to list the subscribers you want to monitor.",
                    count,
                    max_subscribers,
                )
for subscriber in subscribers[:max_subscribers]:
clientId = subscriber.get("clientId")
if not clientId:
continue
subscribers.append(clientId)
stats = subscriber.find("stats")
if stats is None:
continue
el_tags = tags + ["clientId:{0}".format(clientId)]
for name in SUBSCRIBER_TAGS:
value = subscriber.get(name)
if value is not None:
el_tags.append("%s:%s" % (name, value))
pending_queue_size = stats.get("pendingQueueSize", 0)
dequeue_counter = stats.get("dequeueCounter", 0)
enqueue_counter = stats.get("enqueueCounter", 0)
dispatched_queue_size = stats.get("dispatchedQueueSize", 0)
dispatched_counter = stats.get("dispatchedCounter", 0)
self.log.debug(
"ActiveMQ Subscriber %s: %s %s %s %s %s",
clientId,
pending_queue_size,
dequeue_counter,
enqueue_counter,
dispatched_queue_size,
dispatched_counter,
)
self.gauge("activemq.subscriber.pending_queue_size", pending_queue_size, tags=el_tags)
self.gauge("activemq.subscriber.dequeue_counter", dequeue_counter, tags=el_tags)
self.gauge("activemq.subscriber.enqueue_counter", enqueue_counter, tags=el_tags)
self.gauge("activemq.subscriber.dispatched_queue_size", dispatched_queue_size, tags=el_tags)
self.gauge("activemq.subscriber.dispatched_counter", dispatched_counter, tags=el_tags)
self.log.debug("ActiveMQ Subscriber Count: %s", count)
self.gauge("activemq.subscriber.count", count, tags=tags)
|
Python
| 0
|
@@ -733,24 +733,17 @@
k(self,
-instance
+_
):%0A
@@ -751,16 +751,21 @@
url =
+self.
instance
@@ -794,24 +794,29 @@
stom_tags =
+self.
instance.get
@@ -845,32 +845,37 @@
ax_queues = int(
+self.
instance.get(%22ma
@@ -917,32 +917,37 @@
ax_topics = int(
+self.
instance.get(%22ma
@@ -1002,16 +1002,21 @@
s = int(
+self.
instance
@@ -1076,24 +1076,29 @@
ed_queues =
+self.
instance.get
@@ -1143,24 +1143,29 @@
ed_topics =
+self.
instance.get
@@ -1219,16 +1219,21 @@
ibers =
+self.
instance
@@ -1307,16 +1307,21 @@
rmative(
+self.
instance
|
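Decoded, the hunks migrate check() to the agent 8 signature: the positional argument is ignored and every configuration read goes through self.instance. The shape of the change, per the diff:
class ActiveMQXML(AgentCheck):
    def check(self, _):  # agent 8 signature; the argument is unused
        url = self.instance.get("url")
        custom_tags = self.instance.get('tags', [])
        max_queues = int(self.instance.get("max_queues", MAX_ELEMENTS))
        # ...and likewise for the remaining instance.get() calls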
8dc853e90b587b9245b87c14f5cb2e93215d3283
|
Change test_output data structure to dict of dict
|
tests/convergence_tests/sphere_lspr.py
|
tests/convergence_tests/sphere_lspr.py
|
from pygbe.util import an_solution
from convergence_lspr import (mesh_ratio, run_convergence, picklesave, pickleload,
report_results, mesh)
def main():
print('{:-^60}'.format('Running sphere_lspr test'))
try:
test_outputs = pickleload()
except FileNotFoundError:
test_outputs = {}
problem_folder = 'input_files'
# dirichlet_surface
param = 'sphere_complex.param'
test_name = 'sphere_complex'
if test_name not in test_outputs.keys():
N, iterations, expected_rate, Cext_0, Time = run_convergence(
mesh, test_name, problem_folder, param)
test_outputs[test_name] = [N, iterations, expected_rate, Cext_0, Time]
picklesave(test_outputs)
# load data for analysis
N, iterations, expected_rate = test_outputs['sphere_complex'][:3]
Cext_0 = test_outputs['sphere_complex'][3]
Time = test_outputs['sphere_complex'][-1]
total_time = Time
#This test is for 10 nm radius silver sphere in water, at wavelength 380 nm
radius = 10.
wavelength = 380.
diel_out = 1.7972083599999999 + 1j * 8.504766399999999e-09 #water value extrapolated
diel_in = -3.3876520488233184 + 1j * 0.19220746083441781 #silver value extrapolated
analytical = an_solution.Cext_analytical(radius, wavelength, diel_out, diel_in)
error = abs(Cext_0 - analytical) / abs(analytical)
report_results(error,
N,
expected_rate,
iterations,
Cext_0,
analytical,
total_time,
test_name='sphere_complex')
if __name__ == "__main__":
main()
|
Python
| 0.000311
|
@@ -665,131 +665,344 @@
%5D =
-%5BN, iterations, expected_rate, Cext_0, Time%5D%0A%0A picklesave(test_outputs)%0A%0A # load data for analysis%0A N, iterations,
+%7B'N': N, 'iterations': iterations,%0A 'expected_rate': expected_rate, 'Cext_0': Cext_0,%0A 'Time': Time%7D %0A picklesave(test_outputs)%0A%0A # load data for analysis%0A N = test_outputs%5B'sphere_complex'%5D%5B'N'%5D%0A iterations = test_outputs%5B'sphere_complex'%5D%5B'iterations'%5D%0A
exp
@@ -1049,10 +1049,23 @@
x'%5D%5B
-:3
+'expected_rate'
%5D%0A
@@ -1106,17 +1106,24 @@
mplex'%5D%5B
-3
+'Cext_0'
%5D%0A Ti
@@ -1162,10 +1162,14 @@
x'%5D%5B
--1
+'Time'
%5D%0A%0A
|
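Decoded, each test entry becomes a dict keyed by field name instead of a positional list, so the analysis code indexes by name rather than by magic offsets like [:3], [3] and [-1]:
test_outputs[test_name] = {'N': N, 'iterations': iterations,
                           'expected_rate': expected_rate, 'Cext_0': Cext_0,
                           'Time': Time}
N = test_outputs['sphere_complex']['N']
iterations = test_outputs['sphere_complex']['iterations']
expected_rate = test_outputs['sphere_complex']['expected_rate']
Cext_0 = test_outputs['sphere_complex']['Cext_0']
Time = test_outputs['sphere_complex']['Time']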
cd4aa7364c82a464f0cd29ad45431e00c413b3f2
|
return utc timestamps
|
pagecountssearch/search.py
|
pagecountssearch/search.py
|
import argparse
import collections
import datetime
import functools
import gzip
import io
import itertools
import sys
from urllib.parse import quote, unquote
import pathlib
from .sortedcollection import SortedCollection
__all__ = ('Finder', 'search', 'build_index')
DATETIME_PATTERN = '%Y%m%d-%H%M%S'
Record = collections.namedtuple(
'Record',
'project page timestamp count bytes_trans',
)
IndexRecord = collections.namedtuple(
'IndexRecord',
'file_name project page',
)
class Finder:
"""Look for the given entry in the dataset."""
def __init__(self, source_dir, index_path=None, auto_index=True):
"""Initialize the search engine."""
self.source_dir = source_dir
if index_path is None:
index_path = default_index_path(source_dir)
if not index_path.exists() and auto_index:
print('Building index... ', end='', flush=True, file=sys.stderr)
build_index(source_dir, index_path)
print('Done!', flush=True, file=sys.stderr)
if not index_path.exists():
raise ValueError('Index file does not exists.')
self.curr_file_path = None
self.curr_file = None
self.curr_iter = None
self.last_key = None
self.index = read_index(index_path)
def slow_search(self, project, page):
"""Search for the given project and page."""
return search(self.source_dir, self.index, project, page)
def _switch_file(self, new_file_path):
if self.curr_file:
self.curr_file.close()
self.curr_file_path = new_file_path
self.curr_file = gzip.open(str(new_file_path), 'rt', encoding='utf-8')
@functools.lru_cache(1)
def search(self, project, page):
part_file_name = self.index.find_le((project, page)).file_name
part_file_path = self.source_dir/part_file_name
file_changed = self.curr_file_path != part_file_path
if file_changed or self.last_key >= (project, page):
self._switch_file(part_file_path)
self.curr_iter = parse_and_group_records(self.curr_file)
for key, records_group in self.curr_iter:
self.last_key = key
if key < (project, page):
continue
if key > (project, page):
break
return [
(timestamp, count, bytes_trans)
for _, _, timestamp, count, bytes_trans in records_group
]
return []
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.curr_file:
self.curr_file.close()
self.curr_file = None
self.curr_file_path = None
def parse_and_group_records(lines):
records = (parse_line(line) for line in lines)
grouped_records = itertools.groupby(
records,
key=lambda record: (record[0], record[1]),
)
return grouped_records
def default_index_path(source_dir):
return source_dir/'index'
def parse_timestamp(timestamp):
return datetime.datetime.strptime(timestamp, DATETIME_PATTERN)
parse_timestamp_cached = functools.lru_cache(maxsize=10000)(parse_timestamp)
quote_cached = functools.lru_cache(maxsize=10000)(quote)
unquote_cached = functools.lru_cache(maxsize=10000)(unquote)
def parse_line(line):
project, page, timestamp, count, bytes_trans = line[:-1].split(' ')
page = unquote_cached(page)
timestamp = parse_timestamp_cached(timestamp)
count = int(count)
bytes_trans = int(bytes_trans)
return Record(project, page, timestamp, count, bytes_trans)
def parse_index_line(line):
part_file_path, project, page = line[:-1].split(' ')
page = unquote(page)
return IndexRecord(part_file_path, project, page)
def search(source_dir, index, project, page):
part_file_name = index.find_le((project, page)).file_name
part_file_path = source_dir/part_file_name
part_file = gzip.open(str(part_file_path), 'rt', encoding='utf-8')
with part_file:
records = (parse_line(line) for line in part_file)
grouped_records = itertools.groupby(
records,
key=lambda record: (record[0], record[1]),
)
for key, records_group in grouped_records:
if key < (project, page):
continue
if key > (project, page):
break
return [
(timestamp, count, bytes_trans)
for _, _, timestamp, count, bytes_trans in records_group
]
return []
def build_index(source_dir, output_path):
with output_path.open('wt', encoding='utf-8') as index_f:
for part_file_path in sorted(source_dir.glob('part-*.gz')):
part_file = gzip.open(str(part_file_path), 'rt', encoding='utf-8')
with part_file:
line = part_file.readline()
record = parse_line(line)
print(
part_file_path.name,
record.project,
quote(record.page),
file=index_f)
def read_index(index_path):
index_file = index_path.open('rt', encoding='utf-8')
with index_file:
return SortedCollection(
(parse_index_line(l) for l in index_file),
key=lambda x: x[1:]
)
def parse_args():
parser = argparse.ArgumentParser(
prog="pagecounts-search",
description="Search through sorted wikimedia pagecounts data.",
)
parser.add_argument(
'source_dir',
metavar='SOURCE_DIR',
type=pathlib.Path,
help="Directory containing pagecounts data files",
)
parser.add_argument(
'--index-path', '-i',
metavar='INDEX_PATH',
required=False,
type=pathlib.Path,
help="Index file path",
)
subparsers = parser.add_subparsers(
help='sub-commands help',
dest='command')
search_parser = subparsers.add_parser(
'search',
help='Search for the given entry',
)
search_parser.add_argument(
'project',
)
search_parser.add_argument(
'page',
)
build_index_parser = subparsers.add_parser(
'build-index',
help='Build the index for the dataset',
)
return parser.parse_args()
def main():
args = parse_args()
if args.index_path is None:
args.index_path = default_index_path(args.source_dir)
if args.command == 'search':
f = Finder(args.source_dir, args.index_path)
for timestamp, count, bytes_trans in f.search(args.project, args.page):
print(
timestamp.strftime(DATETIME_PATTERN),
count,
bytes_trans,
)
if args.command == 'build-index':
build_index(args.source_dir, args.index_path)
if __name__ == '__main__':
main()
|
Python
| 0.999999
|
@@ -77,18 +77,8 @@
zip%0A
-import io%0A
impo
@@ -3058,22 +3058,33 @@
p):%0A
-return
+naive_timestamp =
datetim
@@ -3131,16 +3131,107 @@
PATTERN)
+%0A timestamp = naive_timestamp.replace(tzinfo=datetime.timezone.utc)%0A return timestamp
%0A%0Aparse_
|
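Decoded, parse_timestamp() now attaches an explicit UTC zone. datetime.strptime() parses to a naive datetime, and replace(tzinfo=datetime.timezone.utc) relabels rather than converts, which assumes the on-disk timestamps are already expressed in UTC, consistent with the commit subject. The unused io import is dropped in the same commit:
def parse_timestamp(timestamp):
    naive_timestamp = datetime.datetime.strptime(timestamp, DATETIME_PATTERN)
    timestamp = naive_timestamp.replace(tzinfo=datetime.timezone.utc)
    return timestamp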
0ac869ce67017c9ffb8a8b32ff57346980144371
|
use global es in reindexers
|
corehq/ex-submodules/pillowtop/reindexer/reindexer.py
|
corehq/ex-submodules/pillowtop/reindexer/reindexer.py
|
from pillowtop.es_utils import set_index_reindex_settings, \
set_index_normal_settings, get_index_info_from_pillow, initialize_mapping_if_necessary
from pillowtop.pillow.interface import PillowRuntimeContext
class PillowReindexer(object):
def __init__(self, pillow, change_provider):
self.pillow = pillow
self.change_provider = change_provider
def clean_index(self):
"""
Cleans the index.
This can be called prior to reindex to ensure starting from a clean slate.
Should be overridden on a case-by-case basis by subclasses.
"""
pass
def reindex(self, start_from=None):
reindexer_context = PillowRuntimeContext(do_set_checkpoint=False)
for change in self.change_provider.iter_changes(start_from=start_from):
self.pillow.processor(change, reindexer_context)
class ElasticPillowReindexer(PillowReindexer):
def __init__(self, pillow, change_provider, elasticsearch, index_info):
super(ElasticPillowReindexer, self).__init__(pillow, change_provider)
self.es = elasticsearch
self.index_info = index_info
def clean_index(self):
if self.es.indices.exists(self.index_info.index):
self.es.indices.delete(index=self.index_info.index)
def reindex(self, start_from=None):
if not start_from:
# when not resuming force delete and create the index
self._prepare_index_for_reindex()
super(ElasticPillowReindexer, self).reindex(start_from)
self._prepare_index_for_usage()
def _prepare_index_for_reindex(self):
if not self.es.indices.exists(self.index_info.index):
self.es.indices.create(index=self.index_info.index, body=self.index_info.meta)
initialize_mapping_if_necessary(self.es, self.index_info)
set_index_reindex_settings(self.es, self.index_info.index)
def _prepare_index_for_usage(self):
set_index_normal_settings(self.es, self.index_info.index)
self.es.indices.refresh(self.index_info.index)
def get_default_reindexer_for_elastic_pillow(pillow, change_provider):
return ElasticPillowReindexer(
pillow=pillow,
change_provider=change_provider,
elasticsearch=pillow.get_es_new(),
index_info=get_index_info_from_pillow(pillow),
)
|
Python
| 0
|
@@ -1,20 +1,58 @@
+from corehq.elastic import get_es_new%0A
from pillowtop.es_ut
@@ -2295,23 +2295,16 @@
csearch=
-pillow.
get_es_n
|
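Decoded, the reindexer factory stops asking the pillow for a client and uses the module-level helper instead:
from corehq.elastic import get_es_new
def get_default_reindexer_for_elastic_pillow(pillow, change_provider):
    return ElasticPillowReindexer(
        pillow=pillow,
        change_provider=change_provider,
        elasticsearch=get_es_new(),  # was pillow.get_es_new()
        index_info=get_index_info_from_pillow(pillow),
    )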
f5600008defcd5fe4c9c397c0b7170f6f5e9a5e4
|
Add header info and submodule imports to init
|
__init__.py
|
__init__.py
|
|
Python
| 0
|
@@ -1 +1,379 @@
+__author__ = %22Justin Kitzes, Mark Wilber, Chloe Lewis%22%0A__copyright__ = %22Copyright 2012, Regents of University of California%22%0A__credits__ = %5B%5D%0A__license__ = %22BSD 2-clause%22%0A__version__ = %220.1%22%0A__maintainer__ = %22Justin Kitzes%22%0A__email__ = %22jkitzes@berkeley.edu%22%0A__status__ = %22Development%22%0A%0Aimport compare%0Aimport data%0Aimport empirical%0Aimport output%0Aimport utils.workflow as workflow
%0A
|
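The percent-escaped hunk is hard to read; decoded, the new __init__.py body is:
__author__ = "Justin Kitzes, Mark Wilber, Chloe Lewis"
__copyright__ = "Copyright 2012, Regents of University of California"
__credits__ = []
__license__ = "BSD 2-clause"
__version__ = "0.1"
__maintainer__ = "Justin Kitzes"
__email__ = "jkitzes@berkeley.edu"
__status__ = "Development"

import compare
import data
import empirical
import output
import utils.workflow as workflow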
5281d535f67dfa2cebd8f70ee1f342c213d11b29
|
change filename
|
__init__.py
|
__init__.py
|
from .pyglow import *
|
Python
| 0.000109
|
@@ -3,11 +3,11 @@
om .
-pyg
+PyG
low
|
67406893c1b9b727f313a374affe9868ec986fa6
|
Bump to 2.6.2c1.
|
__init__.py
|
__init__.py
|
"""distutils
The main package for the Python Module Distribution Utilities. Normally
used from a setup script as
from distutils.core import setup
setup (...)
"""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id$"
# Distutils version
#
# Please coordinate with Marc-Andre Lemburg <mal@egenix.com> when adding
# new features to distutils that would warrant bumping the version number.
#
# In general, major and minor version should loosely follow the Python
# version number the distutils code was shipped with.
#
#--start constants--
__version__ = "2.6.1"
#--end constants--
|
Python
| 0.000001
|
@@ -587,16 +587,18 @@
= %222.6.
+2c
1%22%0A#--en
|
b913e6d1b4323dbc52fbe2697dc9bf7fa2b80c24
|
Add Python 2 deprecation warning, closes #1179
|
__init__.py
|
__init__.py
|
from __future__ import absolute_import, division, print_function
import logging
import os
import sys
logging.getLogger("dials").addHandler(logging.NullHandler())
# Invert FPE trap defaults, https://github.com/cctbx/cctbx_project/pull/324
if "boost.python" in sys.modules:
import boost.python
boost.python.ext.trap_exceptions(
bool(os.getenv("BOOST_ADAPTBX_TRAP_FPE")),
bool(os.getenv("BOOST_ADAPTBX_TRAP_INVALID")),
bool(os.getenv("BOOST_ADAPTBX_TRAP_OVERFLOW")),
)
elif not os.getenv("BOOST_ADAPTBX_TRAP_FPE") and not os.getenv(
"BOOST_ADAPTBX_TRAP_OVERFLOW"
):
os.environ["BOOST_ADAPTBX_FPE_DEFAULT"] = "1"
# Intercept easy_mp exceptions to extract stack traces before they are lost at
# the libtbx process boundary/the easy_mp API. In the case of a subprocess
# crash we print the subprocess stack trace, which will be most useful for
# debugging parallelized sections of DIALS code.
import libtbx.scheduling.stacktrace as _lss
def _stacktrace_tracer(error, trace, intercepted_call=_lss.set_last_exception):
"""Intercepts and prints ephemeral stacktraces."""
if error and trace:
print(
"\n\neasy_mp crash detected; subprocess trace: ----\n%s%s\n%s\n\n"
% ("".join(trace), error, "-" * 46)
)
return intercepted_call(error, trace)
if _lss.set_last_exception.__doc__ != _stacktrace_tracer.__doc__:
# ensure function is only redirected once
_lss.set_last_exception = _stacktrace_tracer
|
Python
| 0
|
@@ -94,16 +94,331 @@
port sys
+%0Aimport warnings%0A%0Aif sys.version_info.major == 2:%0A warnings.warn(%0A %22Python 2 is no longer fully supported. Please consider using the DIALS 2.2 release branch. %22%0A %22For more information on Python 2.7 support please go to https://github.com/dials/dials/issues/1175.%22,%0A DeprecationWarning,%0A )
%0A%0Aloggin
|
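Decoded, the hunk inserts a guard right after the imports that warns Python 2 users at import time:
import warnings

if sys.version_info.major == 2:
    warnings.warn(
        "Python 2 is no longer fully supported. Please consider using the DIALS 2.2 release branch. "
        "For more information on Python 2.7 support please go to https://github.com/dials/dials/issues/1175.",
        DeprecationWarning,
    )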
b0e00a905da20bc0609a5a5fa53442482fcba091
|
Add "--check-ignore" option
|
test/acid.py
|
test/acid.py
|
#!/usr/bin/env python
"""Test that autopep8 runs without crashing on various Python files."""
import contextlib
import os
import sys
import subprocess
import tempfile
import tokenize
@contextlib.contextmanager
def red(file_object):
"""Red context."""
if file_object.isatty():
RED = '\033[91m'
END = '\033[0m'
else:
RED = ''
END = ''
try:
file_object.flush()
file_object.write(RED)
file_object.flush()
yield
finally:
file_object.flush()
file_object.write(END)
file_object.flush()
def run(filename, fast_check=False, passes=2000,
ignore='', verbose=False):
"""Run autopep8 on file at filename.
Return True on success.
"""
ignore_option = '--ignore=' + ignore
autopep8_path = os.path.split(os.path.abspath(
os.path.dirname(__file__)))[0]
    autopep8_bin = os.path.join(autopep8_path, 'autopep8.py')
    command = ([autopep8_bin] + (['--verbose'] if verbose else []) +
['--pep8-passes={p}'.format(p=passes),
ignore_option, filename])
if fast_check:
if 0 != subprocess.call(command + ['--diff']):
sys.stderr.write('autopep8 crashed on ' + filename + '\n')
return False
else:
with tempfile.NamedTemporaryFile(suffix='.py') as tmp_file:
if 0 != subprocess.call(command, stdout=tmp_file):
sys.stderr.write('autopep8 crashed on ' + filename + '\n')
return False
with red(sys.stdout):
if 0 != subprocess.call(['pep8', ignore_option,
'--show-source', tmp_file.name],
stdout=sys.stdout):
sys.stderr.write('autopep8 did not completely fix ' +
filename + '\n')
try:
if _check_syntax(filename):
try:
_check_syntax(tmp_file.name, raise_error=True)
except (SyntaxError, TypeError,
UnicodeDecodeError) as exception:
sys.stderr.write('autopep8 broke ' + filename + '\n' +
str(exception) + '\n')
return False
except IOError as exception:
sys.stderr.write(str(exception) + '\n')
return True
def _detect_encoding(filename):
"""Return file encoding."""
try:
# Python 3
try:
with open(filename, 'rb') as input_file:
encoding = tokenize.detect_encoding(input_file.readline)[0]
# Check for correctness of encoding
import io
with io.TextIOWrapper(input_file, encoding) as wrapper:
wrapper.read()
return encoding
except (SyntaxError, LookupError, UnicodeDecodeError):
return 'latin-1'
except AttributeError:
return 'utf-8'
def _open_with_encoding(filename, encoding, mode='r'):
"""Open file with a specific encoding."""
try:
# Python 3
return open(filename, mode=mode, encoding=encoding)
except TypeError:
return open(filename, mode=mode)
def _check_syntax(filename, raise_error=False):
"""Return True if syntax is okay."""
with _open_with_encoding(
filename, _detect_encoding(filename)) as input_file:
try:
compile(input_file.read(), '<string>', 'exec')
return True
except (SyntaxError, TypeError, UnicodeDecodeError):
if raise_error:
raise
else:
return False
def process_args():
"""Return processed arguments (options and positional arguments)."""
import optparse
parser = optparse.OptionParser()
parser.add_option('--fast-check', action='store_true',
help='ignore incomplete PEP8 fixes and broken files')
parser.add_option('--ignore',
help='comma-separated errors to ignore',
default='')
parser.add_option('-p', '--pep8-passes',
help='maximum number of additional pep8 passes'
' (default: %default)',
default=2000)
parser.add_option(
'--timeout',
help='stop testing additional files after this amount of time '
'(default: %default)',
default=-1,
type=float)
parser.add_option('-v', '--verbose', action='store_true',
help='print verbose messages')
return parser.parse_args()
class TimeoutException(Exception):
"""Timeout exception."""
def timeout(_, __):
raise TimeoutException()
def check(opts, args):
"""Run recursively run autopep8 on directory of files.
Return False if the fix results in broken syntax.
"""
if args:
dir_paths = args
else:
dir_paths = sys.path
filenames = dir_paths
completed_filenames = set()
try:
import signal
if opts.timeout > 0:
signal.signal(signal.SIGALRM, timeout)
signal.alarm(int(opts.timeout))
while filenames:
name = os.path.realpath(filenames.pop(0))
if name in completed_filenames:
sys.stderr.write(
'---> Skipping previously tested ' + name + '\n')
continue
else:
completed_filenames.update(name)
try:
is_directory = os.path.isdir(name)
except UnicodeEncodeError:
continue
if is_directory:
for root, directories, children in os.walk(name):
filenames += [os.path.join(root, f) for f in children
if f.endswith('.py') and
not f.startswith('.')]
for d in directories:
if d.startswith('.'):
directories.remove(d)
else:
sys.stderr.write('---> Testing with ')
try:
sys.stderr.write(name)
except UnicodeEncodeError:
sys.stderr.write('...')
sys.stderr.write('\n')
if not run(os.path.join(name),
fast_check=opts.fast_check,
passes=opts.pep8_passes,
ignore=opts.ignore,
verbose=opts.verbose):
return False
except TimeoutException:
sys.stderr.write('Timed out\n')
finally:
if opts.timeout > 0:
signal.alarm(0)
return True
def main():
"""Run main."""
return 0 if check(*process_args()) else 1
if __name__ == '__main__':
try:
sys.exit(main())
except KeyboardInterrupt:
sys.exit(1)
|
Python
| 0.999028
|
@@ -653,16 +653,33 @@
nore='',
+ check_ignore='',
verbose
@@ -679,32 +679,32 @@
verbose=False):%0A
-
%22%22%22Run autop
@@ -1638,16 +1638,31 @@
e_option
+ + check_ignore
,%0A
@@ -4188,16 +4188,214 @@
ult='')%0A
+ parser.add_option('--check-ignore',%0A help='comma-separated errors to ignore when checking '%0A 'for completeness',%0A default='')%0A
pars
@@ -6784,16 +6784,16 @@
passes,%0A
-
@@ -6823,32 +6823,91 @@
re=opts.ignore,%0A
+ check_ignore=opts.check_ignore,%0A
|
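Decoded, the option flows from the command line into run(): the parser gains --check-ignore, run() grows a check_ignore='' parameter that check() forwards as check_ignore=opts.check_ignore, and the pep8 completeness check appends the extra codes onto the ignore option string:
parser.add_option('--check-ignore',
                  help='comma-separated errors to ignore when checking '
                       'for completeness',
                  default='')
# inside run(), during the completeness check:
subprocess.call(['pep8', ignore_option + check_ignore,
                 '--show-source', tmp_file.name], stdout=sys.stdout)
Note the plain string concatenation: the --check-ignore value is spliced directly onto the '--ignore=...' argument, so it presumably needs a leading comma, e.g. ',E501' (our reading of the diff, not stated in the commit).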
9d085f0478ca55b59390515c82ca3e367cef5522
|
Replace Bootstrap's html5shiv with es5-shim.
|
app/assets.py
|
app/assets.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask.ext.assets import Environment, Bundle
css_application = Bundle(
'less/main.less',
filters='less',
debug=False,
output='gen/app.css'
)
css_all = Bundle(
# 'vendor/some/library.css',
css_application,
filters='cssmin',
output='gen/app.min.css'
)
js_vendor = Bundle(
'vendor/jquery/dist/jquery.js',
'vendor/angular/angular.js',
'vendor/angular-animate/angular-animate.js',
'vendor/angular-aria/angular-aria.js',
'vendor/angular-cookies/angular-cookies.js',
'vendor/angular-messages/angular-messages.js',
'vendor/angular-resource/angular-resource.js',
'vendor/angular-route/angular-route.js',
'vendor/angular-sanitize/angular-sanitize.js',
'vendor/angular-touch/angular-touch.js',
'vendor/bootstrap/dist/js/bootstrap.js',
'vendor/lodash/dist/lodash.js',
# 'vendor/modernizr/dist/modernizr-build.js', # TODO Customize this
filters='uglifyjs',
output='gen/vendor.min.js'
)
js_ie = Bundle(
'vendor/bootstrap/assets/js/html5shiv.js',
'vendor/bootstrap/assets/js/respond.min.js',
filters='uglifyjs',
output='gen/ie.min.js'
)
js_main = Bundle(
'libs/ba-debug.js',
Bundle(
'coffee/app.coffee',
'coffee/init.coffee', # Must be loaded after app.coffee but before anything else.
'coffee/notify.coffee',
'scripts/app.coffee',
'scripts/service/config.coffee',
'scripts/controllers/listing.coffee',
filters='coffeescript',
output='gen/app.js'
),
filters='uglifyjs',
output='gen/app.min.js'
)
def init_app(app):
webassets = Environment(app)
webassets.register('css_all', css_all)
webassets.register('js_vendor', js_vendor)
webassets.register('js_ie', js_ie)
webassets.register('js_main', js_main)
webassets.manifest = 'cache' if not app.debug else False
webassets.cache = not app.debug
webassets.debug = app.debug
|
Python
| 0.000002
|
@@ -1046,86 +1046,25 @@
dor/
-bootstrap/assets/js/html5shiv.js',%0A 'vendor/bootstrap/assets/js/respond.min
+es5-shim/es5-shim
.js'
|
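Decoded, the IE bundle swaps both Bootstrap polyfills for a single es5-shim entry:
js_ie = Bundle(
    'vendor/es5-shim/es5-shim.js',
    filters='uglifyjs',
    output='gen/ie.min.js'
)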
12c57f6b785167c4f9e6427520360ce64d845e96
|
Fix documentation links in Edward2 docstring.
|
tensorflow_probability/python/experimental/edward2/__init__.py
|
tensorflow_probability/python/experimental/edward2/__init__.py
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Edward2 probabilistic programming language.
For user guides, see:
+ [Overview](
https://github.com/tensorflow/probability/blob/master/tensorflow_probability/python/edward2/README.md)
+ [Upgrading from Edward to Edward2](
https://github.com/tensorflow/probability/blob/master/tensorflow_probability/python/edward2/Upgrading_From_Edward_To_Edward2.md)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow_probability.python.experimental.edward2.generated_random_variables import *
from tensorflow_probability.python.experimental.edward2.generated_random_variables import as_random_variable
from tensorflow_probability.python.experimental.edward2.generated_random_variables import rv_dict
from tensorflow_probability.python.experimental.edward2.interceptor import get_next_interceptor
from tensorflow_probability.python.experimental.edward2.interceptor import interceptable
from tensorflow_probability.python.experimental.edward2.interceptor import interception
from tensorflow_probability.python.experimental.edward2.interceptor import tape
from tensorflow_probability.python.experimental.edward2.program_transformations import make_log_joint_fn
from tensorflow_probability.python.experimental.edward2.program_transformations import make_value_setter
from tensorflow_probability.python.experimental.edward2.random_variable import RandomVariable
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = list(rv_dict.keys()) + [
"RandomVariable",
"as_random_variable",
"interception",
"get_next_interceptor",
"interceptable",
"make_log_joint_fn",
"make_value_setter",
"tape",
]
remove_undocumented(__name__, _allowed_symbols)
|
Python
| 0.000004
|
@@ -836,32 +836,45 @@
bability/python/
+experimental/
edward2/README.m
@@ -1001,16 +1001,29 @@
/python/
+experimental/
edward2/
|
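Decoded, both user-guide links gain the experimental/ path segment, matching the module's location under python/experimental/:
+ [Overview](
https://github.com/tensorflow/probability/blob/master/tensorflow_probability/python/experimental/edward2/README.md)
+ [Upgrading from Edward to Edward2](
https://github.com/tensorflow/probability/blob/master/tensorflow_probability/python/experimental/edward2/Upgrading_From_Edward_To_Edward2.md)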
1ec3a57e53e52cc8b8188dcdcb5eeb773cbead18
|
Allow autonomous modes to be marked as disabled
|
robot/robot/src/autonomous/__init__.py
|
robot/robot/src/autonomous/__init__.py
|
'''
Implements an autonomous mode management program. Example usage:
from autonomous import AutonomousModeManager
components = {'drive': drive,
'component1': component1, ... }
autonomous = AutonomousModeManager(components)
class MyRobot(wpilib.SimpleRobot):
...
def Autonomous(self):
autonomous.run(self, control_loop_wait_time)
def update(self):
...
Note that the robot instance passed to AutonomousModeManager.run() must
have an update function.
'''
from glob import glob
import imp
import inspect
import os
import sys
from common.delay import PreciseDelay
try:
import wpilib
except ImportError:
from pyfrc import wpilib
class AutonomousModeManager(object):
'''
The autonomous manager loads all autonomous mode modules and allows
the user to select one of them via the SmartDashboard.
See template.txt for a sample autonomous mode module
'''
def __init__(self, components):
''''''
self.ds = wpilib.DriverStation.GetInstance()
self.modes = {}
self.active_mode = None
print( "AutonomousModeManager::__init__() Begins" )
# load all modules in the current directory
modules_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(modules_path)
modules = glob(os.path.join(modules_path, '*.py' ))
for module_filename in modules:
module_name = os.path.basename(module_filename[:-3])
if module_name in ['__init__', 'manager']:
continue
try:
module = imp.load_source(module_name, module_filename)
except:
if not self.ds.IsFMSAttached():
raise
#
# Find autonomous mode classes in the modules that are present
# -> note that we actually create the instance of the objects here,
# so that way we find out about any errors *before* we get out
# on the field..
for name, obj in inspect.getmembers(module, inspect.isclass):
if hasattr(obj, 'MODE_NAME') :
try:
instance = obj(components)
except:
if not self.ds.IsFMSAttached():
raise
else:
continue
if instance.MODE_NAME in self.modes:
if not self.ds.IsFMSAttached():
raise RuntimeError( "Duplicate name %s in %s" % (instance.MODE_NAME, module_filename) )
print( "ERROR: Duplicate name %s specified by object type %s in module %s" % (instance.MODE_NAME, name, module_filename))
self.modes[name + '_' + module_filename] = instance
else:
self.modes[instance.MODE_NAME] = instance
# now that we have a bunch of valid autonomous mode objects, let
# the user select one using the SmartDashboard.
# SmartDashboard interface
sd = wpilib.SmartDashboard
self.chooser = wpilib.SendableChooser()
default_modes = []
print("Loaded autonomous modes:")
for k,v in sorted(self.modes.items()):
if hasattr(v, 'DEFAULT') and v.DEFAULT == True:
print(" -> %s [Default]" % k)
self.chooser.AddDefault(k, v)
default_modes.append(k)
else:
print( " -> %s" % k )
self.chooser.AddObject(k, v)
if len(self.modes) == 0:
print("-- no autonomous modes were loaded!")
# provide a none option
self.chooser.AddObject('None', None)
if len(default_modes) == 0:
self.chooser.AddDefault('None', None)
elif len(default_modes) != 1:
if not self.ds.IsFMSAttached():
raise RuntimeError("More than one autonomous mode was specified as default! (modes: %s)" % (', '.join(default_modes)))
# must PutData after setting up objects
sd.PutData('Autonomous Mode', self.chooser)
print( "AutonomousModeManager::__init__() Done" )
def run(self, robot, control_loop_wait_time):
'''
This function does everything required to implement autonomous
mode behavior.
:param robot: a SimpleRobot derived class, and is expected to
have a function called 'update', which will do
updates on all motors and components.
:param control_loop_wait_time: Amount of time between loops
'''
print("AutonomousModeManager::Autonomous() Begins")
# don't risk the watchdog, hopefully we do everything right here :)
robot.GetWatchdog().SetEnabled(False)
# keep track of how much time has passed in autonomous mode
timer = wpilib.Timer()
timer.Start()
try:
self.on_autonomous_enable()
except:
if not self.ds.IsFMSAttached():
raise
#
# Autonomous control loop
#
delay = PreciseDelay(control_loop_wait_time)
while robot.IsAutonomous() and robot.IsEnabled():
try:
self.update(timer.Get())
except:
if not self.ds.IsFMSAttached():
raise
robot.update()
delay.wait()
#
# Done with autonomous, finish up
#
try:
self.on_autonomous_disable()
except:
if not self.ds.IsFMSAttached():
raise
print("AutonomousModeManager::Autonomous() Done")
#
# Internal methods used to implement autonomous mode switching. Most
# users of this class will not want to use these functions, use the
# run() function instead.
#
def on_autonomous_enable(self):
'''Select the active autonomous mode here, and enable it'''
self.active_mode = self.chooser.GetSelected()
if self.active_mode is not None:
print("AutonomousModeManager: Enabling '%s'" % self.active_mode.MODE_NAME)
self.active_mode.on_enable()
else:
print("AutonomousModeManager: No autonomous modes were selected, not enabling autonomous mode")
def on_autonomous_disable(self):
'''Disable the active autonomous mode'''
if self.active_mode is not None:
print("AutonomousModeManager: Disabling '%s'" % self.active_mode.MODE_NAME)
self.active_mode.on_disable()
self.active_mode = None
def update(self, time_elapsed):
'''Run the code for the current autonomous mode'''
if self.active_mode is not None:
self.active_mode.update(time_elapsed)
|
Python
| 0.000001
|
@@ -2697,32 +2697,335 @@
continue%0A
+ %0A # don't allow the driver to select this mode %0A if hasattr(instance, 'DISABLED') and instance.DISABLED:%0A print(%22Warning: autonomous mode %25s is marked as disabled%22 %25 instance.MODE_NAME)%0A continue%0A
|
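Decoded, the hunk adds an early bail-out in the discovery loop, right after instantiation succeeds and before the duplicate-name bookkeeping, so disabled modes never reach the SmartDashboard chooser:
            # don't allow the driver to select this mode
            if hasattr(instance, 'DISABLED') and instance.DISABLED:
                print("Warning: autonomous mode %s is marked as disabled" % instance.MODE_NAME)
                continue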
9736dcfebaeca8cfbc730dc547e34bb4ca5d1768
|
Remove some of the "useless keys", they're not so useless.
|
south/creator/freezer.py
|
south/creator/freezer.py
|
"""
Handles freezing of models into FakeORMs.
"""
import sys
from django.db import models
from django.contrib.contenttypes.generic import GenericRelation
from south.orm import FakeORM
from south.utils import auto_model
from south import modelsinspector
def freeze_apps(apps):
"""
Takes a list of app labels, and returns a string of their frozen form.
"""
if isinstance(apps, basestring):
apps = [apps]
frozen_models = set()
# For each app, add in all its models
for app in apps:
for model in models.get_models(models.get_app(app)):
# Only add if it's not abstract or proxy
if not model._meta.abstract and not getattr(model._meta, "proxy", False):
frozen_models.add(model)
# Now, add all the dependencies
for model in list(frozen_models):
frozen_models.update(model_dependencies(model))
# Serialise!
model_defs = {}
for model in frozen_models:
model_defs[model_key(model)] = prep_for_freeze(model)
# Check for any custom fields that failed to freeze.
missing_fields = False
for key, fields in model_defs.items():
for field_name, value in fields.items():
if value is None:
missing_fields = True
print " ! Cannot freeze field '%s.%s'" % (key, field_name)
if missing_fields:
print ""
print " ! South cannot introspect some fields; this is probably because they are custom"
print " ! fields. If they worked in 0.6 or below, this is because we have removed the"
print " ! models parser (it often broke things)."
print " ! To fix this, read http://south.aeracode.org/wiki/MyFieldsDontWork"
sys.exit(1)
return model_defs
def freeze_apps_to_string(apps):
return pprint_frozen_models(freeze_apps(apps))
###
def model_key(model):
"For a given model, return 'appname.modelname'."
return "%s.%s" % (model._meta.app_label, model._meta.object_name.lower())
def prep_for_freeze(model):
"""
Takes a model and returns the ready-to-serialise dict (all you need
to do is just pretty-print it).
"""
fields = modelsinspector.get_model_fields(model, m2m=True)
# Remove useless attributes (like 'choices')
for name, field in fields.items():
fields[name] = remove_useless_attributes(field)
# See if there's a Meta
fields['Meta'] = remove_useless_meta(modelsinspector.get_model_meta(model))
# Add in our own special items to track the object name and managed
fields['Meta']['object_name'] = model._meta.object_name # Special: not eval'able.
if not getattr(model._meta, "managed", True):
fields['Meta']['managed'] = repr(model._meta.managed)
return fields
### Dependency resolvers
def model_dependencies(model, checked_models=None):
"""
Returns a set of models this one depends on to be defined; things like
OneToOneFields as ID, ForeignKeys everywhere, etc.
"""
depends = set()
checked_models = checked_models or set()
# Get deps for each field
for field in model._meta.fields + model._meta.many_to_many:
depends.update(field_dependencies(field))
# Add in any non-abstract bases
for base in model.__bases__:
if issubclass(base, models.Model) and (base is not models.Model) and not base._meta.abstract:
depends.add(base)
# Now recurse
new_to_check = depends - checked_models
while new_to_check:
checked_model = new_to_check.pop()
if checked_model == model or checked_model in checked_models:
continue
checked_models.add(checked_model)
deps = model_dependencies(checked_model, checked_models)
# Loop through dependencies...
for dep in deps:
# If the new dep is not already checked, add to the queue
if (dep not in depends) and (dep not in new_to_check) and (dep not in checked_models):
new_to_check.add(dep)
depends.add(dep)
return depends
def field_dependencies(field, checked_models=None):
checked_models = checked_models or set()
depends = set()
if isinstance(field, (models.OneToOneField, models.ForeignKey, models.ManyToManyField, GenericRelation)):
if field.rel.to in checked_models:
return depends
checked_models.add(field.rel.to)
depends.add(field.rel.to)
depends.update(field_dependencies(field.rel.to._meta.pk, checked_models))
# Also include M2M throughs
if isinstance(field, models.ManyToManyField):
if field.rel.through:
if hasattr(field.rel, "through_model"): # 1.1 and below
depends.add(field.rel.through_model)
else:
# Make sure it's not an automatic one
if not auto_model(field.rel.through):
depends.add(field.rel.through) # 1.2 and up
return depends
### Prettyprinters
def pprint_frozen_models(models):
return "{\n %s\n }" % ",\n ".join([
"%r: %s" % (name, pprint_fields(fields))
for name, fields in sorted(models.items())
])
def pprint_fields(fields):
return "{\n %s\n }" % ",\n ".join([
"%r: %r" % (name, defn)
for name, defn in sorted(fields.items())
])
### Output sanitisers
USELESS_KEYWORDS = ["choices", "help_text", "upload_to", "verbose_name", "storage"]
USELESS_DB_KEYWORDS = ["related_name", "default"] # Important for ORM, not for DB.
INDEX_KEYWORDS = ["db_index"]
def remove_useless_attributes(field, db=False, indexes=False):
"Removes useless (for database) attributes from the field's defn."
# Work out what to remove, and remove it.
keywords = USELESS_KEYWORDS[:]
if db:
keywords += USELESS_DB_KEYWORDS[:]
if indexes:
keywords += INDEX_KEYWORDS[:]
if field:
for name in keywords:
if name in field[2]:
del field[2][name]
return field
USELESS_META = ["verbose_name", "verbose_name_plural"]
def remove_useless_meta(meta):
"Removes useless (for database) attributes from the table's meta."
if meta:
for name in USELESS_META:
if name in meta:
del meta[name]
return meta
|
Python
| 0.000116
|
@@ -5455,43 +5455,19 @@
%22, %22
-upload_to%22, %22verbose_name%22, %22storag
+verbose_nam
e%22%5D%0A
|
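Decoded, upload_to and storage are dropped from the strip list, so frozen field definitions now preserve them; only the genuinely display-oriented keywords remain:
USELESS_KEYWORDS = ["choices", "help_text", "verbose_name"]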
324f178926392293527b4aafa64a4c9a7a4d28c7
|
Change an invalid constant name
|
spam_lists/validation.py
|
spam_lists/validation.py
|
# -*- coding: utf-8 -*-
'''
This module contains functions responsible for
validating arguments for other functions and
methods provided by the library
'''
from __future__ import unicode_literals
import functools
import re
from future.moves.urllib.parse import urlparse
import validators
from .exceptions import InvalidURLError, InvalidHostError
def is_valid_host(value):
''' Check if given value is valid host string
:param value: a value to test
:returns: True if the value is valid host string
'''
host_validators = validators.ipv4, validators.ipv6, validators.domain
return any(f(value) for f in host_validators)
url_regex = re.compile(r'^[a-z0-9\.\-\+]*://' #scheme
r'(?:\S+(?::\S*)?@)?' #authentication
r'(?:[^/:]+|\[[0-9a-f:\.]+\])' # host
r'(?::\d{2,5})?' # port
r'(?:[/?#][^\s]*)?' # path, query or fragment
r'$', re.IGNORECASE)
def is_valid_url(value):
''' Check if given value is valid url string
:param value: a value to test
:returns: True if the value is valid url string
'''
match = url_regex.match(value)
host_str = urlparse(value).hostname
return (match and is_valid_host(host_str))
def accepts_valid_host(func):
@functools.wraps(func)
def wrapper(obj, value, *args, **kwargs):
''' Run the function and return its return value
if the value is host - otherwise raise InvalidHostError
:param obj": an object in whose class f is defined
:param value: a value expected to be a valid host string
:returns: a return value of the function f
:raises InvalidHostError: if the value is not a valid host string
'''
if not is_valid_host(value):
raise InvalidHostError
return func(obj, value, *args, **kwargs)
return wrapper
def accepts_valid_urls(func):
@functools.wraps(func)
def wrapper(obj, urls, *args, **kwargs):
'''Run the function and return its return value
if all given urls are valid - otherwise raise InvalidURLError
:param obj: an object in whose class f is defined
:param urls: an iterable containing urls
:returns: a return value of the function f
:raises InvalidURLError: if the iterable contains invalid urls
'''
invalid_urls = [u for u in urls if not is_valid_url(u)]
if invalid_urls:
msg_tpl = 'The values: {} are not valid urls'
msg = msg_tpl.format(','.join(invalid_urls))
raise InvalidURLError(msg)
return func(obj, urls, *args, **kwargs)
return wrapper
|
Python
| 0.000031
|
@@ -642,25 +642,25 @@
ators)%0A%0A
-url_regex
+URL_REGEX
= re.co
@@ -1165,17 +1165,17 @@
h =
-url_regex
+URL_REGEX
.mat
|
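Decoded, the compiled regex is renamed to the UPPER_CASE form PEP 8 reserves for module-level constants, at the definition and its one use site:
URL_REGEX = re.compile(r'^[a-z0-9\.\-\+]*://'  # ...pattern body unchanged
                       r'$', re.IGNORECASE)
match = URL_REGEX.match(value)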
cc9d516605878e8cc3b003727908c3791a026ca6
|
Raise exceptions with list of issues to show all of them immediately
|
runtime_typecheck/runtime_typecheck.py
|
runtime_typecheck/runtime_typecheck.py
|
#!/usr/bin/env python3
from functools import wraps
import inspect
from typing import (Union,
Tuple,
Any,
TypeVar,
Type,
List)
def check_type(obj, candidate_type, reltype='invariant') -> bool:
if reltype not in ['invariant', 'covariant', 'contravariant']:
raise ValueError(f' Variadic type {reltype} is unknown')
# builtin type like str, or a class
if type(candidate_type) == type and reltype in ['invariant']:
return isinstance(obj, candidate_type)
if type(candidate_type) == type and reltype in ['covariant']:
return issubclass(obj.__class__, candidate_type)
if type(candidate_type) == type and reltype in ['contravariant']:
return issubclass(candidate_type, obj.__class__)
# Any accepts everything
if type(candidate_type) == type(Any):
return True
# Union, at least one match in __args__
if type(candidate_type) == type(Union):
return any(check_type(obj, t, reltype) for t in candidate_type.__args__)
# Tuple, each element matches the corresponding type in __args__
if type(candidate_type) == type(Tuple):
if not hasattr(obj, '__len__'):
return False
if len(candidate_type.__args__) != len(obj):
return False
return all(check_type(o, t, reltype) for (o, t) in zip(obj, candidate_type.__args__))
# List, each element matches the type in __args__
if type(candidate_type) == type(List):
if not hasattr(obj, '__len__'):
return False
return all(check_type(o, candidate_type.__args__[0], reltype) for o in obj)
# TypeVar, this is tricky
if type(candidate_type) == type(TypeVar):
        # TODO consider covariant, contravariant and bound
        # invariant with a list of constraints acts like a Union
        if not (candidate_type.__covariant__ or candidate_type.__contravariant__) and len(
candidate_type.__constraints__) > 0:
return any(check_type(obj, t) for t in candidate_type.__constraints__)
if type(candidate_type) == type(Type):
return check_type(obj, candidate_type.__args__[0], reltype='covariant')
raise ValueError(f'Cannot check against {reltype} type {candidate_type}')
def check_args(func):
@wraps(func)
def check(*args):
sig = inspect.signature(func)
params = zip(sig.parameters, args)
for name, value in params:
if not check_type(value, sig.parameters[name].annotation):
raise TypeError(f'Expected {sig.parameters[name]}, got {type(value)}.')
return func(*args)
return check
|
Python
| 0
|
@@ -225,51 +225,293 @@
)%0A%0A%0A
-def check_type(obj, candidate_type,
+class DetailedTypeError(TypeError):%0A issues = %5B%5D%0A%0A def __init__(self, issues: List%5BTuple%5Bstr, str, str%5D%5D):%0A self.issues = issues%0A super().__init__(f'typing issues found:%7Bissues%7D')%0A%0A%0Adef check_type(obj: Any,%0A candidate_type: Any,%0A
reltype
='in
@@ -506,16 +506,21 @@
reltype
+: str
='invari
@@ -2708,16 +2708,42 @@
, args)%0A
+ found_errors = %5B%5D%0A
@@ -2769,16 +2769,16 @@
params:%0A
-
@@ -2860,78 +2860,158 @@
-raise TypeError(f'Expected %7Bsig.parameters%5Bname%5D%7D, got %7Btype(value)%7D.'
+found_errors.append((name, sig.parameters%5Bname%5D.annotation, value))%0A if len(found_errors) %3E 0:%0A raise DetailedTypeError(found_errors
)%0A
|
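Decoded, the commit introduces an exception type that carries every failed parameter, and check() accumulates all mismatches before raising, so the caller sees the full list at once instead of only the first offender:
class DetailedTypeError(TypeError):
    issues = []

    def __init__(self, issues: List[Tuple[str, str, str]]):
        self.issues = issues
        super().__init__(f'typing issues found:{issues}')

# inside check():
found_errors = []
for name, value in params:
    if not check_type(value, sig.parameters[name].annotation):
        found_errors.append((name, sig.parameters[name].annotation, value))
if len(found_errors) > 0:
    raise DetailedTypeError(found_errors)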
eee59888b5bf03727ac28f52c1c7ede52e85feae
|
update SQL
|
s3-dynamo-sync-check/case_alignment.py
|
s3-dynamo-sync-check/case_alignment.py
|
import argparse
import logging.config
import threadpool
import traceback
from lib.connection import Connection
from lib.s3 import S3
from lib.dynamodb import DynamoDB
from lib.mssql import MSSql
LOG_PATH = "/tmp/case_err.log"
def nonsync_logging(request, (success, key)):
"""non-sync item will be logged here
"""
if success:
logger.info("Success on %s" % key)
else:
logger.error("Not Sync object - " + key)
def case_alignment(key):
who = None
try:
# get connection
global db_table, s3_bucket, dryrun
db_connection = Connection("dynamodb", args.region)
s3_connection = Connection("s3", args.region)
# find file key
key = key['SHA1']
file_key = '/'.join(('frs', key[:2], key[2:5], key[5:8], key[8:13], key))
# case update
db = DynamoDB(connection=db_connection.new_connection())
if not dryrun:
who = "db"
table = db.get_storage_set(db_table)
item = db.get_item(table, hash_key=file_key)
if item:
db.update_record(table, "SHA1", item, item['SHA1'].lower())
db.update_primary_key(table, item, file_key.lower())
else:
return (False, "%s - %s" % (who, file_key))
s3 = S3(connection=s3_connection.new_connection())
if not dryrun:
who = "s3"
bucket = s3.get_storage_set(s3_bucket)
s3obj = s3.get_item(bucket, file_key)
if s3obj:
s3.update_primary_key(bucket, s3obj, file_key.lower())
else:
return (False, "%s - %s" % (who, file_key))
return (True, file_key)
except:
logger.error(traceback.format_exc())
return (False, "%s - %s" % (who, key))
def get_result_set():
conn = MSSql("host", "username", "password")
conn.connect("FRSCentralStorage")
result_set = conn.query("SELECT top 100 SHA1 FROM FileInfo WITH (NOLOCK) WHERE BINARY_CHECKSUM(sha1) = BINARY_CHECKSUM(Upper(sha1)) ORDER BY sha1")
return result_set
def get_test_result_set():
"""
temp added for testing
:return:
"""
connection = Connection("dynamodb", "us-west-1")
storage_obj = DynamoDB(connection=connection.new_connection())
storage_obj.set_storage_set_name("cs-file-metadata")
storage_set = storage_obj.get_storage_set()
result_set = storage_obj.list(storage_set) if storage_set else None
return result_set
def main(bucket, table, threadcnt):
logger.info("Start to process - bucket=%s, table=%s" % (bucket, table))
if test:
result_set = get_test_result_set()
else:
result_set = get_result_set()
pool = threadpool.ThreadPool(int(threadcnt))
reqs = threadpool.makeRequests(case_alignment, result_set, nonsync_logging)
[pool.putRequest(req) for req in reqs]
pool.wait()
logger.info("End of process with bucket=%s, table=%s" % (bucket, table))
if __name__ == '__main__':
# args
parser = argparse.ArgumentParser(description='Central Storage S3 & DynamoDB verification tool')
parser.add_argument("-r", "--region", type=str, help="target AWS region", required=True)
parser.add_argument("-b", "--bucket", type=str, help="S3 bucket name", required=True)
parser.add_argument("-t", "--table", type=str, help="DynamoDB table name", required=True)
parser.add_argument("-c", "--threadcount", type=str, help="thread count", default="1", required=False)
parser.add_argument('--dryrun', action='store_true')
parser.add_argument('--test', help="use test set", action='store_true')
# logging
logging.config.fileConfig('logging.ini', disable_existing_loggers=False, defaults={'logfilename': LOG_PATH})
logger = logging.getLogger(__name__)
# FIXME - file logging
fh = logging.FileHandler(LOG_PATH)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
# parse args
args = parser.parse_args()
db_table = args.table
s3_bucket = args.bucket
dryrun = args.dryrun
test = args.test
main(args.bucket, args.table, args.threadcount)
|
Python
| 0.000002
|
@@ -1943,32 +1943,24 @@
ry(%22
-SELECT top 100
+select
SHA1
-FROM
+from
Fil
@@ -1969,17 +1969,16 @@
nfo WITH
-
(NOLOCK)
@@ -1982,80 +1982,69 @@
CK)
-WHERE BINARY_CHECKSUM(sha1) = BINARY_CHECKSUM(Upper(sha1)) ORDER BY sha1
+where sha1 COLLATE SQL_Latin1_General_CP1_CS_AS = upper(sha1)
%22)%0A
|
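The diff above swaps a BINARY_CHECKSUM comparison for an explicit case-sensitive collation: on a case-insensitive SQL Server collation, sha1 = upper(sha1) is always true, and BINARY_CHECKSUM can collide, while COLLATE ..._CS_AS compares the actual characters. A sketch of the resulting query, assuming the pymssql driver in place of the record's lib.mssql wrapper, with placeholder credentials:

import pymssql  # assumed driver; the record's MSSql wrapper is not shown

UPPERCASE_SHA1_QUERY = (
    "select SHA1 from FileInfo WITH (NOLOCK) "
    "where sha1 COLLATE SQL_Latin1_General_CP1_CS_AS = upper(sha1)"
)

def fetch_uppercase_keys(host, user, password):
    conn = pymssql.connect(host, user, password, "FRSCentralStorage")
    cursor = conn.cursor()
    cursor.execute(UPPERCASE_SHA1_QUERY)
    rows = [row[0] for row in cursor.fetchall()]
    conn.close()
    return rows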
b4e2d34edcbc404f6c90f76b67bcc5fe26f0945f
|
Remove imp and just use importlib to avoid memory error when showing versions
|
pandas/util/print_versions.py
|
pandas/util/print_versions.py
|
import os
import platform
import sys
import struct
import subprocess
import codecs
def get_sys_info():
"Returns system information as a dict"
blob = []
# get full commit hash
commit = None
if os.path.isdir(".git") and os.path.isdir("pandas"):
try:
pipe = subprocess.Popen('git log --format="%H" -n 1'.split(" "),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
so, serr = pipe.communicate()
except:
pass
else:
if pipe.returncode == 0:
commit = so
try:
commit = so.decode('utf-8')
except ValueError:
pass
commit = commit.strip().strip('"')
blob.append(('commit', commit))
try:
(sysname, nodename, release,
version, machine, processor) = platform.uname()
blob.extend([
("python", "%d.%d.%d.%s.%s" % sys.version_info[:]),
("python-bits", struct.calcsize("P") * 8),
("OS", "%s" % (sysname)),
("OS-release", "%s" % (release)),
# ("Version", "%s" % (version)),
("machine", "%s" % (machine)),
("processor", "%s" % (processor)),
("byteorder", "%s" % sys.byteorder),
("LC_ALL", "%s" % os.environ.get('LC_ALL', "None")),
("LANG", "%s" % os.environ.get('LANG', "None")),
])
except:
pass
return blob
def show_versions(as_json=False):
import imp
sys_info = get_sys_info()
deps = [
# (MODULE_NAME, f(mod) -> mod version)
("pandas", lambda mod: mod.__version__),
("nose", lambda mod: mod.__version__),
("pip", lambda mod: mod.__version__),
("setuptools", lambda mod: mod.__version__),
("Cython", lambda mod: mod.__version__),
("numpy", lambda mod: mod.version.version),
("scipy", lambda mod: mod.version.version),
("statsmodels", lambda mod: mod.__version__),
("xarray", lambda mod: mod.__version__),
("IPython", lambda mod: mod.__version__),
("sphinx", lambda mod: mod.__version__),
("patsy", lambda mod: mod.__version__),
("dateutil", lambda mod: mod.__version__),
("pytz", lambda mod: mod.VERSION),
("blosc", lambda mod: mod.__version__),
("bottleneck", lambda mod: mod.__version__),
("tables", lambda mod: mod.__version__),
("numexpr", lambda mod: mod.__version__),
("matplotlib", lambda mod: mod.__version__),
("openpyxl", lambda mod: mod.__version__),
("xlrd", lambda mod: mod.__VERSION__),
("xlwt", lambda mod: mod.__VERSION__),
("xlsxwriter", lambda mod: mod.__version__),
("lxml", lambda mod: mod.etree.__version__),
("bs4", lambda mod: mod.__version__),
("html5lib", lambda mod: mod.__version__),
("httplib2", lambda mod: mod.__version__),
("apiclient", lambda mod: mod.__version__),
("sqlalchemy", lambda mod: mod.__version__),
("pymysql", lambda mod: mod.__version__),
("psycopg2", lambda mod: mod.__version__),
("jinja2", lambda mod: mod.__version__),
("boto", lambda mod: mod.__version__),
("pandas_datareader", lambda mod: mod.__version__)
]
deps_blob = list()
for (modname, ver_f) in deps:
try:
try:
mod = imp.load_module(modname, *imp.find_module(modname))
except (ImportError):
import importlib
mod = importlib.import_module(modname)
ver = ver_f(mod)
deps_blob.append((modname, ver))
except:
deps_blob.append((modname, None))
if (as_json):
try:
import json
except:
import simplejson as json
j = dict(system=dict(sys_info), dependencies=dict(deps_blob))
if as_json is True:
print(j)
else:
with codecs.open(as_json, "wb", encoding='utf8') as f:
json.dump(j, f, indent=2)
else:
print("\nINSTALLED VERSIONS")
print("------------------")
for k, stat in sys_info:
print("%s: %s" % (k, stat))
print("")
for k, stat in deps_blob:
print("%s: %s" % (k, stat))
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-j", "--json", metavar="FILE", nargs=1,
help="Save output as JSON into file, pass in "
"'-' to output to stdout")
(options, args) = parser.parse_args()
if options.json == "-":
options.json = True
show_versions(as_json=options.json)
return 0
if __name__ == "__main__":
sys.exit(main())
|
Python
| 0
|
@@ -76,16 +76,33 @@
codecs%0A
+import importlib%0A
%0A%0Adef ge
@@ -1591,23 +1591,8 @@
e):%0A
- import imp%0A
@@ -3464,170 +3464,8 @@
ry:%0A
- try:%0A mod = imp.load_module(modname, *imp.find_module(modname))%0A except (ImportError):%0A import importlib%0A
|
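The change above drops imp.load_module(name, *imp.find_module(name)), which re-executes module code and was blamed for the memory error, in favour of a single importlib.import_module call. The same lookup in isolation:

import importlib

def safe_version(modname, ver_f):
    """Return the module's version via ver_f, or None if anything fails."""
    try:
        mod = importlib.import_module(modname)
        return ver_f(mod)
    except Exception:
        return None

print(safe_version("json", lambda mod: mod.__version__))             # e.g. '2.0.9'
print(safe_version("no_such_module", lambda mod: mod.__version__))   # None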
c47dc4004a12010e32c16fbabb64fb682eee16cb
|
Define parseArgs
|
gsm.py
|
gsm.py
|
import sys
from os import path
from subprocess import call
import json
from pprint import pprint
" Terminal Colors "
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
" GSM "
class GSM(object):
version = str('1.0.0')
json_file = str('gitsubmodule.json')
dependencies = dict()
devDependencies = dict()
_jsonExists = bool(False)
" Property. "
@property
def jsonExists(self):
if path.isfile(self.json_file):
exists = True
else:
exists = False
self._jsonExists = exists
return self._jsonExists
" Initialise. "
def __init__(self):
self.message(value="git submodule manager %s" % self.version)
pass
" Run. "
def run(self):
if self.readJson() == True:
self.addSubmodules()
else:
pass
" Message "
def message(self, value, code=None):
if code:
if code == 'OK':
color = bcolors.OKGREEN
elif code == 'ERR':
color = bcolors.FAIL
print("gsm %s%s!%s %s" % (color, code, bcolors.ENDC, value))
else:
print(value)
" Read JSON. "
def readJson(self):
if self.jsonExists == True:
with open(self.json_file) as data_file:
try:
data = json.load(data_file)
except ValueError as e:
self.message(code='ERR', value="no JSON object could be decoded, please check `%s`" % self.json_file)
return False
self.dependencies = data["dependencies"].items()
self.devDependencies = data["devDependencies"].items()
self.message(code='OK', value="%s" % self.json_file)
return True
else:
self.message(code='ERR', value="could not find `%s`" % self.json_file)
return False
" Add Git Submodules. "
def addSubmodules(self):
for dst, src in self.dependencies:
self.message(value="- Installing %s" % (dst))
self.message(value=" Source: %s" % (src))
call(["git", "submodule", "add", "-f", src, dst])
# check if all submodules installed
# self.message(code='OK', value='add git submodules')
" Main "
def main():
gsm = GSM()
gsm.run()
sys.exit()
" Enter Main "
if __name__ == '__main__':
main()
|
Python
| 0.998561
|
@@ -462,24 +462,50 @@
es = dict()%0A
+ cmd = str('install');%0A
_jsonExi
@@ -522,16 +522,16 @@
(False)%0A
-
%0A %22 P
@@ -872,21 +872,8 @@
ion)
-%0A pass
%0A%0A
@@ -894,32 +894,158 @@
def run(self):%0A
+ # parse args%0A if self.parseArgs() == True:%0A # install%0A if self.cmd == 'install':%0A
if self.
@@ -1056,32 +1056,40 @@
Json() == True:%0A
+
self
@@ -1429,32 +1429,32 @@
)%0A else:%0A
-
prin
@@ -1459,24 +1459,813 @@
int(value)%0A%0A
+ %22 Parse Arguments. %22%0A def parseArgs(self):%0A # check argv length%0A if len(sys.argv) != 2:%0A self.message(code='ERR', value=%22invalid command, try -h for help%22)%0A return False%0A # if command argument%0A cmd = sys.argv%5B1%5D%0A if cmd:%0A if cmd == '-h':%0A self.message(value=%22- install git submodules:%22)%0A self.message(value=%22 python gsm.py install%22)%0A return False%0A elif cmd == 'install':%0A self.cmd = cmd%0A return True%0A else:%0A self.message(code='ERR', value=%22unknown command %60%25s%60%22 %25 cmd)%0A return False%0A else:%0A self.message(code='ERR', value=%22no command given%22)%0A return False%0A%0A
%22 Read J
|
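A stand-alone sketch of the parseArgs flow the diff introduces: exactly one positional command is accepted, with '-h' printing usage and 'install' selecting the only supported action.

import sys

def parse_args(argv):
    if len(argv) != 2:
        print("gsm ERR! invalid command, try -h for help")
        return None
    cmd = argv[1]
    if cmd == '-h':
        print("- install git submodules:")
        print("  python gsm.py install")
        return None
    if cmd == 'install':
        return cmd
    print("gsm ERR! unknown command `%s`" % cmd)
    return None

if __name__ == '__main__':
    if parse_args(sys.argv) == 'install':
        print("would run GSM().run() here")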
3a34ef949cdbe11dcf066cf1ca2e3567c89d57de
|
Make flake happy
|
tests/plugins/helpers/test_interact.py
|
tests/plugins/helpers/test_interact.py
|
from collections import namedtuple
from unittest import TestCase
from spock.plugins.helpers.clientinfo import PlayerPosition
from spock.plugins.helpers.interact import *
Packet = namedtuple('Packet', 'ident data')
class DataDict(dict):
def __init__(self, **kwargs):
super(DataDict, self).__init__(**kwargs)
self.__dict__.update(kwargs)
class PluginLoaderMock(object):
def provides(self, ident, obj):
self.provides_ident = ident
self.provides_obj = obj
def requires(self, requirement):
if requirement == 'ClientInfo':
return ClientInfoMock()
elif requirement == 'Inventory':
return InventoryMock()
elif requirement == 'Net':
return NetMock()
else:
raise AssertionError('Unexpected requirement %s' % requirement)
class NetMock(object):
idents = []
datas = []
def push_packet(self, ident, data):
data_dict = DataDict(**data)
self.idents.append(ident)
self.datas.append(data_dict)
print(ident, data)
class SlotMock(object):
def get_dict(self):
return {}
class InventoryMock(object):
active_slot = SlotMock()
class ClientInfoMock(object):
eid = 123
position = PlayerPosition(1., 2., 3.)
class InteractPluginTest(TestCase):
def setUp(self):
ploader = PluginLoaderMock()
self.plug = InteractPlugin(ploader, {})
assert ploader.provides_ident == 'Interact'
assert ploader.provides_obj == self.plug
def test_sneak(self):
self.assertEqual(self.plug.sneaking, False)
self.plug.sneak()
self.assertEqual(self.plug.sneaking, True)
self.plug.unsneak()
self.assertEqual(self.plug.sneaking, False)
self.plug.sneak(sneak=True)
self.assertEqual(self.plug.sneaking, True)
self.plug.sneak(sneak=False)
self.assertEqual(self.plug.sneaking, False)
def test_sprint(self):
self.assertEqual(self.plug.sprinting, False)
self.plug.sprint()
self.assertEqual(self.plug.sprinting, True)
self.plug.unsprint()
self.assertEqual(self.plug.sprinting, False)
self.plug.sprint(sprint=True)
self.assertEqual(self.plug.sprinting, True)
self.plug.sprint(sprint=False)
self.assertEqual(self.plug.sprinting, False)
def test_chat(self):
self.plug.chat('Hello')
self.assertEqual(NetMock.datas[-1].message, 'Hello')
self.plug.whisper('Guy', 'Hello')
self.assertEqual(NetMock.datas[-1].message, '/tell Guy Hello')
def test_look(self):
self.plug.look(123.4, -42.2)
self.assertEqual(ClientInfoMock.position.yaw, 123.4)
self.assertEqual(ClientInfoMock.position.pitch, -42.2)
self.assertEqual(NetMock.datas[-1].yaw, 123)
self.assertEqual(NetMock.datas[-1].pitch, -42)
self.plug.look_rel(1.4, 2.1)
self.assertAlmostEqual(ClientInfoMock.position.yaw, 124.8)
self.assertAlmostEqual(ClientInfoMock.position.pitch, -40.1)
self.assertEqual(NetMock.datas[-1].yaw, 124)
self.assertEqual(NetMock.datas[-1].pitch, -40)
self.plug.look_at_rel(Vector3(1, 0, 0))
self.assertAlmostEqual(ClientInfoMock.position.yaw, -90)
self.assertAlmostEqual(ClientInfoMock.position.pitch, 0)
self.plug.look_at(Vector3(0, 2 + PLAYER_HEIGHT, 3))
self.assertAlmostEqual(ClientInfoMock.position.yaw, 90)
self.assertAlmostEqual(ClientInfoMock.position.pitch, 0)
# TODO digging, block placement
def test_activate_item(self):
self.plug.activate_item()
self.assertEqual(NetMock.datas[-1].location, Vector3(-1, 255, -1))
self.assertEqual(NetMock.datas[-1].direction, -1)
for c in 'xyz':
self.assertEqual(getattr(NetMock.datas[-1], 'cur_pos_%s' % c),
-1)
# TODO deactivate_item
def test_entity(self):
entity = DataDict(eid=234, x=2, y=2 + PLAYER_HEIGHT, z=4)
self.plug.use_entity(entity)
self.assertAlmostEqual(ClientInfoMock.position.yaw, -45)
self.assertAlmostEqual(ClientInfoMock.position.pitch, 0)
self.assertEqual(NetMock.datas[-2].action, INTERACT_ENTITY)
self.assertEqual(NetMock.datas[-2].target, 234)
self.assertEqual(NetMock.idents[-1], 'PLAY>Animation')
self.plug.auto_look = False
entity = DataDict(eid=345, x=0, y=3 + PLAYER_HEIGHT, z=3)
self.plug.attack_entity(entity)
# different pos, but look shouldn't have changed
self.assertAlmostEqual(ClientInfoMock.position.yaw, -45)
self.assertAlmostEqual(ClientInfoMock.position.pitch, 0)
self.assertEqual(NetMock.datas[-2].action, ATTACK_ENTITY)
self.assertEqual(NetMock.datas[-2].target, 345)
self.assertEqual(NetMock.idents[-1], 'PLAY>Animation')
self.plug.auto_look = True
self.plug.auto_swing = False
entity = DataDict(eid=456, x=2, y=3 + PLAYER_HEIGHT, z=3)
self.plug.mount_vehicle(entity)
self.assertAlmostEqual(ClientInfoMock.position.yaw, -90)
self.assertAlmostEqual(ClientInfoMock.position.pitch, -45)
self.assertEqual(NetMock.datas[-1].action, INTERACT_ENTITY)
self.assertEqual(NetMock.datas[-1].target, 456)
self.plug.auto_swing = True
def test_vehicle(self):
self.plug.steer_vehicle(1., -2.)
self.assertEqual(NetMock.datas[-1].flags, 0)
self.assertAlmostEqual(NetMock.datas[-1].sideways, 1.)
self.assertAlmostEqual(NetMock.datas[-1].forward, -2.)
self.plug.steer_vehicle(-1., 2., jump=True, unmount=True)
self.assertEqual(NetMock.datas[-1].flags, 3)
self.assertAlmostEqual(NetMock.datas[-1].sideways, -1.)
self.assertAlmostEqual(NetMock.datas[-1].forward, 2.)
self.plug.jump_vehicle()
self.assertEqual(NetMock.datas[-1].flags, 1)
self.assertEqual(NetMock.datas[-1].sideways, 0.)
self.assertEqual(NetMock.datas[-1].forward, 0.)
self.plug.unmount_vehicle()
self.assertEqual(NetMock.datas[-1].flags, 2)
self.assertEqual(NetMock.datas[-1].sideways, 0.)
self.assertEqual(NetMock.datas[-1].forward, 0.)
|
Python
| 0.000214
|
@@ -58,16 +58,17 @@
estCase%0A
+%0A
from spo
@@ -162,17 +162,116 @@
import
-*
+%5C%0A ATTACK_ENTITY, INTERACT_ENTITY, InteractPlugin, PLAYER_HEIGHT%0Afrom spock.vector import Vector3
%0A%0A%0APacke
@@ -3804,29 +3804,62 @@
ion,
- Vector3(-1, 255, -1)
+%0A %7B'x': -1, 'y': 255, 'z': -1%7D
)%0A
|
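The diff replaces a star import with the explicit names the tests use, which is what satisfies flake8: F403 flags the star import itself and F405 flags every name it hides. A generic before/after with a stdlib module:

# before:  from math import *   # F403; each use of sqrt would then be F405
from math import pi, sqrt       # explicit names keep flake8 quiet

print(sqrt(pi))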
045256f13ed1bfc3e9cca6897548760145101ff0
|
simplify test executor
|
iatidataquality/test_queue.py
|
iatidataquality/test_queue.py
|
import sys, os, json, ckan, urllib2
from datetime import date, datetime
import models, dqprocessing, dqparsetests, \
    dqfunctions, queue
from lxml import etree
from iatidataquality import db
# FIXME: this should be in config
download_queue='iati_tests_queue'
def aggregate_results(runtime, package_id):
return dqprocessing.aggregate_results(runtime, package_id)
def test_activity(runtime_id, package_id, result_identifier, data,
test_functions, result_hierarchy):
xmldata = etree.fromstring(data)
tests = models.Test.query.filter(models.Test.active == True).all()
conditions = models.TestCondition.query.filter(
models.TestCondition.active == True).all()
for test in tests:
if not test.id in test_functions:
continue
try:
if test_functions[test.id](xmldata):
the_result = 1
else:
the_result = 0
# If an exception is not caught in test functions,
# it should not count against the publisher
except Exception:
the_result = 2
newresult = models.Result()
newresult.runtime_id = runtime_id
newresult.package_id = package_id
newresult.test_id = test.id
newresult.result_data = the_result
newresult.result_identifier = result_identifier
newresult.result_hierarchy = result_hierarchy
db.session.add(newresult)
return "Success"
def check_file(file_name, runtime_id, package_id, context=None):
try:
try:
data = etree.parse(file_name)
except etree.XMLSyntaxError:
dqprocessing.add_hardcoded_result(-3, runtime_id, package_id, False)
return
dqprocessing.add_hardcoded_result(-3, runtime_id, package_id, True)
from dqparsetests import test_functions as tf
test_functions = tf()
def get_result_hierarchy(activity):
hierarchy = activity.get('hierarchy', default=None)
            if hierarchy == "":
return None
return hierarchy
for activity in data.findall('iati-activity'):
result_hierarchy = get_result_hierarchy(activity)
result_identifier = activity.find('iati-identifier').text
activity_data = etree.tostring(activity)
res = test_activity(runtime_id, package_id,
result_identifier, activity_data,
test_functions, result_hierarchy)
db.session.commit()
print "Aggregating results..."
dqprocessing.aggregate_results(runtime_id, package_id)
print "Finished aggregating results"
db.session.commit()
dqfunctions.add_test_status(package_id, 3)
except Exception, e:
print "Exception in check_file ", e
def dequeue_download(body):
try:
args = json.loads(body)
check_file(args['filename'],
args['runtime_id'],
args['package_id'],
args['context'])
except Exception, e:
print "Exception in dequeue_download", e
def run_test_queue():
for body in queue.handle_queue_generator(download_queue):
dequeue_download(body)
|
Python
| 0.00182
|
@@ -724,249 +724,520 @@
l()%0A
-
%0A
-for test in tests:%0A if not test.id in test_functions:%0A continue%0A try:%0A if test_functions%5Btest.id%5D(xmldata):%0A the_result = 1%0A else:%0A the_result = 0%0A
+def add_result(test_id, the_result):%0A newresult = models.Result()%0A newresult.runtime_id = runtime_id%0A newresult.package_id = package_id%0A newresult.test_id = test.id%0A newresult.result_data = the_result%0A newresult.result_identifier = result_identifier%0A newresult.result_hierarchy = result_hierarchy%0A db.session.add(newresult)%0A%0A def execute_test(test_id):%0A try:%0A return int(test_functions%5Btest_id%5D(xmldata))%0A except:%0A
# If
@@ -1232,16 +1232,19 @@
+
# If an
@@ -1286,16 +1286,20 @@
ctions,%0A
+
@@ -1354,131 +1354,90 @@
-except Exception:%0A the_result =
+ return
2%0A
-%0A
+%0A
-newresult = models.Result()%0A newresult.runtime_id = r
+for test in tests:%0A if not test.id in test_f
un
+c
ti
-me_id
+ons:
%0A
@@ -1445,176 +1445,63 @@
-newresult.package_id = package_id%0A newresult.test_id = test.id%0A newresult.result_data = the_result%0A newresult.result_identifier = result_identifier
+ continue%0A the_result = execute_test(test.id)
%0A
@@ -1509,88 +1509,40 @@
-new
+add_
result
-.result_hierarchy = result_hierarchy%0A db.session.add(new
+(test.id, the_
result)%0A
@@ -1537,16 +1537,17 @@
result)%0A
+%0A
retu
|
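A sketch of the refactor above: the per-test try/except moves into a small helper that maps any exception to the sentinel result 2, leaving the main loop flat. The lambdas stand in for the real test_functions mapping.

def make_executor(test_functions, xmldata):
    def execute_test(test_id):
        try:
            return int(test_functions[test_id](xmldata))
        except Exception:
            # an uncaught bug in a test should not count against the publisher
            return 2
    return execute_test

tests = {1: lambda data: True, 2: lambda data: 1 / 0}
execute_test = make_executor(tests, xmldata=None)
print([execute_test(tid) for tid in tests])   # [1, 2]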
86a5ebd7562dece822b62c618ceba98088fb3bc5
|
Add a search interface to assign policies
|
ibmcnx/cnx/LibraryPolicies.py
|
ibmcnx/cnx/LibraryPolicies.py
|
######
# Work with Libraries - add policies
#
# Author: Christoph Stoettner
# Mail: christoph.stoettner@stoeps.de
# Documentation: http://scripting101.stoeps.de
#
# Version: 2.0
# Date: 2014-06-04
#
# License: Apache 2.0
#
# TODO: Create a Search Filter to get a list of libraries or Users.
execfile( "filesAdmin.py" )
import sys
def printLibs( libraries ):
for i in range( len( libraries ) ):
print str( i ) + '\t',
print str( round( libraries[i]['maximumSize'] / 1073741824.0, 2 ) ) + ' GB\t',
print str( round( libraries[i]['percentUsed'], 2 ) ) + ' %\t',
print str( libraries[i]['id'] ) + '\t',
print str( libraries[i]['title'] )
def printPolicies( policies ):
state = ''
print '# \tmax Size \t\t uuid \t\t\t\t\t title'
print '----------------------------------------------------------------------------------------------------'
for i in range( len( policies ) ):
print str( i ) + '\t' + str( round( policies[i]['maximumSize'] / 1073741824.0, 2 ) ) + ' GB\t\t' + str( policies[i]['id'] ) + '\t\t' + str( policies[i]['title'] )
print
return policies
def combineMaps( personalList, communityList ):
pLen = len( personalList )
cLen = len( communityList )
print
for i in range( pLen ):
print str( i ) + '\t',
print str( round( personalList[i]['maximumSize'] / 1073741824.0, 2 ) ) + ' GB\t',
print str( round( personalList[i]['percentUsed'], 2 ) ) + ' %\t',
print str( personalList[i]['id'] ) + '\t',
print str( personalList[i]['title'] )
print
for i in range( cLen ):
print str( i + pLen ) + '\t',
print str( round( communityList[i]['maximumSize'] / 1073741824.0, 2 ) ) + ' GB\t',
print str( round( communityList[i]['percentUsed'], 2 ) ) + ' %\t',
print str( communityList[i]['id'] ) + '\t',
print str( communityList[i]['title'] )
print
return pLen, cLen
def askLibraryType():
# Check if Community or Personal Libraries should be searched
is_valid_lib = 0
while not is_valid_lib :
try :
libask = 'Personal or Community Library? (P|C)'
libType = raw_input( libask ).lower()
if libType == 'p':
is_valid_lib = 1 ## set it to 1 to validate input and to terminate the while..not loop
return libType
elif libType == 'c':
is_valid_lib = 1 ## set it to 1 to validate input and to terminate the while..not loop
return libType
else:
print ( "'%s' is not a valid menu option.") % libType
except ValueError, e :
print ("'%s' is not valid." % e.args[0].split(": ")[1])
def searchLibrary( libType ):
if libType == 'p':
libNameAsk = 'Which User you want to search? (min 1 character)'
libNameAnswer = raw_input(libNameAsk)
result = FilesUtilService.filterListByString(FilesLibraryService.browsePersonal("title", "true", 1, 250), "title", ".*" + libNameAnswer + ".*")
return result
elif libType == 'c':
print libType
else:
print ('Not a valid library Type!')
def getLibraryDetails( librarieslist ):
result = str(librarieslist)
counter = result.count('id=')
print counter
index = 0
count = 0
if (counter<1):
print '\n------------------------------------------------------------------'
print 'There is NO Library with this name\nPlease try again ----------->'
print '------------------------------------------------------------------\n'
return (0,0,0,0,0)
elif (counter<2):
lib_id = result[result.find('id=')+5:result.find('id=')+41]
lib_name = result[result.find('name=')+5:result.find('id=')-2]
return (lib_id, lib_name, 1)
else:
lib_id = []
        lib_name = []
        numberlist = []
        lib_number = -1
print '\nThere are multiple libraries with this name:'
print '----------------------------------------------'
while index < len(result):
index = result.find('{', index)
end = result.find('{', index+1)
lib_id.append(result[result.find('uuid=', index)+5:result.find('uuid=', index)+41])
lib_name.append(result[result.find('name=', index)+5:result.find('uuid=', index)-2])
numberlist.append(count)
if index == -1:
break
print (str(count) + ': ' + lib_name[count])
index += 1
count += 1
print '----------------------------------------------'
go_on = ''
while go_on != 'TRUE':
lib_number = raw_input('Please type the number of the library? ')
try:
lib_number = float(lib_number)
except (TypeError, ValueError):
continue
if count-1>=lib_number>=0:
break
else:
continue
return (lib_id[int(lib_number)], lib_name[int(lib_number)], 1)
# Combine personal and community FilesLibrary List
# TODO: Change this to a function for searching
# personalList = FilesLibraryService.browsePersonal( "title", "true", 1, 100 )
# communityList = FilesLibraryService.browseCommunity( "title", "true", 1, 100 )
getLibraryDetails( searchLibrary(askLibraryType()) )
#pLen, cLen = combineMaps( personalList, communityList )
# print 'Available Policies: '
# policies = printPolicies( FilesPolicyService.browse( "title", "true", 1, 25 ) )
#libraryID = int( raw_input( 'Which library should be changed? (0 - %s) ' % str( pLen + cLen - 1 ) ) )
#if libraryID >= pLen:
# libraryID = libraryID - pLen
# libraryType = 'community'
#else:
# libraryType = 'personal'
#policyID = int( raw_input( 'Which policy do you want to assign? ' ) )
#if libraryType == 'personal':
# libraryUUID = personalList[libraryID]['id']
#elif libraryType == 'community':
# libraryUUID = communityList[libraryID]['id']
#else:
# print "Error can't find Library UUID"
#policyUUID = policies[policyID]['id']
#FilesLibraryService.assignPolicy( libraryUUID, policyUUID )
|
Python
| 0
|
@@ -3793,25 +3793,25 @@
find('id=')+
-5
+3
:result.find
@@ -3818,18 +3818,18 @@
('id=')+
-41
+39
%5D%0A
@@ -3865,17 +3865,18 @@
nd('
-nam
+titl
e=')+
-5
+6
:res
@@ -3881,25 +3881,34 @@
esult.find('
-i
+ownerUserI
d=')-2%5D%0A
@@ -5403,16 +5403,29 @@
100 )%0A%0A
+LibDetails =
getLibra
@@ -5469,16 +5469,33 @@
pe()) )%0A
+print LibDetails%0A
#pLen, c
|
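The diff above re-tunes hand-counted string offsets (id= at +3..+39, title= at +6) used to slice fields out of str(result). A regex is less brittle for the same job; this sketch assumes the repr contains uuid=<36 chars> and title=<text> pairs, which is an assumption about the wsadmin output format.

import re

SAMPLE = "{uuid=12345678-1234-1234-1234-123456789012, title=My Library, ownerUserId=42}"

def extract_libraries(result_repr):
    pattern = re.compile(r"uuid=([0-9a-f-]{36}).*?title=([^,}]+)", re.I)
    return pattern.findall(result_repr)

print(extract_libraries(SAMPLE))   # [('12345678-1234-1234-1234-123456789012', 'My Library')]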
e1530b228d54c9d8d3d5b11070d8b70692068dfd
|
Handle nil internaldate values
|
imapclient/response_parser.py
|
imapclient/response_parser.py
|
# Copyright (c) 2014, Menno Smits
# Released subject to the New BSD License
# Please see http://en.wikipedia.org/wiki/BSD_licenses
"""
Parsing for IMAP command responses with focus on FETCH responses as
returned by imaplib.
Initially inspired by http://effbot.org/zone/simple-iterator-parser.htm
"""
# TODO more exact error reporting
from __future__ import unicode_literals
import re
import sys
from collections import defaultdict
import six
from .datetime_util import parse_to_datetime
from .response_lexer import TokenSource
from .response_types import BodyData, Envelope, Address, SearchIds
xrange = six.moves.xrange
__all__ = ['parse_response', 'parse_message_list', 'ParseError']
class ParseError(ValueError):
pass
def parse_response(data):
"""Pull apart IMAP command responses.
Returns nested tuples of appropriately typed objects.
"""
if data == [None]:
return []
return tuple(gen_parsed_response(data))
_msg_id_pattern = re.compile("(\d+(?: +\d+)*)")
def parse_message_list(data):
"""Parse a list of message ids and return them as a list.
parse_response is also capable of doing this but this is
faster. This also has special handling of the optional MODSEQ part
of a SEARCH response.
The returned list is a SearchIds instance which has a *modseq*
attribute which contains the MODSEQ response (if returned by the
server).
"""
if len(data) != 1:
raise ValueError("unexpected message list data")
data = data[0]
if not data:
return SearchIds()
if six.PY3 and isinstance(data, six.binary_type):
data = data.decode('ascii')
m = _msg_id_pattern.match(data)
if not m:
raise ValueError("unexpected message list format")
ids = SearchIds(int(n) for n in m.group(1).split())
# Parse any non-numeric part on the end using parse_response (this
# is likely to be the MODSEQ section).
extra = data[m.end(1):]
if extra:
for item in parse_response([extra.encode('ascii')]):
if isinstance(item, tuple) and len(item) == 2 and item[0].lower() == b'modseq':
ids.modseq = item[1]
elif isinstance(item, int):
ids.append(item)
return ids
def gen_parsed_response(text):
if not text:
return
src = TokenSource(text)
token = None
try:
for token in src:
yield atom(src, token)
except ParseError:
raise
except ValueError:
_, err, _ = sys.exc_info()
raise ParseError("%s: %s" % (str(err), token))
def parse_fetch_response(text, normalise_times=True, uid_is_key=True):
"""Pull apart IMAP FETCH responses as returned by imaplib.
    Returns a dictionary, keyed by message ID. Each value is a dictionary
    keyed by FETCH field type (e.g. "RFC822").
"""
if text == [None]:
return {}
response = gen_parsed_response(text)
parsed_response = defaultdict(dict)
while True:
try:
msg_id = seq = _int_or_error(six.next(response),
'invalid message ID')
except StopIteration:
break
try:
msg_response = six.next(response)
except StopIteration:
raise ParseError('unexpected EOF')
if not isinstance(msg_response, tuple):
raise ParseError('bad response type: %s' % repr(msg_response))
if len(msg_response) % 2:
raise ParseError('uneven number of response items: %s' % repr(msg_response))
# always return the sequence of the message, so it is available
# even if we return keyed by UID.
msg_data = {b'SEQ': seq}
for i in xrange(0, len(msg_response), 2):
word = msg_response[i].upper()
value = msg_response[i + 1]
if word == b'UID':
uid = _int_or_error(value, 'invalid UID')
if uid_is_key:
msg_id = uid
else:
msg_data[word] = uid
elif word == b'INTERNALDATE':
msg_data[word] = _convert_INTERNALDATE(value, normalise_times)
elif word == b'ENVELOPE':
msg_data[word] = _convert_ENVELOPE(value, normalise_times)
elif word in (b'BODY', b'BODYSTRUCTURE'):
msg_data[word] = BodyData.create(value)
else:
msg_data[word] = value
parsed_response[msg_id].update(msg_data)
return parsed_response
def _int_or_error(value, error_text):
try:
return int(value)
except (TypeError, ValueError):
raise ParseError('%s: %s' % (error_text, repr(value)))
def _convert_INTERNALDATE(date_string, normalise_times=True):
try:
return parse_to_datetime(date_string, normalise=normalise_times)
except ValueError:
return None
def _convert_ENVELOPE(envelope_response, normalise_times=True):
dt = None
if envelope_response[0]:
try:
dt = parse_to_datetime(envelope_response[0], normalise=normalise_times)
except ValueError:
pass
subject = envelope_response[1]
# addresses contains a tuple of addresses
# from, sender, reply_to, to, cc, bcc headers
addresses = []
for addr_list in envelope_response[2:8]:
addrs = []
if addr_list:
for addr_tuple in addr_list:
if addr_tuple:
addrs.append(Address(*addr_tuple))
addresses.append(tuple(addrs))
else:
addresses.append(None)
return Envelope(
dt, subject, *addresses,
in_reply_to=envelope_response[8],
message_id=envelope_response[9]
)
def atom(src, token):
if token == b'(':
return parse_tuple(src)
elif token == b'NIL':
return None
elif token[:1] == b'{':
literal_len = int(token[1:-1])
literal_text = src.current_literal
if literal_text is None:
raise ParseError('No literal corresponds to %r' % token)
if len(literal_text) != literal_len:
raise ParseError('Expecting literal of size %d, got %d' % (
literal_len, len(literal_text)))
return literal_text
elif len(token) >= 2 and (token[:1] == token[-1:] == b'"'):
return token[1:-1]
elif token.isdigit():
return int(token)
else:
return token
def parse_tuple(src):
out = []
for token in src:
if token == b")":
return tuple(out)
out.append(atom(src, token))
# no terminator
raise ParseError('Tuple incomplete before "(%s"' % _fmt_tuple(out))
def _fmt_tuple(t):
return ' '.join(str(item) for item in t)
|
Python
| 0.000279
|
@@ -4759,24 +4759,155 @@
imes=True):%0A
+ # Observed in https://sentry.nylas.com/sentry/sync-prod/group/5907/%0A if date_string.upper() == b'NIL':%0A return None%0A%0A
try:%0A
|
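The fix above short-circuits the server's untagged NIL before the date parser sees it. The same guard in isolation, with datetime.strptime standing in for imapclient's parse_to_datetime helper:

from datetime import datetime

def convert_internaldate(date_string):
    if date_string.upper() == b'NIL':   # server sent no INTERNALDATE
        return None
    try:
        return datetime.strptime(date_string.decode('ascii'),
                                 '%d-%b-%Y %H:%M:%S %z')
    except ValueError:
        return None

print(convert_internaldate(b'nil'))                          # None
print(convert_internaldate(b'02-Apr-2014 10:30:00 +0200'))   # 2014-04-02 10:30:00+02:00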
5b8725caecd01ccb4d0d3e0c40b910cbdf19258b
|
Fix country form input
|
spotify_country_top_n.py
|
spotify_country_top_n.py
|
import requests
import webbrowser
import json
import urllib.request
import urllib.parse
import re
token = "Bearer " + input("OAuth Token: ") #BQDWxOubOFzx8fjeDi9E3Npt_fd9GiGXVgdiC3tS9LWHgajM3dRe2w3DjVVtjv0ZgHZAKt6zw2cD9PEBcLf-TFxtpOnb89THvPNMH-gbAO9Ho_8eSchxzO7JdaQ1Rg6eLBmzGIPjUp-5NM9Umpk62uKuAwPw7kSB0fb_B1uYdR4YkztfMsW5_OwXJukHyN0Cp2ztHR5V4_-5oFlHuTfPmyDcKZK8yreVwFUZuYB_VMPe_4pNhmu3PwlcePsKel9irRRsw41ly0mk1FcL3XFFHHXMHBHblYEu7hSccB8sqecdVZD9-w7PdcYS"
headers = {
'Accept': 'application/json',
'Authorization': token}
params = {
'country': input("Country: "),
'limit': input("Maximum number of tracks: "),
'offset' : input("Mininum number of tracks: ")
}
r = requests.get('https://api.spotify.com/v1/browse/new-releases', headers=headers, params = params)
print_json = r.json()
albums_name = []
for i in range(int(params['offset']), int(params['limit'])):
a = print_json['albums']['items'][i]['name']
albums_name.append(a)
def youtube(s):
query_string = urllib.parse.urlencode({"search_query" : s})
html_content = urllib.request.urlopen("http://www.youtube.com/results?" + query_string)
search_results = re.findall(r'href=\"\/watch\?v=(.{11})', html_content.read().decode())
return("http://www.youtube.com/watch?v=" + search_results[0])
for i in albums_name:
webbrowser.open(youtube(i))
|
Python
| 0.999745
|
@@ -590,16 +590,43 @@
%22Country
+ in ISO 3166-1 alpha-2 form
: %22),%0D%0A
|
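The fix only rewords the prompt: Spotify's country parameter takes an ISO 3166-1 alpha-2 code such as 'SE'. A light validation sketch so typos fail before the request is sent; the two-letter check is an addition, not part of the original script:

import re

def ask_country():
    code = input("Country in ISO 3166-1 alpha-2 form: ").strip().upper()
    if not re.fullmatch(r"[A-Z]{2}", code):
        raise ValueError("expected a two-letter code such as 'SE' or 'US'")
    return code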
3168e8b43e77a10c54a57345d2c06f9f4e4800c7
|
Allow Parameters to set defaults for TemplateResource
|
heat/engine/resources/template_resource.py
|
heat/engine/resources/template_resource.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from requests import exceptions
from heat.common import template_format
from heat.common import urlfetch
from heat.engine import attributes
from heat.engine import properties
from heat.engine import resource
from heat.engine import stack_resource
from heat.openstack.common import log as logging
logger = logging.getLogger(__name__)
class TemplateResource(stack_resource.StackResource):
'''
A resource implemented by a nested stack.
This implementation passes resource properties as parameters to the nested
stack. Outputs of the nested stack are exposed as attributes of this
resource.
'''
def __init__(self, name, json_snippet, stack):
self.template_name = stack.env.get_resource_type(json_snippet['Type'],
name)
self._parsed_nested = None
self.stack = stack
# on purpose don't pass in the environment so we get
# the official/facade class in case we need to copy it's schema.
cls_facade = resource.get_class(json_snippet['Type'])
# if we're not overriding via the environment, mirror the template as
# a new resource
if cls_facade == self.__class__:
self.properties_schema = (properties.Properties
.schema_from_params(self.parsed_nested.get('Parameters')))
self.attributes_schema = (attributes.Attributes
.schema_from_outputs(self.parsed_nested.get('Outputs')))
# otherwise we are overriding a resource type via the environment
# and should mimic that type
else:
self.properties_schema = cls_facade.properties_schema
self.attributes_schema = cls_facade.attributes_schema
super(TemplateResource, self).__init__(name, json_snippet, stack)
def _to_parameters(self):
'''
:return: parameter values for our nested stack based on our properties
'''
params = {}
for n, v in iter(self.properties.props.items()):
if not v.implemented():
continue
elif v.type() == properties.LIST:
# take a list and create a CommaDelimitedList
val = self.properties[n]
if val:
params[n] = ','.join(val)
else:
# for MAP, the JSON param takes either a collection or string,
# so just pass it on and let the param validate as appropriate
params[n] = self.properties[n]
return params
@property
def parsed_nested(self):
if not self._parsed_nested:
self._parsed_nested = template_format.parse(self.template_data)
return self._parsed_nested
@property
def template_data(self):
t_data = self.stack.t.files.get(self.template_name)
if not t_data and self.template_name.endswith((".yaml", ".template")):
try:
t_data = urlfetch.get(self.template_name)
except (exceptions.RequestException, IOError) as r_exc:
raise ValueError("Could not fetch remote template '%s': %s" %
(self.template_name, str(r_exc)))
else:
# TODO(Randall) Whoops, misunderstanding on my part; this
# doesn't actually persist to the db like I thought.
# Find a better way
self.stack.t.files[self.template_name] = t_data
return t_data
def handle_create(self):
return self.create_with_template(self.parsed_nested,
self._to_parameters())
def handle_delete(self):
self.delete_nested()
def FnGetRefId(self):
if not self.nested():
return unicode(self.name)
return self.nested().identifier().arn()
resource.register_template_class(TemplateResource)
|
Python
| 0.000037
|
@@ -2702,16 +2702,17 @@
ontinue%0A
+%0A
@@ -2719,40 +2719,64 @@
-elif v.type() == properties.LIST
+val = self.properties%5Bn%5D%0A%0A if val is not None
:%0A
@@ -2855,55 +2855,38 @@
-val = self.properties%5Bn%5D%0A if val
+if v.type() == properties.LIST
:%0A
@@ -2903,25 +2903,19 @@
-params%5Bn%5D
+val
= ','.j
@@ -2923,33 +2923,16 @@
in(val)%0A
- else:
%0A
@@ -3082,16 +3082,17 @@
opriate%0A
+%0A
@@ -3111,34 +3111,19 @@
ms%5Bn%5D =
-self.properties%5Bn%5D
+val
%0A%0A
|
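A stand-alone sketch of the reshaped _to_parameters logic: each property is read once, None values are skipped (so the nested template's own parameter defaults apply), and list values are joined into CommaDelimitedList strings. The LIST tag below stands in for heat's properties.LIST.

LIST = 'List'   # stand-in for heat.engine.properties.LIST

def to_parameters(props, types):
    params = {}
    for name, val in props.items():
        if val is None:
            continue            # let the nested template's default win
        if types.get(name) == LIST:
            val = ','.join(val)
        params[name] = val
    return params

print(to_parameters({'a': None, 'b': ['x', 'y'], 'c': 7}, {'b': LIST}))
# {'b': 'x,y', 'c': 7}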
21f0c1294afc4cd53c0212dbf051744a8bb222a8
|
fix bugs
|
skitai/corequest/httpbase/sync_proxy.py
|
skitai/corequest/httpbase/sync_proxy.py
|
# testing purpose WAS sync service
from . import task
from rs4 import webtest
from rs4.cbutil import tuple_cb
import random
from urllib.parse import urlparse, urlunparse
from skitai import exceptions
import xmlrpc.client
import sys
from aquests.client import synconnect
class RPCResponse:
def __init__ (self, val):
self.data = val
class XMLRPCServerProxy (xmlrpc.client.ServerProxy):
def _ServerProxy__request (self, methodname, params):
response = xmlrpc.client.ServerProxy._ServerProxy__request (self, methodname, params)
return Result (3, RPCResponse (response))
try:
import jsonrpclib
except ImportError:
pass
else:
class JSONRPCServerProxy (jsonrpclib.ServerProxy):
def _ServerProxy__request (self, methodname, params):
            response = jsonrpclib.ServerProxy._ServerProxy__request (self, methodname, params)
return Result (3, RPCResponse (response))
class Result:
def __init__ (self, status, response = None):
self.status = status
self.__response = response
def __getattr__ (self, attr):
return getattr (self.__response, attr)
@property
def data (self):
if isinstance (self.__response, RPCResponse):
return self.__response.data
elif hasattr (self.__response, "status_code"):
ct = self.__response.headers.get ("content-type", "")
if ct:
if ct.startswith ("application/json"):
return self.__response.json ()
return self.__response.text
return self.__response
def reraise (self):
if self.status !=3 and self.__response:
raise self.__response [1]
def fetch (self, *args, **kargs):
self.reraise ()
return self.data
def one (self, *args, **kargs):
self.reraise ()
if not self.data:
raise exceptions.HTTPError ("410 Maybe Gone")
elif len (self.data) != 1:
raise exceptions.HTTPError ("409 Conflict")
return self.data [0]
class ProtoCall (task.Task):
def __init__ (self, cluster, *args, **kargs):
self.cluster = cluster
self.result = None
self.expt = None
self.handle_request (*args, **kargs)
def get_syncon (self, uri):
if self.cluster:
syncon = self.cluster.get ()
else:
parts = urlparse (uri)
try:
host, port = parts [1].split (":")
except ValueError:
port = parts [0] == "http" and 80 or 443
host = parts [1]
else:
port = int (port)
syncon = synconnect.SynConnect ((host, port))
uri = urlunparse (("", "") + parts [2:])
syncon.connect ()
return syncon, uri
def create_stub (self):
syncon, uri = self.get_syncon (self.uri)
with syncon.webtest as cli:
if self.reqtype == "jsonrpc":
proxy_class = JSONRPCServerProxy
else:
proxy_class = XMLRPCServerProxy
return getattr (cli, self.reqtype) (uri, proxy_class)
def handle_request (self, uri, params = None, reqtype="rpc", headers = None, auth = None, meta = None, use_cache = True, mapreduce = False, filter = None, callback = None, timeout = 10, caller = None):
self._mapreduce = mapreduce
self.uri = uri
self.reqtype = reqtype
syncon, uri = self.get_syncon (uri)
syncon.set_auth (auth)
with syncon.webtest as cli:
req_func = getattr (cli, reqtype)
try:
resp = req_func (uri, headers = headers, auth = auth)
except:
self.expt = sys.exc_info ()
self.result = Result (1, self.expt)
else:
self.result = Result (3, resp)
syncon.set_active (False)
self.result.meta = meta or {}
callback and callback (self.result)
def set_callback (self, callback, reqid = None, timeout = 10):
if reqid is not None:
self.result.meta ["__reqid"] = reqid
tuple_cb (self.result, callback)
def wait (self, timeout = 10, *args, **karg):
pass
def _or_throw (self):
if self.expt:
raise exceptions.HTTPError ("700 Exception", self.expt)
if self.result.status_code >= 300:
raise exceptions.HTTPError ("{} {}".format (self.result.status_code, self.result.reason))
return self.result
def dispatch (self, *args, **kargs):
if self._mapreduce:
self.result = task.Results ([self.result])
return self.result
getwait = dispatch
getswait = dispatch
def dispatch_or_throw (self):
self.dispatch ()
return self._or_throw ()
def commit (self, *args, **karg):
return self._or_throw ()
wait_or_throw = commit
def fetch (self, timeout = 10, *args, **karg):
self._or_throw ()
return self.result.fetch ()
def one (self, timeout = 10, *args, **karg):
self._or_throw ()
return self.result.one ()
|
Python
| 0.000001
|
@@ -995,24 +995,37 @@
sponse =
+ None, expt =
None):%0A
@@ -1008,32 +1008,32 @@
, expt = None):%0A
-
self.sta
@@ -1077,24 +1077,51 @@
= response
+%0A self.__expt = expt
%0A %0A
@@ -1755,24 +1755,20 @@
self.__
-response
+expt
:
@@ -1798,24 +1798,20 @@
self.__
-response
+expt
%5B1%5D%0A%0A
@@ -2786,32 +2786,72 @@
t) %0A
+ if parts %5B0%5D == %22http%22:%0A
sync
@@ -2892,16 +2892,103 @@
port))%0A
+ else:%0A syncon = synconnect.SynSSLConnect ((host, port)) %0A
@@ -3031,16 +3031,28 @@
ts %5B2:%5D)
+
%0A
@@ -3924,16 +3924,28 @@
reqtype)
+
%0A
@@ -4116,24 +4116,24 @@
exc_info ()%0A
-
@@ -4160,16 +4160,23 @@
sult (1,
+ expt =
self.ex
|
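A sketch of the connection fix above: the connector class is now chosen from the URL scheme, with a matching default port. The class names follow the record's aquests synconnect module; the stubs below are placeholders so the sketch runs on its own.

from urllib.parse import urlparse

class SynConnect:                   # stub for aquests' plain connector
    def __init__(self, addr):
        self.addr, self.tls = addr, False

class SynSSLConnect(SynConnect):    # stub for the TLS connector
    def __init__(self, addr):
        super().__init__(addr)
        self.tls = True

def make_syncon(uri):
    parts = urlparse(uri)
    port = parts.port or (80 if parts.scheme == "http" else 443)
    cls = SynConnect if parts.scheme == "http" else SynSSLConnect
    return cls((parts.hostname, port))

print(make_syncon("https://example.com/rpc").tls)   # True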
195f0d5eec3d69ae6ffb1a52d151ad72fcb027a6
|
Shorten the may_fail_with_warning GM comparison list
|
slave/skia_slave_scripts/compare_gms.py
|
slave/skia_slave_scripts/compare_gms.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Compare the generated GM images to the baselines """
# System-level imports
import os
import sys
from build_step import BuildStep, BuildStepWarning
from utils import misc
import run_gm
class CompareGMs(BuildStep):
def _Run(self):
json_summary_path = misc.GetAbsPath(os.path.join(
self._gm_actual_dir, run_gm.JSON_SUMMARY_FILENAME))
# Temporary list of builders who are allowed to fail this step without the
# bot turning red.
may_fail_with_warning = [
'Test-Ubuntu12-ShuttleA-ATI5770-x86-Debug',
'Test-Ubuntu12-ShuttleA-ATI5770-x86-Debug-Trybot',
'Test-Ubuntu12-ShuttleA-ATI5770-x86-Release',
'Test-Ubuntu12-ShuttleA-ATI5770-x86-Release-Trybot',
'Test-Win7-ShuttleA-HD2000-x86_64-Debug',
'Test-Win7-ShuttleA-HD2000-x86_64-Debug-Trybot',
'Test-Win7-ShuttleA-HD2000-x86_64-Release',
'Test-Win7-ShuttleA-HD2000-x86_64-Release-Trybot',
'Test-Mac10.6-MacMini4.1-GeForce320M-x86_64-Debug',
'Test-Mac10.6-MacMini4.1-GeForce320M-x86_64-Debug-Trybot',
'Test-Mac10.6-MacMini4.1-GeForce320M-x86_64-Release',
'Test-Mac10.6-MacMini4.1-GeForce320M-x86_64-Release-Trybot',
'Test-Mac10.7-MacMini4.1-GeForce320M-x86_64-Debug',
'Test-Mac10.7-MacMini4.1-GeForce320M-x86_64-Debug-Trybot',
'Test-Mac10.7-MacMini4.1-GeForce320M-x86_64-Release',
'Test-Mac10.7-MacMini4.1-GeForce320M-x86_64-Release-Trybot',
]
# This import must happen after BuildStep.__init__ because it requires that
# CWD is in PYTHONPATH, and BuildStep.__init__ may change the CWD.
from gm import display_json_results
if not display_json_results.Display(json_summary_path):
if self._builder_name in may_fail_with_warning:
raise BuildStepWarning('Expectations mismatch in %s!' %
json_summary_path)
else:
raise Exception('Expectations mismatch in %s!' % json_summary_path)
if '__main__' == __name__:
sys.exit(BuildStep.RunBuildStep(CompareGMs))
|
Python
| 0.999966
|
@@ -716,906 +716,61 @@
x86-
-Debug',%0A 'Test-Ubuntu12-ShuttleA-ATI5770-x86-Debug-Trybot',%0A 'Test-Ubuntu12-ShuttleA-ATI5770-x86-Release',%0A 'Test-Ubuntu12-ShuttleA-ATI5770-x86-Release-Trybot',%0A 'Test-Win7-ShuttleA-HD2000-x86_64-Debug',%0A 'Test-Win7-ShuttleA-HD2000-x86_64-Debug-Trybot',%0A 'Test-Win7-ShuttleA-HD2000-x86_64-Release',%0A 'Test-Win7-ShuttleA-HD2000-x86_64-Release-Trybot',%0A 'Test-Mac10.6-MacMini4.1-GeForce320M-x86_64-Debug',%0A 'Test-Mac10.6-MacMini4.1-GeForce320M-x86_64-Debug-Trybot',%0A 'Test-Mac10.6-MacMini4.1-GeForce320M-x86_64-Release',%0A 'Test-Mac10.6-MacMini4.1-GeForce320M-x86_64-Release-Trybot',%0A 'Test-Mac10.7-MacMini4.1-GeForce320M-x86_64-Debug',%0A 'Test-Mac10.7-MacMini4.1-GeForce320M-x86_64-Debug-Trybot',%0A 'Test-Mac10.7-MacMini4.1-GeForce320M-x86_64-Release',%0A 'Test-Mac10.7-MacMini4.1-GeForce320M-x86_64
+Release',%0A 'Test-Ubuntu12-ShuttleA-ATI5770-x86
-Rel
|
d27c91b9fed08970685d0908ad25397f5d16defb
|
Use DeviceInfo in tasmota (#58604)
|
homeassistant/components/tasmota/mixins.py
|
homeassistant/components/tasmota/mixins.py
|
"""Tasmota entity mixins."""
from __future__ import annotations
import logging
from typing import Any
from hatasmota.entity import (
TasmotaAvailability as HATasmotaAvailability,
TasmotaEntity as HATasmotaEntity,
TasmotaEntityConfig,
)
from hatasmota.models import DiscoveryHashType
from homeassistant.components.mqtt import (
async_subscribe_connection_status,
is_connected as mqtt_connected,
)
from homeassistant.core import callback
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import DeviceInfo, Entity
from .discovery import (
TASMOTA_DISCOVERY_ENTITY_UPDATED,
clear_discovery_hash,
set_discovery_hash,
)
_LOGGER = logging.getLogger(__name__)
class TasmotaEntity(Entity):
"""Base class for Tasmota entities."""
def __init__(self, tasmota_entity: HATasmotaEntity) -> None:
"""Initialize."""
self._tasmota_entity = tasmota_entity
self._unique_id = tasmota_entity.unique_id
async def async_added_to_hass(self) -> None:
"""Subscribe to MQTT events."""
await self._subscribe_topics()
async def async_will_remove_from_hass(self) -> None:
"""Unsubscribe when removed."""
await self._tasmota_entity.unsubscribe_topics()
await super().async_will_remove_from_hass()
async def discovery_update(
self, update: TasmotaEntityConfig, write_state: bool = True
) -> None:
"""Handle updated discovery message."""
self._tasmota_entity.config_update(update)
await self._subscribe_topics()
if write_state:
self.async_write_ha_state()
async def _subscribe_topics(self) -> None:
"""(Re)Subscribe to topics."""
await self._tasmota_entity.subscribe_topics()
@property
def device_info(self) -> DeviceInfo:
"""Return a device description for device registry."""
return {"connections": {(CONNECTION_NETWORK_MAC, self._tasmota_entity.mac)}}
@property
def name(self) -> str | None:
"""Return the name of the binary sensor."""
return self._tasmota_entity.name
@property
def should_poll(self) -> bool:
"""Return the polling state."""
return False
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._unique_id
class TasmotaOnOffEntity(TasmotaEntity):
"""Base class for Tasmota entities which can be on or off."""
def __init__(self, **kwds: Any) -> None:
"""Initialize."""
self._on_off_state: bool = False
super().__init__(**kwds)
async def async_added_to_hass(self) -> None:
"""Subscribe to MQTT events."""
self._tasmota_entity.set_on_state_callback(self.state_updated)
await super().async_added_to_hass()
@callback
def state_updated(self, state: bool, **kwargs: Any) -> None:
"""Handle state updates."""
self._on_off_state = state
self.async_write_ha_state()
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return self._on_off_state
class TasmotaAvailability(TasmotaEntity):
"""Mixin used for platforms that report availability."""
_tasmota_entity: HATasmotaAvailability
def __init__(self, **kwds: Any) -> None:
"""Initialize the availability mixin."""
self._available = False
super().__init__(**kwds)
async def async_added_to_hass(self) -> None:
"""Subscribe to MQTT events."""
self._tasmota_entity.set_on_availability_callback(self.availability_updated)
self.async_on_remove(
async_subscribe_connection_status(self.hass, self.async_mqtt_connected)
)
await super().async_added_to_hass()
async def availability_updated(self, available: bool) -> None:
"""Handle updated availability."""
await self._tasmota_entity.poll_status()
self._available = available
self.async_write_ha_state()
@callback
def async_mqtt_connected(self, _: bool) -> None:
"""Update state on connection/disconnection to MQTT broker."""
if not self.hass.is_stopping:
if not mqtt_connected(self.hass):
self._available = False
self.async_write_ha_state()
@property
def available(self) -> bool:
"""Return if the device is available."""
return self._available
class TasmotaDiscoveryUpdate(TasmotaEntity):
"""Mixin used to handle updated discovery message."""
def __init__(self, discovery_hash: DiscoveryHashType, **kwds: Any) -> None:
"""Initialize the discovery update mixin."""
self._discovery_hash = discovery_hash
self._removed_from_hass = False
super().__init__(**kwds)
async def async_added_to_hass(self) -> None:
"""Subscribe to discovery updates."""
self._removed_from_hass = False
await super().async_added_to_hass()
async def discovery_callback(config: TasmotaEntityConfig) -> None:
"""Handle discovery update."""
_LOGGER.debug(
"Got update for entity with hash: %s '%s'",
self._discovery_hash,
config,
)
if not self._tasmota_entity.config_same(config):
# Changed payload: Notify component
_LOGGER.debug("Updating component: %s", self.entity_id)
await self.discovery_update(config)
else:
# Unchanged payload: Ignore to avoid changing states
_LOGGER.debug("Ignoring unchanged update for: %s", self.entity_id)
# Set in case the entity has been removed and is re-added, for example when changing entity_id
set_discovery_hash(self.hass, self._discovery_hash)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
TASMOTA_DISCOVERY_ENTITY_UPDATED.format(*self._discovery_hash),
discovery_callback,
)
)
@callback
def add_to_platform_abort(self) -> None:
"""Abort adding an entity to a platform."""
clear_discovery_hash(self.hass, self._discovery_hash)
super().add_to_platform_abort()
async def async_will_remove_from_hass(self) -> None:
"""Stop listening to signal and cleanup discovery data.."""
if not self._removed_from_hass:
clear_discovery_hash(self.hass, self._discovery_hash)
self._removed_from_hass = True
await super().async_will_remove_from_hass()
|
Python
| 0
|
@@ -2005,10 +2005,32 @@
urn
-%7B%22
+DeviceInfo(%0A
conn
@@ -2040,11 +2040,9 @@
ions
-%22:
+=
%7B(CO
@@ -2089,17 +2089,26 @@
ty.mac)%7D
-%7D
+%0A )
%0A%0A @p
|
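The diff swaps a bare dict for Home Assistant's DeviceInfo container, which type checkers can verify field by field. The same shape in isolation (requires the homeassistant package; imports match the record):

from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.entity import DeviceInfo

def device_info_for(mac: str) -> DeviceInfo:
    return DeviceInfo(
        connections={(CONNECTION_NETWORK_MAC, mac)},
    )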
3bbd08d9c9c1397fc03c6ff05f0d8e400eccc960
|
Use ColorMode enum in unifiled (#70548)
|
homeassistant/components/unifiled/light.py
|
homeassistant/components/unifiled/light.py
|
"""Support for Unifi Led lights."""
from __future__ import annotations
import logging
from unifiled import unifiled
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
COLOR_MODE_BRIGHTNESS,
PLATFORM_SCHEMA,
LightEntity,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_PORT, CONF_USERNAME
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)
# Validation of the user's configuration
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=20443): vol.All(cv.port, cv.string),
}
)
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Unifi LED platform."""
# Assign configuration variables.
# The configuration check takes care they are present.
host = config[CONF_HOST]
port = config[CONF_PORT]
username = config[CONF_USERNAME]
password = config[CONF_PASSWORD]
api = unifiled(host, port, username=username, password=password)
# Verify that passed in configuration works
if not api.getloginstate():
_LOGGER.error("Could not connect to unifiled controller")
return
add_entities(UnifiLedLight(light, api) for light in api.getlights())
class UnifiLedLight(LightEntity):
"""Representation of an unifiled Light."""
_attr_color_mode = COLOR_MODE_BRIGHTNESS
_attr_supported_color_modes = {COLOR_MODE_BRIGHTNESS}
def __init__(self, light, api):
"""Init Unifi LED Light."""
self._api = api
self._light = light
self._name = light["name"]
self._unique_id = light["id"]
self._state = light["status"]["output"]
self._available = light["isOnline"]
self._brightness = self._api.convertfrom100to255(light["status"]["led"])
@property
def name(self):
"""Return the display name of this light."""
return self._name
@property
def available(self):
"""Return the available state of this light."""
return self._available
@property
def brightness(self):
"""Return the brightness name of this light."""
return self._brightness
@property
def unique_id(self):
"""Return the unique id of this light."""
return self._unique_id
@property
def is_on(self):
"""Return true if light is on."""
return self._state
def turn_on(self, **kwargs):
"""Instruct the light to turn on."""
self._api.setdevicebrightness(
self._unique_id,
str(self._api.convertfrom255to100(kwargs.get(ATTR_BRIGHTNESS, 255))),
)
self._api.setdeviceoutput(self._unique_id, 1)
def turn_off(self, **kwargs):
"""Instruct the light to turn off."""
self._api.setdeviceoutput(self._unique_id, 0)
def update(self):
"""Update the light states."""
self._state = self._api.getlightstate(self._unique_id)
self._brightness = self._api.convertfrom100to255(
self._api.getlightbrightness(self._unique_id)
)
self._available = self._api.getlightavailable(self._unique_id)
|
Python
| 0
|
@@ -211,35 +211,8 @@
-COLOR_MODE_BRIGHTNESS,%0A
PLAT
@@ -222,16 +222,31 @@
M_SCHEMA
+,%0A ColorMode
,%0A Li
@@ -1793,26 +1793,25 @@
mode = C
-OLOR_MODE_
+olorMode.
BRIGHTNE
@@ -1853,18 +1853,17 @@
= %7BC
-OLOR_MODE_
+olorMode.
BRIG
|
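The diff replaces the COLOR_MODE_BRIGHTNESS string constant with the ColorMode enum member of the same value. Minimal shape of the change (requires the homeassistant package):

from homeassistant.components.light import ColorMode, LightEntity

class ExampleLight(LightEntity):
    _attr_color_mode = ColorMode.BRIGHTNESS
    _attr_supported_color_modes = {ColorMode.BRIGHTNESS}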
74b41eb7c2953a5dbb8146730da18d562aad9d3e
|
fix hdx tests
|
hdx_exports/tests/test_hdx_export_set.py
|
hdx_exports/tests/test_hdx_export_set.py
|
# -*- coding: utf-8 -*-
import json
import unittest
from hdx_exports.hdx_export_set import HDXExportSet
from feature_selection.feature_selection import FeatureSelection
from django.contrib.gis.geos import GEOSGeometry
from hdx.data.hdxobject import HDXError
DAKAR_GEOJSON_POLYGON = json.dumps({
"type": "Polygon",
"coordinates": [
[
[-17.465,14.719],
[-17.442,14.719],
[-17.442,14.741],
[-17.465,14.741],
[-17.465,14.719]
]
]
})
DAKAR_GEOJSON_MULTIPOLYGON = json.dumps({
"type": "MultiPolygon",
"coordinates": [[
[
[-17.465,14.719],
[-17.442,14.719],
[-17.442,14.741],
[-17.465,14.741],
[-17.465,14.719]
]]
]
})
yaml = '''
buildings:
types:
- polygons
select:
- building
where: building is not null
waterways:
types:
- lines
- polygons
select:
- natural
where: natural in ('waterway')
'''
BASIC_FEATURE_SELECTION = FeatureSelection(yaml)
class TestHDXExportSet(unittest.TestCase):
def test_minimal_export_set(self):
h = HDXExportSet(
"hot_dakar",
"Dakar Urban Area",
DAKAR_GEOJSON_POLYGON, # or multipolygon. maybe this should be Shapely geom instead of dict?
BASIC_FEATURE_SELECTION
)
self.assertEquals([],h.country_codes)
datasets = h.datasets
self.assertEquals(len(datasets),2)
self.assertEquals(datasets['buildings']['name'],'hot_dakar_buildings')
self.assertEquals(datasets['waterways']['name'],'hot_dakar_waterways')
def test_extent_not_polygon_or_multipolygon(self):
with self.assertRaises(AssertionError):
h = HDXExportSet(
"hot_dakar",
"Dakar Urban Area",
GEOSGeometry("{'type':'LineString','coordinates':[]}"),
BASIC_FEATURE_SELECTION
)
def test_invalid_country_codes(self):
h = HDXExportSet(
"hot_dakar",
"Dakar Urban Area",
DAKAR_GEOJSON_POLYGON,
BASIC_FEATURE_SELECTION,
country_codes=['XXX']
)
with self.assertRaises(HDXError):
h.datasets
|
Python
| 0.000001
|
@@ -1236,32 +1236,47 @@
et(%0A
+dataset_prefix=
%22hot_dakar%22,%0A
@@ -1276,32 +1276,37 @@
r%22,%0A
+name=
%22Dakar Urban Are
@@ -1313,32 +1313,39 @@
a%22,%0A
+extent=
DAKAR_GEOJSON_PO
@@ -1425,32 +1425,50 @@
ct?%0A
+feature_selection=
BASIC_FEATURE_SE
@@ -1489,54 +1489,8 @@
)%0A
- self.assertEquals(%5B%5D,h.country_codes)%0A
@@ -1854,36 +1854,47 @@
et(%0A
-
+dataset_prefix=
%22hot_dakar%22,%0A
@@ -1894,36 +1894,37 @@
r%22,%0A
-
+name=
%22Dakar Urban Are
@@ -1939,20 +1939,23 @@
-
+extent=
GEOSGeom
@@ -2006,36 +2006,50 @@
%22),%0A
-
+feature_selection=
BASIC_FEATURE_SE
@@ -2055,17 +2055,47 @@
ELECTION
-
+,%0A locations=%5B'XXX'%5D
%0A
@@ -2182,16 +2182,31 @@
+dataset_prefix=
%22hot_dak
@@ -2222,16 +2222,21 @@
+name=
%22Dakar U
@@ -2259,16 +2259,23 @@
+extent=
DAKAR_GE
@@ -2288,29 +2288,117 @@
POLYGON,
-%0A
+ # or multipolygon. maybe this should be Shapely geom instead of dict?%0A feature_selection=
BASIC_FE
@@ -2426,28 +2426,24 @@
-country_code
+location
s=%5B'XXX'
|
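The test fix above switches HDXExportSet construction to keyword arguments and renames country_codes to locations; keyword calls keep working if the constructor's parameter order ever changes. A sketch with the record's names (HDXExportSet itself is passed in, since it is not importable here):

def build_export_set(HDXExportSet, geojson, feature_selection):
    return HDXExportSet(
        dataset_prefix="hot_dakar",
        name="Dakar Urban Area",
        extent=geojson,
        feature_selection=feature_selection,
        locations=['SEN'],   # valid ISO3 location; the test uses 'XXX' to force HDXError
    )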
a51a07cd8d1ee6f38fbe34be0530b7d208e8579b
|
Use pytest skipif in test_ssh
|
into/backends/tests/test_ssh.py
|
into/backends/tests/test_ssh.py
|
from __future__ import absolute_import, division, print_function
import pytest
paramiko = pytest.importorskip('paramiko')
import pandas as pd
import numpy as np
import re
import os
import sys
from into.utils import tmpfile, filetext
from into.directory import _Directory, Directory
from into.backends.ssh import SSH, resource, ssh_pattern, sftp, drop, connect
from into.backends.csv import CSV
from into import into, discover, CSV, JSONLines, JSON, convert
from into.temp import _Temp, Temp
from into.compatibility import skipif, ON_TRAVIS_CI
import socket
try:
ssh = connect(hostname='localhost')
ssh.close()
except socket.error:
pytest.skip('Could not connect')
except paramiko.PasswordRequiredException:
pytest.skip('password required for connection')
def test_resource():
r = resource('ssh://joe@localhost:/path/to/myfile.csv')
assert isinstance(r, SSH(CSV))
assert r.path == '/path/to/myfile.csv'
assert r.auth['hostname'] == 'localhost'
assert r.auth['username'] == 'joe'
def test_connect():
a = connect(hostname='localhost')
b = connect(hostname='localhost')
assert a is b
a.close()
c = connect(hostname='localhost')
assert a is c
assert c.get_transport() and c.get_transport().is_active()
def test_resource_directory():
r = resource('ssh://joe@localhost:/path/to/')
assert issubclass(r.subtype, _Directory)
r = resource('ssh://joe@localhost:/path/to/*.csv')
assert r.subtype == Directory(CSV)
assert r.path == '/path/to/'
def test_discover():
with filetext('name,balance\nAlice,100\nBob,200') as fn:
local = CSV(fn)
remote = SSH(CSV)(fn, hostname='localhost')
assert discover(local) == discover(remote)
def test_discover_from_resource():
with filetext('name,balance\nAlice,100\nBob,200', extension='csv') as fn:
local = CSV(fn)
remote = resource('ssh://localhost:' + fn)
assert discover(local) == discover(remote)
def test_ssh_pattern():
uris = ['localhost:myfile.csv',
'127.0.0.1:/myfile.csv',
'user@127.0.0.1:/myfile.csv',
'user@127.0.0.1:/*.csv',
'user@127.0.0.1:/my-dir/my-file3.csv']
for uri in uris:
assert re.match(ssh_pattern, uri)
def test_copy_remote_csv():
with tmpfile('csv') as target:
with filetext('name,balance\nAlice,100\nBob,200',
extension='csv') as fn:
csv = resource(fn)
uri = 'ssh://localhost:%s.csv' % target
scsv = into(uri, csv)
assert isinstance(scsv, SSH(CSV))
assert discover(scsv) == discover(csv)
# Round trip
csv2 = into(target, scsv)
assert into(list, csv) == into(list, csv2)
def test_drop():
with filetext('name,balance\nAlice,100\nBob,200', extension='csv') as fn:
with tmpfile('csv') as target:
scsv = SSH(CSV)(target, hostname='localhost')
assert not os.path.exists(target)
conn = sftp(**scsv.auth)
conn.put(fn, target)
assert os.path.exists(target)
drop(scsv)
drop(scsv)
assert not os.path.exists(target)
def test_drop_of_csv_json_lines_use_ssh_version():
from into.backends.ssh import drop_ssh
for typ in [CSV, JSON, JSONLines]:
assert drop.dispatch(SSH(typ)) == drop_ssh
def test_convert_local_file_to_temp_ssh_file():
with filetext('name,balance\nAlice,100\nBob,200', extension='csv') as fn:
csv = CSV(fn)
scsv = convert(Temp(SSH(CSV)), csv, hostname='localhost')
assert into(list, csv) == into(list, scsv)
@skipif(ON_TRAVIS_CI, reason="Don't know")
def test_temp_ssh_files():
with filetext('name,balance\nAlice,100\nBob,200', extension='csv') as fn:
csv = CSV(fn)
scsv = into(Temp(SSH(CSV)), csv, hostname='localhost')
assert discover(csv) == discover(scsv)
assert isinstance(scsv, _Temp)
@skipif(ON_TRAVIS_CI, reason="Don't know")
def test_convert_through_temporary_local_storage():
with filetext('name,quantity\nAlice,100\nBob,200', extension='csv') as fn:
csv = CSV(fn)
df = into(pd.DataFrame, csv)
scsv = into(Temp(SSH(CSV)), csv, hostname='localhost')
assert into(list, csv) == into(list, scsv)
scsv2 = into(Temp(SSH(CSV)), df, hostname='localhost')
assert into(list, scsv2) == into(list, df)
sjson = into(Temp(SSH(JSONLines)), df, hostname='localhost')
assert (into(np.ndarray, sjson) == into(np.ndarray, df)).all()
@pytest.mark.skipif(ON_TRAVIS_CI and sys.version_info[:2] == (3, 3),
reason='Strange hanging on travis for python33')
def test_ssh_csv_to_s3_csv():
# for some reason this can only be run in the same file as other ssh tests
# and must be a Temp(SSH(CSV)) otherwise tests above this one fail
s3_bucket = pytest.importorskip('into.backends.tests.test_aws').s3_bucket
with filetext('name,balance\nAlice,100\nBob,200', extension='csv') as fn:
remote = into(Temp(SSH(CSV)), CSV(fn), hostname='localhost')
with s3_bucket('.csv') as b:
result = into(b, remote)
assert discover(result) == discover(resource(b))
@pytest.mark.skipif(ON_TRAVIS_CI and sys.version_info[:2] == (3, 3),
reason='windows does not have an SSH daemon on localhost')
def test_s3_to_ssh():
pytest.importorskip('boto')
tips_uri = 's3://nyqpug/tips.csv'
with tmpfile('.csv') as fn:
result = into(Temp(SSH(CSV))(fn, hostname='localhost'), tips_uri)
assert into(list, result) == into(list, tips_uri)
assert discover(result) == discover(resource(tips_uri))
|
Python
| 0.000001
|
@@ -521,16 +521,8 @@
port
- skipif,
ON_
@@ -546,16 +546,45 @@
socket%0A%0A
+skipif = pytest.mark.skipif%0A%0A
try:%0A
@@ -4621,36 +4621,24 @@
)).all()%0A%0A%0A@
-pytest.mark.
skipif(ON_TR
@@ -4677,36 +4677,24 @@
== (3, 3),%0A
-
reas
@@ -5282,28 +5282,16 @@
(b))%0A%0A%0A@
-pytest.mark.
skipif(O
@@ -5338,28 +5338,16 @@
(3, 3),%0A
-
|
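The diff above drops the skipif import from into.compatibility and instead aliases pytest's own marker with skipif = pytest.mark.skipif, so the existing @skipif(...) decorators keep working unchanged. A minimal self-contained sketch of the pattern:

import sys
import pytest

# Alias the marker once so the test decorators below stay short.
skipif = pytest.mark.skipif

@skipif(sys.platform == "win32", reason="no SSH daemon on localhost")
def test_connects():
    assert True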
8b5828d88cca103a86eeb9cd243b1be311353f9f
|
Fix bug in constructing the base search url in sadmin
|
selvbetjening/sadmin2/views/generic.py
|
selvbetjening/sadmin2/views/generic.py
|
from django.contrib import messages
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.http.response import HttpResponseRedirect
from django.shortcuts import render
from django.conf import settings
from selvbetjening.sadmin2.decorators import sadmin_prerequisites
from selvbetjening.sadmin2 import filtering
def apply_search_query(qs, query, search_fields, condition_fields=None, related_sets=None, search_order=None):
if condition_fields is None:
condition_fields = []
invalid_fragments = []
return filtering.filter_queryset(qs, query, search_fields, condition_fields, related_sets,
invalid_fragments=invalid_fragments,
search_order=search_order), invalid_fragments
@sadmin_prerequisites
def generic_create_view(request,
form_class,
redirect_success_url=None,
redirect_success_url_callback=None,
message_success=None,
context=None,
instance=None,
instance_save_callback=None,
template=None):
instance_kwarg = {} if instance is None else {'instance': instance}
if request.method == 'POST':
form = form_class(request.POST, **instance_kwarg)
if form.is_valid():
commit = instance_save_callback is None
instance = form.save(commit=commit)
if not commit:
instance_save_callback(instance)
if message_success is not None:
messages.success(request, message_success)
if redirect_success_url is not None:
return HttpResponseRedirect(redirect_success_url)
if redirect_success_url_callback is not None:
return HttpResponseRedirect(redirect_success_url_callback(instance))
else:
form = form_class(**instance_kwarg)
if context is None:
context = {}
context['form'] = form
return render(request,
'sadmin2/generic/form.html' if template is None else template,
context)
@sadmin_prerequisites
def search_view(request,
queryset,
template_page,
template_fragment,
search_columns=None,
search_conditions=None,
search_related=None,
search_order=None,
context=None):
if search_columns is None:
search_columns = []
if search_conditions is None:
search_conditions = []
if search_related is None:
search_related = []
if context is None:
context = {}
query = request.GET.get('q', '')
queryset, invalid_fragments = apply_search_query(queryset, query, search_columns,
condition_fields=search_conditions,
related_sets=search_related,
search_order=search_order)
paginator = Paginator(queryset, 30)
page = request.GET.get('page')
try:
instances = paginator.page(page)
except PageNotAnInteger:
instances = paginator.page(1)
except EmptyPage:
instances = paginator.page(paginator.num_pages)
context.update({
'instances': instances,
'invalid_fragments': invalid_fragments,
'query': query,
'search_url': '%s%s?q=' % (getattr(settings, 'FORCE_SCRIPT_NAME', ''), request.path_info)
})
return render(request,
template_page if not request.is_ajax() else template_fragment,
context)
|
Python
| 0.000001
|
@@ -3413,24 +3413,160 @@
num_pages)%0A%0A
+ if getattr(settings, 'FORCE_SCRIPT_NAME', None) is None:%0A prefix = ''%0A else:%0A prefix = settings.FORCE_SCRIPT_NAME%0A%0A
context.
@@ -3718,50 +3718,14 @@
%25 (
-getattr(settings, 'FORCE_SCRIPT_NAME', '')
+prefix
, re
|
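The bug fixed above is subtle: getattr's default only applies when the attribute is missing, not when it exists with the value None, and Django defaults FORCE_SCRIPT_NAME to None. The old code therefore produced URLs starting with the literal string "None". A small sketch reproducing the failure and the fix, using a stand-in settings object:

class FakeSettings:
    FORCE_SCRIPT_NAME = None  # Django's default value

settings = FakeSettings()

# Broken: the attribute exists, so the getattr default '' is never used.
broken = '%s%s?q=' % (getattr(settings, 'FORCE_SCRIPT_NAME', ''), '/search')
assert broken == 'None/search?q='

# Fixed: explicitly treat None the same as "not configured".
if getattr(settings, 'FORCE_SCRIPT_NAME', None) is None:
    prefix = ''
else:
    prefix = settings.FORCE_SCRIPT_NAME
fixed = '%s%s?q=' % (prefix, '/search')
assert fixed == '/search?q='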
0f8595e8a7e8b423f3dd7205b9aa84a11830fcf1
|
Fix retry when Met config entry fails (#70012)
|
homeassistant/components/met/__init__.py
|
homeassistant/components/met/__init__.py
|
"""The met component."""
from __future__ import annotations
from collections.abc import Callable
from datetime import timedelta
import logging
from random import randrange
from types import MappingProxyType
from typing import Any
import metno
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_ELEVATION,
CONF_LATITUDE,
CONF_LONGITUDE,
EVENT_CORE_CONFIG_UPDATE,
LENGTH_FEET,
LENGTH_METERS,
Platform,
)
from homeassistant.core import Event, HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from homeassistant.util.distance import convert as convert_distance
import homeassistant.util.dt as dt_util
from .const import (
CONF_TRACK_HOME,
DEFAULT_HOME_LATITUDE,
DEFAULT_HOME_LONGITUDE,
DOMAIN,
)
URL = "https://aa015h6buqvih86i1.api.met.no/weatherapi/locationforecast/2.0/complete"
PLATFORMS = [Platform.WEATHER]
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Set up Met as config entry."""
# Don't set up if tracking home location and latitude or longitude isn't set.
# Also, filters out our onboarding default location.
if config_entry.data.get(CONF_TRACK_HOME, False) and (
(not hass.config.latitude and not hass.config.longitude)
or (
hass.config.latitude == DEFAULT_HOME_LATITUDE
and hass.config.longitude == DEFAULT_HOME_LONGITUDE
)
):
_LOGGER.warning(
"Skip setting up met.no integration; No Home location has been set"
)
return False
coordinator = MetDataUpdateCoordinator(hass, config_entry)
await coordinator.async_config_entry_first_refresh()
if config_entry.data.get(CONF_TRACK_HOME, False):
coordinator.track_home()
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][config_entry.entry_id] = coordinator
hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(
config_entry, PLATFORMS
)
hass.data[DOMAIN][config_entry.entry_id].untrack_home()
hass.data[DOMAIN].pop(config_entry.entry_id)
return unload_ok
class MetDataUpdateCoordinator(DataUpdateCoordinator["MetWeatherData"]):
"""Class to manage fetching Met data."""
def __init__(self, hass: HomeAssistant, config_entry: ConfigEntry) -> None:
"""Initialize global Met data updater."""
self._unsub_track_home: Callable[[], None] | None = None
self.weather = MetWeatherData(
hass, config_entry.data, hass.config.units.is_metric
)
self.weather.set_coordinates()
update_interval = timedelta(minutes=randrange(55, 65))
super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=update_interval)
async def _async_update_data(self) -> MetWeatherData:
"""Fetch data from Met."""
try:
return await self.weather.fetch_data()
except Exception as err:
raise UpdateFailed(f"Update failed: {err}") from err
def track_home(self) -> None:
"""Start tracking changes to HA home setting."""
if self._unsub_track_home:
return
async def _async_update_weather_data(_event: Event | None = None) -> None:
"""Update weather data."""
if self.weather.set_coordinates():
await self.async_refresh()
self._unsub_track_home = self.hass.bus.async_listen(
EVENT_CORE_CONFIG_UPDATE, _async_update_weather_data
)
def untrack_home(self) -> None:
"""Stop tracking changes to HA home setting."""
if self._unsub_track_home:
self._unsub_track_home()
self._unsub_track_home = None
class MetWeatherData:
"""Keep data for Met.no weather entities."""
def __init__(
self, hass: HomeAssistant, config: MappingProxyType[str, Any], is_metric: bool
) -> None:
"""Initialise the weather entity data."""
self.hass = hass
self._config = config
self._is_metric = is_metric
self._weather_data: metno.MetWeatherData
self.current_weather_data: dict = {}
self.daily_forecast: list[dict] = []
self.hourly_forecast: list[dict] = []
self._coordinates: dict[str, str] | None = None
def set_coordinates(self) -> bool:
"""Weather data inialization - set the coordinates."""
if self._config.get(CONF_TRACK_HOME, False):
latitude = self.hass.config.latitude
longitude = self.hass.config.longitude
elevation = self.hass.config.elevation
else:
latitude = self._config[CONF_LATITUDE]
longitude = self._config[CONF_LONGITUDE]
elevation = self._config[CONF_ELEVATION]
if not self._is_metric:
elevation = int(
round(convert_distance(elevation, LENGTH_FEET, LENGTH_METERS))
)
coordinates = {
"lat": str(latitude),
"lon": str(longitude),
"msl": str(elevation),
}
if coordinates == self._coordinates:
return False
self._coordinates = coordinates
self._weather_data = metno.MetWeatherData(
coordinates, async_get_clientsession(self.hass), api_url=URL
)
return True
async def fetch_data(self) -> MetWeatherData:
"""Fetch data from API - (current weather and forecast)."""
await self._weather_data.fetching_data()
self.current_weather_data = self._weather_data.get_current_weather()
time_zone = dt_util.DEFAULT_TIME_ZONE
self.daily_forecast = self._weather_data.get_forecast(time_zone, False)
self.hourly_forecast = self._weather_data.get_forecast(time_zone, True)
return self
|
Python
| 0
|
@@ -519,16 +519,72 @@
sistant%0A
+from homeassistant.exceptions import HomeAssistantError%0A
from hom
@@ -958,16 +958,69 @@
AIN,%0A)%0A%0A
+# Dedicated Home Assistant endpoint - do not change!%0A
URL = %22h
@@ -2597,16 +2597,104 @@
ad_ok%0A%0A%0A
+class CannotConnect(HomeAssistantError):%0A %22%22%22Unable to connect to the web site.%22%22%22%0A%0A%0A
class Me
@@ -5997,24 +5997,31 @@
.%22%22%22%0A
+ resp =
await self.
@@ -6042,32 +6042,87 @@
fetching_data()%0A
+ if not resp:%0A raise CannotConnect()%0A
self.cur
|
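The fix above raises a HomeAssistantError subclass when metno's fetching_data() returns a falsy result, so the coordinator's first refresh fails and Home Assistant retries the config entry instead of setting it up with empty data. A framework-free sketch of the check (the fetching_data name is taken from the code above; everything else is illustrative):

class CannotConnect(Exception):
    """Unable to connect to the web site."""

async def fetch_data(weather_data):
    # Treat a falsy response as a connection failure so callers can retry.
    resp = await weather_data.fetching_data()
    if not resp:
        raise CannotConnect()
    return weather_data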
e851fc13dc8061464fcd0a551b320b915c0cca52
|
Fix deleting and re-adding nws entry (#34555)
|
homeassistant/components/nws/__init__.py
|
homeassistant/components/nws/__init__.py
|
"""The National Weather Service integration."""
import asyncio
import datetime
import logging
from pynws import SimpleNWS
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import (
CONF_STATION,
COORDINATOR_FORECAST,
COORDINATOR_FORECAST_HOURLY,
COORDINATOR_OBSERVATION,
DOMAIN,
NWS_DATA,
)
_LOGGER = logging.getLogger(__name__)
_INDIVIDUAL_SCHEMA = vol.Schema(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Inclusive(
CONF_LATITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.latitude,
vol.Inclusive(
CONF_LONGITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.longitude,
vol.Optional(CONF_STATION): cv.string,
}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.All(cv.ensure_list, [_INDIVIDUAL_SCHEMA])}, extra=vol.ALLOW_EXTRA,
)
PLATFORMS = ["weather"]
DEFAULT_SCAN_INTERVAL = datetime.timedelta(minutes=10)
def base_unique_id(latitude, longitude):
"""Return unique id for entries in configuration."""
return f"{latitude}_{longitude}"
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the National Weather Service (NWS) component."""
hass.data.setdefault(DOMAIN, {})
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up a National Weather Service entry."""
latitude = entry.data[CONF_LATITUDE]
longitude = entry.data[CONF_LONGITUDE]
api_key = entry.data[CONF_API_KEY]
station = entry.data[CONF_STATION]
client_session = async_get_clientsession(hass)
# set_station only does IO when station is None
nws_data = SimpleNWS(latitude, longitude, api_key, client_session)
await nws_data.set_station(station)
coordinator_observation = DataUpdateCoordinator(
hass,
_LOGGER,
name=f"NWS observation station {station}",
update_method=nws_data.update_observation,
update_interval=DEFAULT_SCAN_INTERVAL,
)
coordinator_forecast = DataUpdateCoordinator(
hass,
_LOGGER,
name=f"NWS forecast station {station}",
update_method=nws_data.update_forecast,
update_interval=DEFAULT_SCAN_INTERVAL,
)
coordinator_forecast_hourly = DataUpdateCoordinator(
hass,
_LOGGER,
name=f"NWS forecast hourly station {station}",
update_method=nws_data.update_forecast_hourly,
update_interval=DEFAULT_SCAN_INTERVAL,
)
hass.data[DOMAIN][entry.entry_id] = {
NWS_DATA: nws_data,
COORDINATOR_OBSERVATION: coordinator_observation,
COORDINATOR_FORECAST: coordinator_forecast,
COORDINATOR_FORECAST_HOURLY: coordinator_forecast_hourly,
}
# Fetch initial data so we have data when entities subscribe
await coordinator_observation.async_refresh()
await coordinator_forecast.async_refresh()
await coordinator_forecast_hourly.async_refresh()
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
if len(hass.data[DOMAIN]) == 0:
hass.data.pop(DOMAIN)
return unload_ok
|
Python
| 0
|
@@ -1584,45 +1584,8 @@
%22%22%22%0A
- hass.data.setdefault(DOMAIN, %7B%7D)%0A
@@ -2824,30 +2824,78 @@
)%0A
-%0A
-hass.data%5BDOMAIN%5D
+nws_hass_data = hass.data.setdefault(DOMAIN, %7B%7D)%0A nws_hass_data
%5Bent
|
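The bug here: async_setup created hass.data[DOMAIN] once at component setup, but async_unload_entry pops the whole DOMAIN dict when the last entry is removed, so re-adding an entry crashed on a missing key. Moving setdefault into entry setup makes each setup self-sufficient. A plain-dict sketch of the pattern:

hass_data = {}

def setup_entry(entry_id, payload):
    # Re-create the per-domain dict lazily on every entry setup.
    hass_data.setdefault("nws", {})[entry_id] = payload

def unload_entry(entry_id):
    hass_data["nws"].pop(entry_id)
    if not hass_data["nws"]:
        hass_data.pop("nws")

setup_entry("a", 1)
unload_entry("a")
setup_entry("a", 2)  # works again: setdefault rebuilds the popped dict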
18f9017b7eb627e1b56096f16bad02095cfbfa53
|
Return appropriate JSON when CSRF token fails
|
alexandria/views/user.py
|
alexandria/views/user.py
|
import logging
log = logging.getLogger(__name__)
from pyramid.view import (
view_config,
view_defaults,
)
from pyramid.exceptions import BadCSRFToken
from pyramid.httpexceptions import (
HTTPSeeOther,
HTTPNotFound,
HTTPUnprocessableEntity,
HTTPBadRequest,
)
from pyramid.security import (
remember,
forget,
)
from pyramid.session import check_csrf_token
import colander
from .. import schemas as s
@view_defaults(accept='application/json', renderer='json', context='..traversal.User')
class User(object):
def __init__(self, context, request):
self.request = request
self.context = context
if self.request.body:
try:
self.cstruct = self.request.json_body
except ValueError:
raise HTTPBadRequest()
def csrf_valid(self):
if not check_csrf_token(self.request, raises=False):
log.debug('CSRF token did not match.')
log.debug('Expected token: {}'.format(self.request.session.get_csrf_token()))
log.debug('Got headers: {}'.format(self.request.headers))
raise BadCSRFToken()
@view_config()
def info(self):
if self.request.authenticated_userid is None:
ret = {
'authenticated': False,
}
else:
ret = {
'authenticated': True,
'user': {
'username': self.request.user.user.email,
}
}
return ret
@view_config(name='login', request_method='POST')
def login(self):
self.csrf_valid()
try:
schema = s.UserSchema.create_schema(self.request)
deserialized = schema.deserialize(self.cstruct)
headers = remember(self.request, "example@example.com")
token = self.request.session.new_csrf_token()
response = HTTPSeeOther(location=self.request.route_url('main', traverse='user'), headers=headers)
response.set_cookie('CSRF-Token', token, max_age=864000, overwrite=True)
return response
except colander.Invalid as e:
self.request.response.status = 422
return {
'errors': e.asdict(),
}
@view_config(name='logout', request_method='POST')
def logout(self):
self.csrf_valid()
headers = forget(self.request)
return HTTPSeeOther(location=self.request.route_url('main', traverse='user'), headers=headers)
@view_config(
context=HTTPNotFound,
containment='..traversal.User'
)
def not_found(self):
self.request.response.status = 404
return self.request.response
|
Python
| 0.000067
|
@@ -2855,16 +2855,377 @@
equest.response%0A
+%0A @view_config(%0A context=BadCSRFToken,%0A containment='..traversal.User',%0A renderer='json',%0A )%0A def bad_csrf(self):%0A self.request.response.status = 400%0A return %7B%0A 'errors': %7B%0A 'csrf': 'Invalid CSRF token. Please try again.'%0A %7D,%0A %7D%0A
|
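The added view above uses Pyramid's exception-view mechanism: a view registered with context=BadCSRFToken is invoked whenever that exception escapes another view, letting the API return structured JSON instead of an HTML error page. A condensed sketch of the registration, mirroring the decorator form used in the diff:

from pyramid.view import view_config
from pyramid.exceptions import BadCSRFToken

@view_config(context=BadCSRFToken, renderer='json')
def bad_csrf(request):
    # Turn the CSRF failure into a 400 with a machine-readable body.
    request.response.status = 400
    return {'errors': {'csrf': 'Invalid CSRF token. Please try again.'}}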
bfb4a1c3b2363d0c1288ae201567840583dbb8a1
|
Add lat/lng to cafe and roaster models.
|
pdxroasters/roaster/models.py
|
pdxroasters/roaster/models.py
|
import re
from django.db import models
from django.template.defaultfilters import slugify
def format_phone_number(phone):
phone = re.sub('[^\w]', '', phone)
if (len(phone) == 10):
return '(%s) %s-%s' % (phone[:3], phone[3:6], phone[6:10])
else:
return ''
class Cafe(models.Model):
name = models.CharField(max_length=200, unique=True, db_index=True,)
slug = models.SlugField()
active = models.BooleanField()
address = models.CharField(max_length=200, blank=True,)
# TODO: Hours
phone = models.CharField(max_length=14, blank=True,)
url = models.URLField(max_length=200, verbose_name='URL', blank=True,)
created_at = models.DateTimeField(auto_now_add=True, db_index=True)
modified_at = models.DateTimeField(auto_now=True, db_index=True)
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
if not self.pk:
self.slug = slugify(self.name)
# Sanitize the phone number
self.phone = format_phone_number(self.phone)
super(Cafe, self).save(*args, **kwargs)
class Meta:
ordering = ['name',]
class Roaster(models.Model):
name = models.CharField(max_length=200, unique=True, db_index=True,)
slug = models.SlugField()
active = models.BooleanField()
address = models.TextField(blank=True,)
# TODO: Hours
phone = models.CharField(max_length=14, blank=True,)
url = models.URLField(max_length=200, verbose_name='URL', blank=True,)
description = models.TextField(blank=True,)
photo_url = models.URLField(max_length=200, verbose_name='Photo URL',
blank=True,)
video_url = models.URLField(max_length=200, verbose_name='Video URL',
blank=True,)
cafes = models.ManyToManyField('Cafe', blank=True,)
created_at = models.DateTimeField(auto_now_add=True, db_index=True)
modified_at = models.DateTimeField(auto_now=True, db_index=True)
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
if not self.pk:
self.slug = slugify(self.name)
# Sanitize the phone number
self.phone = format_phone_number(self.phone)
super(Roaster, self).save(*args, **kwargs)
class Meta:
ordering = ['name',]
get_latest_by = 'created_at'
class Roast(models.Model):
name = models.CharField(max_length=200, unique=True, db_index=True,)
roaster = models.ForeignKey('Roaster', related_name='roasts',)
active = models.BooleanField()
created_at = models.DateTimeField(auto_now_add=True, db_index=True,)
modified_at = models.DateTimeField(auto_now=True, db_index=True,)
def __unicode__(self):
return self.name
class Meta:
ordering = ['name',]
|
Python
| 0
|
@@ -495,32 +495,204 @@
0, blank=True,)%0A
+ lat = models.DecimalField(max_digits=9, decimal_places=6, blank=True, null=True,)%0A lng = models.DecimalField(max_digits=9, decimal_places=6, blank=True, null=True,)%0A
# TODO: Hour
@@ -1514,32 +1514,204 @@
ld(blank=True,)%0A
+ lat = models.DecimalField(max_digits=9, decimal_places=6, blank=True, null=True,)%0A lng = models.DecimalField(max_digits=9, decimal_places=6, blank=True, null=True,)%0A
# TODO: Hour
|
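DecimalField(max_digits=9, decimal_places=6) stores values up to ±999.999999, which covers latitude (±90) and longitude (±180) with six decimal places (roughly 0.11 m of precision at the equator), ample for storefront coordinates. A minimal model sketch (assumes a configured Django project; Place is an illustrative name):

from django.db import models

class Place(models.Model):
    lat = models.DecimalField(max_digits=9, decimal_places=6,
                              blank=True, null=True)
    lng = models.DecimalField(max_digits=9, decimal_places=6,
                              blank=True, null=True)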
a1052c02a11539d34a7c12c7a86d103c2b445b52
|
Fix and improve BRIEF example
|
doc/examples/plot_brief.py
|
doc/examples/plot_brief.py
|
"""
=======================
BRIEF binary descriptor
=======================
This example demonstrates the BRIEF binary description algorithm.
The descriptor consists of relatively few bits and can be computed using
a set of intensity difference tests. The short binary descriptor results
in low memory footprint and very efficient matching based on the Hamming
distance metric.
However, BRIEF does not provide rotation-invariance and scale-invariance can be
achieved by detecting and extracting features at different scales.
The ORB feature detection and binary description algorithm is an extension to
the BRIEF method and provides rotation and scale-invariance, see
`skimage.feature.ORB`.
"""
from skimage import data
from skimage import transform as tf
from skimage.feature import (match_descriptors, corner_peaks, corner_harris,
plot_matches, BRIEF)
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
img1 = rgb2gray(data.lena())
tform = tf.AffineTransform(scale=(1.2, 1.2), translation=(0, -100))
img2 = tf.warp(img1, tform)
img3 = tf.rotate(img1, 25)
keypoints1 = corner_peaks(corner_harris(img1), min_distance=5)
keypoints2 = corner_peaks(corner_harris(img2), min_distance=5)
keypoints3 = corner_peaks(corner_harris(img3), min_distance=5)
extractor = BRIEF()
extractor.extract(img1, keypoints1)
keypoints1 = keypoints1[extractor.mask_]
descriptors1 = extractor.descriptors_
extractor.extract(img2, keypoints2)
keypoints2 = keypoints2[extractor.mask_]
descriptors2 = extractor.descriptors_
extractor.extract(img3, keypoints3)
keypoints3 = keypoints3[extractor.mask_]
descriptors3 = extractor.descriptors_
matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)
fig, ax = plt.subplots(nrows=2, ncols=1)
plt.gray()
plot_matches(ax[0], img1, img2, keypoints1, keypoints2, matches12)
ax[0].axis('off')
plot_matches(ax[1], img1, img3, keypoints1, keypoints3, matches13)
ax[1].axis('off')
plt.show()
|
Python
| 0.000002
|
@@ -378,17 +378,8 @@
c.%0A%0A
-However,
BRIE
@@ -416,22 +416,19 @@
variance
- and s
+. S
cale-inv
@@ -441,17 +441,17 @@
e can be
-%0A
+
achieved
@@ -453,17 +453,17 @@
ieved by
-
+%0A
detectin
@@ -514,175 +514,8 @@
s.%0A%0A
-The ORB feature detection and binary description algorithm is an extension to%0Athe BRIEF method and provides rotation and scale-invariance, see%0A%60skimage.feature.ORB%60.%0A%0A
%22%22%22%0A
|
dc3aa4f174e2d29c93f1cd54be933e3aa95010b5
|
fix construction of WorkflowInformation
|
sldc/chaining.py
|
sldc/chaining.py
|
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
from shapely.affinity import translate
from information import ChainInformation, WorkflowInformationCollection, WorkflowInformation
from sldc import ImageWindow
from logging import Loggable, SilentLogger
__author__ = "Romain Mormont <r.mormont@student.ulg.ac.be>"
class ImageProvider(Loggable):
"""
An interface for any component that genre
"""
__metaclass__ = ABCMeta
def __init__(self, silent_fail=False, logger=SilentLogger()):
"""Constructs instances of ImageProvider
Parameters
----------
silent_fail: bool
True for putting the image provider in silent fail mode. In this situation, when an image cannot be
extracted, the provider simply ignores the error and skips the image. Otherwise, when set to False,
the provider raises an error when an image extraction fails.
logger: Logger (optional, default: a SilentLogger instance)
A logger object
"""
Loggable.__init__(self, logger)
self._silent_fail = silent_fail
@abstractmethod
def get_images(self):
"""
Return the images to be processed by instances of the workflow
Returns
-------
images: iterable
An array of images
Exceptions
----------
ImageExtractionException:
Raised when an image cannot be extracted. This error is never raised when the image provider is in
silent_fail mode. In this situation, the provider fetches as many images as possible and returns only the
successfully fetched images in the array.
"""
pass
class WorkflowExecutor(Loggable):
"""
A class for encapsulating the execution of a workflow. It provides two abstract methods to implement for
generating the images to process and to post-process the generated data after each run of the workflow.
The execution of the workflow executor is something like :
.execute(image, info) :
{get_images} -- images --> [ image --> {workflow.process} -- data --> {after} ] --> workflow_info
"""
__metaclass__ = ABCMeta
def __init__(self, workflow, logger=SilentLogger()):
"""Constructs a WorkflowExecutor for the given workflow object
Parameters
----------
workflow: SLDCWorkflow
The workflow to execute
logger: Logger (optional, default: a SilentLogger instance)
A logger object
"""
Loggable.__init__(self, logger)
self._workflow = workflow
def execute(self, image, workflow_info_collection):
"""Execute the workflow with the images generated by get_images
Parameters
----------
image: Image
The base image from which the sub-images to process must be extracted
workflow_info_collection: WorkflowInformationCollection
The information generated by the previous workflow in a workflow chain
Returns
-------
workflow_information: WorkflowInformation
The workflow information object containing the data generated by all the executions of the stored workflow
"""
workflow_information = WorkflowInformation([], [], [], None)
images = self.get_images(image, workflow_info_collection)
for sub_image in images:
returned_info = self._workflow.process(sub_image)
self.after(sub_image, returned_info)
workflow_information.merge(returned_info)
return workflow_information
@abstractmethod
def get_images(self, image, workflow_info_collection):
"""Given result of the application of an instance of the sldc workflow, produces images objects for the next
steps
Parameters
----------
image: Image
The image processed by the previous step
workflow_info_collection: WorkflowInformationCollection
The information about the execution of the workflow until now
Returns
-------
images: list of Image
The list of images to be processed by the workflow
"""
pass
@abstractmethod
def after(self, sub_image, workflow_information):
"""A callback to execute after each execution of the workflow. May update the workflow information object.
Parameters
----------
sub_image: Image (or subclass)
The image processed by the workflow
workflow_information: WorkflowInformation
The workflow information produced by the workflow execution
"""
pass
class FullImageWorkflowExecutor(WorkflowExecutor):
"""A workflow executor which processes the whole passed image and which doesn't post-process the generated data
"""
def __init__(self, workflow, logger=SilentLogger()):
WorkflowExecutor.__init__(self, workflow, logger)
def get_images(self, image, workflow_info_collection):
return [image]
def after(self, sub_image, workflow_information):
return
class PolygonTranslatorWorkflowExecutor(WorkflowExecutor):
"""A workflow executor that moves the polygons generated by the workflow in the base image coordinate system by
translating them.
"""
__metaclass__ = ABCMeta
def __init__(self, workflow, logger=SilentLogger()):
WorkflowExecutor.__init__(self, workflow, logger)
def after(self, sub_image, workflow_information):
if not isinstance(sub_image, ImageWindow): # check if there is an offset
return
# translate all the polygons so that their reference system is the base image and not the window
offset_x, offset_y = sub_image.abs_offset
polygons = workflow_information.polygons
for i, polygon in enumerate(polygons):
polygons[i] = translate(polygon, offset_x, offset_y)
class PostProcessor(Loggable):
"""A post processor is a class encapsulating the processing of the results of several SLDCWorkflow
"""
__metaclass__ = ABCMeta
def __init__(self, logger=SilentLogger()):
"""Builds a PostProcessor object
Parameters
----------
logger: Logger (optional, default: a SilentLogger instance)
A logger object
"""
Loggable.__init__(self, logger)
@abstractmethod
def post_process(self, image, workflow_info_collection):
"""Actually process the results
Parameters
----------
image: Image
The image processed by the previous step
workflow_info_collection: WorkflowInformationCollection
The information about the execution of the workflow
"""
pass
class WorkflowChain(Loggable):
"""
This class encapsulates the sequential execution of several instances of the sldc workflow on the same image.
A processing chain might look like this :
{ImageProvider} --images--> {WorkflowExecutor1} -- workflow information
--> {WorkflowExecutor2} - ... -> {PostProcessor}
All the generated polygons_classes are then post_processed by the PostProcessor.
"""
def __init__(self, image_provider, executors, post_processor, n_jobs=1, logger=SilentLogger()):
"""Constructor for WorkflowChain objects
Parameters
----------
image_provider: ImageProvider
An image provider that will provide the images to be processed by the first workflow
executors: list of WorkflowExecutors
The first instance of the workflow to be applied
post_processor: PostProcessor
The post-processor to execute when an image has gone through the whole processing chain
n_jobs: int, optional (default: 1)
The number of jobs that can be used to process the images in parallel, -1 for using the number of available
cores
logger: Logger (optional, default: a SilentLogger instance)
A logger object
"""
Loggable.__init__(self, logger)
self._post_processor = post_processor
self._image_provider = image_provider
self._workflow_executors = executors
self._chain_information = ChainInformation()
self._n_jobs = n_jobs
# TODO implement parallel implementation
def execute(self):
"""Execute the processing
"""
images = self._image_provider.get_images()
for i, image in enumerate(images):
self._process_image(image, i)
def _process_image(self, image, image_nb):
"""Execute the processing of the image_nb th image
Parameters
----------
image: Image
The image to process
image_nb: The number of the image to be processed
"""
self.logger.info("WorkflowChain : start processing image #{}.".format(image_nb + 1))
collection = WorkflowInformationCollection()
for i, executor in enumerate(self._workflow_executors):
self.logger.info("WorkflowChain : start workflow {} for image #{}".format(i + 1, image_nb + 1))
collection.append(executor.execute(image, collection))
self.logger.info("WorkflowChain : post-processing generated data for image #{}".format(image_nb + 1))
self._post_processor.post_process(image, collection)
# self._chain_information.register_workflow_collection(collection, image_nb) # TODO thread safe
|
Python
| 0
|
@@ -3328,16 +3328,20 @@
%5B%5D, %5B%5D,
+ %5B%5D,
None)%0A
|
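The one-line fix above adds a fourth empty list to the WorkflowInformation(...) call, the classic failure mode of constructors that take several same-typed positional accumulators. A hypothetical sketch of how defaulted keyword parameters avoid the mismatch (field names are illustrative, not the real WorkflowInformation API):

class WorkflowInfo:
    def __init__(self, polygons=None, dispatches=None, classes=None,
                 probas=None, timing=None):
        # Default each accumulator so callers cannot under-supply
        # positional lists when a new field is added later.
        self.polygons = polygons if polygons is not None else []
        self.dispatches = dispatches if dispatches is not None else []
        self.classes = classes if classes is not None else []
        self.probas = probas if probas is not None else []
        self.timing = timing

info = WorkflowInfo()  # still valid as the class grows new fields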
284befadcfd3e4785067d827c67958d01b80d4a2
|
fix method name (underscore prefix)
|
perfrunner/tests/rebalance.py
|
perfrunner/tests/rebalance.py
|
import time
from perfrunner.tests import PerfTest
from multiprocessing import Event
def with_delay(method):
def wrapper(self, *args, **kwargs):
time.sleep(self.rebalance_settings.start_after)
method(self, *args, **kwargs)
time.sleep(self.rebalance_settings.stop_after)
self.shutdown_event.set()
return wrapper
class RebalanceTest(PerfTest):
def __init__(self, *args, **kwargs):
super(RebalanceTest, self).__init__(*args, **kwargs)
self.shutdown_event = Event()
self.rebalance_settings = self.test_config.get_rebalance_settings()
@with_delay
def rebalance_in(self):
for cluster in self.cluster_spec.get_clusters():
master = cluster[0]
known_nodes = cluster[:self.rebalance_settings.nodes_after]
ejected_nodes = []
self.rest.rebalance(master, known_nodes, ejected_nodes)
self.monitor.monitor_rebalance(master)
class StaticRebalanceTest(RebalanceTest):
def _run(self):
self._run_load_phase()
self._compact_bucket()
self.reporter.start()
self.rebalance_in()
value = self.reporter.finish('Rebalance')
self.reporter.post_to_sf(self, value)
self._debug()
|
Python
| 0.000001
|
@@ -1006,17 +1006,16 @@
def
-_
run(self
|
f12904db6c83f4914018e385edce0d1757e030fc
|
Add memory quota for r4.8xlarge instances
|
perfrunner/utils/templater.py
|
perfrunner/utils/templater.py
|
from argparse import ArgumentParser
import yaml
from jinja2 import Environment, FileSystemLoader, Template
from logger import logger
from perfrunner.utils.cloudrunner import CloudRunner
MEMORY_QUOTAS = {
'm4.2xlarge': 26624, # 32GB RAM
'm4.4xlarge': 56320, # 64GB RAM
'r4.2xlarge': 54272, # 61GB RAM
'r4.4xlarge': 102400, # 122GB RAM
}
OUTPUT_FILE = 'custom'
TEMPLATES_DIR = 'templates'
TEMPLATES = (
'full_cluster.spec',
'kv_cluster.spec',
'pillowfight.test',
'ycsb_workload_a.test',
'ycsb_workload_d.test',
'ycsb_workload_e.test',
)
THREADS_PER_CLIENT = {
'pillowfight.test': 20,
'ycsb_workload_a.test': 20,
'ycsb_workload_d.test': 20,
'ycsb_workload_e.test': 20,
}
def get_templates(template: str) -> Template:
loader = FileSystemLoader(searchpath=TEMPLATES_DIR)
env = Environment(loader=loader, keep_trailing_newline=True)
return env.get_template(template)
def render_test(template: str, instance: str, threads: int):
mem_quota = MEMORY_QUOTAS[instance]
worker_instances = estimate_num_clients(template, threads)
content = render_template(get_templates(template),
mem_quota=mem_quota,
workers=THREADS_PER_CLIENT[template],
worker_instances=worker_instances)
store_cfg(content, '.test')
def render_spec(template: str):
with open(CloudRunner.EC2_META) as fp:
meta = yaml.load(fp)
clients = meta.get('clients', {}).values()
servers = meta.get('servers', {}).values()
content = render_template(get_templates(template),
servers=servers,
clients=clients)
store_cfg(content, '.spec')
def render_inventory():
with open(CloudRunner.EC2_META) as fp:
meta = yaml.load(fp)
servers = meta.get('servers', {}).values()
content = render_template(get_templates('inventory.ini'),
servers=servers)
store_cfg(content, '.ini')
def estimate_num_clients(template: str, threads: int) -> int:
return max(1, threads // THREADS_PER_CLIENT[template])
def render_template(t: Template, **kwargs) -> str:
return t.render(**kwargs)
def store_cfg(content: str, extension: str):
logger.info('Creating a new file: {}'.format(OUTPUT_FILE + extension))
with open(OUTPUT_FILE + extension, 'w') as f:
f.write(content)
def main():
parser = ArgumentParser()
parser.add_argument('--instance', dest='instance', type=str,
choices=list(MEMORY_QUOTAS))
parser.add_argument('--template', dest='template', type=str,
choices=TEMPLATES,
required=True)
parser.add_argument('--threads', dest='threads', type=int,
default=1,
help='Total number of workload generator threads')
args = parser.parse_args()
if '.test' in args.template:
render_test(args.template, args.instance, args.threads)
else:
render_spec(args.template)
render_inventory()
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -353,16 +353,55 @@
2GB RAM%0A
+ 'r4.8xlarge': 209920, # 244GB RAM%0A
%7D%0A%0AOUTPU
|
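The quotas in MEMORY_QUOTAS track roughly 80-85% of instance RAM expressed in MB: r4.8xlarge has 244 GB (about 249,856 MB), and 209,920 / 249,856 comes to about 0.84, leaving headroom for the OS and sidecar processes. A hypothetical helper that would derive such a quota instead of hand-picking it:

def estimate_quota_mb(ram_gb: int, fraction: float = 0.84) -> int:
    """Approximate a server memory quota as a fixed fraction of RAM."""
    return int(ram_gb * 1024 * fraction)

# Close to the hand-picked r4.8xlarge value above (209920 MB).
assert abs(estimate_quota_mb(244) - 209920) < 2048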
b719bd4faf4c852b71dc185bb389707577a29516
|
refactor code.
|
clothing/models.py
|
clothing/models.py
|
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
from ophasebase.models import Ophase
from staff.models import Person
class Type(models.Model):
class Meta:
verbose_name = _("Art")
verbose_name_plural = _("Arten")
ordering = ['-price', 'name']
name = models.CharField(max_length=75, verbose_name=_("Art"), unique=True)
price = models.FloatField(verbose_name=_("Preis"))
additional_only = models.BooleanField(verbose_name=_("Nur als selbst bezahltes Kleidungsstück möglich"), default=False)
def __str__(self):
return self.name
class Size(models.Model):
class Meta:
verbose_name = _("Größe")
verbose_name_plural = _("Größen")
size = models.CharField(max_length=75, verbose_name=_("Größe"), unique=True)
def __str__(self):
return self.size
def sortable_size(self):
"""returns a sortable value of the current size.
So that XS < S < M < L < XL < XXL < 3XL is true"""
# The computation runs on uppercase without trailing whitespace
input = self.size.strip().upper()
# Main value is given by the last char
last = input[-1]
# Assign the base value regarding the last char
base_value = {'S':10, 'M':20, 'L':30}
value = base_value.get(last, 0)
# An 'X' before has an effect only if the last char is 'L' or 'S'
x_value = 0
if last == 'L':
x_value = 1
elif last == 'S':
x_value = -1
# If an 'X' before would have an effect
if x_value != 0:
# Loop from the second to last char
for c in reversed(input[:-1]):
# ignore space between chars
if c.isspace():
continue
# if an 'X' occurs, add the x_value
elif c == 'X':
value += x_value
# if a number occurs, add the x_value times the value of c
elif c in [str(i) for i in range(2,10)]:
value += x_value * (int(c) - 1)
# a number is the first possible char with an effect
break
else:
# If another char occurs, we end the loop. All following
# chars will not have an effect on the value
break
return value
class Color(models.Model):
class Meta:
verbose_name = _("Farbe")
verbose_name_plural = _("Farben")
name = models.CharField(max_length=75, verbose_name=_("Farbe"), unique=True)
color_code = models.CharField(max_length=7, verbose_name=_("Farbcode"), default="#FFFFFF")
def __str__(self):
return self.name
class Order(models.Model):
class Meta:
verbose_name = _("Bestellung")
verbose_name_plural = _("Bestellungen")
person = models.ForeignKey(Person, on_delete=models.CASCADE, verbose_name=_("Person"))
type = models.ForeignKey(Type, on_delete=models.CASCADE, verbose_name=_("Art"))
size = models.ForeignKey(Size, on_delete=models.CASCADE, verbose_name=_("Größe"))
color = models.ForeignKey(Color, on_delete=models.CASCADE, verbose_name=_("Farbe"))
additional = models.BooleanField(verbose_name=_("Selbst bezahltes Kleidungsstück"))
def __str__(self):
return "{}: {} {} {}".format(
self.person.name,
self.type.name,
self.size.size,
self.color.name
)
def info(self):
return "{} {} {} ({})".format(
str(self.type), str(self.size), str(self.color),
_("selbst bezahlt") if self.additional else _("kostenlos")
)
@staticmethod
def get_current(**kwargs):
return Order.objects.filter(person__ophase=Ophase.current(), **kwargs)
class Settings(models.Model):
"""Configuration for clothing app."""
class Meta:
verbose_name = _("Einstellungen")
verbose_name_plural = _("Einstellungen")
clothing_ordering_enabled = models.BooleanField(default=False, verbose_name=_("Kleiderbestellung aktiv"))
def get_name(self):
return '%s' % _("Clothing Einstellungen")
def __str__(self):
return self.get_name()
def clean(self, *args, **kwargs):
super().clean(*args, **kwargs)
if Settings.objects.count() > 0 and self.id != Settings.objects.get().id:
raise ValidationError(_("Es ist nur sinnvoll und möglich eine Instanz des Einstellungsobjekts anzulegen."))
@staticmethod
def instance():
try:
return Settings.objects.get()
except Settings.DoesNotExist:
return None
|
Python
| 0.000006
|
@@ -1233,16 +1233,21 @@
last
+_char
= input
@@ -1316,21 +1316,16 @@
-base_
value =
@@ -1352,35 +1352,8 @@
:30%7D
-%0A value = base_value
.get
@@ -1357,16 +1357,21 @@
get(last
+_char
, 0)%0A%0A
@@ -1463,108 +1463,41 @@
e =
-0%0A if last == 'L':%0A x_value = 1%0A elif last == 'S':%0A x_value = -1
+%7B'S':-1, 'L':1%7D.get(last_char, 0)
%0A%0A
|
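The refactor above collapses an if/elif ladder into a single dict lookup: {'S': -1, 'L': 1}.get(last_char, 0) returns the 'X' multiplier for sizes ending in S or L and 0 for everything else. A standalone sketch of the extracted core with a quick sanity check:

def x_multiplier(last_char):
    # 'X' makes S-sizes smaller and L-sizes larger; other endings ignore it.
    return {'S': -1, 'L': 1}.get(last_char, 0)

assert x_multiplier('S') == -1  # XS < S
assert x_multiplier('L') == 1   # XL > L
assert x_multiplier('M') == 0   # an 'X' before 'M' carries no meaning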
d42bc1bb41ef443104e6a5853e6d75fbbb0d7a4a
|
add debug message `instance in instance list`
|
clustercron/elb.py
|
clustercron/elb.py
|
# clustercron/elb.py
# vim: ts=4 et sw=4 sts=4 ft=python fenc=UTF-8 ai
# -*- coding: utf-8 -*-
'''
clustercron.elb
---------------
'''
from __future__ import unicode_literals
import logging
import socket
import boto.ec2.elb
from .compat import PY3
if PY3:
from urllib.request import Request
from urllib.request import urlopen
from urllib.error import URLError
else:
from urllib2 import Request
from urllib2 import urlopen
from urllib2 import URLError
logger = logging.getLogger(__name__)
class Elb(object):
URL_INSTANCE_ID = \
'http://169.254.169.254/1.0/meta-data/instance-id'
def __init__(self, lb_name, timeout=3):
self.lb_name = lb_name
socket.setdefaulttimeout(timeout)
def _get_instance_id(self):
request = Request(self.URL_INSTANCE_ID)
try:
response = urlopen(request)
except URLError:
instance_id = None
logger.error('Could not get instance ID')
else:
instance_id = response.read()[:10]
logger.debug('Instance ID: %s', instance_id)
return instance_id
def _get_inst_health_states(self):
try:
conn = boto.ec2.elb.ELBConnection()
lb = conn.get_all_load_balancers(
load_balancer_names=[self.lb_name])[0]
inst_health_states = lb.get_instance_health()
except Exception as error:
logger.error('Could not get instance health states: %s', error)
inst_health_states = []
return inst_health_states
def _is_master(self, instance_id, inst_health_states):
res = False
instances_all = sorted([x.instance_id for x in inst_health_states])
logger.debug('instances: %s', ', '.join(instances_all))
instances_in_service = sorted([
x.instance_id for x in inst_health_states
if x.state == 'InService'
])
logger.debug(
'Instances in service: %s',
', '.join(instances_in_service)
)
if instances_in_service:
res = instance_id == instances_in_service[0]
return res
@property
def master(self):
instance_id = self._get_instance_id()
if instance_id:
inst_health_states = self._get_inst_health_states()
return self._is_master(instance_id, inst_health_states)
return False
|
Python
| 0
|
@@ -2130,24 +2130,352 @@
_service%5B0%5D%0A
+ logger.debug('This instance master: %25s', res)%0A logger.debug(%0A 'This instance in %60instances in service%60 list: %25s',%0A instance_id in instances_in_service%0A )%0A logger.debug(%0A 'This instance in %60all instances%60 list: %25s',%0A instance_id in instances_all%0A )%0A
retu
|
b75e7c633e72bd2df62d8a2dabe2240b8a1815fd
|
Add examples to the sub-commands
|
pip/commands/configuration.py
|
pip/commands/configuration.py
|
import logging
import os
import subprocess
from pip.basecommand import Command
from pip.configuration import Configuration, kinds
from pip.exceptions import PipError
from pip.locations import venv_config_file
from pip.status_codes import SUCCESS, ERROR
from pip.utils import get_prog
logger = logging.getLogger(__name__)
class ConfigurationCommand(Command):
"""Manage local and global configuration."""
name = 'config'
usage = """
%prog [<file-option>] list
%prog [<file-option>] [--editor <editor-path>] edit
%prog [<file-option>] get name
%prog [<file-option>] set name value
%prog [<file-option>] unset name
"""
summary = """
Manage local and global configuration.
Subcommands:
list: List the active configuration (or from the file specified)
edit: Edit the configuration file in an editor
get: Get the value associated with name
set: Set the name=value
unset: Unset the value associated with name
If none of --user, --global and --venv are passed, a virtual
environment configuration file is used if one is active and the file
exists. Otherwise, all modifications happen to the user file by
default.
"""
def __init__(self, *args, **kwargs):
super(ConfigurationCommand, self).__init__(*args, **kwargs)
self.configuration = None
self.cmd_opts.add_option(
'--editor',
dest='editor',
action='store',
default=None,
help=(
'Editor to use to edit the file. Uses '
'$EDITOR if not passed.'
)
)
self.cmd_opts.add_option(
'--global',
dest='global_file',
action='store_true',
default=False,
help='Use the system-wide configuration file only'
)
self.cmd_opts.add_option(
'--user',
dest='user_file',
action='store_true',
default=False,
help='Use the user configuration file only'
)
self.cmd_opts.add_option(
'--venv',
dest='venv_file',
action='store_true',
default=False,
help='Use the virtualenv configuration file only'
)
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
handlers = {
"list": self.list_values,
"edit": self.open_in_editor,
"get": self.get_name,
"set": self.set_name_value,
"unset": self.unset_name
}
# Determine action
if not args or args[0] not in handlers:
logger.error("Need an action ({}) to perform.".format(
", ".join(sorted(handlers)))
)
return ERROR
action = args[0]
# Determine which configuration files are to be loaded
# Depends on whether the command is modifying.
try:
load_only = self._determine_file(
options, need_value=(action in ["get", "set", "unset"])
)
except PipError as e:
logger.error(e.args[0])
return ERROR
# Load a new configuration
self.configuration = Configuration(
isolated=options.isolated_mode, load_only=load_only
)
self.configuration.load()
# Error handling happens here, not in the action-handlers.
try:
handlers[action](options, args[1:])
except PipError as e:
logger.error(e.args[0])
return ERROR
return SUCCESS
def _determine_file(self, options, need_value):
file_options = {
kinds.USER: options.user_file,
kinds.GLOBAL: options.global_file,
kinds.VENV: options.venv_file
}
if sum(file_options.values()) == 0:
if not need_value:
return None
# Default to user, unless there's a virtualenv file.
elif os.path.exists(venv_config_file):
return kinds.VENV
else:
return kinds.USER
elif sum(file_options.values()) == 1:
# There's probably a better expression for this.
return [key for key in file_options if file_options[key]][0]
raise PipError(
"Need exactly one file to operate upon "
"(--user, --venv, --global) to perform."
)
def list_values(self, options, args):
self._get_n_args(args, n=0)
for key, value in sorted(self.configuration.items()):
logger.info("%s=%r", key, value)
def get_name(self, options, args):
key = self._get_n_args(args, n=1)
value = self.configuration.get_value(key)
logger.info("%s", value)
def set_name_value(self, options, args):
key, value = self._get_n_args(args, n=2)
self.configuration.set_value(key, value)
self._save_configuration()
def unset_name(self, options, args):
key = self._get_n_args(args, n=1)
self.configuration.unset_value(key)
self._save_configuration()
def open_in_editor(self, options, args):
editor = self._determine_editor(options)
fname = self.configuration.get_file_to_edit()
if fname is None:
raise PipError("Could not determine appropriate file.")
try:
subprocess.check_call([editor, fname])
except subprocess.CalledProcessError as e:
raise PipError(
"Editor Subprocess exited with exit code {}"
.format(e.returncode)
)
def _get_n_args(self, args, n, example):
"""Helper to make sure the command got the right number of arguments
"""
if len(args) != n:
msg = (
'Got unexpected number of arguments, expected {}. '
'(example: "{} {}")'
).format(n, get_prog(), example)
raise PipError(msg)
if n == 1:
return args[0]
else:
return args
def _save_configuration(self):
# We successfully ran a modifying command. Need to save the
# configuration.
try:
self.configuration.save()
except Exception:
logger.error(
"Unable to save configuration. Please report this as a bug.",
exc_info=1
)
raise PipError("Internal Error.")
def _determine_editor(self, options):
if options.editor is not None:
return options.editor
elif "VISUAL" in os.environ:
return os.environ["VISUAL"]
elif "EDITOR" in os.environ:
return os.environ["EDITOR"]
else:
raise PipError("Could not determine editor to use.")
|
Python
| 0.000005
|
@@ -4632,14 +4632,22 @@
rgs,
+ %22list%22,
n=0)%0A
-
%0A
@@ -4818,32 +4818,46 @@
get_n_args(args,
+ %22get %5Bname%5D%22,
n=1)%0A va
@@ -5019,16 +5019,38 @@
gs(args,
+ %22set %5Bname%5D %5Bvalue%5D%22,
n=2)%0A
@@ -5210,16 +5210,32 @@
gs(args,
+ %22unset %5Bname%5D%22,
n=1)%0A
@@ -5849,19 +5849,16 @@
rgs,
- n,
example
):%0A
@@ -5853,16 +5853,19 @@
example
+, n
):%0A
@@ -6063,16 +6063,16 @@
d %7B%7D. '%0A
-
@@ -6093,16 +6093,23 @@
ple: %22%7B%7D
+ config
%7B%7D%22)'%0A
|
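With the change above, every handler passes a usage example into _get_n_args, so an argument-count error shows a concrete command to copy instead of only a number. A minimal free-standing sketch of the idea (the program name in the message is made up):

def get_n_args(args, example, n):
    """Validate the argument count, pointing at a worked example on failure."""
    if len(args) != n:
        raise ValueError(
            'Got unexpected number of arguments, expected %d. '
            '(example: "pip config %s")' % (n, example)
        )
    return args[0] if n == 1 else args

print(get_n_args(['index-url'], 'get [name]', n=1))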
624745fbb311877f5b2251cf3eefe0cc15b5cca2
|
add some code for documentation
|
pagrant/commands/test.py
|
pagrant/commands/test.py
|
#!/usr/bin/python
#coding:utf8
__author__ = ['markshao']
import os
from optparse import Option
from nose import main
from pagrant.basecommand import Command
from pagrant.commands.init import PAGRANT_CONFIG_FILE_NAME
from pagrant.environment import Environment
from pagrant.exceptions import PagrantConfigError, TestError
class TestCommand(Command):
name = "test"
usage = """%prog [options] """
summary = "execute the test suites|cases with the options"
def __init__(self):
super(TestCommand, self).__init__()
self.parser.add_option(Option(
# Writes the log levels explicitly to the log
'--newvm',
dest='newvm',
action='store_true',
default=False,
))
self.environment = None
def run(self, args):
if not os.path.exists(PAGRANT_CONFIG_FILE_NAME):
raise PagrantConfigError(
"The Pagrantfile should exist in the current folder , have to stop the test case execution")
# validate the Pagrantfile config
self.environment = Environment(os.path.abspath(PAGRANT_CONFIG_FILE_NAME), self.logger)
# deal with the parameter
options, nose_args = self.parser.parse_args(args)
if options.newvm:
self.logger.warn("start init the virtual environment for the test execution")
self.environment.create_machines()
self.environment.start_machines()
self.logger.warn("finish init the virtual environment for the test execution")
# the init is always needed
self.environment.init_test_context()
try:
main(nose_args)
except Exception, e:
raise TestError(e.message)
finally:
if options.newvm:
self.environment.stop_machines()
self.environment.destroy_machines()
|
Python
| 0.000002
|
@@ -397,16 +397,30 @@
ptions%5D
+%5Bnose-options%5D
%22%22%22%0A
@@ -755,16 +755,164 @@
=False,%0A
+ help=%22if set --newvm , the test will fisrt create the new vm against %22 %5C%0A %22the Pagrantfile and destroy them after test%22%0A
|
3f147f229692329181da14217d095556fd5d2f92
|
move post-mortem and profile back to the pairsamtools base
|
pairsamtools/__init__.py
|
pairsamtools/__init__.py
|
# -*- coding: utf-8 -*-
"""
pairsamtools
~~~~~~~~~~~~
CLI tools to process mapped Hi-C data
:copyright: (c) 2017 Massachusetts Institute of Technology
:author: Mirny Lab
:license: MIT
"""
__version__ = '0.0.1-dev'
import click
import functools
import sys
CONTEXT_SETTINGS = {
'help_option_names': ['-h', '--help'],
}
@click.version_option(version=__version__)
@click.group(context_settings=CONTEXT_SETTINGS)
def cli():
pass
def common_io_options(func):
@click.option(
'--nproc-in',
type=int,
default=3,
show_default=True,
help='Number of processes used by the auto-guessed input decompressing command.'
)
@click.option(
'--nproc-out',
type=int,
default=8,
show_default=True,
help='Number of processes used by the auto-guessed output compressing command.'
)
@click.option(
'--cmd-in',
type=str,
default=None,
help='A command to decompress the input file. '
'If provided, fully overrides the auto-guessed command. '
'Does not work with stdin. '
'Must read input from stdin and print output into stdout. '
'EXAMPLE: pbgzip -dc -n 3'
)
@click.option(
'--cmd-out',
type=str,
default=None,
help='A command to compress the output file. '
'If provided, fully overrides the auto-guessed command. '
'Does not work with stdout. '
'Must read input from stdin and print output into stdout. '
'EXAMPLE: pbgzip -c -n 8'
)
@click.option(
'--post-mortem',
help="Post mortem debugging",
is_flag=True,
default=False
)
@click.option(
'--profile',
help="Profile performance and dump the statistics into a file",
type=str,
default=''
)
@functools.wraps(func)
def wrapper(*args, **kwargs):
if kwargs.get('post_mortem'):
import traceback
try:
import ipdb as pdb
except ImportError:
import pdb
def _excepthook(exc_type, value, tb):
traceback.print_exception(exc_type, value, tb)
print()
pdb.pm()
sys.excepthook = _excepthook
if kwargs.get('profile'):
import cProfile
import atexit
pr = cProfile.Profile()
pr.enable()
def _atexit_profile_hook():
pr.disable()
pr.dump_stats(kwargs.get('profile'))
atexit.register(_atexit_profile_hook)
return func(*args, **kwargs)
return wrapper
from .pairsam_dedup import dedup
from .pairsam_sort import sort
from .pairsam_merge import merge
from .pairsam_markasdup import markasdup
from .pairsam_select import select
from .pairsam_split import split
from .pairsam_restrict import restrict
from .pairsam_parse import parse, parse_cigar, parse_algn
from .pairsam_stats import stats
|
Python
| 0
|
@@ -418,27 +418,943 @@
GS)%0A
-def cli():%0A pass
+@click.option(%0A '--post-mortem', %0A help=%22Post mortem debugging%22, %0A is_flag=True,%0A default=False%0A)%0A%0A@click.option(%0A '--output-profile', %0A help=%22Profile performance with Python cProfile and dump the statistics %22%0A %22into a binary file%22, %0A type=str,%0A default=''%0A)%0Adef cli(post_mortem, output_profile):%0A if post_mortem:%0A import traceback%0A try:%0A import ipdb as pdb%0A except ImportError:%0A import pdb%0A def _excepthook(exc_type, value, tb):%0A traceback.print_exception(exc_type, value, tb)%0A print()%0A pdb.pm()%0A sys.excepthook = _excepthook%0A%0A if output_profile:%0A import cProfile %0A import atexit%0A %0A pr = cProfile.Profile()%0A pr.enable()%0A%0A def _atexit_profile_hook():%0A pr.disable()%0A pr.dump_stats(output_profile)%0A%0A atexit.register(_atexit_profile_hook)%0A
%0A%0Ade
@@ -2542,1080 +2542,69 @@
)%0A
- @click.option(%0A '--post-mortem', %0A help=%22Post mortem debugging%22, %0A is_flag=True,%0A default=False%0A )%0A%0A @click.option(%0A '--profile', %0A help=%22Profile performance and dump the statistics into a file%22, %0A type=str,%0A default=''%0A )%0A%0A @functools.wraps(func)%0A def wrapper(*args, **kwargs):%0A if kwargs.get('post_mortem'):%0A import traceback%0A try:%0A import ipdb as pdb%0A except ImportError:%0A import pdb%0A def _excepthook(exc_type, value, tb):%0A traceback.print_exception(exc_type, value, tb)%0A print()%0A pdb.pm()%0A sys.excepthook = _excepthook%0A%0A if kwargs.get('profile'):%0A import cProfile %0A import atexit%0A %0A pr = cProfile.Profile()%0A pr.enable()%0A%0A def _atexit_profile_hook():%0A pr.disable()%0A pr.dump_stats(kwargs.get('profile'))%0A%0A atexit.register(_atexit_profile_hook)%0A
+%0A @functools.wraps(func)%0A def wrapper(*args, **kwargs):
%0A
|
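The commit above moves --post-mortem and the profiling option off the shared per-command decorator and onto the click group itself, so they are parsed and acted on once, in the group callback, before any subcommand body runs. A minimal sketch of group-level options (assumes click is installed; the command names are illustrative):

import click

@click.group()
@click.option('--post-mortem', is_flag=True, default=False)
def cli(post_mortem):
    # Runs before any subcommand; a good place for global debug hooks.
    if post_mortem:
        click.echo('post-mortem debugging enabled')

@cli.command()
def stats():
    click.echo('computing stats')

if __name__ == '__main__':
    cli()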
298163b0e765de364fe1f9eef2c1cc783f3c7404
|
Add FloatArgument
|
pale/arguments/number.py
|
pale/arguments/number.py
|
from pale.errors.validation import ArgumentError
from .base import BaseArgument
class IntegerArgument(BaseArgument):
value_type = 'integer'
allowed_types = (int, long)
min_value = None
max_value = None
def validate(self, item, item_name):
if item is None:
item = self.default
if item is None: # i.e. the default was also None
if self.required:
raise ArgumentError(item_name,
"This argument is required.")
else:
return item
# it's an integer, so just try to shove it into that.
try:
item = int(item)
except ValueError as e:
# if it fails, then the argument is wrong
raise ArgumentError(item_name,
"%s is not a valid integer" % item)
# range checking
if self.min_value is not None and\
self.max_value is not None and\
not (self.min_value <= item <= self.max_value):
raise ArgumentError(item_name,
"You must provide a value between %d and %d" % (
self.min_value, self.max_value))
if self.min_value is not None and item < self.min_value:
raise ArgumentError(item_name,
"You must provide a value greater than or equal to %d" % (
self.min_value))
if self.max_value is not None and item > self.max_value:
raise ArgumentError(item_name,
"You must provide a value less than or equal to %d" % (
self.max_value))
return item
def doc_dict(self):
doc = super(IntegerArgument, self).doc_dict()
doc['min_value'] = self.min_value
doc['max_value'] = self.max_value
return doc
|
Python
| 0.000001
|
@@ -1847,8 +1847,1776 @@
urn doc%0A
+%0A%0Aclass FloatArgument(BaseArgument):%0A value_type = 'float'%0A allowed_types = (float, )%0A min_value = None%0A max_value = None%0A%0A def validate(self, item, item_name):%0A if item is None:%0A item = self.default%0A%0A if item is None: # i.e. the default was also None%0A if self.required:%0A raise ArgumentError(item_name,%0A %22This argument is required.%22)%0A else:%0A return item%0A%0A # it's an integer, so just try to shove it into that.%0A try:%0A item = float(item)%0A except ValueError as e:%0A # if it fails, then the argument is wrong%0A raise ArgumentError(item_name,%0A %22%25s is not a valid integer%22 %25 item)%0A%0A # range checking%0A if self.min_value is not None and%5C%0A self.max_value is not None and%5C%0A not (self.min_value %3C= item %3C= self.max_value):%0A raise ArgumentError(item_name,%0A %22You must provide a value between %25d and %25d%22 %25 (%0A self.min_value, self.max_value))%0A%0A if self.min_value is not None and item %3C self.min_value:%0A raise ArgumentError(item_name,%0A %22You must provide a value greater than or equal to %25d%22 %25 (%0A self.min_value))%0A%0A if self.max_value is not None and item %3E self.max_value:%0A raise ArgumentError(item_name,%0A %22You must provide a value less than or equal to %25d%22 %25 (%0A self.max_value))%0A%0A return item%0A%0A%0A def doc_dict(self):%0A doc = super(FloatArgument, self).doc_dict()%0A doc%5B'min_value'%5D = self.min_value%0A doc%5B'max_value'%5D = self.max_value%0A return doc%0A
|
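The FloatArgument added by this diff follows the same coerce-then-range-check flow as IntegerArgument (and carries over the "not a valid integer" wording in its error message). Distilled to a standalone sketch with names of my own choosing:

def validate_float(item, min_value=None, max_value=None):
    # Coerce first; a failed float() means the argument is invalid.
    item = float(item)
    if min_value is not None and item < min_value:
        raise ValueError("value must be >= %r" % min_value)
    if max_value is not None and item > max_value:
        raise ValueError("value must be <= %r" % max_value)
    return item

print(validate_float("2.5", min_value=0.0, max_value=10.0))  # 2.5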
f3517847990f2007956c319a7784dbfc2d73b91a
|
Remove formatting
|
ellipsis.py
|
ellipsis.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import subprocess
import sys
cwd = os.getcwd()
home = os.getenv('HOME')
devnull = open(os.devnull, 'w')
def find_svn_root(path):
try:
svn_cmd = ['/usr/bin/svn', 'info']
svn_info = subprocess.check_output(svn_cmd, stderr=devnull).decode()
info = dict()
for line in svn_info.splitlines():
if ':' in line:
key, value = line.split(':', maxsplit=1)
info[key] = value.strip()
return info.get('Working Copy Root Path')
except:
return False
def find_git_root(path):
try:
git_cmd = ['/usr/bin/git', 'rev-parse', '--show-toplevel']
git_root = subprocess.check_output(git_cmd, stderr=devnull)
git_root = git_root[:-1] # remove new_line
return git_root.decode()
except:
return False
git_root = find_git_root(cwd)
svn_root = find_svn_root(cwd)
if git_root:
repo_name = os.path.split(git_root)[-1]
git_tag = "\033[1;31m{0}\033[1;37m".format(repo_name)
cwd = cwd.replace(git_root, repo_name)
elif svn_root:
repo_name = svn_root.split('/')[-1]
svn_tag = "\033[1;34m{0}\033[1;37m".format(repo_name)
cwd = cwd.replace(svn_root, svn_tag)
elif cwd.startswith(home):
cwd = cwd.replace(home,'~')
components = cwd.split('/')
if len(components) > 3:
first = components[0]
last = components[-1]
cwd = "{}/…/{}".format(first, last)
print("\033[1;37m{cwd}\033[0m".format(cwd=cwd))
|
Python
| 0.000035
|
@@ -1000,66 +1000,8 @@
-1%5D%0A
- git_tag = %22%5C033%5B1;31m%7B0%7D%5C033%5B1;37m%22.format(repo_name)%0A
@@ -1098,67 +1098,8 @@
-1%5D%0A
- svn_tag = %22%5C033%5B1;34m%7B0%7D%5C033%5B1;37m%22.format(repo_name)%0A
@@ -1130,15 +1130,17 @@
ot,
-svn_tag
+repo_name
)%0Ael
@@ -1352,46 +1352,9 @@
int(
-%22%5C033%5B1;37m%7Bcwd%7D%5C033%5B0m%22.format(cwd=cwd)
+cwd
)%0A
|
3fcda3af7062ff8681f0ec096b896ba862495944
|
Update page_set_smoke_test to check for mix states in page set.
|
tools/telemetry/telemetry/unittest_util/page_set_smoke_test.py
|
tools/telemetry/telemetry/unittest_util/page_set_smoke_test.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import unittest
from telemetry.core import browser_credentials
from telemetry.core import discover
from telemetry.page import page_set as page_set_module
from telemetry.util import classes
from telemetry.wpr import archive_info
class PageSetSmokeTest(unittest.TestCase):
def CheckArchive(self, page_set):
"""Verify that all URLs of pages in page_set have an associated archive. """
# TODO: Eventually these should be fatal.
if not page_set.archive_data_file:
logging.warning('Skipping %s: no archive data file', page_set.file_path)
return
logging.info('Testing %s', page_set.file_path)
archive_data_file_path = os.path.join(page_set.base_dir,
page_set.archive_data_file)
self.assertTrue(os.path.exists(archive_data_file_path),
msg='Archive data file not found for %s' %
page_set.file_path)
wpr_archive_info = archive_info.WprArchiveInfo.FromFile(
archive_data_file_path, page_set.bucket)
for page in page_set.pages:
if not page.url.startswith('http'):
continue
self.assertTrue(wpr_archive_info.WprFilePathForUserStory(page),
msg='No archive found for %s in %s' % (
page.url, page_set.archive_data_file))
def CheckCredentials(self, page_set):
"""Verify that all pages in page_set use proper credentials"""
for page in page_set.pages:
credentials = browser_credentials.BrowserCredentials()
if page.credentials_path:
credentials.credentials_path = (
os.path.join(page.base_dir, page.credentials_path))
fail_message = ('page %s of %s has invalid credentials %s' %
(page.url, page_set.file_path, page.credentials))
if page.credentials:
try:
self.assertTrue(credentials.CanLogin(page.credentials), fail_message)
except browser_credentials.CredentialsError:
self.fail(fail_message)
def CheckAttributes(self, page_set):
"""Verify that page_set and its page's base attributes have the right types.
"""
self.CheckAttributesOfPageSetBasicAttributes(page_set)
for page in page_set.pages:
self.CheckAttributesOfPageBasicAttributes(page)
def CheckAttributesOfPageSetBasicAttributes(self, page_set):
if page_set.base_dir is not None:
self.assertTrue(
isinstance(page_set.base_dir, str),
msg='page_set\'s base_dir must have type string')
self.assertTrue(
isinstance(page_set.archive_data_file, str),
msg='page_set\'s archive_data_file path must have type string')
if page_set.user_agent_type is not None:
self.assertTrue(
isinstance(page_set.user_agent_type, str),
msg='page_set\'s user_agent_type must have type string')
def CheckAttributesOfPageBasicAttributes(self, page):
self.assertTrue(not hasattr(page, 'disabled'))
self.assertTrue(
# We use basestring instead of str because page's url can be string of
# unicode.
isinstance(page.url, basestring),
msg='page %s \'s url must have type string' % page.display_name)
self.assertTrue(
isinstance(page.page_set, page_set_module.PageSet),
msg='page %s \'s page_set must be an instance of '
'telemetry.page.page_set.PageSet' % page.display_name)
self.assertTrue(
isinstance(page.name, str),
msg='page %s \'s name field must have type string' % page.display_name)
self.assertTrue(
isinstance(page.labels, set),
msg='page %s \'s labels field must have type set' % page.display_name)
self.assertTrue(
isinstance(page.startup_url, str),
msg=('page %s \'s startup_url field must have type string'
% page.display_name))
self.assertIsInstance(
page.make_javascript_deterministic, bool,
msg='page %s \'s make_javascript_deterministic must have type bool'
% page.display_name)
for l in page.labels:
self.assertTrue(
isinstance(l, str),
msg='label %s in page %s \'s labels must have type string'
% (str(l), page.display_name))
def RunSmokeTest(self, page_sets_dir, top_level_dir):
"""Run smoke test on all page sets in page_sets_dir.
Subclass of PageSetSmokeTest is supposed to call this in some test
method to run smoke test.
"""
page_sets = discover.DiscoverClasses(page_sets_dir, top_level_dir,
page_set_module.PageSet).values()
for page_set_class in page_sets:
if not classes.IsDirectlyConstructable(page_set_class):
# We can't test page sets that aren't directly constructable since we
# don't know what arguments to put for the constructor.
continue
page_set = page_set_class()
logging.info('Testing %s', page_set.file_path)
self.CheckArchive(page_set)
self.CheckCredentials(page_set)
self.CheckAttributes(page_set)
|
Python
| 0
|
@@ -456,16 +456,156 @@
Case):%0A%0A
+ def setUp(self):%0A # Make sure the added failure message is appended to the default failure%0A # message.%0A self.longMessage = True%0A%0A
def Ch
@@ -4506,24 +4506,590 @@
lay_name))%0A%0A
+ def CheckSharedStates(self, page_set):%0A if not page_set.allow_mixed_story_states:%0A shared_user_story_state_class = (%0A page_set.user_stories%5B0%5D.shared_user_story_state_class)%0A for p in page_set:%0A self.assertIs(%0A shared_user_story_state_class,%0A p.shared_user_story_state_class,%0A msg='page %25s%5C's shared_user_story_state_class field is different '%0A 'from other pages%5C's shared_user_story_state_class whereas '%0A 'page set %25s disallow having mixed states' %25%0A (p, page_set))%0A%0A
def RunSmo
@@ -5879,28 +5879,67 @@
f.CheckAttributes(page_set)%0A
+ self.CheckSharedStates(page_set)%0A
|
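The setUp added in this diff sets self.longMessage = True, which in Python 2's unittest (where it defaults to False) makes a custom msg= append to the standard assertion failure text rather than replace it. Tiny illustration, standard library only:

import unittest

class Demo(unittest.TestCase):
    def setUp(self):
        # Append custom messages to the default failure output.
        self.longMessage = True

    def test_equal(self):
        self.assertEqual(1, 1, msg='extra context shown on failure')

if __name__ == '__main__':
    unittest.main()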
fa40dfcca214011b85c8bf081bc55b4f534acf24
|
Fix cors 'l's
|
jal_stats/jal_stats/settings.py
|
jal_stats/jal_stats/settings.py
|
"""
Django settings for jal_stats project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')qw)*wnq3qn4zb!92bdpq#$u^wuqic$%buhpupeaf2$cd!v!o0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework_swagger',
'corsheaders',
'stats',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'jal_stats.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'jal_stats.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'jal_stats',
'USER': 'jal_stats',
'PASSWORD': 'password',
'HOST': '',
'PORT': '',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'staticfiles'
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
# 'rest_framework.permissions.IsAuthenticated',
'rest_framework.permissions.AllowAny',
]
}
CORS_ORIGIN_AllOW_ALL = True
# if DEBUG is True:
# CORS_ORIGIN_ALLOW_ALL = True
# else:
# CORS_ORIGIN_ALLOW_ALL = False
# CORS_ORIGIN_WHITELIST = (
# 'example.firebase.com'
# )
|
Python
| 0.000128
|
@@ -3345,18 +3345,18 @@
ORIGIN_A
-ll
+LL
OW_ALL =
|
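The one-character fix above matters because django-cors-headers only honours the exactly spelled setting CORS_ORIGIN_ALLOW_ALL; the misspelled variant is silently ignored, leaving CORS closed. The commented-out block in the file hints at the intended production shape, roughly (the whitelist entry is the file's own placeholder):

# settings.py sketch
if DEBUG:
    CORS_ORIGIN_ALLOW_ALL = True
else:
    CORS_ORIGIN_ALLOW_ALL = False
    CORS_ORIGIN_WHITELIST = (
        'example.firebase.com',
    )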
c9ed81608ea6d017dbe23e012d0e137c1ce9ef10
|
remove eddy from test
|
neurodocker/interfaces/tests/test_fsl.py
|
neurodocker/interfaces/tests/test_fsl.py
|
"""Tests for neurodocker.interfaces.FSL"""
from neurodocker.interfaces.tests import utils
class TestFSL(object):
def test_docker(self):
specs = {
'pkg_manager': 'yum',
'instructions': [
('base', 'centos:7'),
('fsl', {'version': '5.0.10', 'eddy_5011': True}),
('user', 'neuro'),
]
}
bash_test_file = "test_fsl.sh"
utils.test_docker_container_from_specs(
specs=specs, bash_test_file=bash_test_file)
def test_singularity(self):
specs = {
'pkg_manager': 'yum',
'instructions': [
('base', 'docker://centos:7'),
('fsl', {'version': '5.0.10', 'eddy_5011': True}),
('user', 'neuro'),
]
}
utils.test_singularity_container_from_specs(specs=specs)
|
Python
| 0.000003
|
@@ -299,37 +299,18 @@
: '5.0.1
-0', 'eddy_5011': True
+1'
%7D),%0A
|
0a98e91d5b30e5282ad90cfedcf0162ce290a0e9
|
fix many replace player_config
|
flowplayer/templatetags/flowplayer_tags.py
|
flowplayer/templatetags/flowplayer_tags.py
|
from django.conf import settings
from django.db.models.query import QuerySet
from django.template import Node, TemplateSyntaxError
from django.template import Library, Variable, loader, Context
# ID of current flowplayer being rendered (global to ensure unique)
FLOWPLAYER_ITERATOR = 0
register = Library()
class FlowPlayerNode(Node):
"Renderer class for the flowplayer template tag."
def __init__(self, media, player_class, player_id=None):
"""
Constructor.
Parameters:
media
Media file url OR an array of urls
player_class
Type of player to show (changes css class and config source)
"""
self.player_class = player_class
self.media = Variable(media)
if player_id != None:
self.player_id = Variable(player_id)
if settings.FLOWPLAYER_URL:
self.player_url = settings.FLOWPLAYER_URL
else:
self.player_url = "%sflowplayer/FlowPlayerLight.swf" % (settings.MEDIA_URL)
# Import the configuration settings to set on the player output
# Configuration is defined in the settings (multiple types of player)
if 'default' in settings.FLOWPLAYER_CONFIG:
self.player_config = settings.FLOWPLAYER_CONFIG['default']
else:
self.player_config = dict()
if player_class in settings.FLOWPLAYER_CONFIG:
self.player_config.update(settings.FLOWPLAYER_CONFIG[player_class])
def render(self, context):
if 'flowplayer_iterator' in context:
context['flowplayer_iterator'] += 1
else:
context['flowplayer_iterator'] = 0
try:
# Try resolve this variable in the template context
self.media_element = self.media.resolve(context)
except:
# Cannot resolve, therefore treat as url string
self.media_element = self.media
try:
self.extra_id = self.player_id.resolve(context)
except:
self.extra_id = ''
# Have we got an array or a string?
if isinstance(self.media_element, list):
# Can resolve, push first url into the url variable
self.media_url = self.media_element[0]['url']
self.media_playlist = self.media_element
elif isinstance(self.media_element, QuerySet):
# Can resolve, push first url into the url variable
self.media_url = self.media_element[0].url
self.media_playlist = self.media_element
else:
self.media_url = self.media_element
self.media_playlist = False
t = loader.get_template('flowplayer/flowplayer.html')
code_context = Context(
{"player_url": self.player_url,
"player_id": '%s%s' % (context['flowplayer_iterator'], self.extra_id),
"player_class": self.player_class,
"player_config": self.player_config,
"media_url": self.media_url,
"media_playlist": self.media_playlist
}, autoescape=context.autoescape)
return t.render(code_context)
def do_flowplayer(parser, token):
"""
This will insert an flash-based flv videoplayer (flowplayer) in form of an <object>
code block.
Usage::
{% flowplayer media_url %}
Example::
{% flowplayer video.flv %}
By default, 'flowplayer' tag will use FlowPlayerLight.swf found at
``{{ MEDIA_URL }}flowplayer/FlowPlayerLight.swf``.
To change this add FLOWPLAYER_URL to your settings.py file
Pass a dict of urls to the player to get a playlisted player instance
"""
args = token.split_contents()
if len(args) < 2:
raise TemplateSyntaxError("'flowplayer' tag requires at least one argument.")
if len(args) == 3:
player_class = args[2]
else:
player_class = None
if len(args) == 4:
player_id = args[3]
else:
player_id = None
media = args[1]
return FlowPlayerNode(media, player_class, player_id)
# register the tag
register.tag('flowplayer', do_flowplayer)
|
Python
| 0.000001
|
@@ -1037,16 +1037,47 @@
A_URL)%0A%0A
+ def render(self, context):%0A
@@ -1403,24 +1403,29 @@
%0A if
+self.
player_class
@@ -1521,16 +1521,21 @@
_CONFIG%5B
+self.
player_c
@@ -1546,40 +1546,8 @@
%5D)%0A%0A
- def render(self, context):%0A%0A
|
3355ca79a507254e5b3e7149070ae263afc6f292
|
Bump up number of active runs to 20k.
|
tracker/src/main/workflows/filter-variants-workflow/filter-variants-workflow.py
|
tracker/src/main/workflows/filter-variants-workflow/filter-variants-workflow.py
|
from airflow import DAG
from airflow.operators import BashOperator, PythonOperator
from datetime import datetime, timedelta
import os
import logging
from subprocess import call
import tracker.model
from tracker.model.analysis_run import *
from tracker.util.workflow_common import *
def filter_variants(**kwargs):
config = get_config(kwargs)
sample = get_sample(kwargs)
sample_id = sample["sample_id"]
sample_path_prefix = sample["path_prefix"]
sample_filename = sample["filename"]
sample_location = "{}/{}".format(sample_path_prefix, sample_filename)
result_path_prefix = config["results_local_path"] + "/" + sample_id
vcffilter_path = config["vcffilter"]["path"]
vcffilter_flags = config["vcffilter"]["flags"]
vt_path = config["vt"]["path"]
vt_command = config["vt"]["command"]
vt_flags = config["vt"]["flags"]
if (not os.path.isdir(result_path_prefix)):
logging.info(
"Results directory {} not present, creating.".format(result_path_prefix))
os.makedirs(result_path_prefix)
result_filename = "{}/{}_filtered.vcf".format(
result_path_prefix, sample_filename)
reference_location = config["reference_location"]
filtering_command = 'zcat {} | {} -f "{}" | {} {} -r {} {} - -o {}'.\
format(sample_location,
vcffilter_path,
vcffilter_flags,
vt_path,
vt_command,
reference_location,
vt_flags,
result_filename)
call_command(filtering_command, "vcf-filter")
compressed_sample_filename = compress_sample(result_filename, config)
generate_tabix(compressed_sample_filename, config)
copy_result(compressed_sample_filename, sample_id, config)
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': datetime(2020, 1, 1),
'email': ['airflow@airflow.com'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5),
}
dag = DAG("filter-vcf", default_args=default_args,
schedule_interval=None, concurrency=10000, max_active_runs=2000)
start_analysis_run_task = PythonOperator(
task_id="start_analysis_run",
python_callable=start_analysis_run,
provide_context=True,
dag=dag)
filter_task = PythonOperator(
task_id="filter_variants",
python_callable=filter_variants,
provide_context=True,
dag=dag)
filter_task.set_upstream(start_analysis_run_task)
complete_analysis_run_task = PythonOperator(
task_id="complete_analysis_run",
python_callable=complete_analysis_run,
provide_context=True,
dag=dag)
complete_analysis_run_task.set_upstream(filter_task)
|
Python
| 0
|
@@ -2177,17 +2177,17 @@
urrency=
-1
+2
0000, ma
@@ -2204,16 +2204,17 @@
uns=2000
+0
)%0A%0A%0Astar
|
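Both values touched here are plain DAG constructor arguments in this generation of Airflow: concurrency caps task instances running across all active runs of the DAG, while max_active_runs caps simultaneous DAG runs. Reduced sketch under that assumption:

from datetime import datetime
from airflow import DAG

dag = DAG(
    "filter-vcf",
    start_date=datetime(2020, 1, 1),
    schedule_interval=None,
    concurrency=20000,      # cap on concurrently running task instances
    max_active_runs=20000,  # cap on simultaneous DAG runs (the value bumped here)
)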
d6b361411b4414e5841652b513ef42b13f34169e
|
Test for high overlap
|
fmriprep/workflows/bold/tests/test_util.py
|
fmriprep/workflows/bold/tests/test_util.py
|
''' Testing module for fmriprep.workflows.bold.util '''
import pytest
import os
import numpy as np
from nipype.utils.filemanip import fname_presuffix
from nilearn.image import load_img
from ..util import init_bold_reference_wf
def symmetric_overlap(img1, img2):
mask1 = load_img(img1).get_data() > 0
mask2 = load_img(img2).get_data() > 0
total1 = np.sum(mask1)
total2 = np.sum(mask2)
overlap = np.sum(mask1 & mask2)
return overlap / np.sqrt(total1 * total2)
@pytest.mark.skipif(not os.getenv('FMRIPREP_REGRESSION_SOURCE') or
not os.getenv('FMRIPREP_REGRESSION_TARGETS'))
@pytest.mark.parametrize('input_fname,expected_fname', [
(os.path.join(os.getenv('FMRIPREP_REGRESSION_SOURCE', ''),
base_fname),
fname_presuffix(base_fname, suffix='_mask', use_ext=True,
new_path=os.getenv('FMRIPREP_REGRESSION_TARGETS', '')))
for base_fname in (
'ds000116/sub-12_task-visualoddballwithbuttonresponsetotargetstimuli_run-02_bold.nii.gz',
# 'ds000133/sub-06_ses-post_task-rest_run-01_bold.nii.gz',
# 'ds000140/sub-32_task-heatpainwithregulationandratings_run-02_bold.nii.gz',
# 'ds000157/sub-23_task-passiveimageviewing_bold.nii.gz',
# 'ds000210/sub-06_task-rest_run-01_echo-1_bold.nii.gz',
# 'ds000210/sub-06_task-rest_run-01_echo-2_bold.nii.gz',
# 'ds000210/sub-06_task-rest_run-01_echo-3_bold.nii.gz',
# 'ds000216/sub-03_task-rest_echo-1_bold.nii.gz',
# 'ds000216/sub-03_task-rest_echo-2_bold.nii.gz',
# 'ds000216/sub-03_task-rest_echo-3_bold.nii.gz',
# 'ds000216/sub-03_task-rest_echo-4_bold.nii.gz',
# 'ds000237/sub-03_task-MemorySpan_acq-multiband_run-01_bold.nii.gz',
# 'ds000237/sub-06_task-MemorySpan_acq-multiband_run-01_bold.nii.gz',
)
])
def test_masking(input_fname, expected_fname):
bold_reference_wf = init_bold_reference_wf(enhance_t2=True)
bold_reference_wf.inputs.inputnode.bold_file = input_fname
res = bold_reference_wf.run()
combine_masks = [node for node in res.nodes if node.name.endswith('combine_masks')][0]
overlap = symmetric_overlap(expected_fname,
combine_masks.result.outputs.out_file)
assert overlap < 0.95, input_fname
|
Python
| 0
|
@@ -2288,9 +2288,9 @@
lap
-%3C
+%3E
0.9
|
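symmetric_overlap computes |A ∩ B| / sqrt(|A| * |B|), i.e. the overlap normalised by the geometric mean of the two mask sizes, which is 1.0 only for identical masks. Quick numeric check of the arithmetic:

import numpy as np

mask1 = np.array([1, 1, 1, 0], dtype=bool)
mask2 = np.array([1, 1, 0, 0], dtype=bool)
overlap = np.sum(mask1 & mask2) / np.sqrt(mask1.sum() * mask2.sum())
print(overlap)  # 2 / sqrt(3 * 2) ~= 0.816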
fa4ce93305c30a2a2974e73314e0726e46caff9e
|
Comment out some unused overridden methods (that wouldn't have worked anyway yet). Also, change the get_*_thumbnail_tag method to escape rather than urlquote the url (it's already been through iri_to_uri, but it could contain an ampersand)
|
sorl/thumbnail/fields.py
|
sorl/thumbnail/fields.py
|
import os.path
from django.db.models.fields import ImageField
from django.utils.html import urlquote
from django.utils.safestring import mark_safe
from django.utils.functional import curry
from django.conf import settings
from main import DjangoThumbnail
REQUIRED_ARGS = ('size',)
ALL_ARGS = {
'size': 'requested_size',
'options': 'opts',
'quality': 'quality',
'basedir': 'basedir',
'subdir': 'subdir',
'prefix': 'prefix',
}
class ImageWithThumbnailsField(ImageField):
"""
photo = ImageWithThumbnailsField(
upload_to='uploads',
thumbnail={
'size': (80, 80),
'options': ('crop', 'upscale'),
},
extra_thumbnails={
'admin': {
'size': (70, 50),
'options': ('sharpen',),
}
}
)
"""
def __init__(self, thumbnail, extra_thumbnails=None, **kwargs):
super(ImageWithThumbnailsField, self).__init__(**kwargs)
_verify_thumbnail_attrs(thumbnail)
if extra_thumbnails:
for extra, attrs in extra_thumbnails.items():
name = "%r of 'extra_thumbnails'"
_verify_thumbnail_attrs(attrs, name)
self.thumbnail = thumbnail
self.extra_thumbnails = extra_thumbnails
def get_internal_type(self):
return "ImageField"
def get_manipulator_field_objs(self):
return [oldforms.ImageUploadField, oldforms.HiddenField]
def contribute_to_class(self, cls, name):
super(ImageWithThumbnailsField, self).contribute_to_class(cls, name)
self._contribute_thumbnail(cls, name, self.thumbnail)
if self.extra_thumbnails:
for extra, thumbnail in self.extra_thumbnails.items():
if not extra:
continue
n = '%s_%s' % (name, extra)
self._contribute_thumbnail(cls, n, thumbnail)
def _contribute_thumbnail(self, cls, name, thumbnail):
func = curry(_get_thumbnail, field=self, attrs=thumbnail)
tag_func = curry(_get_thumbnail_tag, field=self, attrs=thumbnail)
# Make it safe for contrib.admin
tag_func.allow_tags = True
setattr(cls, 'get_%s_thumbnail' % name, func)
setattr(cls, 'get_%s_thumbnail_tag' % name, tag_func)
def save_file(self, *args, **kwargs):
super(ImageField, self).save_file(*args, **kwargs)
self._get_thumbnail()
def delete_file(self, *args, **kwargs):
super(ImageField, self).delete_file(*args, **kwargs)
def _verify_thumbnail_attrs(attrs, name="'thumbnail'"):
for arg in REQUIRED_ARGS:
if arg not in attrs:
raise TypeError('Required attr %r missing in %s arg' % (arg, name))
for attr in attrs:
if attr not in ALL_ARGS:
raise TypeError('Invalid attr %r found in %s arg' % (attr, name))
def _get_thumbnail(model_instance, field, attrs):
# Build kwargs
kwargs = {}
for k, v in attrs.items():
kwargs[ALL_ARGS[k]] = v
# Build relative source path
filename = getattr(model_instance, 'get_%s_filename' % field.name)()
media_root_len = len(os.path.normpath(settings.MEDIA_ROOT))
filename = os.path.normpath(filename)
filename = filename[media_root_len:].lstrip(os.path.sep)
# Return thumbnail
return DjangoThumbnail(filename, **kwargs)
def _get_thumbnail_tag(model_instance, field, attrs):
thumb = _get_thumbnail(model_instance, field, attrs)
url = urlquote(unicode(thumb))
tag = '<img src="%s" width="%s" height="%s" alt="" />' % (
url, thumb.width(), thumb.height())
return mark_safe(tag)
|
Python
| 0.000027
|
@@ -60,47 +60,8 @@
eld%0A
-from django.utils.html import urlquote%0A
from
@@ -144,16 +144,53 @@
t curry%0A
+from django.utils.html import escape%0A
from dja
@@ -247,16 +247,62 @@
humbnail
+%0A#from utils import delete_thumbnails (TODO:)
%0A%0AREQUIR
@@ -2342,24 +2342,81 @@
_func)%0A%0A
+# TODO: saving the file should generate thumbnails.%0A #
def save_fil
@@ -2437,32 +2437,33 @@
**kwargs):%0A
+#
super(ImageF
@@ -2453,32 +2453,45 @@
# super(Image
+WithThumbnail
Field, self).sav
@@ -2518,25 +2518,21 @@
gs)%0A
+#
-self.
_get_thu
@@ -2541,24 +2541,238 @@
nail()%0A%0A
+# TODO: deleting the image should delete its thumbnails too.%0A # Note that http://code.google.com/p/sorl-thumbnail/issues/detail?id=23%0A # mentions a problem with using *args, **kwargs, so test that too.%0A #
def delete_f
@@ -2795,32 +2795,33 @@
**kwargs):%0A
+#
super(ImageF
@@ -2815,24 +2815,37 @@
super(Image
+WithThumbnail
Field, self)
@@ -2869,24 +2869,53 @@
s, **kwargs)
+%0A # delete_thumbnails()
%0A%0Adef _verif
@@ -3840,43 +3840,8 @@
rs)%0A
- url = urlquote(unicode(thumb))%0A
@@ -3911,11 +3911,21 @@
-url
+escape(thumb)
, th
|
4824b929bfa7a18a9f7796a9b93ad17909feeb56
|
Switch parameter has been added.
|
lib/opentuner/streamjit/sjparameters.py
|
lib/opentuner/streamjit/sjparameters.py
|
import deps #fix sys.path
import opentuner
from opentuner.search.manipulator import (IntegerParameter,
FloatParameter)
class sjIntegerParameter(IntegerParameter):
def __init__(self, name, min, max,value, javaClassPath = None, **kwargs):
self.value = value
self.javaClassPath = javaClassPath
super(sjIntegerParameter, self).__init__(name, min, max, **kwargs)
def getValue(self):
return self.value
def getJavaClassPath(self):
return self.javaClassPath
class sjFloatParameter(FloatParameter):
def __init__(self, name, min, max,value, javaClassPath = None, **kwargs):
self.value = value
self.javaClassPath = javaClassPath
super(sjFloatParameter, self).__init__(name, min, max, **kwargs)
def getValue(self):
return self.value
def getJavaClassPath(self):
return self.javaClassPath
if __name__ == '__main__':
ip = IntegerParameter("suman", 2, 7)
sjip = sjIntegerParameter("ss", 3, 56, 45)
print sjip.getValue()
|
Python
| 0
|
@@ -152,16 +152,33 @@
arameter
+, SwitchParameter
)%0A%0Aclass
@@ -868,119 +868,810 @@
Path
-%09%09%0A%0Aif __name__ == '__main__':%0A%09ip = IntegerParameter(%22suman%22, 2, 7)%0A%09sjip = sjIntegerParameter(%22ss%22, 3, 56, 45
+%0A%0Aclass sjSwitchParameter(SwitchParameter):%0A%09def __init__(self, name, universeType, universe,value, javaClassPath = None, **kwargs):%0A%09%09self.value = value%0A%09%09self.javaClassPath = javaClassPath%0A%09%09self.universeType = universeType%0A%09%09self.universe = universe%0A%09%09super(sjSwitchParameter, self).__init__(name, len(universe), **kwargs)%0A%0A%09def getValue(self):%0A%09%09return self.value%0A%0A%09def getJavaClassPath(self):%0A%09%09return self.javaClassPath%0A%0A%09def getUniverse(self):%0A%09%09return self.universe%0A%0A%09def getUniverseType(self):%0A%09%09return self.universeType%0A%0Aif __name__ == '__main__':%0A%09ip = IntegerParameter(%22suman%22, 2, 7)%0A%09sjip = sjIntegerParameter(%22ss%22, 3, 56, 45)%0A%09sjsw = sjSwitchParameter('sjswtch', 'java.lang.Integer', %5B1, 2, 3, 4%5D, 2, 'edu.mit.streamjit.impl.common.Configuration$SwitchParameter')%0A%09print sjsw.getUniverse(
)%0A%09p
|
d6ff7f066c99f33eb88094e117283dc303df6a38
|
Add pkcs12 module to binding
|
binding.gyp
|
binding.gyp
|
{
"targets": [
{
"target_name": "trusted",
"sources": [
"src/node/main.cpp",
"src/node/helper.cpp",
"src/node/stdafx.cpp",
"src/node/utils/wlog.cpp",
"src/node/utils/wrap.cpp",
"src/node/pki/wcrl.cpp",
"src/node/pki/wcert.cpp",
"src/node/pki/wcerts.cpp",
"src/node/pki/wattr.cpp",
"src/node/pki/wattr_vals.cpp",
"src/node/pki/wkey.cpp",
"src/node/pki/woid.cpp",
"src/node/pki/walg.cpp",
"src/node/pki/wcertRegInfo.cpp",
"src/node/pki/wcertReg.cpp",
"src/node/pki/wcsr.cpp",
"src/node/pki/wcipher.cpp",
"src/node/pki/wchain.cpp",
"src/node/store/wcashjson.cpp",
"src/node/store/wpkistore.cpp",
"src/node/store/wsystem.cpp",
"src/node/store/wmicrosoft.cpp",
"src/node/cms/wsigned_data.cpp",
"src/node/cms/wsigner.cpp",
"src/node/cms/wsigners.cpp",
"src/node/cms/wsigner_attrs.cpp",
"src/wrapper/stdafx.cpp",
"src/wrapper/common/bio.cpp",
"src/wrapper/common/common.cpp",
"src/wrapper/common/excep.cpp",
"src/wrapper/common/log.cpp",
"src/wrapper/common/openssl.cpp",
"src/wrapper/common/prov.cpp",
"src/wrapper/pki/crl.cpp",
"src/wrapper/pki/cert.cpp",
"src/wrapper/pki/certs.cpp",
"src/wrapper/pki/key.cpp",
"src/wrapper/pki/certRegInfo.cpp",
"src/wrapper/pki/certReg.cpp",
"src/wrapper/pki/csr.cpp",
"src/wrapper/pki/cipher.cpp",
"src/wrapper/store/cashjson.cpp",
"src/wrapper/store/pkistore.cpp",
"src/wrapper/store/provider_system.cpp",
"src/wrapper/store/provider_microsoft.cpp",
"src/wrapper/store/storehelper.cpp",
"src/wrapper/pki/x509_name.cpp",
"src/wrapper/pki/alg.cpp",
"src/wrapper/pki/attr.cpp",
"src/wrapper/pki/attrs.cpp",
"src/wrapper/pki/attr_vals.cpp",
"src/wrapper/pki/oid.cpp",
"src/wrapper/cms/cert_id.cpp",
"src/wrapper/cms/signer.cpp",
"src/wrapper/cms/signers.cpp",
"src/wrapper/cms/signer_attrs.cpp",
"src/wrapper/cms/signed_data.cpp",
"src/jsoncpp/jsoncpp.cpp"
],
"conditions": [
[
"OS=='win'",
{
"conditions": [
[
"target_arch=='x64'",
{
"variables": {
"openssl_root%": "C:/openssl"
}
},
{
"variables": {
"openssl_root%": "C:/openssl"
}
}
]
],
"libraries": [
"-l<(openssl_root)/lib/libeay32.lib",
"-lcrypt32.lib"
],
"include_dirs": [
"<(openssl_root)/include"
],
"defines": [ "CTWRAPPER_STATIC" ],
"msbuild_settings": {
"Link": {
"ImageHasSafeExceptionHandlers": "false"
}
}
},
{
"conditions": [
[
"target_arch=='x64'",
{
"variables": {
"csp_root%": "/opt/cprocsp/lib/amd64"
}
},
{
"variables": {
"csp_root%": "/opt/cprocsp/lib/ia32"
}
}
]
],
"libraries": [
"-luuid",
"-L<(csp_root) -lcapi20"
],
"include_dirs": [
"<(node_root_dir)/deps/openssl/openssl/include",
"/opt/cprocsp/include"
],
"defines": [ "UNIX" ]
}
]
],
"include_dirs": [
"<!(node -e \"require('nan')\")"
],
"cflags": [ ],
"cflags_cc!": [
"-fno-rtti",
"-fno-exceptions"
]
}
]
}
|
Python
| 0
|
@@ -838,32 +838,76 @@
ki/wchain.cpp%22,%0A
+ %22src/node/pki/wpkcs12.cpp%22,%0A
@@ -1951,32 +1951,78 @@
ki/cipher.cpp%22,%0A
+ %22src/wrapper/pki/pkcs12.cpp%22,%0A
|
531dcc85b3579712ab5576a50e7dd10457444fb4
|
remove old class definitions
|
ecmwf_models/__init__.py
|
ecmwf_models/__init__.py
|
import pkg_resources
try:
__version__ = pkg_resources.get_distribution(__name__).version
except:
__version__ = 'unknown'
from ecmwf_models.interface import ERAInterimImg
from ecmwf_models.interface import ERAInterimDs
|
Python
| 0.999965
|
@@ -127,102 +127,4 @@
wn'%0A
-%0Afrom ecmwf_models.interface import ERAInterimImg%0Afrom ecmwf_models.interface import ERAInterimDs%0A
|
977cf58125a204010197c95827457843503e2c5b
|
Disable BSF Campus for RCA Alliance Française
|
ideascube/conf/kb_rca_alliancefrancaise.py
|
ideascube/conf/kb_rca_alliancefrancaise.py
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'fr'
IDEASCUBE_NAME = 'Alliance française de Bangui'
|
Python
| 0
|
@@ -133,8 +133,110 @@
Bangui'%0A
+%0A# Disable BSF Campus for now%0AHOME_CARDS = %5Bcard for card in HOME_CARDS if card%5B'id'%5D != 'bsfcampus'%5D%0A
|
5ba36cb51fc2d93dd05430c3f5a0d24262b32985
|
Remove unnecessary type change
|
common/gradient.py
|
common/gradient.py
|
# coding: utf-8
import numpy as np
def _numerical_gradient_1d(f, x):
h = 1e-4 # 0.0001
grad = np.zeros_like(x)
for idx in range(x.size):
tmp_val = x[idx]
x[idx] = float(tmp_val) + h
fxh1 = f(x) # f(x+h)
x[idx] = tmp_val - h
fxh2 = f(x) # f(x-h)
grad[idx] = (fxh1 - fxh2) / (2*h)
x[idx] = tmp_val # restore the original value
return grad
def numerical_gradient_2d(f, X):
if X.ndim == 1:
return _numerical_gradient_1d(f, X)
else:
grad = np.zeros_like(X)
for idx, x in enumerate(X):
grad[idx] = _numerical_gradient_1d(f, x)
return grad
def numerical_gradient(f, x):
h = 1e-4 # 0.0001
grad = np.zeros_like(x)
it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
idx = it.multi_index
tmp_val = x[idx]
x[idx] = float(tmp_val) + h
fxh1 = f(x) # f(x+h)
x[idx] = tmp_val - h
fxh2 = f(x) # f(x-h)
grad[idx] = (fxh1 - fxh2) / (2*h)
x[idx] = tmp_val # restore the original value
it.iternext()
return grad
|
Python
| 0.000003
|
@@ -934,38 +934,31 @@
x%5Bidx%5D =
-float(
tmp_val
-)
+ h%0A
@@ -1161,28 +1161,29 @@
%0A %0A return grad
+%0A
|
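All three routines in that file use the central difference (f(x+h) - f(x-h)) / (2h) with h = 1e-4, writing into each coordinate and restoring it afterwards. The scheme is easy to sanity-check in the scalar case:

def central_diff(f, x, h=1e-4):
    # Same scheme as the module above, one coordinate at a time.
    return (f(x + h) - f(x - h)) / (2 * h)

print(central_diff(lambda t: t ** 2, 3.0))  # ~6.0; the exact derivative is 2*3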
6c7a5078a44aced7e406321077288211f5887e48
|
use wildcards, add include dir
|
binding.gyp
|
binding.gyp
|
{
"targets": [
{
"target_name": "test",
"type": "executable",
"sources": [
"CppUnitLite/StackTest.cpp",
"CppUnitLite/StackMain.cpp",
"src/test/hello.cc"
],
"libraries": [
"Release/libCppUnitLite"
],
"include_dirs": [
"."
],
"dependencies": [
'libCppUnitLite'
]
# sample unit test
},
{
# unit testing library
"target_name": "libCppUnitLite",
"type": "static_library",
"sources": [
"CppUnitLite/Failure.cpp",
"CppUnitLite/SimpleString.cpp",
"CppUnitLite/Test.cpp",
"CppUnitLite/TestResult.cpp",
"CppUnitLite/TestRegistry.cpp"
]
},
{
"target_name": "libinchi",
"sources": [
"src/node-inchi.cc",
"INCHI-1-API/INCHI_API/inchi_dll/ichi_bns.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichi_io.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichican2.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichicano.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichicans.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichiisot.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichilnct.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichimak2.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichimake.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichimap1.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichimap2.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichimap4.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichinorm.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichiparm.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichiprt1.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichiprt2.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichiprt3.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichiqueu.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichiread.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichiring.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichirvr1.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichirvr2.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichirvr3.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichirvr4.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichirvr5.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichirvr6.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichirvr7.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichisort.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichister.c",
"INCHI-1-API/INCHI_API/inchi_dll/ichitaut.c",
"INCHI-1-API/INCHI_API/inchi_dll/ikey_base26.c",
"INCHI-1-API/INCHI_API/inchi_dll/ikey_dll.c",
"INCHI-1-API/INCHI_API/inchi_dll/inchi_dll.c",
"INCHI-1-API/INCHI_API/inchi_dll/inchi_dll_a.c",
"INCHI-1-API/INCHI_API/inchi_dll/inchi_dll_a2.c",
"INCHI-1-API/INCHI_API/inchi_dll/inchi_dll_main.c",
"INCHI-1-API/INCHI_API/inchi_dll/runichi.c",
"INCHI-1-API/INCHI_API/inchi_dll/sha2.c",
"INCHI-1-API/INCHI_API/inchi_dll/strutil.c",
"INCHI-1-API/INCHI_API/inchi_dll/util.c"
]
}
]
}
|
Python
| 0
|
@@ -188,13 +188,29 @@
est/
-hello
+*.cc%22,%0A %22src/*
.cc%22
@@ -318,16 +318,64 @@
%22.%22
+,%0A %22INCHI-1-API/INCHI_API%22,%0A %22src%22
%0A %5D
@@ -833,16 +833,83 @@
inchi%22,%0A
+ %22include_dirs%22: %5B%0A %22./INCHI-1-API/INCHI_API%22%0A %5D,%0A
%22s
@@ -934,18 +934,9 @@
src/
-node-inchi
+*
.cc%22
|
ab42cc26e8a994974cb5beb1550715e6f838d7cb
|
fix file outputting
|
BUN/bunpack.py
|
BUN/bunpack.py
|
#!/usr/bin/env python
import struct, sys
from PIL import Image
u8 = struct.Struct('<B')
def convert_palette(pal):
result = []
s = struct.Struct('<BBB')
for i in range(len(pal) // 3):
result.append(s.unpack_from(pal, i * 3))
return result
def extract_image(data, offset, pal):
width = u8.unpack_from(data, offset + 2)[0]
height = u8.unpack_from(data, offset + 3)[0]
if width == 0 or height == 0:
return None
colours = convert_palette(pal)
img = Image.new('RGB', (width,height))
pix = img.load()
offset += 4
for y in range(height):
x = 0
while True:
block_width = u8.unpack_from(data, offset)[0]
if block_width == 0:
# end of row
offset += 1
break
spacing_before = u8.unpack_from(data, offset + 1)[0]
offset += 2
x += spacing_before
for _ in range(block_width):
index = u8.unpack_from(data, offset + 1)[0]
pix[x,y] = colours[index]
x += 1
offset += 1
return img
def main(argv):
import argparse, string
import os.path
parser = argparse.ArgumentParser(description='Converts Network Q .BUN files into .PNG')
parser.add_argument('input', metavar='infile', type=str, nargs=1, help='the input file (.BUN)')
parser.add_argument('-p', '--pal', type=str, help='optional palette file (.PAL)')
args = parser.parse_args()
path,ext = os.path.splitext(args.input[0])
# args.input takes exactly one path (nargs=1); use the --pal option for the palette
if args.pal:
    palpath = os.path.splitext(args.pal)[0]
else:
    palpath = path
if ext != '.BUN':
print('File does not have .BUN extension!')
return
filename = os.path.split(path)
try:
f = open(path + '.BUN', 'rb')
except IOError as e:
print('Unable to open BUN file!')
return
else:
data = f.read()
f.close()
try:
f = open(palpath + '.PAL', 'rb')
except IOError as e:
print('Unable to open PAL file!')
return
else:
pal_data = f.read()
f.close()
# read the file header (list of offsets)
first_offset = struct.unpack_from('<I', data, 0)[0]
image_count = first_offset // 4
offsets = []
for i in range(image_count):
offsets.append(struct.unpack_from('<I', data, i * 4)[0])
for i, offset in enumerate(offsets):
img = extract_image(data, offset, pal_data)
if img is not None:
img.save(filename + '/%d.png' % i)
if __name__ == "__main__":
main(sys.argv[1:])
|
Python
| 0.000003
|
@@ -1525,16 +1525,19 @@
it(path)
+%5B1%5D
%0A %0A tr
@@ -2093,16 +2093,169 @@
4)%5B0%5D)%0A%0A
+ if not os.path.exists(filename):%0A try:%0A os.mkdir(filename)%0A except:%0A print('Unable to create path ' + filename + '!')%0A return%0A
%0A for i
|
d5cb8ea39236f52f3ee9d2f9f8485dc5f737a5bb
|
Send a message every few minutes to keep Travis happy
|
allTests.py
|
allTests.py
|
#!/usr/bin/env python
#Copyright (C) 2011 by Benedict Paten (benedictpaten@gmail.com)
#
#Released under the MIT license, see LICENSE.txt
import unittest
import os
from cactus.setup.cactus_setupTest import TestCase as setupTest
from cactus.blast.cactus_blastTest import TestCase as blastTest
from cactus.pipeline.cactus_workflowTest import TestCase as workflowTest
from cactus.pipeline.cactus_evolverTest import TestCase as evolverTest
from cactus.bar.cactus_barTest import TestCase as barTest
from cactus.phylogeny.cactus_phylogenyTest import TestCase as phylogenyTest
from cactus.faces.cactus_fillAdjacenciesTest import TestCase as adjacenciesTest
from cactus.reference.cactus_referenceTest import TestCase as referenceTest
from cactus.hal.cactus_halTest import TestCase as halTest
from cactus.api.allTests import TestCase as apiTest
from cactus.caf.allTests import TestCase as cafTest
from cactus.normalisation.cactus_normalisationTest import TestCase as normalisationTest
from cactus.progressive.allTests import allSuites as progressiveSuite
from cactus.shared.commonTest import TestCase as commonTest
from cactus.preprocessor.allTests import allSuites as preprocessorTest
def allSuites():
allTests = unittest.TestSuite()
allTests.addTests([unittest.makeSuite(i) for i in
[setupTest,
workflowTest,
evolverTest,
barTest,
phylogenyTest,
adjacenciesTest,
referenceTest,
apiTest,
normalisationTest,
halTest,
commonTest]] +
[progressiveSuite()])
if "SON_TRACE_DATASETS" in os.environ:
allTests.addTests([unittest.makeSuite(blastTest), preprocessorTest()])
return allTests
def main():
suite = allSuites()
runner = unittest.TextTestRunner(verbosity=2)
i = runner.run(suite)
return len(i.failures) + len(i.errors)
if __name__ == '__main__':
import sys
sys.exit(main())
|
Python
| 0
|
@@ -156,16 +156,57 @@
mport os
+%0Afrom threading import Thread%0Aimport time
%0A%0Afrom c
@@ -1213,16 +1213,203 @@
orTest%0A%0A
+def keepAlive():%0A %22%22%22Keep Travis tests from failing prematurely by outputting to stdout every few minutes.%22%22%22%0A while True:%0A time.sleep(240)%0A print %22Still working...%22%0A%0A
def allS
@@ -2108,16 +2108,16 @@
lTests%0A%0A
-
def main
@@ -2116,24 +2116,195 @@
def main():%0A
+ keepAliveThread = Thread(target=keepAlive)%0A # The keepalive thread will die when the main thread dies%0A keepAliveThread.daemon = True%0A keepAliveThread.start()%0A
suite =
|
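The daemon flag is what keeps this safe: daemon threads are torn down when the main thread exits, so the heartbeat can loop forever without blocking shutdown. Condensed form of the pattern (interval shortened for demonstration; the commit itself sleeps 240 seconds):

import time
from threading import Thread

def heartbeat():
    while True:
        time.sleep(2)
        print("Still working...")

t = Thread(target=heartbeat)
t.daemon = True   # dies with the main thread
t.start()
time.sleep(5)     # stands in for the real test run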
56dfbeb33a07661ce0cecb176143b6613bfef147
|
Add setter for option settings
|
pentai/gui/my_setting.py
|
pentai/gui/my_setting.py
|
from kivy.uix.gridlayout import GridLayout
from kivy.uix.checkbox import CheckBox
from kivy.uix.slider import Slider
from kivy.uix.label import Label
from kivy.clock import Clock
from kivy.properties import *
from kivy.uix.spinner import Spinner, SpinnerOption
from kivy.config import Config
import pentai.gui.scale as my
from pentai.base.defines import *
import datetime
class SmallLabel(Label):
pass
class TinyLabel(Label):
pass
class BigCheckBox(CheckBox):
pass
class MySetting(GridLayout):
text = StringProperty("Unset Text")
desc = StringProperty("Unset Desc")
key = StringProperty("Unset Key")
value = StringProperty("Unset Value")
def __init__(self, *args, **kwargs):
super(MySetting, self).__init__(*args, **kwargs)
self.cols = 1
self.size_hint_y = .3
# The properties haven't been initialised yet
Clock.schedule_once(self.setup, 0)
def get_config(self):
# TODO: Eliminate Hack
return self.parent.parent.parent.parent.config
class SwitchSetting(MySetting):
value = BooleanProperty(False)
def setup(self, ignored):
gl = GridLayout(rows=1)
self.add_widget(gl)
sl = SmallLabel(text=self.text)
gl.add_widget(sl)
self.sw = BigCheckBox()
self.sw.size_hint_x = .35
self.sw.align = "center"
gl.add_widget(self.sw)
self.load_value()
self.sw.bind(active=self.save_value)
dl = TinyLabel(text=self.desc)
self.add_widget(dl)
def load_value(self):
self.value = self.sw.active = self.get_config().getint('PentAI', self.key)
def save_value(self, switch, val):
self.get_config().set('PentAI', self.key, int(val))
self.get_config().write()
self.value = val
class MySpinnerOption(SpinnerOption):
def __init__(self, *args, **kwargs):
super(MySpinnerOption, self).__init__(*args, **kwargs)
self.font_size = my.dp(20)
class MySpinner(Spinner):
""" Workaround for Kivy bug with Spinner inside a ScrollView (via GridLayout) """
def _toggle_dropdown(self, *args):
now = self.get_now()
td = datetime.timedelta(days=0, seconds=.2)
if (not hasattr(self, "last_toggle_time")) or \
((now-self.last_toggle_time) > td):
self.last_toggle_time = now
return super(MySpinner, self)._toggle_dropdown(*args)
def _on_dropdown_select(self, *args):
self.last_toggle_time = self.get_now()
return super(MySpinner, self)._on_dropdown_select(*args)
def get_now(self):
now = datetime.datetime.utcnow()
return now
class OptionsSetting(MySetting):
values = ListProperty([])
def setup(self, ignored):
gl = GridLayout(rows=1)
self.add_widget(gl)
sl = SmallLabel(text=self.text)
sl.valign = "middle"
gl.add_widget(sl)
self.sp = sp = MySpinner()
sp.values = self.values
sp.valign = 'bottom'
sp.size_hint_x = .5
sp.font_size = my.dp(20)
sp.option_cls = MySpinnerOption
gl.add_widget(sp)
# Padding only.
l = Label()
l.size_hint_x = .05
gl.add_widget(l)
dl = TinyLabel(text=self.desc)
dl.size_hint_y = .8
self.add_widget(dl)
self.load_value()
self.sp.bind(text=self.save_value)
def load_value(self):
self.sp.text = self.get_config().get('PentAI', self.key)
def save_value(self, switch, val):
self.get_config().set('PentAI', self.key, val)
self.get_config().write()
self.value = val
class SliderSetting(MySetting):
value = NumericProperty()
min = NumericProperty()
max = NumericProperty()
step = NumericProperty()
display_factor = NumericProperty(1)
def __init__(self, *args, **kwargs):
super(SliderSetting, self).__init__(*args, **kwargs)
self.size_hint_y = .35
def setup(self, ignored):
sl = SmallLabel(text=self.text)
self.add_widget(sl)
gl = GridLayout(rows=1)
self.add_widget(gl)
l = Label(size_hint_x=0.05)
gl.add_widget(l)
self.slider = slider = Slider()
slider.min = self.min
slider.max = self.max
slider.step = self.step
gl.add_widget(slider)
self.display = Label()
self.display.size_hint_x = 0.2
gl.add_widget(self.display)
dl = TinyLabel(text=self.desc)
self.add_widget(dl)
self.slider.bind(value=self.save_value)
self.slider.bind(value=self.display_value)
# This call should not be necessary, Kivy bug (value 0 not being passed through bind)?
self.display_value()
self.load_value()
def load_value(self):
self.slider.value = self.get_config().getfloat('PentAI', self.key)
def save_value(self, switch, val):
self.get_config().set('PentAI', self.key, val)
self.get_config().write()
# Set value last so observers of self.value can use config.
self.value = val
def display_value(self, *unused):
v = self.slider.value
if self.display_factor == 100:
self.display.text = "%d%%" % int(100 * v)
else:
self.display.text = "%.1fs" % v
|
Python
| 0.000001
|
@@ -1816,24 +1816,25 @@
alue = val%0A%0A
+%0A
class MySpin
@@ -3059,17 +3059,17 @@
nt_x = .
-5
+6
%0A
@@ -3259,32 +3259,37 @@
get(l)%0A%0A
+self.
dl = TinyLabel(t
@@ -3311,16 +3311,21 @@
+self.
dl.size_
@@ -3352,32 +3352,37 @@
self.add_widget(
+self.
dl)%0A%0A sel
@@ -3540,32 +3540,60 @@
ntAI', self.key)
+%0A return self.sp.text
%0A%0A def save_v
@@ -3698,32 +3698,32 @@
onfig().write()%0A
-
self.val
@@ -3732,16 +3732,140 @@
= val%0A%0A
+ def set_desc(self, val):%0A self.dl.text = val%0A%0A def set_values(self, values):%0A self.sp.values = values%0A%0A
class Sl
|
a94bbac73a40f85e0239bbab72c0ffce5258f707
|
Update test_geocoding.py
|
_unittests/ut_data/test_geocoding.py
|
_unittests/ut_data/test_geocoding.py
|
# -*- coding: utf-8 -*-
"""
@brief test log(time=16s)
"""
import os
import unittest
import warnings
import pandas
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import add_missing_development_version, get_temp_folder, is_travis_or_appveyor, ExtTestCase
class TestGeocoding(ExtTestCase):
def setUp(self):
add_missing_development_version(["pyensae", "pymyinstall", "pyrsslocal"], __file__,
hide=__name__ == "__main__")
@unittest.skipIf(is_travis_or_appveyor() is not None, "no keys")
def test_geocoding(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
temp = get_temp_folder(__file__, "temp_geocoding")
from actuariat_python.data import geocode
data = os.path.join(os.path.abspath(
os.path.dirname(__file__)), "data", "bureau.txt")
df = pandas.read_csv(data, sep="\t", encoding="utf-8")
he = df.head(n=5)
every = os.path.join(temp, "every.csv")
# we retrieve an encrypted key
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
import keyring
bing_key = keyring.get_password("bing", "actuariat_python,key")
self.assertNotEmpty(bing_key)
fLOG(bing_key)
coders = ["Nominatim"]
if bing_key:
coders.append(("bing", bing_key))
fLOG("geocoding 1", len(he))
# test
res = geocode(he, save_every=every, every=1, index=False,
encoding="utf-8", coders=coders, fLOG=fLOG)
self.assertExists(every)
# fLOG(res)
out = os.path.join(temp, "geo.csv")
res.to_csv(out, sep="\t", encoding="utf-8", index=False)
res.to_excel(out + ".xlsx", index=False)
fLOG("geocoding 2", len(res))
res = geocode(he, save_every=every, every=1, index=False,
encoding="utf-8", coders=coders, fLOG=fLOG)
self.assertExists(every)
fLOG(res)
if __name__ == "__main__":
unittest.main()
|
Python
| 0.000001
|
@@ -152,16 +152,30 @@
ort fLOG
+, get_password
%0Afrom py
@@ -200,16 +200,22 @@
import
+(%0A
add_miss
@@ -278,16 +278,20 @@
ppveyor,
+%0A
ExtTest
@@ -294,16 +294,17 @@
TestCase
+)
%0A%0A%0Aclass
@@ -1130,140 +1130,8 @@
key%0A
- with warnings.catch_warnings():%0A warnings.simplefilter('ignore', DeprecationWarning)%0A import keyring%0A%0A
@@ -1149,16 +1149,8 @@
y =
-keyring.
get_
|
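keyring.get_password(service, username) is the real API being swapped out here for pyquickhelper's get_password wrapper; it returns the stored secret or None. Standalone form (the service/username strings below just mirror the test):

import keyring

secret = keyring.get_password("bing", "actuariat_python,key")
if secret is None:
    print("no credential stored for this service/username pair")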
3a29127f6d11c06c4cdba7c9e41ff64b0204830e
|
Update to 0.8.4 version
|
spec_cleaner/__init__.py
|
spec_cleaner/__init__.py
|
# vim: set ts=4 sw=4 et: coding=UTF-8
# Copyright (c) 2015, SUSE LINUX Products GmbH, Nuernberg, Germany
# All rights reserved.
# See COPYING for details.
import os
import sys
import argparse
from .rpmexception import RpmWrongArgs, RpmException
from .rpmcleaner import RpmSpecCleaner
__version__ = '0.8.3'
def process_args(argv):
"""
Process the parsed arguments and return the result
:param argv: passed arguments
"""
parser = argparse.ArgumentParser(prog='spec-cleaner',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Cleans the given spec file according to style guide and returns the result.')
# Make the -d, -i, and -o exclusive as we can do only one of those
output_group = parser.add_mutually_exclusive_group()
parser.add_argument('spec', metavar='SPEC', type=str,
help='spec file to beautify')
output_group.add_argument('-d', '--diff', action='store_true', default=False,
help='run the diff program to show differences between new and orginal specfile.')
parser.add_argument('--diff-prog', default='vimdiff',
help='specify the diff binary to call with diff option.')
parser.add_argument('-f', '--force', action='store_true', default=False,
help='overwrite the output file if already exist.')
output_group.add_argument('-i', '--inline', action='store_true', default=False,
help='inline the changes directly to the parsed file.')
parser.add_argument('-m', '--minimal', action='store_true', default=False,
help='run in minimal mode that does not do anything intrusive (ie. just sets the Copyright)')
parser.add_argument('--no-copyright', action='store_true', default=False,
help='do not include official SUSE copyright here and just keep what is present')
output_group.add_argument('-o', '--output', default='',
help='specify the output file for the cleaned spec content.')
parser.add_argument('-p', '--pkgconfig', action='store_true', default=False,
help='convert dependencies to their pkgconfig counterparts, requires bit more of cleanup in spec afterwards.')
parser.add_argument('-v', '--version', action='version', version=__version__,
help='show package version and exit')
# print help if there is no argument
if len(argv) < 1:
parser.print_help()
sys.exit(0)
options = parser.parse_args(args=argv)
# the spec must exist for us to do anything
if not os.path.exists(options.spec):
raise RpmWrongArgs('{0} does not exist.'.format(options.spec))
# the path for output must exist and the file must not be there unless
# force is specified
if options.output:
options.output = os.path.expanduser(options.output)
if not options.force and os.path.exists(options.output):
raise RpmWrongArgs('{0} already exists.'.format(options.output))
# convert options to dict
options_dict = {
'specfile': options.spec,
'output': options.output,
'pkgconfig': options.pkgconfig,
'inline': options.inline,
'diff': options.diff,
'diff_prog': options.diff_prog,
'minimal': options.minimal,
'no_copyright': options.no_copyright,
}
return options_dict
def main():
"""
Main function that calls argument parsing ensures their sanity
and then creates RpmSpecCleaner object that works with passed spec file.
"""
try:
options = process_args(sys.argv[1:])
except RpmWrongArgs as exception:
sys.stderr.write('ERROR: {0}\n'.format(exception))
return 1
try:
cleaner = RpmSpecCleaner(options)
cleaner.run()
except RpmException as exception:
sys.stderr.write('ERROR: {0}\n'.format(exception))
return 1
return 0
|
Python
| 0
|
@@ -305,9 +305,9 @@
0.8.
-3
+4
'%0A%0A%0A
|
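The -d/-i/-o exclusivity in process_args is enforced entirely by argparse's mutually exclusive group; passing two of them aborts parsing. Reduced demonstration:

import argparse

parser = argparse.ArgumentParser(prog='spec-cleaner')
group = parser.add_mutually_exclusive_group()
group.add_argument('-d', '--diff', action='store_true')
group.add_argument('-i', '--inline', action='store_true')
group.add_argument('-o', '--output', default='')

print(parser.parse_args(['-d']))   # Namespace(diff=True, inline=False, output='')
# parser.parse_args(['-d', '-i'])  # would exit: arguments not allowed together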
3449961b6d2fbfaaa44c773ef3a60b3d6386d00a
|
Add strain energy
|
peripydic/problem/dic.py
|
peripydic/problem/dic.py
|
# -*- coding: utf-8 -*-
#@author: ilyass.tabiai@polymtl.ca
#@author: rolland.delorme@polymtl.ca
#@author: patrick.diehl@polymtl.ca
from ..util import neighbor
import numpy as np
## Computes extension and force states out of displacement/nodes data obtained
# from the VIC3D CSV (DIC data)
class DIC_problem():
## Constructor
# Find neighbors for DIC data, computes the weighted function, then computes
# actual positions, extension states and force states
# @param deck Deck object containing data from the .yaml file
def __init__(self, deck):
## NeighborSearch
self.neighbors = neighbor.NeighborSearch(deck)
## Compute the weighted volume for each node in a vector.
self.weighted_function(deck)
## Actual position from DIC result
self.y = np.zeros((deck.num_nodes, deck.dim,2),dtype=np.float64)
self.y[:,:,0] = deck.geometry.nodes[:,:]
## Internal forces
self.force_int = np.zeros((deck.num_nodes, deck.dim,2),dtype=np.float64)
## Extension state
self.ext = np.zeros( ( deck.num_nodes, deck.num_nodes,2),dtype=np.float64 )
if deck.material_type == "Elastic":
from ..materials.elastic import Elastic_material
mat_class = Elastic_material( deck, self, deck.geometry.act )
self.update_force_data(mat_class)
self.update_ext_state_data(mat_class)
self.update_pos(deck.geometry.act)
## Computes the weights for each PD node
# @param deck Deck object containig data from the .yaml file
def weighted_function(self, deck):
## Weighted volumes vector
self.weighted_volume = np.zeros((deck.num_nodes),dtype=np.float64)
for i in range(0, deck.num_nodes):
index_x_family = self.neighbors.get_index_x_family(i)
for p in index_x_family:
X = deck.geometry.nodes[p,:] - deck.geometry.nodes[i,:]
self.weighted_volume[i] += deck.influence_function * (np.linalg.norm(X))**2 * deck.geometry.volumes[p]
## Records the force vector at each time step
# @param mat_class Material class object for the elastic/viscoelastic material models
def update_force_data(self, mat_class):
## Internal forces state
self.force_int[:,:, 1] = mat_class.f_int
## Records the ext_state vector at each time step
# @param mat_class Material class object for the elastic/viscoelastic material models
def update_ext_state_data(self, mat_class):
## Extension state
self.ext[:, :, 1] = mat_class.e
## Records the actual position vector at each time step
# @param act Actual position obtained from DIC data
def update_pos(self,act):
## Actual position state
self.y[:,:, 1] = act
|
Python
| 0.000007
|
@@ -1407,16 +1407,29 @@
t_class)
+%0A
%0A%0A
@@ -1465,17 +1465,69 @@
ry.act)%0A
+ self.strain_energy = mat_class.strain_energy
%0A
-
## C
@@ -1563,16 +1563,16 @@
PD node%0A
-
# @p
@@ -1593,32 +1593,33 @@
object containi
+n
g data from the
|
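weighted_function above computes the standard state-based peridynamics weighted volume m(x_i) = sum over neighbours p of w * |x_p - x_i|^2 * V_p. A compact check with made-up nodes (all data below is hypothetical):

import numpy as np

nodes = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
volumes = np.array([1.0, 1.0, 1.0])
influence = 1.0  # stands in for deck.influence_function

def weighted_volume(i, family):
    xi = nodes[i]
    return sum(influence * np.linalg.norm(nodes[p] - xi) ** 2 * volumes[p]
               for p in family)

print(weighted_volume(0, [1, 2]))  # 1.0 + 1.0 = 2.0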
6ed431ad8c41e678ed8362054a1d6404b9b48bab
|
Apply MyPy rule.
|
compile_api_doc.py
|
compile_api_doc.py
|
# -*- coding: utf-8 -*-

"""Compile documentation from modules."""

__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2016-2020"
__license__ = "AGPL"
__email__ = "pyslvs@gmail.com"

from typing import get_type_hints, List, Iterator, Iterable, Any
from types import ModuleType
from os import walk
from os.path import join
from importlib import import_module
from pkgutil import walk_packages
from textwrap import dedent
from inspect import isfunction, isclass, getfullargspec, FullArgSpec


class StandardModule(ModuleType):
    __all__: List[str] = ...
    __path__: List[str] = ...


def get_name(obj: Any) -> str:
    """Get a real name from an object."""
    if hasattr(obj, '__name__'):
        return obj.__name__ if obj.__module__ == 'builtins' else obj.__module__ + '.' + obj.__name__
    else:
        return repr(obj)


def public(names: Iterable[str]) -> Iterator[str]:
    """Yield public names only."""
    yield from (name for name in names if not name.startswith('_'))


def doc_dedent(text: str) -> str:
    """Remove first indent of the docstring."""
    two_parts = text.split('\n', maxsplit=1)
    if len(two_parts) == 2:
        return two_parts[0] + '\n' + dedent(two_parts[1])
    else:
        return text


def load_stubs(module: StandardModule) -> None:
    """Load all pyi files."""
    modules = []
    for root, _, files in walk(module.__path__[0]):
        for file in files:
            if not file.endswith('.pyi'):
                continue
            with open(join(root, file), 'r', encoding='utf-8') as f:
                code = f.read()
            code_list = code.splitlines()
            for line in reversed(range(len(code_list))):
                if code_list[line].startswith("from ."):
                    code_list.pop(line)
            code = '\n'.join(code_list)
            modules.append(code)
    while modules:
        code = modules.pop()
        try:
            exec(code, module.__dict__)
        except NameError:
            modules.insert(0, code)
        except Exception as e:
            print(code)
            raise RuntimeError from e


def table_row(items: Iterable[str], space: bool = True) -> str:
    """Make a row of a Markdown table."""
    s = " " if space else ""
    return '|' + s + (s + '|' + s).join(items) + s + '|\n'


def table_line(items: Iterable[str]) -> str:
    """Make a line of a Markdown table."""
    return table_row((':' + '-' * (len(s) if len(s) > 3 else 3) + ':' for s in items), False)


def make_table(args: FullArgSpec) -> str:
    """Make an argument table for function or method."""
    args_doc = []
    type_doc = []
    all_args = []
    # Positional arguments
    all_args.extend(args.args or [])
    # The name of '*'
    if args.varargs is not None:
        new_name = f'**{args.varargs}'
        args.annotations[new_name] = args.annotations[args.varargs]
        all_args.append(new_name)
    elif args.kwonlyargs:
        all_args.append('*')
    # Keyword only arguments
    all_args.extend(args.kwonlyargs or [])
    # The name of '**'
    if args.varkw is not None:
        new_name = f'**{args.varkw}'
        args.annotations[new_name] = args.annotations[args.varkw]
        all_args.append(new_name)
    all_args.append('return')
    for arg in all_args:  # type: str
        args_doc.append(arg)
        if arg in args.annotations:
            type_doc.append(get_name(args.annotations[arg]))
        else:
            type_doc.append(" ")
    doc = table_row(args_doc) + table_line(args_doc) + table_row(type_doc)
    df = []
    if args.defaults is not None:
        df.extend([" "] * (len(args.args) - len(args.defaults)))
        df.extend(args.defaults)
    if args.kwonlydefaults is not None:
        df.extend(args.kwonlydefaults.get(arg, " ") for arg in args.kwonlyargs)
    if df:
        df.append(" ")
        doc += table_row(f"{v}" for v in df)
    doc += '\n'
    return doc


def switch_types(parent: Any, name: str, level: int, prefix: str = "") -> str:
    """Generate docstring by type."""
    obj = getattr(parent, name)
    doc = '#' * level + " "
    if prefix:
        doc += f"{prefix}."
    doc += f"{name}"
    sub_doc = []
    if isfunction(obj):
        doc += "()\n\n" + make_table(getfullargspec(obj))
    elif isclass(obj):
        doc += f"\n\nInherited from `{get_name(obj.__mro__[1])}`.\n\n"
        hints = get_type_hints(obj)
        if hints:
            title_doc, type_doc = zip(*hints.items())
            doc += (table_row(title_doc) + table_line(title_doc)
                    + table_row(get_name(v) for v in type_doc) + '\n')
        for attr_name in public(dir(obj)):
            sub_doc.append(switch_types(obj, attr_name, level + 1, name))
    else:
        doc += '\n\n'
    doc += doc_dedent(obj.__doc__ or "")
    if sub_doc:
        doc += '\n\n' + '\n\n'.join(sub_doc)
    return doc


def find_objs(module: StandardModule) -> Iterator[str]:
    """Find all names and output doc."""
    if not hasattr(module, '__all__'):
        return
    load_stubs(module)
    for name in public(module.__all__):
        yield switch_types(module, name, 3).rstrip()


def gen_api(root_name: str) -> None:
    doc = ""
    modules: List[StandardModule] = [import_module(root_name)]
    root_path = modules[0].__path__
    for _, name, _ in walk_packages(root_path, root_name + '.'):  # type: str
        modules.append(import_module(name))
    for m in modules:
        doc += '\n\n'.join(find_objs(m))
    print(doc)


if __name__ == '__main__':
    gen_api('pyslvs')
    gen_api('python_solvespace')
|
Python
| 0
|
@@ -546,22 +546,16 @@
ist%5Bstr%5D
- = ...
%0A __p
@@ -574,14 +574,8 @@
str%5D
- = ...
%0A%0A%0Ad
|
9485b8d16bc41f20de0dbce0c4bc253d6dcca206
|
Fix file size type in EditingRevisionFileSchema
|
indico/modules/events/editing/schemas.py
|
indico/modules/events/editing/schemas.py
|
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.

from __future__ import unicode_literals

from markupsafe import escape
from marshmallow import fields, post_dump

from indico.core.marshmallow import mm
from indico.modules.events.editing.models.comments import EditingRevisionComment
from indico.modules.events.editing.models.editable import Editable
from indico.modules.events.editing.models.file_types import EditingFileType
from indico.modules.events.editing.models.revision_files import EditingRevisionFile
from indico.modules.events.editing.models.revisions import EditingRevision
from indico.modules.events.editing.models.tags import EditingTag
from indico.modules.users.schemas import UserSchema


class EditingFileTypeSchema(mm.ModelSchema):
    class Meta:
        model = EditingFileType
        fields = ('id', 'name', 'extensions', 'allow_multiple_files', 'required', 'publishable')


class EditingTagSchema(mm.ModelSchema):
    class Meta:
        model = EditingTag
        fields = ('id', 'name', 'color', 'system')

    @post_dump(pass_many=True)
    def convert_to_dict(self, data, many, **kwargs):
        if many:
            data = {x['id']: x for x in data}
        return data


class EditingRevisionFileSchema(mm.ModelSchema):
    class Meta:
        model = EditingRevisionFile
        fields = ('uuid', 'filename', 'size', 'content_type', 'file_type', 'download_url')

    uuid = fields.String(attribute='file.uuid')
    filename = fields.String(attribute='file.filename')
    size = fields.String(attribute='file.size')
    content_type = fields.String(attribute='file.content_type')
    download_url = fields.Constant('#')  # TODO: point to an endpoint that allows downloading paper files


class EditingRevisionCommentSchema(mm.ModelSchema):
    class Meta:
        model = EditingRevisionComment
        fields = ('id', 'user', 'created_dt', 'modified_dt', 'internal', 'system', 'text', 'html')

    user = fields.Nested(UserSchema, only=('id', 'avatar_bg_color', 'full_name'))
    html = fields.Function(lambda comment: escape(comment.text))


# TODO: filter out internal comments depending on who's viewing
class EditingRevisionSchema(mm.ModelSchema):
    class Meta:
        model = EditingRevision
        fields = ('id', 'created_dt', 'submitter', 'editor', 'files', 'comment', 'comment_html', 'comments',
                  'initial_state', 'final_state', 'tags')

    comment_html = fields.Function(lambda rev: escape(rev.comment))
    submitter = fields.Nested(UserSchema, only=('id', 'avatar_bg_color', 'full_name'))
    editor = fields.Nested(UserSchema, only=('id', 'avatar_bg_color', 'full_name'))
    files = fields.List(fields.Nested(EditingRevisionFileSchema))
    comments = fields.List(fields.Nested(EditingRevisionCommentSchema))


class EditableSchema(mm.ModelSchema):
    class Meta:
        model = Editable
        fields = ('id', 'type', 'editor', 'revisions')

    editor = fields.Nested(UserSchema, only=('id', 'avatar_bg_color', 'full_name'))
    revisions = fields.List(fields.Nested(EditingRevisionSchema))
|
Python
| 0.000001
|
@@ -1665,38 +1665,35 @@
size = fields.
-String
+Int
(attribute='file
|
9ca7135ac70fdddff3d193dc34692205fb5d6339
|
Update the editable service endpoint
|
indico/modules/events/editing/service.py
|
indico/modules/events/editing/service.py
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.

from __future__ import unicode_literals

import requests
from werkzeug.urls import url_parse

import indico
from indico.core.config import config
from indico.modules.events.editing import logger
from indico.modules.events.editing.models.editable import EditableType
from indico.modules.events.editing.schemas import EditingRevisionFileSchema
from indico.modules.events.editing.settings import editing_settings
from indico.util.caching import memoize_redis
from indico.util.i18n import _
from indico.web.flask.util import url_for


class ServiceRequestFailed(Exception):
    def __init__(self, exc):
        error = None
        if isinstance(exc, requests.RequestException) and exc.response is not None:
            try:
                error = exc.response.json()['error']
            except (ValueError, KeyError):
                # not json or not error field
                error = None
        super(ServiceRequestFailed, self).__init__(error or unicode(exc))


@memoize_redis(30)
def check_service_url(url):
    try:
        resp = requests.get(url + '/info', allow_redirects=False)
        resp.raise_for_status()
        if resp.status_code != 200:
            raise requests.HTTPError('Unexpected status code: {}'.format(resp.status_code), response=resp)
        info = resp.json()
    except requests.ConnectionError as exc:
        return {'info': None, 'error': _('Connection failed')}
    except requests.RequestException as exc:
        return {'info': None, 'error': unicode(ServiceRequestFailed(exc))}
    if not all(x in info for x in ('name', 'version')):
        return {'error': _('Invalid response')}
    return {'error': None, 'info': info}


def _build_url(event, path):
    return editing_settings.get(event, 'service_url') + path


def _get_headers(event, include_token=True):
    headers = {'Accept': 'application/json',
               'User-Agent': 'Indico/{}'.format(indico.__version__)}
    if include_token:
        headers['Authorization'] = 'Bearer {}'.format(editing_settings.get(event, 'service_token'))
    return headers


def make_event_identifier(event):
    data = url_parse(config.BASE_URL)
    parts = data.netloc.split('.')
    if data.path:
        parts += data.path.split('/')
    return '{}-{}'.format('-'.join(parts), event.id)


def _get_event_identifier(event):
    identifier = editing_settings.get(event, 'service_event_identifier')
    assert identifier
    return identifier


def service_handle_enabled(event):
    data = {
        'title': event.title,
        'url': event.external_url,
        'token': editing_settings.get(event, 'service_token'),
        'endpoints': {
            'tags': {
                'create': url_for('.api_create_tag', event, _external=True),
                'list': url_for('.api_tags', event, _external=True)
            },
            'editable_types': url_for('.api_enabled_editable_types', event, _external=True),
            'file_types': {
                t.name: {
                    'create': url_for('.api_add_file_type', event, type=t.name, _external=True),
                    'list': url_for('.api_file_types', event, type=t.name, _external=True),
                } for t in EditableType
            }
        }
    }
    try:
        resp = requests.put(_build_url(event, '/event/{}'.format(_get_event_identifier(event))),
                            headers=_get_headers(event, include_token=False), json=data)
        resp.raise_for_status()
    except requests.RequestException as exc:
        logger.exception('Registering event with service failed')
        raise ServiceRequestFailed(exc)


def service_handle_disconnected(event):
    try:
        resp = requests.delete(_build_url(event, '/event/{}'.format(_get_event_identifier(event))),
                               headers=_get_headers(event))
        resp.raise_for_status()
    except requests.RequestException as exc:
        logger.exception('Disconnecting event from service failed')
        raise ServiceRequestFailed(exc)


def service_get_status(event):
    try:
        resp = requests.get(_build_url(event, '/event/{}'.format(_get_event_identifier(event))),
                            headers=_get_headers(event))
        resp.raise_for_status()
    except requests.ConnectionError as exc:
        return {'status': None, 'error': _('Connection failed')}
    except requests.RequestException as exc:
        return {'status': None, 'error': unicode(ServiceRequestFailed(exc))}
    return {'status': resp.json(), 'error': None}


def service_handle_new_editable(editable):
    revision = editable.revisions[-1]
    data = {
        'files': EditingRevisionFileSchema().dump(revision.files, many=True),
        'endpoints': {
            'revisions': {
                'replace': url_for('.api_replace_revision', revision, _external=True)
            },
            'file_upload': url_for('.api_upload', editable, _external=True)
        }
    }
    try:
        path = '/event/{}/contributions/{}/editing/{}'.format(
            _get_event_identifier(editable.event),
            editable.contribution_id,
            editable.type.name
        )
        resp = requests.put(_build_url(editable.event, path), headers=_get_headers(editable.event), json=data)
        resp.raise_for_status()
    except requests.RequestException as exc:
        logger.exception('Failed calling listener for editable')
        raise ServiceRequestFailed(exc)
|
Python
| 0.000001
|
@@ -5164,32 +5164,19 @@
/%7B%7D/
-contributions/%7B%7D/editing
+editable/%7B%7D
/%7B%7D'
@@ -5260,23 +5260,17 @@
ble.
-contribution_id
+type.name
,%0A
@@ -5288,25 +5288,32 @@
ditable.
-type.name
+contribution_id,
%0A
|
5a532efd860ad764f11412bf4cde51e00068c08f
|
Test deleting metadata
|
spiff/inventory/tests.py
|
spiff/inventory/tests.py
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from spiff.api.tests import APITestMixin, withPermission, withLogin, withoutPermission
import models
class ResourceTestMixin(TestCase):
def setupResource(self, name='Resource'):
resource = models.Resource.objects.create(
name='Resource',
trainable=True,
)
if not hasattr(self, 'resource'):
self.resource = resource
return resource
class ResourceMetadataAPITest(APITestMixin, ResourceTestMixin):
def setUp(self):
self.setupAPI()
self.setupResource()
def addMeta(self, resource, name, value, type=models.META_TYPES[0][0]):
meta = models.Metadata.objects.create(
resource=resource,
name=name,
value=value,
type=type
)
return meta
def getMeta(self, resource=None):
if resource is None:
resource = self.resource
return self.getAPI('/v1/resource/%s/metadata/'%(resource.id))
@withPermission('inventory.read_resource')
def testGetBlankMeta(self):
meta = self.getMeta()
self.assertTrue(len(meta['objects']) == 0)
@withPermission('inventory.read_resource')
@withPermission('inventory.read_metadata')
def testGetSingleMeta(self):
self.addMeta(self.resource, 'meta-test', 'meta-test-value')
meta = self.getMeta()
self.assertEqual(len(meta['objects']), 1)
self.assertEqual(meta['objects'][0]['name'], 'meta-test')
self.assertEqual(meta['objects'][0]['value'], 'meta-test-value')
def testUnauthedCreateMeta(self):
self.postAPI('/v1/metadata/',{
'resource': '/v1/resource/%s/'%(self.resource.id),
'name': 'api-meta',
'value': 'api-meta-test',
'type': 0
}, status=401)
@withoutPermission('inventory.create_metadata')
@withPermission('inventory.read_resource')
def testUnpermissionedCreateMeta(self):
meta = self.getMeta()
self.assertEqual(len(meta['objects']), 0)
self.postAPI('/v1/metadata/',{
'resource': '/v1/resource/%s/'%(self.resource.id),
'name': 'api-meta',
'value': 'api-meta-test',
'type': 0
}, status=401)
@withPermission('inventory.create_metadata')
@withPermission('inventory.read_resource')
@withPermission('inventory.read_metadata')
def testCreateMeta(self):
meta = self.getMeta()
self.assertEqual(len(meta['objects']), 0)
self.postAPI('/v1/metadata/',{
'resource': '/v1/resource/%s/'%(self.resource.id),
'name': 'api-meta',
'value': 'api-meta-test',
'type': 0
})
meta = self.getMeta()
self.assertEqual(len(meta['objects']), 1)
self.assertEqual(meta['objects'][0]['name'], 'api-meta')
self.assertEqual(meta['objects'][0]['value'], 'api-meta-test')
@withPermission('inventory.read_resource')
@withPermission('inventory.create_metadata')
@withPermission('inventory.read_metadata')
@withPermission('inventory.update_metadata')
def testUpdateMeta(self):
self.postAPI('/v1/metadata/', {
'resource': '/v1/resource/%s/'%(self.resource.id),
'name': 'api-meta',
'value': 'api-meta-test',
'type': 0
})
meta = self.getMeta()
id = meta['objects'][0]['id']
self.patchAPI('/v1/metadata/%s/'%id, {
'value': 'api-meta-test-updated',
})
meta = self.getMeta()
self.assertEqual(len(meta['objects']), 1)
self.assertEqual(meta['objects'][0]['name'], 'api-meta')
self.assertEqual(meta['objects'][0]['value'], 'api-meta-test-updated')
self.assertEqual(meta['objects'][0]['id'], id)
|
Python
| 0.000001
|
@@ -1623,16 +1623,456 @@
alue')%0A%0A
+ @withPermission('inventory.read_resource')%0A @withPermission('inventory.read_metadata')%0A @withPermission('inventory.delete_metadata')%0A def testDeleteMeta(self):%0A self.addMeta(self.resource, 'meta-test', 'meta-test-value')%0A meta = self.getMeta()%0A self.assertEqual(len(meta%5B'objects'%5D), 1)%0A self.deleteAPI('/v1/metadata/%25s/'%25meta%5B'objects'%5D%5B0%5D%5B'id'%5D)%0A meta = self.getMeta()%0A self.assertEqual(len(meta%5B'objects'%5D), 0)%0A%0A
def te
|
7955a13caae162730429d8792ea1d4d48398e548
|
change permissions on __main__.py
|
pimat_server/__main__.py
|
pimat_server/__main__.py
|
#!/usr/bin/python
import datetime
import logging
import signal
import sys
import time
import Adafruit_DHT
import RPi.GPIO as GPIO
import configparser
from scheduler import add_schedule, remove_all
from relays import Relays

GPIO.setmode(GPIO.BCM)

# define the pin that goes to the circuit
pin_to_circuit = 27
dht_pin = 17


def rc_time(pin_to_circuit):
    count = 0

    # Output on the pin for
    GPIO.setup(pin_to_circuit, GPIO.OUT)
    GPIO.output(pin_to_circuit, GPIO.LOW)
    time.sleep(0.1)

    # Change the pin back to input
    GPIO.setup(pin_to_circuit, GPIO.IN)

    # Count until the pin goes high
    while GPIO.input(pin_to_circuit) == GPIO.LOW:
        count += 1

    return count


def get_now():
    # get the current date and time as a string
    return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')


def sigterm_handler(_signo, _stack_frame):
    # When sysvinit sends the TERM signal, cleanup before exiting.
    print("[" + get_now() + "] received signal {}, exiting...".format(_signo))
    GPIO.cleanup()
    remove_all()
    sys.exit(0)


signal.signal(signal.SIGTERM, sigterm_handler)


def main():
    relay_config = configparser.ConfigParser()
    relay_config.read('/opt/pimat/relays.ini')

    log = logging.getLogger()
    handler = logging.FileHandler('/var/log/pimat/sensors.log')
    formatter = logging.Formatter('[%(levelname)s] [%(asctime)-15s] [PID: %(process)d] [%(name)s] %(message)s')
    handler.setFormatter(formatter)
    log.addHandler(handler)
    log.setLevel(logging.DEBUG)

    # Clean cron
    remove_all()

    for relay in relay_config['pins']:
        for pin in relay_config['pins'][relay]:
            relay_object = Relays(relay, pin)
            relay_object.set_mode()
            time.sleep(1)

            if relay_config['status'][relay] == '1':
                relay_object.start()
            elif relay_config['status'][relay] == '0':
                relay_object.stop()
            else:
                log.error('Wrong status on ini file must be 1 or 0')
                sys.exit(1)

    for relay in relay_config['schedules']:
        schedule = relay_config['schedules'][relay]
        start_time, stop_time = schedule.split('-')
        add_schedule(relay, start_time, stop_time)

    try:
        while True:
            total = 0
            for x in range(0, 9):
                total += rc_time(pin_to_circuit)
            average = total/10
            light = (1/float(average)) * 10000
            humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.AM2302, dht_pin)

            if humidity is not None and temperature is not None and light is not None:
                log.info('Temp={0:0.1f}* Humidity={1:0.1f}% Light={2:0.2f}'.format(temperature, humidity, light))
            else:
                log.error('Failed to get reading. Try again!')
                GPIO.cleanup()
                remove_all()
                raise Exception('Failed to get reading')

            time.sleep(120)

    except KeyboardInterrupt:
        pass

    finally:
        GPIO.cleanup()
        remove_all()


if __name__ == '__main__':
    main()
|
Python
| 0.000022
| |
82acbc312b36bfdf4e1a0a1c26019d2c5879e036
|
Fix context processor settings to support Django 1.7
|
nodeconductor/server/admin/settings.py
|
nodeconductor/server/admin/settings.py
|
ADMIN_INSTALLED_APPS = (
    'fluent_dashboard',
    'admin_tools',
    'admin_tools.theming',
    'admin_tools.menu',
    'admin_tools.dashboard',
    'django.contrib.admin',
)

# FIXME: Move generic (not related to admin) context processors to base_settings
ADMIN_TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.contrib.messages.context_processors.messages',
    'django.core.context_processors.request',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.core.context_processors.tz',
    'django.template.context_processors.request',  # required by django-admin-tools >= 0.7.0
)

ADMIN_TEMPLATE_LOADERS = (
    'admin_tools.template_loaders.Loader',  # required by django-admin-tools >= 0.7.0
)

FLUENT_DASHBOARD_APP_ICONS = {
    'structure/customer': 'system-users.png',
    'structure/servicesettings': 'preferences-other.png',
    'structure/project': 'folder.png',
    'structure/projectgroup': 'folder-bookmark.png',
    'backup/backup': 'document-export-table.png',
    'backup/backupschedule': 'view-resource-calendar.png',
    'billing/invoice': 'help-donate.png',
    'cost_tracking/pricelistitem': 'view-bank-account.png',
    'cost_tracking/priceestimate': 'feed-subscribe.png',
    'cost_tracking/defaultpricelistitem': 'view-calendar-list.png'
}

ADMIN_TOOLS_INDEX_DASHBOARD = 'nodeconductor.server.admin.dashboard.CustomIndexDashboard'
ADMIN_TOOLS_APP_INDEX_DASHBOARD = 'nodeconductor.server.admin.dashboard.CustomAppIndexDashboard'
ADMIN_TOOLS_MENU = 'nodeconductor.server.admin.menu.CustomMenu'

# Should be specified, otherwise all Applications dashboard will be included.
FLUENT_DASHBOARD_APP_GROUPS = ()
|
Python
| 0.000004
|
@@ -253,16 +253,122 @@
ettings%0A
+# Note: replace 'django.core.context_processors' with 'django.template.context_processors' in Django 1.8+%0A
ADMIN_TE
@@ -511,54 +511,8 @@
s',%0A
- 'django.core.context_processors.request',%0A
@@ -678,16 +678,60 @@
ors.
-static',
+request', # required by django-admin-tools %3E= 0.7.0
%0A
@@ -767,10 +767,14 @@
ors.
-tz
+static
',%0A
@@ -776,39 +776,35 @@
c',%0A 'django.
-templat
+cor
e.context_proces
@@ -812,60 +812,12 @@
ors.
-request', # required by django-admin-tools %3E= 0.7.0
+tz',
%0A)%0A%0A
|
80decc9f1b645579b47a08eb2dcc9707acfb8922
|
Add draw on each slice
|
hw4.py
|
hw4.py
|
import sys
import glob
import os.path

import cv2
import numpy as np


def bresenham(start, end):
    # Setup initial conditions
    x1, y1 = start
    x2, y2 = end
    dx = x2 - x1
    dy = y2 - y1

    # Determine how steep the line is
    is_steep = abs(dy) > abs(dx)

    # Rotate line
    if is_steep:
        x1, y1 = y1, x1
        x2, y2 = y2, x2

    # Swap start and end points if necessary and store swap state
    swapped = False
    if x1 > x2:
        x1, x2 = x2, x1
        y1, y2 = y2, y1
        swapped = True

    # Recalculate differentials
    dx = x2 - x1
    dy = y2 - y1

    # Calculate error
    error = int(dx / 2.0)
    ystep = 1 if y1 < y2 else -1

    # Iterate over bounding box generating points between start and end
    y = y1
    points = []
    for x in range(x1, x2 + 1):
        coord = (y, x) if is_steep else (x, y)
        points.append(coord)
        error -= abs(dy)
        if error < 0:
            y += ystep
            error += dx

    # Reverse the list if the coordinates were swapped
    if swapped:
        points.reverse()
    return points


def read_image_dir(image_dir, downsample_factor=1):
    fnames = []
    for f in os.listdir(image_dir):
        if f.lower().endswith('.png'):
            fnames.append(os.path.join(image_dir, f))
    mats = []
    #for f in fnames[::downsample_factor]:
    for f in fnames:
        mats.append(cv2.imread(f, -1)[::downsample_factor, ::downsample_factor])
    return np.array(mats)


def draw_img_with_target_line(img, start, end):
    #img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    img = cv2.line(img, start, end, (0, 0, 255))
    cv2.imshow("image", img)
    cv2.waitKey()


def to_uint8(m):
    return np.clip(m, 0, 255).astype(np.uint8)


def float_to_uint8(m):
    return ((m / m.max()) * 255).astype(np.uint8)


def main():
    if len(sys.argv) < 6:
        print("Usage:")
        print(" {} images-dir startx starty endx endy".format(sys.argv[0]))
        sys.exit(1)

    downsample = 1
    images_dir = sys.argv[1]
    startx, starty, endx, endy = list(map(lambda x: int(x) // downsample, sys.argv[2:]))
    stack = read_image_dir(images_dir, downsample_factor=downsample)
    line_idxs = bresenham((startx, starty), (endx, endy))

    # Calculate median image
    bg = np.median(stack, axis=0)

    # Preprocess each input image by thresholding away low 5% of the image
    fgs = []
    for i in stack:
        gray = cv2.cvtColor(to_uint8(np.abs(i - bg)), cv2.COLOR_BGR2GRAY)
        _, gray = cv2.threshold(gray, int(0.05 * 255), 255, cv2.THRESH_BINARY)
        fgs.append(to_uint8(gray))

    # Create sagittal image
    slice_img = np.zeros((len(stack) * 2, len(line_idxs)), dtype=np.uint8)
    for i, fg in enumerate(fgs):
        for j, (x, y) in enumerate(line_idxs):
            slice_img[2 * i, j] = fg[y, x]
            slice_img[2 * i + 1, j] = fg[y, x]

    # Median blur
    median_ksize = int(0.05 * len(line_idxs))
    if (median_ksize % 2 == 0):
        median_ksize += 1
    slice_img = cv2.medianBlur(slice_img, median_ksize)

    # Morphological close to stitch together blobs
    morph_ksize = int(0.025 * len(line_idxs))
    if (morph_ksize % 2 == 0):
        morph_ksize += 1
    ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (morph_ksize, morph_ksize))
    slice_img = cv2.morphologyEx(slice_img, cv2.MORPH_CLOSE, ellipse)

    # Connected components - count how many blobs
    count, _ = cv2.connectedComponents(slice_img, 8)
    print(count - 1)

    cv2.namedWindow("slice image morph", cv2.WINDOW_NORMAL)
    cv2.imshow("slice image morph", slice_img)
    cv2.waitKey()


main()
|
Python
| 0.000001
|
@@ -1530,17 +1530,16 @@
d):%0A
-#
img = cv
@@ -2485,16 +2485,88 @@
R2GRAY)%0A
+ draw_img_with_target_line(gray, (startx, starty), (endx, endy))%0A
|
cc9533798e00bb375422a559c750b7031e0f9e88
|
Add socket url for experiments
|
polyaxon_client/experiment.py
|
polyaxon_client/experiment.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function

from polyaxon_schemas.experiment import (
    ExperimentConfig,
    ExperimentJobConfig,
    ExperimentStatusConfig,
    ExperimentJobStatusConfig
)

from polyaxon_client.base import PolyaxonClient
from polyaxon_client.exceptions import PolyaxonException


class ExperimentClient(PolyaxonClient):
    """Client to get experiments from the server"""
    ENDPOINT = "/experiments"

    def list_experiments(self, page=1):
        """This gets all experiments visible to the user from the server."""
        try:
            response = self.get(self._get_url(), params=self.get_page(page=page))
            experiments_dict = response.json()
            return [ExperimentConfig.from_dict(experiment)
                    for experiment in experiments_dict.get("results", [])]
        except PolyaxonException as e:
            self.handle_exception(e=e, log_message='Error while retrieving experiments')
            return []

    def get_experiment(self, experiment_uuid):
        request_url = self._build_url(self._get_url(), experiment_uuid)
        try:
            response = self.get(request_url)
            return ExperimentConfig.from_dict(response.json())
        except PolyaxonException as e:
            self.handle_exception(e=e, log_message='Error while retrieving experiment')
            return None

    def update_experiment(self, experiment_uuid, patch_dict):
        request_url = self._build_url(self._get_url(), experiment_uuid)
        try:
            response = self.patch(request_url, json=patch_dict)
            return ExperimentConfig.from_dict(response.json())
        except PolyaxonException as e:
            self.handle_exception(e=e, log_message='Error while updating experiment')
            return None

    def delete_experiment(self, experiment_uuid):
        request_url = self._build_url(self._get_url(), experiment_uuid)
        try:
            response = self.delete(request_url)
            return response
        except PolyaxonException as e:
            self.handle_exception(e=e, log_message='Error while deleting experiment')
            return None

    def get_status(self, experiment_uuid):
        request_url = self._build_url(self._get_url(), experiment_uuid, 'status')
        try:
            response = self.get(request_url)
            return ExperimentStatusConfig.from_dict(response.json())
        except PolyaxonException as e:
            self.handle_exception(e=e, log_message='Error while retrieving experiment status')
            return None

    def list_jobs(self, experiment_uuid, page=1):
        """Fetch list of jobs related to this experiment."""
        request_url = self._build_url(self._get_url(), experiment_uuid, 'jobs')
        try:
            response = self.get(request_url, params=self.get_page(page=page))
            jobs = response.json()
            return [ExperimentJobConfig.from_dict(job) for job in jobs.get("results", [])]
        except PolyaxonException as e:
            self.handle_exception(e=e, log_message='Error while retrieving jobs')
            return []

    def get_job_status(self, experiment_uuid, job_uuid):
        request_url = self._build_url(self._get_url(), experiment_uuid, 'jobs', job_uuid, 'status')
        try:
            response = self.get(request_url)
            return ExperimentJobStatusConfig.from_dict(response.json())
        except PolyaxonException as e:
            self.handle_exception(e=e, log_message='Error while retrieving job status')
            return []

    def restart(self, experiment_uuid):
        """Restart an experiment."""
        request_url = self._build_url(self._get_url(), experiment_uuid, 'restart')
        try:
            response = self.post(request_url)
            return ExperimentConfig.from_dict(response.json())
        except PolyaxonException as e:
            self.handle_exception(e=e, log_message='Error while restarting experiment')
            return None
|
Python
| 0
|
@@ -631,24 +631,29 @@
t(self._get_
+http_
url(), param
@@ -672,32 +672,32 @@
age(page=page))%0A
-
expe
@@ -1099,32 +1099,37 @@
d_url(self._get_
+http_
url(), experimen
@@ -1511,32 +1511,37 @@
d_url(self._get_
+http_
url(), experimen
@@ -1928,32 +1928,37 @@
d_url(self._get_
+http_
url(), experimen
@@ -2287,32 +2287,37 @@
d_url(self._get_
+http_
url(), experimen
@@ -2771,32 +2771,37 @@
d_url(self._get_
+http_
url(), experimen
@@ -3287,49 +3287,206 @@
get_
+http_
url(),
- experiment_uuid, 'jobs', job_uuid,
+%0A experiment_uuid,%0A 'jobs',%0A job_uuid,%0A
'st
@@ -3898,16 +3898,21 @@
lf._get_
+http_
url(), e
@@ -4180,32 +4180,32 @@
ng experiment')%0A
-
retu
@@ -4192,28 +4192,803 @@
t')%0A return None%0A
+%0A def resources(self, experiment_uuid, message_handler=None):%0A %22%22%22Streams experiments resources using websockets.%0A%0A message_handler: handles the messages received from server.%0A e.g. def f(x): print(x)%0A %22%22%22%0A request_url = self._build_url(self._get_ws_url(), experiment_uuid, 'resources')%0A self.socket(request_url, message_handler=message_handler)%0A%0A def logs(self, experiment_uuid, message_handler=None):%0A %22%22%22Streams experiments logs using websockets.%0A%0A message_handler: handles the messages received from server.%0A e.g. def f(x): print(x)%0A %22%22%22%0A request_url = self._build_url(self._get_ws_url(), experiment_uuid, 'logs')%0A self.socket(request_url, message_handler=message_handler)%0A
|
73b8a715aceec43cb80724d1e076a05ff7aaf71d
|
Define commits_average property
|
app/models.py
|
app/models.py
|
from datetime import datetime

from app import slack, redis, app
from app.redis import RedisModel


class Channel(RedisModel):
    __prefix__ = '#'

    @staticmethod
    def load_from_slack():
        """Update channel list from slack"""
        slack_response = slack.channels.list()
        if not slack_response.successful:
            app.logger.error('Error loading channel list. Server returned %s' % slack_response.error)
            return False

        # Add channel to list and save
        for channel in slack_response.body.get('channels', []):
            name = channel.get('name')
            entity = Channel(channel.get('name'))
            entity.slack_id = channel.get('id')

        return True


class User(RedisModel):
    __prefix__ = '@'

    @property
    def commits_updated(self):
        if 'commits_updated' in self:
            return datetime.strptime(self['commits_updated'], "%Y-%m-%dT%H:%M:%S.%fZ")

        return None

    @commits_updated.setter
    def commits_updated(self, value):
        self['commits_updated'] = datetime.strftime(value, "%Y-%m-%dT%H:%M:%S.%fZ")

    @staticmethod
    def load_from_slack(include_bots=False, include_deleted=False):
        """Update user list from slack"""
        slack_response = slack.users.list()
        if not slack_response.successful:
            app.logger.error('Error loading user list. Server returned %s' % slack_response.error)
            return False

        # Add channel to list and save
        for user in slack_response.body.get('members', []):
            if user.get('is_bot') and not include_bots:
                continue
            if user.get('deleted') and not include_deleted:
                continue
            entity = User(user.get('name'))
            entity.slack_id = user.get('id')

        return True

    def update_commits(self, commits=1):
        """Update the number of commits"""
        if not 'commits_updated' in self:
            # Start from 0
            self.commits_updated = datetime.now()
            self.commits_in_last_day = 0
            self.commits_in_last_week = 0
            self.commits_in_last_month = 0
            self.commits_in_last_year = 0
            self.commits_total = 0
            self.days = 1

        # We will check the dates
        now = datetime.now()
        updated = self.commits_updated

        # Save the difference
        delta = now - updated

        # If more than one day has passed since last commit, reset daily commit count
        if delta.days > 0:
            self.commits_in_last_day = 0
            # And increase the number of days counting
            self.incrby('days', 1)

        # If the week has changed between commits, reset weekly commit count
        if abs(now.isocalendar()[1] - updated.isocalendar()[1]) > 0:
            # Week changed
            self.commits_in_last_week = 0

        # If the month changed, reset monthly commit count
        if abs(now.month - updated.month) > 0:
            self.commits_in_last_month = 0

        # If the year changed, reset yearly commit count
        if now.year - updated.year > 0:
            self.commits_in_last_week = 0  # In case there has been no activity in an exact year
            self.commits_in_last_month = 0
            self.commits_in_last_year = 0

        # Increase count. Use incrby for efficiency
        self.incrby('commits_in_last_day', commits)
        self.incrby('commits_in_last_week', commits)
        self.incrby('commits_in_last_month', commits)
        self.incrby('commits_in_last_year', commits)
        self.incrby('commits_total', commits)

        # Change update date
        self.commits_updated = now


def load_data_from_slack():
    """Load data from slack.
    To be called on application start"""
    Channel.load_from_slack()
    User.load_from_slack()
|
Python
| 0.00029
|
@@ -1098,24 +1098,185 @@
M:%25S.%25fZ%22)%0A%0A
+ @property%0A def commits_average(self):%0A if 'days' in self and self.days %3E 0:%0A return self.commits_total / self.days%0A%0A return 0.0%0A%0A
@staticm
@@ -2174,24 +2174,16 @@
-
= dateti
|
d9943ea49bd16d3efdb7270df33cca158123aa8a
|
make code more robust
|
addons/website/models/ir_ui_view.py
|
addons/website/models/ir_ui_view.py
|
# -*- coding: utf-8 -*-
import copy

from lxml import etree, html

from openerp.osv import osv, fields


class view(osv.osv):
    _inherit = "ir.ui.view"
    _columns = {
        'inherit_option_id': fields.many2one('ir.ui.view','Optional Inheritancy'),
        'inherited_option_ids': fields.one2many('ir.ui.view','inherit_option_id','Optional Inheritancies'),
        'page': fields.boolean("Whether this view is a web page template (complete)"),
        'website_meta_title': fields.char("Website meta title", size=70, translate=True),
        'website_meta_description': fields.text("Website meta description", size=160, translate=True),
        'website_meta_keywords': fields.char("Website meta keywords", translate=True),
    }
    _defaults = {
        'page': False,
    }

    # Returns all views (called and inherited) related to a view
    # Used by translation mechanism, SEO and optional templates
    def _views_get(self, cr, uid, view, options=True, context=None, root=True, stack_result=None):
        if not context:
            context = {}
        if not stack_result:
            stack_result = []

        def view_obj(view):
            if type(view) in (str, unicode):
                mod_obj = self.pool.get("ir.model.data")
                m, n = view.split('.')
                _, view = mod_obj.get_object_reference(cr, uid, m, n)
            if type(view) == int:
                view_obj = self.pool.get("ir.ui.view")
                view = view_obj.browse(cr, uid, view, context=context)
            return view

        try:
            view = view_obj(view)
        except ValueError:
            # Shall we log that ?
            return []

        while root and view.inherit_id:
            view = view.inherit_id

        result = [view]

        todo = view.inherit_children_ids
        if options:
            todo += filter(lambda x: not x.inherit_id, view.inherited_option_ids)

        for child_view in todo:
            for r in self._views_get(cr, uid, child_view, options=options, context=context, root=False, stack_result=result):
                if r not in result:
                    result.append(r)

        node = etree.fromstring(view.arch)
        for child in node.xpath("//t[@t-call]"):
            try:
                call_view = view_obj(child.get('t-call'))
            except ValueError:
                continue
            if call_view not in result:
                result += self._views_get(cr, uid, call_view, options=options, context=context, stack_result=result)

        return result

    def extract_embedded_fields(self, cr, uid, arch, context=None):
        return arch.xpath('//*[@data-oe-model != "ir.ui.view"]')

    def save_embedded_field(self, cr, uid, el, context=None):
        Model = self.pool[el.get('data-oe-model')]
        field = el.get('data-oe-field')
        column = Model._all_columns[field].column
        converter = self.pool['website.qweb'].get_converter_for(
            el.get('data-oe-type'))
        value = converter.from_html(cr, uid, Model, column, el)

        if value is not None:
            # TODO: batch writes?
            Model.write(cr, uid, [int(el.get('data-oe-id'))], {
                field: value
            }, context=context)

    def to_field_ref(self, cr, uid, el, context=None):
        # filter out meta-information inserted in the document
        attributes = dict((k, v) for k, v in el.items()
                          if not k.startswith('data-oe-'))
        attributes['t-field'] = el.get('data-oe-expression')

        out = html.html_parser.makeelement(el.tag, attrib=attributes)
        out.tail = el.tail
        return out

    def replace_arch_section(self, cr, uid, view_id, section_xpath, replacement, context=None):
        # the root of the arch section shouldn't actually be replaced as it's
        # not really editable itself, only the content truly is editable.
        [view] = self.browse(cr, uid, [view_id], context=context)
        arch = etree.fromstring(view.arch.encode('utf-8'))
        # => get the replacement root
        if not section_xpath:
            root = arch
        else:
            # ensure there's only one match
            [root] = arch.xpath(section_xpath)

        root.text = replacement.text
        root.tail = replacement.tail
        # replace all children
        del root[:]
        for child in replacement:
            root.append(copy.deepcopy(child))

        return arch

    def save(self, cr, uid, res_id, value, xpath=None, context=None):
        """ Update a view section. The view section may embed fields to write

        :param str model:
        :param int res_id:
        :param str xpath: valid xpath to the tag to replace
        """
        res_id = int(res_id)

        arch_section = html.fromstring(
            value, parser=html.HTMLParser(encoding='utf-8'))

        if xpath is None:
            # value is an embedded field on its own, not a view section
            self.save_embedded_field(cr, uid, arch_section, context=context)
            return

        for el in self.extract_embedded_fields(cr, uid, arch_section, context=context):
            self.save_embedded_field(cr, uid, el, context=context)

            # transform embedded field back to t-field
            el.getparent().replace(el, self.to_field_ref(cr, uid, el, context=context))

        arch = self.replace_arch_section(cr, uid, res_id, xpath, arch_section, context=context)
        self.write(cr, uid, res_id, {
            'arch': etree.tostring(arch, encoding='utf-8').decode('utf-8')
        }, context=context)
|
Python
| 0.000013
|
@@ -1159,35 +1159,35 @@
if
-type(view) in (str, unicode
+isinstance(view, basestring
):%0A
@@ -1367,28 +1367,42 @@
+el
if
-type(view) == int
+isinstance(view, (int, long))
:%0A
@@ -1517,32 +1517,32 @@
ontext=context)%0A
-
retu
@@ -1545,24 +1545,25 @@
return view%0A
+%0A
try:
|