commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
8171af80ab1bff2ffac4b85642217a37fb485d74
|
Rewrite serializer
|
rest_framework_gis/serializers.py
|
rest_framework_gis/serializers.py
|
# rest_framework_gis/serializers.py
from django.contrib.gis.db import models
from rest_framework.serializers import ModelSerializer
from .fields import GeometryField
class GeoModelSerializer(ModelSerializer):
def get_field(self, model_field):
"""
Creates a default instance of a basic non-relational field.
"""
kwargs = {}
kwargs['blank'] = model_field.blank
if model_field.null or model_field.blank:
kwargs['required'] = False
if isinstance(model_field, models.AutoField) or not model_field.editable:
kwargs['read_only'] = True
if model_field.has_default():
kwargs['required'] = False
kwargs['default'] = model_field.get_default()
if issubclass(model_field.__class__, models.TextField):
kwargs['widget'] = widgets.Textarea
# TODO: TypedChoiceField?
if model_field.flatchoices: # This ModelField contains choices
kwargs['choices'] = model_field.flatchoices
return ChoiceField(**kwargs)
field_mapping = {
models.AutoField: IntegerField,
models.FloatField: FloatField,
models.IntegerField: IntegerField,
models.PositiveIntegerField: IntegerField,
models.SmallIntegerField: IntegerField,
models.PositiveSmallIntegerField: IntegerField,
models.DateTimeField: DateTimeField,
models.EmailField: EmailField,
models.CharField: CharField,
models.URLField: URLField,
models.SlugField: SlugField,
models.TextField: CharField,
models.CommaSeparatedIntegerField: CharField,
models.BooleanField: BooleanField,
models.FileField: FileField,
models.ImageField: ImageField,
models.GeometryField: GeometryField,
models.PointField: GeometryField,
models.LineStringField: GeometryField,
models.PolygonField: GeometryField,
models.MultiPointField: GeometryField,
models.MultiLineStringField: GeometryField,
models.MultiPolygonField: GeometryField,
models.GeometryCollectionField: GeometryField
}
try:
return field_mapping[model_field.__class__](**kwargs)
except KeyError:
return ModelField(model_field=model_field, **kwargs)
|
Python
| 0.000104
|
@@ -162,16 +162,17 @@
yField%0A%0A
+%0A
class Ge
@@ -210,1652 +210,61 @@
r):%0A
-
%0A
-def get_field(self, model_field):%0A %22%22%22%0A Creates a default instance of a basic non-relational field.%0A %22%22%22%0A kwargs = %7B%7D%0A%0A kwargs%5B'blank'%5D = model_field.blank%0A%0A if model_field.null or model_field.blank:%0A kwargs%5B'required'%5D = False%0A%0A if isinstance(model_field, models.AutoField) or not model_field.editable:%0A kwargs%5B'read_only'%5D = True%0A%0A if model_field.has_default():%0A kwargs%5B'required'%5D = False%0A kwargs%5B'default'%5D = model_field.get_default()%0A%0A if issubclass(model_field.__class__, models.TextField):%0A kwargs%5B'widget'%5D = widgets.Textarea%0A%0A # TODO: TypedChoiceField?%0A if model_field.flatchoices: # This ModelField contains choices%0A kwargs%5B'choices'%5D = model_field.flatchoices%0A return ChoiceField(**kwargs)%0A%0A field_mapping = %7B%0A models.AutoField: IntegerField,%0A models.FloatField: FloatField,%0A models.IntegerField: IntegerField,%0A models.PositiveIntegerField: IntegerField,%0A models.SmallIntegerField: IntegerField,%0A models.PositiveSmallIntegerField: IntegerField,%0A models.DateTimeField: DateTimeField,%0A models.EmailField: EmailField,%0A models.CharField: CharField,%0A models.URLField: URLField,%0A models.SlugField: SlugField,%0A models.TextField: CharField,%0A models.CommaSeparatedIntegerField: CharField,%0A models.BooleanField: BooleanField,%0A models.FileField: FileField,%0A models.ImageField: ImageField,%0A
+pass%0A%0AGeoModelSerializer.field_mapping.update(%7B%0A
@@ -296,32 +296,24 @@
metryField,%0A
-
models.P
@@ -334,32 +334,24 @@
metryField,%0A
-
models.L
@@ -377,32 +377,24 @@
metryField,%0A
-
models.P
@@ -417,32 +417,24 @@
metryField,%0A
-
models.M
@@ -460,32 +460,24 @@
metryField,%0A
-
models.M
@@ -508,32 +508,24 @@
metryField,%0A
-
models.M
@@ -557,24 +557,16 @@
yField,%0A
-
mode
@@ -611,184 +611,7 @@
eld%0A
- %7D%0A%0A try:%0A return field_mapping%5Bmodel_field.__class__%5D(**kwargs)%0A except KeyError:%0A return ModelField(model_field=model_field, **kwargs
+%7D
)%0A
|
f3ed10d94b44498030757f5f2866ba3a89349192
|
Version bump.
|
s3ftp/__init__.py
|
s3ftp/__init__.py
|
"""
s3ftp
MIT License. See LICENSE for more details.
Copyright (c) 2014, Jonathan Stoppani
"""
from .protocol import S3Realm, S3FTPShell
__version__ = '0.1.0'
__url__ = 'https://github.com/GaretJax/s3ftp'
__all__ = ['S3Realm', 'S3FTPShell']
|
Python
| 0
|
@@ -154,17 +154,17 @@
= '0.1.
-0
+1
'%0A__url_
|
70d009834123cb5a10788763fed3193017cc8162
|
Add a default null logger per python recommendations.
|
libpebble2/__init__.py
|
libpebble2/__init__.py
|
__author__ = 'katharine'
from .exceptions import *
|
Python
| 0
|
@@ -19,16 +19,32 @@
arine'%0A%0A
+import logging%0A%0A
from .ex
@@ -61,8 +61,75 @@
mport *%0A
+%0Alogging.getLogger('libpebble2').addHandler(logging.NullHandler())%0A
|
33a87081422a52f57785ebca683992b5bd362c7a
|
Use a better error message when clusters are not balanced
|
perfrunner/tests/__init__.py
|
perfrunner/tests/__init__.py
|
import time
from exceptions import KeyboardInterrupt
from logger import logger
from perfrunner.helpers.cbmonitor import CbAgent
from perfrunner.helpers.memcached import MemcachedHelper
from perfrunner.helpers.metrics import MetricHelper
from perfrunner.helpers.misc import log_phase, target_hash, pretty_dict
from perfrunner.helpers.monitor import Monitor
from perfrunner.helpers.remote import RemoteHelper
from perfrunner.helpers.reporter import Reporter
from perfrunner.helpers.rest import RestHelper
from perfrunner.helpers.restore import RestoreHelper
from perfrunner.helpers.worker import WorkerManager
from perfrunner.settings import TargetSettings
class TargetIterator(object):
def __init__(self, cluster_spec, test_config, prefix=None):
self.cluster_spec = cluster_spec
self.test_config = test_config
self.prefix = prefix
def __iter__(self):
password = self.test_config.bucket.password
prefix = self.prefix
for master in self.cluster_spec.yield_masters():
for bucket in self.test_config.buckets:
if self.prefix is None:
prefix = target_hash(master.split(':')[0])
yield TargetSettings(master, bucket, password, prefix)
class PerfTest(object):
COLLECTORS = {}
MONITORING_DELAY = 10
def __init__(self, cluster_spec, test_config, verbose):
self.cluster_spec = cluster_spec
self.test_config = test_config
self.target_iterator = TargetIterator(cluster_spec, test_config)
self.memcached = MemcachedHelper(test_config)
self.monitor = Monitor(cluster_spec)
self.rest = RestHelper(cluster_spec)
self.remote = RemoteHelper(cluster_spec, test_config, verbose)
self.restore_helper = RestoreHelper(cluster_spec, test_config, verbose)
self.master_node = cluster_spec.yield_masters().next()
self.build = self.rest.get_version(self.master_node)
self.cbagent = CbAgent(self, verbose=verbose)
self.metric_helper = MetricHelper(self)
self.reporter = Reporter(self)
self.reports = {}
self.snapshots = []
self.master_events = []
if self.test_config.test_case.use_workers:
self.worker_manager = WorkerManager(cluster_spec, test_config)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.test_config.cluster.throttle_cpu:
self.remote.enable_cpu()
if self.test_config.test_case.use_workers:
self.worker_manager.terminate()
if exc_type != KeyboardInterrupt:
self.check_core_dumps()
for master in self.cluster_spec.yield_masters():
if not self.rest.is_balanced(master):
logger.interrupt('Rebalance failed')
self.check_failover(master)
def check_failover(self, master):
if hasattr(self, 'rebalance_settings'):
if self.rebalance_settings.failover or \
self.rebalance_settings.graceful_failover:
return
num_failovers = self.rest.get_failover_counter(master)
if num_failovers:
logger.interrupt(
'Failover happened {} time(s)'.format(num_failovers)
)
def check_core_dumps(self):
dumps_per_host = self.remote.detect_core_dumps()
core_dumps = {
host: dumps for host, dumps in dumps_per_host.items() if dumps
}
if core_dumps:
logger.interrupt(pretty_dict(core_dumps))
def compact_bucket(self):
for master in self.cluster_spec.yield_masters():
for bucket in self.test_config.buckets:
self.rest.trigger_bucket_compaction(master, bucket)
time.sleep(self.MONITORING_DELAY)
for master in self.cluster_spec.yield_masters():
self.monitor.monitor_task(master, 'bucket_compaction')
def wait_for_persistence(self):
for master in self.cluster_spec.yield_masters():
for bucket in self.test_config.buckets:
self.monitor.monitor_disk_queues(master, bucket)
self.monitor.monitor_dcp_queues(master, bucket)
def restore(self):
self.restore_helper.restore()
self.restore_helper.warmup()
def load(self, load_settings=None, target_iterator=None):
if load_settings is None:
load_settings = self.test_config.load_settings
if target_iterator is None:
target_iterator = self.target_iterator
log_phase('load phase', load_settings)
self.worker_manager.run_workload(load_settings, target_iterator)
self.worker_manager.wait_for_workers()
def hot_load(self):
hot_load_settings = self.test_config.hot_load_settings
log_phase('hot load phase', hot_load_settings)
self.worker_manager.run_workload(hot_load_settings,
self.target_iterator)
self.worker_manager.wait_for_workers()
def access(self, access_settings=None):
if access_settings is None:
access_settings = self.test_config.access_settings
log_phase('access phase', access_settings)
self.worker_manager.run_workload(access_settings, self.target_iterator)
self.worker_manager.wait_for_workers()
def access_bg(self, access_settings=None, target_iterator=None):
if access_settings is None:
access_settings = self.test_config.access_settings
if target_iterator is None:
target_iterator = self.target_iterator
log_phase('access phase in background', access_settings)
access_settings.index_type = self.test_config.index_settings.index_type
access_settings.ddocs = getattr(self, 'ddocs', None)
self.worker_manager.run_workload(access_settings,
target_iterator,
timer=access_settings.time)
def timer(self):
access_settings = self.test_config.access_settings
logger.info('Running phase for {} seconds'.format(access_settings.time))
time.sleep(access_settings.time)
def report_kpi(self, *args, **kwargs):
if self.test_config.stats_settings.enabled:
self._report_kpi(*args, **kwargs)
def _report_kpi(self, *args, **kwargs):
pass
|
Python
| 0
|
@@ -2828,22 +2828,33 @@
pt('
-Rebalance fail
+The cluster is not balanc
ed')
|
e9c42d80a4fd6616a9d55babe4aab969718fe090
|
Update PkgDistributionCreator.py
|
MTM_Installer/PkgDistributionCreator.py
|
MTM_Installer/PkgDistributionCreator.py
|
#!/usr/bin/env python
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# *** Drew Coobs <coobs1@illinois.edu> ***
# Modified version of Chris Gerke's PkgDistributionCreator script
# https://github.com/autopkg/cgerke-recipes/blob/master/SharedProcessors/PkgDistributionCreator.py
#
import os.path
import subprocess
import shutil
import xml.etree.ElementTree as etree
from glob import glob
from autopkglib import Processor, ProcessorError
__all__ = ["PkgDistributionCreator"]
class PkgDistributionCreator(Processor):
description = ("Bundles together munki pkg installers with MTM onboarding pkg. ")
input_variables = {
"source_file1": {
"required": True,
"description": ("Path to a source file (MyCoolPkg1.pkg) "),
},
"source_file2": {
"required": True,
"description": ("Path to a source file (MyCoolPkg2.pkg) "),
},
"source_file3": {
"required": True,
"description": ("Path to a source file (MyCoolPkg3.pkg) "),
},
"source_file4": {
"required": True,
"description": ("Path to a source file (MyCoolPkg4.pkg) "),
},
"source_file5": {
"required": True,
"description": ("Path to a source file (MyCoolPkg5.pkg) "),
},
"source_file6": {
"required": True,
"description": ("Path to a source file (MyCoolPkg6.pkg) "),
},
"distribution_file": {
"required": True,
"description": ("Destination path of distribution file. "),
},
"package_dir": {
"required": True,
"description": ("Directory containing source pkgs. "),
},
"output_file": {
"required": True,
"description": ("Name of output file. "),
},
}
output_variables = {
}
__doc__ = description
source_path = None
def pkgConvert(self):
if os.path.exists('/usr/bin/productbuild'):
try:
self.output("Found binary %s" % '/usr/bin/productbuild')
except OSError as e:
raise ProcessorError(
"Can't find binary %s: %s" % ('/usr/bin/productbuild', e.strerror))
try:
pbcmd = ["/usr/bin/productbuild",
"--synthesize",
"--package", self.env['source_file1'],
"--package", self.env['source_file2'],
"--package", self.env['source_file3'],
"--package", self.env['source_file4'],
"--package", self.env['source_file5'],
"--package", self.env['source_file6'],
self.env['distribution_file']]
p = subprocess.Popen(pbcmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
except OSError as e:
raise ProcessorError("Creation of distribution file failed with error code %d: %s"
% (e.errno, e.strerror))
if p.returncode != 0:
raise ProcessorError("Creation of distribution file %s failed: %s"
% (self.env['output_file'], err))
try:
pbcmd = ["/usr/bin/productbuild",
"--distribution", self.env['distribution_file'],
"--package-path", self.env['package_dir'],
self.env['output_file']]
p = subprocess.Popen(pbcmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
except OSError as e:
raise ProcessorError("cmmac execution failed with error code %d: %s"
% (e.errno, e.strerror))
if p.returncode != 0:
raise ProcessorError("cmmac conversion of %s failed: %s"
% (self.env['output_file'], err))
root = etree.Element('/Users/Shared/AutoPkg/Cache/com.github.Gibbun.pkg.UofI_MTM_Installer/distribution.xml')
self.output(root)
child = etree.Element('<title>My Awesome App</title>')
self.output(child)
root.append(child)
def main(self):
if os.path.exists(self.env['source_file1']):
try:
self.output("Found %s" % self.env['source_file1'])
except OSError as e:
raise ProcessorError(
"Can't find %s" % (self.env['source_file1'], e.strerror))
if os.path.exists(self.env['source_file2']):
try:
self.output("Found %s" % self.env['source_file2'])
except OSError as e:
raise ProcessorError(
"Can't find %s" % (self.env['source_file2'], e.strerror))
if os.path.exists(self.env['source_file3']):
try:
self.output("Found %s" % self.env['source_file3'])
except OSError as e:
raise ProcessorError(
"Can't find %s" % (self.env['source_file3'], e.strerror))
if os.path.exists(self.env['source_file4']):
try:
self.output("Found %s" % self.env['source_file4'])
except OSError as e:
raise ProcessorError(
"Can't find %s" % (self.env['source_file4'], e.strerror))
if os.path.exists(self.env['source_file5']):
try:
self.output("Found %s" % self.env['source_file5'])
except OSError as e:
raise ProcessorError(
"Can't find %s" % (self.env['source_file5'], e.strerror))
if os.path.exists(self.env['source_file6']):
try:
self.output("Found %s" % self.env['source_file6'])
self.pkgConvert()
except OSError as e:
raise ProcessorError(
"Can't find %s" % (self.env['source_file6'], e.strerror))
if __name__ == '__main__':
processor = PkgDistributionCreator()
processor.execute_shell()
|
Python
| 0
|
@@ -858,21 +858,18 @@
Tree as
-etree
+ET
%0A%0Afrom g
@@ -4554,31 +4554,25 @@
-root = etree.Element
+tree = ET.parse
('
-/
User
@@ -4666,24 +4666,28 @@
-self.output(
+root = tree.get
root
+(
)%0A
@@ -4704,22 +4704,22 @@
d =
-etree.
+ET.Sub
Element(
'%3Cti
@@ -4718,38 +4718,20 @@
ent(
-'%3Ctitle%3EMy Awesome App%3C/
+root, '
title
-%3E
')%0A
@@ -4741,53 +4741,37 @@
-self.output(child)%0A root.append(child)
+child.text = 'My Awesome App'
%0A
|
69fe87e0dd8deb194159f264bc30c50391806149
|
fix scheduler_error_mailer
|
scheduler_error_mailer/ir_cron.py
|
scheduler_error_mailer/ir_cron.py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Scheduler Error Mailer module for OpenERP
# Copyright (C) 2012-2013 Akretion (http://www.akretion.com/)
# @author: Sébastien Beau <sebastien.beau@akretion.com>
# @author David Beal <bealdavid@gmail.com>
# @author Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import orm, fields
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
class ir_cron(orm.Model):
_inherit = "ir.cron"
_columns = {
'email_template_id': fields.many2one(
'email.template',
'Error E-mail Template',
oldname="email_template",
help="Select the email template that will be "
"sent when this scheduler fails."),
}
def _handle_callback_exception(self, cr, uid, model_name, method_name,
args, job_id, job_exception):
res = super(ir_cron, self)._handle_callback_exception(
cr, uid, model_name, method_name, args, job_id, job_exception)
my_cron = self.browse(cr, uid, job_id)
if my_cron.email_template_id:
# we put the job_exception in context to be able to print it inside
# the email template
context = {
'job_exception': job_exception,
'dbname': cr.dbname,
}
_logger.debug("Sending scheduler error email with context=%s",
context)
self.pool['email.template'].send_mail(
cr, SUPERUSER_ID, my_cron.email_template.id, my_cron.id,
force_send=True, context=context)
return res
def _test_scheduler_failure(self, cr, uid, context=None):
"""This function is used to test and debug this module"""
raise orm.except_orm(
_('Error :'),
_("Task failure with UID = %d.") % uid)
|
Python
| 0.000007
|
@@ -2471,16 +2471,19 @@
template
+_id
.id, my_
|
eb9d297d14741f311cb4bf27c384077ba98cc789
|
Add missing slot.
|
web/db/mongo/__init__.py
|
web/db/mongo/__init__.py
|
# encoding: utf-8
"""MongoDB database connection extension."""
import re
from pymongo import MongoClient
from pymongo.errors import ConfigurationError
from .model import Model
from .resource import MongoDBResource
from .collection import MongoDBCollection
__all__ = ['Model', 'MongoDBResource', 'MongoDBCollection', 'MongoDBConnection']
log = __import__('logging').getLogger(__name__)
_safe_uri_replace = re.compile(r'(\w+)://(\w+):(?P<password>[^@]+)@')
class MongoDBConnection(object):
"""WebCore database extension connector for MongoDB databases.
This tiny class performs the work needed to populate the WebCore context with a MonogoDB database (or connection
if no default database is provided) on startup, using `pymongo`. In addition to performing initial configuration,
this extension adapts
"""
__slots__ = ('__name__', 'uri', 'config', 'client', 'db')
provides = {'mongodb'}
def __init__(self, uri, alias=None, **config):
"""Prepare MongoDB client configuration.
The only required configuration option (passed positionally or by keyword) is `uri`, specifying the host to
connect to and optionally client credentials (username, password), default database, and additional options.
Extraneous keyword arguments will be stored and passed through to the `MongoClient` class instantiated on
startup.
"""
self.uri = uri
self.client = None
self.db = None
self.alias = alias
# Configure a few of our own defaults here, usually because we compare the value somewhere.
config.setdefault('event_listeners', []) # For logging purposes, we add some of our own handlers.
self.config = config
def start(self, context):
name = self.alias or self.__name__ # Either we were configured with an explicit name, or the DB ext infers.
log.info("Connecting context.db.{name} to MongoDB database.".format(name=name), extra=dict(
uri = _safe_uri_replace.sub(r'\1://\2@', self.uri),
config = self.config,
))
client = self.client = MongoClient(self.uri, **self.config)
try:
db = self.db = client.get_default_database()
except ConfigurationError:
db = self.db = None
if self.config.get('connect', True):
pass # Log extra details about the connection here.
context.db[name] = db if db is not None else client
def stop(self, context):
self.client.close()
del context.db[self.alias or self.__name__]
|
Python
| 0
|
@@ -874,16 +874,25 @@
t', 'db'
+, 'alias'
)%0A%09%0A%09pro
|
bf24abb4ffba4f63f641cc61e22357253cdca956
|
Fix migration script
|
src/adhocracy/migration/versions/053_add_newsservice.py
|
src/adhocracy/migration/versions/053_add_newsservice.py
|
from datetime import datetime
from sqlalchemy import MetaData, Column, ForeignKey, Table
from sqlalchemy import Boolean, DateTime, Integer, Unicode, UnicodeText
metadata = MetaData()
message_table = Table(
'message', metadata,
Column('id', Integer, primary_key=True),
Column('subject', Unicode(140), nullable=False),
Column('body', UnicodeText(), nullable=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('access_time', DateTime, default=datetime.utcnow,
onupdate=datetime.utcnow),
Column('delete_time', DateTime, nullable=True),
Column('creator_id', Integer, ForeignKey('user.id'), nullable=False),
Column('sender_email', Unicode(255), nullable=False),
)
message_recipient_table = Table(
'message_recipient', metadata,
Column('id', Integer, primary_key=True),
Column('message_id', Integer, ForeignKey('message.id'), nullable=False),
Column('recipient_id', Integer, ForeignKey('user.id'), nullable=False),
Column('email_sent', Boolean, default=False),
)
user_table = Table(
'user', metadata,
Column('id', Integer, primary_key=True),
Column('user_name', Unicode(255), nullable=False, unique=True, index=True),
Column('display_name', Unicode(255), nullable=True, index=True),
Column('bio', UnicodeText(), nullable=True),
Column('email', Unicode(255), nullable=True, unique=True),
Column('email_priority', Integer, default=3),
Column('activation_code', Unicode(255), nullable=True, unique=False),
Column('reset_code', Unicode(255), nullable=True, unique=False),
Column('password', Unicode(80), nullable=False),
Column('locale', Unicode(7), nullable=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('access_time', DateTime, default=datetime.utcnow,
onupdate=datetime.utcnow),
Column('delete_time', DateTime),
Column('banned', Boolean, default=False),
Column('no_help', Boolean, default=False, nullable=True),
Column('page_size', Integer, default=10, nullable=True),
Column('proposal_sort_order', Unicode(50), default=None, nullable=True),
Column('gender', Unicode(1), default=None),
)
def upgrade(migrate_engine):
meta.bind = migrate_engine
message_table.create()
message_recipient_table.create()
email_messages = Column('email_messages', Boolean, default=True)
email_messages.create(user_table)
def downgrade(migrate_engine):
raise NotImplementedError()
|
Python
| 0.000008
|
@@ -2218,16 +2218,20 @@
meta
+data
.bind =
|
307d866bb6538a78effcc44e005a4dcb90a2a4b5
|
Increment to 0.5.4
|
sanic/__init__.py
|
sanic/__init__.py
|
from sanic.app import Sanic
from sanic.blueprints import Blueprint
__version__ = '0.5.3'
__all__ = ['Sanic', 'Blueprint']
|
Python
| 0.999999
|
@@ -84,9 +84,9 @@
0.5.
-3
+4
'%0A%0A_
|
5fd62098bd2f2722876a0873d5856d70046d3889
|
Increment to 0.5.2
|
sanic/__init__.py
|
sanic/__init__.py
|
from sanic.app import Sanic
from sanic.blueprints import Blueprint
__version__ = '0.5.1'
__all__ = ['Sanic', 'Blueprint']
|
Python
| 0.999999
|
@@ -84,9 +84,9 @@
0.5.
-1
+2
'%0A%0A_
|
035938d8c0f3cc2cda353286c0089ee02ffe3b87
|
Use dj six
|
likert_field/models.py
|
likert_field/models.py
|
#-*- coding: utf-8 -*-
from __future__ import unicode_literals
from six import string_types
from django.db import models
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
import likert_field.forms as forms
@python_2_unicode_compatible
class LikertField(models.IntegerField):
"""A Likert field is simply stored as an IntegerField"""
description = _('Likert item field')
def __init__(self, *args, **kwargs):
if 'null' not in kwargs and not kwargs.get('null'):
kwargs['null'] = True
super(LikertField, self).__init__(*args, **kwargs)
def __str__(self):
return "%s" % force_text(self.description)
def get_prep_value(self, value):
"""
Perform preliminary non-db specific value checks and conversions.
The field expects a number as a string (ie. '2'). Unscored fields are
empty strings and are stored as NULL
"""
if value is None:
return None
if isinstance(value, string_types) and len(value) == 0:
return None
value = int(value)
if value < 0:
value = 0
return value
def formfield(self, **kwargs):
defaults = {
'min_value': 0,
'form_class': forms.LikertField
}
defaults.update(kwargs)
return super(LikertField, self).formfield(**defaults)
|
Python
| 0.000001
|
@@ -61,38 +61,8 @@
ls%0A%0A
-from six import string_types%0A%0A
from
@@ -160,16 +160,58 @@
patible%0A
+from django.utils.six import string_types%0A
from dja
@@ -502,32 +502,94 @@
rgs, **kwargs):%0A
+ %22%22%22LikertField stores items with no answer as NULL%22%22%22%0A
if 'null
|
2194cc4e96fb2168b55c23a1c7a71636074ae8bf
|
Fix a comment
|
scout/adapter/mongo/rank_model.py
|
scout/adapter/mongo/rank_model.py
|
# -*- coding: utf-8 -*-
import logging
from io import StringIO
import requests
from configobj import ConfigObj
LOG = logging.getLogger(__name__)
TIMEUT = 20
class RankModelHandler(object):
def fetch_rank_model(self, rank_model_url):
"""Send HTTP request to retrieve rank model config file
Args:
rank_model_url(str): URL to resource containing rank model configuration
Returns:
StringIO(response.text): A StringIO containing the content of the config file
"""
try:
response = requests.get(rank_model_url, timeout=TIMEUT)
return StringIO(response.text)
except Exception as ex:
LOG.warning(ex)
def parse_rank_model(self, stringio):
"""Use configobj lib to extract RankModel key/values and return them in a dictionary
Args:
stringio(StringIO): Content of model from a file as a StringIO
Returns:
ConfigObj.dict(dictionary): dictionary with variant rank model key/values
"""
try:
return ConfigObj(stringio).dict()
except Exception as ex:
LOG.error(ex)
def add_rank_model(self, rank_model_url):
"""Fetch a rank model from remote.
Args:
rank_model_url(string): A string with the url to the rank model ini file to fetch.
Returns:
rank_model(dict): a copy of what was inserted, or None if failed
"""
response = self.fetch_rank_model(rank_model_url)
config = self.parse_rank_model(response)
if config:
config.update({"_id": rank_model_url})
config_id = self.rank_model_collection.insert_one(config).inserted_id
return self.rank_model_collection.find_one(config_id)
return {}
def rank_model_from_url(
self, rank_model_link_prefix, rank_model_version, rank_model_file_extension
):
"""Fetch a rank model configuration for A SNV or SV variant of a case
Args:
rank_model_link_prefix(str): specified in app config file
rank_model_version(string)
rank_model_file_extension(str): specified in app config file
Returns:
rank_model(dict)
"""
rank_model_url = "".join(
[rank_model_link_prefix, str(rank_model_version), rank_model_file_extension]
)
# Check if rank model document is already present in scout database
rank_model = self.rank_model_collection.find_one(rank_model_url)
if not rank_model: # Otherwise fetch it with HTTP request and save it to database
rank_model = self.add_rank_model(rank_model_url)
return rank_model
def get_ranges_info(self, rank_model, category):
"""Extract Rank model params value ranges from a database model.
These numbers will be used to describe model scores on variant page.
Args:
rank_model(dict)
category(string) examples: "Variant_call_quality_filter", "Deleteriousness" ..
Returns:
info(list) example:
"""
info = []
for _, item in rank_model.items():
if (
isinstance(item, dict) is False
or not item.get("category")
or item.get("category").casefold() != category.casefold()
):
continue
rank_info = {
"key": item.get("info_key"),
"description": item.get("description"),
"score_ranges": {},
}
for key, value in item.items():
if isinstance(value, dict) and "score" in value:
rank_info["score_ranges"][key] = value
info.append(rank_info)
return info
|
Python
| 0.999759
|
@@ -3114,17 +3114,92 @@
ist)
- example:
+: list of dictionaries containing %22key%22, %22description%22 and %22score_ranges%22 key/values
%0A
|
0e288d4a711f061a01a951d2e1d9892f737211e0
|
Add pps extension to IGNORED_EXTENSIONS
|
scrapy/linkextractors/__init__.py
|
scrapy/linkextractors/__init__.py
|
"""
scrapy.linkextractors
This package contains a collection of Link Extractors.
For more info see docs/topics/link-extractors.rst
"""
import re
from six.moves.urllib.parse import urlparse
from parsel.csstranslator import HTMLTranslator
from scrapy.utils.misc import arg_to_iter
from scrapy.utils.url import (
canonicalize_url, url_is_from_any_domain, url_has_any_extension,
)
# common file extensions that are not followed if they occur in links
IGNORED_EXTENSIONS = [
# images
'mng', 'pct', 'bmp', 'gif', 'jpg', 'jpeg', 'png', 'pst', 'psp', 'tif',
'tiff', 'ai', 'drw', 'dxf', 'eps', 'ps', 'svg',
# audio
'mp3', 'wma', 'ogg', 'wav', 'ra', 'aac', 'mid', 'au', 'aiff',
# video
'3gp', 'asf', 'asx', 'avi', 'mov', 'mp4', 'mpg', 'qt', 'rm', 'swf', 'wmv',
'm4a',
# office suites
'xls', 'xlsx', 'ppt', 'pptx', 'doc', 'docx', 'odt', 'ods', 'odg', 'odp',
# other
'css', 'pdf', 'exe', 'bin', 'rss', 'zip', 'rar',
]
_re_type = type(re.compile("", 0))
_matches = lambda url, regexs: any((r.search(url) for r in regexs))
_is_valid_url = lambda url: url.split('://', 1)[0] in {'http', 'https', 'file'}
class FilteringLinkExtractor(object):
_csstranslator = HTMLTranslator()
def __init__(self, link_extractor, allow, deny, allow_domains, deny_domains,
restrict_xpaths, canonicalize, deny_extensions, restrict_css):
self.link_extractor = link_extractor
self.allow_res = [x if isinstance(x, _re_type) else re.compile(x)
for x in arg_to_iter(allow)]
self.deny_res = [x if isinstance(x, _re_type) else re.compile(x)
for x in arg_to_iter(deny)]
self.allow_domains = set(arg_to_iter(allow_domains))
self.deny_domains = set(arg_to_iter(deny_domains))
self.restrict_xpaths = tuple(arg_to_iter(restrict_xpaths))
self.restrict_xpaths += tuple(map(self._csstranslator.css_to_xpath,
arg_to_iter(restrict_css)))
self.canonicalize = canonicalize
if deny_extensions is None:
deny_extensions = IGNORED_EXTENSIONS
self.deny_extensions = {'.' + e for e in arg_to_iter(deny_extensions)}
def _link_allowed(self, link):
if not _is_valid_url(link.url):
return False
if self.allow_res and not _matches(link.url, self.allow_res):
return False
if self.deny_res and _matches(link.url, self.deny_res):
return False
parsed_url = urlparse(link.url)
if self.allow_domains and not url_is_from_any_domain(parsed_url, self.allow_domains):
return False
if self.deny_domains and url_is_from_any_domain(parsed_url, self.deny_domains):
return False
if self.deny_extensions and url_has_any_extension(parsed_url, self.deny_extensions):
return False
return True
def matches(self, url):
if self.allow_domains and not url_is_from_any_domain(url, self.allow_domains):
return False
if self.deny_domains and url_is_from_any_domain(url, self.deny_domains):
return False
allowed = [regex.search(url) for regex in self.allow_res] if self.allow_res else [True]
denied = [regex.search(url) for regex in self.deny_res] if self.deny_res else []
return any(allowed) and not any(denied)
def _process_links(self, links):
links = [x for x in links if self._link_allowed(x)]
if self.canonicalize:
for link in links:
link.url = canonicalize_url(urlparse(link.url))
links = self.link_extractor._process_links(links)
return links
def _extract_links(self, *args, **kwargs):
return self.link_extractor._extract_links(*args, **kwargs)
# Top-level imports
from .lxmlhtml import LxmlLinkExtractor as LinkExtractor
|
Python
| 0.000001
|
@@ -849,16 +849,23 @@
'pptx',
+ 'pps',
'doc',
@@ -892,16 +892,20 @@
, 'odg',
+%0A
'odp',%0A
|
e8254ced75ce9d0df1033b6e4acb8e33f9b00e93
|
''.join want strings
|
scheduler/send.py
|
scheduler/send.py
|
#!/usr/bin/env python
import logging
logger = logging.getLogger('')
from models import Task
def send(function, args=None):
if args is None:
args = []
Task.objects.create(function=function, args=args)
logging.info("[x] Sent %s(%s)" % (function, ", ".join(args)))
|
Python
| 0.999958
|
@@ -273,12 +273,37 @@
oin(
+map(lambda x: %22%25s%22 %25 x,
args)))
+)
%0A
|
60156236836944205f3993badcf179aaa6e7ae54
|
Add an (unexposed) ResourceHandler so inheriting objects serialise better
|
ehriportal/portal/api/handlers.py
|
ehriportal/portal/api/handlers.py
|
"""
Piston handlers for notable resources.
"""
from piston.handler import BaseHandler
from portal import models
class RepositoryHandler(BaseHandler):
model = models.Repository
class CollectionHandler(BaseHandler):
model = models.Collection
class PlaceHandler(BaseHandler):
model = models.Place
class ContactHandler(BaseHandler):
model = models.Contact
class AuthorityHandler(BaseHandler):
model = models.Authority
|
Python
| 0
|
@@ -108,16 +108,80 @@
models%0A%0A
+class ResourceHandler(BaseHandler):%0A model = models.Resource%0A
%0Aclass R
|
38029cc99bfd049491e9c5fa4d3e45a64c58845f
|
Fix last line indent to make pyflakes happy.
|
schemaish/attr.py
|
schemaish/attr.py
|
"""
Schema attribute types, also imported into the main package.
"""
__all__ = ["String", "Integer", "Float", "Decimal", "Date", "Time", "Boolean", "Sequence",
"Tuple", "Structure"]
import itertools
from formencode import Invalid
# Internal counter used to ensure the order of a meta structure's attributes is
# maintained.
_meta_order = itertools.count()
class Attribute(object):
"""
Abstract base class for all attribute types in the package.
@ivar title: Title of the attribute.
@ivar description: Optional description.
@ivar validator: Optional FormEncode validator.
"""
def __init__(self, **k):
"""
Create a new attribute.
@param title: Title of the attribute.
@keyword description: Optional description.
@keyword validator: Optional FormEncode validator.
"""
self.title = k.pop('title', None)
self.description = k.pop('description', None)
self.validator = k.pop('validator', None)
self._meta_order = _meta_order.next()
def validate(self, value):
"""
Validate the value if a validator has been provided.
"""
if self.validator is None:
return value
return self.validator.to_python(value)
class String(Attribute):
"""
A Python unicode instance.
"""
pass
class Integer(Attribute):
"""
A Python integer.
"""
pass
class Float(Attribute):
"""
A Python float.
"""
pass
class Decimal(Attribute):
"""
A decimal.Decimal instance.
"""
pass
class Date(Attribute):
"""
A datetime.date instance.
"""
pass
class Time(Attribute):
"""
A datetime.time instance.
"""
pass
class DateTime(Attribute):
"""
A datetime.datetime instance.
"""
pass
class Boolean(Attribute):
"""
A Python Boolean instance.
"""
pass
class Sequence(Attribute):
"""
A sequence (Python list) of attributes of a specific type.
@ivar attr: Attribute type of items in the sequence.
"""
def __init__(self, attr, **k):
"""
Create a new Sequence instance.
@keyword attr: Attribute type of items in the sequence.
"""
super(Sequence, self).__init__(**k)
self.attr = attr
def validate(self, value):
"""
Validate all items in the sequence and then validate the Sequence
itself.
"""
errors = {}
if value is not None:
for n,item in enumerate(value):
try:
item = self.attr.validate(item)
except Invalid, e:
if e.error_dict is not None:
for k, v in e.error_dict.items():
errors['%s.%s'%(str(n),k)] = v
errors[str(n)] = e
try:
super(Sequence, self).validate(value)
except Invalid, e:
errors[''] = e
if errors.keys():
raise Invalid(e.message, value, None, error_dict = errors)
return value
class Tuple(Attribute):
"""
A Python tuple of attributes of specific types.
@ivar attrs: List of Attributes that define the items in the tuple.
"""
def __init__(self, attrs, **k):
"""
Create a Tuple instance.
@param attrs: List of Attributes that define the items in the tuple.
"""
super(Tuple, self).__init__(**k)
self.attrs = attrs
def validate(self, value):
"""
Validate the tuple's items and the tuple itself.
"""
if value:
value = tuple(attr.validate(item) for (attr, item) in zip(self.attrs, value))
return super(Tuple, self).validate(value)
class _StructureMeta(type):
def __init__(cls, name, bases, clsattrs):
attrs = []
for (name, value) in clsattrs.items():
if isinstance(value, Attribute):
attrs.append((name, value))
del clsattrs[name]
attrs = [(a[1]._meta_order, a) for a in attrs]
attrs.sort()
attrs = [i[1] for i in attrs]
cls.attrs = attrs
class Structure(Attribute):
"""
Python dict conforming to a fixed structure.
The class can be used to build a structure programmatically or using meta
class syntax. For example the following result in s1 and s2 defining the
same structure:
s1 = Structure("Your Name")
s1.add("title", String("Title"))
s1.add("first", String("First Name"))
s1.add("last", String("Last Name"))
class Name(Structure):
title = String("Title")
first = String("First Name")
last = String("Last Name")
s2 = Name("Your Name")
@ivar attrs: List of (name, attribute) tuples each of which defines the
names and type of an attribute of the structure.
"""
__metaclass__ = _StructureMeta
def __init__(self, attrs=None, **k):
"""
Create a new structure.
@params attrs: List of (name, attribute) tuples defining the name and
type of the structure's attributes.
"""
super(Structure, self).__init__(**k)
# If attrs has been passed as an arg then use that as the attrs of the
# structure. Otherwise use the class's attrs, making a copy to ensure
# that any added attrs to the instance do not get appended to te
# class's attrs.
if attrs is not None:
self.attrs = attrs
else:
self.attrs = list(self.attrs)
def add(self, name, attr):
"""
Add a names attribute to the structure.
@param name: Attribute name.
@param attr: Attribute type.
"""
self.attrs.append((name, attr))
def get(self, name):
"""
Get the attribute with the given name.
@param name: Name of the attribute to return.
@raise KeyError: Attribute name could not be found.
"""
for (attr_name, attr) in self.attrs:
if attr_name == name:
return attr
raise KeyError(name)
#def validate(self, value):
#"""
#Validate the structure's attributes and the structure itself.
#"""
#value = dict((name, attr.validate(value[name])) for (name, attr) in self.attrs)
#return super(Structure, self).validate(value)
def validate(self, value):
"""
Validate all items in the sequence and then validate the Sequence
itself.
"""
errors = {}
data = {}
if value is not None:
for (name, attr) in self.attrs:
try:
data[name] = attr.validate(value.get(name,None))
except Invalid, e:
if e.error_dict is not None:
for k, v in e.error_dict.items():
errors['%s.%s'%(name,k)] = v
errors[name] = e
try:
super(Structure, self).validate(value)
except Invalid, e:
errors[''] = e
if errors.keys():
raise Invalid(e.message, value, None, error_dict = errors)
return data
|
Python
| 0
|
@@ -7339,16 +7339,9 @@
rn data%0A
-
+%0A
|
3c3392aeedbdd69fc6f36c7da1e319add0873b2e
|
remove print statement
|
web3/apps/sites/views.py
|
web3/apps/sites/views.py
|
import os
from django.shortcuts import render, redirect, get_object_or_404, reverse
from django.core.exceptions import PermissionDenied
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .models import Site
from .forms import SiteForm, ProcessForm
from .helpers import (reload_services, delete_site_files, create_config_files,
make_site_dirs, create_process_config, restart_supervisor,
get_supervisor_status, delete_process_config)
from ..authentication.decorators import superuser_required
from ...utils.emails import send_new_site_email
@login_required
def create_view(request):
if request.user.is_superuser:
if request.method == "POST":
form = SiteForm(request.POST)
if form.is_valid():
site = form.save()
for user in site.group.users.filter(service=False):
send_new_site_email(user, site)
if not settings.DEBUG:
reload_services()
return redirect("index")
else:
form = SiteForm()
context = {
"form": form
}
return render(request, "sites/create_site.html", context)
else:
return render(request, "sites/create_info.html", {})
@superuser_required
def edit_view(request, site_id):
site = get_object_or_404(Site, id=site_id)
if request.method == "POST":
current_members = list(site.group.users.filter(service=False).values_list('id', flat=True))
form = SiteForm(request.POST, instance=site)
if form.is_valid():
site = form.save()
for user in site.group.users.filter(service=False).exclude(id__in=current_members):
print(user)
send_new_site_email(user, site)
if not settings.DEBUG:
reload_services()
return redirect("index")
else:
form = SiteForm(instance=site)
context = {
"form": form
}
return render(request, "sites/create_site.html", context)
@superuser_required
def delete_view(request, site_id):
site = get_object_or_404(Site, id=site_id)
if request.method == "POST":
if not request.POST.get("confirm", None) == site.name:
messages.error(request, "Delete confirmation failed!")
return redirect("info_site", site_id=site_id)
if not settings.DEBUG:
delete_site_files(site)
reload_services()
site.user.delete()
site.group.delete()
site.delete()
messages.success(request, "Site {} deleted!".format(site.name))
return redirect("index")
context = {
"site": site
}
return render(request, "sites/delete_site.html", context)
@superuser_required
def modify_process_view(request, site_id):
site = get_object_or_404(Site, id=site_id)
if request.method == "POST":
try:
form = ProcessForm(request.POST, instance=site.process)
except Site.process.RelatedObjectDoesNotExist:
form = ProcessForm(request.POST)
if form.is_valid():
proc = form.save()
if not settings.DEBUG:
create_process_config(proc)
reload_services()
messages.success(request, "Process modified!")
return redirect("info_site", site_id=proc.site.id)
else:
try:
form = ProcessForm(instance=site.process)
except Site.process.RelatedObjectDoesNotExist:
form = ProcessForm()
context = {
"form": form
}
return render(request, "sites/create_process.html", context)
@superuser_required
def delete_process_view(request, site_id):
site = get_object_or_404(Site, id=site_id)
if request.method == "POST":
try:
site.process.delete()
if not settings.DEBUG:
delete_process_config(site)
reload_services()
messages.success(request, "Process deleted!")
except Site.process.RelatedObjectDoesNotExist:
messages.error(request, "Process not found.")
return redirect("info_site", site_id=site.id)
else:
return render(request, "sites/delete_process.html", {"site": site})
@login_required
def config_view(request, site_id):
site = get_object_or_404(Site, id=site_id)
if not request.user.is_superuser and not site.group.users.filter(id=request.user.id).exists():
raise PermissionDenied
if not settings.DEBUG:
create_config_files(site)
reload_services()
messages.success(request, "Configuration files regenerated!")
return redirect("info_site", site_id=site_id)
@login_required
def permission_view(request, site_id):
site = get_object_or_404(Site, id=site_id)
if not request.user.is_superuser and not site.group.users.filter(id=request.user.id).exists():
raise PermissionDenied
if not settings.DEBUG:
make_site_dirs(site)
for root, dirs, files in os.walk(site.path):
for f in files + dirs:
os.chown(os.path.join(root, f), site.user.id, site.group.id)
messages.success(request, "File permissions regenerated!")
return redirect("info_site", site_id=site.id)
@login_required
def restart_process_view(request, site_id):
site = get_object_or_404(Site, id=site_id)
if not request.user.is_superuser and not site.group.users.filter(id=request.user.id).exists():
raise PermissionDenied
if not settings.DEBUG:
restart_supervisor(site)
messages.success(request, "Restarted supervisor application!")
return redirect("info_site", site_id=site_id)
@login_required
def info_view(request, site_id):
site = get_object_or_404(Site, id=site_id)
if not request.user.is_superuser and not site.group.users.filter(id=request.user.id).exists():
raise PermissionDenied
context = {
"site": site,
"users": site.group.users.filter(service=False).order_by("username"),
"status": get_supervisor_status(site)
}
return render(request, "sites/info_site.html", context)
|
Python
| 0.999999
|
@@ -1806,36 +1806,8 @@
s):%0A
- print(user)%0A
|
488e5dd9bcdcba26de98fdbcaba1e23e8b4a8188
|
use csv writer for listing scraper
|
scrape_listing.py
|
scrape_listing.py
|
#!/usr/bin/env python
import sys
import requests
from models.listing import Listing
def scrape_listing(url):
response = requests.get(url)
listing = Listing(response.content)
# print('Title: ' + listing.title)
# print('Price: ' + listing.price)
# print('Image URLs: ' + listing.imgs)
# print('Location: ' + listing.location)
# print('Description: ' + listing.description)
# print('Category: ' + listing.category)
# print('Manufacturer: ' + listing.manufacturer)
# print('Caliber: ' + listing.caliber)
# print('Action: ' + listing.action)
# print('Firearm Type: ' + listing.firearm_type)
# print('Listing Date: ' + listing.listed_date)
# print('Post ID: ' + listing.post_id)
# print('Registration: ' + str(listing.registered))
# print('Party Type: ' + listing.party)
print('{0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10},{11},{12},{13}'.format(listing.title, listing.listed_date, listing.post_id, listing.price, listing.location, listing.description, listing.registered, listing.category, listing.manufacturer, listing.caliber, listing.action, listing.firearm_type, listing.party, listing.imgs))
if __name__ == '__main__':
if len(sys.argv) == 1:
print('url required')
sys.exit()
url = str(sys.argv[1])
scrape_listing(url=url)
|
Python
| 0
|
@@ -16,16 +16,27 @@
python%0A%0A
+import csv%0A
import s
@@ -116,16 +116,52 @@
g(url):%0A
+ writer = csv.writer(sys.stdout)%0A
resp
@@ -872,16 +872,408 @@
.party)%0A
+ writer.writerow(%5B%0A listing.post_id,%0A listing.title,%0A listing.listed_date,%0A listing.price,%0A listing.location,%0A listing.description,%0A listing.registered,%0A listing.category,%0A listing.manufacturer,%0A listing.caliber,%0A listing.action,%0A listing.firearm_type,%0A listing.party,%0A listing.imgs%0A %5D)%0A%0A
prin
|
03ec4f818807808d6e983fe80b1adb0af27b6ea4
|
Update build_aar.py after webrtc/ dir was removed.
|
tools_webrtc/android/build_aar.py
|
tools_webrtc/android/build_aar.py
|
#!/usr/bin/env python
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Script to generate libwebrtc.aar for distribution.
The script has to be run from the root src folder.
./tools_webrtc/android/build_aar.py
.aar-file is just a zip-archive containing the files of the library. The file
structure generated by this script looks like this:
- AndroidManifest.xml
- classes.jar
- libs/
- armeabi-v7a/
- libjingle_peerconnection_so.so
- x86/
- libjingle_peerconnection_so.so
"""
import argparse
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import zipfile
SCRIPT_DIR = os.path.dirname(os.path.realpath(sys.argv[0]))
DEFAULT_ARCHS = ['armeabi-v7a', 'arm64-v8a', 'x86', 'x86_64']
NEEDED_SO_FILES = ['libjingle_peerconnection_so.so']
JAR_FILE = 'lib.java/webrtc/sdk/android/libwebrtc.jar'
MANIFEST_FILE = 'webrtc/sdk/android/AndroidManifest.xml'
TARGETS = [
'webrtc/sdk/android:libwebrtc',
'webrtc/sdk/android:libjingle_peerconnection_so',
]
sys.path.append(os.path.join(SCRIPT_DIR, '..', 'libs'))
from generate_licenses import LicenseBuilder
def _ParseArgs():
parser = argparse.ArgumentParser(description='libwebrtc.aar generator.')
parser.add_argument('--build-dir',
help='Build dir. By default will create and use temporary dir.')
parser.add_argument('--output', default='libwebrtc.aar',
help='Output file of the script.')
parser.add_argument('--arch', default=DEFAULT_ARCHS, nargs='*',
help='Architectures to build. Defaults to %(default)s.')
parser.add_argument('--use-goma', action='store_true', default=False,
help='Use goma.')
parser.add_argument('--verbose', action='store_true', default=False,
help='Debug logging.')
parser.add_argument('--extra-gn-args', default=[], nargs='*',
help='Additional GN args to be used during Ninja generation.')
return parser.parse_args()
def _RunGN(args):
cmd = ['gn']
cmd.extend(args)
logging.debug('Running: %r', cmd)
subprocess.check_call(cmd)
def _RunNinja(output_directory, args):
cmd = ['ninja', '-C', output_directory]
cmd.extend(args)
logging.debug('Running: %r', cmd)
subprocess.check_call(cmd)
def _EncodeForGN(value):
"""Encodes value as a GN literal."""
if type(value) is str:
return '"' + value + '"'
elif type(value) is bool:
return repr(value).lower()
else:
return repr(value)
def _GetOutputDirectory(build_dir, arch):
"""Returns the GN output directory for the target architecture."""
return os.path.join(build_dir, arch)
def _GetTargetCpu(arch):
"""Returns target_cpu for the GN build with the given architecture."""
if arch in ['armeabi', 'armeabi-v7a']:
return 'arm'
elif arch == 'arm64-v8a':
return 'arm64'
elif arch == 'x86':
return 'x86'
elif arch == 'x86_64':
return 'x64'
else:
raise Exception('Unknown arch: ' + arch)
def _GetArmVersion(arch):
"""Returns arm_version for the GN build with the given architecture."""
if arch == 'armeabi':
return 6
elif arch == 'armeabi-v7a':
return 7
elif arch in ['arm64-v8a', 'x86', 'x86_64']:
return None
else:
raise Exception('Unknown arch: ' + arch)
def Build(build_dir, arch, use_goma, extra_gn_args):
"""Generates target architecture using GN and builds it using ninja."""
logging.info('Building: %s', arch)
output_directory = _GetOutputDirectory(build_dir, arch)
gn_args = {
'target_os': 'android',
'is_debug': False,
'is_component_build': False,
'rtc_include_tests': False,
'target_cpu': _GetTargetCpu(arch),
'use_goma': use_goma
}
arm_version = _GetArmVersion(arch)
if arm_version:
gn_args['arm_version'] = arm_version
gn_args_str = '--args=' + ' '.join([
k + '=' + _EncodeForGN(v) for k, v in gn_args.items()] + extra_gn_args)
_RunGN(['gen', output_directory, gn_args_str])
ninja_args = TARGETS[:]
if use_goma:
ninja_args.extend(['-j', '200'])
_RunNinja(output_directory, ninja_args)
def CollectCommon(aar_file, build_dir, arch):
"""Collects architecture independent files into the .aar-archive."""
logging.info('Collecting common files.')
output_directory = _GetOutputDirectory(build_dir, arch)
aar_file.write(MANIFEST_FILE, 'AndroidManifest.xml')
aar_file.write(os.path.join(output_directory, JAR_FILE), 'classes.jar')
def Collect(aar_file, build_dir, arch):
"""Collects architecture specific files into the .aar-archive."""
logging.info('Collecting: %s', arch)
output_directory = _GetOutputDirectory(build_dir, arch)
abi_dir = os.path.join('jni', arch)
for so_file in NEEDED_SO_FILES:
aar_file.write(os.path.join(output_directory, so_file),
os.path.join(abi_dir, so_file))
def GenerateLicenses(output_dir, build_dir, archs):
builder = LicenseBuilder(
[_GetOutputDirectory(build_dir, arch) for arch in archs], TARGETS)
builder.GenerateLicenseText(output_dir)
def main():
args = _ParseArgs()
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
build_dir = args.build_dir if args.build_dir else tempfile.mkdtemp()
for arch in args.arch:
Build(build_dir, arch, args.use_goma, args.extra_gn_args)
with zipfile.ZipFile(args.output, 'w') as aar_file:
# Architecture doesn't matter here, arbitrarily using the first one.
CollectCommon(aar_file, build_dir, args.arch[0])
for arch in args.arch:
Collect(aar_file, build_dir, arch)
license_dir = os.path.dirname(os.path.realpath(args.output))
GenerateLicenses(license_dir, build_dir, args.arch)
if not args.build_dir:
shutil.rmtree(build_dir, True)
if __name__ == '__main__':
sys.exit(main())
|
Python
| 0.000031
|
@@ -1152,23 +1152,16 @@
ib.java/
-webrtc/
sdk/andr
@@ -1196,23 +1196,16 @@
FILE = '
-webrtc/
sdk/andr
@@ -1240,31 +1240,24 @@
GETS = %5B%0A '
-webrtc/
sdk/android:
@@ -1271,23 +1271,16 @@
tc',%0A '
-webrtc/
sdk/andr
|
ca356ae7b85c9d88f42c5adc6227d0125ff49399
|
Update settings.py
|
udbproject/settings.py
|
udbproject/settings.py
|
"""
Django settings for udbproject project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'c5d$g#)x!2s91v2nr@h9d21opa*p1&65z)i(#4%@62fm#f!!l-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'udb',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'udbproject.urls'
WSGI_APPLICATION = 'udbproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'PST'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
|
Python
| 0
|
@@ -1880,11 +1880,23 @@
= '
-PST
+America/Seattle
'%0A%0AU
|
97b67b1ab49efd4c194b3338e3055ecb7991e682
|
Add ValueError when links have duplicates
|
ipysankeywidget/sankey_widget.py
|
ipysankeywidget/sankey_widget.py
|
import warnings
import base64
import ipywidgets as widgets
from traitlets import (
Float,
Dict,
List,
Bool,
observe,
Unicode,
)
@widgets.register
class SankeyWidget(widgets.DOMWidget):
"""Sankey widget"""
_view_name = Unicode('SankeyView').tag(sync=True)
_model_name = Unicode('SankeyModel').tag(sync=True)
_view_module = Unicode('jupyter-sankey-widget').tag(sync=True)
_model_module = Unicode('jupyter-sankey-widget').tag(sync=True)
_view_module_version = Unicode('^0.2.3').tag(sync=True)
_model_module_version = Unicode('^0.2.3').tag(sync=True)
links = List([]).tag(sync=True)
nodes = List([]).tag(sync=True)
order = List(None, allow_none=True).tag(sync=True)
groups = List([]).tag(sync=True)
# Options
rank_sets = List([]).tag(sync=True)
align_link_types = Bool(False).tag(sync=True)
scale = Float(None, allow_none=True).tag(sync=True)
margins = Dict({}).tag(sync=True)
# Get raster image data back
png = Unicode('').tag(sync=True)
# get vector image back
svg = Unicode('').tag(sync=True)
def __init__(self, **kwargs):
"""Constructor"""
# Automatically create nodes
nodes = kwargs.get('nodes', [])
node_ids = {node['id'] for node in nodes}
missing_ids = set()
for link in kwargs.get('links', []):
if link['source'] not in node_ids:
missing_ids.add(link['source'])
if link['target'] not in node_ids:
missing_ids.add(link['target'])
kwargs['nodes'] = nodes + [{'id': k} for k in missing_ids]
super(SankeyWidget, self).__init__(**kwargs)
self._node_clicked_handlers = widgets.CallbackDispatcher()
self._link_clicked_handlers = widgets.CallbackDispatcher()
self._auto_png_filename = None
self._auto_svg_filename = None
self.on_msg(self._handle_sankey_msg)
def on_node_clicked(self, callback, remove=False):
"""Register a callback to execute when a node is clicked.
The callback will be called with three arguments: the Sankey widget
instance, the node, and the index of the node in the nodes list.
Parameters
----------
remove : bool (optional)
Set to true to remove the callback from the list of callbacks.
"""
self._node_clicked_handlers.register_callback(callback, remove=remove)
def on_link_clicked(self, callback, remove=False):
"""Register a callback to execute when a link is clicked.
The callback will be called with three arguments: the Sankey widget
instance, the link, and the index of the link in the nodes list.
Parameters
----------
remove : bool (optional)
Set to true to remove the callback from the list of callbacks.
"""
self._link_clicked_handlers.register_callback(callback, remove=remove)
def _handle_sankey_msg(self, _, content, buffers):
"""Handle a msg from the front-end.
Parameters
----------
content: dict
Content of the msg."""
if content.get('event', '') == 'node_clicked':
self._node_clicked_handlers(self, content.get('node'))
if content.get('event', '') == 'link_clicked':
self._link_clicked_handlers(self, content.get('link'))
@observe("png")
def _on_png_data(self, change):
if change['type'] != 'change':
return
if self._auto_png_filename:
self.save_png(self._auto_png_filename)
self._auto_png_filename = None
def save_png(self, filename):
"""Save the diagram to a PNG file.
The widget must be displayed first before the PNG data is available. To
display the widget and save an image at the same time, use
`auto_save_png`.
Parameters
----------
filename : string
"""
if self.png:
data = base64.decodebytes(bytes(self.png, 'ascii'))
with open(filename, 'wb') as f:
f.write(data)
else:
warnings.warn('No png image available! Try auto_save_png() instead?')
def auto_save_png(self, filename):
"""Save the diagram to a PNG file, once it has been rendered.
This waits for the diagram to be rendered, then automatically calls
`save_png` for you.
Parameters
----------
filename : string
"""
self._auto_png_filename = filename
return self
@observe("svg")
def _on_svg_data(self, change):
if change['type'] != 'change':
return
if self._auto_svg_filename:
self.save_svg(self._auto_svg_filename)
self._auto_svg_filename = None
def save_svg(self, filename):
"""Save the diagram to an SVG file.
The widget must be displayed first before the SVG data is available. To
display the widget and save an image at the same time, use
`auto_save_svg`.
Parameters
----------
filename : string
"""
if self.svg:
with open(filename, 'wb') as f:
f.write(self.svg.encode('utf8'))
else:
warnings.warn('No svg image available! Try auto_save_svg() instead?')
def auto_save_svg(self, filename):
"""Save the diagram to an SVG file, once it has been rendered.
This waits for the diagram to be rendered, then automatically calls
`save_svg` for you.
Parameters
----------
filename : string
"""
self._auto_svg_filename = filename
return self
|
Python
| 0
|
@@ -1158,16 +1158,483 @@
tor%22%22%22%0A%0A
+ # check for duplicates in links%0A values = %5B%5D%0A for link in kwargs.get('links', %5B%5D):%0A linksource = link%5B'source'%5D%0A linktarget = link%5B'target'%5D%0A if 'type' in link:%0A linktype = link%5B'type'%5D%0A else:%0A linktype = None%0A values.append((linksource, linktarget, linktype))%0A if len(values) != len(set(values)):%0A raise ValueError(%22Links have duplicates%22)%0A%0A
|
844e1917e971e834f7c95064dc7ea31fc7cc0947
|
Make build_plugins.py bail on error
|
build/build_plugins.py
|
build/build_plugins.py
|
from __future__ import print_function
import glob, os.path, sys
from mergeex import mergeex
try:
import simplejson as json
except ImportError:
import json
plugins = []
filters = []
for fileName in sorted(glob.glob('../plugins/*.json')):
try:
with open(fileName, 'rb') as f:
content = f.read().decode('utf-8')
plugin = json.loads(content)
plugin['date'] = int(os.path.getmtime(fileName) * 1000)
plugins.append(plugin)
filters.append(plugin['match'])
except IOError as e:
print('Could not open file {0}: {1}'.format(fileName, e), file=sys.stderr)
except ValueError as e:
print('Could not load JSON from file {0}: {1}'.format(fileName, *e.args), file=sys.stderr)
print('Writing combined plugins.')
with open('../modules/plugins.json', 'w') as f:
json.dump(plugins, f)
|
Python
| 0.000001
|
@@ -91,17 +91,20 @@
x%0A%0Atry:%0A
-%09
+
import s
@@ -141,17 +141,20 @@
tError:%0A
-%09
+
import j
@@ -245,16 +245,25 @@
)):%0A
-%09
+
try:%0A
-%09%09
+
with
@@ -290,19 +290,28 @@
) as f:%0A
-%09%09%09
+
content
@@ -337,19 +337,28 @@
utf-8')%0A
-%09%09%09
+
plugin =
@@ -378,19 +378,28 @@
ontent)%0A
-%09%09%09
+
plugin%5B'
@@ -447,19 +447,28 @@
1000)%0A%0A
-%09%09%09
+
plugins.
@@ -486,11 +486,20 @@
in)%0A
-%09%09%09
+
filt
@@ -526,17 +526,20 @@
atch'%5D)%0A
-%09
+
except I
@@ -543,34 +543,40 @@
t IOError as e:%0A
-%09%09
+
print('Could not
@@ -634,17 +634,40 @@
stderr)%0A
-%09
+ sys.exit(1)%0A
except V
@@ -686,10 +686,16 @@
e:%0A
-%09%09
+
prin
@@ -780,16 +780,36 @@
.stderr)
+%0A sys.exit(1)
%0A%0Aprint(
@@ -889,9 +889,12 @@
f:%0A
-%09
+
json
@@ -910,8 +910,9 @@
gins, f)
+%0A
|
3b3a7d482b3091959533c6de3138af349a8af558
|
Tidy and comment spreadsheet reader module
|
autumn_model/spreadsheet.py
|
autumn_model/spreadsheet.py
|
from __future__ import print_function
from xlrd import open_workbook
from numpy import nan
import numpy
import os
import tool_kit
#######################################
### Individual spreadsheet readers ###
#######################################
class GlobalTbReportReader:
def __init__(self, country_to_read):
self.data = {}
self.tab_name = 'TB_burden_countries_2016-04-19'
self.key = 'tb'
self.parlist = []
self.filename = 'xls/gtb_data.xlsx'
self.start_row = 1
self.horizontal = False
self.start_column = 0
self.indices = []
self.year_indices = {}
self.country_to_read = tool_kit.adjust_country_name(country_to_read)
def parse_col(self, col):
col = tool_kit.replace_specified_value(col, nan, '')
# if it's the country column (the first one), find the indices for the country being simulated
if col[0] == 'country':
for i in range(len(col)):
if col[i] == self.country_to_read: self.indices += [i]
# ignore irrelevant columns
elif 'iso' in col[0] or 'g_who' in col[0] or 'source' in col[0]:
pass
# find years to read from year column
elif col[0] == 'year':
for i in self.indices:
self.year_indices[int(col[i])] = i
# get data from remaining columns
else:
self.data[str(col[0])] = {}
for year in self.year_indices:
if not numpy.isnan(col[self.year_indices[year]]):
self.data[col[0]][year] = col[self.year_indices[year]]
def get_data(self):
return self.data
#########################
### Master functions ###
#########################
def read_xls_with_sheet_readers(sheet_readers):
"""
Runs the individual readers to gather all the data from the sheets
Args:
sheet_readers: The sheet readers that were previously collated into a list
Returns:
All the data for reading as a single object
"""
result = {}
for reader in sheet_readers:
# check that the spreadsheet to be read exists
try:
print('Reading file', os.getcwd(), reader.filename)
workbook = open_workbook(reader.filename)
# if sheet unavailable, print error message but continue
except:
print('Unable to open spreadsheet')
# if the workbook was found to be available available, read the sheet in question
else:
sheet = workbook.sheet_by_name(reader.tab_name)
# read in the direction that the reader expects (either horizontal or vertical)
if reader.horizontal:
for i_row in range(reader.start_row, sheet.nrows):
reader.parse_row(sheet.row_values(i_row))
else:
for i_col in range(reader.start_column, sheet.ncols):
reader.parse_col(sheet.col_values(i_col))
result[reader.key] = reader.get_data()
return result
def read_input_data_xls(sheets_to_read, country=None):
"""
Compile sheet readers into a list according to which ones have been selected.
Note that most readers now take the country in question as an input,
while only the fixed parameters sheet reader does not.
Args:
from_test: Whether being called from the directory above
sheets_to_read: A list containing the strings that are also the
'keys' attribute of the reader
country: Country being read for
Returns:
A single data structure containing all the data to be read
(by calling the read_xls_with_sheet_readers method)
"""
sheet_readers = []
if 'tb' in sheets_to_read:
sheet_readers.append(GlobalTbReportReader(country))
for reader in sheet_readers:
reader.filename = os.path.join(reader.filename)
return read_xls_with_sheet_readers(sheet_readers)
|
Python
| 0
|
@@ -274,16 +274,142 @@
tReader:
+%0A %22%22%22%0A Reader object for the WHO's Global TB Report 2016. Illustrates general structure for spreadsheet readers.%0A %22%22%22
%0A%0A de
@@ -871,16 +871,149 @@
f, col):
+%0A %22%22%22%0A Read and interpret a column of the spreadsheet%0A%0A Args:%0A col: The column to be read%0A %22%22%22
%0A%0A
@@ -1628,16 +1628,20 @@
ta from
+the
remainin
@@ -1641,16 +1641,23 @@
emaining
+ (data)
columns
@@ -1919,16 +1919,70 @@
a(self):
+%0A %22%22%22%0A Return the read data.%0A %22%22%22
%0A%0A
@@ -2146,16 +2146,24 @@
Runs
+each of
the indi
@@ -2176,16 +2176,37 @@
readers
+ (currently only one)
to gath
@@ -2230,22 +2230,35 @@
rom the
+input spread
sheets
+.
%0A%0A Ar
@@ -2311,23 +2311,17 @@
hat
-were previously
+have been
col
@@ -2373,18 +2373,23 @@
e data f
-o
r
+om the
reading
@@ -2389,16 +2389,24 @@
reading
+process
as a sin
@@ -3726,73 +3726,8 @@
gs:%0A
- from_test: Whether being called from the directory above%0A
@@ -3793,28 +3793,16 @@
also the
-%0A
'keys'
@@ -3814,19 +3814,20 @@
bute of
-the
+each
reader%0A
@@ -3861,20 +3861,16 @@
ing read
- for
%0A%0A Re
@@ -3947,72 +3947,8 @@
ead%0A
- (by calling the read_xls_with_sheet_readers method)%0A
@@ -4005,24 +4005,16 @@
to_read:
-%0A
sheet_r
@@ -4090,24 +4090,16 @@
readers:
-%0A
reader.
|
6aa7acba495648b710635b465d5b7cd955d9f476
|
remove tmp line
|
api/__database.py
|
api/__database.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sqlite3
import os
from core.config import _core_config
from core.config_builder import _core_default_config
from core.config_builder import _builder
from core.alert import warn
from core.alert import messages
def create_connection(language):
try:
return sqlite3.connect(os.path.join(os.path.dirname(os.path.dirname(__file__)),
_builder(_core_config(), _core_default_config())["api_db_name"]))
except:
warn(messages(language, 168))
return False
def submit_report_to_db(date, scan_id, report_filename, events_num, verbose, api_flag, report_type, graph_flag,
category, profile, scan_method, language, scan_cmd):
conn = create_connection(language)
if not conn:
return False
try:
c = conn.cursor()
c.execute("""
INSERT INTO reports (
date, scan_id, report_filename, events_num, verbose,
api_flag, report_type, graph_flag, category, profile,
scan_method, language, scan_cmd
)
VALUES (
'{0}', '{1}', '{2}', '{3}', '{4}',
'{5}', '{6}', '{7}', '{8}', '{9}',
'{10}', '{11}', '{12}'
);
""".format(date, scan_id, report_filename, events_num, verbose,
api_flag, report_type, graph_flag, category, profile,
scan_method, language, scan_cmd))
conn.commit()
conn.close()
except:
warn(messages(language, 168))
print 2
return False
return True
|
Python
| 0.000008
|
@@ -1556,24 +1556,8 @@
8))%0A
- print 2%0A
|
4b75e23687c3629d197cbdf0edac23d90e9c52b7
|
Add Sample and Observation models
|
varda/models.py
|
varda/models.py
|
"""
Models backed by SQL using SQLAlchemy.
"""
from datetime import date
from sqlalchemy import Index
from varda import db
class Variant(db.Model):
"""
Genomic variant.
"""
id = db.Column(db.Integer, primary_key=True)
chromosome = db.Column(db.String(2))
begin = db.Column(db.Integer)
end = db.Column(db.Integer)
reference = db.Column(db.String(200))
variant = db.Column(db.String(200))
def __init__(self, chromosome, begin, end, reference, variant):
self.chromosome = chromosome
self.begin = begin
self.end = end
self.reference = reference
self.variant = variant
def __repr__(self):
return '<Variant chr%s:%i %s>' % (
self.chromosome, self.begin, self.variant)
def to_dict(self):
return {'id': self.id,
'chromosome': self.chromosome,
'begin': self.begin,
'end': self.end,
'reference': self.reference,
'variant': self.variant}
Index('index_variant_position',
Variant.chromosome, Variant.begin, Variant.end)
Index('index_variant_unique',
Variant.chromosome, Variant.begin, Variant.end,
Variant.reference, Variant.variant, unique=True)
class Population(db.Model):
"""
Population study.
"""
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100))
added = db.Column(db.Date)
size = db.Column(db.Integer)
def __init__(self, name, size=0):
self.name = name
self.size = size
self.added = date.today()
def __repr__(self):
return '<Population %r>' % self.name
def to_dict(self):
return {'id': self.id,
'name': self.name,
'added': str(self.added),
'size': self.size}
class MergedObservation(db.Model):
"""
Observation in a population.
Todo: Add genotype.
"""
population_id = db.Column(db.Integer, db.ForeignKey('population.id'), primary_key=True)
variant_id = db.Column(db.Integer, db.ForeignKey('variant.id'), primary_key=True)
support = db.Column(db.Integer)
population = db.relationship(Population, backref=db.backref('merged_observations', lazy='dynamic'))
variant = db.relationship(Variant, backref=db.backref('merged_observations', lazy='dynamic'))
def __init__(self, population, variant, support=0):
self.population = population
self.variant = variant
self.support = support
def __repr__(self):
return '<MergedObservation %s %r %i>' % (self.population.name, self.variant, self.support)
def to_dict(self):
return {'population': self.population.id,
'variant': self.variant.id,
'support': self.support}
|
Python
| 0
|
@@ -2832,8 +2832,1768 @@
upport%7D%0A
+%0A%0Aclass Sample(db.Model):%0A %22%22%22%0A Sample.%0A%0A Todo: do we still need a poolSize in Sample now that we split population%0A studies to a separate model?%0A %22%22%22%0A id = db.Column(db.Integer, primary_key=True)%0A threshold = db.Column(db.Integer)%0A added = db.Column(db.Date)%0A comment = db.Column(db.String(200))%0A%0A def __init__(self, threshold=None, comment=None):%0A self.threshold = threshold%0A self.comment = comment%0A self.added = date.today()%0A%0A def __repr__(self):%0A return '%3CSample %25r%3E' %25 self.id%0A%0A def to_dict(self):%0A return %7B'id': self.id,%0A 'threshold': self.threshold,%0A 'added': str(self.added),%0A 'comment': self.comment%7D%0A%0A%0Aclass Observation(db.Model):%0A %22%22%22%0A Observation in a sample%0A %22%22%22%0A sample_id = db.Column(db.Integer, db.ForeignKey('sample.id'), primary_key=True)%0A variant_id = db.Column(db.Integer, db.ForeignKey('variant.id'), primary_key=True)%0A coverage = db.Column(db.Integer)%0A support = db.Column(db.Integer)%0A%0A sample = db.relationship(Sample, backref=db.backref('observations', lazy='dynamic'))%0A variant = db.relationship(Variant, backref=db.backref('observations', lazy='dynamic'))%0A%0A def __init__(self, sample, variant, coverage=None, support=None):%0A self.sample = sample%0A self.variant = variant%0A self.coverage = coverage%0A self.support = support%0A%0A def __repr__(self):%0A return '%3CObservation %25i %25r %25i %25i%3E' %25 (self.sample.id, self.variant, self.coverage, self.support)%0A%0A def to_dict(self):%0A return %7B'sample': self.sample.id,%0A 'variant': self.variant.id,%0A 'coverage': self.coverage,%0A 'support': self.support%7D%0A
|
591b0550e0724f3e515974fee02d8d40e070e52a
|
Bump version
|
lintreview/__init__.py
|
lintreview/__init__.py
|
__version__ = '2.25.0'
|
Python
| 0
|
@@ -17,7 +17,7 @@
.25.
-0
+1
'%0A
|
c0b3a1b40149e939e91c5483383f1a1c715a9b9c
|
Update ipc_lista1.7.py
|
lista1/ipc_lista1.7.py
|
lista1/ipc_lista1.7.py
|
#ipc_lista1.7
#Professor: Jucimar Junior
#Any Mendes Carvalho
#
#
#
#
#Faça um programa que calcule a área de um quadrado, em seguida mostre o dobro desta área para o #usuário.
altura = input("Digite a altura do quadrado em metros: ")
largura = input("Digite a largura
|
Python
| 0
|
@@ -264,9 +264,12 @@
largura
+ em
%0A
|
4f2fdfc78cf9b070b7f53fd2e7c0472f2329e77d
|
test changes of pextant harness to work with pextant updates 2
|
apps/basaltApp/pextantHarness.py
|
apps/basaltApp/pextantHarness.py
|
#__BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#__END_LICENSE__
import traceback
import logging
import json
import os
from django.conf import settings
from django.shortcuts import render_to_response, redirect, render
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponseForbidden, Http404, HttpResponse
from django.template import RequestContext
from django.utils.translation import ugettext, ugettext_lazy as _
from pextant.api import Pathfinder
from pextant.ExplorerModel import Astronaut
from pextant.geoshapely import GeoPoint, LAT_LONG
from pextant.ExplorationObjective import *
from pextant.EnvironmentalModel import EnvironmentalModel, loadElevationMap
def getCornersForMap(extent, zone, zoneLetter):
if extent:
# nw_corner = UTMCoord(extent[0], extent[3], zone, zoneLetter)
# se_corner = UTMCoord(extent[2], extent[1], zone, zoneLetter)
nw_corner = GeoPoint(LAT_LONG, extent[3], extent[0])
se_corner = GeoPoint(LAT_LONG, extent[1], extent[2])
return nw_corner, se_corner
return None, None
def getMap(site, desiredRes=0.5, maxSlope=15, extent=None):
site_frame = site['name']
dem_name = site_frame.replace(' ', '_')+'.tif'
fullPath = os.path.join(settings.STATIC_ROOT, 'basaltApp', 'dem', dem_name)
if os.path.isfile(fullPath):
zone=site['alternateCrs']['properties']['zone']
zoneLetter=site['alternateCrs']['properties']['zoneLetter']
if extent:
nw_corner, se_corner = getCornersForMap(extent, zone, zoneLetter)
else:
nw_corner = None
se_corner = None
#TODO limit based on bounds of plan
dem = loadElevationMap(fullPath, maxSlope=maxSlope, nw_corner=nw_corner, se_corner=se_corner, zone=zone, zone_letter=zoneLetter, desired_res=desiredRes)
return dem
return None
def testJsonSegments(plan):
prevStation = None
for index, entry in enumerate(plan.jsonPlan.sequence):
if entry['type'] == 'Station':
prevStation = entry['geometry']['coordinates']
elif entry['type'] == 'Segment':
nextStation = plan.jsonPlan.sequence[index+1]['geometry']['coordinates']
allCoords = [prevStation, nextStation]
entry['geometry'] = {"coordinates": allCoords,
"type": "LineString"}
prevStation = nextStation
return plan
def clearSegmentGeometry(plan):
for elt in plan.jsonPlan.sequence:
if elt.type == 'Segment':
try:
del elt['geometry']
except:
pass
try:
derivedInfo = elt['derivedInfo']
del derivedInfo['distanceList']
del derivedInfo['energyList']
del derivedInfo['timeList']
del derivedInfo['totalDistance']
del derivedInfo['totalEnergy']
del derivedInfo['totalTime']
except:
pass
plan.save()
return plan
def callPextant(request, plan, optimize=None, desiredRes=0.5, maxSlope=15, extent=None):
executions = plan.executions
if not executions:
msg = 'Plan %s not scheduled; could not call Sextant' % plan.name
raise Exception(msg)
if not executions[0].ev:
msg = 'No EV associated with plan %s; could not call Sextant' % plan.name
raise Exception(msg)
# Per Kevin, BASALTExplorer is not the thing. Astronaut is the thing
# explorer = BASALTExplorer(executions[0].ev.mass)
explorer = Astronaut(executions[0].ev.mass)
site = plan.jsonPlan['site']
dem = getMap(site, desiredRes, maxSlope, extent)
if not dem:
raise Exception('Could not load DEM while calling Pextant for ' + site['name'])
#
pathFinder = Pathfinder(explorer, dem)
sequence = plan.jsonPlan.sequence
jsonSequence = json.dumps(sequence)
try:
# print 'about to call pathfinder'
if not optimize:
optimize = str(plan.jsonPlan.optimization)
else:
plan.jsonPlan.optimization = optimize
result = pathFinder.completeSearchFromJSON(optimize, jsonSequence)
# print 'actually came back from pathfinder'
if 'NaN' not in result and 'Infinity' not in result:
plan.jsonPlan.sequence = json.loads(result)
plan.save()
except Exception, e:
traceback.print_exc()
raise e
pass
return plan
|
Python
| 0
|
@@ -2498,43 +2498,8 @@
ner,
- zone=zone, zone_letter=zoneLetter,
des
|
4fb6112552ab7969bddca7193dd51910be51d8b2
|
Update ipc_lista1.7.py
|
lista1/ipc_lista1.7.py
|
lista1/ipc_lista1.7.py
|
#ipc_lista1.7
#Professor: Jucimar Junior
#Any Mendes Carvalho
#
#
#
#
#Faça um programa que calcule a área de um quadrado, em seguida mostre o dobro desta área para o #usuário.
altura = input("Digite a altura do quadrado em metros: ")
largura = input("Digite a largura do em
|
Python
| 0
|
@@ -268,11 +268,20 @@
gura do
+quadrado
em%0A
|
f7d8d58393cf2e9fa69dfde58e5da18758408105
|
move order_with_respect_to to correct location (Meta class of models)
|
api/api/models.py
|
api/api/models.py
|
# REST API Backend for the Radiocontrol Project
#
# Copyright (C) 2017 Stefan Derkits <stefan@derkits.at>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from typing import Dict
from django.db import models
from ordered_model.models import OrderedModel
class Song(models.Model):
artist = models.CharField(max_length=128)
title = models.CharField(max_length=128)
filename = models.CharField(max_length=256, unique=True)
length = models.IntegerField()
class Playlist(models.Model):
name = models.CharField(max_length=128, unique=True)
songs = models.ManyToManyField(Song, through="PlaylistOrder")
@property
def length(self) -> float:
return sum([song.length for song in self.songs.all()])
def __unicode__(self):
return f"{self.name}"
class PlaylistOrder(OrderedModel):
playlist = models.ForeignKey(Playlist, on_delete=models.CASCADE)
song = models.ForeignKey(Song, on_delete=models.CASCADE)
order_with_respect_to = 'playlist'
class Meta:
ordering = ('playlist', 'order')
# distinct entry in the schedule (from pause to pause)
class ScheduleEntry(models.Model):
begin_datetime = models.DateTimeField(max_length=128, unique=True)
playlists = models.ManyToManyField(Playlist, through='ScheduleEntryOrder')
task_id = models.CharField(max_length=256)
# this method is on the model's manager
@staticmethod
def get_closest_to(target_datetime) -> Dict[str, 'ScheduleEntry']:
closest_after = ScheduleEntry.objects.filter(begin_datetime__gt=target_datetime).order_by('begin_datetime')
closest_before = ScheduleEntry.objects.filter(begin_datetime__lt=target_datetime).order_by('-begin_datetime')
closest_entries = {
'before': closest_before.first(),
'after': closest_after.first()
}
return closest_entries
@property
def length(self):
return sum([playlist.length for playlist in self.playlists.all()])
class ScheduleEntryOrder(OrderedModel):
schedule_entry = models.ForeignKey(ScheduleEntry, on_delete=models.CASCADE)
playlist = models.ForeignKey(Playlist, on_delete=models.CASCADE)
order_with_respect_to = 'schedule_entry'
class Meta:
ordering = ('schedule_entry', 'order')
# songs that may not yet be available
class DraftSong(models.Model):
artist = models.CharField(max_length=128)
title = models.CharField(max_length=128)
filename = models.CharField(max_length=256, unique=True)
length = models.FloatField(blank=True)
# generated from uploaded playlists and not all songs may yet be available
# when all songs are available, it can be stored as a Playlist
class DraftPlaylist(models.Model):
name = models.CharField(max_length=128, unique=True)
songs = models.ManyToManyField(DraftSong, through='DraftPlaylistOrder')
class DraftPlaylistOrder(OrderedModel):
playlist = models.ForeignKey(DraftPlaylist, on_delete=models.CASCADE)
song = models.ForeignKey(DraftSong, on_delete=models.CASCADE)
order_with_respect_to = 'playlist'
class Meta:
ordering = ('playlist', 'order')
|
Python
| 0
|
@@ -1557,47 +1557,8 @@
ADE)
-%0A order_with_respect_to = 'playlist'
%0A%0A
@@ -1576,39 +1576,51 @@
a:%0A order
-ing
+_with_respect_to
=
-(
'playlist', 'ord
@@ -1613,26 +1613,16 @@
laylist'
-, 'order')
%0A%0A%0A# dis
@@ -2736,53 +2736,8 @@
ADE)
-%0A order_with_respect_to = 'schedule_entry'
%0A%0A
@@ -2763,23 +2763,35 @@
order
-ing
+_with_respect_to
=
-(
'schedul
@@ -2798,26 +2798,16 @@
e_entry'
-, 'order')
%0A%0A%0A# son
@@ -3558,47 +3558,8 @@
ADE)
-%0A order_with_respect_to = 'playlist'
%0A%0A
@@ -3589,15 +3589,27 @@
rder
-ing
+_with_respect_to
=
-(
'pla
@@ -3618,15 +3618,5 @@
ist'
-, 'order')
%0A
|
360ef0dec991d4486ec51f23ffb065d0225347fa
|
Update ipc_lista1.8.py
|
lista1/ipc_lista1.8.py
|
lista1/ipc_lista1.8.py
|
#ipc_lista1.8
#Professor:
|
Python
| 0
|
@@ -19,9 +19,16 @@
fessor:
+Jucimar
%0A
|
e43e488df34e3b0485aeb91b672695949a47533d
|
fix path to subscription with id
|
moira_client/models/subscription.py
|
moira_client/models/subscription.py
|
from ..client import InvalidJSONError
from ..client import ResponseStructureError
from .base import Base
DAYS_OF_WEEK = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
MINUTES_IN_HOUR = 60
class Subscription(Base):
def __init__(self, client, contacts=None, tags=None, enabled=None, throttling=None, sched=None,
ignore_warnings=False, ignore_recoverings=False, **kwargs):
"""
:param client: api client
:param contacts: list of contact id
:param tags: list of str tags
:param enabled: bool is enabled
:param throttling: bool throttling
:param sched: dict schedule
:param ignore_warnings: bool ignore warnings
:param ignore_recoverings: bool ignore recoverings
:param kwargs: additional parameters
"""
self._client = client
self._id = kwargs.get('id', None)
if not contacts:
contacts = []
self.contacts = contacts
if not tags:
tags = []
self.tags = tags
self.enabled = enabled
self.throttling = throttling
default_sched = {
'startOffset': 0,
'endOffset': 1439,
'tzOffset': -180
}
if not sched:
sched = default_sched
self.disabled_days = set()
else:
if 'days' in sched and sched['days'] is not None:
self.disabled_days = {day['name'] for day in sched['days'] if not day['enabled']}
self.sched = sched
# compute time range
self._start_hour = self.sched['startOffset'] // MINUTES_IN_HOUR
self._start_minute = self.sched['startOffset'] - self._start_hour * MINUTES_IN_HOUR
self._end_hour = self.sched['endOffset'] // MINUTES_IN_HOUR
self._end_minute = self.sched['endOffset'] - self._end_hour * MINUTES_IN_HOUR
self.ignore_warnings = ignore_warnings
self.ignore_recoverings = ignore_recoverings
def _send_request(self, subscription_id=None):
data = {
'contacts': self.contacts,
'tags': self.tags,
'enabled': self.enabled,
'throttling': self.throttling,
'sched': self.sched,
'ignore_warnings': self.ignore_warnings,
'ignore_recoverings': self.ignore_recoverings
}
if subscription_id:
data['id'] = subscription_id
data['sched']['days'] = []
for day in DAYS_OF_WEEK:
day_info = {
'enabled': True if day not in self.disabled_days else False,
'name': day
}
data['sched']['days'].append(day_info)
data['sched']['startOffset'] = self._start_hour * MINUTES_IN_HOUR + self._start_minute
data['sched']['endOffset'] = self._end_hour * MINUTES_IN_HOUR + self._end_minute
result = self._client.put('subscription', json=data)
if 'id' not in result:
raise ResponseStructureError("id doesn't exist in response", result)
self._id = result['id']
return self._id
def disable_day(self, day):
"""
Disable day
:param day: str one of DAYS_OF_WEEK
:return: None
"""
self.disabled_days.add(day)
def enable_day(self, day):
"""
Enable day
:param day: str one of DAYS_OF_WEEK
:return: None
"""
self.disabled_days.remove(day)
def add_tag(self, tag):
"""
Add tag to subscription
:param tag: str tag name
:return: None
"""
self.tags.append(tag)
def add_contact(self, contact_id):
"""
Add contact
:param contact_id: str contact id
:return: None
"""
self.contacts.append(contact_id)
def save(self):
"""
Save subscription
:return: subscription id
"""
if self._id:
return self.update()
self._send_request()
def update(self):
"""
Update subscription
:return: subscription id
"""
if not self._id:
return self.save()
self._send_request(self._id)
def set_start_hour(self, hour):
"""
Set start hour
:param hour: int hour
:return: None
"""
self._start_hour = hour
def set_start_minute(self, minute):
"""
Set start minute
:param minute: int minute
:return: None
"""
self._start_minute = minute
def set_end_hour(self, hour):
"""
Set end hour
:param hour: int hour
:return: None
"""
self._end_hour = hour
def set_end_minute(self, minute):
"""
Set end minute
:param minute: int minute
:return: None
"""
self._end_minute = minute
class SubscriptionManager:
def __init__(self, client):
self._client = client
def fetch_all(self):
"""
Returns all existing subscriptions
:return: list of Subscription
:raises: ResponseStructureError
"""
result = self._client.get(self._full_path())
if 'list' in result:
subscriptions = []
for subscription in result['list']:
subscriptions.append(Subscription(self._client, **subscription))
return subscriptions
else:
raise ResponseStructureError("list doesn't exist in response", result)
def is_exist(self, **kwargs):
"""
Check whether subscription exists or not by any attributes
:param kwargs: attributes
:return: bool
:raises: ValueError
"""
for subscription in self.fetch_all():
equal = True
for attr, value in kwargs.items():
try:
if getattr(subscription, attr) != value:
equal = False
break
except Exception:
raise ValueError('Wrong attibute "{}"'.format(attr))
if equal:
return True
return False
def create(self, contacts=None, tags=None, enabled=True, throttling=True, sched=None,
ignore_warnings=False, ignore_recoverings=False, **kwargs):
"""
Create new subscription.
:param contacts: list of contact id
:param tags: list of str tags
:param enabled: bool is enabled
:param throttling: bool throttling
:param sched: dict schedule
:param ignore_warnings: bool ignore warnings
:param ignore_recoverings: bool ignore recoverings
:param kwargs: additional parameters
:return: Subscription
"""
return Subscription(
self._client,
contacts,
tags,
enabled,
throttling,
sched,
ignore_warnings,
ignore_recoverings,
**kwargs
)
def delete(self, subscription_id):
"""
Remove subscription by given id
:return: True on success, False otherwise
"""
try:
self._client.delete(self._full_path(subscription_id))
return False
except InvalidJSONError as e:
if e.content == b'': # successfully if response is blank
return True
return False
def test(self, subscription_id):
"""
Send test notification to subscription contact
:return: True on success, False otherwise
"""
try:
self._client.put(self._full_path(subscription_id) + "/test")
return False
except InvalidJSONError as e:
if e.content == b'': # successfully if response is blank
return True
return False
def _full_path(self, path=''):
if path:
return 'subscription/' + path
return 'subscription'
|
Python
| 0
|
@@ -2865,32 +2865,162 @@
lf._end_minute%0A%0A
+ if subscription_id:%0A result = self._client.put('subscription/' + subscription_id, json=data)%0A else:%0A
result =
|
93a91ac118ab4e7280562bd0cfac0ea964ae0a7e
|
remove auth_check import
|
plstackapi/core/api/sites.py
|
plstackapi/core/api/sites.py
|
from types import StringTypes
from django.contrib.auth import authenticate
from plstackapi.openstack.manager import OpenStackManager
from plstackapi.core.api.auth import auth_check
from plstackapi.core.models import Site
def _get_sites(filter):
if isinstance(filter, StringTypes) and filter.isdigit():
filter = int(filter)
if isinstance(filter, int):
sites = Site.objects.filter(id=filter)
elif isinstance(filter, StringTypes):
sites = Site.objects.filter(login_base=filter)
elif isinstance(filter, dict):
sites = Site.objects.filter(**filter)
else:
sites = []
return sites
def add_site(auth, fields):
user = authenticate(username=auth.get('username'),
password=auth.get('password'))
auth['tenant'] = user.site.login_base
site = Site(**fields)
site.os_manager = OpenStackManager(auth=auth, caller = user)
site.save()
return site
def update_site(auth, id, **fields):
user = authenticate(username=auth.get('username'),
password=auth.get('password'))
auth['tenant'] = user.site.login_base
sites = _get_sites(id)
if not sites:
return
site = Site[0]
site.os_manager = OpenStackManager(auth=auth, caller = user)
site.update(**fields)
return site
def delete_site(auth, filter={}):
user = authenticate(username=auth.get('username'),
password=auth.get('password'))
auth['tenant'] = user.site.login_base
sites = _get_sites(id)
for site in sites:
site.os_manager = OpenStackManager(auth=auth, caller = user)
site.delete()
return 1
def get_sites(auth, filter={}):
user = authenticate(username=auth.get('username'),
password=auth.get('password'))
sites = _get_sites(filter)
return sites
|
Python
| 0.000002
|
@@ -134,56 +134,8 @@
%0A
-from plstackapi.core.api.auth import auth_check%0A
from
|
273aeda221aa12aac7fe1eea51e0aed859cd9098
|
move fixme to right pos
|
sim.py
|
sim.py
|
import logging
from cardroom import Game, Table, Player, Stock, Waste, Card
log = logging.getLogger(__name__)
def play_game(players=3, cardsPerPlayer=5):
game = start_new_game(players, cardsPerPlayer)
while not game.over:
game.next_turn()
play_turn(game.player, game.table)
return game
def start_new_game(players, cardsPerPlayer):
players = invite_players(players)
deck = fetch_fresh_deck_of_cards()
make_sure_we_are_ok_to_play(players, cardsPerPlayer, deck)
table = set_the_table(deck)
for player in players:
deal_cards(player, table.stock, cardsPerPlayer)
return Game(players, table)
def invite_players(players):
"""Invite players to the game.
:type players: int or list of str
"""
try:
players = [Player(name) for name in players]
except TypeError:
players = [Player("Player %s" % (n)) for n in range(1, players + 1)]
log.debug("invited players are: %s", players)
return players
def fetch_fresh_deck_of_cards():
"""Magic a fresh deck of cards out of nothing from a definition"""
class Def:
values = [7, 8, 9, 10, 'Jack', 'Queen', 'King', 'Ace']
suits = ['diamonds', 'hearts', 'spades', 'clubs']
deck = Stock([Card(v, s) for v in Def.values for s in Def.suits])
log.debug(str(deck))
return deck
def make_sure_we_are_ok_to_play(players, cardsPerPlayer, deck):
assert len(players) > 1
assert len(players) * cardsPerPlayer <= len(deck)
def set_the_table(deck):
deck.shuffle()
stock = deck
upcard = stock.fetch_card()
waste = Waste()
return Table(stock, waste, upcard)
def deal_cards(player, stock, cardsPerPlayer):
deal = stock.fetch_cards(cardsPerPlayer)
player.hand = deal
log.debug(str(player))
def play_turn(player, table):
log.debug("upcard: %s; hand: %s", table.upcard, player.hand)
if not player.play_card(table.upcard, table):
# FIXME this could be more symmetric to what happens in play_card
# - draw_card returns boolean
# - if False (stock empty)
# - replenish stock
# - draw again
ensure_stock_is_replenished(table)
player.draw_card(table.stock)
def ensure_stock_is_replenished(table):
if table.stock.isEmpty:
table.stock = Stock(table.waste.cards)
table.waste = Waste()
table.stock.shuffle()
|
Python
| 0
|
@@ -440,19 +440,21 @@
s()%0A
-mak
+ensur
e_sure_w
@@ -1355,11 +1355,13 @@
def
-mak
+ensur
e_su
|
36ae43735ed899b0ecb7b5679e60e4b0b2496d80
|
Move pdf under chromiumcontent
|
chromiumcontent/chromiumcontent.gyp
|
chromiumcontent/chromiumcontent.gyp
|
{
'targets': [
{
'target_name': 'chromiumcontent_all',
'type': 'none',
'dependencies': [
'chromiumcontent',
'<(DEPTH)/chrome/chrome.gyp:chromedriver',
],
'conditions': [
['OS=="linux"', {
'dependencies': [
'chromiumviews',
'<(DEPTH)/build/linux/system.gyp:libspeechd',
'<(DEPTH)/third_party/mesa/mesa.gyp:osmesa',
],
}],
['OS=="win"', {
'dependencies': [
'chromiumviews',
'<(DEPTH)/pdf/pdf.gyp:pdf',
],
}],
],
},
{
'target_name': 'chromiumcontent',
# Build chromiumcontent as shared_library otherwise some static libraries
# will not build.
'type': 'shared_library',
'dependencies': [
'<(DEPTH)/base/base.gyp:base_prefs',
'<(DEPTH)/content/content.gyp:content',
'<(DEPTH)/content/content.gyp:content_app_both',
'<(DEPTH)/content/content_shell_and_tests.gyp:content_shell_pak',
'<(DEPTH)/net/net.gyp:net_with_v8',
'<(DEPTH)/ppapi/ppapi_internal.gyp:ppapi_host',
'<(DEPTH)/ppapi/ppapi_internal.gyp:ppapi_proxy',
'<(DEPTH)/ppapi/ppapi_internal.gyp:ppapi_ipc',
'<(DEPTH)/ppapi/ppapi_internal.gyp:ppapi_shared',
],
'sources': [
'empty.cc',
],
},
],
'conditions': [
['OS in ["win", "linux"]', {
'targets': [
{
'target_name': 'chromiumviews',
'type': 'none',
'dependencies': [
'<(DEPTH)/ui/content_accelerators/ui_content_accelerators.gyp:ui_content_accelerators',
'<(DEPTH)/ui/display/display.gyp:display',
'<(DEPTH)/ui/display/display.gyp:display_util',
'<(DEPTH)/ui/views/controls/webview/webview.gyp:webview',
'<(DEPTH)/ui/views/views.gyp:views',
'<(DEPTH)/ui/wm/wm.gyp:wm',
],
'conditions': [
['OS=="linux"', {
'dependencies': [
'<(DEPTH)/chrome/browser/ui/libgtk2ui/libgtk2ui.gyp:gtk2ui',
],
}], # OS=="linux"
],
},
],
}],
],
}
|
Python
| 0
|
@@ -523,48 +523,8 @@
s',%0A
- '%3C(DEPTH)/pdf/pdf.gyp:pdf',%0A
@@ -1259,24 +1259,16 @@
shared',
-
%0A %5D
@@ -1309,32 +1309,180 @@
y.cc',%0A %5D,%0A
+ 'conditions': %5B%0A %5B'OS==%22win%22', %7B%0A 'dependencies': %5B%0A '%3C(DEPTH)/pdf/pdf.gyp:pdf',%0A %5D,%0A %7D%5D,%0A %5D,%0A
%7D,%0A %5D,%0A 'c
|
64c7cea7768902f98b58813fca867084acf469a6
|
Rename some metrics
|
desktop/core/src/desktop/metrics.py
|
desktop/core/src/desktop/metrics.py
|
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import gc
import multiprocessing
import threading
from django.contrib.auth.models import User
from desktop.lib.metrics import global_registry
global_registry().gauge_callback(
name='python.threads.total',
callback=lambda: len(threading.enumerate()),
label='Threads',
description='The total number of threads',
numerator='threads',
)
global_registry().gauge_callback(
name='python.threads.daemon',
callback=lambda: sum(1 for thread in threading.enumerate() if thread.isDaemon()),
label='Daemon Threads',
description='The number of daemon threads',
numerator='threads',
)
# ------------------------------------------------------------------------------
global_registry().gauge_callback(
name='python.multiprocessing',
callback=lambda: len(multiprocessing.active_children()),
label='Multiprocessing Processes',
description='Number of multiprocessing processes',
numerator='processes',
)
global_registry().gauge_callback(
name='python.multiprocessing.daemon',
callback=lambda: sum(1 for proc in multiprocessing.active_children() if proc.daemon),
label='Daemon Multiprocessing Processes',
description='Number of daemon multiprocessing processes',
numerator='processes',
)
# ------------------------------------------------------------------------------
for i in xrange(3):
global_registry().gauge_callback(
name='python.gc.generation.%s' % i,
callback=lambda: gc.get_count()[i],
label='GC Object Count in Generation %s' % i,
description='Total number of objects in garbage collection generation %s' % i,
numerator='objects',
)
global_registry().gauge_callback(
name='python.gc.objects',
callback=lambda: sum(gc.get_count()),
label='GC Object Count',
description='Total number of objects in the Python process',
numerator='objects',
)
# ------------------------------------------------------------------------------
active_requests = global_registry().counter(
name='requests.active',
label='Active Requests',
description='Number of currently active requests',
numerator='requests',
treat_counter_as_gauge=True,
)
request_exceptions = global_registry().counter(
name='requests.exceptions',
label='Request Exceptions',
description='Number requests that resulted in an exception',
numerator='requests',
)
response_time = global_registry().timer(
name='requests.response-time',
label='Request Response Time',
description='Time taken to respond to requests across all Hue endpoints',
numerator='seconds',
counter_numerator='requests',
rate_denominator='seconds',
)
# ------------------------------------------------------------------------------
user_count = global_registry().gauge_callback(
name='users',
callback=lambda: User.objects.count(),
label='Users',
description='Total number of user accounts',
numerator='users',
)
# ------------------------------------------------------------------------------
ldap_authentication_time = global_registry().timer(
name='auth.ldap.auth-time',
label='LDAP Authentication Time',
description='The time spent waiting for LDAP to authenticate a user',
numerator='seconds',
counter_numerator='authentications',
rate_denominator='seconds',
)
pam_authentication_time = global_registry().timer(
name='auth.pam.auth-time',
label='PAM Authentication Time',
description='The time spent waiting for PAM to authenticate a user',
numerator='seconds',
counter_numerator='authentications',
rate_denominator='seconds',
)
spnego_authentication_time = global_registry().timer(
name='auth.spnego.auth-time',
label='SPNEGO Authentication Time',
description='The time spent waiting for SPNEGO to authenticate a user',
numerator='seconds',
counter_numerator='authentications',
rate_denominator='seconds',
)
|
Python
| 0.000106
|
@@ -984,39 +984,32 @@
back(%0A name='
-python.
threads.total',%0A
@@ -1008,16 +1008,16 @@
total',%0A
+
call
@@ -1197,23 +1197,16 @@
name='
-python.
threads.
@@ -1522,39 +1522,32 @@
back(%0A name='
-python.
multiprocessing'
@@ -1545,16 +1545,32 @@
ocessing
+.processes.total
',%0A c
@@ -1793,23 +1793,16 @@
name='
-python.
multipro
@@ -1809,16 +1809,26 @@
cessing.
+processes.
daemon',
|
2656e59215e0f94892a79e8f94cd90b8717fe8d6
|
change list style
|
archivebox/cli/archivebox_add.py
|
archivebox/cli/archivebox_add.py
|
#!/usr/bin/env python3
__package__ = 'archivebox.cli'
__command__ = 'archivebox add'
import sys
import argparse
from typing import List, Optional, IO
from ..main import add
from ..util import docstring
from ..parsers import PARSERS
from ..config import OUTPUT_DIR, ONLY_NEW
from ..logging_util import SmartFormatter, accept_stdin, stderr
@docstring(add.__doc__)
def main(args: Optional[List[str]]=None, stdin: Optional[IO]=None, pwd: Optional[str]=None) -> None:
parser = argparse.ArgumentParser(
prog=__command__,
description=add.__doc__,
add_help=True,
formatter_class=SmartFormatter,
)
parser.add_argument(
'--update-all', #'-n',
action='store_true',
default=not ONLY_NEW, # when ONLY_NEW=True we skip updating old links
help="Also retry previously skipped/failed links when adding new links",
)
parser.add_argument(
'--index-only', #'-o',
action='store_true',
help="Add the links to the main index without archiving them",
)
parser.add_argument(
'urls',
nargs='*',
type=str,
default=None,
help=(
'URLs or paths to archive e.g.:\n'
' https://getpocket.com/users/USERNAME/feed/all\n'
' https://example.com/some/rss/feed.xml\n'
' https://example.com\n'
' ~/Downloads/firefox_bookmarks_export.html\n'
' ~/Desktop/sites_list.csv\n'
)
)
parser.add_argument(
"--depth",
action="store",
default=0,
choices=[0, 1],
type=int,
help="Recursively archive all linked pages up to this many hops away"
)
parser.add_argument(
"--overwrite",
default=False,
action="store_true",
help="Re-archive URLs from scratch, overwriting any existing files"
)
parser.add_argument(
"--init", #'-i',
action='store_true',
help="Init/upgrade the curent data directory before adding",
)
parser.add_argument(
"--extract",
type=str,
help="Pass a list of the extractors to be used. If the method name is not correct, it will be ignored. \
This does not take precedence over the configuration",
default=""
)
parser.add_argument(
"--parser",
type=str,
help="Parser used to read inputted URLs.",
default="auto",
choices=["auto"] + list(PARSERS.keys())
)
command = parser.parse_args(args or ())
urls = command.urls
stdin_urls = accept_stdin(stdin)
if (stdin_urls and urls) or (not stdin and not urls):
stderr(
'[X] You must pass URLs/paths to add via stdin or CLI arguments.\n',
color='red',
)
raise SystemExit(2)
add(
urls=stdin_urls or urls,
depth=command.depth,
update_all=command.update_all,
index_only=command.index_only,
overwrite=command.overwrite,
init=command.init,
extractors=command.extract,
parser=command.parser,
out_dir=pwd or OUTPUT_DIR,
)
if __name__ == '__main__':
main(args=sys.argv[1:], stdin=sys.stdin)
# TODO: Implement these
#
# parser.add_argument(
# '--mirror', #'-m',
# action='store_true',
# help='Archive an entire site (finding all linked pages below it on the same domain)',
# )
# parser.add_argument(
# '--crawler', #'-r',
# choices=('depth_first', 'breadth_first'),
# help='Controls which crawler to use in order to find outlinks in a given page',
# default=None,
# )
|
Python
| 0.000002
|
@@ -2475,17 +2475,11 @@
uto%22
-%5D + list(
+, *
PARS
@@ -2488,17 +2488,18 @@
S.keys()
-)
+%5D,
%0A )%0A
|
07f9edc5764d3002fd3d4c1018a6ec43d5046dd0
|
Fix unused import.
|
kinto2xml/tests/test_verifier.py
|
kinto2xml/tests/test_verifier.py
|
import json
import mock
import os
import sys
from six import StringIO
from kinto2xml.verifier import sort_lists_in_dict, main
def build_path(filename):
return os.path.join(os.path.dirname(__file__), 'fixtures', filename)
def test_sort_lists_in_dict_handles_recursion():
assert json.dumps(sort_lists_in_dict({
'@name': 'judith',
'validators': [{
'@id': 'gbc',
'toto': ['b', 'a']
}, {
'@id': 'abc',
'toto': ['c', 'd', 'a'],
'apps': [{
'@guid': 'cde',
'minVersion': 2,
}, {
'@guid': 'abc',
'minVersion': 3
}]
}]
}), sort_keys=True) == (
'{"@name": "judith", "validators": [{'
'"@id": "abc", '
'"apps": [{"@guid": "abc", "minVersion": 3}, '
'{"@guid": "cde", "minVersion": 2}], '
'"toto": ["a", "c", "d"]}, '
'{"@id": "gbc", "toto": ["a", "b"]}]}'
)
def test_files_checking():
with mock.patch('sys.stdout', new_callable=StringIO) as stdout:
with mock.patch('sys.stderr', new_callable=StringIO) as stderr:
main([build_path('blocklist.xml'),
build_path('generated-blocklist.xml')])
assert stdout.getvalue() == ''
assert stderr.getvalue() == ''
def test_fails_if_file_does_not_exists():
assert main(['unknown']) == 1
def test_verifier_supports_http_links():
with open(build_path('blocklist.xml')) as f:
blocklist_content = f.read()
response = mock.MagicMock(text=blocklist_content)
with mock.patch('requests.get', return_value=response) as mocked_request:
main(['http://first_server/url/', 'http://second_server/url/'])
mocked_request.assert_any_call('http://first_server/url/')
mocked_request.assert_any_call('http://second_server/url/')
def test_clean_option_does_not_remove_tmp_files():
with mock.patch('sys.stderr', new_callable=StringIO) as stderr:
main([build_path('blocklist.xml'),
build_path('generated-blocklist.xml'), '-k'])
assert stderr.getvalue().startswith('$ diff -u'), stderr.getvalue()
def test_in_case_diff_fails_display_the_error():
with mock.patch('sys.stderr', new_callable=StringIO) as stderr:
main([build_path('fennec-blocklist.xml'),
build_path('generated-blocklist.xml')])
assert stderr.getvalue() != ''
|
Python
| 0
|
@@ -31,19 +31,8 @@
os%0A
-import sys%0A
from
|
aa7c58eb04599138bc97f93245c1acf3c5b81f85
|
Revert of [devil] Use /data/local/tmp for the command line on eng + userdebug builds. (patchset #2 id:20001 of https://codereview.chromium.org/2275863002/ )
|
devil/devil/android/flag_changer.py
|
devil/devil/android/flag_changer.py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from devil.android import device_errors
class FlagChanger(object):
"""Changes the flags Chrome runs with.
Flags can be temporarily set for a particular set of unit tests. These
tests should call Restore() to revert the flags to their original state
once the tests have completed.
"""
def __init__(self, device, cmdline_file):
"""Initializes the FlagChanger and records the original arguments.
Args:
device: A DeviceUtils instance.
cmdline_file: Path to the command line file on the device.
"""
self._device = device
# Unrooted devices have limited access to the file system,
# as do all devices on Nougat thanks to tighter SELinux controls.
# Place files in /data/local/tmp/ rather than /data/local/
if ((not device.HasRoot() or device.build_type in ('eng', 'userdebug'))
and not '/data/local/tmp/' in cmdline_file):
self._cmdline_file = cmdline_file.replace('/data/local/',
'/data/local/tmp/')
else:
self._cmdline_file = cmdline_file
stored_flags = ''
if self._device.PathExists(self._cmdline_file):
try:
stored_flags = self._device.ReadFile(self._cmdline_file).strip()
except device_errors.CommandFailedError:
pass
# Store the flags as a set to facilitate adding and removing flags.
self._state_stack = [set(self._TokenizeFlags(stored_flags))]
def ReplaceFlags(self, flags):
"""Replaces the flags in the command line with the ones provided.
Saves the current flags state on the stack, so a call to Restore will
change the state back to the one preceeding the call to ReplaceFlags.
Args:
flags: A sequence of command line flags to set, eg. ['--single-process'].
Note: this should include flags only, not the name of a command
to run (ie. there is no need to start the sequence with 'chrome').
"""
new_flags = set(flags)
self._state_stack.append(new_flags)
self._UpdateCommandLineFile()
def AddFlags(self, flags):
"""Appends flags to the command line if they aren't already there.
Saves the current flags state on the stack, so a call to Restore will
change the state back to the one preceeding the call to AddFlags.
Args:
flags: A sequence of flags to add on, eg. ['--single-process'].
"""
self.PushFlags(add=flags)
def RemoveFlags(self, flags):
"""Removes flags from the command line, if they exist.
Saves the current flags state on the stack, so a call to Restore will
change the state back to the one preceeding the call to RemoveFlags.
Note that calling RemoveFlags after AddFlags will result in having
two nested states.
Args:
flags: A sequence of flags to remove, eg. ['--single-process']. Note
that we expect a complete match when removing flags; if you want
to remove a switch with a value, you must use the exact string
used to add it in the first place.
"""
self.PushFlags(remove=flags)
def PushFlags(self, add=None, remove=None):
"""Appends and removes flags to/from the command line if they aren't already
there. Saves the current flags state on the stack, so a call to Restore
will change the state back to the one preceeding the call to PushFlags.
Args:
add: A list of flags to add on, eg. ['--single-process'].
remove: A list of flags to remove, eg. ['--single-process']. Note that we
expect a complete match when removing flags; if you want to remove
a switch with a value, you must use the exact string used to add
it in the first place.
"""
new_flags = self._state_stack[-1].copy()
if add:
new_flags.update(add)
if remove:
new_flags.difference_update(remove)
self.ReplaceFlags(new_flags)
def Restore(self):
"""Restores the flags to their state prior to the last AddFlags or
RemoveFlags call.
"""
# The initial state must always remain on the stack.
assert len(self._state_stack) > 1, (
"Mismatch between calls to Add/RemoveFlags and Restore")
self._state_stack.pop()
self._UpdateCommandLineFile()
def _UpdateCommandLineFile(self):
"""Writes out the command line to the file, or removes it if empty."""
current_flags = list(self._state_stack[-1])
logging.info('Current flags: %s', current_flags)
# Root is not required to write to /data/local/tmp/.
use_root = '/data/local/tmp/' not in self._cmdline_file
if current_flags:
# The first command line argument doesn't matter as we are not actually
# launching the chrome executable using this command line.
cmd_line = ' '.join(['_'] + current_flags)
self._device.WriteFile(
self._cmdline_file, cmd_line, as_root=use_root)
file_contents = self._device.ReadFile(
self._cmdline_file, as_root=use_root).rstrip()
assert file_contents == cmd_line, (
'Failed to set the command line file at %s' % self._cmdline_file)
else:
self._device.RunShellCommand('rm ' + self._cmdline_file,
as_root=use_root)
assert not self._device.FileExists(self._cmdline_file), (
'Failed to remove the command line file at %s' % self._cmdline_file)
@staticmethod
def _TokenizeFlags(line):
"""Changes the string containing the command line into a list of flags.
Follows similar logic to CommandLine.java::tokenizeQuotedArguments:
* Flags are split using whitespace, unless the whitespace is within a
pair of quotation marks.
* Unlike the Java version, we keep the quotation marks around switch
values since we need them to re-create the file when new flags are
appended.
Args:
line: A string containing the entire command line. The first token is
assumed to be the program name.
"""
if not line:
return []
tokenized_flags = []
current_flag = ""
within_quotations = False
# Move through the string character by character and build up each flag
# along the way.
for c in line.strip():
if c is '"':
if len(current_flag) > 0 and current_flag[-1] == '\\':
# Last char was a backslash; pop it, and treat this " as a literal.
current_flag = current_flag[0:-1] + '"'
else:
within_quotations = not within_quotations
current_flag += c
elif not within_quotations and (c is ' ' or c is '\t'):
if current_flag is not "":
tokenized_flags.append(current_flag)
current_flag = ""
else:
current_flag += c
# Tack on the last flag.
if not current_flag:
if within_quotations:
logging.warn('Unterminated quoted argument: ' + line)
else:
tokenized_flags.append(current_flag)
# Return everything but the program name.
return tokenized_flags[1:]
|
Python
| 0.000027
|
@@ -810,78 +810,8 @@
stem
-,%0A # as do all devices on Nougat thanks to tighter SELinux controls
.%0A
@@ -882,10 +882,8 @@
if
-((
not
@@ -902,62 +902,8 @@
ot()
- or device.build_type in ('eng', 'userdebug'))%0A
and
@@ -933,33 +933,32 @@
in cmdline_file
-)
:%0A self._cm
|
51f4d40cf6750d35f10f37d939a2c30c5f26d300
|
Update script to write results to the database.
|
backend/scripts/updatedf.py
|
backend/scripts/updatedf.py
|
#!/usr/bin/env python
#import hashlib
import os
def main():
for root, dirs, files in os.walk("/mcfs/data/materialscommons"):
for f in files:
print f
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -16,17 +16,16 @@
python%0A%0A
-#
import h
@@ -45,133 +45,531 @@
os%0A
-%0Adef main():%0A for root, dirs, files in os.walk(%22/mcfs/data/materialscommons%22):%0A for f in files:%0A print f
+import rethinkdb as r%0A%0Adef main():%0A conn = r.connect('localhost', 28015, db='materialscommons')%0A for root, dirs, files in os.walk(%22/mcfs/data/materialscommons%22):%0A for f in files:%0A path = os.path.join(root, f)%0A with open(path) as fd:%0A data = fd.read()%0A hash = hashlib.md5(data).hexdigest()%0A s = os.stat(path).st_size%0A r.table('datafiles').get(f).update(%7B'size':s, 'checksum':hash%7D).run(conn)%0A print %22%25s:%25s:%25d%22 %25(path, hash, s)
%0A%0Aif
|
45b9b2c838bcf9fd8f73d4b5f064e2d81bdf092a
|
Fix up the firefox aurora mobile parsing.
|
user_agents/parsers.py
|
user_agents/parsers.py
|
import sys
from collections import namedtuple
from ua_parser import user_agent_parser
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str
else:
string_types = basestring
MOBILE_DEVICE_FAMILIES = (
'iPhone',
'iPod',
'Generic Smartphone',
'Generic Feature Phone',
)
MOBILE_OS_FAMILIES = (
'Windows Phone',
'Windows Phone OS', # Earlier versions of ua-parser returns Windows Phone OS
'Symbian OS',
)
TABLET_DEVICE_FAMILIES = (
'iPad',
'BlackBerry Playbook',
'Blackberry Playbook', # Earlier versions of ua-parser returns "Blackberry" instead of "BlackBerry"
'Kindle',
'Kindle Fire',
)
TOUCH_CAPABLE_OS_FAMILIES = (
'iOS',
'Android',
'Windows Phone',
'Windows Phone OS',
'Windows RT',
)
TOUCH_CAPABLE_DEVICE_FAMILIES = (
'BlackBerry Playbook',
'Blackberry Playbook',
'Kindle Fire',
)
def parse_version(major=None, minor=None, patch=None, patch_minor=None):
# Returns version number tuple, attributes will be integer if they're numbers
if major is not None and isinstance(major, string_types):
major = int(major) if major.isdigit() else major
if minor is not None and isinstance(minor, string_types):
minor = int(minor) if minor.isdigit() else minor
if patch is not None and isinstance(patch, string_types):
patch = int(patch) if patch.isdigit() else patch
if patch_minor is not None and isinstance(patch_minor, string_types):
patch_minor = int(patch_minor) if patch_minor.isdigit() else patch_minor
if patch_minor:
return (major, minor, patch, patch_minor)
elif patch:
return (major, minor, patch)
elif minor:
return (major, minor)
elif major:
return (major,)
else:
return tuple()
Browser = namedtuple('Browser', ['family', 'version', 'version_string'])
def parse_browser(family, major=None, minor=None, patch=None, patch_minor=None):
# Returns a browser object
version = parse_version(major, minor, patch)
version_string = '.'.join([str(v) for v in version])
return Browser(family, version, version_string)
OperatingSystem = namedtuple('OperatingSystem', ['family', 'version', 'version_string'])
def parse_operating_system(family, major=None, minor=None, patch=None, patch_minor=None):
version = parse_version(major, minor, patch)
version_string = '.'.join([str(v) for v in version])
return OperatingSystem(family, version, version_string)
Device = namedtuple('Device', ['family'])
def parse_device(family):
return Device(family)
class UserAgent(object):
def __init__(self, user_agent_string):
ua_dict = user_agent_parser.Parse(user_agent_string)
self.ua_string = user_agent_string
self.os = parse_operating_system(**ua_dict['os'])
self.browser = parse_browser(**ua_dict['user_agent'])
self.device = parse_device(**ua_dict['device'])
def _is_android_tablet(self):
# Newer Android tablets don't have "Mobile" in their user agent string,
# older ones like Galaxy Tab still have "Mobile" though they're not
if 'Mobile Safari' not in self.ua_string:
return True
if 'SCH-' in self.ua_string:
return True
return False
def _is_blackberry_touch_capable_device(self):
# A helper to determine whether a BB phone has touch capabilities
# Blackberry Bold Touch series begins with 99XX
if 'Blackberry 99' in self.device.family:
return True
if 'Blackberry 95' in self.device.family: # BB Storm devices
return True
if 'Blackberry 95' in self.device.family: # BB Torch devices
return True
return False
@property
def is_tablet(self):
if self.device.family in TABLET_DEVICE_FAMILIES:
return True
if (self.os.family == 'Android' and self._is_android_tablet()):
return True
if self.os.family == 'Windows RT':
return True
return False
@property
def is_mobile(self):
# First check for mobile device families
if self.device.family in MOBILE_DEVICE_FAMILIES:
return True
# Device is considered Mobile OS is Android and not tablet
# This is not fool proof but would have to suffice for now
if self.os.family == 'Android' and not self.is_tablet:
return True
if self.os.family == 'BlackBerry OS' and self.device.family != 'Blackberry Playbook':
return True
if self.os.family in MOBILE_OS_FAMILIES:
return True
# TODO: remove after https://github.com/tobie/ua-parser/issues/126 is closed
if 'J2ME' in self.ua_string or 'MIDP' in self.ua_string:
return True
return False
@property
def is_touch_capable(self):
# TODO: detect touch capable Nokia devices
if self.os.family in TOUCH_CAPABLE_OS_FAMILIES:
return True
if self.device.family in TOUCH_CAPABLE_DEVICE_FAMILIES:
return True
if self.os.family == 'Windows 8' and 'Touch' in self.ua_string:
return True
if 'BlackBerry' in self.os.family and self._is_blackberry_touch_capable_device():
return True
return False
@property
def is_pc(self):
# Returns True for "PC" devices (Windows, Mac and Linux)
if 'Windows NT' in self.ua_string:
return True
# TODO: remove after https://github.com/tobie/ua-parser/issues/127 is closed
if self.os.family == 'Mac OS X' and 'Silk' not in self.ua_string:
return True
if 'Linux' in self.ua_string and 'X11' in self.ua_string:
return True
return False
@property
def is_bot(self):
return True if self.device.family == 'Spider' else False
def parse(user_agent_string):
return UserAgent(user_agent_string)
|
Python
| 0.000004
|
@@ -3171,16 +3171,17 @@
if
+(
'Mobile
@@ -3201,32 +3201,93 @@
n self.ua_string
+ and%0A self.browser.family != %22Firefox Mobile%22)
:%0A re
@@ -6039,24 +6039,25 @@
Agent(user_agent_string)
+%0A
|
599672acbf925cab634bc15ab47055aabb131efd
|
Fix xkcd text regex. Closes #46
|
dosagelib/plugins/x.py
|
dosagelib/plugins/x.py
|
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2013 Bastian Kleineidam
from re import compile
from ..scraper import _BasicScraper
from ..helpers import bounceStarter
from ..util import tagre
class xkcd(_BasicScraper):
url = 'http://xkcd.com/'
starter = bounceStarter(url, compile(tagre("a", "href", r'(/\d+/)', before="next")))
stripUrl = url + '%s/'
firstStripUrl = stripUrl % '1'
imageSearch = compile(tagre("img", "src", r'(http://imgs\.xkcd\.com/comics/[^"]+)'))
prevSearch = compile(tagre("a", "href", r'(/\d+/)', before="prev"))
help = 'Index format: n (unpadded)'
description = u'A webcomic of romance, sarcasm, math, and language.'
textSearch = compile(tagre("img", "title", r'([^"]+)'))
adult = True
@classmethod
def namer(cls, imageUrl, pageUrl):
index = int(pageUrl.rstrip('/').rsplit('/', 1)[-1])
name = imageUrl.rsplit('/', 1)[-1].split('.')[0]
return '%03d-%s' % (index, name)
@classmethod
def imageUrlModifier(cls, url, data):
if url and '/large/' in data:
return url.replace(".png", "_large.png")
return url
|
Python
| 0.999991
|
@@ -789,24 +789,66 @@
, r'(%5B%5E%22%5D+)'
+, before=r'http://imgs%5C.xkcd%5C.com/comics/'
))%0A adult
|
f0593b2d69730441b5a486e27ed6eb7001939bf4
|
Include unlimited features for enterprise
|
corehq/apps/accounting/bootstrap/config/user_buckets_august_2018.py
|
corehq/apps/accounting/bootstrap/config/user_buckets_august_2018.py
|
from __future__ import absolute_import
from __future__ import unicode_literals
from decimal import Decimal
from corehq.apps.accounting.models import (
FeatureType,
SoftwarePlanEdition,
)
BOOTSTRAP_CONFIG = {
(SoftwarePlanEdition.COMMUNITY, False, False): {
'role': 'community_plan_v1',
'product_rate_monthly_fee': Decimal('0.00'),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=10, per_excess_fee=Decimal('2.00')),
FeatureType.SMS: dict(monthly_limit=0),
}
},
(SoftwarePlanEdition.STANDARD, False, False): {
'role': 'standard_plan_v0',
'product_rate_monthly_fee': Decimal('300.00'),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=50, per_excess_fee=Decimal('2.00')),
FeatureType.SMS: dict(monthly_limit=50),
}
},
(SoftwarePlanEdition.PRO, False, False): {
'role': 'pro_plan_v0',
'product_rate_monthly_fee': Decimal('600.00'),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=250, per_excess_fee=Decimal('2.00')),
FeatureType.SMS: dict(monthly_limit=50),
}
},
(SoftwarePlanEdition.ADVANCED, False, False): {
'role': 'advanced_plan_v0',
'product_rate_monthly_fee': Decimal('1200.00'),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=500, per_excess_fee=Decimal('2.00')),
FeatureType.SMS: dict(monthly_limit=50),
}
},
(SoftwarePlanEdition.ADVANCED, True, False): {
'role': 'advanced_plan_v0',
'product_rate_monthly_fee': Decimal('0.00'),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=10, per_excess_fee=Decimal('2.00')),
FeatureType.SMS: dict(monthly_limit=0),
}
}
}
|
Python
| 0
|
@@ -187,16 +187,44 @@
dition,%0A
+ UNLIMITED_FEATURE_USAGE%0A
)%0A%0ABOOTS
@@ -1743,34 +1743,55 @@
t(monthly_limit=
-10
+UNLIMITED_FEATURE_USAGE
, per_excess_fee
@@ -1792,33 +1792,33 @@
ss_fee=Decimal('
-2
+0
.00')),%0A
@@ -1849,33 +1849,55 @@
t(monthly_limit=
-0
+UNLIMITED_FEATURE_USAGE
),%0A %7D%0A
|
ab32f1bbe34847617a1ec5f227adaefb1497e62d
|
declare some constants
|
warped_alloy.py
|
warped_alloy.py
|
#!/usr/bin/env python
from __future__ import absolute_import, print_function
print("FIRST")
import __main__
import sys
import os
from socket import socketpair, AF_INET, AF_UNIX, SOCK_STREAM, fromfd
import attr
from twisted.python.usage import Options
from twisted.internet.task import react
from twisted.internet.defer import Deferred, inlineCallbacks
from twisted.internet.endpoints import TCP4ServerEndpoint
from twisted.internet.protocol import Protocol, Factory, ProcessProtocol
from twisted.protocols.amp import AMP, Command, Descriptor
from twisted.internet.unix import Server as UNIXServer
from twisted.web.server import Site
from twisted.web.static import Data
def show(*args):
"""
"""
print(*args)
sys.stdout.flush()
sys.stderr.flush()
class ManagerServerNotReallyProtocol(Protocol, object):
"""
"""
def connectionMade(self):
"""
"""
transport = self.transport
# goodbye, sweet reactor
socketObject = transport.getHandle()
self.factory.mpmManager.sendOutFileDescriptor(socketObject.fileno())
transport.stopReading()
transport.stopWriting()
class ManagerServerFactory(Factory, object):
"""
"""
def __init__(self, mpmManager):
"""
"""
self.mpmManager = mpmManager
protocol = ManagerServerNotReallyProtocol
class ManagerOptions(Options, object):
"""
"""
def postOptions(self):
"""
"""
show("manager-ing")
@inlineCallbacks
def go(self, reactor):
"""
"""
mgr = MPMManager(reactor)
endpoint = TCP4ServerEndpoint(reactor, 8123)
msf = ManagerServerFactory(mgr)
yield endpoint.listen(msf)
mgr.newSubProcess()
yield Deferred()
class Ping(Command, object):
"""
"""
class SendDescriptor(Command, object):
"""
"""
arguments = [("descriptor", Descriptor())]
response = []
errors = []
class ConnectionFromManager(AMP, object):
"""
"""
def __init__(self, reactor, factory):
"""
"""
super(ConnectionFromManager, self).__init__()
self.factory = factory
self.reactor = reactor
def connectionMade(self):
"""
"""
show("CFM")
def connectionLost(self, reason):
"""
"""
super(ConnectionFromManager, self).connectionLost(reason)
show("CL")
@SendDescriptor.responder
def receiveDescriptor(self, descriptor):
"""
"""
show("receivering")
self.reactor.adoptStreamConnection(descriptor, AF_INET, self.factory)
show("receivated")
return {}
def fileDescriptorReceived(self, descriptor):
"""
"""
show("FDR", descriptor)
return super(ConnectionFromManager, self).fileDescriptorReceived(descriptor)
@Ping.responder
def pung(self):
"""
"""
show("received subprocess ping")
sys.stdout.flush()
return {}
class WorkerOptions(Options, object):
"""
"""
def postOptions(self):
"""
"""
show("worker-ing")
def go(self, reactor):
"""
"""
data = Data("Hello world\n", "text/plain")
data.putChild("", data)
factory = Site(data)
# TODO: adoptStreamConnection should really support AF_UNIX
protocol = ConnectionFromManager(reactor, factory)
fileDescriptor = 7
skt = fromfd(fileDescriptor, AF_UNIX, SOCK_STREAM)
show("fromfd", skt)
# os.close(fileDescriptor)
serverTransport = UNIXServer(skt, protocol, None, None, 1234, reactor)
show("transported")
protocol.makeConnection(serverTransport)
show("reading")
serverTransport.startReading()
show("waiting...")
return Deferred()
class CommandLineOptions(Options, object):
"""
"""
synopsis = "Usage: warped_alloy [options]"
subCommands = [
# ['command-name', 'command-shortcut', ParserClass, documentation]
['manager', 'm', ManagerOptions, 'For managing'],
['worker', 'w', WorkerOptions, 'For workering'],
]
defaultSubCommand = 'manager'
@attr.s
class MyProcessProtocol(ProcessProtocol, object):
"""
"""
mpmManager = attr.ib()
def outReceived(self, data):
"""
"""
show("sub-out:", repr(data))
def errReceived(self, data):
"""
"""
show("sub-err:", repr(data))
def processExited(self, reason):
"""
"""
show("exit?", reason)
def processEnded(self, reason):
"""
"""
show("ended?", reason)
class OneWorkerProtocol(AMP, object):
"""
"""
def connectionMade(self):
"""
"""
show("OWP_CM")
def connectionLost(self, reason):
"""
"""
show("OWP_CL", reason)
super(OneWorkerProtocol, self).connectionLost(reason)
@inlineCallbacks
def sendFD(self, fileDescriptor):
"""
"""
show("pinging...")
result1 = yield self.callRemote(Ping)
show("pinged!", result1)
show("sending??????", result1, fileDescriptor)
d = self.callRemote(SendDescriptor,
descriptor=fileDescriptor)
show("send...ing?")
result = yield d
show("sended/!?@?!", result)
@attr.s
class MPMManager(object):
"""
"""
reactor = attr.ib()
openSubprocessConnections = attr.ib(default=attr.Factory(list))
def sendOutFileDescriptor(self, fileDescriptor):
if not self.openSubprocessConnections:
self.newSubProcess()
self.openSubprocessConnections[0].sendFD(fileDescriptor)
def newSubProcess(self):
"""
"""
here, there = socketpair(AF_UNIX, SOCK_STREAM)
owp = OneWorkerProtocol()
serverTransport = UNIXServer(here, owp,
None, None, 4321,
self.reactor)
owp.makeConnection(serverTransport)
script = __main__.__file__
argv = [sys.executable, script, b'w']
show("argv?", argv)
procTrans = self.reactor.spawnProcess(
MyProcessProtocol(self), sys.executable,
args=argv,
env=os.environ.copy(),
childFDs={
0: 'w',
1: 'r',
2: 'r',
7: there.fileno(),
}
)
show(procTrans)
# there.close()
serverTransport.startReading()
self.openSubprocessConnections.append(owp)
@react
def main(reactor):
"""
"""
show("BOOTSTRAP")
clo = CommandLineOptions()
clo.parseOptions(sys.argv[1:])
subCommandParser = clo.subOptions
return subCommandParser.go(reactor)
|
Python
| 0.000197
|
@@ -671,16 +671,75 @@
t Data%0A%0A
+STDIN = 0%0ASTDOUT = 1%0ASTDERR = 2%0AMAGIC_FILE_DESCRIPTOR = 7%0A%0A
%0Adef sho
|
ceb53a36fd65eb64369ee423add73d6ced93b352
|
Reset default class to original class after initializing the Astropy logger
|
astropy/config/logging_helper.py
|
astropy/config/logging_helper.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module defines a logging class based on the built-in logging module"""
from __future__ import print_function
import os
import sys
import inspect
import logging
import warnings
from contextlib import contextmanager
from . import ConfigurationItem
from ..utils.console import color_print
from ..utils.misc import find_current_module
__all__ = ['log', 'AstropyLogger', 'LoggingError']
class LoggingError(Exception):
pass
# Read in configuration
LOG_LEVEL = ConfigurationItem('log_level', 'WARN',
"Threshold for the logging messages. Logging "
"messages that are less severe than this level "
"will be ignored. The levels are 'DEBUG', "
"'INFO', 'WARNING', 'ERROR'")
USE_COLOR = ConfigurationItem('use_color', True,
"Whether to use color for the level names")
LOG_WARNINGS = ConfigurationItem('log_warnings', False,
"Whether to log warnings.warn calls")
LOG_EXCEPTIONS = ConfigurationItem('log_exceptions', False,
"Whether to log exceptions before raising them")
LOG_TO_FILE = ConfigurationItem('log_to_file', True,
"Whether to always log messages to a log "
"file")
LOG_FILE_PATH = ConfigurationItem('log_file_path', '~/.astropy/astropy.log',
"The file to log messages to")
LOG_FILE_LEVEL = ConfigurationItem('log_file_level', 'WARN',
"Threshold for logging messages to "
"log_file_path")
LOG_FILE_FORMAT = ConfigurationItem('log_file_format', "%(asctime)r, "
"%(origin)r, %(levelname)r, %(message)r",
"Format for log file entries")
class FilterOrigin(object):
'''A filter for the record origin'''
def __init__(self, origin):
self.origin = origin
def filter(self, record):
return record.origin.startswith(self.origin)
class ListHandler(logging.Handler):
'''A handler that can be used to capture the records in a list'''
def __init__(self, filter_level=None, filter_origin=None):
logging.Handler.__init__(self)
self.log_list = []
def emit(self, record):
self.log_list.append(record)
Logger = logging.getLoggerClass()
class AstropyLogger(Logger):
def makeRecord(self, name, level, pathname, lineno, msg, args, exc_info, func=None, extra=None):
if extra is None:
extra = {}
if 'origin' not in extra:
current_module = find_current_module(1, finddiff=[True, 'logging'])
if current_module is not None:
extra['origin'] = current_module.__name__
else:
extra['origin'] = 'unknown'
return Logger.makeRecord(self, name, level, pathname, lineno, msg, args, exc_info, func, extra)
_showwarning_orig = None
def _showwarning(self, *args, **kwargs):
self.warn(args[0].message)
def enable_warnings_logging(self):
if self._showwarning_orig is not None:
raise LoggingError("Warnings logging has already been enabled")
self._showwarning_orig = warnings.showwarning
warnings.showwarning = self._showwarning
def disable_warnings_logging(self):
if self._showwarning_orig is None:
raise LoggingError("Warnings logging has not been enabled")
if warnings.showwarning != self._showwarning:
raise LoggingError("Cannot disable warnings logging: warnings.showwarning was not set by this logger, or has been overridden")
warnings.showwarning = self._showwarning_orig
self._showwarning_orig = None
_excepthook_orig = None
def _excepthook(self, type, value, traceback):
try:
origin = inspect.getmodule(traceback.tb_next).__name__
except:
origin = inspect.getmodule(traceback).__name__
self.error(value.message, extra={'origin': origin})
self._excepthook_orig(type, value, traceback)
def enable_exception_logging(self):
if self._excepthook_orig is not None:
raise LoggingError("Exception logging has already been enabled")
self._excepthook_orig = sys.excepthook
sys.excepthook = self._excepthook
def disable_exception_logging(self):
if self._excepthook_orig is None:
raise LoggingError("Exception logging has not been enabled")
if sys.excepthook != self._excepthook:
raise LoggingError("Cannot disable exception logging: sys.excepthook was not set by this logger, or has been overridden")
sys.excepthook = self._excepthook_orig
self._excepthook_orig = None
def setColor(self, use_color):
self._use_color = use_color
def stream_formatter(self, record):
if record.levelno < logging.DEBUG or not self._use_color:
print(record.levelname, end='')
elif(record.levelno < logging.INFO):
color_print(record.levelname, 'magenta', end='')
elif(record.levelno < logging.WARN):
color_print(record.levelname, 'green', end='')
elif(record.levelno < logging.ERROR):
color_print(record.levelname, 'brown', end='')
else:
color_print(record.levelname, 'red', end='')
print(": " + record.msg + " [{:s}]".format(record.origin))
@contextmanager
def log_to_file(self, filename, filter_level=None, filter_origin=None):
fh = logging.FileHandler(filename)
if filter_level is not None:
fh.setLevel(filter_level)
if filter_origin is not None:
fh.addFilter(FilterOrigin(filter_origin))
f = logging.Formatter(LOG_FILE_FORMAT())
fh.setFormatter(f)
self.addHandler(fh)
yield
self.removeHandler(fh)
@contextmanager
def log_to_list(self, filter_level=None, filter_origin=None):
lh = ListHandler()
if filter_level is not None:
lh.setLevel(filter_level)
if filter_origin is not None:
lh.addFilter(FilterOrigin(filter_origin))
self.addHandler(lh)
yield lh.log_list
self.removeHandler(lh)
def set_defaults(self):
# Reset any previously installed hooks
if self._showwarning_orig is not None:
self.disable_warnings_logging()
if self._excepthook_orig is not None:
self.disable_exception_logging()
# Remove all previous handlers
for handler in self.handlers[:]:
self.removeHandler(handler)
# Set levels
self.setLevel(LOG_LEVEL())
self.setColor(USE_COLOR())
# Set up the stdout handler
sh = logging.StreamHandler()
sh.emit = self.stream_formatter
self.addHandler(sh)
# Set up the main log file handler if requested (but this might fail if
# configuration directory or log file is not writeable).
if LOG_TO_FILE():
try:
fh = logging.FileHandler(os.path.expanduser(LOG_FILE_PATH()))
except IOError:
pass
else:
formatter = logging.Formatter(LOG_FILE_FORMAT())
fh.setFormatter(formatter)
fh.setLevel(LOG_FILE_LEVEL())
self.addHandler(fh)
if LOG_WARNINGS():
self.enable_warnings_logging()
if LOG_EXCEPTIONS():
self.enable_exception_logging()
# Set up the class
logging.setLoggerClass(AstropyLogger)
# Initialize logger
log = logging.getLogger('astropy')
log.set_defaults()
|
Python
| 0
|
@@ -7734,16 +7734,82 @@
he class
+ and initialize logger%0A_orig_logger_cls = logging.getLoggerClass()
%0Alogging
@@ -7843,29 +7843,17 @@
er)%0A
-%0A# Initialize logger%0A
+try:%0A
log
@@ -7883,16 +7883,20 @@
tropy')%0A
+
log.set_
@@ -7902,12 +7902,91 @@
_defaults()%0A
+finally:%0A logging.setLoggerClass(_orig_logger_cls)%0A del _orig_logger_cls%0A
|
205f3fb2f36f33c6d13b4541ad49522b799d358d
|
simplify the call to make file list
|
src/actions/server.py
|
src/actions/server.py
|
import sys
from twisted.python import log
from twisted.web.server import Site
from twisted.web.static import File
from twisted.internet import task
from twisted.internet.protocol import DatagramProtocol
from . import utils
class Broadcaster(DatagramProtocol):
"""
Broadcast the ip to all of the listeners on the channel
"""
def __init__(self, address):
self.ip = address # shouldn't this be passed in
self.host = '224.0.0.5'
self.port = 8005
def startProtocol(self):
log.msg("Serving on {0}:8888 and broadcasting IP on 224.0.0.5:8005".format(self.ip))
self.transport.joinGroup(self.host)
self._call = task.LoopingCall(self.sendHeartbeat)
self._loop = self._call.start(5)
def sendHeartbeat(self):
message ='{0}:8888'.format(self.ip)
self.transport.write(message, (self.host, self.port))
def stopProtocol(self):
self._call.stop()
def main(serve_dir):
from twisted.internet import reactor
resource = File(serve_dir)
factory = Site(resource)
log.startLogging(sys.stdout)
serve_at = utils.get_live_interface()
# this is messy
# the program should expect to serve files at a specific location everytime.
utils.make_file_list(utils.list_files(serve_dir),
utils.list_dirs(serve_dir),
serve_dir)
log.msg("Starting fileserver on{0}:8888".format(serve_at))
reactor.listenTCP(8888, factory)
log.msg("Broadcasting")
reactor.listenMulticast(8005, Broadcaster(serve_at))
reactor.run()
if __name__ == "__main__":
main('./')
|
Python
| 0.000129
|
@@ -1274,131 +1274,18 @@
ist(
-utils.list_files(serve_dir), %0A utils.list_dirs(serve_dir),%0A serve_dir)%0A
+serve_dir)
%0A
|
923d49c753acf7d8945d6b79efbdb08363e130a2
|
Bring test_frame_of_test_null_file up to date with new signature of frame_of_test().
|
noseprogressive/tests/test_utils.py
|
noseprogressive/tests/test_utils.py
|
from os import chdir, getcwd
from os.path import dirname, basename
from unittest import TestCase
from nose.tools import eq_
from noseprogressive.utils import human_path, frame_of_test
class UtilsTests(TestCase):
"""Tests for independent little bits and pieces"""
def test_human_path(self):
chdir(dirname(__file__))
eq_(human_path(__file__, getcwd()), basename(__file__))
def test_frame_of_test_null_file(self):
"""Make sure frame_of_test() doesn't crash when test_file is None."""
try:
frame_of_test((None, None, None), [('file', 333)])
except AttributeError:
self.fail('frame_of_test() raised AttributeError.')
|
Python
| 0
|
@@ -574,16 +574,86 @@
, None),
+ NotImplementedError,%0A NotImplementedError(),
%5B('file
|
0658a099a386791b3bde27f8e76c240253310890
|
Update pplot.py
|
src/analysis/pplot.py
|
src/analysis/pplot.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
'''Result analysis for automatic speech recognition
@Date:2016-4-9
@Author:zhang zewang
'''
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
class Analysis(object):
'''
class Analysis for ASR results
'''
def __init__(self,logFile,saveFig=True,showFig=False):
self.logFile = logFile
self.saveFig = saveFig
self.showFig = showFig
def getContent(self):
try:
with open(self.logFile) as f:
content = f.read().splitlines()
except RuntimeError as err:
print err
return content
def parse(self):
indexCostList = []
index1 = 0
indexValidateList = []
index2 = 0
costList = []
validateCostList = []
content = self.getContent()
keep = 0
model = ' '
dir_mfcc = ' '
learning_rate = 0
update = ''
for line in content:
if line.startswith('model'):
model = line.split(':')[1]
if line.startswith('penalty'):
penalty = line.split(':')[1]
if line.startswith('input_dim'):
input_dim = line.split(':')[1]
if line.startswith('n_hid'):
n_hid = line.split(':')[1]
if line.startswith('dataset'):
dir_mfcc = line.split(':')[1]
if line.startswith('learning_rate'):
learning_rate = line.split(':')[1]
if line.startswith('update'):
update = line.split(' ')[2]
if line.startswith('keep'):
keep = line.split(':')[1]
if line.startswith('Epoch'):
if 'validate cost' in line:
index2 = index2 + 1
cost = line.split(':')[2]
indexValidateList.append(index2)
validateCostList.append(float(cost))
elif 'train cost' in line:
index1 = index1+1
cost = line.split(':')[2]
indexCostList.append(index1)
costList.append(float(cost))
title = 'model:'+model+',dataset:'+dir_mfcc+',lr:'+ \
str(learning_rate)+'\nupdate:'+update
return title,indexCostList,indexValidateList,costList,validateCostList
def plot(self):
title,indexCostList,indexValidateList,costList,validateCostList = self.parse()
p1 = plt.plot(indexCostList,costList,marker='o',color='b',label='train cost')
p2 = plt.plot(indexValidateList,validateCostList,marker='o',color='r',label='validate cost')
plt.xlabel('Epoch')
plt.ylabel('Cost')
plt.legend()
plt.grid()
plt.title(title)
if self.saveFig:
plt.savefig(self.logFile+'.png',dpi=100)
#plt.savefig(self.logFile+'.eps',dpi=100)
if self.showFig:
plt.show()
if __name__ == '__main__':
dir_ = '/home/pony/acousticModeling/results/retest/'
for subdir, dirs, files in os.walk(dir_):
for f in files:
fullFilename = os.path.join(subdir, f)
if fullFilename.endswith('.txt'):
a = Analysis(fullFilename)
a.plot()
plt.clf()
|
Python
| 0.000002
|
@@ -1,24 +1,5 @@
#
-!/usr/bin/python%0A#
-*-
@@ -19,100 +19,946 @@
-*-%0A
-'''Result analysis for automatic speech recognition%0A@Date:2016-4-9%0A@Author:zhang zewang
+#!/usr/bin/python%0A%0A''' This file is designed to plot the cost curve, maybe deprecated.%0Aauthor:%0A%0A iiiiiiiiiiii iiiiiiiiiiii !!!!!!! !!!!!! %0A # ### # ### ### I# #: %0A # ### # I##; ##; ## ## %0A ### ### !## #### # %0A ### ### ### ## ### #' %0A !##; %60##%25 ##; ## ### ## %0A ### ### $## %60# ## # %0A ### # ### # #### ####; %0A %60### -# ### %60# ### ### %0A ############## ############## %60# # %0A %0Adate:2016-11-09
%0A'''%0A
+%0A%0A
impo
|
ac5053ada316e46d4286b1944c2fb957c42c3975
|
truncate superfluous trailing zeros
|
durationpy/duration.py
|
durationpy/duration.py
|
# -*- coding: UTF-8 -*-
import re
import datetime
_nanosecond_size = 1
_microsecond_size = 1000 * _nanosecond_size
_millisecond_size = 1000 * _microsecond_size
_second_size = 1000 * _millisecond_size
_minute_size = 60 * _second_size
_hour_size = 60 * _minute_size
_day_size = 24 * _hour_size
_week_size = 7 * _day_size
_month_size = 30 * _day_size
_year_size = 365 * _day_size
units = {
"ns": _nanosecond_size,
"us": _microsecond_size,
"µs": _microsecond_size,
"μs": _microsecond_size,
"ms": _millisecond_size,
"s": _second_size,
"m": _minute_size,
"h": _hour_size,
"d": _day_size,
"w": _week_size,
"mm": _month_size,
"y": _year_size,
}
def from_str(duration):
"""Parse a duration string to a datetime.timedelta"""
if duration in ("0", "+0", "-0"):
return datetime.timedelta()
pattern = re.compile('([\d\.]+)([a-zµμ]+)')
total = 0
sign = -1 if duration[0] == '-' else 1
matches = pattern.findall(duration)
if not len(matches):
raise Exception("Invalid duration {}".format(duration))
for (value, unit) in matches:
if unit not in units:
raise Exception(
"Unknown unit {} in duration {}".format(unit, duration))
try:
total += float(value) * units[unit]
except:
raise Exception(
"Invalid value {} in duration {}".format(value, duration))
microseconds = total / _microsecond_size
return datetime.timedelta(microseconds=sign * microseconds)
def to_str(delta):
"""Format a datetime.timedelta to a duration string"""
total_seconds = delta.total_seconds()
sign = "-" if total_seconds < 0 else ""
nanoseconds = abs(total_seconds * _second_size)
if total_seconds < 1:
result_str = _to_str_small(nanoseconds)
else:
result_str = _to_str_large(nanoseconds)
return "{}{}".format(sign, result_str)
def _to_str_small(nanoseconds):
result_str = ""
if not nanoseconds:
return "0"
milliseconds = int(nanoseconds / _millisecond_size)
if milliseconds:
nanoseconds -= _millisecond_size * milliseconds
result_str += "{}ms".format(milliseconds)
microseconds = int(nanoseconds / _microsecond_size)
if microseconds:
nanoseconds -= _microsecond_size * microseconds
result_str += "{}us".format(microseconds)
if nanoseconds:
result_str += "{}ns".format(nanoseconds)
return result_str
def _to_str_large(nanoseconds):
result_str = ""
hours = int(nanoseconds / _hour_size)
if hours:
nanoseconds -= _hour_size * hours
result_str += "{}h".format(hours)
minutes = int(nanoseconds / _minute_size)
if minutes:
nanoseconds -= _minute_size * minutes
result_str += "{}m".format(minutes)
seconds = float(nanoseconds) / float(_second_size)
if seconds:
nanoseconds -= _second_size * seconds
result_str += "{}s".format(seconds)
return result_str
|
Python
| 0.004364
|
@@ -2251,16 +2251,18 @@
tr += %22%7B
+:g
%7Dms%22.for
@@ -2437,16 +2437,18 @@
tr += %22%7B
+:g
%7Dus%22.for
@@ -2510,16 +2510,18 @@
tr += %22%7B
+:g
%7Dns%22.for
@@ -2738,16 +2738,18 @@
tr += %22%7B
+:g
%7Dh%22.form
@@ -2891,16 +2891,18 @@
tr += %22%7B
+:g
%7Dm%22.form
@@ -3055,16 +3055,18 @@
tr += %22%7B
+:g
%7Ds%22.form
|
d9800c562b81f4e118e9db96a68e301396af46f9
|
Add abstract job serializer
|
polyaxon/jobs/serializers.py
|
polyaxon/jobs/serializers.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from rest_framework import serializers
from jobs.models import JobResources
class JobResourcesSerializer(serializers.ModelSerializer):
class Meta:
model = JobResources
exclude = ('id',)
|
Python
| 0.003237
|
@@ -121,16 +121,24 @@
ializers
+, fields
%0A%0Afrom j
@@ -301,8 +301,1678 @@
('id',)%0A
+%0A%0Aclass JobSerializer(serializers.ModelSerializer):%0A cpu = fields.DictField(allow_null=True)%0A memory = fields.DictField(allow_null=True)%0A gpu = fields.DictField(allow_null=True)%0A resources = JobResourcesSerializer(read_only=True)%0A%0A class Meta:%0A fields = ('image', 'resources', 'cpu', 'memory', 'gpu')%0A extra_kwargs = %7B%0A 'cpu': %7B'write_only': True%7D,%0A 'memory': %7B'write_only': True%7D,%0A 'gpu': %7B'write_only': True%7D%7D%0A%0A @staticmethod%0A def _has_resources(validated_data):%0A cpu = validated_data%5B'cpu'%5D%0A memory = validated_data%5B'memory'%5D%0A gpu = validated_data%5B'gpu'%5D%0A if cpu is None and memory is None and gpu is None:%0A return False%0A return True%0A%0A @staticmethod%0A def _get_resources(validated_data):%0A cpu = validated_data%5B'cpu'%5D%0A memory = validated_data%5B'memory'%5D%0A gpu = validated_data%5B'gpu'%5D%0A return %7B'cpu': cpu, 'memory': memory, 'gpu': gpu%7D%0A%0A def _create_resources(self, validated_data):%0A if self._has_resources(validated_data):%0A resources = JobResourcesSerializer(data=self._get_resources(validated_data))%0A resources.is_valid(raise_exception=True)%0A return resources.save()%0A return None%0A%0A def _update_resources(self, resources_instance, validated_data):%0A if self._has_resources(validated_data):%0A resources = JobResourcesSerializer(instance=resources_instance,%0A data=self._get_resources(validated_data))%0A resources.is_valid(raise_exception=True)%0A return resources.save()%0A return None%0A
|
77c4b5a72ddad68717b6fb1291ce643f20a63e2d
|
Update SeleniumBase exceptions
|
seleniumbase/common/exceptions.py
|
seleniumbase/common/exceptions.py
|
""" SeleniumBase Exceptions
NoSuchFileException => Used by self.assert_downloaded_file(...)
NotUsingChromeException => Used by Chrome-only methods if not using Chrome
OutOfScopeException => Used by BaseCase methods when setUp() is skipped
TimeLimitExceededException => Used by "--time-limit=SECONDS"
"""
class NoSuchFileException(Exception):
pass
class NotUsingChromeException(Exception):
pass
class OutOfScopeException(Exception):
pass
class TimeLimitExceededException(Exception):
pass
class TextNotVisibleException(Exception):
pass
|
Python
| 0
|
@@ -48,23 +48,27 @@
tion =%3E
-Used by
+Called when
self.as
@@ -92,16 +92,23 @@
ile(...)
+ fails.
%0A Not
@@ -178,16 +178,17 @@
g Chrome
+.
%0A Out
@@ -255,16 +255,91 @@
skipped
+.%0A TextNotVisibleException =%3E Called when expected text fails to appear.
%0A Tim
@@ -365,23 +365,37 @@
tion =%3E
-Used by
+Called when exceeding
%22--time
@@ -413,13 +413,72 @@
NDS%22
+.
%0A%22%22%22%0A
+from selenium.common.exceptions import WebDriverException%0A
%0A%0Acl
@@ -546,32 +546,41 @@
ChromeException(
+WebDriver
Exception):%0A
@@ -642,32 +642,29 @@
%0Aclass T
-imeLimitExceeded
+extNotVisible
Exceptio
@@ -657,32 +657,41 @@
isibleException(
+WebDriver
Exception):%0A
@@ -700,37 +700,40 @@
ss%0A%0A%0Aclass T
-extNotVisible
+imeLimitExceeded
Exception(Ex
|
e6af9d901f26fdf779a6a13319face483fe48a3b
|
Disable clickjacking protection on demos to display them in iframes
|
dwitter/dweet/views.py
|
dwitter/dweet/views.py
|
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from dwitter.models import Dweet
def fullscreen_dweet(request, dweet_id):
dweet = get_object_or_404(Dweet, id=dweet_id)
context = {'dweet': dweet
}
return render(request, 'dweet/dweet-id.html', context );
|
Python
| 0
|
@@ -253,16 +253,111 @@
t Dweet%0A
+from django.views.decorators.clickjacking import xframe_options_exempt%0A%0A%0A@xframe_options_exempt
%0Adef ful
|
2e72be703998b2d0d9fdc06bfffddccec8fb11e3
|
use rps balancing
|
scripts/cycler.py
|
scripts/cycler.py
|
#!/usr/bin/python
from itertools import cycle
import subprocess
import time
targetsize = 16
def start():
# subprocess.call("sudo gcloud components update --quiet", shell=True)
# For completeness this should also create the backend, HTTP load balancer, template, and network
# Get the available zones
zones = subprocess.check_output("gcloud compute zones list --format='value(NAME)'", shell=True)
zoneList = zones.strip().split('\n')
# zoneList = sorted(zoneList)
# sort by zone letter (last character)
zoneList = sorted(zoneList, key=lambda x: x[-1])
print zoneList
for i, zone in enumerate(zoneList):
backendname = "retriever"
templatename = "retriever-1"
instancegroupname = "retriever-group-" + zone
print i, zone, instancegroupname
# Create the instance group
subprocess.call("gcloud compute instance-groups managed create {} --quiet --zone={} --size=0 --template={}".format(instancegroupname, zone, templatename), shell=True)
# Set instance template
subprocess.call("gcloud compute instance-groups managed set-instance-template {} --quiet --zone={} --template={}".format(instancegroupname, zone, templatename), shell=True)
# Add it to backend
subprocess.call("gcloud compute backend-services add-backend {} --quiet --instance-group={} --instance-group-zone={}".format(backendname, instancegroupname, zone), shell=True)
pool = cycle(zoneList)
while True:
# Consider all instances in each instance group connected in a chain
# Every iteration, slide the current window one slot
# Create the new instance in the next group first, then delete an instance in the current group
zone = next(pool)
instancegroupname = "retriever-group-" + zone
currentsize = int(subprocess.check_output("gcloud compute instance-groups managed describe {} --quiet --zone={} --format='value(targetSize)'".format(instancegroupname, zone), shell=True))
if currentsize > 0:
nextzone = next(pool)
nextinstancegroupname = "retriever-group-" + nextzone
nextsize = max(targetsize - currentsize, 0)
subprocess.call("gcloud compute instance-groups managed resize {} --quiet --zone={} --size={}".format(nextinstancegroupname, nextzone, nextsize), shell=True)
time.sleep(60)
# nextsize = int(subprocess.check_output("gcloud compute instance-groups managed describe {} --quiet --zone={} --format='value(targetSize)'".format(nextinstancegroupname, nextzone), shell=True))
# # Scale up the next zone
# subprocess.call("gcloud compute instance-groups managed resize {} --quiet --zone={} --size={}".format(nextinstancegroupname, nextzone, nextsize + 1), shell=True)
# # Find the oldest instance in this group
# delete = subprocess.check_output("gcloud compute instances list --sort-by=creationTimestamp --format='table[no-heading](name)' | grep {} | head -n 1".format(instancegroupname), shell=True)
# if (delete.startswith(instancegroupname)):
# # Delete old one
# subprocess.call("gcloud compute instance-groups managed delete-instances {} --quiet --zone={} --instances={}".format(instancegroupname, zone, delete), shell=True)
# # Scale down the current zone
# subprocess.call("gcloud compute instance-groups managed resize {} --quiet --zone={} --size={}".format(instancegroupname, zone, currentsize - 1), shell=True)
# We want to cycle fast enough that each instance lives for 20 minutes
# time.sleep(1200 // (currentsize + nextsize))
while True:
try:
start()
except:
pass
|
Python
| 0
|
@@ -1380,16 +1380,285 @@
l=True)%0A
+ # Configure load balancing policy%0A subprocess.call(%22gcloud compute backend-services update-backend %7B%7D --quiet --instance-group=%7B%7D --instance-group-zone=%7B%7D --balancing-mode=RATE --max-rate-per-instance=1%22.format(backendname, instancegroupname, zone), shell=True)%0A
pool =
|
6cf4d5b76f40d6e596d8784d35231f5086fbb78a
|
Change hyperparams: l1_reg & l2_reg: log[-5, -2] -> log[-5, 5]
|
keras_image_captioning/config.py
|
keras_image_captioning/config.py
|
import yaml
import sys
from collections import namedtuple
from datetime import timedelta
from random import choice, randint, uniform
from .common_utils import parse_timedelta
_REDUCE_LR_PATIENCE = 2
_EARLY_STOPPING_PATIENCE = 4
Config = namedtuple('Config', '''
dataset_name
epochs
time_limit
batch_size
reduce_lr_factor
reduce_lr_patience
early_stopping_patience
lemmatize_caption
rare_words_handling
words_min_occur
learning_rate
vocab_size
embedding_size
rnn_output_size
dropout_rate
bidirectional_rnn
rnn_type
rnn_layers
l1_reg
l2_reg
''')
class ConfigBuilderBase(object):
def build_config(self):
raise NotImplementedError
class StaticConfigBuilder(ConfigBuilderBase):
def __init__(self, config):
self._config = config
def build_config(self):
return self._config
class DefaultConfigBuilder(ConfigBuilderBase):
def build_config(self):
return Config(dataset_name='flickr8k',
epochs=None,
time_limit=timedelta(hours=10),
batch_size=32,
# As nearest as possible to 1.0, but must not be >= 1.0
reduce_lr_factor=1.0 - 1e-6,
reduce_lr_patience=sys.maxsize,
early_stopping_patience=sys.maxsize,
lemmatize_caption=True,
rare_words_handling='nothing',
words_min_occur=1,
learning_rate=0.001,
vocab_size=None,
embedding_size=300,
rnn_output_size=256,
dropout_rate=0.3,
bidirectional_rnn=False,
rnn_type='lstm',
rnn_layers=1,
l1_reg=0.0,
l2_reg=0.0)
class RandomConfigBuilder(ConfigBuilderBase):
_BATCH_SIZE = lambda _: choice([16, 32, 64])
_REDUCE_LR_FACTOR = lambda _: uniform(0.1, 0.9)
_LEMMATIZE_CAPTION = lambda _: choice([True, False])
_RARE_WORDS_HANDLING = lambda _: choice(['nothing', 'discard', 'change'])
_WORDS_MIN_OCCUR = lambda _: randint(1, 5)
_LEARNING_RATE = lambda _: 10**uniform(-5, -3)
_EMBEDDING_SIZE = lambda _: 100 * randint(1, 5)
_RNN_OUTPUT_SIZE = lambda _: 100 * randint(1, 5)
_DROPOUT_RATE = lambda _: uniform(0.1, 0.6)
_BIDIRECTIONAL_RNN = lambda _: choice([True, False])
_RNN_TYPE = lambda _: choice(['lstm', 'gru'])
_RNN_LAYERS = lambda _: randint(1, 2)
_L1_REG = lambda _: 10**uniform(-5, -2)
_L2_REG = lambda _: 10**uniform(-5, -2)
def __init__(self, fixed_config_keys):
"""
Args
fixed_config_keys: dataset_name must exist;
epochs xor time_limit must exist
"""
if 'dataset_name' not in fixed_config_keys:
raise ValueError('fixed_config_keys must contain dataset_name!')
if not (bool(fixed_config_keys.get('epochs')) ^
bool(fixed_config_keys.get('time_limit'))):
raise ValueError('fixed_config_keys must contain either epochs or '
'time_limit, but not both!')
self._fixed_config_keys = fixed_config_keys
self._fixed_config_keys.setdefault('epochs', None)
self._fixed_config_keys.setdefault('time_limit', None)
def build_config(self):
config_dict = dict(
batch_size=self._BATCH_SIZE(),
reduce_lr_factor=self._REDUCE_LR_FACTOR(),
reduce_lr_patience=_REDUCE_LR_PATIENCE,
early_stopping_patience=_EARLY_STOPPING_PATIENCE,
lemmatize_caption=self._LEMMATIZE_CAPTION(),
rare_words_handling='nothing',
words_min_occur=1,
learning_rate=self._LEARNING_RATE(),
vocab_size=None,
embedding_size=self._EMBEDDING_SIZE(),
rnn_output_size=self._RNN_OUTPUT_SIZE(),
dropout_rate=self._DROPOUT_RATE(),
bidirectional_rnn=self._BIDIRECTIONAL_RNN(),
rnn_type=self._RNN_TYPE(),
rnn_layers=self._RNN_LAYERS(),
l1_reg=self._L1_REG(),
l2_reg=self._L2_REG())
config_dict.update(self._fixed_config_keys)
return Config(**config_dict)
class FileConfigBuilder(ConfigBuilderBase):
def __init__(self, yaml_path):
self._yaml_path = yaml_path
def build_config(self):
with open(self._yaml_path) as yaml_file:
config_dict = yaml.load(yaml_file)
config_dict['time_limit'] = parse_timedelta(config_dict['time_limit'])
return Config(**config_dict)
_active_config = DefaultConfigBuilder().build_config()
def active_config(new_active_config=None):
if new_active_config:
global _active_config
_active_config = new_active_config
return _active_config
def init_vocab_size(vocab_size):
if vocab_size is None:
raise ValueError('vocab_size cannot be None!')
if _active_config.vocab_size:
raise RuntimeError('vocab_size has been initialized before!')
global _active_config
_active_config = _active_config._replace(vocab_size=vocab_size)
def write_to_file(config, yaml_path):
with open(yaml_path, 'w') as f:
config_dict = dict(config._asdict())
time_limit = config_dict['time_limit']
if time_limit:
config_dict['time_limit'] = str(time_limit)
yaml.dump(config_dict, f, default_flow_style=False)
|
Python
| 0.000015
|
@@ -2644,18 +2644,17 @@
orm(-5,
--2
+5
)%0A _L
@@ -2691,10 +2691,9 @@
-5,
--2
+5
)%0A%0A
|
4d5a15a4a087ea8bcf458243da947f5e0934013b
|
Fix html not loading the initial value (#569)
|
src/blocks/widgets.py
|
src/blocks/widgets.py
|
from django import forms
from wagtail.utils.widgets import WidgetWithScript
class CodeMirrorWidget(WidgetWithScript, forms.Textarea):
def render_js_init(self, id, name, value):
js = """
CodeMirror.fromTextArea(
document.getElementById("{id}"),
{{
lineWrapping: true,
indentUnit: 4,
mode: "htmlmixed",
autoRefresh: true
}}
);
"""
return js.format(id=id)
@property
def media(self):
return forms.Media(
css={'all': ('libraries/codemirror/codemirror.css',)},
js=('libraries/codemirror/codemirror.js',
'libraries/codemirror/autorefresh.js',
'libraries/codemirror/xml.js',
'libraries/codemirror/css.js',
'libraries/codemirror/javascript.js',
'libraries/codemirror/htmlmixed.js')
)
|
Python
| 0
|
@@ -190,24 +190,95 @@
js = %22%22%22%0A
+ document.addEventListener('DOMContentLoaded', function()%7B%7B%0A
Code
@@ -298,32 +298,40 @@
xtArea(%0A
+
+
document.getElem
@@ -355,27 +355,38 @@
-%7B%7B%0A
+ %7B%7B%0A
lineWrap
@@ -377,16 +377,21 @@
+
lineWrap
@@ -410,16 +410,24 @@
+
+
indentUn
@@ -441,16 +441,24 @@
+
+
mode: %22h
@@ -476,16 +476,24 @@
+
+
autoRefr
@@ -510,16 +510,24 @@
+
+
%7D%7D%0A
@@ -529,16 +529,32 @@
+ )%0A %7D%7D
);%0A
@@ -739,16 +739,33 @@
js=(
+%0A
'librari
@@ -993,24 +993,24 @@
script.js',%0A
-
@@ -1048,16 +1048,30 @@
ixed.js'
+,%0A
)%0A
|
fdef6455ee7b0ab088fc54ea8a96c481ea2ea462
|
add mean quality
|
sequana/fastqc.py
|
sequana/fastqc.py
|
import zipfile
import re
from sequana.lazy import pylab
from sequana.lazy import pandas as pd
class FastQC():
"""A temporary class to manipulate fastqc statistics"""
def __init__(self):
self.fastqc_data = {}
def read_sample(self, filename, s_name):
"""reads the fastqc stats
:param filename:
:param s_name: sample name.
This method was copied from multiqc.modules.fastqc.fastqc modules as a
temporary hack to read the sample data.
"""
zz = zipfile.ZipFile(filename)
file_contents = zz.open("{}{}".format(zz.namelist()[0], "fastqc_data.txt")).read().decode('utf8')
#self.add_data_source(f, s_name)
self.fastqc_data[s_name] = { 'statuses': dict() }
# Here below is the code from multiqc v1.6
# Parse the report
section = None
s_headers = None
self.dup_keys = []
for l in file_contents.splitlines():
if l == '>>END_MODULE':
section = None
s_headers = None
elif l.startswith('>>'):
(section, status) = l[2:].split("\t", 1)
section = section.lower().replace(' ', '_')
self.fastqc_data[s_name]['statuses'][section] = status
elif section is not None:
if l.startswith('#'):
s_headers = l[1:].split("\t")
# Special case: Total Deduplicated Percentage header line
if s_headers[0] == 'Total Deduplicated Percentage':
self.fastqc_data[s_name]['basic_statistics'].append({
'measure': 'total_deduplicated_percentage',
'value': float(s_headers[1])
})
else:
# Special case: Rename dedup header in old versions of
# FastQC (v10)
if s_headers[1] == 'Relative count':
s_headers[1] = 'Percentage of total'
s_headers = [s.lower().replace(' ', '_') for s in s_headers]
self.fastqc_data[s_name][section] = list()
elif s_headers is not None:
s = l.split("\t")
row = dict()
for (i, v) in enumerate(s):
v.replace('NaN','0')
try:
v = float(v)
except ValueError:
pass
row[s_headers[i]] = v
self.fastqc_data[s_name][section].append(row)
# Special case - need to remember order of duplication keys
if section == 'sequence_duplication_levels':
try:
self.dup_keys.append(float(s[0]))
except ValueError:
self.dup_keys.append(s[0])
# Tidy up the Basic Stats
self.fastqc_data[s_name]['basic_statistics'] = {
d['measure']: d['value'] for d in self.fastqc_data[s_name]['basic_statistics']}
# Calculate the average sequence length (Basic Statistics gives a range)
length_bp = 0
total_count = 0
for d in self.fastqc_data[s_name].get('sequence_length_distribution', {}):
length_bp += d['count'] * self._avg_bp_from_range(d['length'])
total_count += d['count']
if total_count > 0:
self.fastqc_data[s_name]['basic_statistics']['avg_sequence_length'] = length_bp / total_count
def _avg_bp_from_range(self, bp):
""" Helper function - FastQC often gives base pair ranges (eg. 10-15)
which are not helpful when plotting. This returns the average from such
ranges as an int, which is helpful. If not a range, just returns the int
"""
# copied from multiqc v1.6
try:
if '-' in bp:
maxlen = float(bp.split("-",1)[1])
minlen = float(bp.split("-",1)[0])
bp = ((maxlen - minlen)/2) + minlen
except TypeError:
pass
return(int(bp))
    def plot_sequence_quality(self, max_score=40, ax=None):
        """Plot per-base mean Phred quality for every parsed sample.

        Each sample is drawn as a semi-transparent black line over the
        conventional quality bands (red <20, orange 20-30, green >30).
        ``max_score`` sets the y-axis ceiling; ``ax`` selects an existing
        matplotlib axes, otherwise the current one is used.
        """
        for sample in self.fastqc_data.keys():
            # x: base position (ranges like "10-15" collapsed to their
            # midpoint), y: mean quality at that position.
            data = {self._avg_bp_from_range(d['base']): d['mean']
                for d in self.fastqc_data[sample]['per_base_sequence_quality']}
            df = pd.Series(data)
            df.plot(color="k", alpha=0.5)
            ymax = max_score + 1
            xmax = max(df.index) + 1
            if ax:
                pylab.sca(ax)
            # Background bands marking poor / acceptable / good quality.
            pylab.fill_between([0,xmax], [0,0], [20,20], color='red', alpha=0.4)
            pylab.fill_between([0,xmax], [20,20], [30,30], color='orange', alpha=0.4)
            pylab.fill_between([0,xmax], [30,30], [41,41], color='green', alpha=0.4)
            # X is only used by the commented-out mean/std overlay below.
            X = range(1, xmax + 1)
            #pylab.fill_between(X,
            #    self.df.mean()+self.df.std(),
            #    self.df.mean()-self.df.std(),
            #    color=color, interpolate=False)
            #pylab.plot(X, self.df.mean(), color=color_line, lw=lw)
            pylab.ylim([0, ymax])
            pylab.xlim([0, xmax])
            pylab.title("Quality scores across all bases")
            pylab.xlabel("Position in read (bp)")
            pylab.ylabel("Phred Score", fontsize=12)
            pylab.grid(axis='x')
|
Python
| 0.000984
|
@@ -1815,122 +1815,8 @@
se:%0A
- # Special case: Rename dedup header in old versions of%0A # FastQC (v10)%0A
@@ -2089,17 +2089,16 @@
list()%0A
-%0A
@@ -3074,16 +3074,498 @@
ics'%5D%7D%0A%0A
+ # TC: may 2020 Here we add the mean quality, which surprisingly is not%0A # to be found in the basic statistics.%0A quality_sum = sum(%5Bx%5B'quality'%5D * x%5B'count'%5D for x in self.fastqc_data%5Bs_name%5D%5B'per_sequence_quality_scores'%5D%5D)%0A nreads = sum(%5Bx%5B'count'%5D for x in self.fastqc_data%5Bs_name%5D%5B'per_sequence_quality_scores'%5D%5D)%0A mean_quality = quality_sum / float(nreads)%0A self.fastqc_data%5Bs_name%5D%5B'basic_statistics'%5D%5B'mean_quality'%5D = mean_quality%0A%0A
@@ -5313,238 +5313,8 @@
1)%0A%0A
- #pylab.fill_between(X, %0A # self.df.mean()+self.df.std(), %0A # self.df.mean()-self.df.std(), %0A # color=color, interpolate=False)%0A%0A #pylab.plot(X, self.df.mean(), color=color_line, lw=lw)%0A
|
84c07019572d8945bd2d4c7473c2b86c314107d0
|
Attempt to return better json
|
zpr.py
|
zpr.py
|
#!/var/lib/zpr/api/bin/python
# Flask application exposing the zpr backup-status API.
import json
import lib_zpr
#import logging
#from logging.handlers import RotatingFileHandler
from flask import Flask, jsonify, make_response
app = Flask(__name__)
# app.logger.setLevel(logging.INFO)
# app.logger.disabled = False
# handler = logging.handlers.RotatingFileHandler(
#     '/var/log/zpr_flask.log',
#     'a',
#     maxBytes=1024 * 1024 * 100,
#     backupCount=20
#     )
# log = logging.getLogger('werkzeug')
# log.setLevel(logging.DEBUG)
# app.logger.addHandler(handler)
# All routes are mounted under /zpr/<api_version>.
api_version = 'v1.0'
api_base = str('/zpr/{v}'.format(v=api_version))
@app.errorhandler(404)
def not_found(error):
    """Return a JSON 404 body instead of Flask's default HTML error page."""
    payload = jsonify({'error': 'Not found'})
    return make_response(payload, 404)
# @app.route('/zpr/job')
# def ls_test():
# return json.dumps(call('ls'))
# @app.route('{a}/job/rsync/<backup_host>'.format(a=api_base), methods=['GET'])
# def check_job(backup_host):
# job = str(lib_zpr.check_zpr_rsync_job(backup_host))
# return json.dumps(job)
@app.route('{a}/job/<backup_host>'.format(a=api_base), methods=['GET'])
def check_zpr_job(backup_host):
    """Run the tsp job check for *backup_host*; return its first result as JSON."""
    lib_zpr.check_tsp_job(backup_host)
    first_result = lib_zpr.check_tsp_job_out[0]
    return json.dumps(str(first_result))
@app.route('{a}/job/<backup_host>/output'.format(a=api_base), methods=['GET'])
def check_zpr_job_summary(backup_host):
    """Return the job output and detected changes for *backup_host* as JSON.

    Bug fix: the previous implementation serialised the results inside a
    loop but discarded every value and then fell off the end of the
    function, so the view returned ``None`` (a 500 in Flask). Build the
    payload explicitly and return it with ``jsonify`` instead.
    """
    lib_zpr.check_tsp_job(backup_host, show_changes=True)
    job_checked = [
        {
            'name': backup_host,
            'response': lib_zpr.check_tsp_job_out[0],
            'changes': lib_zpr.check_job_changes,
        }
    ]
    return jsonify({'job_checked': job_checked})
if __name__ == '__main__':
    # formatter = logging.Formatter(\
    #     "%(asctime)s - %(levelname)s - %(name)s: \t%(messages)s")
    # handler.setFormatter(formatter)
    # Development entry point: Flask's built-in server with the reloader also
    # watching lib_zpr.py. debug=True must not be used in production.
    app.run(debug=True, extra_files='/var/lib/zpr/api/lib_zpr.py')
|
Python
| 0.999868
|
@@ -1357,90 +1357,66 @@
-for i in %5Blib_zpr.check_tsp_job_out%5B0%5D%5D:%0A json.dumps(i)%0A json.dumps(
+job_checked = %5B%0A %7B%0A 'name': backup_host,
%0A
@@ -1423,16 +1423,28 @@
+ 'response':
lib_zpr
@@ -1454,20 +1454,47 @@
eck_
-job_
+tsp_job_out%5B0%5D,%0A '
changes
-%5B
+':
lib_
@@ -1507,38 +1507,83 @@
eck_
-tsp_job_out.index(i)%5D
+job_changes%0A %7D
%0A
+ %5D%0A
-
+return jsonify(%7B'job_checked': job_checked%7D
)%0A%0A
|
938d255db088ff721e69659db1afdd5cfa109c3f
|
Save temp as C and F
|
webapp/app/views/api/v1/points.py
|
webapp/app/views/api/v1/points.py
|
import time
from flask import request, abort
from app.views.api.v1 import APIView_v1
class PointsParser(object):
    """State-machine parser for the plain-text points upload format.

    Every non-blank line is whitespace-tokenised and dispatched to a
    ``parse_<state>_line`` handler. Handlers return ``(next_state,
    reparse)``; when ``reparse`` is true the same line is re-dispatched in
    the new state (used when a section keyword closes the current section).
    """

    def __init__(self, data):
        self.points = []
        self.interval = None
        self.state = 'root'
        stripped = map(str.strip, data.split('\n'))
        tokenised = map(lambda l: l.split(), filter(None, stripped))
        self.parse(tokenised)

    def parse(self, lines):
        """Dispatch each tokenised line until its handler stops re-parsing."""
        for line in lines:
            try:
                reparse = True
                while reparse:
                    handler = getattr(self, 'parse_' + self.state + '_line')
                    self.state, reparse = handler(*line)
            except Exception as e:
                abort(400, "Malformed line '{}' - {}: {}".format(' '.join(line), e.__class__.__name__, str(e)))

    def require_args(self, num, args):
        """Raise ValueError unless exactly *num* arguments were supplied."""
        if num != len(args):
            raise ValueError("Expected {} args, got {}".format(num, len(args)))

    def parse_int(self, value):
        """int() with a parser-friendly error message."""
        try:
            return int(value)
        except ValueError:
            raise ValueError("Invalid integer value")

    def parse_float(self, value):
        """float() with a parser-friendly error message."""
        try:
            return float(value)
        except ValueError:
            raise ValueError("Invalid float value")

    def parse_root_line(self, keyword, *args):
        """Top level: INTERVAL <ms> or the start of a POINT section."""
        if keyword == 'POINT':
            self.require_args(0, args)
            self.points.append([])
            return 'point', False
        if keyword == 'INTERVAL':
            self.require_args(1, args)
            self.interval = self.parse_int(args[0])
            return 'root', False
        raise ValueError("Invalid root keyword")

    def parse_point_line(self, keyword, *args):
        """Inside a POINT: only SENSOR <name> is valid."""
        if keyword != 'SENSOR':
            raise ValueError("Invalid point keyword")
        self.require_args(1, args)
        self.points[-1].append({'sensor': args[0]})
        return 'sensor', False

    def parse_sensor_line(self, keyword, *args):
        """Inside a SENSOR: readings, or a new SENSOR/POINT closes it."""
        if keyword == 'SENSOR':
            return 'point', True
        if keyword == 'POINT':
            return 'root', True
        if keyword in ('HUMIDITY', 'TEMPERATURE'):
            self.require_args(1, args)
            self.points[-1][-1][keyword.lower()] = self.parse_float(args[0])
            return 'sensor', False
        raise ValueError("Invalid sensor keyword")
class PointsView(APIView_v1):
    def post(self):
        """\
        Expected data format:
        INTERVAL 1000
        POINT
        SENSOR top
        HUMIDITY 75.123455
        TEMPERATURE 29.243513
        "INTERVAL" is the interval between points in ms. The points sections
        are ordered by time, ascending - the last section is assumed to be at
        the current time.
        A "POINT" section is a collection of datapoints for a variety of sensors
        at a given point in time. The point section may be repeated any number
        of times, and leading whitespace and blank lines are ignored.
        A "SENSOR" section is the data from one sensor for a given point in
        time. A point may contain data from many sensors. The sensor name,
        given after the keyword, may be any string not containing a newline.
        Each "SENSOR" section should contain a "TEMPERATURE" and "HUMIDITY". The
        temperature is a floating point number specified in degrees celsius, and
        the humidity is the percent humidity, also floating point
        """
        now = time.time()
        res = PointsParser(request.data)
        # Walk the points newest-first: the final POINT section is "now" and
        # every preceding section happened one interval EARLIER. Bug fix:
        # the original added the interval here, which stamped older readings
        # with timestamps in the future.
        for point in reversed(res.points):
            for sensor in point:
                sensor['timestamp'] = now
            now -= (res.interval / 1000.0)
        return "OK"
|
Python
| 0.000001
|
@@ -2070,32 +2070,270 @@
e_args(1, args)%0A
+ if keyword == 'TEMPERATURE':%0A self.points%5B-1%5D%5B-1%5D%5B'temperature_c'%5D = self.parse_float(args%5B0%5D)%0A self.points%5B-1%5D%5B-1%5D%5B'temperature_f'%5D = (self.parse_float(args%5B0%5D) * 1.8) + 32%0A else:%0A
self
|
c6f0f02eeacca2b93bc8c32bc54c103336fb12b0
|
add the 6th
|
base100/base100/base_100.py
|
base100/base100/base_100.py
|
# 练习题来自于菜鸟教程
# http://www.runoob.com/python/python-exercise-example1.html
#################################################################################
# 1. 有四个数字:1、2、3、4,能组成多少个互不相同且无重复数字的三位数?各是多少?
# 思路:使用yield将函数作为生成器
# Exercise 1: every three-digit number with distinct digits drawn from 1-4.
d = [1, 2, 3, 4]

def assemblyNum(num, targetD, length):
    """Yield 3-digit numbers built recursively from distinct digits of targetD.

    ``num`` is the value accumulated so far and ``length`` the position
    (1-based) of the digit being chosen.
    """
    prefix = num
    for idx, digit in enumerate(targetD):
        candidate = prefix * 10 + digit
        if length == 3:
            yield candidate
        elif length < 3:
            remaining = targetD[:idx] + targetD[idx + 1:]
            yield from assemblyNum(candidate, remaining, length + 1)

targetD = list(assemblyNum(0, d, 1))
print(targetD)
#################################################################################
# 2. 企业发放的奖金根据利润提成。利润(I)低于或等于10万元时,奖金可提10%;
# 利润高于10万元,低于20万元时,低于10万元的部分按10%提成,高于10万元的部分,可提成7.5%;20万到40万之间时,高于20万元的部分,可提成5%;
# 40万到60万之间时高于40万元的部分,可提成3%;
# 60万到100万之间时,高于60万元的部分,可提成1.5%,
# 高于100万元时,超过100万元的部分按1%提成,
# 从键盘输入当月利润I,求应发放奖金总数?
#思路: 奖金是根据利润走,且为区间为准;则按提成--利润对应关系
# Exercise 2: bonus is paid per profit bracket (floors in superprofit,
# matching rates in royaltyRate, both ordered highest bracket first).
superprofit = [1000000, 600000, 400000, 200000, 100000, 0]
royaltyRate = [0.01, 0.015, 0.03, 0.05, 0.075, 0.1]

def calculateProfit(num, sumnum, profitList, rateList):
    """Yield the bonus earned in each bracket, highest bracket first.

    The slice of profit above each bracket floor is paid at that bracket's
    rate; ``sumnum`` accumulates the running total internally.
    """
    if num < 0:
        return 0
    for bracket, floor in enumerate(profitList):
        above = num - floor
        if above > 0:
            sumnum += above * rateList[bracket]
            yield above * rateList[bracket]
            num = floor

#profit = int(input())
profit = 120000
print(list(calculateProfit(profit, 0, superprofit, royaltyRate)))
# 3. 一个整数,它加上100和加上268后都是一个完全平方数,请问该数是多少?
# 思路:假设为x 则 x+i与x+268都可以开平方
# 安装NumPy科学计算库:pip install NumPy
import numpy

def getSqrt(endNum):
    """Yield each x in [0, endNum) where x+100 and x+268 are perfect squares."""
    for x in range(endNum):
        root_a = int(numpy.sqrt(x + 100))
        root_b = int(numpy.sqrt(x + 268))
        perfect_a = numpy.square(root_a) == (x + 100)
        perfect_b = numpy.square(root_b) == (x + 268)
        if perfect_a and perfect_b:
            yield x

print(list(getSqrt(10000)))
# 4. 输入某年某月某日,判断这一天是这一年的第几天?
# 思路: 内置datetime进行日期格式转换与输出
# 如果手写则需要判断闰月
from datetime import datetime
#inputtime = input()
def getTheDaythForInput(inputtime):
    """Print the day-of-year (zero-padded, e.g. '061') for a 'YYYY-MM-DD' string."""
    parsed = datetime.strptime(inputtime, '%Y-%m-%d')
    print(parsed.date().strftime('%j'))
# getTheDaythForInput(inputtime)
# 5. Task: read three integers x, y, z and print them in ascending order.
# Approach: sorted() on the comma-separated values.
# Bug fixes: (a) the unconditional input() blocked whenever the script was
# imported or run non-interactively -- keep it available but commented out,
# defaulting to a sample value; (b) sort numerically via key=int, since
# comparing the raw strings would order "10" before "2".
#lstr = input()
lstr = '1,3,5,2'
l = sorted(lstr.split(","), key=int)
print(l)
|
Python
| 0.999999
|
@@ -2146,16 +2146,17 @@
t%E7%9A%84sort%0A%0A
+#
lstr = i
@@ -2162,16 +2162,32 @@
input()%0A
+lstr = '1,3,5,2'
%0Al = lst
@@ -2218,8 +2218,181 @@
rint(l)%0A
+%0A# 6. %E9%A2%98%E7%9B%AE%EF%BC%9A%E6%96%90%E6%B3%A2%E9%82%A3%E5%A5%91%E6%95%B0%E5%88%97%0A%0Adef fib(max):%0A%0A n,a,b = 0,0,1%0A while n%3Cmax:%0A yield b%0A a,b=b,a+b%0A n= n+1%0A return 'done'%0A%0Af = fib(8)%0A%0Afor n in f:%0A print(n)%0A%0A
|
a9bf968facd2a89017ef258e5afead093d1054f7
|
add method execute
|
CURD.py
|
CURD.py
|
# coding=utf8
# Permission to use, copy, modify,
# and distribute this software for any purpose with
# or without fee is hereby granted,
# provided that the above copyright notice
# and this permission notice appear in all copies.
#
"""
CURD.py
~~~~~~~
Tiny Python ORM for MySQL
:Author: Hit9
:Email: nz2324[at]126.com
:URL: https://github.com/hit9/CURD.py
:License: BSD
"""
__version__ = '0.2.5'
import re
import sys
import MySQLdb
import MySQLdb.cursors
class Database(object):
    """Database connection manager"""

    # configuration for connection with default values
    configs = {
        'host': 'localhost',
        'port': 3306,
        'db': '',
        'user': '',
        'passwd': '',
        'charset': 'utf8'
    }

    # It is strongly recommended that you set this True
    autocommit = True

    # MySQL connection object (shared, class-level singleton)
    conn = None

    @classmethod
    def config(cls, autocommit=True, **configs):
        """
        Configure the database connection.

        The connection will be auto established with these configs.

        Keyword parameters for this method:

          host
            string, host to connect

          user
            string, user to connect as

          passwd
            string, password for this user

          db
            string, database to use

          port
            integer, TCP/IP port to connect

          charset
            string, charset of connection

        See the MySQLdb documentation for more information,
        the parameters of `MySQLdb.connect` are all supported.
        """
        cls.configs.update(configs)
        cls.autocommit = autocommit

    @classmethod
    def connect(cls):
        """
        Connect to database, this method will new a connect object
        """
        # DictCursor returns rows as dicts keyed by column name.
        cls.conn = MySQLdb.connect(
            cursorclass=MySQLdb.cursors.DictCursor, **cls.configs
        )
        cls.conn.autocommit(cls.autocommit)

    @classmethod
    def get_conn(cls):
        """
        Get MySQL connection object.

        if the conn is open and working, return it.
        else new another one and return it.
        """
        # singleton
        if not cls.conn or not cls.conn.open:
            cls.connect()
        try:
            # ping to test if this conn is working
            cls.conn.ping()
        except MySQLdb.OperationalError:
            # server dropped the connection; reconnect once
            cls.connect()
        return cls.conn
|
Python
| 0.000005
|
@@ -2435,8 +2435,265 @@
ls.conn%0A
+%0A @classmethod%0A def execute(cls, sql):%0A %22%22%22%0A Execute one sql%0A%0A parameters%0A sql%0A string, sql command to run%0A %22%22%22%0A cursor = cls.get_conn().cursor()%0A cursor.execute(sql)%0A return cursor%0A
|
793ec2b19d63b70717a84293b45e583f6c0b9dd5
|
Enable JIT on LAS Model
|
fairseq/modules/linearized_convolution.py
|
fairseq/modules/linearized_convolution.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from .conv_tbc import ConvTBC
@with_incremental_state
class LinearizedConvolution(ConvTBC):
    """An optimized version of nn.Conv1d.

    At training time, this module uses ConvTBC, which is an optimized version
    of Conv1d. At inference time, it optimizes incremental generation (i.e.,
    one time step at a time) by replacing the convolutions with linear layers.

    Note that the input order changes from training to inference.
    """

    def __init__(self, in_channels, out_channels, kernel_size, **kwargs):
        super().__init__(in_channels, out_channels, kernel_size, **kwargs)
        # Cached flattened copy of self.weight for incremental (linear) mode;
        # invalidated after every backward pass so weight updates are seen.
        self._linearized_weight = None
        self.register_backward_hook(self._clear_linearized_weight)

    def state_dict(self, destination=None, prefix="", keep_vars=False):
        state = ConvTBC.state_dict(self, destination, prefix, keep_vars=keep_vars)
        # don't store redundant _linearized_weight in checkpoints
        if prefix + "_linearized_weight" in state:
            del state[prefix + "_linearized_weight"]
        return state

    def upgrade_state_dict_named(self, state_dict, name):
        # Drop the cached weight when loading old checkpoints that stored it.
        prefix = name + "." if name != "" else ""
        if prefix + "_linearized_weight" in state_dict:
            del state_dict[prefix + "_linearized_weight"]

    def forward(self, input, incremental_state=None):
        """
        Args:
            incremental_state: Used to buffer signal; if not None, then input is
                expected to contain a single frame. If the input order changes
                between time steps, call reorder_incremental_state.
        Input:
            Time x Batch x Channel during training
            Batch x Time x Channel during inference
        """
        if incremental_state is None:
            output = super().forward(input)
            if self.kernel_size[0] > 1 and self.padding[0] > 0:
                # remove future timesteps added by padding
                output = output[: -self.padding[0], :, :]
            return output
        # reshape weight
        weight = self._get_linearized_weight()
        kw = self.kernel_size[0]
        bsz = input.size(0)  # input: bsz x len x dim
        if kw > 1:
            input = input.data
            input_buffer = self._get_input_buffer(incremental_state)
            if input_buffer is None:
                # first step: start from an all-zero window of kw frames
                input_buffer = input.new(bsz, kw, input.size(2)).zero_()
                self._set_input_buffer(incremental_state, input_buffer)
            else:
                # shift buffer
                input_buffer[:, :-1, :] = input_buffer[:, 1:, :].clone()
            # append next input
            input_buffer[:, -1, :] = input[:, -1, :]
            input = input_buffer
        with torch.no_grad():
            # One time step of convolution expressed as a single linear layer.
            output = F.linear(input.view(bsz, -1), weight, self.bias)
        return output.view(bsz, 1, -1)

    def reorder_incremental_state(self, incremental_state, new_order):
        # Re-align the buffered window with a reordered batch (e.g. beam search).
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            input_buffer = input_buffer.index_select(0, new_order)
            self._set_input_buffer(incremental_state, input_buffer)

    def _get_input_buffer(self, incremental_state):
        return utils.get_incremental_state(self, incremental_state, "input_buffer")

    def _set_input_buffer(self, incremental_state, new_buffer):
        return utils.set_incremental_state(
            self, incremental_state, "input_buffer", new_buffer
        )

    def _get_linearized_weight(self):
        # Lazily flatten the conv weight to (out_channels, kw * in_channels)
        # so F.linear in forward() can apply it in one step.
        if self._linearized_weight is None:
            kw = self.kernel_size[0]
            weight = self.weight.transpose(2, 1).transpose(1, 0).contiguous()
            assert weight.size() == (self.out_channels, kw, self.in_channels)
            self._linearized_weight = torch.nn.Parameter(
                weight.view(self.out_channels, -1)
            )
        return self._linearized_weight

    def _clear_linearized_weight(self, *args):
        self._linearized_weight = None
|
Python
| 0
|
@@ -1585,16 +1585,38 @@
ight%22%5D%0A%0A
+ @torch.jit.ignore%0A
def
|
452899e183c6a8dcb2e7eb10a34a9a560e99145f
|
test problems
|
basic_cms/tests/test_api.py
|
basic_cms/tests/test_api.py
|
"""Django page CMS functionnal tests suite module."""
from basic_cms.models import Page
from basic_cms.tests.testcase import TestCase
import json
from django.template.loader import render_to_string
from django.core.urlresolvers import reverse
class CMSPagesApiTests(TestCase):
    """Functional tests for the basic_cms page API."""
    fixtures = ['pages_tests.json', 'api.json']

    # def setUp(self):
    #     self.original_data = Page.objects.from_path('terms', 'eng')
    #     self.original_json_data = json.dumps(self.original_data.dump_json_data())
    #     self.original_html_data = render_to_string(self.original_data.template,
    #         {"current_page": self.original_data})

    def tests_basic_cms_api_access(self):
        """The API returns 404 for unknown pages, JSON/HTML for known ones."""
        from django.test.client import Client
        self.client = Client()
        self.original_data = Page.objects.from_path('terms', 'en-us')
        self.original_json_data = json.dumps(self.original_data.dump_json_data())
        self.original_html_data = render_to_string(self.original_data.template,
            {"current_page": self.original_data})
        data = {
            'format': 'json'
        }
        response = self.client.get(reverse('basic_cms_api', args=['alamakota']), data)
        self.assertEqual(response.status_code, 404)
        response = self.client.get(reverse('basic_cms_api', args=['terms']), data)
        self.assertEqual(response.status_code, 200)
        # self.assertJSONEqual(self.original_json_data, response.content)
        self.assertEqual(self.original_json_data, response.content)
        response = self.client.get(reverse('basic_cms_api', args=['terms']))
        self.assertEqual(response.status_code, 200)
        self.assertIn('Please read these Terms of Use', response.content)
        response = self.client.get(reverse('basic_cms_api', args=['coaches']), {'format': 'json'})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data['title']['en-us'], 'coaches')
        self.assertEqual(len(response.data['children']), 3)
        self.assertEqual(response.data['children'][0]['title']['en-us'], 'Judith Singer')
        self.assertEqual(response.data['children'][1]['title']['en-us'], 'Melissa Litwak')
        self.assertEqual(response.data['children'][2]['title']['en-us'], 'Joanna Schaffler')

    def test_urls(self):
        """links_append_domain absolutises relative hrefs/srcs only."""
        from utils import links_append_domain
        body = """
        <a href="http://google.com">google.com</a>
        <a href="foo">foo</a>
        <a href="#a">#a</a>
        <a href="/#a">/#a</a>
        <img src="http://x.com/x.jpg"/>
        <img src="a.jpg"/>
        """
        return_body = """
        <a href="http://google.com">google.com</a>
        <a href="http://a.com/foo">foo</a>
        <a href="#a">#a</a>
        <a href="/#a">/#a</a>
        <img src="http://x.com/x.jpg"/>
        <img src="http://a.com/a.jpg"/>
        """
        # Bug fix: removed a leftover Python 2 debug `print` statement here.
        # It was a SyntaxError under Python 3 and debug noise under Python 2.
        self.assertIn(links_append_domain(body, 'http://a.com').strip(), return_body.strip())
|
Python
| 0.000011
|
@@ -2709,32 +2709,44 @@
%22%22%22%0A
+%3Chtml%3E%3Cbody%3E
%3Ca href=%22http://
@@ -2964,32 +2964,46 @@
//a.com/a.jpg%22/%3E
+%3C/body%3E%3C/html%3E
%0A %22%22%22%0A
@@ -3003,72 +3003,8 @@
%22%22%22%0A
- print links_append_domain(body, 'http://a.com').strip()%0A
@@ -3018,18 +3018,21 @@
f.assert
-In
+Equal
(links_a
|
921c02c614ca4831539088745c50cf215d268447
|
Use smtp_uchicago_send.delay
|
uchicagohvz/users/mailing_list.py
|
uchicagohvz/users/mailing_list.py
|
# Mailing list configuration
from django.template.loader import render_to_string
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from uchicagohvz import secrets
from .tasks import smtp_uchicago_send
from .models import Profile
from rest_framework.response import Response
from rest_framework.views import APIView
import email
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import hashlib
import hmac
def _verify(token, timestamp, signature):
    """Return True if *signature* is the valid Mailgun webhook signature.

    Mailgun signs webhooks with HMAC-SHA256 over ``timestamp + token``
    keyed by the API key. Security fix: compare with hmac.compare_digest
    instead of ``==`` so the check runs in constant time and does not leak
    matching-prefix timing information to an attacker.
    """
    expected = hmac.new(
        key=secrets.MAILGUN_API_KEY,
        msg='{}{}'.format(timestamp, token),
        digestmod=hashlib.sha256).hexdigest()
    return hmac.compare_digest(signature, expected)
class MailgunHookBase(APIView):
    """Base view for Mailgun receive-webhooks that re-broadcast an incoming
    message to a list of subscribers over SMTP.

    Subclasses set ``listhost_name`` / ``listhost_address`` and implement
    ``get_to_addrs()``.
    """
    # The webhook is authenticated by the Mailgun signature, not DRF auth.
    authentication_classes = []
    listhost_unsubscribe_template = 'users/emails/how_to_unsubscribe.txt'
    listhost_unsubscribe = '<https://www.uchicagohvz.org/users/account/>'
    # When True, rewrite From: to the list address (hides the real sender).
    anonymize_from = False

    def get_listhost_id(self):
        # RFC 2919 List-Id value; '@' is not allowed there, hence the dot.
        return "%s <%s>" % (self.get_listhost_name(),
            self.get_listhost_address().replace('@', '.'))

    def get_listhost_name(self):
        return getattr(self, 'listhost_name')

    def get_listhost_address(self):
        return getattr(self, 'listhost_address')

    def get_to_addrs(self):
        # Subclasses provide the recipient list.
        return getattr(self, 'to_addrs')

    @method_decorator(csrf_exempt)
    def post(self, request, *args, **kwargs):
        """Validate the Mailgun POST, rewrite headers, and relay the message."""
        FIELDS = (
            'recipient', 'sender', 'from',
            'subject', 'body-mime',
            'timestamp', 'token', 'signature'
        )
        verified = _verify(request.data['token'], request.data['timestamp'], request.data['signature'])
        if all([x in request.data for x in FIELDS]) and verified:
            msg = email.message_from_string(request.data['body-mime'])
            # Strip headers we are about to rewrite for the listhost.
            for x in ('From', 'Sender', 'To', 'Reply-To', 'Subject'):
                del msg[x]
            listhost_addr = self.get_listhost_address()
            if self.anonymize_from:
                msg['From'] = listhost_addr
                if 'X-Envelope-From' in msg:
                    del msg['X-Envelope-From']
            else:
                msg['From'] = request.data['from']
                msg['Sender'] = listhost_addr
            msg['To'] = listhost_addr
            msg['Reply-To'] = listhost_addr
            # Prefix the subject with "[<list name>]" unless already present.
            subject_tag = "[%s]" % self.get_listhost_name()
            if subject_tag not in request.data['subject']:
                msg['Subject'] = subject_tag + ' ' + request.data['subject']
            else:
                msg['Subject'] = request.data['subject']
            msg['List-Id'] = self.get_listhost_id()
            msg['List-Post'] = "<mailto:%s>" % (listhost_addr)
            msg['List-Unsubscribe'] = self.listhost_unsubscribe
            # Attach the how-to-unsubscribe note once per thread: skip it if
            # any existing part already carries that filename.
            include_unsub = True
            for p in msg.walk():
                if p.get_filename('') == 'how_to_unsubscribe.txt':
                    include_unsub = False
                    break
            if include_unsub:
                unsub_p = MIMEText(render_to_string(self.listhost_unsubscribe_template), 'plain')
                unsub_p.add_header('Content-Disposition', 'inline', filename='how_to_unsubscribe.txt')
                if msg.is_multipart():
                    if msg.get_content_type() == 'multipart/alternative':
                        # Wrap the alternative part in multipart/mixed so the
                        # unsubscribe attachment can sit alongside it.
                        msg_a = msg.get_payload()
                        msg.set_type('multipart/mixed')
                        msg_a_p = MIMEMultipart('alternative')
                        msg_a_p.set_payload(msg_a)
                        msg.set_payload([msg_a_p])
                    msg.attach(unsub_p)
                elif msg.get_content_maintype() == 'text':
                    # Promote a bare text message to multipart/mixed.
                    subtype = msg.get_content_subtype()
                    text_p = MIMEText(msg.get_payload(decode=True), subtype, msg.get_content_charset('us-ascii'))
                    msg.set_type('multipart/mixed')
                    msg.set_payload([text_p, unsub_p])
            smtp_uchicago_send(listhost_addr, self.get_to_addrs(), msg.as_string())
            return Response()
        else:
            # Missing fields or bad signature: refuse the webhook.
            return Response(status=406)
class ChatterMailingList(MailgunHookBase):
    """Incoming-mail hook for the main HvZ-Chatter listhost."""
    listhost_name = 'HvZ-Chatter'
    listhost_address = 'chatter@lists.uchicagohvz.org'

    def get_to_addrs(self):
        """Email addresses of all active players subscribed to chatter."""
        subscribers = Profile.objects.filter(
            user__is_active=True,
            subscribe_chatter_listhost=True,
        )
        return tuple(subscribers.values_list('user__email', flat=True))
class TestMailingList(MailgunHookBase):
    """
    Used to test the mailing list logic/DKIM/SPF/etc.
    """
    listhost_name = 'HvZ-Test'
    listhost_address = 'test@lists.uchicagohvz.org'

    def get_to_addrs(self):
        # Fixed recipient list kept out of source control with the secrets.
        return secrets.MAILING_LIST_TEST_RECIPIENTS
|
Python
| 0.000336
|
@@ -3354,16 +3354,22 @@
ago_send
+.delay
(listhos
|
05915c540aace84a6cb5cf43a9264a983862e358
|
set fully reflected if the server indicates it had all of the stream
|
lbrynet/stream/managed_stream.py
|
lbrynet/stream/managed_stream.py
|
import os
import asyncio
import typing
import logging
from lbrynet.extras.daemon.mime_types import guess_media_type
from lbrynet.stream.downloader import StreamDownloader
from lbrynet.stream.descriptor import StreamDescriptor
from lbrynet.stream.reflector.client import StreamReflectorClient
if typing.TYPE_CHECKING:
from lbrynet.extras.daemon.storage import StoredStreamClaim
from lbrynet.blob.blob_manager import BlobFileManager
log = logging.getLogger(__name__)
class ManagedStream:
    """A stream tracked by the stream manager: its descriptor, claim
    metadata, and (optionally) an active downloader, plus helpers to
    describe the stream and upload it to a reflector server.
    """
    STATUS_RUNNING = "running"
    STATUS_STOPPED = "stopped"
    STATUS_FINISHED = "finished"

    def __init__(self, loop: asyncio.BaseEventLoop, blob_manager: 'BlobFileManager', descriptor: 'StreamDescriptor',
                 download_directory: str, file_name: str, downloader: typing.Optional[StreamDownloader] = None,
                 status: typing.Optional[str] = STATUS_STOPPED, claim: typing.Optional['StoredStreamClaim'] = None):
        self.loop = loop
        self.blob_manager = blob_manager
        self.download_directory = download_directory
        self.file_name = file_name
        self.descriptor = descriptor
        self.downloader = downloader
        self.stream_hash = descriptor.stream_hash
        self.stream_claim_info = claim
        self._status = status
        # Set once a reflector server is known to hold the complete stream.
        self.fully_reflected = asyncio.Event(loop=self.loop)

    @property
    def status(self) -> str:
        return self._status

    def update_status(self, status: str):
        # Only the three known status strings are valid.
        assert status in [self.STATUS_RUNNING, self.STATUS_STOPPED, self.STATUS_FINISHED]
        self._status = status

    @property
    def finished(self) -> bool:
        return self.status == self.STATUS_FINISHED

    @property
    def running(self) -> bool:
        return self.status == self.STATUS_RUNNING

    # The claim_* / channel_* / metadata properties below all proxy the
    # stored claim info and return None for claimless (local-only) streams.
    @property
    def claim_id(self) -> typing.Optional[str]:
        return None if not self.stream_claim_info else self.stream_claim_info.claim_id

    @property
    def txid(self) -> typing.Optional[str]:
        return None if not self.stream_claim_info else self.stream_claim_info.txid

    @property
    def nout(self) -> typing.Optional[int]:
        return None if not self.stream_claim_info else self.stream_claim_info.nout

    @property
    def outpoint(self) -> typing.Optional[str]:
        return None if not self.stream_claim_info else self.stream_claim_info.outpoint

    @property
    def claim_height(self) -> typing.Optional[int]:
        return None if not self.stream_claim_info else self.stream_claim_info.height

    @property
    def channel_claim_id(self) -> typing.Optional[str]:
        return None if not self.stream_claim_info else self.stream_claim_info.channel_claim_id

    @property
    def channel_name(self) -> typing.Optional[str]:
        return None if not self.stream_claim_info else self.stream_claim_info.channel_name

    @property
    def claim_name(self) -> typing.Optional[str]:
        return None if not self.stream_claim_info else self.stream_claim_info.claim_name

    @property
    def metadata(self) ->typing.Optional[typing.Dict]:
        return None if not self.stream_claim_info else self.stream_claim_info.claim.claim_dict['stream']['metadata']

    @property
    def blobs_completed(self) -> int:
        # Count verified blobs; the final descriptor blob entry is excluded
        # (presumably the stream terminator -- confirm), matching
        # blobs_in_stream below.
        return sum([1 if self.blob_manager.get_blob(b.blob_hash).get_is_verified() else 0
                    for b in self.descriptor.blobs[:-1]])

    @property
    def blobs_in_stream(self) -> int:
        return len(self.descriptor.blobs) - 1

    @property
    def sd_hash(self):
        return self.descriptor.sd_hash

    def as_dict(self) -> typing.Dict:
        """Return the JSON-friendly file/stream description used by the API."""
        full_path = os.path.join(self.download_directory, self.file_name)
        if not os.path.exists(full_path):
            full_path = None
        mime_type = guess_media_type(os.path.basename(self.file_name))
        # Prefer the live downloader's progress; fall back to on-disk size.
        if self.downloader:
            written_bytes = self.downloader.written_bytes
        elif full_path:
            written_bytes = os.stat(full_path).st_size
        else:
            written_bytes = None
        return {
            'completed': self.finished,
            'file_name': self.file_name,
            'download_directory': self.download_directory,
            'points_paid': 0.0,
            'stopped': not self.running,
            'stream_hash': self.stream_hash,
            'stream_name': self.descriptor.stream_name,
            'suggested_file_name': self.descriptor.suggested_file_name,
            'sd_hash': self.descriptor.sd_hash,
            'download_path': full_path,
            'mime_type': mime_type,
            'key': self.descriptor.key,
            'total_bytes_lower_bound': self.descriptor.lower_bound_decrypted_length(),
            'total_bytes': self.descriptor.upper_bound_decrypted_length(),
            'written_bytes': written_bytes,
            'blobs_completed': self.blobs_completed,
            'blobs_in_stream': self.blobs_in_stream,
            'status': self.status,
            'claim_id': self.claim_id,
            'txid': self.txid,
            'nout': self.nout,
            'outpoint': self.outpoint,
            'metadata': self.metadata,
            'channel_claim_id': self.channel_claim_id,
            'channel_name': self.channel_name,
            'claim_name': self.claim_name
        }

    @classmethod
    async def create(cls, loop: asyncio.BaseEventLoop, blob_manager: 'BlobFileManager',
                     file_path: str, key: typing.Optional[bytes] = None,
                     iv_generator: typing.Optional[typing.Generator[bytes, None, None]] = None) -> 'ManagedStream':
        """Create a stream from a local file and persist it; status FINISHED."""
        descriptor = await StreamDescriptor.create_stream(
            loop, blob_manager.blob_dir, file_path, key=key, iv_generator=iv_generator
        )
        sd_blob = blob_manager.get_blob(descriptor.sd_hash)
        await blob_manager.blob_completed(sd_blob)
        await blob_manager.storage.store_stream(
            blob_manager.get_blob(descriptor.sd_hash), descriptor
        )
        return cls(loop, blob_manager, descriptor, os.path.dirname(file_path), os.path.basename(file_path),
                   status=cls.STATUS_FINISHED)

    async def stop_download(self):
        """Stop an active download; keep FINISHED status if already complete."""
        if self.downloader:
            await self.downloader.stop()
        if not self.finished:
            self.update_status(self.STATUS_STOPPED)

    async def upload_to_reflector(self, host: str, port: int) -> typing.List[str]:
        """Upload the descriptor and any needed blobs to a reflector server.

        Returns the list of blob hashes actually sent; on any transport
        error the partial list sent so far is returned.
        """
        sent = []
        protocol = StreamReflectorClient(self.blob_manager, self.descriptor)
        try:
            await self.loop.create_connection(lambda: protocol, host, port)
        except ConnectionRefusedError:
            return sent
        try:
            await protocol.send_handshake()
        except (asyncio.CancelledError, asyncio.TimeoutError, ValueError):
            if protocol.transport:
                protocol.transport.close()
            return sent
        try:
            # The reflector replies with the blob hashes it still needs.
            sent_sd, needed = await protocol.send_descriptor()
            if sent_sd:
                sent.append(self.sd_hash)
        except (asyncio.CancelledError, asyncio.TimeoutError, ValueError):
            if protocol.transport:
                protocol.transport.close()
            return sent
        for blob_hash in needed:
            try:
                await protocol.send_blob(blob_hash)
                sent.append(blob_hash)
            except (asyncio.CancelledError, asyncio.TimeoutError, ValueError):
                if protocol.transport:
                    protocol.transport.close()
                return sent
        if protocol.transport:
            protocol.transport.close()
        if not self.fully_reflected.is_set():
            self.fully_reflected.set()
        return sent
|
Python
| 0
|
@@ -6990,32 +6990,176 @@
d(self.sd_hash)%0A
+ if not sent_sd and not needed:%0A if not self.fully_reflected.is_set():%0A self.fully_reflected.set()%0A
except (
|
ec5db06452e56c1f4ef62b1175f225371864ae75
|
Remove left-over print statements
|
celery/events/state.py
|
celery/events/state.py
|
import time
import heapq
from carrot.utils import partition
from celery import states
from celery.datastructures import AttributeDict, LocalCache
from celery.utils import kwdict
HEARTBEAT_EXPIRE = 150 # 2 minutes, 30 seconds
class Element(AttributeDict):
    """Base class for types."""
    # Marker consumers may use to flag elements already processed.
    visited = False

    def __init__(self, **fields):
        # Store the keyword fields directly as mapping items via the plain
        # dict initialiser.
        dict.__init__(self, fields)
class Worker(Element):
    """Worker State."""

    def __init__(self, **fields):
        super(Worker, self).__init__(**fields)
        # Timestamps of recent heartbeats, maintained with heapq.
        self.heartbeats = []

    def on_online(self, timestamp=None, **kwargs):
        # A worker-online event counts as a heartbeat.
        self._heartpush(timestamp)

    def on_offline(self, **kwargs):
        # Forget all heartbeats so `alive` immediately reports False.
        self.heartbeats = []

    def on_heartbeat(self, timestamp=None, **kwargs):
        self._heartpush(timestamp)

    def _heartpush(self, timestamp):
        # Events without a timestamp are ignored.
        if timestamp:
            heapq.heappush(self.heartbeats, timestamp)

    @property
    def alive(self):
        # NOTE(review): heartbeats[-1] is the newest timestamp only while
        # events arrive in non-decreasing order (heappush keeps an already
        # sorted list sorted); out-of-order timestamps would make [-1]
        # arbitrary -- confirm ordering guarantees upstream.
        return (self.heartbeats and
                time.time() < self.heartbeats[-1] + HEARTBEAT_EXPIRE)
class Task(Element):
    """Task state, updated from the task event stream.

    Each ``on_<event>`` handler records the event timestamp, moves the task
    to the matching state, and folds the remaining event fields in via
    :meth:`update`.
    """
    # Fields exposed by info() when the caller does not request specific ones.
    _info_fields = ("args", "kwargs", "retries",
                    "result", "eta", "runtime",
                    "exception")

    _defaults = dict(uuid=None,
                     name=None,
                     state=states.PENDING,
                     received=False,
                     started=False,
                     succeeded=False,
                     failed=False,
                     retried=False,
                     revoked=False,
                     args=None,
                     kwargs=None,
                     eta=None,
                     retries=None,
                     worker=None,
                     timestamp=None)

    def __init__(self, **fields):
        super(Task, self).__init__(**dict(self._defaults, **fields))

    def info(self, fields=None, extra=()):
        """Return a dict of the non-None attributes named by *fields* + *extra*.

        ``extra`` now defaults to an immutable tuple instead of the original
        mutable ``[]`` default; callers may still pass any iterable.
        """
        if fields is None:
            fields = self._info_fields
        fields = list(fields) + list(extra)
        return dict((key, getattr(self, key, None))
                        for key in fields
                            if getattr(self, key, None) is not None)

    @property
    def ready(self):
        return self.state in states.READY_STATES

    def update(self, d, **extra):
        # Any task event also proves the associated worker is alive.
        if self.worker:
            self.worker.on_heartbeat(timestamp=time.time())
        return super(Task, self).update(d, **extra)

    def on_received(self, timestamp=None, **fields):
        # Leftover debug print() calls removed from this handler.
        self.received = timestamp
        # NOTE(review): literal "RECEIVED" unlike the states.* constants used
        # below -- confirm states.RECEIVED exists before switching to it.
        self.state = "RECEIVED"
        self.update(fields, timestamp=timestamp)

    def on_started(self, timestamp=None, **fields):
        self.state = states.STARTED
        self.started = timestamp
        self.update(fields, timestamp=timestamp)

    def on_failed(self, timestamp=None, **fields):
        self.state = states.FAILURE
        self.failed = timestamp
        self.update(fields, timestamp=timestamp)

    def on_retried(self, timestamp=None, **fields):
        self.state = states.RETRY
        self.retried = timestamp
        self.update(fields, timestamp=timestamp)

    def on_succeeded(self, timestamp=None, **fields):
        self.state = states.SUCCESS
        self.succeeded = timestamp
        self.update(fields, timestamp=timestamp)

    def on_revoked(self, timestamp=None, **fields):
        self.state = states.REVOKED
        self.revoked = timestamp
        self.update(fields, timestamp=timestamp)
class State(object):
    """Represents a snapshot of a clusters state."""

    # Running totals across all processed events.
    event_count = 0
    task_count = 0

    def __init__(self, callback=None,
            max_workers_in_memory=5000, max_tasks_in_memory=10000):
        # LRU caches bound memory usage; oldest entries are evicted.
        self.workers = LocalCache(max_workers_in_memory)
        self.tasks = LocalCache(max_tasks_in_memory)
        # Optional hook invoked after every processed event.
        self.event_callback = callback
        # Dispatch table keyed by event group ("worker-online" -> "worker").
        self.group_handlers = {"worker": self.worker_event,
                               "task": self.task_event}

    def get_or_create_worker(self, hostname, **kwargs):
        """Get or create worker by hostname."""
        try:
            worker = self.workers[hostname]
            worker.update(kwargs)
        except KeyError:
            worker = self.workers[hostname] = Worker(
                    hostname=hostname, **kwargs)
        return worker

    def get_or_create_task(self, uuid, **kwargs):
        """Get or create task by uuid."""
        try:
            task = self.tasks[uuid]
            task.update(kwargs)
        except KeyError:
            task = self.tasks[uuid] = Task(uuid=uuid, **kwargs)
        return task

    def worker_event(self, type, fields):
        """Process worker event."""
        hostname = fields.pop("hostname")
        worker = self.get_or_create_worker(hostname)
        # Dispatch to Worker.on_<type>; raises AttributeError for
        # unknown event types.
        handler = getattr(worker, "on_%s" % type)
        if handler:
            handler(**fields)

    def task_event(self, type, fields):
        """Process task event."""
        uuid = fields.pop("uuid")
        hostname = fields.pop("hostname")
        worker = self.get_or_create_worker(hostname)
        task = self.get_or_create_task(uuid)
        handler = getattr(task, "on_%s" % type)
        if type == "received":
            self.task_count += 1
        if handler:
            handler(**fields)
        # Associate the task with the worker that emitted the event.
        task.worker = worker

    def event(self, event):
        """Process event."""
        self.event_count += 1
        event = kwdict(event)
        # Event types look like "<group>-<type>", e.g. "task-received".
        group, _, type = partition(event.pop("type"), "-")
        self.group_handlers[group](type, event)
        if self.event_callback:
            self.event_callback(self, event)

    def tasks_by_timestamp(self):
        """Get tasks by timestamp.

        Returns a list of ``(uuid, task)`` tuples.

        """
        return self._sort_tasks_by_time(self.tasks.items())

    def _sort_tasks_by_time(self, tasks):
        """Sort task items by time."""
        return sorted(tasks, key=lambda t: t[1].timestamp, reverse=True)

    def tasks_by_type(self, name):
        """Get all tasks by type.

        Returns a list of ``(uuid, task)`` tuples.

        """
        return self._sort_tasks_by_time([(uuid, task)
                for uuid, task in self.tasks.items()
                    if task.name == name])

    def tasks_by_worker(self, hostname):
        """Get all tasks by worker.

        Returns a list of ``(uuid, task)`` tuples.

        """
        return self._sort_tasks_by_time([(uuid, task)
                for uuid, task in self.tasks.items()
                    if task.worker.hostname == hostname])

    def task_types(self):
        """Returns a list of all seen task types."""
        return list(set(task.name for task in self.tasks.values()))

    def alive_workers(self):
        """Returns a list of (seemingly) alive workers."""
        return [w for w in self.workers.values() if w.alive]
state = State()  # module-level default State instance shared by importers
|
Python
| 0.000054
|
@@ -2475,37 +2475,8 @@
s):%0A
- print(%22ON RECEIVED%22)%0A
@@ -2541,30 +2541,8 @@
ED%22%0A
- print(fields)%0A
|
783a9a138c1548e06c0386cad468bc383f9011a2
|
remove debug message in inmoredetail code. whoopsie.
|
lfluxproject/mdx_inmoredetail.py
|
lfluxproject/mdx_inmoredetail.py
|
from markdown import Extension
from markdown.inlinepatterns import Pattern
from markdown.treeprocessors import Treeprocessor
from markdown import util
IMD_RE = r'<imd (.+)>'
class InmoredetailPattern(Pattern):
    """ deprecated <imd ....> syntax """

    def handleMatch(self, m):
        # Wrap the matched text in a span carrying the inmoredetail class.
        span = util.etree.Element("span")
        span.text = m.group(2)
        existing = span.get('class', '')
        span.set('class', existing + ' inmoredetail')
        return span
class InmoredetailTreeProcessor(Treeprocessor):
    """Tags everything between [imd] and [/imd] markers with the
    ``inmoredetail`` CSS class.

    Fix: removed a leftover Python 2 debug print statement from _walk()
    (which was also a SyntaxError on Python 3), and dropped a redundant
    ``True and`` in its sibling-traversal loop.
    """

    def _find_elem(self, tree, text):
        # Return the path (root-first list of elements) to the element
        # whose text or tail contains *text*, or None if absent.
        tree.text = tree.text or ''
        if text in tree.text:
            return [tree]
        if text in (tree.tail or ''):
            return [tree]
        for child in tree.getchildren():
            x = self._find_elem(child, text)
            if x:
                return [tree] + x
        return None

    def _find_start(self, tree):
        return self._find_elem(tree, '[imd]')

    def _find_end(self, tree):
        return self._find_elem(tree, '[/imd]')

    def _wrap_in_span(self, text, imdcount):
        # Wrap *text* in a span tagged with the running marker index.
        newelem = util.etree.Element('span')
        newelem.text = text
        newelem.set('class', 'inmoredetail imd-%s' % imdcount)
        return newelem

    def _walk(self, start_tree, end_tree, imdcount):
        # Tag every element that lies between the start path and the end
        # path in document order.
        depth = 0
        for i in range(len(start_tree)):
            if start_tree[:i] == end_tree[:i]:
                depth = i
        current = start_tree
        i = len(current)
        """ going left, to the depth where we can traverse to end_tree """
        while i > depth+1:
            x = current.pop()
            if x.tail and x.tail.strip():
                newelem = self._wrap_in_span(x.tail, imdcount)
                x.tail = None
                x.append(newelem)
            i = len(current)
            parent = current[-1]
            children = parent.getchildren()
            ind = children.index(x)
            restchildren = children[ind+1:]
            for child in restchildren:
                child.set('class', child.get('class', '') + ' inmoredetail imd-%s' % imdcount)
                if child.tail and child.tail.strip():
                    newelem = self._wrap_in_span(child.tail, imdcount)
                    child.tail = None
                    child.append(newelem)
        """ traversing the siblings """
        x = current.pop()
        children = current[-1].getchildren()
        while len(children) > children.index(x)+1:
            x = children[children.index(x)+1]
            # (debug print removed here)
            if x == end_tree[len(current)]:
                break
            x.set('class', x.get('class', '') + ' inmoredetail imd-%s' % imdcount)
        """ going down to the end point """
        for i in range(depth+1, len(end_tree)):
            parent = end_tree[i-1]
            children = parent.getchildren()
            x = end_tree[i]
            index = children.index(x)
            for j in range(index-1):
                x = children[j]
                x.set('class', x.get('class','') + ' inmoredetail imd-%s' % imdcount)
                if x.tail and x.tail.strip():
                    newelem = self._wrap_in_span(x.tail, imdcount)
                    x.tail = None
                    x.append(newelem)
            if parent.text and parent.text.strip():
                newelem = self._wrap_in_span(parent.text, imdcount)
                parent.text = ''
                parent.insert(0, newelem)
        parent = end_tree[-1]
        # The closing marker may live in the element's tail rather than
        # its text.
        is_tail = not('[/imd]' in parent.text)
        if is_tail:
            before, after = parent.tail.split('[/imd]')
            parent.tail = ''
            parent.set('class', parent.get('class','') + 'inmoredetail imd-%s' % imdcount)
            newelem = self._wrap_in_span(before, imdcount)
            newelem.tail = after
            end_tree[-2].insert(end_tree[-2].getchildren().index(parent)+1, newelem)
        else:
            before, after = parent.text.split('[/imd]')
            parent.text = after
            newelem = self._wrap_in_span(before, imdcount)
            parent.insert(0, newelem)

    def run(self, root):
        # Repeatedly locate the next [imd] marker until none remain.
        x = True
        imdcount = 0
        while x:
            x = self._find_start(root)
            if x:
                parent, elem = x[-2:]
                is_tail = not ('[imd]' in elem.text)
                if not is_tail:
                    before, after = elem.text.split('[imd]')
                    elem.text = before
                    newelem = self._wrap_in_span(after, imdcount)
                    elem.append(newelem)
                else:
                    before, after = elem.tail.split('[imd]')
                    elem.tail = before
                    elemindex = parent.getchildren().index(elem)
                    newelem = self._wrap_in_span(after, imdcount)
                    parent.insert(elemindex+1, newelem)
                if is_tail:
                    x.pop()
                if '[/imd]' in newelem.text:
                    # Opening and closing marker share one text node.
                    index = newelem.text.index('[/imd]')
                    newelem.text, newelem.tail = newelem.text[0:index], newelem.text[index+6:]
                else:
                    # Closing marker is elsewhere; walk the tree to it.
                    for i in range(len(x)):
                        y = self._find_end(x[-i])
                        if y:
                            self._walk(x+[newelem], y, imdcount)
                            break
                imdcount += 1
        return root
class InmoredetailExtension(Extension):
    """Markdown extension wiring up the [imd] pattern and tree processor."""

    def extendMarkdown(self, md, md_globals):
        md.registerExtension(self)
        pattern = InmoredetailPattern(IMD_RE)
        processor = InmoredetailTreeProcessor()
        md.inlinePatterns.add('inmoredetail', pattern, '_begin')
        # NOTE: the registered key keeps its historical misspelling.
        md.treeprocessors.add('inmoredetial', processor, '_end')
def makeExtension(configs=[]):
    """Entry point used by Markdown to instantiate the extension.

    Fix: previously this passed the ``Extension`` *class* itself as the
    positional argument; pass the supplied ``configs`` instead.  The
    mutable default is retained for interface compatibility and is never
    mutated here.
    """
    return InmoredetailExtension(configs=configs)
|
Python
| 0
|
@@ -2479,56 +2479,8 @@
+1%5D%0A
- print 'mark', x.tag, x.text, x.tail%0A
|
ec7e00eb3d03a3177800eb4ac3fb4fdc7132af0e
|
Add error handling to popen
|
kbdgen/gen/base.py
|
kbdgen/gen/base.py
|
import itertools
import logging
import os
import os.path
import random
import re
import subprocess
import sys
from functools import lru_cache
from collections import OrderedDict
from ..base import ISO_KEYS, KbdgenException
logger = logging.getLogger()
class MissingApplicationException(KbdgenException): pass  # a required external tool is unavailable
class GenerationError(KbdgenException): pass  # keyboard generation failed (e.g. missing required mode)
def bind_iso_keys(other):
    """Map each ISO key position to the corresponding entry of *other*."""
    return OrderedDict(zip(ISO_KEYS, other))
class Generator:
    """Base class for keyboard generators.

    Wraps the project definition plus the CLI argument dict and exposes
    the common options as read-only properties.
    """

    def __init__(self, project, args=None):
        self._project = project
        self._args = args or {}

    @property
    def repo(self):
        """Repository URL/path passed on the command line, if any."""
        return self._args.get('repo')

    @property
    def branch(self):
        """Branch to build from; defaults to master."""
        return self._args.get('branch', 'master')

    @property
    def is_release(self):
        """Whether this is a release build."""
        return self._args.get('release', False)

    @property
    def dry_run(self):
        """Whether side-effecting steps should be skipped."""
        return self._args.get('dry_run', False)

    @property
    def output_dir(self):
        """Directory generated artifacts are written to."""
        return self._args.get("output", ".")

    @property
    @lru_cache(maxsize=1)
    def supported_layouts(self):
        """Layouts from the project that support the current target."""
        target = self._args["target"]
        supported = OrderedDict()
        for name, layout in self._project.layouts.items():
            if layout.supported_target(target):
                supported[name] = layout
        return supported

    def sanity_check(self) -> bool:
        """Log and return whether any layout supports this target."""
        if not self.supported_layouts:
            logger.error("This project defines no supported layouts for this target.")
            return False
        logger.debug("Supported layouts: %s" % ", ".join(self.supported_layouts))
        return True
class PhysicalGenerator(Generator):
    """Generator for physical (hardware) keyboard layouts."""

    def validate_layout(self, layout):
        """Check dead-key declarations against the layout's modes.

        Raises Exception when dead keys reference an undeclared mode, or
        name keys absent from that mode's layer.
        """
        # TODO finish cls-based validate_layout
        undefined_modes = set(layout.dead_keys.keys()) - set(layout.modes.keys())
        if undefined_modes:
            raise Exception("Dead key modes are defined for undefined modes: %r" % (
                list(undefined_modes),))

        for mode, keys in layout.dead_keys.items():
            missing = set(keys) - set(layout.modes[mode].values())
            if missing:
                raise Exception("Specified dead keys missing from mode %r: %r" % (
                    mode, list(missing)))
class TouchGenerator(Generator):
    """Generator for touch (on-screen) keyboards."""

    def validate_layout(self):
        """Touch layouts require no structural validation."""
        return None
MSG_LAYOUT_MISSING = "Layout '%s' is missing a required mode: '%s'."
def mode_iter(keyboard, key, required=False):
    """Iterate the key values of mode *key*, or Nones when it is absent.

    Raises GenerationError when the mode is missing and *required*.
    """
    mode = keyboard.modes.get(key)
    if mode is not None:
        return mode.values()
    if required:
        raise GenerationError(MSG_LAYOUT_MISSING % (keyboard.internal_name, key))
    return itertools.repeat(None)
def mode_dict(keyboard, key, required=False, space=False):
    """Return the mode mapping for *key*, optionally injecting the spacebar.

    When the mode is absent, returns an all-None ISO map, or raises
    GenerationError when *required*.
    """
    mode = keyboard.modes.get(key)
    if mode is None:
        if required:
            raise GenerationError(MSG_LAYOUT_MISSING % (keyboard.internal_name, key))
        return OrderedDict(zip(ISO_KEYS, itertools.repeat(None)))
    if space:
        # A03 is the ISO position of the spacebar.
        mode['A03'] = keyboard.special.get('space', {}).get(key, " ")
    return mode
def git_update(dst, branch, clean, cwd='.', logger=print):
    """Reset, fetch and fast-forward the checkout at *cwd*/*dst*.

    :param dst: repository directory name under *cwd*.
    :param branch: branch name to check out.
    :param clean: when truthy, also runs ``git clean -fdx`` (drops
        untracked files and build output).
    :param cwd: parent directory containing *dst*.
    :param logger: callable used for progress messages.
    """
    msg = "Updating repository '%s'…" % dst
    logger(msg)

    cmd = """git reset --hard &&
             git fetch --all &&
             git checkout %s &&
             %s
             git pull &&
             git submodule init &&
             git submodule sync &&
             git submodule update""" % (
            branch,
            "git clean -fdx &&" if clean else ""
        )
    # Collapse to one line; the && chain short-circuits on failure.
    cmd = cmd.replace('\n', ' ')

    cwd = os.path.join(cwd, dst)
    # TODO error checking
    process = subprocess.Popen(cmd, cwd=cwd, shell=True)
    process.wait()
def git_clone(src, dst, branch, clean, cwd='.', logger=print):
    """Clone *src* into *cwd*/*dst* and update it to *branch*.

    :param src: clone URL or path.
    :param dst: destination directory name under *cwd*.
    :param branch: branch to check out after cloning.
    :param clean: forwarded to git_update (``git clean -fdx``).
    :param logger: callable used for progress messages.
    """
    msg = "Cloning repository '%s' to '%s'…" % (src, dst)
    logger(msg)

    cmd = ['git', 'clone', src, dst]
    # TODO error checking
    process = subprocess.Popen(cmd, cwd=cwd)
    process.wait()

    # Silence logger for update.
    # Fix: previously `cwd` was passed positionally and landed in
    # git_update's `clean` parameter, silently dropping the caller's
    # `clean` flag; pass each argument explicitly.
    git_update(dst, branch, clean, cwd=cwd, logger=lambda x: None)
def iterable_set(iterable):
    """Flatten one level of nesting and return the distinct elements."""
    return set(itertools.chain.from_iterable(iterable))
def filepath(fp, *args):
    """Join *args* onto the directory containing file *fp*."""
    base = os.path.dirname(fp)
    return os.path.join(base, *args)
class DictWalker:
    """Depth-first iterator over a nested dict of str/int leaves.

    Yields on_branch(...) for every nested dict and on_leaf(...) for
    every terminal value.  Sending ``False`` into the generator right
    after a branch is yielded skips that branch's subtree.
    """

    def on_branch(self, base, branch):
        # Hook: called with (path-so-far, key) for each nested dict.
        return base, branch

    def on_leaf(self, base, branch, leaf):
        # Hook: called with (path-so-far, key, value) for each leaf.
        return base, branch, leaf

    def __init__(self, dict_):
        self._dict = dict_

    def __iter__(self):
        def walk(dict_, buf):
            for k, v in dict_.items():
                if isinstance(v, dict):
                    # Consumers may .send(False) here to prune the subtree.
                    c = yield self.on_branch(tuple(buf), k)
                    if c == False:
                        continue
                    # Copy the path so sibling branches are unaffected.
                    nbuf = buf[:]
                    nbuf.append(k)
                    for vv in walk(v, nbuf):
                        yield vv
                elif isinstance(v, (int, str)):
                    yield self.on_leaf(tuple(buf), k, v)
                else:
                    # Any other leaf type is considered malformed input.
                    raise TypeError(v)
        for v in walk(self._dict, []):
            yield v

    def __call__(self):
        # Run iterator to death
        for _ in self: pass
def run_process(cmd, cwd=None, show_output=False):
    """Run *cmd* and return ``(stdout, stderr)`` bytes, exiting on failure.

    :param cmd: argument list passed to subprocess.Popen.
    :param cwd: optional working directory (stringified).
    :param show_output: when True, stream output to the console and
        return ``(None, None)`` without checking the exit code.

    Fix: a failure to *launch* the process (e.g. missing binary) now
    produces a clear error and ``sys.exit(1)`` instead of an unhandled
    traceback.
    """
    try:
        process = subprocess.Popen(
            cmd,
            cwd=str(cwd) if cwd is not None else None,
            stderr=None if show_output else subprocess.PIPE,
            stdout=None if show_output else subprocess.PIPE)
    except Exception as e:
        # Popen raises (FileNotFoundError, PermissionError, ...) before
        # the child ever runs; surface it cleanly.
        logger.error("Process failed to launch with the following error message:")
        logger.error(e)
        sys.exit(1)

    if show_output:
        process.wait()
        return None, None

    out, err = process.communicate()
    if process.returncode != 0:
        # Prefer stderr; fall back to stdout when stderr is empty.
        message = err.decode()
        if message.strip() == "":
            message = out.decode()
        logger.error(message)
        logger.error("Application ended with error code %s." % (
            process.returncode))
        sys.exit(process.returncode)
    return out, err
|
Python
| 0
|
@@ -5357,24 +5357,37 @@
put=False):%0A
+ try:%0A
process
@@ -5453,16 +5453,20 @@
e None,%0A
+
@@ -5538,16 +5538,20 @@
+
stdout=N
@@ -5590,16 +5590,170 @@
ss.PIPE)
+%0A except Exception as e:%0A logger.error(%22Process failed to launch with the following error message:%22)%0A logger.error(e)%0A sys.exit(1)
%0A%0A if
|
3af46d10382eb18dac4c639f6b6ca60180a7d487
|
Fix properties names
|
server/process.py
|
server/process.py
|
import datetime
import gzip
import urllib.request
import zlib
import mercantile
import simplejson as json
from jsonslicer import JsonSlicer
from . import API, Z_TARGET, db
from .utils import get_updated_metadata
def generate_raw(bbox, start, end, *filters, **headers):
    """Yield processed features inside *bbox* for the given time range."""
    box = mercantile.Bbox(*bbox)
    for tile in bbox_tiles(box, Z_TARGET):
        quadkey = mercantile.quadkey(tile)
        for feature in get_tile_data(quadkey, start, end, *filters, **headers):
            # Tiles overshoot the bbox; filter per-feature.
            if lonlat_in_bbox(box, feature[0], feature[1]):
                yield feature
def generate(bbox, start, end, *filters, **headers):
    """Stream a GeoJSON FeatureCollection as string chunks.

    Yields an initial empty string (used by callers as a readiness
    signal), then the collection wrapper and comma-separated features.
    """
    yield ""  # signal
    yield '{"type": "FeatureCollection", "features": ['
    first = True
    for feature in generate_raw(bbox, start, end, *filters, **headers):
        feature_geojson = feature_to_geojson(feature)
        if not first:
            yield ", "
        # Only clear the flag once a non-empty serialization was emitted,
        # so a leading empty chunk does not produce a stray ", ".
        if first and feature_geojson:
            first = False
        yield feature_geojson
    yield "]}"
def bbox_tiles(bbox, z_target, *tiles):
    """Yield the zoom-``z_target`` tiles covering *bbox*.

    Starts from the single bounding tile (or the supplied *tiles*) and
    recursively subdivides until the target zoom, pruning tiles that do
    not intersect the bbox.
    """
    if not tiles:
        # Seed with the tile enclosing the whole bbox, lifted to at most
        # z_target so subdivision can begin.
        tile = mercantile.bounding_tile(*bbox)
        while tile.z > z_target:
            tile = mercantile.parent(tile)
        tiles = (tile,)
    for tile in tiles:
        bounds = mercantile.bounds(tile)
        # Skip tiles entirely outside the bbox.
        if (
            bounds.east < bbox.left
            or bbox.right < bounds.west
            or bounds.north < bbox.bottom
            or bbox.top < bounds.south
        ):
            continue
        if tile.z >= z_target:
            yield tile
        else:
            yield from bbox_tiles(bbox, z_target, *mercantile.children(tile))
def lonlat_in_bbox(bbox, lon, lat):
    """Return True when (lon, lat) lies inside *bbox*, edges inclusive."""
    inside_lon = bbox.left <= lon <= bbox.right
    inside_lat = bbox.bottom <= lat <= bbox.top
    return inside_lon and inside_lat
def stream_to_processed(resp):
    """Stream features from *resp*, grouping consecutive versions by @osmId.

    Assumes the response lists all versions of a feature contiguously
    (sorted by @osmId); each completed group is collapsed via
    process_group().
    """
    slicer = JsonSlicer(resp, ("features", None))
    start, end = get_updated_metadata()
    group = []
    for feature in slicer:
        osmid = feature["properties"]["@osmId"]
        if len(group) == 0:
            group.append(feature)
        elif group[0]["properties"]["@osmId"] == osmid:
            group.append(feature)
        else:
            # New id encountered: flush the completed group first.
            if processed := process_group(group, end):
                yield processed
            group = [feature]
    # Flush the trailing group.
    if len(group) > 0:
        if processed := process_group(group, end):
            yield processed
def get_tile_data(quadkey, start, end, *filters, **headers):
    """Fetch (or serve from cache) processed features for one tile.

    Results are stored zlib-compressed in the cache as newline-delimited
    JSON; a per-key lock prevents duplicate upstream requests for the
    same tile/time/filter combination.
    """
    filters = " and ".join(filter(None, filters))
    cache = db.cache()
    cache_key = f"{quadkey}_{start}_{end}_{filters}"
    with db.lock(cache_key, ttl=300000):
        result = cache.get(cache_key)
        if not result:
            bbox = mercantile.bounds(mercantile.quadkey_to_tile(quadkey))
            params = urllib.parse.urlencode(
                {
                    "bboxes": "|".join(map(str, bbox)),
                    "properties": "metadata",
                    "showMetadata": "true",
                    "time": f"{start},{end}",
                    "filter": filters,
                }
            )
            req = urllib.request.Request(API + "?" + params)
            for key, value in headers.items():
                req.add_header(key, value)
            # Upstream responds gzip-compressed; recompress the processed
            # rows with zlib for compact cache storage.
            with urllib.request.urlopen(req) as resp_gzipped:
                resp = gzip.GzipFile(fileobj=resp_gzipped)
                compress = zlib.compressobj()
                result = b""
                for chunk in stream_to_processed(resp):
                    serialized = json.dumps(chunk, use_decimal=True) + "\n"
                    result += compress.compress(serialized.encode())
                result += compress.flush()
            cache.set(cache_key, result, timeout=60 * 60 * 24 * 30)  # 30 days
    for line in zlib.decompress(result).decode().split("\n"):
        if line:
            yield json.loads(line)
def process(group, end):
    """Collapse all versions of one OSM feature into a summary tuple.

    :param group: versions of one feature as GeoJSON-like dicts, oldest
        first (all sharing the same ``@osmId``).
    :param end: end of the queried time range, "%Y-%m-%dT%H:%M:%SZ".
    :returns: ``(lon, lat, osm_numeric_id, first_edit_ts, last_edit_ts,
        version, updates_per_year)``, or None when the feature no longer
        exists at *end* (i.e. it was deleted).

    Fix: clamp the feature age to at least one day so features first
    edited today no longer raise ZeroDivisionError; also call
    ``datetime.datetime.utcnow()`` directly instead of the confusing
    ``now().utcnow()`` chain (same result).
    """
    first, last = group[0], group[-1]
    if last["properties"]["@validTo"] != end:
        return None  # feature has been deleted

    fmt = "%Y-%m-%dT%H:%M:%SZ"
    firstedit = datetime.datetime.strptime(
        first["properties"]["@validFrom"], fmt
    )
    lastedit = datetime.datetime.strptime(
        last["properties"]["@validFrom"], fmt
    )

    # Edits per year since the first edit; age clamped to >= 1 day.
    age_days = max((datetime.datetime.utcnow() - firstedit).days, 1)
    updatefrequency = last["properties"]["@version"] / (age_days / 365)

    return (
        last["geometry"]["coordinates"][0],
        last["geometry"]["coordinates"][1],
        int(first["properties"]["@osmId"].split("/")[1]),
        firstedit.timestamp(),
        lastedit.timestamp(),
        last["properties"]["@version"],
        updatefrequency,
    )
def feature_to_geojson(feature):
    """Serialize a processed feature tuple as a GeoJSON Feature string."""
    geometry = {
        "type": "Point",
        "coordinates": [feature[0], feature[1]],
    }
    properties = {
        "id": feature[2],
        "created": feature[3],
        "lastedit": feature[4],
        "version": feature[5],
        "updatefrequency": feature[6],
    }
    payload = {
        "type": "Feature",
        "geometry": geometry,
        "properties": properties,
    }
    # use_decimal is a simplejson extension (module imported as `json`).
    return json.dumps(payload, use_decimal=True)
def process_group(group, end):
    """Thin wrapper around process(); returns the summary tuple or None."""
    processed = process(group, end)
    return processed if processed else None
|
Python
| 0.000005
|
@@ -4900,18 +4900,19 @@
%22creat
-ed
+ion
%22: featu
@@ -4975,23 +4975,25 @@
%22
-ver
+revi
sion
+s
%22: featu
@@ -5016,22 +5016,16 @@
%22
-update
frequenc
|
b5378b0da9378813f9c2fa1fe9ff48671868eefd
|
Add provider_name to serialized account representation
|
website/addons/base/serializer.py
|
website/addons/base/serializer.py
|
import abc
from framework.auth.decorators import collect_auth
from website.util import api_url_for, web_url_for
class AddonSerializer(object):
    """Base serializer for addon node/user settings API payloads."""

    # NOTE(review): Python 2 style metaclass declaration; has no effect
    # on Python 3.
    __metaclass__ = abc.ABCMeta

    # TODO take addon_node_settings, addon_user_settings
    def __init__(self, node_settings=None, user_settings=None):
        self.node_settings = node_settings
        self.user_settings = user_settings

    @abc.abstractproperty
    def addon_serialized_urls(self):
        # Addon-specific URL mapping; implemented by subclasses.
        pass

    @abc.abstractproperty
    def serialized_urls(self):
        pass

    @abc.abstractproperty
    def user_is_owner(self):
        pass

    @abc.abstractproperty
    def credentials_owner(self):
        pass

    @property
    def serialized_node_settings(self):
        """Common node-settings payload shared by all addons."""
        result = {
            'nodeHasAuth': self.node_settings.has_auth,
            'userIsOwner': self.user_is_owner,
            'urls': self.serialized_urls,
        }

        if self.user_settings:
            result['userHasAuth'] = self.user_settings.has_auth
        else:
            # No user settings record: the current user has not
            # authorized this addon.
            result['userHasAuth'] = False

        if self.node_settings.has_auth:
            owner = self.credentials_owner
            if owner:
                result['urls']['owner'] = web_url_for('profile_view_id',
                                                 uid=owner._primary_key)
                result['ownerName'] = owner.fullname
        return result

    @property
    def serialized_user_settings(self):
        """User-settings payload; subclasses extend this dict."""
        return {}
class OAuthAddonSerializer(AddonSerializer):
    """Extends the base serializer with OAuth external-account data."""

    @property
    def serialized_accounts(self):
        # All external accounts linked to the current user's settings.
        return [
            self.serialize_account(each)
            for each in self.user_settings.external_accounts
        ]

    @property
    def serialized_user_settings(self):
        retval = super(OAuthAddonSerializer, self).serialized_user_settings
        retval['accounts'] = self.serialized_accounts
        return retval

    def serialize_account(self, external_account):
        """Serialize one ExternalAccount (or None) for the API."""
        if external_account is None:
            return None

        return {
            'id': external_account._id,
            'provider_id': external_account.provider_id,
            'display_name': external_account.display_name,
            'profile_url': external_account.profile_url,
            'nodes': [
                self.serialize_granted_node(node)
                for node in self.user_settings.get_attached_nodes(
                    external_account=external_account
                )
            ]
        }

    @collect_auth
    def serialize_granted_node(self, node, auth):
        # ``auth`` is injected by the @collect_auth decorator.
        node_settings = node.get_addon(
            self.user_settings.oauth_provider.short_name
        )
        serializer = node_settings.serializer(node_settings=node_settings)
        urls = serializer.addon_serialized_urls
        urls['view'] = node.url

        return {
            'id': node._id,
            # Hide the title from users who cannot view the node.
            'title': node.title if node.can_view(auth) else None,
            'urls': urls,
        }
class CitationsAddonSerializer(OAuthAddonSerializer):
    """Serializer shared by citation-manager addons."""

    # URL keys every citation addon's addon_serialized_urls must provide.
    REQUIRED_URLS = ['importAuth', 'folders', 'config', 'deauthorize', 'accounts']

    @property
    def serialized_urls(self):
        """Combine the common citation URLs with the addon-specific ones."""
        external_account = self.node_settings.external_account
        ret = {
            'auth': api_url_for('oauth_connect',
                                service_name=self.node_settings.provider_name),
            'settings': web_url_for('user_addons'),
            'files': self.node_settings.owner.url,
        }
        if external_account and external_account.profile_url:
            ret['owner'] = external_account.profile_url

        addon_urls = self.addon_serialized_urls
        # Make sure developer returns set of needed urls
        # NOTE(review): developer invariant only; asserts are stripped
        # under python -O.
        for url in self.REQUIRED_URLS:
            assert url in addon_urls, "addon_serilized_urls must include key '{0}'".format(url)
        ret.update(addon_urls)

        return ret

    @property
    def serialized_node_settings(self):
        result = super(CitationsAddonSerializer, self).serialized_node_settings
        result['folder'] = self.node_settings.selected_folder_name
        return result

    @property
    def user_is_owner(self):
        """True when the current user's accounts can manage this node addon."""
        if self.user_settings is None:
            return False

        user_accounts = self.user_settings.external_accounts
        return bool(
            (
                self.node_settings.has_auth and
                (self.node_settings.external_account in user_accounts)
            ) or len(user_accounts)
        )

    @property
    def credentials_owner(self):
        # The user whose OAuth credentials authorize this node addon.
        return self.node_settings.user_settings.owner

    @abc.abstractmethod
    def serialize_folder(self, folder):
        # Addon-specific folder serialization; implemented by subclasses.
        pass

    def serialize_citation(self, citation):
        """Wrap a CSL citation dict for the file-tree API."""
        return {
            'csl': citation,
            'kind': 'file',
            'id': citation['id'],
        }
|
Python
| 0.000001
|
@@ -2109,24 +2109,80 @@
rovider_id,%0A
+ 'provider_name': external_account.provider,%0A
@@ -2510,16 +2510,17 @@
%5D
+,
%0A
|
c358f467bbab9bd0366347f9a1bd10cb2e027bb8
|
use moksha widget template
|
fedoracommunity/mokshaapps/packagemaintresource/controllers/root.py
|
fedoracommunity/mokshaapps/packagemaintresource/controllers/root.py
|
from moksha.lib.base import Controller
from moksha.lib.helpers import MokshaApp
from tg import expose, tmpl_context
from fedoracommunity.widgets import SubTabbedContainer
class TabbedNav(SubTabbedContainer):
tabs= (MokshaApp('Overview', 'fedoracommunity.packagemaint.overview'),
MokshaApp('Builds', 'fedoracommunity.builds'),
MokshaApp('Updates', 'fedoracommunity.updates'),
MokshaApp('Packages', 'fedoracommunity.packagemaint.packages'),
MokshaApp('Package Groups', 'fedoracommunity.packagemaint.packagegroups'),
)
class RootController(Controller):
def __init__(self):
self.widget = TabbedNav('packagemaintnav')
@expose('mako:fedoracommunity.mokshaapps.packagemaintresource.templates.index')
def index(self):
tmpl_context.widget = self.widget
return {}
|
Python
| 0
|
@@ -702,55 +702,14 @@
ako:
-fedoracommunity.mokshaapps.packagemaintresource
+moksha
.tem
@@ -715,21 +715,22 @@
mplates.
-index
+widget
')%0A d
@@ -803,10 +803,22 @@
return %7B
+'options':%7B%7D
%7D%0A
|
367b28277b03473e6453ad9aa26c734136db4105
|
use compound dimensions
|
ktbh/modelling.py
|
ktbh/modelling.py
|
import json
class AutoModellingException(Exception): pass
def make_model(amount_field, date_field, fields):
currency = "GBP"
dataset_name = "new-dataset"
description = "Dataset description"
label = "Dataset label"
dataset = {
"description": description,
"temporal_granularity": "day",
"schema_version": "2011-12-07",
"name": dataset_name,
"category": "other",
"currency": currency,
"label": label
}
def dimension(name, column_id, dim_type, data_type):
assert dim_type in ["date", "attribute", "measure"]
assert data_type in ["float", "string", "date"]
return (name, {
"default_value": "",
"description": name.title(),
"column": column_id,
"label": name.title(),
"datatype": data_type,
"type": dim_type
})
def as_os_type(t):
if t in ["integer", "number"]:
return "float"
else:
return "string"
dimensions_list = [
dimension("amount", amount_field["label"], "measure", "float"),
dimension("time", date_field["label"], "date", "date"),
]
for f in fields:
dim = dimension(f["id"], f["label"], "attribute", as_os_type(f["type"]))
dimensions_list.append(dim)
dimensions_list.append(("unique_rowid",
{"default_value": "",
"description": "Nonce Row ID",
"column": "unique_rowid",
"label": "RowID",
"datatype": "string",
"key": True,
"type": "attribute"
}))
return {
"dataset": dataset,
"mapping": dict(dimensions_list)
}
def infer_model_callback(body):
args = json.loads(body)
fields = args["schema"]["fields"]
# we need a date
# an amount
types = [ field["type"] for field in fields ]
numbers = filter(lambda s: s == "number", types)
if len(numbers) != 1:
raise AutoModellingException("Found more than one numerical field")
dates = filter(lambda s: s == "date", types)
if len(dates) != 1:
raise AutoModellingException("Found more than one date field")
other_fields = filter(lambda s: s not in ["number", "date"], types)
model = make_model(
[ f for f in fields if f["type"] == "number"][0],
[ f for f in fields if f["type"] == "date" ][0],
[ f for f in fields if f["type"] not in ["number", "date"]]
)
args["model"] = model
return [ ("import", args) ]
|
Python
| 0.000001
|
@@ -908,16 +908,580 @@
%7D)%0A%0A
+ def compound_dimension(name, column_id):%0A return (name, %0A %7B%0A %22attributes%22: %7B%0A %22name%22: %7B%0A %22datatype%22: %22id%22,%0A %22column%22: column_id%0A %7D,%0A %22label%22: %7B%0A %22column%22: column_id,%0A %22datatype%22: %22string%22%0A %7D%0A %7D,%0A %22type%22: %22compound%22,%0A %22description%22: column_id,%0A %22label%22: column_id%0A %7D)%0A%0A
def
@@ -1495,16 +1495,16 @@
ype(t):%0A
-
@@ -1804,16 +1804,17 @@
+#
dim = di
@@ -1842,16 +1842,42 @@
abel%22%5D,
+%0A #
%22attribu
@@ -1904,16 +1904,70 @@
ype%22%5D))%0A
+ dim = compound_dimension(f%5B%22id%22%5D, f%5B%22label%22%5D)%0A
|
5b6aa3f6cca7ea83a53178be7b9e58892597ac0b
|
Add some logging to Auth
|
opwen_email_server/services/auth.py
|
opwen_email_server/services/auth.py
|
from abc import ABCMeta
from abc import abstractmethod
from functools import lru_cache
from typing import Callable
from typing import Optional
from azure.storage.table import TableService
class Auth(metaclass=ABCMeta):
@abstractmethod
def domain_for(self, client_id: str) -> Optional[str]:
raise NotImplementedError # pramga: no cover
class AzureAuth(Auth):
def __init__(self, account: str, key: str, table: str,
client: TableService=None,
client_factory: Callable[..., TableService]=TableService
) -> None:
self._account = account
self._key = key
self._table = table
self.__client = client
self._client_factory = client_factory
@property
def _client(self) -> TableService:
if self.__client is not None:
return self.__client
client = self._client_factory(self._account, self._key)
client.create_table(self._table)
self.__client = client
return client
def insert(self, client_id: str, domain: str):
self._client.insert_entity(self._table, {
'RowKey': client_id,
'PartitionKey': client_id,
'domain': domain,
})
def domain_for(self, client_id):
try:
return self._domain_for_cached(client_id)
except KeyError:
return None
@lru_cache(maxsize=128)
def _domain_for_cached(self, client_id: str) -> str:
query = "PartitionKey eq '{0}' and RowKey eq '{0}'".format(client_id)
entities = self._client.query_entities(self._table, query)
for entity in entities:
domain = entity.get('domain')
if domain:
return domain
raise KeyError
|
Python
| 0.000001
|
@@ -183,16 +183,67 @@
ervice%0A%0A
+from opwen_email_server.utils.log import LogMixin%0A%0A
%0Aclass A
@@ -421,16 +421,26 @@
uth(Auth
+, LogMixin
):%0A d
@@ -1298,16 +1298,95 @@
%7D)
+%0A self.log_debug('Registered client %25s at domain %25s', client_id, domain)
%0A%0A de
@@ -1880,21 +1880,162 @@
-return domain
+self.log_debug('Got domain %25s for client %25s', domain, client_id)%0A return domain%0A self.log_debug('Unrecognized client %25s', client_id)
%0A
|
ac514a320b2bca52eb580936cad928f5be69200f
|
print drivers labware_client
|
labware_client.py
|
labware_client.py
|
#!/usr/bin/env python3
import asyncio
import time
import json
import uuid
import datetime
import sys
from labware_subscriber import Subscriber
from labware_publisher import Publisher
from labware_harness import Harness
from labware_driver import LabwareDriver
from autobahn.asyncio import wamp, websocket
from autobahn.asyncio.wamp import ApplicationSession, ApplicationRunner
loop = asyncio.get_event_loop()
def make_connection():
    """(Re)establish the WAMP websocket connection and run the event loop.

    Relies on the module-level ``loop`` and on ``transport_factory``
    (assigned in the __main__ block below) — TODO confirm call ordering.
    """
    # Stop a running loop so run_until_complete below can take over.
    if loop.is_running():
        loop.stop()
    coro = loop.create_connection(transport_factory, '0.0.0.0', 8080)
    transport, protocoler = loop.run_until_complete(coro)
    #protocoler.set_outer(self)
    # Resume the loop unless run_until_complete left it running.
    if not loop.is_running():
        loop.run_forever()
class WampComponent(wamp.ApplicationSession):
    """WAMP application session for OTOne (Overrides protocol.ApplicationSession - WAMP endpoint session)
    """

    outer = None

    def set_outer(self, outer_):
        """Attach a back-reference to the owning object.

        Fix: the original assigned to a *local* variable named ``outer``,
        silently discarding the argument; store it on the instance.
        """
        self.outer = outer_

    def onConnect(self):
        """Callback fired when the transport this session will run over has been established.
        """
        self.join(u"ot_realm")

    @asyncio.coroutine
    def onJoin(self, details):
        """Callback fired when WAMP session has been established.

        May return a Deferred/Future.
        """
        print(datetime.datetime.now(),' - driver_client : WampComponent.onJoin:')
        print('\targs:',locals())
        if not self.factory._myAppSession:
            self.factory._myAppSession = self
        # Route incoming labware messages to the subscriber created in the
        # __main__ block.  (Removed a block of commented-out handshake
        # code that was never enabled.)
        yield from self.subscribe(subscriber.dispatch_message, 'com.opentrons.labware')

    def onLeave(self, details):
        """Callback fired when WAMP session has been closed.

        :param details: Close information.
        """
        print('driver_client : WampComponent.onLeave:')
        print('\targs:',locals())
        if self.factory._myAppSession == self:
            self.factory._myAppSession = None
        # Fix: the previous bare `try: ... except: raise` wrapper was a
        # no-op; call disconnect directly.
        self.disconnect()

    def onDisconnect(self):
        """Callback fired when underlying transport has been closed.
        """
        print(datetime.datetime.now(),' - labware_client : WampComponent.onDisconnect:')
        asyncio.get_event_loop().stop()
if __name__ == '__main__':
    try:
        session_factory = wamp.ApplicationSessionFactory()
        session_factory.session = WampComponent
        session_factory._myAppSession = None

        url = "ws://0.0.0.0:8080/ws"
        transport_factory = websocket.WampWebSocketClientFactory(
            session_factory, url=url, debug=False, debug_wamp=False)
        loop = asyncio.get_event_loop()

        # INITIAL SETUP PUBLISHER, HARNESS, SUBSCRIBER
        print('*\t*\t* initial setup - publisher, harness, subscriber\t*\t*\t*')
        publisher = Publisher(session_factory)
        labware_harness = Harness(publisher)
        subscriber = Subscriber(labware_harness,publisher)
        labware_harness.set_publisher(publisher)

        # INSTANTIATE DRIVERS:
        print('*\t*\t* instantiate drivers\t*\t*\t*')
        labbie_driver = LabwareDriver()

        # ADD DRIVERS TO HARNESS
        print('*\t*\t* add drivers to harness\t*\t*\t*')
        labware_harness.add_driver('frontend','','labware',labbie_driver)
        # Fix: pass the empty session string so the arity matches the
        # drivers() call used further below.
        print(labware_harness.drivers(publisher.id,'',None,None))

        # DEFINE CALLBACKS:
        print('*\t*\t* define callbacks\t*\t*\t*')

        def frontend_cb(name, session_id, data_dict):
            """Forward a driver message to the frontend session."""
            print(datetime.datetime.now(),' - labware_client.frontend')
            print('\targs:',locals())
            dd_name = list(data_dict)[0]
            dd_value = data_dict[dd_name]
            # Fix: previously referenced the undefined name ``sessionID``
            # (NameError at runtime); use the ``session_id`` parameter.
            if session_id == "":
                publisher.publish('frontend',session_id,session_id,'labware',name,dd_name,dd_value)
            else:
                publisher.publish(session_id,session_id,session_id,'labware',name,dd_name,dd_value)

        def driver_cb(name, session_id, data_dict):
            """Forward a frontend message to the driver channel."""
            print(datetime.datetime.now(),' - labware_client.driver')
            print('\targs:',locals())
            dd_name = list(data_dict)[0]
            dd_value = data_dict[dd_name]
            # Fix: ``sessionID`` was undefined here as well.
            publisher.publish('driver','',session_id,name,dd_name,dd_value)

        # ADD CALLBACKS VIA HARNESS:
        # (Fix: removed a duplicated banner print that was mislabeled
        # "METACALLBACKS".)
        print('*\t*\t* add callbacks via harness\t*\t*\t*')
        labware_harness.add_callback(publisher.id,'labware', {driver_cb:['driver']})
        labware_harness.add_callback(publisher.id,'frontend', {frontend_cb:['frontend']})

        # Show what was added.
        for d in labware_harness.drivers(publisher.id,'',None,None):
            print(labware_harness.callbacks(publisher.id,d, None))

        # CONNECT TO DRIVERS:
        print('*\t*\t* connect to drivers\t*\t*\t*')
        #driver_harness.connect(publisher.id,'smoothie',None)
        print('END INIT')
        make_connection()
    except KeyboardInterrupt:
        pass
    finally:
        loop.close()
|
Python
| 0
|
@@ -4044,16 +4044,19 @@
sher.id,
+'',
None,Non
|
4b2a29c484ddd5e2dfb4ad91bb0ae5c7681553c1
|
Bump version to 0.1.5
|
lacrm/_version.py
|
lacrm/_version.py
|
__version_info__ = (0, 1, 4)
__version__ = '.'.join(map(str, __version_info__))
|
Python
| 0.000001
|
@@ -23,9 +23,9 @@
1,
-4
+5
)%0A__
|
0cdac10ee51cc3e812ae9188606301e6be0644ee
|
Fix default url bug
|
web/project/main/urls.py
|
web/project/main/urls.py
|
from django.conf.urls import url, include
from rest_framework.authtoken import views as authviews
from rest_framework_jwt import views as jwt_views
from . import views
urlpatterns = [
url(r'', views.index, name='index'),
url(r'^home/', views.index, name='index'),
# Authentication APIs
url(r'^api/auth', jwt_views.obtain_jwt_token, name="auth"),
url(r'^api/token-verify', jwt_views.verify_jwt_token, name="token-verify"),
url(r'^api/token-refresh', jwt_views.refresh_jwt_token, name="token-refresh"),
# User APIs
url(r'^api/register', views.UserCreateView.as_view(), name="register"),
url(r'^api/entity', views.EntityCreateView.as_view(), name="entity"),
url(r'^api/doctor', views.DoctorCreateView.as_view(), name="doctor"),
url(r'^api/login', views.UserLoginView.as_view(), name="login"),
url(r'^api/user', views.CurrentUserView.as_view(), name="user"),
url(r'^api/profile', views.UserProfileView.as_view(), name="profile"),
url(r'^api/record', views.RecordAPIView.as_view(), name="record"),
url(r'^api/questions', views.QuestionGetAPIView.as_view(), name="questions"),
url(r'^api/answer', views.AnswerAPIView.as_view(), name="answer"),
url(r'^api/symptom', views.SymptomAPIView.as_view(), name="symptom"),
url(r'^api/edit_symptom/(?P<record>\d+)/(?P<symptom>\d+)$', views.SymptomUpdateView.as_view(), name="edit_symptom"),
url(r'^api/edit_answer/(?P<record>\d+)/(?P<question>\d+)$', views.AnswerUpdateView.as_view(), name="edit_answer"),
url(r'^api/edit_record/(?P<pk>\d+)$', views.RecordUpdateView.as_view(), name="edit_record"),
url(r'^api/edit_question/(?P<pk>\d+)$', views.QuestionUpdateView.as_view(), name="edit_question"),
]
|
Python
| 0.000003
|
@@ -182,49 +182,8 @@
= %5B%0A
- url(r'', views.index, name='index'),%0A
@@ -1669,10 +1669,69 @@
tion%22),%0A
+ # Default URL%0A url(r'', views.index, name='index'),%0A
%5D%0A
|
9f7837f572017a4a8176c4e74b0aaba0625905ed
|
Add support for custom import apps
|
parachute/management/commands/import_from.py
|
parachute/management/commands/import_from.py
|
import logging
from optparse import make_option
from django.db.models.loading import load_app
from django.core.management.base import LabelCommand
class Command(LabelCommand):
import_app = 'importer'
option_list = LabelCommand.option_list + (
make_option('--importer',
dest='force_update',
help='Specify you own importer app to be used by parachute.'),
make_option('--database',
dest='database',
default=None,
help='Force importer to updated existing DB entries with imported ones.'),
make_option('--force-update',
action='store_true',
dest='force_update',
default=False,
help='Force importer to updated existing DB entries with imported ones.'),
make_option('--import-customers',
action='store_true',
dest='import_customers',
default=False,
help='Import only the customers.'),
make_option('--import-catalogue',
action='store_true',
dest='import_catalogue',
default=False,
help='Import only the catalogue.'),
make_option('--import-orders',
action='store_true',
dest='import_orders',
default=False,
help='Import only the orders.'),
make_option('--import-old-urls',
action='store_true',
dest='import_old_urls',
default=False,
help='Import the old urls of categories into url-tracker.'),
)
def handle_label(self, label, **options):
logger = self._get_logger()
# import the correct app for the desired backend
platform_app = options.importer
if not platform_app:
platform_app = "%s.%s" % (self.import_app, label)
logger.debug('trying to import platform app: %s', platform_app)
try:
load_app("%s.%s" % (self.import_app, label))
except ImportError:
logger.error("invalid import backend '%s' specified", label)
return
logger.info("succesfully loaded importer app for '%s'", label)
try:
backend = __import__(platform_app, globals(), locals(), ['Importer'])
importer = backend.Importer(
force_update=options.get('force_update', False),
verbosity=int(options.get('verbosity', logging.INFO)),
)
except AttributeError:
logger.error("no importer available in backend '%s'", platform_app)
return
logger.debug("found importer object for '%s'", platform_app)
importer.prepare_import(**options)
if options.get('import_customers'):
importer.import_customers()
if options.get('import_catalogue'):
importer.import_catalogue()
if options.get('import_orders'):
importer.import_orders()
if options.get('import_old_urls'):
importer.import_old_urls()
def _get_logger(self):
logger = logging.getLogger(__file__)
stream = logging.StreamHandler(self.stdout)
logger.addHandler(stream)
logger.setLevel(logging.DEBUG)
return logger
|
Python
| 0
|
@@ -190,24 +190,25 @@
_app = '
-impor
+parachu
te
-r
'%0A%0A o
@@ -253,150 +253,8 @@
+ (%0A
- make_option('--importer',%0A dest='force_update',%0A help='Specify you own importer app to be used by parachute.'),%0A
@@ -1462,16 +1462,45 @@
tions):%0A
+ platform_app = None%0A%0A
@@ -1540,124 +1540,214 @@
-# import the correct app for the desired backend%0A platform_app = options.importer%0A if not platform_app
+logger.debug('attempting to import app: %25s', label)%0A%0A try:%0A load_app(label)%0A except ImportError:%0A logger.debug(%22could not import custom backend '%25s'%22, label)%0A else
:%0A
@@ -1775,44 +1775,48 @@
p =
-%22%25s.%25s%22 %25 (self.import
+label%0A%0A if not platform
_app
-, label)%0A%0A
+:%0A
@@ -1854,39 +1854,36 @@
ort
-platform app: %25s', platform_
+from parachute default
app
+s'
)%0A%0A
@@ -1881,36 +1881,84 @@
apps')%0A%0A
-try:
+ # import the correct app for the desired backend
%0A loa
@@ -1955,24 +1955,23 @@
l
-oad_app(
+abel =
%22%25s.%25s%22
@@ -1996,18 +1996,70 @@
, label)
-)
%0A
+ try:%0A load_app(label)%0A
@@ -2070,32 +2070,36 @@
pt ImportError:%0A
+
logg
@@ -2167,23 +2167,83 @@
+
return%0A
+ else:%0A platform_app = label%0A%0A
@@ -2298,21 +2298,28 @@
'%25s'%22,
+p
la
-bel
+tform_app
)%0A%0A
|
4f691840d917e7981a047d967cfcdc17c9551be9
|
Update packaged_config_0_pyquickhelper.py
|
src/pymyinstall/packaged/packaged_config_0_pyquickhelper.py
|
src/pymyinstall/packaged/packaged_config_0_pyquickhelper.py
|
# -*- coding: utf-8 -*-
"""
@file
@brief Defines different a set of usual modules for Python.
"""
import sys
def pyquickhelper_set():
"""
list of modules needed to run unit test of module *pyquickhelper*
"""
names = [
"alabaster",
"asn1crypto",
"astroid",
"attrs",
"autopep8",
"babel",
'backcall',
"backports_abc",
"backports.shutil-get-terminal-size",
"bleach",
"blockdiag",
"bottleneck",
"brewer2mpl",
"cairocffi",
"cairosvg",
"certifi",
"cffi",
"chardet",
"codecov",
"colorama",
"coverage",
"cryptography",
"cssselect2",
"Cython",
"cycler",
"DataProperty",
"decorator",
"defusedxml", # cairosvg
"docformatter",
"docutils",
"entrypoints",
"et_xmlfile",
"filelock",
"funcparserlib",
"git-pandas",
"gitdb2",
"gitpython",
"html5lib",
"idna",
"imagesize",
"importlib_metadata",
"ipython",
"ipykernel",
"ipympl",
"ipystata" if sys.version_info[0] == 2 else None,
"ipython_genutils",
"ipywidgets",
"isort",
"jdcal",
"jedi",
"jeepney",
"jinja2",
"jsonschema",
"jupyter-console",
"jupyter",
"jupyterlab_pygments",
"jupyter_core",
"jupyter_client",
"jupyter-pip",
"jupyter_sphinx",
"jyquickhelper",
"keyring",
"kiwisolver",
"lazy_object_proxy",
"logbook",
"lxml",
"matplotlib",
"mbstrdecoder",
"metakernel",
"micropython-libc" if not sys.platform.startswith("win") else None,
"micropython-ffilib" if not sys.platform.startswith(
"win") else None,
"micropython-fcntl" if not sys.platform.startswith(
"win") else None,
'markupsafe',
"mccabe",
"mistune",
"multi_key_dict",
"nbformat",
"nbconvert",
"nbpresent",
"nose",
"notebook",
"notedown",
"numpy",
"olefile",
"openpyxl",
"path.py",
"pbr",
"packaging",
"pandas",
"pandoc-attributes",
"pandocfilters",
"parso",
"pathvalidate",
"patsy",
"pep8",
"pexpect",
"pickleshare",
"Pillow",
"pipdeptree",
"prometheus_client",
"prompt_toolkit",
"psutil",
"ptyprocess",
"pycodestyle",
"pycparser",
"pygments",
"pylint",
"pyparsing",
'pypiserver',
'pyrsistent',
"python-dateutil",
"python-jenkins",
"pytz",
"pywin32" if sys.platform.startswith("win") else None,
"pywin32-ctypes" if sys.platform.startswith("win") else None,
'pywinpty',
"pyzmq",
"qtconsole",
"requests",
"secretstorage",
"semantic_version",
"simplegeneric",
"Send2Trash",
"six",
"smmap2",
'snowballstemmer',
"sphinx",
"sphinx_gallery",
'sphinxcontrib-applehelp',
'sphinxcontrib-devhelp',
"sphinxcontrib-blockdiag",
'sphinxcontrib-htmlhelp',
'sphinxcontrib-imagesvg',
'sphinxcontrib-jsdemo',
'sphinxcontrib-jsmath',
'sphinxcontrib-qthelp',
'sphinxcontrib-serializinghtml',
'sphinxcontrib-websupport',
'sphinx-rtd-theme',
"tabledata",
'tabulate',
"terminado",
"testpath",
"tinycss2",
"tornado",
'tqdm',
"traitlets",
"typepy",
"unify",
"untokenize",
"urllib3",
"virtualenv",
"xlrd",
"xlsxwriter",
"xlwt",
"wcwidth",
"webcolors",
"webencodings",
"wheel",
"widgetsnbextension",
"wild_sphinx_theme",
"win_unicode_console",
"winrandom" if sys.platform.startswith("win") else None,
"winshell" if sys.platform.startswith("win") else None,
"wrapt", # astroid
"zipp",
]
from .automate_install import find_module_install
return [find_module_install(_) for _ in names if _ is not None]
|
Python
| 0.000004
|
@@ -2152,29 +2152,8 @@
t%22,%0A
- %22nbpresent%22,%0A
|
27a39812088b9312314b44a013483b49a77d8dfb
|
update set of modules to install for pyquickhelper
|
src/pymyinstall/packaged/packaged_config_0_pyquickhelper.py
|
src/pymyinstall/packaged/packaged_config_0_pyquickhelper.py
|
#-*- coding: utf-8 -*-
"""
@file
@brief Defines different a set of usual modules for Python.
"""
import sys
def pyquickhelper_set():
"""
list of modules needed to run unit test of module *pyquickhelper*
"""
names = [
"alabaster",
"autopep8",
"babel",
"certifi",
"colorama",
"coverage",
"Cython",
"cycler",
"decorator",
"docutils",
"flake8",
"futures",
"husl",
"ipython",
"ipykernel",
"ipystata" if sys.version_info[0] == 2 else None,
"ipython_genutils",
"ipywidgets",
"jinja2",
"jsonschema",
"jupyter-console",
"jupyter",
"jupyter_core",
"jupyter_client",
"jupyter-pip",
"lxml",
"matplotlib",
"metakernel",
"micropython-libc" if not sys.platform.startswith("win") else None,
"micropython-ffilib" if not sys.platform.startswith(
"win") else None,
"micropython-fcntl" if not sys.platform.startswith(
"win") else None,
'markupsafe',
"mccabe",
"mistune",
"multi_key_dict",
"nbformat",
"nbconvert",
"nose",
"backports_abc",
"notebook",
"notedown",
"numpy",
"onedrive-sdk-python",
"openpyxl",
"path.py",
"pbr",
"pandas",
"pep8",
"pexpect" if not sys.platform.startswith("win") else None,
"pickleshare",
"pipdeptree",
"psutil",
"ptyprocess" if not sys.platform.startswith("win") else None,
"pycparser",
"pyflakes",
"pygments",
"pyparsing",
'pypiserver',
"python-dateutil",
"python-jenkins",
"pytz",
"pywin32" if sys.platform.startswith("win") else None,
"pyzmq",
"qtconsole",
"requests",
"simplegeneric",
"six",
"sphinx",
'sphinxcontrib-images',
'sphinxcontrib-imagesvg',
'sphinxcontrib-jsdemo',
'snowballstemmer',
'sphinx-rtd-theme',
"sphinxjp.themes.revealjs",
"terminado" if not sys.platform.startswith("win") else None,
"tornado",
"traitlets",
"virtualenv",
"wheel",
"wild_sphinx_theme",
"winshell" if sys.platform.startswith("win") else None,
]
from . import find_module_install
return [find_module_install(_) for _ in names if _ is not None]
|
Python
| 0
|
@@ -1659,24 +1659,48 @@
pycparser%22,%0A
+ %22pycryptodome%22,%0A
%22pyf
|
ba73e1e06dae26716da29a02c1705458d402a9be
|
update PRISM model to take CDDs into account for electricity
|
eemeter/meter/prism.py
|
eemeter/meter/prism.py
|
from eemeter.meter.base import MeterBase
from eemeter.config.yaml_parser import load
class PRISMMeter(MeterBase):
"""Implementation of Princeton Scorekeeping Method.
"""
def __init__(self,**kwargs):
super(self.__class__, self).__init__(**kwargs)
self.meter = load(self._meter_yaml())
def _meter_yaml(self):
meter_yaml = """
!obj:eemeter.meter.SequentialMeter {
sequence: [
!obj:eemeter.meter.FuelTypePresenceMeter {
fuel_types: [electricity,natural_gas]
},
!obj:eemeter.meter.ConditionalMeter {
condition_parameter: electricity_presence,
success: !obj:eemeter.meter.SequentialMeter {
sequence: [
!obj:eemeter.meter.TemperatureSensitivityParameterOptimizationMeter {
fuel_unit_str: "kWh",
fuel_type: "electricity",
temperature_unit_str: "degF",
model: !obj:eemeter.models.HDDBalancePointModel &elec_model {
x0: [60,1.,1.],
bounds: [[55,65],[0,100],[0,100]],
},
},
!obj:eemeter.meter.AnnualizedUsageMeter {
fuel_type: "electricity",
temperature_unit_str: "degF",
model: *elec_model,
},
],
output_mapping: {
temp_sensitivity_params: temp_sensitivity_params_electricity,
annualized_usage: annualized_usage_electricity,
},
},
},
!obj:eemeter.meter.ConditionalMeter {
condition_parameter: natural_gas_presence,
success: !obj:eemeter.meter.SequentialMeter {
sequence: [
!obj:eemeter.meter.TemperatureSensitivityParameterOptimizationMeter {
fuel_unit_str: "therms",
fuel_type: "natural_gas",
temperature_unit_str: "degF",
model: !obj:eemeter.models.HDDBalancePointModel &gas_model {
x0: [60,1.,1.],
bounds: [[55,65],[0,100],[0,100]],
},
},
!obj:eemeter.meter.AnnualizedUsageMeter {
fuel_type: "natural_gas",
temperature_unit_str: "degF",
model: *gas_model,
},
],
output_mapping: {
temp_sensitivity_params: temp_sensitivity_params_natural_gas,
annualized_usage: annualized_usage_natural_gas,
},
},
},
]
}
"""
return meter_yaml
def evaluate_mapped_inputs(self,**kwargs):
return self.meter.evaluate(**kwargs)
def _get_child_inputs(self):
return self.meter.get_inputs()
|
Python
| 0
|
@@ -1166,32 +1166,35 @@
meter.models.HDD
+CDD
BalancePointMode
@@ -1250,32 +1250,38 @@
x0: %5B
-60
+1.
,1.,1.
+,60.,5
%5D,%0A
@@ -1313,37 +1313,37 @@
bounds: %5B%5B
-55,65
+0,100
%5D,%5B0,100%5D,%5B0,100
@@ -1335,32 +1335,47 @@
,%5B0,100%5D,%5B0,100%5D
+,%5B55,65%5D,%5B2,10%5D
%5D,%0A
|
373a6f56a19c131debfb47f7d610280a586661f7
|
Use importlib instead of imp.
|
services/utils.py
|
services/utils.py
|
from datetime import datetime, timedelta
import dictconfig
import logging
import os
# get the right settings module
import imp
settingmodule = os.environ.get('DJANGO_SETTINGS_MODULE', 'settings_local')
if settingmodule.startswith(('zamboni', # typical git clone destination
'workspace', # Jenkins
'project', # vagrant VM
'freddo')):
settingmodule = settingmodule.split('.', 1)[1]
res = imp.find_module(settingmodule)
settings = imp.load_module(settingmodule, *res)
import posixpath
import re
import sys
from cef import log_cef as _log_cef
import MySQLdb as mysql
import sqlalchemy.pool as pool
from django.core.management import setup_environ
import commonware.log
# Pyflakes will complain about these, but they are required for setup.
setup_environ(settings)
from lib.log_settings_base import formatters, handlers, loggers
# Ugh. But this avoids any zamboni or django imports at all.
# Perhaps we can import these without any problems and we can
# remove all this.
from constants.applications import APPS_ALL
from constants.platforms import PLATFORMS
from constants.base import (ADDON_PREMIUM, STATUS_PUBLIC, STATUS_DISABLED,
STATUS_BETA, STATUS_LITE,
STATUS_LITE_AND_NOMINATED)
from constants.payments import (CONTRIB_CHARGEBACK, CONTRIB_PURCHASE,
CONTRIB_REFUND)
APP_GUIDS = dict([(app.guid, app.id) for app in APPS_ALL.values()])
PLATFORMS = dict([(plat.api_name, plat.id) for plat in PLATFORMS.values()])
ADDON_SLUGS_UPDATE = {
1: 'extension',
2: 'theme',
3: 'extension',
4: 'search',
5: 'item',
6: 'extension',
7: 'plugin'}
STATUSES_PUBLIC = {'STATUS_PUBLIC': STATUS_PUBLIC,
'STATUS_LITE': STATUS_LITE,
'STATUS_LITE_AND_NOMINATED': STATUS_LITE_AND_NOMINATED}
version_re = re.compile(r"""(?P<major>\d+) # major (x in x.y)
\.(?P<minor1>\d+) # minor1 (y in x.y)
\.?(?P<minor2>\d+|\*)? # minor2 (z in x.y.z)
\.?(?P<minor3>\d+|\*)? # minor3 (w in x.y.z.w)
(?P<alpha>[a|b]?) # alpha/beta
(?P<alpha_ver>\d*) # alpha/beta version
(?P<pre>pre)? # pre release
(?P<pre_ver>\d)? # pre release version""",
re.VERBOSE)
def get_mirror(status, id, row):
if row['datestatuschanged']:
published = datetime.now() - row['datestatuschanged']
else:
published = timedelta(minutes=0)
if row['disabled_by_user'] or status == STATUS_DISABLED:
host = settings.PRIVATE_MIRROR_URL
elif (status == STATUS_PUBLIC
and not row['disabled_by_user']
and row['file_status'] in (STATUS_PUBLIC, STATUS_BETA)
and published > timedelta(minutes=settings.MIRROR_DELAY)
and not settings.DEBUG):
host = settings.MIRROR_URL
else:
host = settings.LOCAL_MIRROR_URL
return posixpath.join(host, str(id), row['filename'])
def getconn():
db = settings.SERVICES_DATABASE
return mysql.connect(host=db['HOST'], user=db['USER'],
passwd=db['PASSWORD'], db=db['NAME'])
mypool = pool.QueuePool(getconn, max_overflow=10, pool_size=5, recycle=300)
def log_configure():
"""You have to call this to explicity configure logging."""
cfg = {
'version': 1,
'filters': {},
'formatters': dict(prod=formatters['prod']),
'handlers': dict(syslog=handlers['syslog']),
'loggers': {
'z': {'handlers': ['syslog'], 'level': logging.INFO},
},
'root': {},
# Since this configuration is applied at import time
# in verify.py we don't want it to clobber other logs
# when imported into the marketplace Django app.
'disable_existing_loggers': False,
}
dictconfig.dictConfig(cfg)
def log_exception(data):
# Note: although this logs exceptions, it logs at the info level so that
# on prod, we log at the error level and result in no logs on prod.
typ, value, discard = sys.exc_info()
error_log = logging.getLogger('z.receipt')
error_log.info(u'Type: %s, %s. Data: %s' % (typ, value, data))
def log_info(msg):
error_log = logging.getLogger('z.receipt')
error_log.info(msg)
def log_cef(request, app, msg, longer):
"""Log receipt transactions to the CEF library."""
c = {'cef.product': getattr(settings, 'CEF_PRODUCT', 'AMO'),
'cef.vendor': getattr(settings, 'CEF_VENDOR', 'Mozilla'),
'cef.version': getattr(settings, 'CEF_VERSION', '0'),
'cef.device_version': getattr(settings, 'CEF_DEVICE_VERSION', '0'),
'cef.file': getattr(settings, 'CEF_FILE', 'syslog'), }
kwargs = {'username': getattr(request, 'amo_user', ''),
'signature': 'RECEIPT%s' % msg.upper(),
'msg': longer, 'config': c,
'cs2': app, 'cs2Label': 'ReceiptTransaction'}
return _log_cef('Receipt %s' % msg, 5, request, **kwargs)
|
Python
| 0.000001
|
@@ -114,19 +114,8 @@
ule%0A
-import imp%0A
sett
@@ -444,93 +444,8 @@
1%5D%0A%0A
-res = imp.find_module(settingmodule)%0Asettings = imp.load_module(settingmodule, *res)%0A
%0Aimp
@@ -644,16 +644,102 @@
re.log%0A%0A
+from django.utils import importlib%0Asettings = importlib.import_module(settingmodule)%0A%0A
# Pyflak
|
9e0c83e751e72e3396a4729392b972834b25c8b7
|
Add TODO
|
v2/aws_secgroup_ids_from_names.py
|
v2/aws_secgroup_ids_from_names.py
|
# (c) 2015, Jon Hadfield <jon@lessknown.co.uk>
"""
Description: This lookup takes an AWS region and a list of one or more
security Group Names and returns a list of matching security Group IDs.
Example Usage:
{{ lookup('aws_secgroup_ids_from_names', ('eu-west-1', ['nginx_group', 'mysql_group'])) }}
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import codecs
from ansible.errors import *
from ansible.plugins.lookup import LookupBase
try:
import boto
import boto.ec2
except ImportError:
raise AnsibleError("aws_secgroup_ids_from_names lookup cannot be run without boto installed")
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
if isinstance(terms, basestring):
terms = [terms]
sg_list = []
region = terms[0]
group_names = terms[1]
conn = boto.ec2.connect_to_region(region)
for group_name in group_names:
filters = {'group_name': group_name}
sg = conn.get_all_security_groups(filters=filters)
if sg and sg[0]:
sg_list.append(sg[0].id)
return sg_list
|
Python
| 0.000002
|
@@ -932,16 +932,79 @@
region)%0A
+ #TODO: Use OR filter rather than making multiple calls%0A
|
0c731bf993eea346421d9dbcd5eaa61484e84018
|
fix bug in site_hrn()
|
sfa/util/plxrn.py
|
sfa/util/plxrn.py
|
# specialized Xrn class for PlanetLab
import re
from sfa.util.xrn import Xrn
# temporary helper functions to use this module instead of namespace
def hostname_to_hrn (auth, login_base, hostname):
return PlXrn(auth=auth+'.'+login_base,hostname=hostname).get_hrn()
def hostname_to_urn(auth, login_base, hostname):
return PlXrn(auth=auth+'.'+login_base,hostname=hostname).get_urn()
def slicename_to_hrn (auth_hrn, slicename):
return PlXrn(auth=auth_hrn,slicename=slicename).get_hrn()
def email_to_hrn (auth_hrn, email):
return PlXrn(auth=auth_hrn, email=email).get_hrn()
def hrn_to_pl_slicename (hrn):
return PlXrn(xrn=hrn,type='slice').pl_slicename()
def hrn_to_pl_login_base (hrn):
return PlXrn(xrn=hrn,type='slice').pl_login_base()
def hrn_to_pl_authname (hrn):
return PlXrn(xrn=hrn,type='any').pl_authname()
class PlXrn (Xrn):
@staticmethod
def site_hrn (auth, login_base):
return '.'.join(auth,login_base)
def __init__ (self, auth=None, hostname=None, slicename=None, email=None, **kwargs):
#def hostname_to_hrn(auth_hrn, login_base, hostname):
if hostname is not None:
self.type='node'
# keep only the first part of the DNS name
#self.hrn='.'.join( [auth,hostname.split(".")[0] ] )
# escape the '.' in the hostname
self.hrn='.'.join( [auth,Xrn.escape(hostname)] )
self.hrn_to_urn()
#def slicename_to_hrn(auth_hrn, slicename):
elif slicename is not None:
self.type='slice'
# split at the first _
parts = slicename.split("_",1)
self.hrn = ".".join([auth] + parts )
self.hrn_to_urn()
#def email_to_hrn(auth_hrn, email):
elif email is not None:
self.type='person'
# keep only the part before '@' and replace special chars into _
self.hrn='.'.join([auth,email.split('@')[0].replace(".", "_").replace("+", "_")])
self.hrn_to_urn()
else:
Xrn.__init__ (self,**kwargs)
#def hrn_to_pl_slicename(hrn):
def pl_slicename (self):
self._normalize()
leaf = self.leaf
leaf = re.sub('[^a-zA-Z0-9_]', '', leaf)
return self.pl_login_base() + '_' + leaf
#def hrn_to_pl_authname(hrn):
def pl_authname (self):
self._normalize()
return self.authority[-1]
#def hrn_to_pl_login_base(hrn):
def pl_login_base (self):
self._normalize()
base = self.authority[-1]
# Fix up names of GENI Federates
base = base.lower()
base = re.sub('\\\[^a-zA-Z0-9]', '', base)
if len(base) > 20:
base = base[len(base)-20:]
return base
|
Python
| 0
|
@@ -933,16 +933,17 @@
.'.join(
+%5B
auth,log
@@ -945,24 +945,25 @@
h,login_base
+%5D
)%0A%0A def _
|
1337c5269d97dc6f1cd47aed838cf26c6b488be2
|
bump version
|
shell/__init__.py
|
shell/__init__.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
__title__ = 'shell'
__version__ = '0.0.6'
__author__ = 'Qingping Hou'
__license__ = 'MIT'
from .run_cmd import RunCmd
from .input_stream import InputStream
from .api import instream, cmd, pipe_all, ex, p, ex_all
|
Python
| 0
|
@@ -82,9 +82,9 @@
0.0.
-6
+7
'%0A__
|
5873bb323d21ab8f9373518a5dd9688df4b38a9a
|
Break line before 80 columns.
|
shell/src/main.py
|
shell/src/main.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2014, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import bayeslite
import bayeslite.crosscat
import bayeslite.shell.core as shell
import bayeslite.shell.hook as hook
def parse_args(argv):
parser = argparse.ArgumentParser()
parser.add_argument('bdbpath', type=str, nargs='?', default=':memory:',
help="bayesdb database file")
parser.add_argument('-j', '--njob', type=int, default=None,
help="Max number of jobs (processes) useable.")
parser.add_argument('-s', '--seed', type=int, default=None,
help="Random seed for the default generator.")
parser.add_argument('-f', '--file', type=str, nargs="+", default=None,
help="Path to commands file. May be used to specify a "
"project-specific init file.")
parser.add_argument('--batch', action='store_true',
help="Exit after executing file specified with -f.")
parser.add_argument('--debug', action='store_true', help="For unit tests.")
parser.add_argument('--no-init-file', action='store_true',
help="Do not load ~/.bayesliterc")
args = parser.parse_args(argv)
return args
def run(stdin, stdout, stderr, argv):
args = parse_args(argv[1:])
bdb = bayeslite.bayesdb_open(pathname=args.bdbpath)
# People shouldn't have to ask to go fast, they should have to ask to
# slow down.
if args.njob not in [0, 1]:
import crosscat.MultiprocessingEngine as ccme
crosscat = ccme.MultiprocessingEngine(seed=args.seed,
cpu_count=args.njob)
else:
import crosscat.LocalEngine as ccle
crosscat = ccle.LocalEngine(seed=args.seed)
metamodel = bayeslite.crosscat.CrosscatMetamodel(crosscat)
bayeslite.bayesdb_register_metamodel(bdb, metamodel)
bdbshell = shell.Shell(bdb, 'crosscat', debug=args.debug)
with hook.set_current_shell(bdbshell):
if not args.no_init_file:
init_file = os.path.join(os.path.expanduser('~/.bayesliterc'))
if os.path.isfile(init_file):
bdbshell.dot_read(init_file)
if args.file is not None:
for path in args.file:
if os.path.isfile(path):
bdbshell.dot_read(path)
else:
bdbshell.stdout.write('%s is not a file. Aborting.\n' % str(path))
break
bdbshell.cmdloop()
return 0
def main():
import sys
sys.exit(run(sys.stdin, sys.stdout, sys.stderr, sys.argv))
if __name__ == '__main__':
main()
|
Python
| 0.000001
|
@@ -3051,16 +3051,17 @@
a file.
+
Abortin
@@ -3067,16 +3067,40 @@
ng.%5Cn' %25
+%0A
str(pat
|
adc5302c070cdcef1bcbf47f23348aa82c9c6670
|
Make aria2 respect --test mode again.
|
flexget/plugins/clients/aria2.py
|
flexget/plugins/clients/aria2.py
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import logging
import os
import xmlrpc.client
from socket import error as socket_error
from flexget import plugin
from flexget.event import event
log = logging.getLogger('aria2')
class OutputAria2(object):
"""
Simple Aria2 output
Example::
aria2:
path: ~/downloads/
"""
schema = {
'type': 'object',
'properties': {
'server': {'type': 'string', 'default': 'localhost'},
'port': {'type': 'integer', 'default': 6800},
'secret': {'type': 'string', 'default': ''},
'username': {'type': 'string', 'default': ''}, # NOTE: To be deprecated by aria2
'password': {'type': 'string', 'default': ''},
'path': {'type': 'string', 'format': 'path'},
'options': {
'type': 'object',
'additionalProperties': {'oneOf': [{'type': 'string'}, {'type': 'integer'}]}
}
},
'required': ['path'],
'additionalProperties': False
}
def aria2_connection(self, server, port, username=None, password=None):
if username and password:
userpass = '%s:%s@' % (username, password)
else:
userpass = ''
url = 'http://%s%s:%s/rpc' % (userpass, server, port)
log.debug('aria2 url: %s' % url)
log.info('Connecting to daemon at %s', url)
try:
return xmlrpc.client.ServerProxy(url).aria2
except xmlrpc.client.ProtocolError as err:
raise plugin.PluginError('Could not connect to aria2 at %s. Protocol error %s: %s'
% (url, err.errcode, err.errmsg), log)
except xmlrpc.client.Fault as err:
raise plugin.PluginError('XML-RPC fault: Unable to connect to aria2 daemon at %s: %s'
% (url, err.faultString), log)
except socket_error as e:
_, msg = e.args
raise plugin.PluginError('Socket connection issue with aria2 daemon at %s: %s' % (url, msg), log)
except:
log.debug('Unexpected error during aria2 connection', exc_info=True)
raise plugin.PluginError('Unidentified error during connection to aria2 daemon', log)
def prepare_config(self, config):
config.setdefault('server', 'localhost')
config.setdefault('port', 6800)
config.setdefault('username', '')
config.setdefault('password', '')
config.setdefault('secret', '')
config.setdefault('options', {})
return config
def on_task_output(self, task, config):
# don't add when learning
if task.options.learn:
return
config = self.prepare_config(config)
aria2 = self.aria2_connection(config['server'], config['port'],
config['username'], config['password'])
for entry in task.accepted:
try:
self.add_entry(aria2, entry, config)
except socket_error as se:
entry.fail('Unable to reach Aria2')
except xmlrpc.client.Fault as err:
log.critical('Fault code %s message %s', err.faultCode, err.faultString)
entry.fail('Aria2 communication Fault')
except Exception as e:
log.debug('Exception type %s', type(e), exc_info=True)
raise
def add_entry(self, aria2, entry, config):
"""
Add entry to Aria2
"""
options = config['options']
options['dir'] = os.path.expanduser(entry.render(config['path']).rstrip('/'))
secret = None
if config['secret']:
secret = 'token:%s' % config['secret']
# handle torrent files
if 'torrent' in entry:
if secret:
return aria2.addTorrent(secret, xmlrpc.client.Binary(open(entry['file'], mode='rb').read()))
return aria2.addTorrent(xmlrpc.client.Binary(open(entry['file'], mode='rb').read()))
# handle everything else (except metalink -- which is unsupported)
# so magnets, https, http, ftp .. etc
if secret:
return aria2.addUri(secret, [entry['url']], options)
return aria2.addUri([entry['url']], options)
@event('plugin.register')
def register_plugin():
plugin.register(OutputAria2, 'aria2', api_ver=2)
|
Python
| 0
|
@@ -3046,16 +3046,148 @@
cepted:%0A
+ if task.options.test:%0A log.verbose('Would add %60%25s%60 to aria2.' %25 entry%5B'title'%5D)%0A continue%0A
|
3ccd648ba58fd7e6a84b94e464094d0c5e3a8e55
|
Add line to separate results
|
states/bootstrap/bootstrap.dir/modules/utils/salt_output.py
|
states/bootstrap/bootstrap.dir/modules/utils/salt_output.py
|
#!/usr/bin/env python
#
import sys
import yaml
import logging
###############################################################################
def load_yaml_file_data(file_path):
"""
Load YAML formated data from file_path.
"""
# Instead of using `with` keyword, perform standard `try`/`finally`
# to support Python 2.5 on RHEL5.
yaml_file = open(file_path, 'r')
try:
loaded_data = yaml.load(yaml_file)
finally:
yaml_file.close()
return loaded_data
###############################################################################
def load_yaml_string_data(text_content):
"""
Load YAML formated data from string.
"""
loaded_data = yaml.load(text_content)
return loaded_data
###############################################################################
def check_result(salt_output):
    """
    Check result provided by Salt for local (see `salt-call`) execution.

    Logs every state's comment/name/result and returns True only when every
    state reports result == True.
    """
    local_result = salt_output['local']
    overall_result = True
    success_counter = 0
    total_counter = 0
    for state_key, state_data in local_result.items():
        total_counter += 1
        logging.info("`comment`: %s", state_data['comment'])
        if 'name' in state_data:
            logging.info("`name`: %s", state_data['name'])
        result_value = state_data['result']
        if result_value is None:
            logging.critical("unexpected `result` value: %s", result_value)
            overall_result = False
        elif result_value == False:
            logging.info("result: %s", result_value)
            overall_result = False
            # Do not break the loop; keep generating log output for the
            # remaining states.
        elif result_value == True:
            success_counter += 1
            logging.info("result: %s", result_value)
        else:
            logging.info("unexpected `result` value: %s", result_value)
            overall_result = False
    if overall_result:
        logging.info("SUCCESS: %s of %s", success_counter, total_counter)
    else:
        logging.critical("FAILURE: %s of %s", success_counter, total_counter)
    return overall_result
###############################################################################
# MAIN
# Execute futher only if this file is executed as a script (not imported
# as a module).
if __name__ == '__main__':
    # Level 0 lets every log record through, so all state results are shown.
    logging.getLogger().setLevel(0)
    # First CLI argument is the path to the saved `salt-call` YAML output.
    salt_output = load_yaml_file_data(sys.argv[1])
    # Exit status mirrors the overall result for shell callers.
    sys.exit(0 if check_result(salt_output) else 1)
|
Python
| 0
|
@@ -1103,24 +1103,106 @@
lt.keys():%0A%0A
+ # Separate visually one result from another.%0A logging.info(%22---%22)%0A%0A
tota
|
b65191a52e0bea63aeda3bcc4e28cb715295efa5
|
Add some additional test for value references.
|
tensorflow_federated/python/program/value_reference_test.py
|
tensorflow_federated/python/program/value_reference_test.py
|
# Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_federated.python.program import test_utils
from tensorflow_federated.python.program import value_reference
class MaterializeValueTest(parameterized.TestCase, tf.test.TestCase):
  """Covers materialize_value over Python scalars/containers, attrs objects,
  TF tensors, numpy values, and server array references (which materialize to
  their underlying values)."""

  # pyformat: disable
  @parameterized.named_parameters(
      ('none', None, None),
      ('bool', True, True),
      ('int', 1, 1),
      ('str', 'a', 'a'),
      ('list', [True, 1, 'a'], [True, 1, 'a']),
      ('list_empty', [], []),
      ('list_nested', [[True, 1], ['a']], [[True, 1], ['a']]),
      ('dict', {'a': True, 'b': 1, 'c': 'a'}, {'a': True, 'b': 1, 'c': 'a'}),
      ('dict_empty', {}, {}),
      ('dict_nested',
       {'x': {'a': True, 'b': 1}, 'y': {'c': 'a'}},
       {'x': {'a': True, 'b': 1}, 'y': {'c': 'a'}}),
      ('attr',
       test_utils.TestAttrObject1(True, 1),
       test_utils.TestAttrObject1(True, 1)),
      ('attr_nested',
       {'a': [test_utils.TestAttrObject1(True, 1)],
        'b': test_utils.TestAttrObject2('a')},
       {'a': [test_utils.TestAttrObject1(True, 1)],
        'b': test_utils.TestAttrObject2('a')}),
      ('tensor_int', tf.constant(1), tf.constant(1)),
      ('tensor_str', tf.constant('a'), tf.constant('a')),
      ('tensor_2d', tf.ones((2, 3)), tf.ones((2, 3))),
      ('tensor_nested',
       {'a': [tf.constant(True), tf.constant(1)], 'b': [tf.constant('a')]},
       {'a': [tf.constant(True), tf.constant(1)], 'b': [tf.constant('a')]}),
      ('numpy_int', np.int32(1), np.int32(1)),
      ('numpy_2d', np.ones((2, 3)), np.ones((2, 3))),
      ('numpy_nested',
       {'a': [np.bool(True), np.int32(1)], 'b': [np.str_('a')]},
       {'a': [np.bool(True), np.int32(1)], 'b': [np.str_('a')]}),
      ('server_array_reference', test_utils.TestServerArrayReference(1), 1),
      ('server_array_reference_nested',
       {'a': [test_utils.TestServerArrayReference(True),
              test_utils.TestServerArrayReference(1)],
        'b': [test_utils.TestServerArrayReference('a')]},
       {'a': [True, 1], 'b': ['a']}),
      ('materialized_values_and_value_references',
       [1, test_utils.TestServerArrayReference(2)],
       [1, 2]),
  )
  # pyformat: enable
  def test_returns_value(self, value, expected_value):
    actual_value = value_reference.materialize_value(value)

    # Type must match exactly (e.g. list vs tuple matters).
    self.assertEqual(type(actual_value), type(expected_value))
    # Tensors and ndarrays need elementwise comparison; everything else can
    # be compared with plain equality.
    if ((isinstance(actual_value, tf.Tensor) and
         isinstance(expected_value, tf.Tensor)) or
        (isinstance(actual_value, np.ndarray) and
         isinstance(expected_value, np.ndarray))):
      self.assertAllEqual(actual_value, expected_value)
    else:
      self.assertEqual(actual_value, expected_value)
if __name__ == '__main__':
  # Delegate to absl's test runner.
  absltest.main()
|
Python
| 0
|
@@ -708,16 +708,28 @@
ow as tf
+%0Aimport tree
%0A%0Afrom t
@@ -3346,16 +3346,1197 @@
value)%0A%0A
+ def test_returns_value_datasets(self):%0A value = tf.data.Dataset.from_tensor_slices(%5B1, 2, 3%5D)%0A%0A actual_value = value_reference.materialize_value(value)%0A%0A expected_value = tf.data.Dataset.from_tensor_slices(%5B1, 2, 3%5D)%0A self.assertEqual(type(actual_value), type(expected_value))%0A self.assertEqual(list(actual_value), list(expected_value))%0A%0A def test_returns_value_datasets_nested(self):%0A value = %7B%0A 'a': %5B%0A tf.data.Dataset.from_tensor_slices(%5BTrue, False%5D),%0A tf.data.Dataset.from_tensor_slices(%5B1, 2, 3%5D),%0A %5D,%0A 'b': %5Btf.data.Dataset.from_tensor_slices(%5B'a', 'b', 'c'%5D)%5D,%0A %7D%0A%0A actual_value = value_reference.materialize_value(value)%0A%0A expected_value = %7B%0A 'a': %5B%0A tf.data.Dataset.from_tensor_slices(%5BTrue, False%5D),%0A tf.data.Dataset.from_tensor_slices(%5B1, 2, 3%5D),%0A %5D,%0A 'b': %5Btf.data.Dataset.from_tensor_slices(%5B'a', 'b', 'c'%5D)%5D,%0A %7D%0A self.assertEqual(type(actual_value), type(expected_value))%0A actual_value = tree.map_structure(list, actual_value)%0A expected_value = tree.map_structure(list, expected_value)%0A self.assertEqual(actual_value, expected_value)%0A%0A
%0Aif __na
|
21ab430368ee262377c77f1ecc24b645377dd520
|
Revert "Bug Fix: sort keys when creating json data to send"
|
generic_request_signer/client.py
|
generic_request_signer/client.py
|
import six
from datetime import date
import json
import decimal
from apysigner import DefaultJSONEncoder
if six.PY3:
import urllib.request as urllib
else:
import urllib2 as urllib
from generic_request_signer import response, factory
def json_encoder(obj):
    """``default`` hook for json.dumps: dates as ISO 8601 strings, Decimals
    as plain strings.

    Raises TypeError for any other type, as the json ``default`` contract
    requires — the original implicitly returned None, which json would
    silently encode as ``null`` and mask serialization bugs.
    """
    if isinstance(obj, date):
        return str(obj.isoformat())
    if isinstance(obj, decimal.Decimal):
        return str(obj)
    raise TypeError('Object of type %s is not JSON serializable'
                    % type(obj).__name__)
class Client(object):
    """Issues HTTP requests signed with the supplied API credentials."""

    def __init__(self, api_credentials):
        self.api_credentials = api_credentials

    def get_factory(self, files):
        """Return the request factory class; multipart when files are sent."""
        if files:
            return factory.MultipartSignedRequestFactory
        return factory.SignedRequestFactory

    def _get_response(self, http_method, endpoint, data=None, files=None, timeout=15, **request_kwargs):
        """Send the signed request and wrap the reply (including HTTP errors)."""
        headers = request_kwargs.get("headers", {})
        sending_json = headers.get("Content-Type") == "application/json"
        if sending_json and not isinstance(data, str):
            data = json.dumps(data, default=DefaultJSONEncoder, sort_keys=True)
        try:
            http_response = urllib.urlopen(
                self._get_request(http_method, endpoint, data, files, **request_kwargs),
                timeout=timeout)
        except urllib.HTTPError as error:
            # HTTPError doubles as a file-like response; surface it uniformly.
            http_response = error
        return response.Response(http_response)

    def _get_request(self, http_method, endpoint, data=None, files=None, **request_kwargs):
        """Build a signed request object via the appropriate factory."""
        request_factory = self.get_factory(files)(
            http_method, self._client_id, self._private_key, data, files)
        return request_factory.create_request(
            self._get_service_url(endpoint), **request_kwargs)

    def _get_service_url(self, endpoint):
        return self._base_url + endpoint

    @property
    def _base_url(self):
        return self.api_credentials.base_url

    @property
    def _client_id(self):
        return self.api_credentials.client_id

    @property
    def _private_key(self):
        return self.api_credentials.private_key
|
Python
| 0
|
@@ -62,50 +62,8 @@
al%0A%0A
-from apysigner import DefaultJSONEncoder%0A%0A
if s
@@ -916,42 +916,20 @@
ult=
-DefaultJSONEncoder, sort_keys=True
+json_encoder
)%0A
|
67dfbfa250cd5de550a493c9951d456e05b05454
|
Make ModelImporter.model static for flexibility of usage
|
girder/utility/model_importer.py
|
girder/utility/model_importer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import importlib
from . import camelcase
# We want the models to essentially be singletons, so we keep this centralized
# cache of instantiated models that have been lazy-loaded.
_modelInstances = {}
def _loadModel(model, module, plugin):
    """Import *module*, look up its camel-cased *model* class, and cache a
    fresh instance under the plugin's registry in ``_modelInstances``."""
    global _modelInstances
    className = camelcase(model)

    try:
        imported = importlib.import_module(module)
    except ImportError:  # pragma: no cover
        raise Exception('Could not load model "{}".'.format(module))

    constructor = getattr(imported, className, None)
    if constructor is None:  # pragma: no cover
        raise Exception('Incorrect model class name "{}" for model "{}".'
                        .format(className, module))
    _modelInstances[plugin][model] = constructor()
def clearModels():
    """
    Force reloading of all models by clearing the singleton cache. This is
    used by the test suite to ensure that indices are built properly
    at startup.
    """
    global _modelInstances
    # Rebind (rather than mutate) so the entire per-plugin registry is dropped.
    _modelInstances = {}
class ModelImporter(object):
    """
    Mixin granting convenient, lazily-instantiated access to the singleton
    model registry.
    """
    def model(self, model, plugin='_core'):
        """
        Return the singleton instance of the named model, importing and
        instantiating it on first use.

        :param model: Module name of the model, e.g. "folder"; its class must
                      be the upper-camelcased form, e.g. "Folder".
        :type model: string
        :param plugin: Name of the plugin providing the model, or '_core' for
                       built-in models.
        :returns: The instantiated model singleton.
        """
        global _modelInstances
        registry = _modelInstances.setdefault(plugin, {})
        if model not in registry:
            if plugin == '_core':
                module = 'girder.models.{}'.format(model)
            else:
                module = 'girder.plugins.{}.models.{}'.format(plugin, model)
            _loadModel(model, module, plugin)
        return _modelInstances[plugin][model]
|
Python
| 0
|
@@ -1948,24 +1948,42 @@
ss.%0A %22%22%22%0A
+ @staticmethod%0A
def mode
@@ -1988,14 +1988,8 @@
del(
-self,
mode
|
b28ca4abf8a6986b96bfb89cf8737c8f737fee4e
|
update boto import to use boto3 (#1000)
|
global_settings/wagtail_hooks.py
|
global_settings/wagtail_hooks.py
|
from time import time

import boto
import boto3
import wagtail.admin.rich_text.editors.draftail.features as draftail_features
from wagtail.admin.rich_text.converters.html_to_contentstate import InlineStyleElementHandler
from wagtail.core import hooks
from django.urls import reverse
from wagtail.admin.menu import MenuItem

from .models import CloudfrontDistribution
@hooks.register('register_rich_text_features')
def register_strikethrough_feature(features):
    """Register the `superscript` rich-text feature: Draft.js inline style
    `SUPERSCRIPT`, persisted to the database as an HTML `<sup>` tag.

    NOTE(review): the function name says "strikethrough" but the feature it
    registers is superscript — confirm which was intended.
    """
    feature_name = 'superscript'
    style_type = 'SUPERSCRIPT'
    html_tag = 'sup'
    features.register_editor_plugin(
        'draftail',
        feature_name,
        draftail_features.InlineStyleFeature({
            'type': style_type,
            'label': '^',
            'description': 'Superscript',
        }),
    )
    features.default_features.append(feature_name)
    features.register_converter_rule('contentstate', feature_name, {
        'from_database_format': {html_tag: InlineStyleElementHandler(style_type)},
        'to_database_format': {'style_map': {style_type: html_tag}},
    })
@hooks.register('after_edit_page')
def purge_cloudfront_caches(page, request):
    """Invalidate the CloudFront cache for the CMS API after a page edit.

    Does nothing when no CloudfrontDistribution is configured.
    """
    try:
        distribution = CloudfrontDistribution.objects.all()[0]
    except (CloudfrontDistribution.DoesNotExist, IndexError):
        # BUGFIX: indexing an empty queryset raises IndexError; the original
        # only caught DoesNotExist, which ``all()[0]`` never raises, so a
        # missing distribution crashed the hook instead of being a no-op.
        return
    client = boto3.client('cloudfront')
    client.create_invalidation(
        DistributionId=distribution.distribution_id,
        InvalidationBatch={
            'Paths': {
                'Quantity': 1,
                'Items': [
                    '/apps/cms/api/*'  # invalidate the entire cache for the website
                ],
            },
            # CallerReference must be unique per invalidation request.
            'CallerReference': str(time()).replace(".", "")
        }
    )
@hooks.register('register_settings_menu_item')
def register_500_menu_item():
    """Add a settings menu entry that triggers a deliberate server error."""
    return MenuItem(
        'Generate 500',
        reverse('throw_error'),
        classnames='icon icon-warning',
        order=10000,
    )
|
Python
| 0
|
@@ -4,16 +4,17 @@
ort boto
+3
%0Aimport
|
c437074ee3ee15fc29790ca4de5413bbdd19728c
|
delete unused imports
|
autograd/convenience_wrappers.py
|
autograd/convenience_wrappers.py
|
"""Convenience functions built on top of `grad`."""
from __future__ import absolute_import
import itertools as it
import autograd.numpy as np
from autograd.core import grad, getval
from builtins import map
def multigrad(fun, argnums=0):
    """Takes gradients wrt multiple arguments simultaneously.

    :param fun: function to differentiate.
    :param argnums: an int or a sequence of ints naming the positional
        arguments to differentiate with respect to.  A bare int is now
        accepted: the original crashed on its own default ``argnums=0``
        because it tried to ``enumerate`` an int.
    :returns: a function of the same arguments as ``fun`` returning the
        gradients wrt the selected arguments.
    """
    if isinstance(argnums, int):
        # Normalize a single argnum to a one-element tuple.
        argnums = (argnums,)
    original_fun = fun

    def combined_arg_fun(multi_arg, *args, **kwargs):
        # Substitute the packed arguments back into their original slots.
        extra_args_list = list(args)
        for argnum_ix, arg_ix in enumerate(argnums):
            extra_args_list[arg_ix] = multi_arg[argnum_ix]
        return original_fun(*extra_args_list, **kwargs)

    # Differentiate wrt the packed tuple, which grad treats as one argument.
    gradfun = grad(combined_arg_fun, argnum=0)

    def gradfun_rearranged(*args, **kwargs):
        multi_arg = tuple([args[i] for i in argnums])
        return gradfun(multi_arg, *args, **kwargs)
    return gradfun_rearranged
def grad_and_aux(fun, argnum=0):
    """Builds a function returning the gradient of the first output and the
    (unmodified) second output of a two-output function."""
    def grad_and_aux_fun(*args, **kwargs):
        aux_store = []

        def value_only(*inner_args, **inner_kwargs):
            # Stash the auxiliary output; differentiate only the primary one.
            value, aux = fun(*inner_args, **inner_kwargs)
            aux_store.append(aux)
            return value

        gradient = grad(value_only, argnum)(*args, **kwargs)
        return gradient, aux_store[0]
    return grad_and_aux_fun
def value_and_grad(fun, argnum=0):
    """Returns a function computing both ``fun``'s value and its gradient,
    in the (value, gradient) form scipy.optimize expects."""
    def doubled(*args, **kwargs):
        result = fun(*args, **kwargs)
        # Pass the unboxed value through as the auxiliary output.
        return result, getval(result)

    paired = grad_and_aux(doubled, argnum)

    def value_and_grad_fun(*args, **kwargs):
        gradient, value = paired(*args, **kwargs)
        return value, gradient
    return value_and_grad_fun
def elementwise_grad(fun, argnum=0):
    """Like `jacobian`, but computes just the diagonal of the Jacobian in a
    single pass.  Only valid when the Jacobian actually is diagonal; only
    arrays are currently supported."""
    # The gradient of sum(fun(x)) equals the Jacobian diagonal when the
    # Jacobian is diagonal.
    return grad(lambda *args, **kwargs: np.sum(fun(*args, **kwargs)),
                argnum=argnum)
def hessian_vector_product(fun, argnum=0):
    """Builds a function returning the exact Hessian-vector product.

    The returned function takes (*args, vector, **kwargs) and costs roughly
    4x one evaluation of the original function."""
    fun_grad = grad(fun, argnum)

    def vector_dot_grad(*args, **kwargs):
        # The trailing positional argument is the vector to multiply by.
        head, vector = args[:-1], args[-1]
        return np.dot(vector, fun_grad(*head, **kwargs))

    # Differentiating v . grad(f) wrt the original input yields H . v.
    return grad(vector_dot_grad, argnum)
def hessian(fun, argnum=0):
    """Returns a function computing the exact Hessian, one row per
    hessian_vector_product call — roughly 4N evaluations for N inputs."""
    hvp = hessian_vector_product(fun, argnum)

    def hessian_fun(*args, **kwargs):
        arg_in = args[argnum]
        # One axis-aligned unit direction per input component.
        basis = np.eye(arg_in.size)
        rows = [hvp(*(args + (direction,)), **kwargs) for direction in basis]
        return np.array(rows)
    return hessian_fun
|
Python
| 0.000001
|
@@ -87,31 +87,8 @@
port
-%0Aimport itertools as it
%0A%0Aim
@@ -156,33 +156,8 @@
val%0A
-from builtins import map%0A
%0A%0Ade
|
550133348a09b197025bc1352439cb055bf50c7b
|
Make sure mocks in place for setUp command.
|
autopush/tests/test_websocket.py
|
autopush/tests/test_websocket.py
|
import json
import twisted.internet.base
from mock import Mock
from moto import mock_dynamodb2
from txstatsd.metrics.metrics import Metrics
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from twisted.trial import unittest
from autopush.settings import AutopushSettings
from autopush.websocket import SimplePushServerProtocol
class WebsocketTestCase(unittest.TestCase):
    """Exercises SimplePushServerProtocol with mocked transport and metrics,
    polling the mocks via the reactor until responses appear."""

    def setUp(self):
        # Surface tracebacks for mis-scheduled reactor DelayedCalls.
        twisted.internet.base.DelayedCall.debug = True
        self.proto = SimplePushServerProtocol()

        settings = AutopushSettings(
            crypto_key="i_CYcNKa2YXrF_7V1Y-2MFfoEl7b6KX55y_9uvOKfJQ=",
            hostname="localhost",
            statsd_host=None,
        )
        self.proto.settings = settings
        # Replace the network-facing pieces with inspectable mocks.
        self.proto.sendMessage = self.send_mock = Mock()
        self.proto.sendClose = self.close_mock = Mock()
        self.proto.transport = self.transport_mock = Mock()
        settings.metrics = Mock(spec=Metrics)

    def _connect(self):
        self.proto.onConnect(None)

    def _send_message(self, msg):
        # The protocol expects utf-8 encoded JSON frames (isBinary=False).
        self.proto.onMessage(json.dumps(msg).encode('utf8'), False)

    def _wait_for_message(self, d):
        # Poll the send mock every 0.1s; fire ``d`` with the captured call
        # args once a frame has been sent.
        args = self.send_mock.call_args
        if args:
            self.send_mock.reset_mock()
            d.callback(args)
            return

        reactor.callLater(0.1, self._wait_for_message, d)

    def _wait_for_close(self, d):
        # Poll until sendClose has been invoked, then fire ``d``.
        if self.close_mock.call_args is not None:
            d.callback(True)
            return

        reactor.callLater(0.1, self._wait_for_close, d)

    def _check_response(self, func):
        """Waits for a message to be sent, and runs the func with it"""
        def handle_message(result):
            args, _ = result
            func(json.loads(args[0]))
        d = Deferred()
        d.addCallback(handle_message)
        self._wait_for_message(d)
        return d

    @mock_dynamodb2
    def test_hello(self):
        self._connect()
        self._send_message(dict(messageType="hello", channelIDs=[]))

        def check_result(msg):
            assert "messageType" in msg
        return self._check_response(check_result)

    @mock_dynamodb2
    def test_hello_dupe(self):
        self._connect()
        self._send_message(dict(messageType="hello", channelIDs=[]))

        def check_second_hello(msg):
            # A second hello on the same connection must be rejected with 401.
            self.assert_("messageType" in msg)
            self.assertEqual(msg["status"], 401)

        def check_first_hello(msg):
            assert "messageType" in msg
            # Send another hello
            self._send_message(dict(messageType="hello", channelIDs=[]))
            return self._check_response(check_second_hello)
        return self._check_response(check_first_hello)

    @mock_dynamodb2
    def test_not_hello(self):
        self._connect()
        self._send_message(dict(messageType="wooooo"))

        # An unknown first message should close the connection.
        def check_result(result):
            assert result is True
        d = Deferred()
        d.addCallback(check_result)
        self._wait_for_close(d)
        return d
|
Python
| 0
|
@@ -400,16 +400,36 @@
tCase):%0A
+ @mock_dynamodb2%0A
def
|
009ab26737923cfff97ba37a035dcff7639135b1
|
Replace all_pages_in_directory with concat_pdf_pages
|
Util.py
|
Util.py
|
"""Collection of Helper Functions"""
import os
from fnmatch import fnmatch
from PyPDF2 import PdfFileReader
def pdf_file(filename):
    """Return True when *filename* matches the glob pattern '*.pdf'."""
    pattern = '*.pdf'
    return fnmatch(filename, pattern)
def all_pdf_files_in_directory(path):
    """Return the names of the PDF files found directly inside *path*."""
    return list(filter(pdf_file, os.listdir(path)))
def all_pages_in_directory(path):
    """A generator that yields one PDF page at a time for every PDF file in
    *path*, in sorted filename order."""
    for filename in sorted(all_pdf_files_in_directory(path)):
        # BUGFIX: os.listdir returns bare names; join with the directory so
        # this works when ``path`` is not the current working directory.
        with open(os.path.join(path, filename), 'rb') as input_file:
            for page in PdfFileReader(input_file).pages:
                yield page
def split_on_condition(iterable, predicate):
    """Split an iterable into chunks.

    Each chunk starts with an item for which *predicate* is true (except
    possibly the very first chunk, which always starts with the first item),
    followed by the items for which it is false.  An empty iterable yields
    no chunks.

    Fixes two defects of the original: it used the Python-2-only
    ``it.next()``, and it let StopIteration escape the generator body, which
    PEP 479 turns into a RuntimeError on Python 3.7+ (so it crashed on any
    input, and especially on empty input).
    """
    it = iter(iterable)
    try:
        # Seed the first chunk with the first item unconditionally.
        chunk = [next(it)]
    except StopIteration:
        return  # empty input: nothing to yield
    for item in it:
        if predicate(item):
            # A match starts a new chunk; emit the one we were building.
            yield chunk
            chunk = [item]
        else:
            chunk.append(item)
    # Emit the final, partially-built chunk.
    yield chunk
|
Python
| 0
|
@@ -410,35 +410,30 @@
def
-all_pages_in_directory(path
+concat_pdf_pages(files
):%0A
@@ -494,23 +494,21 @@
all
-the PDF
+pages
in the
dire
@@ -503,25 +503,25 @@
in the
-directory
+PDF files
.%22%22%22%0A
@@ -529,115 +529,29 @@
for
-filename in sorted(all_pdf_files_in_directory(path)):%0A with open(filename, 'rb') as input_file:%0A
+input_file in files:%0A
@@ -595,28 +595,24 @@
ile).pages:%0A
-
|
d16373609b2f30c6ffa576c1269c529f12c9622c
|
Switch to fast method for personal timetable
|
backend/uclapi/timetable/urls.py
|
backend/uclapi/timetable/urls.py
|
from django.conf.urls import url
from . import views
urlpatterns = [
    # Optimized (fast-path) personal timetable endpoint.
    url(r'^personal_fast$', views.get_personal_timetable_fast),
    # Standard personal timetable endpoint.
    url(r'^personal$', views.get_personal_timetable),
    # Timetable lookup for an explicit set of modules.
    url(r'^bymodule$', views.get_modules_timetable),
]
|
Python
| 0
|
@@ -83,21 +83,16 @@
personal
-_fast
$', view
@@ -127,62 +127,8 @@
t),%0A
- url(r'%5Epersonal$', views.get_personal_timetable),%0A
|
22785c709956365ac51bc3b79135e6debc6418ae
|
Exclude legacy objc API tests properly.
|
all.gyp
|
all.gyp
|
# Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
{
  'variables': {
    'include_examples%': 1,
    'include_tests%': 1,
    'webrtc_root_additional_dependencies': [],
  },
  'targets': [
    {
      'target_name': 'All',
      'type': 'none',
      'dependencies': [
        'webrtc/webrtc.gyp:*',
        '<@(webrtc_root_additional_dependencies)',
      ],
      'conditions': [
        ['include_examples==1', {
          'dependencies': [
            'webrtc/webrtc_examples.gyp:*',
          ],
        }],
        # BUGFIX: parenthesize the OS clause. GYP conditions use Python
        # precedence, where "and" binds tighter than "or", so the previous
        # expression included the legacy objc API tests on iOS regardless of
        # include_tests.
        ['(OS=="ios" or (OS=="mac" and target_arch!="ia32")) and include_tests==1', {
          'dependencies': [
            'talk/app/webrtc/legacy_objc_api_tests.gyp:*',
          ],
        }],
      ],
    },
  ],
}
|
Python
| 0.000026
|
@@ -860,16 +860,17 @@
%5B'
+(
OS==%22ios
@@ -909,16 +909,17 @@
=%22ia32%22)
+)
and inc
|
127434cdc04ae3655747ff1e3530148404dbf849
|
fix flush
|
blaz.py
|
blaz.py
|
from os import environ, chdir, getenv
from os.path import abspath, basename, dirname
from subprocess import check_call
from sys import argv
from colors import bold
from hashlib import md5
import sys
class Blaz(object):
    """Re-executes the calling script inside a docker container, unless the
    BLAZ_LOCK environment variable proves we are already inside the container
    started for this exact invocation."""

    def __init__(self, **kwargs):
        self.__dict__ = kwargs
        self.file = abspath(argv[0])
        self.script = basename(self.file)
        self.argv = ' '.join(argv[1:])
        self.__dict__.update({
            'dir': dirname(self.file),
            'image': getenv('BLAZ_IMAGE', 'alpine-blaz'),
            'docker_exe': getenv('DOCKER_EXE', '/usr/local/bin/docker'),
            'docker_sock': getenv('DOCKER_SOCK', '/var/run/docker.sock')
        })
        chdir(self.dir)
        self._create_lock()

    def _create_lock(self):
        # The lock identifies this exact invocation: script path plus args.
        m = md5()
        m.update(bytes('{0.dir}/{0.script} {0.argv}'.format(self), 'utf-8'))
        self.lock = m.hexdigest()

    def _fresh(self):
        """True when we are already running inside the container launched for
        this invocation."""
        # BUGFIX: the original wrote 'BLAZ_LOCK'.format(self) — a no-op
        # .format call on a constant string with no placeholders.
        if 'BLAZ_LOCK' in environ:
            return environ['BLAZ_LOCK'] == self.lock
        else:
            return False

    def invoke(self, main):
        if self._fresh():
            main(self)
        else:
            self._docker_run()

    def log(self, msg='', fg='yellow'):
        sys.stdout.flush()
        sys.stderr.write(bold(msg + '\n', fg=fg))
        sys.stderr.flush()

    def run(self, cmd, fg='green'):
        # Re-format until the template stops changing, since expansions may
        # themselves contain '{0.attr}' placeholders.
        while True:
            prev = cmd
            cmd = cmd.format(self)
            if prev == cmd:
                break
        self.log(cmd, fg=fg)
        check_call(cmd, shell=True)
        # BUGFIX: flush both streams after the child exits so its output is
        # not interleaved with our own buffered output.
        sys.stdout.flush()
        sys.stderr.flush()

    def _forward_blaz_env_vars(self):
        """Build --env flags forwarding BLAZ_* variables by value and _BLAZ_*
        variables by reference (expanded inside the container shell)."""
        result = []
        for k in environ.keys():
            if k.find('BLAZ_') == 0:
                result.append('''
                    --env={}={}
                '''.format(k, environ[k]))
            elif k.find('_BLAZ_') == 0:
                result.append('''
                    --env={0}=${0}
                '''.format(k))
        return ''.join(result)

    def _docker_run(self):
        """Assemble and execute the docker run command that re-invokes this
        script inside the container."""
        cmd = '''
            docker run
            --rm
            --privileged
            --net=host
        '''
        cmd = cmd + self._forward_blaz_env_vars()
        cmd = cmd + '''
            --env=DOCKER_EXE={0.docker_exe}
            --env=DOCKER_SOCK={0.docker_sock}
            --env=BLAZ_LOCK={0.lock}
            --volume={0.dir}:{0.dir}
            --volume={0.docker_exe}:{0.docker_exe}
            --volume={0.docker_sock}:{0.docker_sock}
            {0.image}
            {0.dir}/{0.script} {0.argv}
        '''
        # Collapse the template into "line \" continuations, dropping blank
        # lines and trimming the final trailing backslash.
        # BUGFIX: the original compared with ``x.strip() is not ''`` — an
        # identity check on strings that only worked by CPython interning
        # accident; use a proper inequality.
        cmd = '\n '.join([x.strip() + ' \\' for x in cmd.split('\n') if
                          x.strip() != ''])[:-2]
        self.run(cmd, fg='blue')
|
Python
| 0.000001
|
@@ -937,29 +937,16 @@
AZ_LOCK'
-.format(self)
in envi
@@ -1539,16 +1539,70 @@
ll=True)
+%0A sys.stdout.flush()%0A sys.stderr.flush()
%0A%0A de
|
7187abb00e78b9cc8cca5366a70b6f6045d94a9c
|
Fixed a Python 2.3 incompatibility.
|
django/contrib/formtools/preview.py
|
django/contrib/formtools/preview.py
|
"""
Formtools Preview application.
"""
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import Http404
from django.shortcuts import render_to_response
from django.template.context import RequestContext
import cPickle as pickle
import md5
AUTO_ID = 'formtools_%s' # Each form here uses this as its auto_id parameter.

class FormPreview(object):
    """Two-stage form workflow: display the form, preview the submission,
    then call done() once a security hash confirms the previewed data was
    not altered between stages."""

    preview_template = 'formtools/preview.html'
    form_template = 'formtools/form.html'

    # METHODS SUBCLASSES SHOULDN'T OVERRIDE ###################################

    def __init__(self, form):
        # form should be a Form class, not an instance.
        self.form, self.state = form, {}

    def __call__(self, request, *args, **kwargs):
        # Stage '1' = form just submitted (show preview);
        # stage '2' = preview confirmed (process the data).
        stage = {'1': 'preview', '2': 'post'}.get(request.POST.get(self.unused_name('stage')), 'preview')
        self.parse_params(*args, **kwargs)
        try:
            # Dispatch to e.g. preview_get / preview_post / post_post.
            method = getattr(self, stage + '_' + request.method.lower())
        except AttributeError:
            raise Http404
        return method(request)

    def unused_name(self, name):
        """
        Given a first-choice name, adds an underscore to the name until it
        reaches a name that isn't claimed by any field in the form.

        This is calculated rather than being hard-coded so that no field names
        are off-limits for use in the form.
        """
        while 1:
            try:
                f = self.form.base_fields[name]
            except KeyError:
                break # This field name isn't being used by the form.
            name += '_'
        return name

    def preview_get(self, request):
        "Displays the form"
        f = self.form(auto_id=AUTO_ID)
        return render_to_response(self.form_template,
            {'form': f, 'stage_field': self.unused_name('stage'), 'state': self.state},
            context_instance=RequestContext(request))

    def preview_post(self, request):
        "Validates the POST data. If valid, displays the preview page. Else, redisplays form."
        f = self.form(request.POST, auto_id=AUTO_ID)
        context = {'form': f, 'stage_field': self.unused_name('stage'), 'state': self.state}
        if f.is_valid():
            # The hash travels with the preview page so post_post can verify
            # the data was not tampered with before final submission.
            context['hash_field'] = self.unused_name('hash')
            context['hash_value'] = self.security_hash(request, f)
            return render_to_response(self.preview_template, context, context_instance=RequestContext(request))
        else:
            return render_to_response(self.form_template, context, context_instance=RequestContext(request))

    def post_post(self, request):
        "Validates the POST data. If valid, calls done(). Else, redisplays form."
        f = self.form(request.POST, auto_id=AUTO_ID)
        if f.is_valid():
            # Recompute the hash; a mismatch means the data changed between
            # preview and confirmation (or was tampered with).
            if self.security_hash(request, f) != request.POST.get(self.unused_name('hash')):
                return self.failed_hash(request) # Security hash failed.
            return self.done(request, f.cleaned_data)
        else:
            return render_to_response(self.form_template,
                {'form': f, 'stage_field': self.unused_name('stage'), 'state': self.state},
                context_instance=RequestContext(request))

    # METHODS SUBCLASSES MIGHT OVERRIDE IF APPROPRIATE ########################

    def parse_params(self, *args, **kwargs):
        """
        Given captured args and kwargs from the URLconf, saves something in
        self.state and/or raises Http404 if necessary.

        For example, this URLconf captures a user_id variable:

            (r'^contact/(?P<user_id>\d{1,6})/$', MyFormPreview(MyForm)),

        In this case, the kwargs variable in parse_params would be
        {'user_id': 32} for a request to '/contact/32/'. You can use that
        user_id to make sure it's a valid user and/or save it for later, for
        use in done().
        """
        pass

    def security_hash(self, request, form):
        """
        Calculates the security hash for the given Form instance.

        This creates a list of the form field names/values in a deterministic
        order, pickles the result with the SECRET_KEY setting and takes an md5
        hash of that.

        Subclasses may want to take into account request-specific information
        such as the IP address.
        """
        # NOTE: cPickle and the md5 module reflect this code's Python 2 era.
        data = [(bf.name, bf.data) for bf in form] + [settings.SECRET_KEY]
        # Use HIGHEST_PROTOCOL because it's the most efficient. It requires
        # Python 2.3, but Django requires 2.3 anyway, so that's OK.
        pickled = pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL)
        return md5.new(pickled).hexdigest()

    def failed_hash(self, request):
        "Returns an HttpResponse in the case of an invalid security hash."
        return self.preview_post(request)

    # METHODS SUBCLASSES MUST OVERRIDE ########################################

    def done(self, request, cleaned_data):
        """
        Does something with the cleaned_data and returns an
        HttpResponseRedirect.
        """
        raise NotImplementedError('You must define a done() method on your %s subclass.' % self.__class__.__name__)
|
Python
| 0.999738
|
@@ -4584,17 +4584,8 @@
ta,
-protocol=
pick
|
bb679edf2b7030de07e3d3688327c5e13851232e
|
Troubleshoot CI
|
kevlar/__init__.py
|
kevlar/__init__.py
|
#!/usr/bin/env python
#
# -----------------------------------------------------------------------------
# Copyright (c) 2016 The Regents of the University of California
#
# This file is part of kevlar (http://github.com/dib-lab/kevlar) and is
# licensed under the MIT license: see LICENSE.
# -----------------------------------------------------------------------------
# Core libraries
from __future__ import print_function
try:
import __builtin__ as builtins
except:
import builtins
from collections import namedtuple
from gzip import open as gzopen
import re
import sys
# Third-party libraries
import khmer
import screed
# Internal modules
from kevlar import seqio
from kevlar import overlap
from kevlar import counting
from kevlar import sketch
from kevlar.seqio import parse_augmented_fastx, print_augmented_fastx
from kevlar.variantset import VariantSet
from kevlar.timer import Timer
# Subcommands and command-line interface
from kevlar import dump
from kevlar import novel
from kevlar import collect
from kevlar import filter
from kevlar import reaugment
from kevlar import mutate
from kevlar import assemble
from kevlar import count
from kevlar import partition
from kevlar import localize
from kevlar import cli
# C extension(s)
from kevlar.alignment import contig_align as align
from kevlar._version import get_versions
__version__ = get_versions()['version']
del get_versions
def open(filename, mode):
    """Open *filename* for 'r' or 'w', transparently handling gzip files and
    treating '-' or None as stdin/stdout."""
    if mode not in ('r', 'w'):
        raise ValueError('invalid mode "{}"'.format(mode))
    if filename is None or filename == '-':
        return sys.stdin if mode == 'r' else sys.stdout
    if filename.endswith('.gz'):
        # Force text mode so gzip streams behave like regular text files.
        return gzopen(filename, mode + 't')
    return builtins.open(filename, mode)
def revcom(seq):
    """Return the reverse complement of a DNA sequence (delegates to screed)."""
    return screed.dna.reverse_complement(str(seq))
def revcommin(seq):
    """Return the lexicographically smaller of *seq* and its reverse
    complement (a canonical representation)."""
    rc = revcom(seq)
    return min(seq, rc)
def same_seq(seq1, seq2, seq2revcom=None):
    """True when *seq1* equals *seq2* or its reverse complement; pass the
    precomputed *seq2revcom* to avoid recomputing it."""
    if seq2revcom is None:
        seq2revcom = revcom(seq2)
    return seq1 in (seq2, seq2revcom)
def to_gml(graph, outfilename, logfile=sys.stderr):
    """Write the given read graph to a GML file."""
    if not outfilename.endswith('.gml'):
        print('[kevlar] WARNING: GML files usually need extension .gml',
              file=logfile)
    # NOTE(review): `networkx` is not imported anywhere in this module;
    # confirm the import exists before relying on this function.
    networkx.write_gml(graph, outfilename)
    # BUGFIX: the original referenced the undefined name ``args.gml`` here,
    # raising NameError after every successful write.
    message = '[kevlar] graph written to {}'.format(outfilename)
    print(message, file=logfile)
def multi_file_iter_screed(filenames):
    """Yield every record from every named file, parsed with screed."""
    for infile in filenames:
        for rec in screed.open(infile):
            yield rec
def multi_file_iter_khmer(filenames):
    """Yield every record from every named file, parsed with khmer."""
    for infile in filenames:
        for rec in khmer.ReadParser(infile):
            yield rec
def clean_subseqs(sequence, ksize):
    """Yield the unambiguous (ACGT-only) substrings of length >= ksize.

    The sequence is split at every non-ACGT character; fragments shorter
    than *ksize* are discarded.
    """
    for fragment in re.split('[^ACGT]', sequence):
        if len(fragment) < ksize:
            continue
        yield fragment
# Lightweight record for an "interesting" k-mer: its sequence, its offset
# within the read, and its abundance.
KmerOfInterest = namedtuple('KmerOfInterest', ['sequence', 'offset', 'abund'])
|
Python
| 0.000001
|
@@ -1245,16 +1245,40 @@
sion(s)%0A
+import kevlar.alignment%0A
from kev
|
9b5d9ad8ddc2f1bf652e55102a4db67c71f8515f
|
Set defaults for sql options
|
keystone/config.py
|
keystone/config.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gettext
import sys
import os
from keystone.common import logging
from keystone.openstack.common import cfg
gettext.install('keystone', unicode=1)
class ConfigMixin(object):
    """Shared behavior layered onto the oslo cfg option classes.

    Adds two conveniences:
      - __call__ accepts an explicit list of config files, and
      - CLI parsing defaults to an empty argument list so instantiation
        never implicitly consumes sys.argv.
    """

    def __call__(self, config_files=None, *args, **kw):
        # Override the default of the built-in 'config_file' option before
        # parsing.  NOTE(review): reaches into cfg internals (_opts), which
        # is fragile across cfg versions -- confirm against the vendored
        # keystone.openstack.common.cfg module.
        if config_files is not None:
            self._opts['config_file']['opt'].default = config_files
        kw.setdefault('args', [])
        return super(ConfigMixin, self).__call__(*args, **kw)

    def set_usage(self, usage):
        # Keep our cached usage string and the underlying option parser's
        # usage in sync.
        self.usage = usage
        self._oparser.usage = usage
class Config(ConfigMixin, cfg.ConfigOpts):
    """cfg.ConfigOpts with keystone's ConfigMixin conveniences."""
    pass
class CommonConfig(ConfigMixin, cfg.CommonConfigOpts):
    """cfg.CommonConfigOpts with keystone's ConfigMixin conveniences."""
    pass
def setup_logging(conf):
    """
    Sets up the logging options for a log with supplied name

    :param conf: a cfg.ConfOpts object
    """
    # A logging config file, when supplied, takes full precedence over the
    # individual log options handled below.
    if conf.log_config:
        # Use a logging configuration file for all settings...
        if os.path.exists(conf.log_config):
            logging.config.fileConfig(conf.log_config)
            return
        else:
            raise RuntimeError('Unable to locate specified logging '
                               'config file: %s' % conf.log_config)

    # Otherwise configure the root logger by hand: level first...
    root_logger = logging.root
    if conf.debug:
        root_logger.setLevel(logging.DEBUG)
    elif conf.verbose:
        root_logger.setLevel(logging.INFO)
    else:
        root_logger.setLevel(logging.WARNING)

    formatter = logging.Formatter(conf.log_format, conf.log_date_format)

    # ...then exactly one handler: syslog, a watched file, or stdout.
    if conf.use_syslog:
        try:
            # NOTE(review): SysLogHandler normally lives in logging.handlers;
            # presumably keystone.common.logging re-exports it -- confirm.
            facility = getattr(logging.SysLogHandler,
                               conf.syslog_log_facility)
        except AttributeError:
            raise ValueError(_('Invalid syslog facility'))

        handler = logging.SysLogHandler(address='/dev/log',
                                        facility=facility)
    elif conf.log_file:
        logfile = conf.log_file
        if conf.log_dir:
            logfile = os.path.join(conf.log_dir, logfile)
        handler = logging.WatchedFileHandler(logfile)
    else:
        handler = logging.StreamHandler(sys.stdout)

    handler.setFormatter(formatter)
    root_logger.addHandler(handler)
def register_str(*args, **kw):
    """Register a string option; kw may carry 'conf' and 'group' overrides."""
    conf = kw.pop('conf', CONF)
    # Pop 'group' (and register it) before building the opt from **kw.
    group = _ensure_group(kw, conf)
    opt = cfg.StrOpt(*args, **kw)
    return conf.register_opt(opt, group=group)
def register_cli_str(*args, **kw):
    """Register a string CLI option; kw may carry 'conf' and 'group'."""
    conf = kw.pop('conf', CONF)
    # Pop 'group' (and register it) before building the opt from **kw.
    group = _ensure_group(kw, conf)
    opt = cfg.StrOpt(*args, **kw)
    return conf.register_cli_opt(opt, group=group)
def register_bool(*args, **kw):
    """Register a boolean option; kw may carry 'conf' and 'group'."""
    conf = kw.pop('conf', CONF)
    # Pop 'group' (and register it) before building the opt from **kw.
    group = _ensure_group(kw, conf)
    opt = cfg.BoolOpt(*args, **kw)
    return conf.register_opt(opt, group=group)
def register_cli_bool(*args, **kw):
    """Register a boolean CLI option; kw may carry 'conf' and 'group'."""
    conf = kw.pop('conf', CONF)
    # Pop 'group' (and register it) before building the opt from **kw.
    group = _ensure_group(kw, conf)
    opt = cfg.BoolOpt(*args, **kw)
    return conf.register_cli_opt(opt, group=group)
def register_int(*args, **kw):
    """Register an integer option; kw may carry 'conf' and 'group'."""
    conf = kw.pop('conf', CONF)
    # Pop 'group' (and register it) before building the opt from **kw.
    group = _ensure_group(kw, conf)
    opt = cfg.IntOpt(*args, **kw)
    return conf.register_opt(opt, group=group)
def register_cli_int(*args, **kw):
    """Register an integer CLI option; kw may carry 'conf' and 'group'."""
    conf = kw.pop('conf', CONF)
    # Pop 'group' (and register it) before building the opt from **kw.
    group = _ensure_group(kw, conf)
    opt = cfg.IntOpt(*args, **kw)
    return conf.register_cli_opt(opt, group=group)
def _ensure_group(kw, conf):
    """Pop the 'group' entry from kw, registering it on conf when truthy.

    Returns the group name (or None when absent).
    """
    name = kw.pop('group', None)
    if name:
        conf.register_group(cfg.OptGroup(name=name))
    return name
CONF = CommonConfig(project='keystone')


register_str('admin_token', default='ADMIN')
register_str('bind_host', default='0.0.0.0')
register_str('compute_port', default=8774)
register_str('admin_port', default=35357)
register_str('public_port', default=5000)


# sql options
# Bug fix: these previously had no defaults, so keystone could not start
# without explicit sql configuration; default to a local sqlite database
# and a 200-second idle timeout.
register_str('connection', group='sql', default='sqlite:///keystone.db')
register_int('idle_timeout', group='sql', default=200)


register_str('driver', group='catalog',
             default='keystone.catalog.backends.sql.Catalog')
register_str('driver', group='identity',
             default='keystone.identity.backends.sql.Identity')
register_str('driver', group='policy',
             default='keystone.policy.backends.rules.Policy')
register_str('driver', group='token',
             default='keystone.token.backends.kvs.Token')
register_str('driver', group='ec2',
             default='keystone.contrib.ec2.backends.kvs.Ec2')


# ldap
register_str('url', group='ldap')
register_str('user', group='ldap')
register_str('password', group='ldap')
register_str('suffix', group='ldap')
register_bool('use_dumb_member', group='ldap')
register_str('user_tree_dn', group='ldap')
register_str('user_objectclass', group='ldap')
register_str('user_id_attribute', group='ldap')
register_str('tenant_tree_dn', group='ldap')
register_str('tenant_objectclass', group='ldap')
register_str('tenant_id_attribute', group='ldap')
register_str('tenant_member_attribute', group='ldap')
register_str('role_tree_dn', group='ldap')
register_str('role_objectclass', group='ldap')
register_str('role_id_attribute', group='ldap')
register_str('role_member_attribute', group='ldap')
|
Python
| 0.000015
|
@@ -4255,16 +4255,49 @@
up='sql'
+, default='sqlite:///keystone.db'
)%0Aregist
@@ -4330,16 +4330,29 @@
up='sql'
+, default=200
)%0A%0A%0Aregi
|
2bdfca93103d6d3c721e33c5907a8842e2c038b3
|
Fix unicode error in admin autocomplete field
|
django_extensions/admin/__init__.py
|
django_extensions/admin/__init__.py
|
#
# Autocomplete feature for admin panel
#
# Most of the code has been written by Jannis Leidel and was updated a bit
# for django_extensions.
# http://jannisleidel.com/2008/11/autocomplete-form-widget-foreignkey-model-fields/
#
# to_string_function, Satchmo adaptation and some comments added by emes
# (Michal Salaban)
#
import six
import operator
from six.moves import reduce
from django.http import HttpResponse, HttpResponseNotFound
from django.db import models
from django.db.models.query import QuerySet
from django.utils.encoding import smart_str
from django.utils.translation import ugettext as _
from django.utils.text import get_text_list
try:
from functools import update_wrapper
assert update_wrapper
except ImportError:
from django.utils.functional import update_wrapper
from django_extensions.admin.widgets import ForeignKeySearchInput
from django.conf import settings
if 'reversion' in settings.INSTALLED_APPS:
from reversion.admin import VersionAdmin as ModelAdmin
assert ModelAdmin
else:
from django.contrib.admin import ModelAdmin
class ForeignKeyAutocompleteAdmin(ModelAdmin):
    """Admin class for models using the autocomplete feature.

    There are two additional fields:
       - related_search_fields: defines fields of managed model that
         have to be represented by autocomplete input, together with
         a list of target model fields that are searched for
         input string, e.g.:

         related_search_fields = {
            'author': ('first_name', 'email'),
         }

       - related_string_functions: contains optional functions which
         take target model instance as only argument and return string
         representation. By default __unicode__() method of target
         object is used.
    """

    related_search_fields = {}
    related_string_functions = {}

    def get_urls(self):
        """Prepend the autocomplete endpoint to the default admin urls."""
        try:
            from django.conf.urls import patterns, url
        except ImportError:  # django < 1.4
            from django.conf.urls.defaults import patterns, url

        def wrap(view):
            # Route the view through the admin site's permission checks.
            def wrapper(*args, **kwargs):
                return self.admin_site.admin_view(view)(*args, **kwargs)
            return update_wrapper(wrapper, view)

        info = self.model._meta.app_label, self.model._meta.module_name
        urlpatterns = patterns('', url(r'foreignkey_autocomplete/$',
                                       wrap(self.foreignkey_autocomplete),
                                       name='%s_%s_autocomplete' % info))
        urlpatterns += super(ForeignKeyAutocompleteAdmin, self).get_urls()
        return urlpatterns

    def foreignkey_autocomplete(self, request):
        """
        Searches in the fields of the given related model and returns the
        result as a simple string to be used by the jQuery Autocomplete plugin
        """
        query = request.GET.get('q', None)
        app_label = request.GET.get('app_label', None)
        model_name = request.GET.get('model_name', None)
        search_fields = request.GET.get('search_fields', None)
        object_pk = request.GET.get('object_pk', None)
        try:
            to_string_function = self.related_string_functions[model_name]
        except KeyError:
            to_string_function = lambda x: x.__unicode__()
        if search_fields and app_label and model_name and (query or object_pk):
            def construct_search(field_name):
                # use different lookup methods depending on the notation
                if field_name.startswith('^'):
                    return "%s__istartswith" % field_name[1:]
                elif field_name.startswith('='):
                    return "%s__iexact" % field_name[1:]
                elif field_name.startswith('@'):
                    return "%s__search" % field_name[1:]
                else:
                    return "%s__icontains" % field_name
            model = models.get_model(app_label, model_name)
            queryset = model._default_manager.all()
            data = ''
            if query:
                for bit in query.split():
                    or_queries = [models.Q(**{construct_search(smart_str(field_name)): smart_str(bit)}) for field_name in search_fields.split(',')]
                    other_qs = QuerySet(model)
                    other_qs.dup_select_related(queryset)
                    other_qs = other_qs.filter(reduce(operator.or_, or_queries))
                    queryset = queryset & other_qs
                # Bug fix: make only the *format string* a unicode literal and
                # interpolate afterwards.  Formatting first and then wrapping
                # the result in six.u() raised UnicodeDecodeError on Python 2
                # whenever the object representation contained non-ASCII text.
                data = ''.join([six.u('%s|%s\n') % (to_string_function(f), f.pk) for f in queryset])
            elif object_pk:
                try:
                    obj = queryset.get(pk=object_pk)
                except:
                    # Best-effort lookup: an invalid/missing pk yields an
                    # empty response rather than an error.
                    pass
                else:
                    data = to_string_function(obj)
            return HttpResponse(data)
        return HttpResponseNotFound()

    def get_help_text(self, field_name, model_name):
        """Return the autocomplete help text for *field_name*, or ''."""
        searchable_fields = self.related_search_fields.get(field_name, None)
        if searchable_fields:
            help_kwargs = {
                'model_name': model_name,
                'field_list': get_text_list(searchable_fields, _('and')),
            }
            return _('Use the left field to do %(model_name)s lookups in the fields %(field_list)s.') % help_kwargs
        return ''

    def formfield_for_dbfield(self, db_field, **kwargs):
        """
        Overrides the default widget for Foreignkey fields if they are
        specified in the related_search_fields class attribute.
        """
        if (isinstance(db_field, models.ForeignKey) and db_field.name in self.related_search_fields):
            model_name = db_field.rel.to._meta.object_name
            help_text = self.get_help_text(db_field.name, model_name)
            if kwargs.get('help_text'):
                # Same unicode fix as in foreignkey_autocomplete: keep the
                # format string as the unicode literal, then interpolate.
                help_text = six.u('%s %s') % (kwargs['help_text'], help_text)
            kwargs['widget'] = ForeignKeySearchInput(db_field.rel, self.related_search_fields[db_field.name])
            kwargs['help_text'] = help_text
        return super(ForeignKeyAutocompleteAdmin, self).formfield_for_dbfield(db_field, **kwargs)
|
Python
| 0.00001
|
@@ -4454,16 +4454,17 @@
%25s%7C%25s%5Cn'
+)
%25 (to_s
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.