commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
ecb65ccb3351a9d137bf1c79fb1d35cad90a6e5b
|
Set seed for all random number generations in tests
|
tests/schema.py
|
tests/schema.py
|
"""
Test schema definition
"""
import random
import numpy as np
import datajoint as dj
from . import PREFIX, CONN_INFO
schema = dj.schema(PREFIX + '_test1', locals(), connection=dj.conn(**CONN_INFO))
@schema
class User(dj.Lookup):
definition = """ # lab members
username: varchar(12)
"""
contents = [['Jake'], ['Cathryn'], ['Shan'], ['Fabian'], ['Edgar'], ['George'], ['Dimitri']]
@schema
class Subject(dj.Manual):
definition = """ # Basic information about animal subjects used in experiments
subject_id :int # unique subject id
---
real_id :varchar(40) # real-world name. Omit if the same as subject_id
species = "mouse" :enum('mouse', 'monkey', 'human')
date_of_birth :date
subject_notes :varchar(4000)
unique index (real_id, species)
"""
contents = [
[1551, '1551', 'mouse', '2015-04-01', 'genetically engineered super mouse'],
[10, 'Curious George', 'monkey', '2008-06-30', ''],
[1552, '1552', 'mouse', '2015-06-15', ''],
[1553, '1553', 'mouse', '2016-07-01', '']]
def prepare(self):
self.insert(self.contents, ignore_errors=True)
@schema
class Experiment(dj.Imported):
definition = """ # information about experiments
-> Subject
experiment_id :smallint # experiment number for this subject
---
experiment_date :date # date when experiment was started
-> User
data_path="" :varchar(255) # file path to recorded data
notes="" :varchar(2048) # e.g. purpose of experiment
entry_time=CURRENT_TIMESTAMP :timestamp # automatic timestamp
"""
fake_experiments_per_subject = 5
def _make_tuples(self, key):
"""
populate with random data
"""
from datetime import date, timedelta
users = User().fetch()['username']
for experiment_id in range(self.fake_experiments_per_subject):
self.insert1(
dict(key,
experiment_id=experiment_id,
experiment_date=(date.today()-timedelta(random.expovariate(1/30))).isoformat(),
username=random.choice(users)))
@schema
class Trial(dj.Imported):
definition = """ # a trial within an experiment
-> Experiment
trial_id :smallint # trial number
---
start_time :double # (s)
"""
def _make_tuples(self, key):
"""
populate with random data (pretend reading from raw files)
"""
for trial_id in range(10):
self.insert1(
dict(key,
trial_id=trial_id,
start_time=random.random()*1e9
))
@schema
class Ephys(dj.Imported):
definition = """ # some kind of electrophysiological recording
-> Trial
----
sampling_frequency :double # (Hz)
duration :double # (s)
"""
def _make_tuples(self, key):
"""
populate with random data
"""
row = dict(key,
sampling_frequency=6000,
duration=np.minimum(2, random.expovariate(1)))
self.insert1(row)
number_samples = round(row['duration'] * row['sampling_frequency']);
EphysChannel().fill(key, number_samples=number_samples)
@schema
class EphysChannel(dj.Subordinate, dj.Imported):
definition = """ # subtable containing individual channels
-> Ephys
channel :tinyint unsigned # channel number within Ephys
----
voltage :longblob
"""
def fill(self, key, number_samples):
"""
populate random trace of specified length
"""
for channel in range(2):
self.insert1(
dict(key,
channel=channel,
voltage=np.float32(np.random.randn(number_samples))
))
|
Python
| 0
|
@@ -1855,16 +1855,52 @@
rname'%5D%0A
+ random.seed('Amazing Seed')%0A
@@ -1962,16 +1962,16 @@
bject):%0A
-
@@ -2550,32 +2550,68 @@
es)%0A %22%22%22%0A
+ random.seed('Amazing Seed')%0A
for tria
@@ -3094,32 +3094,68 @@
ata%0A %22%22%22%0A
+ random.seed('Amazing seed')%0A
row = di
@@ -3789,16 +3789,16 @@
length%0A
-
@@ -3793,32 +3793,68 @@
gth%0A %22%22%22%0A
+ random.seed('Amazing seed')%0A
for chan
|
17c2d6baadfa91985ed8f3d32754ee7d30ba87d9
|
Use "1" as ui3 cookie value to not confuse IA analytics.
|
internetarchive/search.py
|
internetarchive/search.py
|
import requests.sessions
from . import session
# Search class
# ________________________________________________________________________________________
class Search(object):
"""This class represents an archive.org item search. You can use
this class to search for archive.org items using the advanced
search engine.
Usage::
>>> import internetarchive.search
>>> search = internetarchive.search.Search('(uploader:jake@archive.org)')
>>> for result in search:
... print(result['identifier'])
"""
# init()
# ____________________________________________________________________________________
def __init__(self, query, fields=['identifier'], params={}, config=None, v2=False):
self.session = session.ArchiveSession(config)
self.http_session = requests.sessions.Session()
self.url = 'http://archive.org/advancedsearch.php'
default_params = dict(
q=query,
rows=100,
)
if v2:
self.session.cookies['ui3'] = 'ia-wrapper'
self.http_session.cookies = self.session.cookies
self.params = default_params.copy()
self.params.update(params)
if not self.params.get('output'):
self.params['output'] = 'json'
for k, v in enumerate(fields):
key = 'fl[{0}]'.format(k)
self.params[key] = v
self._search_info = self._get_search_info()
self.num_found = self._search_info['response']['numFound']
self.query = self._search_info['responseHeader']['params']['q']
# __repr__()
# ____________________________________________________________________________________
def __repr__(self):
return ('Search(query={query!r}, '
'num_found={num_found!r})'.format(**self.__dict__))
# _get_search_info()
# ____________________________________________________________________________________
def _get_search_info(self):
info_params = self.params.copy()
info_params['rows'] = 0
r = self.http_session.get(self.url, params=self.params)
results = r.json()
del results['response']['docs']
return results
# __iter__()
# ____________________________________________________________________________________
def __iter__(self):
"""Generator for iterating over search results"""
total_pages = ((self.num_found / self.params['rows']) + 2)
for page in range(1, total_pages):
self.params['page'] = page
r = self.http_session.get(self.url, params=self.params)
results = r.json()
for doc in results['response']['docs']:
yield doc
|
Python
| 0
|
@@ -1011,16 +1011,76 @@
if v2:%0A
+ # Use %221%22 as value to not confuse IA analytics.%0A
@@ -1118,18 +1118,9 @@
= '
-ia-wrapper
+1
'%0A
|
bf4147ec6c0b8ba73b484700912556367d7bbb81
|
Remove logging
|
tinman/model.py
|
tinman/model.py
|
"""
Base tinman data models. The Model class is the base model that all other base
model classes extend. StorageModel defines the interfaces for models with built
in storage functionality.
Specific model storage base classes exist in the tornado.model package.
Example use::
from tornado import gen
from tornado import web
from tinman.handlers import redis_handlers
from tinman.model.redis import AsyncRedisModel
class ExampleModel(AsyncRedisModel):
name = None
age = None
location = None
class Test(redis_handlers.AsynchronousRedisRequestHandler):
@web.asynchronous
@gen.engine
def get(self, *args, **kwargs):
model = ExampleModel(self.get_argument('id'),
redis_client=self.redis)
yield model.fetch()
self.finish(model.as_dict())
@web.asynchronous
@gen.engine
def post(self, *args, **kwargs):
model = ExampleModel(self.get_argument('id', None),
redis_client=self.redis)
# Assign the posted values, requiring at least a name
model.name = self.get_argument('name')
model.age = self.get_argument('age', None)
model.location = self.get_argument('location', None)
# Save the model
result = yield model.save()
if result:
self.set_status(201)
self.finish(model.as_dict())
else:
raise web.HTTPError(500, 'Could not save model')
"""
import base64
from tornado import gen
import hashlib
import logging
import time
import uuid
from tinman import mapping
LOGGER = logging.getLogger(__name__)
class Model(mapping.Mapping):
"""A data object that provides attribute level assignment and retrieval of
values, serialization and deserialization, the ability to load values from
a dict and dump them to a dict, and Mapping and iterator behaviors.
Base attributes are provided for keeping track of when the model was created
and when it was last updated.
If model attributes are passed into the constructor, they will be assigned
to the model upon creation.
:param str item_id: An id for the model, defaulting to a random UUID
:param dict kwargs: Additional kwargs passed in
"""
id = None
created_at = None
last_updated_at = None
def __init__(self, item_id=None, **kwargs):
"""Create a new instance of the model, passing in a id value."""
self.id = item_id or str(uuid.uuid4())
self.created_at = int(time.time())
self.last_updated_at = None
# If values are in the kwargs that match the model keys, assign them
for k in [k for k in kwargs.keys() if k in self.keys()]:
setattr(self, k, kwargs[k])
def from_dict(self, value):
"""Set the values of the model based upon the content of the passed in
dictionary.
:param dict value: The dictionary of values to assign to this model
"""
for key in self.keys():
setattr(self, key, value.get(key, None))
def sha1(self):
"""Return a sha1 hash of the model items.
:rtype: str
"""
sha1 = hashlib.sha1(''.join(['%s:%s' % (k,v) for k,v in self.items()]))
return str(sha1.hexdigest())
class StorageModel(Model):
"""A base model that defines the behavior for models with storage backends.
:param str item_id: An id for the model, defaulting to a random UUID
:param dict kwargs: Additional kwargs passed in
"""
_new = True
def __init__(self, item_id=None, **kwargs):
super(StorageModel, self).__init__(item_id, **kwargs)
if self.id:
# It's no longer a new model, since it's a load
self._new = False
# Fetch the model values from storage
self.fetch()
# Toggle the changed back to false since it's an initial load
self._dirty = False
def delete(self):
"""Delete the data for the model from storage and assign the values.
:raises: NotImplementedError
"""
raise NotImplementedError("Must extend this method")
def fetch(self):
"""Fetch the data for the model from storage and assign the values.
:raises: NotImplementedError
"""
raise NotImplementedError("Must extend this method")
def save(self):
"""Store the model.
:raises: NotImplementedError
"""
raise NotImplementedError("Must extend this method")
@property
def is_new(self):
"""Return a bool indicating if it's a new item or not
:rtype: bool
"""
return self._new
class AsyncRedisModel(StorageModel):
"""A model base class that uses Redis for the storage backend. Uses the
asynchronous tornadoredis client. If you assign a value to the _ttl
attribute, that _ttl value will be used to set the expiraiton of the
data in redis.
Data is serialized with msgpack to cut down on the byte size, but due to
the binary data, it is then base64 encoded. This is a win on large objects
but a slight amount of overhead on smaller ones.
:param str item_id: The id for the data item
:param tornadoredis.Client: The already created tornadoredis client
"""
_redis_client = None
_saved = False
_ttl = None
def __init__(self, item_id=None, *args, **kwargs):
if 'msgpack' not in globals():
import msgpack
self._serializer = msgpack
if 'redis_client' not in kwargs:
raise ValueError('redis_client must be passed in')
LOGGER.info('%r -- %r', args, kwargs)
LOGGER.info(repr(kwargs.get('redis_client')))
self._redis_client = kwargs['redis_client']
# The parent will attempt to fetch the value if item_id is set
super(AsyncRedisModel, self).__init__(item_id, **kwargs)
@property
def _key(self):
"""Return a storage key for Redis that consists of the class name of
the model and its id joined by :.
:rtype: str
"""
return '%s:%s' % (self.__class__.__name__, self.id)
@gen.coroutine
def delete(self):
"""Delete the item from storage
:rtype: bool
"""
result = gen.Task(self._redis_client.delete, self._key)
raise gen.Return(bool(result))
@gen.coroutine
def fetch(self):
"""Fetch the data for the model from Redis and assign the values.
:rtype: bool
"""
raw = yield gen.Task(self._redis_client.get, self._key)
if raw:
self.loads(base64.b64decode(raw))
raise gen.Return(True)
raise gen.Return(False)
@gen.coroutine
def save(self):
"""Store the model in Redis.
:rtype: bool
"""
pipeline = self._redis_client.pipeline()
pipeline.set(self._key, base64.b64encode(self.dumps()))
if self._ttl:
pipeline.expire(self._key, self._ttl)
result = yield gen.Task(pipeline.execute)
self._dirty, self._saved = not all(result), all(result)
raise gen.Return(all(result))
|
Python
| 0.000001
|
@@ -5741,108 +5741,8 @@
n')%0A
- LOGGER.info('%25r -- %25r', args, kwargs)%0A LOGGER.info(repr(kwargs.get('redis_client')))%0A
|
e0b298f1df9a2d4e8868d6f055a27b5fb0bb8296
|
Add helper method to model
|
links/maker/models.py
|
links/maker/models.py
|
import uuid
from datetime import datetime
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from django.db import models
from django.utils import timezone
from maker.managers import (MakerManager,
PasswordResetTokenManager,
EmailChangeTokenManager)
def make_token():
return str(uuid.uuid4())
class Maker(PermissionsMixin, AbstractBaseUser):
REGULAR = 'RG'
SIGNUP_TYPES = (
(REGULAR, 'Regular'),
)
identifier = models.CharField(max_length=200, unique=True)
email = models.EmailField()
is_admin = models.BooleanField(default=False)
joined = models.DateTimeField(auto_now_add=True)
verified = models.BooleanField(default=False)
photo_url = models.URLField(blank=True)
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
bio = models.TextField(blank=True)
signup_type = models.CharField(max_length=2, choices=SIGNUP_TYPES,
default=REGULAR)
USERNAME_FIELD = 'identifier'
REQUIRED_FIELDS = ['first_name', 'last_name']
objects = MakerManager()
def __unicode__(self):
return self.identifier
@property
def is_staff(self):
return self.is_admin
def get_short_name(self):
return self.first_name
def get_full_name(self):
return "{0} {1}".format(self.first_name, self.last_name)
def change_email(self, new_email):
self.email = new_email
self.identifier = new_email
self.save()
class EmailVerificationToken(models.Model):
maker = models.ForeignKey('Maker')
token = models.CharField(max_length=50, default=make_token)
date = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name = 'Email Verification Token'
verbose_name_plural = 'Email Verification Tokens'
def __unicode__(self):
return self.maker.identifier
class PasswordResetToken(models.Model):
maker = models.ForeignKey('Maker')
token = models.CharField(max_length=50, default=make_token)
date = models.DateTimeField(auto_now_add=True)
objects = PasswordResetTokenManager()
def __unicode__(self):
return self.maker.identifier
@property
def is_valid(self):
return self.date < timezone.now()
class EmailChangeToken(models.Model):
maker = models.ForeignKey('Maker')
new_email = models.EmailField()
token = models.CharField(max_length=50, default=make_token)
date = models.DateTimeField(auto_now_add=True)
objects = EmailChangeTokenManager()
def __unicode__(self):
return self.maker.identifier
@property
def is_valid(self):
return self.date < timezone.now()
|
Python
| 0.000001
|
@@ -1578,24 +1578,102 @@
elf.save()%0A%0A
+ def verify_email(self):%0A self.verified = True%0A self.save()%0A%0A
%0Aclass Email
|
c95a54df6ea3e5e5c98d9c6de701038b37c95298
|
Kill duplicate __getitem__ from a bad merge.
|
src/python/twitter/pants/goal/products.py
|
src/python/twitter/pants/goal/products.py
|
from collections import defaultdict
class Products(object):
class ProductMapping(object):
"""Maps products of a given type by target. Each product is a map from basedir to a list of
files in that dir.
"""
def __init__(self, typename):
self.typename = typename
self.by_target = defaultdict(lambda: defaultdict(list))
def empty(self):
return len(self.by_target) == 0
def add(self, target, basedir, product_paths=None):
"""
Adds a mapping of products for the given target, basedir pair.
If product_paths are specified, these will over-write any existing mapping for this target.
If product_paths is omitted, the current mutable list of mapped products for this target
and basedir is returned for appending.
"""
if product_paths is not None:
self.by_target[target][basedir].extend(product_paths)
else:
return self.by_target[target][basedir]
def has(self, target):
"""Returns whether we have a mapping for the specified target."""
return target in self.by_target
def get(self, target):
"""
Returns the product mapping for the given target as a tuple of (basedir, products list).
Can return None if there is no mapping for the given target.
"""
return self.by_target.get(target)
def __getitem__(self, target):
"""
Support for subscripting into this mapping. Returns the product mapping for the given target
as a map of <basedir> -> <products list>.
If no mapping exists, returns an empty map whose values default to empty lists. So you
can use the result without checking for None.
"""
return self.by_target[target]
def __getitem__(self, target):
"""
Support for subscripting into this mapping. Returns the product mapping for the given target
as a map of <basedir> -> <products list>.
If no mapping exists, returns an empty map whose values default to empty lists. So you
can use the result without checking for None.
"""
return self.by_target[target]
def itermappings(self):
"""
Returns an iterable over all pairs (target, product) in this mapping.
Each product is itself a map of <basedir> -> <products list>.
"""
return self.by_target.iteritems()
def keys_for(self, basedir, file):
"""Returns the set of keys the given mapped product is registered under."""
keys = set()
for key, mappings in self.by_target.items():
for mapped in mappings.get(basedir, []):
if file == mapped:
keys.add(key)
break
return keys
def __repr__(self):
return 'ProductMapping(%s) {\n %s\n}' % (self.typename, '\n '.join(
'%s => %s\n %s' % (str(target), basedir, outputs)
for target, outputs_by_basedir in self.by_target.items()
for basedir, outputs in outputs_by_basedir.items()))
def __init__(self):
self.products = {} # type -> ProductMapping instance.
self.predicates_for_type = defaultdict(list)
self.data_products = {} # type -> arbitrary object.
self.required_data_products = set()
def require(self, typename, predicate=None):
"""Registers a requirement that file products of the given type by mapped.
If a target predicate is supplied, only targets matching the predicate are mapped.
"""
if predicate:
self.predicates_for_type[typename].append(predicate)
return self.products.setdefault(typename, Products.ProductMapping(typename))
def isrequired(self, typename):
"""Returns a predicate that selects targets required for the given type if mappings are required.
Otherwise returns None.
"""
if typename not in self.products:
return None
def combine(first, second):
return lambda target: first(target) or second(target)
return reduce(combine, self.predicates_for_type[typename], lambda target: False)
def get(self, typename):
"""Returns a ProductMapping for the given type name."""
return self.require(typename)
def require_data(self, typename):
""" Registers a requirement that data produced by tasks is required.
typename: the name of a data product that should be generated.
"""
self.required_data_products.add(typename)
def is_required_data(self, typename):
""" Checks if a particular data product is required by any tasks."""
return typename in self.required_data_products
def get_data(self, typename):
""" Returns a data product, or None if the product isn't found."""
return self.data_products.get(typename)
def set_data(self, typename, data):
""" Stores a required data product.
If the product already exists, the value is replaced.
"""
self.data_products[typename] = data
|
Python
| 0
|
@@ -1742,400 +1742,8 @@
t%5D%0A%0A
- def __getitem__(self, target):%0A %22%22%22%0A Support for subscripting into this mapping. Returns the product mapping for the given target%0A as a map of %3Cbasedir%3E -%3E %3Cproducts list%3E.%0A If no mapping exists, returns an empty map whose values default to empty lists. So you%0A can use the result without checking for None.%0A %22%22%22%0A return self.by_target%5Btarget%5D%0A%0A
|
88110772793f6c9a33e74c85a31edf7e7dd2f3e2
|
Fix error when catching exception: local variable 'releasedScenario' referenced before assignment
|
freppledb/execute/management/commands/scenario_release.py
|
freppledb/execute/management/commands/scenario_release.py
|
#
# Copyright (C) 2010-2019 by frePPLe bv
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from datetime import datetime
import os
import subprocess
from django.core import management
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from django.utils.translation import gettext_lazy as _
from django.template.loader import render_to_string
from freppledb.execute.models import Task, ScheduledTask
from freppledb.common.models import User, Scenario
from freppledb import __version__
class Command(BaseCommand):
help = """
This command releases a scenario. It changes its status from "In use" to "Free".
"""
requires_system_checks = False
def get_version(self):
return __version__
def add_arguments(self, parser):
parser.add_argument("--user", help="User running the command"),
parser.add_argument(
"--task",
type=int,
help="Task identifier (generated automatically if not provided)",
)
parser.add_argument(
"--database", default=DEFAULT_DB_ALIAS, help="The scenario to be released."
)
def handle(self, **options):
if options["user"]:
try:
user = User.objects.all().get(username=options["user"])
except Exception:
raise CommandError("User '%s' not found" % options["user"])
else:
user = None
# Synchronize the scenario table with the settings
Scenario.syncWithSettings()
now = datetime.now()
task = None
if "task" in options and options["task"]:
try:
task = (
Task.objects.all().using(DEFAULT_DB_ALIAS).get(pk=options["task"])
)
except Exception:
raise CommandError("Task identifier not found")
if (
task.started
or task.finished
or task.status != "Waiting"
or task.name != "scenario_release"
):
raise CommandError("Invalid task identifier")
task.status = "0%"
task.started = now
else:
task = Task(
name="scenario_release",
submitted=now,
started=now,
status="0%",
user=user,
)
task.processid = os.getpid()
task.save(using=DEFAULT_DB_ALIAS)
# Validate the arguments
database = options["database"]
try:
try:
releasedScenario = Scenario.objects.using(DEFAULT_DB_ALIAS).get(
pk=database
)
except Exception:
raise CommandError(
"No destination database defined with name '%s'" % database
)
if database == DEFAULT_DB_ALIAS:
raise CommandError("Production scenario cannot be released.")
if releasedScenario.status != "In use":
raise CommandError("Scenario to release is not in use")
# Update the scenario table, set it free in the production database
releasedScenario.status = "Free"
releasedScenario.lastrefresh = datetime.today()
releasedScenario.save(using=DEFAULT_DB_ALIAS)
# Killing webservice
if "freppledb.webservice" in settings.INSTALLED_APPS:
management.call_command("stopwebservice", force=True, database=database)
# Logging message
task.processid = None
task.status = "Done"
task.finished = datetime.now()
# Update the task in the destination database
task.message = "Scenario %s released" % (database,)
task.save(using=DEFAULT_DB_ALIAS)
except Exception as e:
if task:
task.status = "Failed"
task.message = "%s" % e
task.finished = datetime.now()
if releasedScenario and releasedScenario.status == "Busy":
releasedScenario.status = "Free"
releasedScenario.save(using=DEFAULT_DB_ALIAS)
raise e
finally:
if task:
task.processid = None
task.save(using=DEFAULT_DB_ALIAS)
|
Python
| 0.000003
|
@@ -3214,32 +3214,68 @@
%22%5D%0A try:%0A
+ releasedScenario = None%0A
try:
|
63cdfe0de155ed32af0332310340b4d57dcef145
|
bump version for release
|
stdeb/__init__.py
|
stdeb/__init__.py
|
# setuptools is required for distutils.commands plugin we use
import logging
import setuptools
__version__ = '0.4.2.git'
log = logging.getLogger('stdeb')
log.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
|
Python
| 0
|
@@ -111,13 +111,9 @@
0.4.
-2.git
+3
'%0A%0Al
|
a88a01f9e6ba01be7d68719f493405ea584b1566
|
Fix merge fallout
|
lib/aquilon/worker/commands/search_machine.py
|
lib/aquilon/worker/commands/search_machine.py
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq search machine`."""
from sqlalchemy.orm import aliased, subqueryload, joinedload, lazyload
from aquilon.aqdb.model import (Machine, Cpu, Cluster, ClusterResource, Share,
VirtualNasDisk, Disk, MetaCluster, DnsRecord)
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.dbwrappers.hardware_entity import (
search_hardware_entity_query)
from aquilon.worker.formats.list import StringAttributeList
class CommandSearchMachine(BrokerCommand):
required_parameters = []
def render(self, session, hostname, machine, cpuname, cpuvendor, cpuspeed,
cpucount, memory, cluster, share, fullinfo, style, **arguments):
if fullinfo or style != 'raw':
q = search_hardware_entity_query(session, Machine, **arguments)
else:
q = search_hardware_entity_query(session, Machine.label, **arguments)
if machine:
q = q.filter_by(label=machine)
if hostname:
dns_rec = DnsRecord.get_unique(session, fqdn=hostname, compel=True)
q = q.filter(Machine.primary_name_id == dns_rec.id)
if cpuname or cpuvendor or cpuspeed is not None:
subq = Cpu.get_matching_query(session, name=cpuname,
vendor=cpuvendor, speed=cpuspeed,
compel=True)
q = q.filter(Machine.cpu_id.in_(subq))
if cpucount is not None:
q = q.filter_by(cpu_quantity=cpucount)
if memory is not None:
q = q.filter_by(memory=memory)
if cluster:
dbcluster = Cluster.get_unique(session, cluster, compel=True)
if isinstance(dbcluster, MetaCluster):
q = q.join('vm_container', ClusterResource, Cluster)
q = q.filter_by(metacluster=dbcluster)
else:
q = q.join('vm_container', ClusterResource)
q = q.filter_by(cluster=dbcluster)
q = q.reset_joinpoint()
if share:
#v2
v2shares = session.query(Share.id).filter_by(name=share).all()
if v2shares:
NasAlias = aliased(VirtualNasDisk)
q = q.join('disks', (NasAlias, NasAlias.id == Disk.id))
q = q.filter(
NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
q = q.reset_joinpoint()
if fullinfo:
q = q.options(joinedload('location'),
subqueryload('interfaces'),
lazyload('interfaces.hardware_entity'),
joinedload('interfaces.assignments'),
joinedload('interfaces.assignments.dns_records'),
joinedload('chassis_slot'),
subqueryload('chassis_slot.chassis'),
subqueryload('disks'),
subqueryload('host'),
lazyload('host.machine'),
subqueryload('host.services_used'),
subqueryload('host._cluster'),
lazyload('host._cluster.host'))
return q.all()
return StringAttributeList(q.all(), "label")
|
Python
| 0.000329
|
@@ -3758,23 +3758,31 @@
d('host.
-machine
+hardware_entity
'),%0A
|
cbe58b74f6d5fe5c96b197ced9c2269cf8886d24
|
make boolean functions in utils return real booleans
|
livesettings/utils.py
|
livesettings/utils.py
|
import sys
import types
import os
def can_loop_over(maybe):
"""Test value to see if it is list like"""
try:
iter(maybe)
except:
return 0
else:
return 1
def is_list_or_tuple(maybe):
return isinstance(maybe, (types.TupleType, types.ListType))
def is_scalar(maybe):
"""Test to see value is a string, an int, or some other scalar type"""
return is_string_like(maybe) or not can_loop_over(maybe)
def is_string_like(maybe):
"""Test value to see if it acts like a string"""
try:
maybe+""
except TypeError:
return 0
else:
return 1
def flatten_list(sequence, scalarp=is_scalar, result=None):
"""flatten out a list by putting sublist entries in the main list"""
if result is None:
result = []
for item in sequence:
if scalarp(item):
result.append(item)
else:
flatten_list(item, scalarp, result)
def load_module(module):
"""Load a named python module."""
try:
module = sys.modules[module]
except KeyError:
__import__(module)
module = sys.modules[module]
return module
def get_flat_list(sequence):
"""flatten out a list and return the flat list"""
flat = []
flatten_list(sequence, result=flat)
return flat
def url_join(*args):
"""Join any arbitrary strings into a forward-slash delimited string.
Do not strip leading / from first element, nor trailing / from last element.
This function can take lists as arguments, flattening them appropriately.
example:
url_join('one','two',['three','four'],'five') => 'one/two/three/four/five'
"""
if len(args) == 0:
return ""
args = get_flat_list(args)
if len(args) == 1:
return str(args[0])
else:
args = [str(arg).replace("\\", "/") for arg in args]
work = [args[0]]
for arg in args[1:]:
if arg.startswith("/"):
work.append(arg[1:])
else:
work.append(arg)
joined = reduce(os.path.join, work)
return joined.replace("\\", "/")
|
Python
| 0.999134
|
@@ -140,16 +140,26 @@
except
+ TypeError
:%0A
@@ -171,36 +171,29 @@
urn
-0%0A else:%0A
+False%0A
-
return
-1
+True
%0A%0Ade
@@ -591,24 +591,14 @@
urn
-0%0A e
+Fa
lse
-:
%0A
-
@@ -604,17 +604,20 @@
return
-1
+True
%0A%0A%0Adef f
|
60217450d91cc560c42c1b6e04e514df318d8014
|
Remove redundant comment (it's repeated in the called method)
|
gitlabform/gitlabform/processors/util/branch_protector.py
|
gitlabform/gitlabform/processors/util/branch_protector.py
|
import logging
import sys
import cli_ui
from gitlabform import EXIT_PROCESSING_ERROR
from gitlabform.gitlab import GitLab
from gitlabform.gitlab.core import NotFoundException
class BranchProtector(object):
old_api_keys = ["developers_can_push", "developers_can_merge"]
new_api_keys = [
"push_access_level",
"merge_access_level",
"unprotect_access_level",
]
def __init__(self, gitlab: GitLab, strict: bool):
self.gitlab = gitlab
self.strict = strict
def protect_branch(self, project_and_group, configuration, branch):
try:
requested_configuration = configuration["branches"][branch]
if requested_configuration.get("protected"):
# note that for old API *all* keys have to be defined...
if all(key in requested_configuration for key in self.old_api_keys):
# unprotect first to reset 'allowed to merge' and 'allowed to push' fields
self.protect_using_old_api(
requested_configuration, project_and_group, branch
)
# ...while for the new one we need ANY new key
elif any(key in requested_configuration for key in self.new_api_keys):
if self.configuration_update_needed(
requested_configuration, project_and_group, branch
):
self.protect_using_new_api(
requested_configuration, project_and_group, branch
)
else:
logging.debug(
"Skipping set branch '%s' access levels because they're already set"
)
return
# TODO: is this ok that we skip below code in this case?
if "code_owner_approval_required" in requested_configuration:
self.set_code_owner_approval_required(
requested_configuration, project_and_group, branch
)
else:
self.unprotect(project_and_group, branch)
except NotFoundException:
message = f"Branch '{branch}' not found when trying to set it as protected/unprotected!"
if self.strict:
cli_ui.error(message)
sys.exit(EXIT_PROCESSING_ERROR)
else:
cli_ui.warning(message)
def protect_using_old_api(self, requested_configuration, project_and_group, branch):
logging.warning(
f"Using keys {self.old_api_keys} for configuring protected"
" branches is deprecated and will be removed in future versions of GitLabForm."
f" Please start using new keys: {self.new_api_keys}"
)
logging.debug("Setting branch '%s' as *protected*", branch)
# unprotect first to reset 'allowed to merge' and 'allowed to push' fields
self.gitlab.unprotect_branch_new_api(project_and_group, branch)
self.gitlab.protect_branch(
project_and_group,
branch,
requested_configuration["developers_can_push"],
requested_configuration["developers_can_merge"],
)
def protect_using_new_api(self, requested_configuration, project_and_group, branch):
logging.debug("Setting branch '%s' access level", branch)
# unprotect first to reset 'allowed to merge' and 'allowed to push' fields
self.gitlab.unprotect_branch_new_api(project_and_group, branch)
self.gitlab.branch_access_level(
project_and_group,
branch,
requested_configuration.get("push_access_level", None),
requested_configuration.get("merge_access_level", None),
requested_configuration.get("unprotect_access_level", None),
)
def set_code_owner_approval_required(
self, requested_configuration, project_and_group, branch
):
logging.debug(
"Setting branch '%s' \"code owner approval required\" option",
branch,
)
self.gitlab.branch_code_owner_approval_required(
project_and_group,
branch,
requested_configuration["code_owner_approval_required"],
)
def configuration_update_needed(
self, requested_configuration, project_and_group, branch
):
requested_push_access_level = requested_configuration.get("push_access_level")
requested_merge_access_level = requested_configuration.get("merge_access_level")
requested_unprotect_access_level = requested_configuration.get(
"unprotect_access_level"
)
(
current_push_access_level,
current_merge_access_level,
current_unprotect_access_level,
) = self.gitlab.get_only_branch_access_levels(project_and_group, branch)
return (
requested_push_access_level,
requested_merge_access_level,
requested_unprotect_access_level,
) != (
current_push_access_level,
current_merge_access_level,
current_unprotect_access_level,
)
def unprotect(self, project_and_group, branch):
logging.debug("Setting branch '%s' as unprotected", branch)
self.gitlab.unprotect_branch_new_api(project_and_group, branch)
|
Python
| 0
|
@@ -884,104 +884,8 @@
):%0A%0A
- # unprotect first to reset 'allowed to merge' and 'allowed to push' fields%0A%0A
|
8a2a44df94c838d65058a6666f93cd92625debed
|
comment error check for NON tweets
|
stream_twitter.py
|
stream_twitter.py
|
import gc
import os
import sys
import logging
import datetime
import dateutil.parser as parser
import ConfigParser
import MySQLdb
from twitter import *
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('user')
logger.info( "Reading configurations..")
config = ConfigParser.ConfigParser()
file = config.read('config/twitter_config.cfg')
DB_HOST = config.get('DB_Config', 'db_host')
DB_NAME = config.get('DB_Config', 'db_name')
DB_USER = config.get('DB_Config', 'db_user')
DB_PASS = config.get('DB_Config', 'db_password')
CREDS_FILE = config.get('Twitter_Config', 'twitter_creds')
TWITTER_USERNAME = config.get('Twitter_Config', 'username')
CONSUMER_KEY = config.get('Twitter_Config', 'consumer_key')
CONSUMER_SECRET = config.get('Twitter_Config', 'consumer_secret')
TWITTER_CREDS = os.path.expanduser(CREDS_FILE)
oauth_token, oauth_secret = read_token_file(TWITTER_CREDS)
oauth = OAuth( oauth_token, oauth_secret,CONSUMER_KEY, CONSUMER_SECRET)
logger.info( "Trying to connect to" + DB_HOST +"...")
conn = MySQLdb.connect(host=DB_HOST, user=DB_USER, passwd=DB_PASS, db=DB_NAME)
cursor = conn.cursor()
logger.info( "...done!")
tweet_fields_list = ['id', 'user_id', 'in_reply_to_status_id', 'in_reply_to_user_id', 'favorited', 'retweeted', 'retweet_count', 'lang', 'created_at']
tweet_fields = ', '.join(tweet_fields_list)
tweet_placeholders = ', '.join(['%s']*len(tweet_fields_list))
insert_tweets_sql = 'INSERT INTO tweet (' + tweet_fields + ') VALUES (' + tweet_placeholders + ')'
tweet_text_fields_list = ['tweet_id', 'user_id', 'text', 'lat', 'long', 'place_full_name', 'place_id']
tweet_text_fields = ', '.join(tweet_text_fields_list)
tweet_text_placeholders = ', '.join(['%s']*len(tweet_text_fields_list))
insert_tweets_texts_sql = 'INSERT INTO tweet_text (' + tweet_text_fields + ') VALUES (' + tweet_text_placeholders + ')'
tweet_url_fields_list = ['tweet_id', 'progressive', 'url']
tweet_url_fields = ', '.join(tweet_url_fields_list)
tweet_url_placeholders = ', '.join(['%s']*len(tweet_url_fields_list))
insert_tweets_urls_sql = 'INSERT INTO tweet_url (' + tweet_url_fields + ') VALUES ( ' + tweet_url_placeholders + ')'
tweet_hashtag_fields_list = ['tweet_id', 'user_id', 'hashtag_id']
tweet_hashtag_fields = ', '.join(tweet_hashtag_fields_list)
tweet_hashtag_placeholders = ', '.join(['%s']*len(tweet_hashtag_fields_list))
insert_tweets_hashtags_sql = 'INSERT INTO tweet_hashtag (' + tweet_hashtag_fields + ') VALUES (' + tweet_hashtag_placeholders + ')'
insert_hashtags_sql = 'INSERT INTO tweet_hashtag (hashtag) VALUES (%s)'
user_fields_list = ['id', 'screen_name', 'name', 'verified', 'protected', 'followers_count', 'friends_count', 'statuses_count', 'favourites_count', 'location', 'utc_offset', 'time_zone', 'geo_enabled', 'lang', 'description', 'url', 'created_at']
user_fields = ', '.join(user_fields_list)
user_placeholders = ', '.join(['%s']*len(user_fields_list))
insert_users_sql = 'INSERT INTO tweet (' + user_fields + ') VALUES (' + user_placeholders + ')'
logger.info( "Connecting to the stream...")
twitter_stream = TwitterStream(auth=oauth)
iterator = twitter_stream.statuses.sample()
# Use the stream
timer = datetime.datetime.now()
tweets = []
tweet_record = []
tweet_texts = []
urls = {}
hashtags = {}
users = {}
count = 0
for tweet in iterator:
if 'text' in tweet and tweet['text'] != None and tweet['lang'] == 'en' :
count = count + 1
for field in tweet_fields_list :
if field == 'user_id' :
tweet_record.apped(tweet['user']['id'])
if field == 'created_at' :
datetime = parser.parse(tweet['created_at'])
datetime = datetime.isoformat(' ')[:-6]
tweet_record.apped(datetime)
elif field in tweet :
tweet_record.apped(tweet[field])
else:
print field
print '++'
print tweet
print '++'
print tweet.keys()
break
if len(tweet['entities']) >0 and len(tweet['entities']['urls']) > 0 :
for url in tweet['entities']['urls'] :
print url
for hash in tweet['entities']['hashtags'] :
print hash
if count > 5 :
break
else :
print "What's this!?"
print tweet
break
print "-------"
print count
#Todo Save to DB
|
Python
| 0
|
@@ -4471,24 +4471,25 @@
break%0A
+#
else :%0A
@@ -4483,24 +4483,25 @@
#else :%0A
+#
print %22W
@@ -4514,24 +4514,25 @@
this!?%22%0A
+#
print tw
@@ -4535,24 +4535,25 @@
t tweet%0A
+#
break%0A
|
2bf6b59a129a9d93328c3478e57a27f35bdf2e6a
|
Trim the hardcoded list of keywords
|
screencasts/hello-weave/highlight.py
|
screencasts/hello-weave/highlight.py
|
#!/usr/bin/env python3
import json
prompt = 'ilya@weave-01:~$ '
highlight = [
('weave-01', 'red'),
('weave-02', 'red'),
('docker', 'red'),
('run', 'red'),
('--name', 'red'),
('hello', 'red'),
('netcat', 'red'),
('-lk', 'red'),
('1234', 'red'),
('sudo curl -s -L git.io/weave -o /usr/local/bin/weave', 'red'),
('b4e40e4b4665a1ffa23f90eb3ab57c83ef243e64151bedc1501235df6e532e09\r\n', 'red'),
('Hello, Weave!\r\n', 'red'),
]
highlight_tokens = [t[0] for t in highlight]
colours = {
'red': ('\033[91m', '\033[00m'),
}
for f in ['rec-weave-01.json', 'rec-weave-02.json']:
with open(f) as json_data:
tokens = []
d = json.load(json_data)
json_data.close()
commands = d['stdout']
word = ''
word_start = 0
for i,x in enumerate(commands):
curr = x[1]
if curr == prompt: continue
elif curr != '\r\n' and curr != ' ' and len(curr) == 1:
if word_start == 0:
word_start = i
word = curr
else:
word += curr
elif (curr == '\r\n' or curr == ' ') and word_start != 0:
tokens.append((word, word_start, True))
word_start = 0
elif curr != '\r\n' and len(curr) > 1:
tokens.append((curr, i, False))
offset = 0
for x in tokens:
if x[0] in highlight_tokens:
commands.insert(x[1] + offset, [0, colours['red'][0]])
offset += 1
l = len(x[0]) if x[2] else 1
commands.insert(x[1] + l + offset, [0, colours['red'][1]])
offset += 1
d['commands'] = commands
with open('fancy-' + f, 'w') as json_output:
json_output.write(json.dumps(d))
json_output.close()
|
Python
| 0.999999
|
@@ -279,162 +279,8 @@
'),%0A
- ('sudo curl -s -L git.io/weave -o /usr/local/bin/weave', 'red'),%0A ('b4e40e4b4665a1ffa23f90eb3ab57c83ef243e64151bedc1501235df6e532e09%5Cr%5Cn', 'red'),%0A
|
d387a1976e902bbf7fa6d960bee5d16db7aacbb0
|
Fix indentation. Comment out superfluous code
|
tools/_build.py
|
tools/_build.py
|
"""
The cython function was adapted from scikits-image (http://scikits-image.org/)
"""
import sys
import os
import shutil
import subprocess
import platform
from distutils.dist import Distribution
from distutils.command.config import config as distutils_config
from distutils import log
import optparse # deprecated in 2.7 for argparse
dummy_c_text = r'''
/* This file is generated from statsmodels/tools/_build.py to */
void do_nothing(void);
int main(void) {
do_nothing();
return 0;
}
'''
def has_c_compiler():
c = distutils_config(Distribution())
if platform.system() == "Windows": # HACK
# check if mingw was given in compiler options
parser = optparse.OptionParser()
parser.add_option('-c', '--compiler', dest='compiler')
options, args = parser.parse_args()
if options.compiler and 'mingw' in options.compiler:
return True
# if not, then check to see if compiler is set in disutils.cfg
try: # Josef's code to check the distutils.cfg file
c.distribution.parse_config_files(c.distribution.find_config_files())
# this will raise a key error if there's not one
c.distribution.command_options['build']['compiler'][1]
return True
except:
pass
# just see if there's a system compiler
try:
success = c.try_compile(dummy_c_text)
return True
except:
log.info("No C compiler detected. Not installing Cython version "
"of files.")
return False
def cython(pyx_files, working_path=''):
"""Use Cython to convert the given files to C.
Parameters
----------
pyx_files : list of str
The input .pyx files.
"""
# Do not build cython files if target is clean
if len(sys.argv) >= 2 and sys.argv[1] == 'clean':
return
try:
import Cython
except ImportError:
# If cython is not found, we do nothing -- the build will make use of
# the distributed .c files
print("Cython not found; falling back to pre-built %s" \
% " ".join([f.replace('.pyx', '.c') for f in pyx_files]))
else:
for pyxfile in [os.path.join(working_path, f) for f in pyx_files]:
#TODO: replace this with already written hash_funcs once merged
# if the .pyx file stayed the same, we don't need to recompile
#if not _changed(pyxfile):
# continue
c_file = pyxfile[:-4] + '.c'
# run cython compiler
cmd = 'cython -o %s %s' % (c_file, pyxfile)
print(cmd)
if platform.system() == 'Windows':
status = subprocess.call(
[sys.executable,
os.path.join(os.path.dirname(sys.executable),
'Scripts', 'cython.py'),
'-o', c_file, pyxfile],
shell=True)
else:
status = subprocess.call(['cython', '-o', c_file, pyxfile])
|
Python
| 0.000001
|
@@ -604,16 +604,91 @@
# HACK%0A
+ # this doesn't matter because mingw won't be given at install step%0A
@@ -738,24 +738,25 @@
ons%0A
+#
parser = opt
@@ -784,16 +784,17 @@
+#
parser.a
@@ -848,16 +848,17 @@
+#
options,
@@ -893,16 +893,17 @@
+#
if optio
@@ -951,24 +951,25 @@
er:%0A
+#
return T
@@ -968,24 +968,25 @@
return True%0A
+%0A
# if
@@ -1044,24 +1044,28 @@
ils.cfg%0A
+
+
try: # Josef
@@ -1100,24 +1100,28 @@
ls.cfg file%0A
+
c.di
@@ -1190,24 +1190,28 @@
())%0A
+
# this will
@@ -1247,32 +1247,36 @@
not one%0A
+
+
c.distribution.c
@@ -1314,32 +1314,36 @@
er'%5D%5B1%5D%0A
+
return True%0A
@@ -1338,32 +1338,36 @@
rn True%0A
+
+
except:%0A
pass
@@ -1346,32 +1346,36 @@
except:%0A
+
pass%0A
|
a91ac10af21cf644bfc45ef729e465726491db7b
|
Enable android_test and friends as waf commands.
|
tools/flambe.py
|
tools/flambe.py
|
#!/usr/bin/env python
from waflib import *
from waflib.TaskGen import *
import os
# Waf hates absolute paths for some reason
FLAMBE_ROOT = os.path.dirname(__file__) + "/.."
def options(ctx):
ctx.add_option("--debug", action="store_true", default=False, help="Build a development version")
def configure(ctx):
ctx.load("haxe", tooldir=FLAMBE_ROOT+"/tools")
ctx.env.debug = ctx.options.debug
@feature("flambe")
def apply_flambe(ctx):
flags = ["-main", ctx.main]
hasBootstrap = ctx.path.find_dir("res/bootstrap")
if ctx.env.debug:
flags += "-debug --no-opt --no-inline".split()
else:
#flags += "--dead-code-elimination --no-traces".split()
flags += "--no-traces".split()
ctx.bld(features="haxe", classpath=["src", FLAMBE_ROOT+"/src"],
flags=flags,
swflib="bootstrap.swf" if hasBootstrap else None,
target="app.swf")
ctx.bld(features="haxe", classpath=["src", FLAMBE_ROOT+"/src"],
flags=flags + "-D amity --macro flambe.macro.AmityJSGenerator.use()".split(),
target="app.js")
res = ctx.path.find_dir("res")
if res is not None:
# Create asset swfs from the directories in /res
ctx.bld(features="haxe", classpath=FLAMBE_ROOT+"/tools",
flags="-main AssetPackager",
libs="format",
target="packager.n")
# -interp because neko JIT is unstable...
ctx.bld(rule="neko -interp ${SRC} " + res.abspath() + " .",
source="packager.n", target= "bootstrap.swf" if hasBootstrap else None, always=True)
# TODO: How can we expose these handy commands to the main wscript?
def android_test(ctx):
os.system("adb push res /sdcard/amity-dev")
os.system("adb push build/app.js /sdcard/amity-dev")
os.system("adb shell am start -a android.intent.action.MAIN " +
"-c android.intent.category.HOME")
os.system("adb shell am start -a android.intent.action.MAIN " +
"-n com.threerings.amity/.AmityActivity")
def flash_test(ctx):
os.system("flashplayer build/app.swf")
def android_log(ctx):
os.system("adb logcat -v tag amity:V SDL:V *:W")
|
Python
| 0
|
@@ -1576,76 +1576,8 @@
e)%0A%0A
-# TODO: How can we expose these handy commands to the main wscript?%0A
def
@@ -1933,72 +1933,64 @@
y%22)%0A
-%0Adef flash_test(ctx):%0A os.system(%22flashplayer build/app.swf%22)
+Context.g_module.__dict__%5B%22android_test%22%5D = android_test
%0A%0Ade
@@ -2062,8 +2062,181 @@
V *:W%22)%0A
+Context.g_module.__dict__%5B%22android_log%22%5D = android_log%0A%0Adef flash_test(ctx):%0A os.system(%22flashplayer build/app.swf%22)%0AContext.g_module.__dict__%5B%22flash_test%22%5D = flash_test%0A
|
15c9515a718fbb3e649a63582a4316f29c25fa6e
|
Check if regform requires users to be logged in
|
indico/modules/events/registration/controllers/display.py
|
indico/modules/events/registration/controllers/display.py
|
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from flask import request, session, redirect
from indico.core.db import db
from indico.modules.events.registration.models.registration_forms import RegistrationForm
from indico.modules.events.registration.models.registrations import Registration
from indico.modules.events.registration.util import (get_event_section_data, make_registration_form,
was_regform_submitted, save_registration_to_session)
from indico.modules.events.registration.views import (WPDisplayRegistrationFormConference,
WPDisplayRegistrationFormMeeting,
WPDisplayRegistrationFormLecture)
from indico.modules.payment import event_settings
from indico.web.flask.util import url_for
from MaKaC.webinterface.rh.conferenceDisplay import RHConferenceBaseDisplay
def _can_redirect_to_single_regform(regforms):
return len(regforms) == 1 and regforms[0].is_active and not was_regform_submitted(regforms[0])
class RHRegistrationFormDisplayBase(RHConferenceBaseDisplay):
CSRF_ENABLED = True
def _checkParams(self, params):
RHConferenceBaseDisplay._checkParams(self, params)
self.event = self._conf
@property
def view_class(self):
mapping = {
'conference': WPDisplayRegistrationFormConference,
'meeting': WPDisplayRegistrationFormMeeting,
'simple_event': WPDisplayRegistrationFormLecture
}
return mapping[self.event.getType()]
class RHRegistrationFormList(RHRegistrationFormDisplayBase):
"""List of all registration forms in the event"""
def _process(self):
regforms = RegistrationForm.find_all(event_id=int(self.event.id))
if _can_redirect_to_single_regform(regforms):
return redirect(url_for('.display_regform', regforms[0]))
return self.view_class.render_template('display/regforms_list.html', self.event, regforms=regforms,
event=self.event)
class RHRegistrationFormSubmit(RHRegistrationFormDisplayBase):
"""Submit a registration form"""
normalize_url_spec = {
'locators': {
lambda self: self.regform
}
}
def _checkParams(self, params):
RHRegistrationFormDisplayBase._checkParams(self, params)
self.regform = RegistrationForm.find_one(id=request.view_args['reg_form_id'])
def _process(self):
form = make_registration_form(self.regform)()
if form.validate_on_submit():
self._save_registration(form.data)
return redirect(url_for('.display_regforms_list', self.regform))
return self.view_class.render_template('display/regform_display.html', self.event, event=self.event,
sections=get_event_section_data(self.regform), regform=self.regform,
currency=event_settings.get(self.event, 'currency'))
def _save_registration(self, data):
registration = Registration(user=session.user, registration_form=self.regform)
db.session.add(registration)
for form_item in self.regform.active_fields:
value = data.get('field_{0}-{1}'.format(form_item.parent_id, form_item.id), None)
form_item.wtf_field.save_data(registration, value)
db.session.flush()
save_registration_to_session(registration)
|
Python
| 0
|
@@ -811,38 +811,135 @@
ect%0A
-%0Afrom indico.core.db import db
+from werkzeug.exceptions import Forbidden%0A%0Afrom indico.core.db import db%0Afrom indico.modules.auth.util import redirect_to_login
%0Afro
@@ -1630,16 +1630,47 @@
ettings%0A
+from indico.util.i18n import _%0A
from ind
@@ -3161,16 +3161,378 @@
%0A %7D%0A%0A
+ def _checkProtection(self):%0A RHRegistrationFormDisplayBase._checkProtection(self)%0A if self.regform.require_user and not session.user:%0A raise Forbidden(response=redirect_to_login(reason=_('You are trying to register with a form '%0A 'that requires you to be logged in')))%0A%0A
def
|
5e57234ec619d0de930333a8dde3004d1dc575d6
|
Support automatically stashing local modifications during repo-rebase.
|
subcmds/rebase.py
|
subcmds/rebase.py
|
#
# Copyright (C) 2010 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from command import Command
from git_command import GitCommand
from git_refs import GitRefs, HEAD, R_HEADS, R_TAGS, R_PUB, R_M
from error import GitError
class Rebase(Command):
common = True
helpSummary = "Rebase local branches on upstream branch"
helpUsage = """
%prog {[<project>...] | -i <project>...}
"""
helpDescription = """
'%prog' uses git rebase to move local changes in the current topic branch to
the HEAD of the upstream history, useful when you have made commits in a topic
branch but need to incorporate new upstream changes "underneath" them.
"""
def _Options(self, p):
p.add_option('-i', '--interactive',
dest="interactive", action="store_true",
help="interactive rebase (single project only)")
p.add_option('-f', '--force-rebase',
dest='force_rebase', action='store_true',
help='Pass --force-rebase to git rebase')
p.add_option('--no-ff',
dest='no_ff', action='store_true',
help='Pass --no-ff to git rebase')
p.add_option('-q', '--quiet',
dest='quiet', action='store_true',
help='Pass --quiet to git rebase')
p.add_option('--autosquash',
dest='autosquash', action='store_true',
help='Pass --autosquash to git rebase')
p.add_option('--whitespace',
dest='whitespace', action='store', metavar='WS',
help='Pass --whitespace to git rebase')
def Execute(self, opt, args):
all = self.GetProjects(args)
one_project = len(all) == 1
if opt.interactive and not one_project:
print >>sys.stderr, 'error: interactive rebase not supported with multiple projects'
return -1
for project in all:
cb = project.CurrentBranch
if not cb:
if one_project:
print >>sys.stderr, "error: project %s has a detatched HEAD" % project.relpath
return -1
# ignore branches with detatched HEADs
continue
upbranch = project.GetBranch(cb)
if not upbranch.LocalMerge:
if one_project:
print >>sys.stderr, "error: project %s does not track any remote branches" % project.relpath
return -1
# ignore branches without remotes
continue
args = ["rebase"]
if opt.whitespace:
args.append('--whitespace=%s' % opt.whitespace)
if opt.quiet:
args.append('--quiet')
if opt.force_rebase:
args.append('--force-rebase')
if opt.no_ff:
args.append('--no-ff')
if opt.autosquash:
args.append('--autosquash')
if opt.interactive:
args.append("-i")
args.append(upbranch.LocalMerge)
print >>sys.stderr, '# %s: rebasing %s -> %s' % \
(project.relpath, cb, upbranch.LocalMerge)
if GitCommand(project, args).Wait() != 0:
return -1
|
Python
| 0.000007
|
@@ -2095,24 +2095,181 @@
git rebase')
+%0A p.add_option('--auto-stash',%0A dest='auto_stash', action='store_true',%0A help='Stash local modifications before starting')
%0A%0A def Exec
@@ -3610,50 +3610,571 @@
-if GitCommand(project, args).Wait() != 0:%0A
+needs_stash = False%0A if opt.auto_stash:%0A stash_args = %5B%22update-index%22, %22--refresh%22, %22-q%22%5D%0A%0A if GitCommand(project, stash_args).Wait() != 0:%0A needs_stash = True%0A # Dirty index, requires stash...%0A stash_args = %5B%22stash%22%5D%0A%0A if GitCommand(project, stash_args).Wait() != 0:%0A return -1%0A%0A if GitCommand(project, args).Wait() != 0:%0A return -1%0A%0A if needs_stash:%0A stash_args.append('pop')%0A stash_args.append('--quiet')%0A if GitCommand(project, stash_args).Wait() != 0:%0A
|
7e519f6a04af460378b76d7d116131c528e8a25f
|
fix a logic error in directive evaluation
|
src/saga/utils/job/transfer_directives.py
|
src/saga/utils/job/transfer_directives.py
|
__author__ = "Andre Merzky, Ole Weidner"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
''' Provides a parser class for the file transfer specification as it is
defined in GFD.90, sction 4.1.3.
'''
import saga.exceptions as se
# 4.1.3 File Transfer Specifications (GFD90 p 176-177)
#
# The syntax of a file transfer directive for the job description is modeled on
# the LSF syntax (LSF stands for Load Sharing Facility, a commercial job
# scheduler by Platform Computing), and has the general syntax:
# local_file operator remote_file
# Both the local_file and the remote_file can be URLs. If they are not URLs,
# but full or relative pathnames, then the local_file is relative to the host
# where the submission is executed, and the remote_file is evaluated on the
# execution host of the job. The operator is one of the following four:
#
# '>' copies the local file to the remote file before the job starts.
# Overwrites the remote file if it exists.
# '>>' copies the local file to the remote file before the job starts.
# Appends to the remote file if it exists.
# '<' copies the remote file to the local file after the job finishes.
# Overwrites the local file if it exists.
# '<<' copies the remote file to the local file after the job finishes.
# Appends to the local file if it exists.
# ------------------------------------------------------------------------------
#
class TransferDirectives(object):
# --------------------------------------------------------------------------
#
def __init__(self, directives=None):
self._in_overwrite = list()
self._in_append = list()
self._out_overwrite = list()
self._out_append = list()
if not directives:
directives = {}
for d in directives:
if (d.count('>') > 2) or (d.count('<') > 2):
msg = "'%s' is not a valid transfer d string."
raise se.BadParameter(msg)
elif '>' in d:
(loc, rem) = d.split('>')
self._in_overwrite.append([loc.strip(), rem.strip()])
elif '>>' in d:
(loc, rem) = d.split('>>')
self._in_append.append([loc.strip(), rem.strip()])
elif '<' in d:
(loc, rem) = d.split('<')
self._out_overwrite.append([loc.strip(), rem.strip()])
elif '<<' in d:
(loc, rem) = d.split('<<')
self._out_append.append([loc.strip(), rem.strip()])
else:
msg = "'%s' is not a valid transfer directive string." % d
raise se.BadParameter(msg)
# --------------------------------------------------------------------------
#
def _to_string_list(self):
slist = list()
for (loc, rem) in self._in_overwrite:
slist.append('%s > %s' % (loc, rem))
for (loc, rem) in self._in_append:
slist.append('%s >> %s' % (loc, rem))
for (loc, rem) in self._out_overwrite:
slist.append('%s < %s' % (loc, rem))
for (loc, rem) in self._out_append:
slist.append('%s << %s' % (loc, rem))
return slist
# --------------------------------------------------------------------------
#
def __str__(self):
return str(self._to_string_list())
# --------------------------------------------------------------------------
#
@property
def in_overwrite(self):
return self._in_overwrite
# --------------------------------------------------------------------------
#
@property
def in_append(self):
return self._in_append
# --------------------------------------------------------------------------
#
@property
def out_overwrite(self):
return self._out_overwrite
# --------------------------------------------------------------------------
#
@property
def out_append(self):
return self._out_append
# --------------------------------------------------------------------------
#
@property
def string_list(self):
return self._to_string_list()
# ------------------------------------------------------------------------------
#
def _test_():
tdp = TransferDirectives(["ab","a>c", "c>>d","f<a","g<<h"])
print tdp.in_append
print tdp.in_overwrite
print tdp.out_append
print tdp.out_overwrite
# ------------------------------------------------------------------------------
|
Python
| 0.000054
|
@@ -2031,16 +2031,17 @@
elif '%3E
+%3E
' in d:%0A
@@ -2079,16 +2079,17 @@
split('%3E
+%3E
')%0A
@@ -2108,25 +2108,22 @@
elf._in_
-overwrite
+append
.append(
@@ -2162,33 +2162,32 @@
elif '%3E
-%3E
' in d:%0A
@@ -2209,33 +2209,32 @@
em) = d.split('%3E
-%3E
')%0A
@@ -2237,38 +2237,41 @@
self._in_
-append
+overwrite
.append(%5Bloc.str
@@ -2310,16 +2310,17 @@
elif '%3C
+%3C
' in d:%0A
@@ -2358,16 +2358,17 @@
split('%3C
+%3C
')%0A
@@ -2388,25 +2388,22 @@
lf._out_
-overwrite
+append
.append(
@@ -2442,33 +2442,32 @@
elif '%3C
-%3C
' in d:%0A
@@ -2489,33 +2489,32 @@
em) = d.split('%3C
-%3C
')%0A
@@ -2518,38 +2518,41 @@
self._out_
-append
+overwrite
.append(%5Bloc.str
|
de262762002f3068c560d2121080bc4f0ea07c33
|
remove cruft, logging
|
loslassa/devserver.py
|
loslassa/devserver.py
|
#!/usr/bin/env python
"""
To get quick feedback this implements an automatically reloading web server
to be run locally.
Reloading functionality is taken from the fabulous
`Werkzeug WSGI toolkit <http://www.pocoo.org/projects/werkzeug/#werkzeug>`
"""
import logging
import mimetypes
import os
import subprocess
import sys
import time
import thread
from wsgiref import simple_server, util
log = logging.getLogger(__name__)
RUN_MAIN_ENV_KEY = 'RUN_MAIN_ENV_KEY'
def restart_with_reloader():
"""Spawn a new Python interpreter with the same arguments as this one,
but running the reloader thread.
"""
while True:
log.info(' * Restarting with reloader')
args = [sys.executable] + sys.argv
new_environ = os.environ.copy()
new_environ[RUN_MAIN_ENV_KEY] = 'true'
# a weird bug on windows. sometimes unicode strings end up in the
# environment and subprocess.call does not like this, encode them
# to latin1 and continue.
if os.name == 'nt':
for key, value in new_environ.iteritems():
if isinstance(value, unicode):
new_environ[key] = value.encode('iso-8859-1')
exit_code = subprocess.call(args, env=new_environ)
if exit_code != 3:
return exit_code
def run_with_reloader(main_func, pathToWatch, interval=1):
"""Run the given function in an independent python interpreter."""
import signal
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
if os.environ.get(RUN_MAIN_ENV_KEY) == 'true':
thread.start_new_thread(main_func, ())
try:
reloader_loop(pathToWatch, interval)
except KeyboardInterrupt:
return
try:
sys.exit(restart_with_reloader())
except KeyboardInterrupt:
pass
def reloader_loop(pathToWatch, interval=1):
"""When this function is run from the main thread, it will force other
threads to exit when any files passed in here change..
Copyright notice: this function is based on ``_reloader_stat_loop()``
from Werkzeug which is based on autoreload.py
from CherryPy trac which originated from WSGIKit which is now dead.
:param LocalPath pathToWatch: path of the directory to be watched.
"""
pathTimeMap = {}
while True:
paths = [p for p in pathToWatch.walk()]
print(paths)
shortNames = [str(p).rpartition(str(pathToWatch))[-1][1:]
for p in paths]
log.debug("check for changes: %s" % (", ".join(shortNames)))
for filePath in paths:
try:
mtime = filePath.stat().st_mtime
except OSError:
continue
oldTime = pathTimeMap.get(filePath)
if oldTime is None:
pathTimeMap[filePath] = mtime
continue
elif mtime > oldTime:
log.info(' * Detected change in %r, reloading' % filePath)
sys.exit(3)
time.sleep(interval)
def make_server(path, port):
def minimal_wsgi_app(environ, respond):
"""simple wsgi app to serve html files"""
fn = os.path.join(path, environ['PATH_INFO'][1:])
if '.' not in fn.split(os.path.sep)[-1]:
fn = os.path.join(fn, 'index.html')
type_ = mimetypes.guess_type(fn)[0]
if os.path.exists(fn):
respond('200 OK', [('Content-Type', type_)])
return util.FileWrapper(open(fn, "rb"))
respond('404 Not Found', [('Content-Type', 'text/plain')])
return ['404 not found']
return simple_server.make_server('', port, minimal_wsgi_app)
def serve_with_reloader(serveFromPath, port, changedCallback, pathToWatch):
"""
:param serveFromPath: path to the folder to be served
:param pathToWatch: path to watch recursively for changed files
:param port: port to be served ond
:param changedCallback: function to be called if a monitored file changes
"""
def call_func_then_serve():
"""Calls the passed in function and then starts the server"""
log.info("call %s" % (changedCallback))
changedCallback()
server = make_server(serveFromPath, port)
log.info("serve %s on port %s, control-C to stop" %
(serveFromPath, port))
server.serve_forever()
log.info("Serve while watching folder %s" % (pathToWatch))
run_with_reloader(call_func_then_serve, pathToWatch)
|
Python
| 0
|
@@ -646,13 +646,13 @@
nfo(
-' * R
+%22***r
esta
@@ -670,17 +670,20 @@
reloader
-'
+***%22
)%0A
@@ -2365,202 +2365,8 @@
()%5D%0A
- print(paths)%0A shortNames = %5Bstr(p).rpartition(str(pathToWatch))%5B-1%5D%5B1:%5D%0A for p in paths%5D%0A log.debug(%22check for changes: %25s%22 %25 (%22, %22.join(shortNames)))%0A
@@ -2392,16 +2392,16 @@
paths:%0A
+
@@ -2486,16 +2486,91 @@
SError:%0A
+ log.warning(%22problem with %25s%22 %25 (filePath), exc_info=True)%0A
@@ -2802,13 +2802,10 @@
nfo(
-' * D
+%22d
etec
@@ -2819,18 +2819,18 @@
nge in %25
-r,
+s:
reloadi
@@ -2835,12 +2835,13 @@
ding
-'
+%22
%25
+(
file
@@ -2837,32 +2837,33 @@
ng%22 %25 (filePath)
+)
%0A
|
d42b47f971675af4b12f59089326276b3b8ff9f4
|
Bump version to 0.14.0
|
syntex/pkgmeta.py
|
syntex/pkgmeta.py
|
# -------------------------------------------------------------------------
# Package meta data.
# -------------------------------------------------------------------------
# Package version number.
__version__ = "0.13.4"
|
Python
| 0
|
@@ -215,9 +215,9 @@
%220.1
-3.4
+4.0
%22%0A
|
8390350b9b717258b2502cbcecc06ddfd73d1c59
|
Revert SAX changes
|
tslearn/docs/examples/misc/plot_sax.py
|
tslearn/docs/examples/misc/plot_sax.py
|
# -*- coding: utf-8 -*-
"""
PAA and SAX features
====================

This example presents a comparison between PAA [1], SAX [2] and 1d-SAX [3]
features.

PAA (Piecewise Aggregate Approximation) corresponds to a downsampling of the
original time series and, in each segment (segments have fixed size), the mean
value is retained.

SAX (Symbolic Aggregate approXimation) builds upon PAA by quantizing the mean
value. Quantization boundaries are computed for all symbols to be equiprobable,
under a standard normal distribution assumption.

Finally, 1d-SAX is an extension of SAX in which each segment is represented
by an affine function (2 parameters per segment are hence quantized: slope and
mean value).

[1] E. Keogh & M. Pazzani. Scaling up dynamic time warping for datamining
applications. SIGKDD 2000, pp. 285--289.
[2] J. Lin, E. Keogh, L. Wei, et al. Experiencing SAX: a novel symbolic
representation of time series. Data Mining and Knowledge Discovery,
2007. vol. 15(107)
[3] S. Malinowski, T. Guyet, R. Quiniou, R. Tavenard. 1d-SAX: a Novel
Symbolic Representation for Time Series. IDA 2013.
"""

# Author: Romain Tavenard
# License: BSD 3 clause

import numpy
import matplotlib.pyplot as plt

from tslearn.generators import random_walks
from tslearn.preprocessing import TimeSeriesScalerMeanVariance
from tslearn.piecewise import PiecewiseAggregateApproximation
from tslearn.piecewise import SymbolicAggregateApproximation, \
    OneD_SymbolicAggregateApproximation, _breakpoints

# Fixed seed so the generated walk (and hence the whole figure) is reproducible.
numpy.random.seed(0)
# Generate a random walk time series
n_ts, sz, d = 1, 100, 1
dataset = random_walks(n_ts=n_ts, sz=sz, d=d)
# Zero mean / unit variance matches the standard-normal assumption behind the
# SAX quantization boundaries.
scaler = TimeSeriesScalerMeanVariance(mu=0., std=1.)  # Rescale time series
dataset = scaler.fit_transform(dataset)

# PAA transform (and inverse transform) of the data
n_paa_segments = 10
paa = PiecewiseAggregateApproximation(n_segments=n_paa_segments)
paa_dataset_inv = paa.inverse_transform(paa.fit_transform(dataset))

# SAX transform
n_sax_symbols = 8
sax = SymbolicAggregateApproximation(n_segments=n_paa_segments,
                                     alphabet_size_avg=n_sax_symbols)
sax_dataset_inv = sax.inverse_transform(sax.fit_transform(dataset))

# 1d-SAX transform: quantizes both the mean and the slope of each segment.
n_sax_symbols_avg = 8
n_sax_symbols_slope = 8
one_d_sax = OneD_SymbolicAggregateApproximation(
    n_segments=n_paa_segments,
    alphabet_size_avg=n_sax_symbols_avg,
    alphabet_size_slope=n_sax_symbols_slope)
transformed_data = one_d_sax.fit_transform(dataset)
one_d_sax_dataset_inv = one_d_sax.inverse_transform(transformed_data)

# 2x2 figure: raw series, then each approximation overlaid on the (faded)
# original so reconstruction quality can be compared visually.
plt.figure()
plt.subplot(2, 2, 1)  # First, raw time series
plt.plot(dataset[0].ravel(), "b-")
plt.title("Raw time series")

plt.subplot(2, 2, 2)  # Second, PAA
plt.plot(dataset[0].ravel(), "b-", alpha=0.4)
plt.plot(paa_dataset_inv[0].ravel(), "b-")
plt.title("PAA")

plt.subplot(2, 2, 3)  # Then SAX
plt.plot(dataset[0].ravel(), "b-", alpha=0.4)
plt.plot(sax_dataset_inv[0].ravel(), "b-")
# Dashed horizontal lines mark the SAX quantization breakpoints.
for bp in _breakpoints(n_bins=n_sax_symbols):
    plt.axhline(y=bp, linestyle="dashed", color="k", alpha=0.4)
plt.title("SAX, %d symbols" % n_sax_symbols)

plt.subplot(2, 2, 4)  # Finally, 1d-SAX
plt.plot(dataset[0].ravel(), "b-", alpha=0.4)
plt.plot(one_d_sax_dataset_inv[0].ravel(), "b-")
plt.title("1d-SAX, %d symbols"
          "(%dx%d)" % (n_sax_symbols_avg * n_sax_symbols_slope,
                       n_sax_symbols_avg,
                       n_sax_symbols_slope))

plt.tight_layout()
plt.show()
|
Python
| 0
|
@@ -1478,22 +1478,8 @@
tion
-, _breakpoints
%0A%0Anu
@@ -2912,118 +2912,8 @@
-%22)%0A
-for bp in _breakpoints(n_bins=n_sax_symbols):%0A plt.axhline(y=bp, linestyle=%22dashed%22, color=%22k%22, alpha=0.4)%0A
plt.
|
64d83d2f9c0d955b9d6ef721c0d953158ebfb72c
|
Add API to manually set the path of an item. + Automatic creation of files when getPath() is called.
|
jasy/item/Abstract.py
|
jasy/item/Abstract.py
|
#
# Jasy - Web Tooling Framework
# Copyright 2010-2012 Zynga Inc.
# Copyright 2013-2014 Sebastian Werner
#
import os
from jasy import UserError
import jasy.core.File as File
class AbstractItem:
    """Base class for items tracked by a project.

    An item has an id of the form "<package>/<relpath>", an optional file
    system path (a single path, or a list of paths whose contents are
    concatenated), and lazily loaded text content with an optional filter
    callback (e.g. for CoffeeScript -> JavaScript translation).
    """

    id = None
    project = None
    kind = "jasy.Item"
    mtime = None

    __path = None
    __cache = None
    __text = None
    __textFilter = None

    @classmethod
    def fromPath(cls, project, relpath, package=None):
        """Create an item and derive its id from *relpath* and *package*."""
        item = cls(project)
        item.setId(item.generateId(relpath, package))
        return item

    def __init__(self, project, id=None, package=None):
        self.project = project

        if id:
            self.setId(id)

    def generateId(self, relpath, package):
        """Return the canonical id "<package>/<relpath>"."""
        return "%s/%s" % (package, relpath)

    def attach(self, path):
        """Attach a file system path (or list of paths) and record its mtime.

        For a list of paths the newest modification time wins.
        Raises UserError when any path cannot be stat'ed.
        """
        self.__path = path

        entry = None
        try:
            if type(path) is list:
                mtime = 0
                for entry in path:
                    entryTime = os.stat(entry).st_mtime
                    if entryTime > mtime:
                        mtime = entryTime
                self.mtime = mtime
            else:
                entry = path
                self.mtime = os.stat(entry).st_mtime
        except OSError as oserr:
            # Chain the original OSError so the root cause stays visible.
            raise UserError("Invalid item path: %s" % entry) from oserr

        return self

    def getId(self):
        """Returns a unique identify of the class. Typically as it is stored inside the project."""
        return self.id

    def setId(self, id):
        self.id = id
        return self

    def getProject(self):
        """Returns the project which the class belongs to"""
        return self.project

    def getPath(self):
        """Returns the exact position of the class file in the file system."""
        return self.__path

    def getModificationTime(self):
        """Returns last modification time of the class"""
        return self.mtime

    def setText(self, text):
        """Stores text from custom reader"""
        self.__text = text

    def saveText(self, text, path, encoding="utf-8"):
        """
        Saves the given text under the given path and stores both for future access

        This is mainly useful for "virtual" files which are not edited by the developer
        but which are created dynamically during runtime.
        """
        self.__text = text
        self.__path = path

        # Only touch the file (and its mtime) when the content actually changed.
        if not File.exists(path) or File.read(path) != text:
            File.write(path, text)

        self.mtime = os.stat(path).st_mtime

    def getText(self, encoding="utf-8"):
        """
        Reads the file (as UTF-8) and returns the text
        """
        if self.__text is not None:
            if self.__textFilter is not None:
                return self.__textFilter(self.__text, self)
            else:
                return self.__text

        if self.__path is None:
            return None

        if type(self.__path) == list:
            # Concatenate all files; close each handle deterministically
            # instead of leaking it until garbage collection.
            parts = []
            for filename in self.__path:
                with open(filename, mode="r", encoding=encoding) as fileObj:
                    parts.append(fileObj.read())
            text = "".join(parts)
        else:
            with open(self.__path, mode="r", encoding=encoding) as fileObj:
                text = fileObj.read()

        if self.__textFilter is not None:
            return self.__textFilter(text, self)
        else:
            return text

    def setTextFilter(self, filterCallback):
        """
        Sets text filter callback that is called on getText().

        With this callback e.g. transformations from CoffeeScript to JavaScript are possible.
        The callback gets two parameter (text, ItemClass)
        """
        self.__textFilter = filterCallback

    def getChecksum(self, mode="rb"):
        """
        Returns the SHA1 checksum of the item
        """
        # Close the file handle deterministically instead of leaking it.
        with open(self.getPath(), mode) as fileObj:
            return File.sha1(fileObj)

    # Map Python built-ins
    __repr__ = getId
    __str__ = getId
|
Python
| 0
|
@@ -1801,38 +1801,358 @@
tem.%22%22%22%0A
- return self.__
+%0A # Automatically write file (from eventually processed text content) when it does not exist%0A if self.__text is not None and not File.exists(self.__path):%0A File.write(self.__path, self.getText())%0A%0A return self.__path%0A%0A def setPath(self, path):%0A %22%22%22Sets the path for the item%22%22%22%0A self.__path =
path%0A%0A
|
a8679b6ac5392b80cd56fa2d67fd3bf3fb6f488f
|
Add distance handling to base class
|
turbustat/statistics/base_statistic.py
|
turbustat/statistics/base_statistic.py
|
from astropy.io import fits
import astropy.units as u
import numpy as np
from ..io import input_data
class BaseStatisticMixIn(object):
    """
    Common properties to all statistics
    """

    # Disable this flag when a statistic does not need a header
    need_header_flag = True

    # Disable this when the data property will not be used.
    no_data_flag = False

    @property
    def header(self):
        return self._header

    @header.setter
    def header(self, hdr):
        # Statistics that never use a header simply store None.
        if not self.need_header_flag:
            self._header = None
            return
        if not isinstance(hdr, fits.header.Header):
            raise TypeError("The header must be a"
                            " astropy.io.fits.header.Header.")
        self._header = hdr

    @property
    def data(self):
        return self._data

    @data.setter
    def data(self, new_data):
        # Statistics that never use the data simply store None.
        if self.no_data_flag:
            self._data = None
            return
        if not isinstance(new_data, np.ndarray):
            raise TypeError("Data is not a numpy array.")
        self._data = new_data

    def input_data_header(self, data, header):
        '''
        Check if the header is given separately from the data type.
        '''
        if header is None:
            self.data, self.header = input_data(data)
        else:
            self.data = input_data(data, no_header=True)
            self.header = header

    @property
    def angular_equiv(self):
        # Forward/backward pixel <-> degree conversion pair.
        return [(u.pix, u.deg,
                 lambda x: x * float(self.ang_size.value),
                 lambda x: x / float(self.ang_size.value))]

    @property
    def ang_size(self):
        # Angular pixel scale taken from the FITS header.
        cdelt2 = self.header["CDELT2"]
        return np.abs(cdelt2) * u.deg

    def to_pixel(self, value):
        '''
        Convert from angular to pixel scale.
        '''
        if not isinstance(value, u.Quantity):
            raise TypeError("value must be an astropy Quantity object.")
        return value.to(u.pix, equivalencies=self.angular_equiv)
|
Python
| 0
|
@@ -1948,8 +1948,1107 @@
_equiv)%0A
+%0A @property%0A def distance(self):%0A return self._distance%0A%0A @distance.setter%0A def distance(self, value):%0A '''%0A Value must be a quantity with a valid distance unit. Will keep the%0A units given.%0A '''%0A%0A if not isinstance(value, u.Quantity):%0A raise TypeError(%22Value for distance must an astropy Quantity.%22)%0A%0A if not value.unit.is_equivalent(u.pc):%0A raise u.UnitConversionError(%22Given unit (%7B%7D) is not a valid unit%22%0A %22 of distance.%22)%0A%0A if not value.isscalar:%0A raise TypeError(%22Distance must be a scalar quantity.%22)%0A%0A self._distance = value%0A%0A @property%0A def distance_size(self):%0A return (self.ang_size *%0A self.distance).to(self.distance.unit,%0A equivalencies=u.dimensionless_angles())%0A%0A @property%0A def distance_equiv(self):%0A return %5B(u.pix, self.distance.unit,%0A lambda x: x * float(self.distance_size.value),%0A lambda x: x / float(self.distance_size.value))%5D%0A
|
6b4c775219aade049fbed21ca10eed16317c4f7f
|
move to new apis
|
users_ldap_groups/users_ldap_groups.py
|
users_ldap_groups/users_ldap_groups.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2012 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models
from openerp import fields
from openerp import api
import logging
import users_ldap_groups_operators
import inspect
class CompanyLDAPGroupMapping(models.Model):
    """One rule that maps an LDAP attribute check onto an OpenERP group."""
    _name = 'res.company.ldap.group_mapping'
    _rec_name = 'ldap_attribute'
    _order = 'ldap_attribute'

    def _get_operators(self):
        """Build the selection list for the `operator` field: every concrete
        operator class exported by users_ldap_groups_operators, excluding the
        abstract LDAPOperator base class."""
        operators = []
        members = inspect.getmembers(
            users_ldap_groups_operators,
            lambda cls:
            inspect.isclass(cls) and
            cls != users_ldap_groups_operators.LDAPOperator)
        for name, operator in members:
            # Selection tuples are (value, label); the class name serves as both.
            operators.append((name, name))
        return tuple(operators)

    ldap_id = fields.Many2one('res.company.ldap', 'LDAP server', required=True)
    ldap_attribute = fields.Char(
        'LDAP attribute', size=64,
        help='The LDAP attribute to check.\n'
        'For active directory, use memberOf.')
    operator = fields.Selection(
        _get_operators, 'Operator',
        help='The operator to check the attribute against the value\n'
        'For active directory, use \'contains\'', required=True)
    value = fields.Char(
        'Value', size=1024,
        help='The value to check the attribute against.\n'
        'For active directory, use the dn of the desired group',
        required=True)
    group = fields.Many2one(
        'res.groups', 'OpenERP group',
        help='The OpenERP group to assign', required=True)
class CompanyLDAP(models.Model):
    """Extends the LDAP server configuration with group mapping rules that
    are applied whenever a user logs in through LDAP."""
    _inherit = 'res.company.ldap'

    group_mappings = fields.One2many(
        'res.company.ldap.group_mapping',
        'ldap_id', 'Group mappings',
        help='Define how OpenERP groups are assigned to ldap users')
    only_ldap_groups = fields.Boolean(
        'Only ldap groups',
        help='If this is checked, manual changes to group membership are '
        'undone on every login (so OpenERP groups are always synchronous '
        'with LDAP groups). If not, manually added groups are preserved.')

    _default = {
        'only_ldap_groups': False,
    }

    # NOTE(review): decorated with @api.multi but still using the old-API
    # signature (cr, uid, context) — confirm which API generation this
    # override is meant to target.
    @api.multi
    def get_or_create_user(self, cr, uid, conf, login, ldap_entry,
                           context=None):
        """On login, create/fetch the user, then (re)assign OpenERP groups
        according to the configured LDAP group mappings."""
        user_id = super(CompanyLDAP, self).get_or_create_user(
            cr, uid, conf, login, ldap_entry, context)
        if not user_id:
            return user_id
        logger = logging.getLogger('users_ldap_groups')
        mappingobj = self.pool.get('res.company.ldap.group_mapping')
        userobj = self.pool.get('res.users')
        conf_all = self.read(cr, uid, conf['id'], ['only_ldap_groups'])
        if(conf_all['only_ldap_groups']):
            # Wipe existing membership first so groups mirror LDAP exactly.
            logger.debug('deleting all groups from user %d' % user_id)
            userobj.write(
                cr, uid, [user_id], {'groups_id': [(5, )]}, context=context)
        # Apply every mapping configured for this LDAP server.
        for mapping in mappingobj.read(cr, uid, mappingobj.search(
                cr, uid, [('ldap_id', '=', conf['id'])]), []):
            operator = getattr(users_ldap_groups_operators,
                               mapping['operator'])()
            logger.debug('checking mapping %s' % mapping)
            if operator.check_value(ldap_entry, mapping['ldap_attribute'],
                                    mapping['value'], conf, self, logger):
                logger.debug('adding user %d to group %s' %
                             (user_id, mapping['group'][1]))
                userobj.write(cr, uid, [user_id],
                              {'groups_id': [(4, mapping['group'][0])]},
                              context=context)
        return user_id
|
Python
| 0
|
@@ -3058,12 +3058,12 @@
pi.m
-ulti
+odel
%0A
@@ -3091,25 +3091,16 @@
er(self,
- cr, uid,
conf, l
@@ -3115,17 +3115,18 @@
ap_entry
-,
+):
%0A
@@ -3130,41 +3130,56 @@
- context=None):
+id_ = conf%5B'id'%5D%0A this = self.browse(id_)
%0A
@@ -3249,25 +3249,16 @@
- cr, uid,
conf, l
@@ -3273,25 +3273,16 @@
ap_entry
-, context
)%0A
@@ -3338,124 +3338,39 @@
-logger = logging.getLogger('users_ldap_groups')%0A mappingobj = self.pool.get('res.company.ldap.group_mapping')
+userobj = self.env%5B'res.users'%5D
%0A
@@ -3382,39 +3382,33 @@
user
-obj
=
+u
se
-lf.pool.get('res.
+robj.browse(
user
-s'
+_id
)%0A
@@ -3417,56 +3417,41 @@
-conf_all = self.read(cr, uid, conf%5B'id'%5D, %5B'only
+logger = logging.getLogger('users
_lda
@@ -3459,17 +3459,16 @@
_groups'
-%5D
)%0A
@@ -3475,19 +3475,14 @@
if
-(conf_all%5B'
+ self.
only
@@ -3493,19 +3493,16 @@
p_groups
-'%5D)
:%0A
@@ -3596,34 +3596,8 @@
ite(
-%0A cr, uid,
%5Buse
@@ -3629,25 +3629,8 @@
)%5D%7D
-, context=context
)%0A%0A
@@ -3655,114 +3655,68 @@
in
-mappingobj.read(cr, uid, mappingobj.search(%0A cr, uid, %5B('ldap_id', '=', conf%5B'id'%5D)%5D), %5B%5D):
+this.group_mappings:%0A operator = mapping.operator
%0A
@@ -3775,47 +3775,16 @@
ors,
-%0A
mapping
%5B'op
@@ -3783,18 +3783,17 @@
ping
-%5B'
+.
operator
'%5D)(
@@ -3788,18 +3788,16 @@
operator
-'%5D
)()%0A
@@ -4110,20 +4110,19 @@
ping
-%5B'
+.
group
-'%5D%5B1%5D
+.name
))%0A
@@ -4144,68 +4144,15 @@
user
-obj
.write(
-cr, uid, %5Buser_id%5D,%0A
%7B'gr
@@ -4177,70 +4177,20 @@
ping
-%5B'
+.
group
-'%5D%5B0%5D)%5D%7D,%0A context=context
+.id)%5D%7D
)%0A
|
dadd800384358356542ccc49bbdad1ae54006cfc
|
Fix test_Bucket.BucketDataTests to test `needed` attribute.
|
lib/bridgedb/test/test_Bucket.py
|
lib/bridgedb/test/test_Bucket.py
|
# -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :copyright: (c) 2007-2014, The Tor Project, Inc.
# (c) 2007-2014, all entities within the AUTHORS file
# :license: 3-Clause BSD, see LICENSE for licensing information
"""Unittests for the :mod:`bridgedb.Bucket` module.
These tests are meant to ensure that the :mod:`bridgedb.Bucket` module is
functioning as expected.
"""
from __future__ import print_function
from io import StringIO
import sure
from sure import this
from sure import the
from sure import expect
from bridgedb import Bucket
from twisted.trial import unittest
class BucketDataTest(unittest.TestCase):
    """Tests for :class:`bridgedb.Bucket.BucketData`."""

    def test_alloc_some_of_the_bridges(self):
        """Set the needed number of bridges"""
        alloc = 10
        distname = "test-distributor"
        bucket = Bucket.BucketData(distname, alloc)
        this(distname).should.be.equal(bucket.name)
        this(alloc).should.be.equal(bucket.needed)

    def test_alloc_all_the_bridges(self):
        """Set the needed number of bridges to the default"""
        alloc = '*'
        distname = "test-distributor"
        bucket = Bucket.BucketData(distname, alloc)
        this(distname).should.be.equal(bucket.name)
        # Fix: the original compared the raw '*' sentinel against the int
        # 1000000, which can never pass. Assert against the attribute the
        # constructor actually stores instead.
        this(alloc).should.be.equal(bucket.needed)
class BucketManagerTest(unittest.TestCase):
"""Tests for :class:`bridgedb.Bucket.BucketManager`."""
TEST_CONFIG_FILE = StringIO(unicode("""\
FILE_BUCKETS = { 'test1': 7, 'test2': 11 }
COLLECT_TIMESTAMPS = False
COUNTRY_BLOCK_FILE = []"""))
def setUp(self):
configuration = {}
TEST_CONFIG_FILE.seek(0)
compiled = compile(TEST_CONFIG_FILE.read(), '<string>', 'exec')
exec compiled in configuration
self.config = persistent.Conf(**configuration)
self.state = persistent.State(**config.__dict__)
self.bucket = Bucket.BucketManager(self.config)
|
Python
| 0
|
@@ -837,21 +837,22 @@
-alloc
+needed
= 10%0A
@@ -944,36 +944,39 @@
c)%0A this(
-dist
+bucket.
name).should.be.
@@ -973,39 +973,36 @@
should.be.equal(
-bucket.
+dist
name)%0A th
@@ -996,37 +996,45 @@
e)%0A this(
-alloc
+bucket.needed
).should.be.equa
@@ -1035,23 +1035,16 @@
e.equal(
-bucket.
needed)%0A
@@ -1156,21 +1156,22 @@
-alloc
+needed
= '*'%0A
@@ -1272,20 +1272,23 @@
this(
-dist
+bucket.
name).sh
@@ -1301,23 +1301,20 @@
e.equal(
-bucket.
+dist
name)%0A
@@ -1324,21 +1324,29 @@
this(
-alloc
+bucket.needed
).should
@@ -1359,15 +1359,14 @@
ual(
-1000000
+needed
)%0A%0A%0A
|
eb856e854c3b6f94f49db6de41c3a5af758494b3
|
Change in forbidden_view_config in Pyramid 1.5a3
|
usingnamespace/views/authentication.py
|
usingnamespace/views/authentication.py
|
import logging
log = logging.getLogger(__name__)
from pyramid.view import (
view_config,
view_defaults,
forbidden_view_config,
)
from pyramid.security import (
remember,
forget,
authenticated_userid
)
from pyramid.httpexceptions import HTTPSeeOther
from deform import ValidationFailure
from ..forms.user import (
LoginForm,
)
@view_defaults(context='..traversal.ManagementRoot', route_name='management')
class Authentication(object):
    """Authentication provides views for things related to authentication"""

    def __init__(self, context, request):
        """Initialises the view class

        :context: The traversal context
        :request: The current request
        """
        self.context = context
        self.request = request

    @view_config(
        name='auth',
        renderer='management/authenticate.mako',
    )
    def authenticate(self):
        """Render the login form; bounce already-authenticated users onward."""
        if authenticated_userid(self.request) is not None:
            return HTTPSeeOther(location=self.request.route_url('management',
                traverse=self.request.session.get('next', '')))

        (schema, f) = LoginForm.create_form(request=self.request,
                action=self.request.current_route_url())

        return {
            'form': f.render(),
        }

    @view_config(
        name='auth',
        renderer='management/authenticate.mako',
        request_method='POST',
    )
    def authenticate_submit(self):
        """Validate the submitted login form and establish a session."""
        controls = self.request.POST.items()
        (schema, f) = LoginForm.create_form(request=self.request,
                action=self.request.current_route_url())

        try:
            appstruct = f.validate(controls)
            headers = remember(self.request, appstruct['email'])
            log.debug("Sending user to: {}".format(self.request.session.get('next', None)))
            return HTTPSeeOther(location=self.request.route_url(
                'management', traverse=self.request.session.get('next', '')),
                headers = headers)
        except ValidationFailure as e:
            # Surface CSRF failures on the form itself and refresh the token
            # so the re-rendered form can be submitted again.
            if e.field['csrf_token'].error is not None:
                e.field.error = e.field['csrf_token'].error
                e.field['csrf_token'].cstruct = self.request.session.get_csrf_token()
            return {
                'form': e.render(),
            }

    @view_config(
        name='deauth',
    )
    def deauth(self):
        """Log the user out and send them back to the management root."""
        headers = forget(self.request)
        return HTTPSeeOther(location=self.request.route_url('management',
            traverse=''), headers = headers)

    @forbidden_view_config(
        containment='..traversal.ManagementRoot',
        renderer='string',
    )
    def forbidden(self):
        """403 for authenticated users; otherwise redirect to the login view."""
        # Check to see if a user is already logged in...
        if authenticated_userid(self.request):
            # Fix: was the undefined bare name `request` (NameError at runtime).
            self.request.response.status_int = 403
            return {}

        if self.request.path != '/':
            # Remember where the user was heading so login can resume there.
            self.request.session['next'] = self.request.path

        return HTTPSeeOther(location=self.request.route_url(
            'management', traverse='auth'))
|
Python
| 0
|
@@ -295,16 +295,49 @@
import
+(%0A HTTPForbidden,%0A
HTTPSeeO
@@ -340,16 +340,27 @@
SeeOther
+,%0A )
%0A%0Afrom d
@@ -2718,26 +2718,16 @@
)%0A%0A @
-forbidden_
view_con
@@ -2723,32 +2723,67 @@
@view_config(%0A
+ context=HTTPForbidden,%0A
cont
|
c86c80854ac5ea60f43619610a21bfba9b1094f2
|
add ratio
|
example/simple_male_female_ratio.py
|
example/simple_male_female_ratio.py
|
import pydcard
def main():
male = 0
female = 0
for page_num in range(1, 41):
print ('Sending request to page %d' % page_num)
page = pydcard.getAllPage(page_num)
for post_thread in range(0, len(page)):
if page[post_thread].get('member').get('gender') == 'M':
male = male + 1
elif page[post_thread].get('member').get('gender') == 'F':
female = female + 1
else:
print ('Unknown gender')
print (page[post_thread].get('member').get('gender'))
print ('Female posts: %d, Male posts: %d' % (female, male))
if __name__ == '__main__':
main()
|
Python
| 0.000001
|
@@ -171,12 +171,14 @@
.get
-AllP
+_all_p
age(
@@ -635,16 +635,71 @@
, male))
+%0A print ('Female to Male ratio: %25f' %25 (female/male))
%0A%0Aif __n
|
eb41e61e80cfc29957edfa30221cbbca3d8e7958
|
Update variance_reduction.py
|
libact/query_strategies/variance_reduction.py
|
libact/query_strategies/variance_reduction.py
|
"""Variance Reduction"""
import copy
from multiprocessing import Pool
import numpy as np
from libact.base.interfaces import QueryStrategy
from libact.base.dataset import Dataset
import libact.models
from libact.query_strategies._variance_reduction import estVar
class VarianceReduction(QueryStrategy):
    """Variance Reduction

    This class implements Variance Reduction active learning algorithm [1]_.

    Parameters
    ----------
    model: {libact.model.LogisticRegression instance, 'LogisticRegression'}
        The model used for variance reduction to evaluate the variance.
        Only Logistic regression are supported now.

    sigma: float, >0, optional (default=100.0)
        1/sigma is added to the diagonal of the Fisher information matrix as
        regularization term.

    optimality : {'trace', 'determinant', 'eigenvalue'}, optional (default='trace')
        The type of optimal design.  The options are the trace, determinant, or
        maximum eigenvalue of the inverse Fisher information matrix.
        Only 'trace' are supported now.

    Attributes
    ----------

    References
    ----------
    .. [1] Schein, Andrew I., and Lyle H. Ungar. "Active learning for logistic
           regression: an evaluation." Machine Learning 68.3 (2007): 235-265.

    .. [2] Settles, Burr. "Active learning literature survey." University of
           Wisconsin, Madison 52.55-66 (2010): 11.
    """

    def __init__(self, *args, **kwargs):
        super(VarianceReduction, self).__init__(*args, **kwargs)
        model = kwargs.pop('model', None)
        # Accept either a model instance or the name of a class in
        # libact.models (instantiated with default arguments).
        if type(model) is str:
            self.model = getattr(libact.models, model)()
        else:
            self.model = model
        self.optimality = kwargs.pop('optimality', 'trace')
        self.sigma = kwargs.pop('sigma', 1.0)

    def Phi(self, PI, X, epi, ex, label_count, feature_count):
        """Estimated variance for candidate *ex* via the C extension estVar;
        1/sigma regularizes the Fisher information matrix.

        NOTE(review): label_count and feature_count are accepted but not
        forwarded to estVar — confirm whether they are still needed.
        """
        ret = estVar(self.sigma, PI, X, epi, ex)
        return ret

    def E(self, args):
        """Expected variance after querying point *qx*, averaged over the
        model's predicted label distribution for *qx*."""
        X, y, qx, clf, label_count = args
        sigmoid = lambda x: 1 / (1 + np.exp(-x))
        # Predicted label probabilities for the candidate point qx.
        query_point = sigmoid(clf.predict_real([qx]))
        feature_count = len(X[0])
        ret = 0.0
        for i in range(label_count):
            # Retrain with qx hypothetically labeled as class i ...
            clf = copy.copy(self.model)
            clf.train(Dataset(np.vstack((X, [qx])), np.append(y, i)))
            PI = sigmoid(clf.predict_real(np.vstack((X, [qx]))))
            # ... and weight the resulting variance estimate by P(y=i | qx).
            ret += query_point[-1][i] * self.Phi(PI[:-1], X, PI[-1], qx,
                    label_count, feature_count)
        return ret

    def make_query(self, n_jobs=1):
        """
        Calculate which point to query.

        Parameters
        ----------
        n_jobs : int, optional (default=1)
            The number of jobs to run in parallel.

        Returns
        -------
        ask_id : int
            The entry id of the sample wants to query.
        """
        labeled_entries = self.dataset.get_labeled_entries()
        Xlabeled, y = zip(*labeled_entries)
        Xlabeled = np.array(Xlabeled)
        y = list(y)

        unlabeled_entries = self.dataset.get_unlabeled_entries()
        unlabeled_entry_ids, X_pool = zip(*unlabeled_entries)

        label_count = self.dataset.get_num_of_labels()

        clf = copy.copy(self.model)
        clf.train(Dataset(Xlabeled, y))

        # Evaluate the expected variance of every unlabeled candidate in
        # parallel and query the one that minimizes it.
        p = Pool(n_jobs)
        errors = p.map(self.E, [(Xlabeled, y, x, clf, label_count) for x in
            X_pool])
        p.terminate()
        return unlabeled_entry_ids[errors.index(min(errors))]
|
Python
| 0.000001
|
@@ -759,16 +759,18 @@
atrix as
+ a
%0A
|
832525402091562950b1d14ccca40a68be5f306d
|
test that large big decimal roundtrips
|
tests/regression.py
|
tests/regression.py
|
## Copyright 2014 Cognitect. All Rights Reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS-IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
# This test suite verifies that issues corrected remain corrected.
import unittest
from transit.reader import Reader
from transit.writer import Writer
from transit.transit_types import Symbol, frozendict, true, false
from StringIO import StringIO
class RegressionBaseTest(unittest.TestCase):
    """Common base for the per-value regression tests generated below."""
    pass
def regression(name, value):
    """Generate a TestCase that round-trips *value* through the json
    protocol and register it at module level as ``test_<name>_json`` so
    the test runner discovers it."""
    class RegressionTest(RegressionBaseTest):
        def test_roundtrip(self):
            in_data = value
            io = StringIO()
            w = Writer(io, "json")
            w.write(in_data)
            r = Reader("json")
            out_data = r.read(StringIO(io.getvalue()))
            self.assertEqual(in_data, out_data)
    globals()["test_" + name + "_json"] = RegressionTest
# One generated roundtrip test per previously fixed issue (json protocol only).
regression("cache_consistency", ({"Problem?":true},
                                 Symbol("Here"),
                                 Symbol("Here")))
regression("one_pair_frozendict", frozendict({"a":1}))
regression("json_int_max", (2**53+100, 2**63+100))
regression("newline_in_string", "a\nb")
class BooleanTest(unittest.TestCase):
    """Even though we're roundtripping transit_types.true and
    transit_types.false now, make sure we can still write Python bools.
    Additionally, make sure we can still do basic logical evaluation on transit
    Boolean values.
    """
    def test_write_bool(self):
        # Write native bools and read transit booleans back, per protocol.
        for protocol in ("json", "json-verbose", "msgpack"):
            io = StringIO()
            w = Writer(io, protocol)
            w.write((True, False))
            r = Reader(protocol)
            io.seek(0)
            out_data = r.read(io)
            assert out_data[0] == true
            assert out_data[1] == false

    def test_basic_eval(self):
        assert true
        assert not false

    def test_or(self):
        assert true or false
        assert not (false or false)
        assert true or true

    def test_and(self):
        assert not (true and false)
        assert true and true
        assert not (false and false)
if __name__ == '__main__':
unittest.main()
|
Python
| 0.998839
|
@@ -821,16 +821,44 @@
, false%0A
+from decimal import Decimal%0A
from Str
@@ -1656,16 +1656,90 @@
%22a%5Cnb%22)
+%0Aregression(%22big_decimal%22, Decimal(%22190234710272.2394720347203642836434%22))
%0A%0Aclass
|
b3f33521bc7f837a7e4f055758cd035339446a98
|
Fix an undefined variable in DB code
|
utils/database.py
|
utils/database.py
|
import json
import copy
from zirc.wrappers import connection_wrapper
class Database(dict):
    """Holds a dict that contains all the information
    about the users and their last seen actions in a channel"""

    def __init__(self, bot):
        # Seed the in-memory DB from disk.
        with open("userdb.json") as f:
            super(Database, self).__init__(json.load(f))

        # Minimal adapter exposing just what connection_wrapper needs.
        class x(object):
            def __init__(self, bot):
                self.send = bot.send
                self._config = bot.config

        self.irc = connection_wrapper(x(bot))

    def change_attr(self, name, attr, value, channel=None):
        """Update *attr* of user *name*.

        When *channel* is given the value is set there directly; the
        attribute is then also propagated to every channel that knows the
        user (channels without an entry for *name* are skipped).
        """
        if channel is not None:
            self[channel][name][attr] = value
        for i in self:
            try:
                if attr == "host":
                    nick_ident = self[i][name]["hostmask"].split("@")[0]
                    # Fix: this read the misspelled name `nick_indent`,
                    # raising NameError whenever a host changed.
                    self[i][name]["hostmask"] = nick_ident + '@' + value
                    self[i][name][attr] = value
                elif attr == "ident":
                    # Rebuild the hostmask as nick!newident@host.
                    self[i][name]["hostmask"] = name + '!' + value + '@' + self[i][name]["host"]
                else:
                    self[i][name][attr] = value
            except KeyError:
                pass

    def remove_entry(self, event, nick):
        """Drop *nick* from the event's channel, falling back to a lookup
        by host when the nick key is missing."""
        try:
            del self[event.target][nick]
        except KeyError:
            for i in self[event.target].values():
                if i['host'] == event.source.host:
                    del self[event.target][i['hostmask'].split("!")[0]]
                    break

    def add_entry(self, channel, nick, hostmask, account):
        """Insert or refresh *nick* in *channel*; an existing 'seen'
        timestamp is preserved on update."""
        temp = {
            'hostmask': hostmask,
            'host': hostmask.split("@")[1],
            'account': account,
            'seen': None
        }
        if nick in self[channel]:
            del temp['seen']
            self[channel][nick].update(temp)
        else:
            self[channel][nick] = temp

    def get_user_host(self, channel, nick):
        """Return a "*!*@host" ban-style mask, asking the server via WHO
        when the host is not cached yet."""
        try:
            host = "*!*@" + self[channel][nick]['host']
        except KeyError:
            self.irc.send("WHO {0} nuhs%nhuac".format(channel))
            host = "*!*@" + self[channel][nick]['host']
        return host

    def flush(self):
        """Persist the DB to userdb.json."""
        with open('userdb.json', 'w') as f:
            # Use dict(self) to only get the actual dict object
            # Use copy.deepcopy() to avoid having errors due to the DB being updated while we flush it
            json.dump(copy.deepcopy(dict(self)), f, indent=2, separators=(',', ': '))
            f.write("\n")
|
Python
| 0.018335
|
@@ -858,17 +858,16 @@
= nick_i
-n
dent + '
|
2894f0e4ca62723c9b2507f2f0ec1cdf5ccc4774
|
Fix error messages
|
src/streamlink/plugins/twitcasting.py
|
src/streamlink/plugins/twitcasting.py
|
import hashlib
import logging
import re
from streamlink.buffers import RingBuffer
from streamlink.plugin import Plugin, PluginArgument, PluginArguments, PluginError, pluginmatcher
from streamlink.plugin.api import useragents, validate
from streamlink.plugin.api.websocket import WebsocketClient
from streamlink.stream.stream import Stream
from streamlink.stream.stream import StreamIO
from streamlink.utils.url import update_qsd
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
    r"https?://twitcasting\.tv/(?P<channel>[^/]+)"
))
class TwitCasting(Plugin):
    """Streamlink plugin for live streams on twitcasting.tv."""

    arguments = PluginArguments(
        PluginArgument(
            "password",
            sensitive=True,
            metavar="PASSWORD",
            help="Password for private Twitcasting streams."
        )
    )
    # Endpoint returning JSON stream metadata for a channel.
    _STREAM_INFO_URL = "https://twitcasting.tv/streamserver.php?target={channel}&mode=client"
    # Websocket URL template for the fMP4 stream.
    _STREAM_REAL_URL = "{proto}://{host}/ws.app/stream/{movie_id}/fmp4/bd/1/1500?mode={mode}"
    _STREAM_INFO_SCHEMA = validate.Schema({
        "movie": {
            "id": int,
            "live": bool
        },
        "fmp4": {
            "host": validate.text,
            "proto": validate.text,
            "source": bool,
            "mobilesource": bool
        }
    })

    def __init__(self, url):
        super().__init__(url)
        self.channel = self.match.group("channel")
        self.session.http.headers.update({'User-Agent': useragents.CHROME})

    def _get_streams(self):
        """Resolve the websocket stream URL for the channel.

        Raises PluginError when the stream is offline or no server info is
        available; returns a single-quality dict keyed by the chosen mode.
        """
        stream_info = self._get_stream_info()
        log.debug("Live stream info: {}".format(stream_info))

        if not stream_info["movie"]["live"]:
            raise PluginError("The live stream is offline")

        # Keys are already validated by schema above
        proto = stream_info["fmp4"]["proto"]
        host = stream_info["fmp4"]["host"]
        movie_id = stream_info["movie"]["id"]

        # Pick the best quality the server offers for this viewer.
        if stream_info["fmp4"]["source"]:
            mode = "main"  # High quality
        elif stream_info["fmp4"]["mobilesource"]:
            mode = "mobilesource"  # Medium quality
        else:
            mode = "base"  # Low quality

        if (proto == '') or (host == '') or (not movie_id):
            raise PluginError("No stream available for user {}".format(self.channel))

        real_stream_url = self._STREAM_REAL_URL.format(proto=proto, host=host, movie_id=movie_id, mode=mode)

        # Private streams authenticate via an md5 digest of the password.
        password = self.options.get("password")
        if password is not None:
            password_hash = hashlib.md5(password.encode()).hexdigest()
            real_stream_url = update_qsd(real_stream_url, {"word": password_hash})

        log.debug("Real stream url: {}".format(real_stream_url))

        return {mode: TwitCastingStream(session=self.session, url=real_stream_url)}

    def _get_stream_info(self):
        # Fetch and schema-validate the stream metadata for this channel.
        url = self._STREAM_INFO_URL.format(channel=self.channel)
        res = self.session.http.get(url)
        return self.session.http.json(res, schema=self._STREAM_INFO_SCHEMA)
class TwitCastingWsClient(WebsocketClient):
    """Websocket client that forwards received payloads into a RingBuffer."""

    def __init__(self, buffer: RingBuffer, *args, **kwargs):
        self.buffer = buffer
        super().__init__(*args, **kwargs)

    def on_close(self, *args, **kwargs):
        # Close the buffer so any blocked reader wakes up when the socket closes.
        super().on_close(*args, **kwargs)
        self.buffer.close()

    def on_message(self, wsapp, data: str) -> None:
        # Push the payload into the buffer; tear down the connection on failure.
        try:
            self.buffer.write(data)
        except Exception as err:
            log.error(err)
            self.close()
class TwitCastingReader(StreamIO):
    """Stream reader that pulls websocket data through a ring buffer."""

    def __init__(self, stream: "TwitCastingStream", timeout=None):
        super().__init__()
        self.session = stream.session
        self.stream = stream
        # Fall back to the session-wide stream timeout when none is given.
        self.timeout = timeout or self.session.options.get("stream-timeout")

        buffer_size = self.session.get_option("ringbuffer-size")
        self.buffer = RingBuffer(buffer_size)

        self.wsclient = TwitCastingWsClient(
            self.buffer,
            stream.session,
            stream.url,
            origin="https://twitcasting.tv/"
        )

    def open(self):
        self.wsclient.start()

    def close(self):
        self.wsclient.close()
        self.buffer.close()

    def read(self, size):
        # Block only while the websocket thread is still alive.
        return self.buffer.read(
            size,
            block=self.wsclient.is_alive(),
            timeout=self.timeout
        )
class TwitCastingStream(Stream):
    """Stream wrapper whose open() hands back a started TwitCastingReader."""

    def __init__(self, session, url):
        super().__init__(session)
        self.url = url

    def __repr__(self):
        return f"<TwitCastingStream({self.url!r})>"

    def open(self):
        """Create, start and return the reader for this stream."""
        stream_reader = TwitCastingReader(self)
        stream_reader.open()
        return stream_reader
__plugin__ = TwitCasting
|
Python
| 0.000277
|
@@ -211,20 +211,8 @@
port
- useragents,
val
@@ -1016,23 +1016,42 @@
+validate.optional(
%22movie%22
+)
: %7B%0A
@@ -1113,22 +1113,41 @@
+validate.optional(
%22fmp4%22
+)
: %7B%0A
@@ -1162,29 +1162,19 @@
%22host%22:
-validate.text
+str
,%0A
@@ -1192,21 +1192,11 @@
o%22:
-validate.text
+str
,%0A
@@ -1385,84 +1385,8 @@
el%22)
-%0A self.session.http.headers.update(%7B'User-Agent': useragents.CHROME%7D)
%0A%0A
@@ -1475,16 +1475,17 @@
g.debug(
+f
%22Live st
@@ -1496,26 +1496,16 @@
info: %7B
-%7D%22.format(
stream_i
@@ -1507,17 +1507,18 @@
eam_info
-)
+%7D%22
)%0A%0A
@@ -1527,16 +1527,48 @@
if not
+stream_info.get(%22movie%22) or not
stream_i
@@ -1646,24 +1646,113 @@
offline%22)%0A%0A
+ if not stream_info.get(%22fmp4%22):%0A raise PluginError(%22Login required%22)%0A%0A
# Ke
@@ -2259,16 +2259,17 @@
inError(
+f
%22No stre
@@ -2291,26 +2291,16 @@
r user %7B
-%7D%22.format(
self.cha
@@ -2303,17 +2303,18 @@
.channel
-)
+%7D%22
)%0A%0A
@@ -2672,16 +2672,17 @@
g.debug(
+f
%22Real st
@@ -2696,18 +2696,8 @@
l: %7B
-%7D%22.format(
real
@@ -2707,17 +2707,18 @@
ream_url
-)
+%7D%22
)%0A%0A
|
c5e5bb6b066c0f6afa50dc469a0dc5bbcea68b6d
|
Explicit return False
|
ynr/apps/candidates/models/popolo_extra.py
|
ynr/apps/candidates/models/popolo_extra.py
|
import datetime
from os.path import join
import re
from django.conf import settings
from django.contrib.admin.utils import NestedObjects
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import GenericRelation
from django.core.files.storage import DefaultStorage
from django.urls import reverse
from django.db import connection
from django.db import models
from django.utils.html import mark_safe
from django.utils import timezone
from dateutil import parser
from slugify import slugify
from candidates.models.auth import TRUSTED_TO_LOCK_GROUP_NAME
from elections.models import Election
"""Extensions to the base django-popolo classes for YourNextRepresentative
These are done via explicit one-to-one fields to avoid the performance
problems with multi-table inheritance; it's preferable to state when you
want a join or not.
http://stackoverflow.com/q/23466577/223092
"""
class UnsafeToDelete(Exception):
    """Raised when deleting an object that other objects still depend on."""
def raise_if_unsafe_to_delete(model):
    """Raise UnsafeToDelete when any other objects still reference *model*."""
    dependants = model_has_related_objects(model)
    if not dependants:
        return
    message = (
        "Trying to delete a {model} (pk={pk}) that other "
        "objects depend on ({related_models})"
    ).format(
        model=model._meta.model.__name__,
        pk=model.id,
        related_models=str(dependants),
    )
    raise UnsafeToDelete(message)
def model_has_related_objects(model):
    """Return the objects that depend on *model*, or False when none do."""
    collector = NestedObjects(using="default")
    collector.collect([model])
    nested = collector.nested()
    # nested()[0] is always the instance itself; a second element, when
    # present, holds the dependants.
    if len(nested) < 2:
        assert nested[0] == model
        return False
    return nested[1]
class BallotQueryset(models.QuerySet):
    """Custom queryset helpers for Ballot."""

    def get_previous_ballot_for_post(self, ballot):
        """
        Given a ballot object, get the previous (by election date) ballot for
        the ballot's post.

        Returns None when no earlier ballot exists for the post.

        :type ballot: Ballot
        """
        qs = self.filter(
            post=ballot.post,
            election__election_date__lt=ballot.election.election_date,
        ).order_by("election__election_date")
        # NOTE(review): ascending order means first() is the *earliest* earlier
        # ballot, not the most recent one — confirm this is intended.
        if qs.exists():
            return qs.first()
        return None
class Ballot(models.Model):
    """A single post being contested in a single election."""

    post = models.ForeignKey("popolo.Post", on_delete=models.CASCADE)
    election = models.ForeignKey(Election, on_delete=models.CASCADE)
    ballot_paper_id = models.CharField(blank=True, max_length=255, unique=True)
    candidates_locked = models.BooleanField(default=False)
    winner_count = models.IntegerField(blank=True, null=True)
    cancelled = models.BooleanField(default=False)

    # Re-exported so callers can catch Ballot.UnsafeToDelete.
    UnsafeToDelete = UnsafeToDelete

    objects = BallotQueryset.as_manager()

    class Meta:
        unique_together = ("election", "post")

    def __str__(self):
        fmt = "<Ballot ballot_paper_id='{e}'{l}{w}>"
        return fmt.format(
            e=self.ballot_paper_id,
            l=(" candidates_locked=True" if self.candidates_locked else ""),
            w=(
                " winner_count={}".format(self.winner_count)
                if (self.winner_count is not None)
                else ""
            ),
        )

    def get_absolute_url(self):
        return reverse("election_view", args=[self.ballot_paper_id])

    def safe_delete(self):
        """Delete this ballot, raising UnsafeToDelete if anything references it."""
        collector = NestedObjects(using=connection.cursor().db.alias)
        collector.collect([self])
        if len(collector.nested()) > 1:
            raise self.UnsafeToDelete(
                "Can't delete PEE {} with related objects".format(
                    self.ballot_paper_id
                )
            )

        self.delete()

    @property
    def cancelled_status_html(self):
        # Badge shown next to cancelled ballots; empty string otherwise.
        if self.cancelled:
            return mark_safe(
                '<abbr title="The poll for this election was cancelled">(❌ cancelled)</abbr>'
            )
        return ""

    @property
    def locked_status_html(self):
        if self.candidates_locked:
            return mark_safe(
                '<abbr title="Candidates verified and post locked">🔐</abbr>'
            )
        if self.has_lock_suggestion:
            # Fix: this value was previously computed and discarded, so the
            # suggestion badge never rendered; return it.
            return self.suggested_lock_html
        return ""

    @property
    def suggested_lock_html(self):
        return mark_safe(
            '<abbr title="Someone suggested locking this post">🔓</abbr>'
        )

    @property
    def sopn(self):
        # Latest nomination-paper document attached to this ballot.
        return self.officialdocument_set.filter(
            document_type=self.officialdocument_set.model.NOMINATION_PAPER
        ).latest()

    @property
    def has_results(self):
        # Either an attached result set or at least one elected candidacy.
        if getattr(self, "resultset", None):
            return True
        if self.membership_set.filter(elected=True).exists():
            return True
        return False

    @property
    def polls_closed(self):
        # TODO: City of London and other complex cases. Take this from EE?
        normal_poll_close_time = datetime.time(hour=22)
        poll_close_datetime = timezone.make_aware(
            datetime.datetime.combine(
                self.election.election_date, normal_poll_close_time
            )
        )
        return poll_close_datetime <= timezone.now()

    @property
    def has_lock_suggestion(self):
        return self.suggestedpostlock_set.exists()

    @property
    def get_winner_count(self):
        """
        Returns 0 rather than None if the winner_count is unknown. See comment in
        https://github.com/DemocracyClub/yournextrepresentative/pull/621#issuecomment-417252565

        :return:
        """
        if self.winner_count:
            return self.winner_count
        return 0

    def user_can_edit_membership(self, user):
        """
        Can a given user edit this ballot?
        """
        # users have to be logged in to an account
        if not user.is_authenticated:
            return False
        # If the ballot is unlocked, anyone can edit the memberships
        if not self.candidates_locked:
            return True
        # If a user is trusted to lock then they can edit memberships
        # TODO: Is this right?
        # https://github.com/DemocracyClub/yournextrepresentative/issues/991
        if user.groups.filter(name=TRUSTED_TO_LOCK_GROUP_NAME).exists():
            return True
        # Fix: be explicit instead of falling through and returning None.
        return False

    def people_not_standing_again(self, previous_ballot):
        """
        Returns a queryset of People objects that are known not to be standing
        again for this ballot.

        "Not standing again" means that the person stood in this ballot's post
        previously and someone has asserted that they're not standing again.

        The current data model only stores "not standing" against an election,
        not a post or ballot, so we have to filter all people not standing
        in this election by the post they previously stood in.
        """
        return self.election.persons_not_standing_tmp.filter(
            memberships__ballot=previous_ballot
        ).only("pk")
class PartySet(models.Model):
    """A named group of parties (e.g. a register) that posts draw from."""

    slug = models.CharField(max_length=256, unique=True)
    name = models.CharField(max_length=1024)
    parties = models.ManyToManyField(
        "popolo.Organization", related_name="party_sets"
    )

    def __str__(self):
        return self.name
|
Python
| 0.999999
|
@@ -6200,24 +6200,46 @@
eturn True%0A%0A
+ return False%0A%0A
def peop
|
8d438da54a15fa213c5b57899505e040a42548bf
|
Fix init for tests
|
linked_list.py
|
linked_list.py
|
from __future__ import unicode_literals
class LinkedList(object):
    """Class for a singly-linked list."""

    def __init__(self, iterable=()):
        # Fix: self.header must exist before insert() reads it — previously
        # the first insert (and repr of an empty list) raised AttributeError.
        self.header = None
        self.length = 0
        for val in iterable:
            self.insert(val)

    def __repr__(self):
        """Print LinkedList as Tuple literal."""
        vals = []
        current_node = self.header
        # Walk until the end of the chain; handles the empty list too.
        while current_node is not None:
            vals.append(current_node.val)
            current_node = current_node.next
        return str(tuple(vals))

    def insert(self, val):
        """Insert val at head of LinkedList."""
        self.header = Node(val, self.header)
        self.length += 1
        return None

    def pop(self):
        """Pop the first node off the head and return it."""
        to_return = self.header
        self.header = to_return.next
        to_return.next = None
        self.length -= 1
        return to_return

    def size(self):
        """Return current length of LinkedList."""
        return self.length

    def search(self, val):
        """Return the node containing val if present, else None."""
        node, _left = self._find(val)
        return node

    def remove(self, val):
        """Remove the node containing val from the list, return None."""
        node_to_remove, left_neighbor = self._find(val)
        if node_to_remove is None:
            # Fix: previously crashed with AttributeError when val was absent.
            return None
        if self.header == node_to_remove:
            self.pop()
        else:
            left_neighbor.next = node_to_remove.next
            node_to_remove.next = None
            # Fix: length was not decremented on non-head removals.
            self.length -= 1
        return None

    def display(self):
        """Print LinkedList as Tuple literal."""
        return self.__repr__()

    def _find(self, val):
        """Return (node containing val, its left neighbor); (None, tail) if absent."""
        left_node = None
        node_inspected = self.header
        while node_inspected is not None:
            if node_inspected.val == val:
                return node_inspected, left_node
            left_node = node_inspected
            node_inspected = node_inspected.next
        # Fix: previously walked off the end and raised AttributeError.
        return None, left_node
class Node(object):
    """A single linked-list element holding a value and a next pointer."""

    def __init__(self, val, next=None):
        self.val = val
        self.next = next

    def __repr__(self):
        # Display only the stored value.
        return "{0}".format(self.val)
|
Python
| 0.000007
|
@@ -35,16 +35,226 @@
erals%0A%0A%0A
+class Node(object):%0A%0A def __init__(self, val, next=None):%0A self.val = val%0A self.next = next%0A%0A def __repr__(self):%0A # Just display value%0A return %22%7Bval%7D%22.format(val=self.val)%0A%0A%0A
class Li
@@ -350,16 +350,43 @@
le=()):%0A
+ self.header = None%0A
@@ -420,24 +420,33 @@
val in
+reversed(
iterable
:%0A
@@ -437,16 +437,17 @@
iterable
+)
:%0A
@@ -2717,214 +2717,4 @@
ode%0A
-%0A%0Aclass Node(object):%0A%0A def __init__(self, val, next=None):%0A self.val = val%0A self.next = next%0A%0A def __repr__(self):%0A # Just display value%0A return %22%7Bval%7D%22.format(val=self.val)%0A
|
1bb4059a783fdbc8f397b596d5d5d5ed6d97a7b4
|
use radiasoft/beamsim-jupyter image
|
srv/salt/jupyterhub/jupyterhub_config.py
|
srv/salt/jupyterhub/jupyterhub_config.py
|
# JupyterHub configuration rendered by Salt; {{ ... }} spans are pillar values.
c.Authenticator.admin_users = {'{{ pillar.jupyterhub.admin_user }}',}
c.JupyterHub.confirm_no_ssl = True
c.JupyterHub.ip = '0.0.0.0'

import base64
# The cookie secret is stored base64-encoded in pillar data.
c.JupyterHub.cookie_secret = base64.b64decode('{{ pillar.jupyterhub.cookie_secret }}')
c.JupyterHub.proxy_auth_token = '{{ pillar.jupyterhub.proxy_auth_token }}'

# Allow both local and GitHub users; Useful for bootstrap
c.JupyterHub.authenticator_class = 'oauthenticator.GitHubOAuthenticator'
c.GitHubOAuthenticator.oauth_callback_url = 'https://jupyter.radiasoft.org/hub/oauth_callback'
c.GitHubOAuthenticator.client_id = '{{ pillar.jupyterhub.github_client_id }}'
c.GitHubOAuthenticator.client_secret = '{{ pillar.jupyterhub.github_client_secret }}'

# Spawn each single-user server in its own Docker container.
c.JupyterHub.spawner_class = 'dockerspawner.DockerSpawner'
c.DockerSpawner.use_internal_ip = True
from IPython.utils.localinterfaces import public_ips
# Advertise a routable IP so spawned containers can reach the hub.
c.JupyterHub.hub_ip = public_ips()[0]
# jupyter_client.localinterfaces
#container_image = radiasoft/jupyterhub-singleuser
|
Python
| 0
|
@@ -885,42 +885,24 @@
%5B0%5D%0A
-# jupyter_client.localinterfaces%0A#
+c.DockerSpawner.
cont
@@ -915,16 +915,17 @@
image =
+'
radiasof
@@ -930,26 +930,21 @@
oft/
-jupyterhub-singleuser
+beamsim-jupyter'
%0A
|
8d00922e10ff2703237aafb95d16f4352c1a03e4
|
Add replaces and replaced_by to BallotSerializer
|
ynr/apps/elections/api/next/serializers.py
|
ynr/apps/elections/api/next/serializers.py
|
from drf_yasg.utils import swagger_serializer_method
from rest_framework import serializers
from api.next.serializers import OrganizationSerializer
from candidates import models as candidates_models
from elections import models as election_models
from official_documents.api.next.serializers import OfficialDocumentSerializer
from popolo.api.next.serializers import (
CandidacyOnBallotSerializer,
MinimalPostSerializer,
)
class MinimalElectionSerializer(serializers.HyperlinkedModelSerializer):
    """Compact Election representation for embedding in other payloads."""

    class Meta:
        model = election_models.Election
        ref_name = None  # Tells swagger that this is always embedded
        fields = (
            "election_id",
            "url",
            "name",
            "election_date",
            "current",
            "party_lists_in_use",
            "created",
            "last_updated",
        )

    # Expose the model's slug under the public name "election_id".
    election_id = serializers.ReadOnlyField(
        source="slug", label="An election ID"
    )
    url = serializers.HyperlinkedIdentityField(
        view_name="election-detail",
        lookup_field="slug",
        lookup_url_kwarg="slug",
    )
    # "last_updated" mirrors the model's modified timestamp.
    last_updated = serializers.DateTimeField(source="modified")
class MinimalBallotSerializer(serializers.HyperlinkedModelSerializer):
    """Compact Ballot representation (URL + ballot paper ID) for embedding."""

    class Meta:
        model = candidates_models.Ballot
        ref_name = None  # Tells swagger that this is always embedded
        fields = ("url", "ballot_paper_id")

    url = serializers.HyperlinkedIdentityField(
        view_name="ballot-detail",
        lookup_field="ballot_paper_id",
        lookup_url_kwarg="ballot_paper_id",
    )
class ElectionTypeSerializer(serializers.Serializer):
    """An election type as a slug plus its human-readable label."""

    slug = serializers.CharField(max_length=50)
    # The label comes from the model's for_post_role field.
    label = serializers.CharField(max_length=100, source="for_post_role")
class ElectionSerializer(MinimalElectionSerializer):
    """Full Election representation, embedding its organization and ballots."""

    class Meta:
        model = election_models.Election
        fields = (
            "slug",
            "url",
            "name",
            "election_date",
            "current",
            "organization",
            "party_lists_in_use",
            "ballots",
        )

    organization = OrganizationSerializer(read_only=True)
    ballots = serializers.SerializerMethodField(read_only=True)

    @swagger_serializer_method(serializer_or_field=MinimalBallotSerializer)
    def get_ballots(self, obj):
        # Embed every ballot of this election in its minimal form.
        return MinimalBallotSerializer(
            obj.ballot_set, many=True, context=self.context
        ).data
class BallotSerializer(serializers.HyperlinkedModelSerializer):
    """Full Ballot representation with election, post, SOPN and candidacies."""

    class Meta:
        model = candidates_models.Ballot
        fields = (
            "url",
            "history_url",
            "results_url",
            "election",
            "post",
            "winner_count",
            "ballot_paper_id",
            "cancelled",
            "sopn",
            "candidates_locked",
            "candidacies",
            "created",
            "last_updated",
        )

    url = serializers.HyperlinkedIdentityField(
        view_name="ballot-detail",
        lookup_field="ballot_paper_id",
        lookup_url_kwarg="ballot_paper_id",
    )
    history_url = serializers.HyperlinkedIdentityField(
        view_name="ballot-history",
        lookup_field="ballot_paper_id",
        lookup_url_kwarg="ballot_paper_id",
    )
    election = MinimalElectionSerializer(read_only=True)
    post = MinimalPostSerializer(read_only=True)
    sopn = serializers.SerializerMethodField()
    candidacies = serializers.SerializerMethodField()
    results_url = serializers.HyperlinkedIdentityField(
        view_name="resultset-detail",
        lookup_field="ballot_paper_id",
        lookup_url_kwarg="ballot_paper_id",
    )
    # "last_updated" mirrors the model's modified timestamp.
    last_updated = serializers.DateTimeField(source="modified")

    @swagger_serializer_method(serializer_or_field=OfficialDocumentSerializer)
    def get_sopn(self, instance):
        """Return the first attached official document, or None when absent."""
        try:
            sopn = instance.officialdocument_set.all()[0]
        except IndexError:
            return None
        return OfficialDocumentSerializer(instance=sopn, read_only=True).data

    @swagger_serializer_method(serializer_or_field=CandidacyOnBallotSerializer)
    def get_candidacies(self, instance):
        """
        A candidacy represents a `Person` standing on this `Ballot`.

        This is different to simply including a `Person` object, as a person
        can stand more than once, and stand for different parties.
        """
        qs = instance.membership_set.all()
        if instance.election.party_lists_in_use:
            # Winners first, then by party and list position.
            order_by = [
                "-elected",
                "-result__is_winner",
                "-result__num_ballots",
                "party__name",
                "party_list_position",
            ]
            qs = qs.order_by(*order_by)
        return CandidacyOnBallotSerializer(
            qs, many=True, context=self.context
        ).data
|
Python
| 0
|
@@ -2935,26 +2935,288 @@
d%22,%0A
+ %22replaces%22,%0A %22replaced_by%22,%0A )%0A%0A replaces = serializers.SlugRelatedField(%0A read_only=True, slug_field=%22ballot_paper_id%22%0A )%0A replaced_by = serializers.SlugRelatedField(%0A read_only=True, slug_field=%22ballot_paper_id%22%0A
)
-%0A
%0A url = s
|
16fc80f36fa0bade1f4e5e7bef5595b3617a42bc
|
fix bartlett to pass participant not participant uuid
|
examples/bartlett1932/experiment.py
|
examples/bartlett1932/experiment.py
|
"""Bartlett's trasmission chain experiment from Remembering (1932)."""
from wallace.networks import Chain
from wallace.nodes import Source, ReplicatorAgent
from wallace import processes
from wallace.experiments import Experiment
import random
class Bartlett1932(Experiment):
    """Defines the experiment."""

    def __init__(self, session):
        """Set up the initial networks."""
        super(Bartlett1932, self).__init__(session)
        self.practice_repeats = 0
        self.experiment_repeats = 1
        self.agent = ReplicatorAgent
        # Factory: each network is a transmission chain of up to 3 nodes.
        self.network = lambda: Chain(max_size=3)
        if not self.networks():
            self.setup()
        self.save()

    def setup(self):
        """Seed every network with a story source on first access."""
        super(Bartlett1932, self).setup()

        # Setup for first time experiment is accessed
        for net in self.networks():
            if not net.nodes(type=Source):
                source = WarOfTheGhostsSource(network=net)
                net.add_source(source)

    def create_agent_trigger(self, agent, network):
        """When an agent is created, add it to the network and take a step."""
        network.add_agent(agent)
        processes.random_walk(network)

    def recruit(self):
        """Recruit participants to the experiment as needed."""
        # Keep recruiting one at a time while any network still has room.
        if self.networks(full=False):
            self.recruiter().recruit_participants(n=1)
        else:
            self.recruiter().close_recruitment()

    def bonus(self, participant_uuid=None):
        """Compute the bonus for the given participant.

        This is called automatically when a participant finishes,
        it is called immediately prior to the participant_submission_trigger
        """
        # Flat bonus for every participant; no per-participant logic yet.
        return 1
class WarOfTheGhostsSource(Source):
    """Transmit a story from Bartlett (1932)."""

    __mapper_args__ = {"polymorphic_identity": "war_of_the_ghosts_source"}

    def _contents(self):
        """Read the markdown source of a randomly chosen story from a file."""
        stories = [
            "ghosts.md",
            "cricket.md",
            "moochi.md",
            "outwit.md",
            "raid.md",
            "species.md",
            "tennis.md",
            "vagabond.md"
        ]
        story = random.choice(stories)
        with open("static/stimuli/{}".format(story), "r") as f:
            return f.read()
|
Python
| 0
|
@@ -1449,13 +1449,8 @@
pant
-_uuid
=Non
|
5ab014dc437707367806a97c52c0b48c8449c672
|
handle blank expanded urls
|
utils/json2csv.py
|
utils/json2csv.py
|
#!/usr/bin/env python
"""
A sample JSON to CSV program. Multivalued JSON properties are space delimited
CSV columns. If you'd like it adjusted send a pull request!
"""
import sys
import json
import codecs
import argparse
import fileinput
if sys.version_info[0] < 3:
try:
import unicodecsv as csv
except ImportError:
sys.exit("unicodecsv is required for python 2")
else:
import csv
def main():
    """Read tweet JSON (one object per line) and write CSV rows."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--output', '-o', help='write output to file instead of stdout')
    parser.add_argument('files', metavar='FILE', nargs='*', help='files to read, if empty, stdin is used')
    args = parser.parse_args()

    if args.output:
        # NOTE(review): 'wb' + codecs.open is a Python 2 idiom; on Python 3 the
        # csv module expects a text-mode file — confirm the target interpreter.
        sheet = csv.writer(codecs.open(args.output, 'wb', 'utf-8'))
    else:
        sheet = csv.writer(sys.stdout)

    # Header row first, then one row per input line.
    sheet.writerow(get_headings())
    files = args.files if len(args.files) > 0 else ('-',)
    for line in fileinput.input(files, openhook=fileinput.hook_encoded("utf-8")):
        tweet = json.loads(line)
        sheet.writerow(get_row(tweet))
def get_headings():
    """Column names for the output CSV, in the order get_row emits them."""
    tweet_columns = [
        'id',
        'tweet_url',
        'created_at',
        'user_screen_name',
        'text',
        'coordinates',
        'hashtags',
        'media',
        'urls',
        'favorite_count',
        'in_reply_to_screen_name',
        'in_reply_to_status_id',
        'in_reply_to_user_id',
        'lang',
        'place',
        'possibly_sensitive',
        'retweet_count',
        'reweet_id',
        'retweet_screen_name',
        'source',
    ]
    user_columns = [
        'user_id',
        'user_created_at',
        'user_default_profile_image',
        'user_description',
        'user_favourites_count',
        'user_followers_count',
        'user_friends_count',
        'user_listed_count',
        'user_location',
        'user_name',
        'user_statuses_count',
        'user_time_zone',
        'user_urls',
        'user_verified',
    ]
    return tweet_columns + user_columns
def get_row(t):
    """Flatten one tweet dict into a list matching get_headings() order."""
    get = t.get
    user = t.get('user').get
    row = [
        get('id_str'),
        tweet_url(t),
        get('created_at'),
        user('screen_name'),
        text(t),
        coordinates(t),
        hashtags(t),
        media(t),
        urls(t),
        get('favorite_count'),
        get('in_reply_to_screen_name'),
        get('in_reply_to_status_id'),
        get('in_reply_to_user_id'),
        get('lang'),
        place(t),
        get('possibly_sensitive'),
        get('retweet_count'),
        retweet_id(t),
        retweet_screen_name(t),
        get('source'),
        user('id_str'),
        user('created_at'),
        user('default_profile_image'),
        user('description'),
        user('favourites_count'),
        user('followers_count'),
        user('friends_count'),
        user('listed_count'),
        user('location'),
        user('name'),
        user('statuses_count'),
        user('time_zone'),
        user_urls(t),
        user('verified'),
    ]
    return row
def text(t):
    """Return the tweet body, preferring extended 'full_text' when present."""
    return t['full_text'] if 'full_text' in t else t['text']
def coordinates(t):
    """Return the point as two %f-formatted floats, or None when absent."""
    coords = t.get('coordinates')
    if coords:
        return '%f %f' % tuple(coords['coordinates'])
    return None
def hashtags(t):
    """Space-delimited hashtag texts from the tweet's entities."""
    return ' '.join(tag['text'] for tag in t['entities']['hashtags'])
def media(t):
    """Space-delimited expanded media URLs, preferring extended_entities."""
    if 'extended_entities' in t and 'media' in t['extended_entities']:
        items = t['extended_entities']['media']
    elif 'media' in t['entities']:
        items = t['entities']['media']
    else:
        return None
    return ' '.join(item['expanded_url'] for item in items)
def urls(t):
    """Space-delimited expanded URLs from the tweet's url entities.

    Some tweets carry url entities whose 'expanded_url' is None; substitute
    an empty string so the join doesn't raise TypeError.
    """
    return ' '.join([h['expanded_url'] or '' for h in t['entities']['urls']])
def place(t):
    """Full name of the tweet's place, or None when unset."""
    p = t['place']
    return p['full_name'] if p else None
def retweet_id(t):
    """id_str of the retweeted status, or None when this isn't a retweet."""
    rt = t.get('retweeted_status')
    if rt:
        return rt['id_str']
    return None
def retweet_screen_name(t):
    """Screen name of the retweeted user, or None when this isn't a retweet."""
    rt = t.get('retweeted_status')
    if rt:
        return rt['user']['screen_name']
    return None
def tweet_url(t):
    """Canonical twitter.com URL for the tweet."""
    screen_name = t['user']['screen_name']
    return "https://twitter.com/%s/status/%s" % (screen_name, t['id_str'])
def user_urls(t):
    """Space-delimited expanded URLs from the user's url entity, or None."""
    user = t.get('user')
    if not user:
        return None
    collected = []
    if 'entities' in user and 'url' in user['entities'] and 'urls' in user['entities']['url']:
        collected = [
            entry['expanded_url']
            for entry in user['entities']['url']['urls']
            if entry['expanded_url']
        ]
    return ' '.join(collected)
if __name__ == "__main__":
main()
|
Python
| 0.000011
|
@@ -3529,32 +3529,38 @@
%5B'expanded_url'%5D
+ or ''
for h in t%5B'ent
|
f6f88b0dfaac0edf6494e4691bf9100f20267d25
|
handle socket errors
|
mailmerge/__main__.py
|
mailmerge/__main__.py
|
"""
Mail merge using CSV database and jinja2 template email.
Command line interface implementation.
Andrew DeOrio <awdeorio@umich.edu>
"""
import os
import io
import sys
import csv
import configparser
import smtplib
import jinja2
import click
from . api import sendall
@click.command(context_settings={"help_option_names": ['-h', '--help']})
@click.version_option()  # Auto detect version
@click.option("--sample", is_flag=True, default=False,
              help="Create sample database, template email, and config")
@click.option("--dry-run/--no-dry-run", default=True,
              help="Don't send email, just print")
@click.option("--limit", is_flag=False, default=1,
              help="Limit the number of messages; default 1")
@click.option("--no-limit", is_flag=True, default=False,
              help="Do not limit the number of messages")
@click.option("--database", "database_path",
              default="mailmerge_database.csv",
              help="database CSV file name; default mailmerge_database.csv ")
@click.option("--template", "template_path",
              default="mailmerge_template.txt",
              help="template email file name; default mailmerge_template.txt")
@click.option("--config", "config_path",
              default="mailmerge_server.conf",
              help="configuration file name; default mailmerge_server.conf")
def cli(sample, dry_run, limit, no_limit,
        database_path, template_path, config_path):
    """Command line interface.

    Renders the template against each database row and sends (or, on a dry
    run, prints) the resulting messages, translating known failure modes
    into friendly messages and non-zero exits.
    """
    # pylint: disable=too-many-arguments

    # Create a sample email template and database if there isn't one already
    if sample:
        create_sample_input_files(
            template_path,
            database_path,
            config_path,
        )
        sys.exit(0)
    if not os.path.exists(template_path):
        print("Error: can't find template email " + template_path)
        print("Create a sample (--sample) or specify a file (--template)")
        sys.exit(1)
    if not os.path.exists(database_path):
        print("Error: can't find database_path " + database_path)
        print("Create a sample (--sample) or specify a file (--database)")
        sys.exit(1)

    # No limit is an alias for limit=-1
    if no_limit:
        limit = -1

    try:
        send_messages_generator = sendall(
            database_path,
            template_path,
            config_path,
            limit,
            dry_run,
        )
        for _, _, message, i in send_messages_generator:
            print(">>> message {}".format(i))
            print(message.as_string())
            print(">>> sent message {}".format(i))
    except jinja2.exceptions.TemplateError as err:
        print(">>> Error in Jinja2 template: {}".format(err))
        sys.exit(1)
    except csv.Error as err:
        print(">>> Error reading CSV file: {}".format(err))
        sys.exit(1)
    except smtplib.SMTPAuthenticationError as err:
        print(">>> Authentication error: {}".format(err))
        sys.exit(1)
    except configparser.Error as err:
        print(">>> Error reading config file {}: {}".format(
            config_path, err))
        sys.exit(1)
    except smtplib.SMTPException as err:
        # NOTE(review): unlike the branches above, this one does not exit
        # non-zero and falls through to the hints below — confirm intended.
        print(">>> Error sending message", err, sep=' ', file=sys.stderr)

    # Hints for user
    if not no_limit:
        print(">>> Limit was {} messages. ".format(limit) +
              "To remove the limit, use the --no-limit option.")
    if dry_run:
        print((">>> This was a dry run. "
               "To send messages, use the --no-dry-run option."))
if __name__ == "__main__":
# pylint: disable=no-value-for-parameter
cli()
def create_sample_input_files(template_path,
                              database_path,
                              config_path):
    """Create sample template email and database.

    Writes a starter template, CSV database and server config to the given
    paths, refusing (exit 1) to overwrite any file that already exists.
    """
    print("Creating sample template email {}".format(template_path))
    if os.path.exists(template_path):
        print("Error: file exists: " + template_path)
        sys.exit(1)
    with io.open(template_path, "w") as template_file:
        template_file.write(
            u"TO: {{email}}\n"
            u"SUBJECT: Testing mailmerge\n"
            u"FROM: My Self <myself@mydomain.com>\n"
            u"\n"
            u"Hi, {{name}},\n"
            u"\n"
            u"Your number is {{number}}.\n"
        )
    print("Creating sample database {}".format(database_path))
    if os.path.exists(database_path):
        print("Error: file exists: " + database_path)
        sys.exit(1)
    with io.open(database_path, "w") as database_file:
        database_file.write(
            u'email,name,number\n'
            u'myself@mydomain.com,"Myself",17\n'
            u'bob@bobdomain.com,"Bob",42\n'
        )
    print("Creating sample config file {}".format(config_path))
    if os.path.exists(config_path):
        print("Error: file exists: " + config_path)
        sys.exit(1)
    # The config ships with one active example and several commented-out ones.
    with io.open(config_path, "w") as config_file:
        config_file.write(
            u"# Example: GMail\n"
            u"[smtp_server]\n"
            u"host = smtp.gmail.com\n"
            u"port = 465\n"
            u"security = SSL/TLS\n"
            u"username = YOUR_USERNAME_HERE\n"
            u"#\n"
            u"# Example: Wide open\n"
            u"# [smtp_server]\n"
            u"# host = open-smtp.example.com\n"
            u"# port = 25\n"
            u"# security = Never\n"
            u"# username = None\n"
            u"#\n"
            u"# Example: University of Michigan\n"
            u"# [smtp_server]\n"
            u"# host = smtp.mail.umich.edu\n"
            u"# port = 465\n"
            u"# security = SSL/TLS\n"
            u"# username = YOUR_USERNAME_HERE\n"
            u"#\n"
            u"# Example: University of Michigan EECS Dept., with STARTTLS security\n"  # noqa: E501
            u"# [smtp_server]\n"
            u"# host = newman.eecs.umich.edu\n"
            u"# port = 25\n"
            u"# security = STARTTLS\n"
            u"# username = YOUR_USERNAME_HERE\n"
            u"#\n"
            u"# Example: University of Michigan EECS Dept., with no encryption\n"  # noqa: E501
            u"# [smtp_server]\n"
            u"# host = newman.eecs.umich.edu\n"
            u"# port = 25\n"
            u"# security = Never\n"
            u"# username = YOUR_USERNAME_HERE\n"
        )
    print("Edit these files, and then run mailmerge again")
|
Python
| 0.000001
|
@@ -176,16 +176,30 @@
ort csv%0A
+import socket%0A
import c
@@ -3268,16 +3268,129 @@
.stderr)
+%0A sys.exit(1)%0A except socket.error:%0A print(%22%3E%3E%3E Error connecting to server%22)%0A sys.exit(1)
%0A%0A #
|
6073610cb08e03e142b80dc7b1196ce359a1f55a
|
fix pylint import error
|
selfdrive/debug/toyota_eps_factor.py
|
selfdrive/debug/toyota_eps_factor.py
|
#!/usr/bin/env python3
import sys
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from tools.lib.route import Route
from tools.lib.logreader import MultiLogIterator
MIN_SAMPLES = 30*100
def to_signed(n, bits):
    """Reinterpret an unsigned *bits*-wide integer as two's-complement signed."""
    half_range = 1 << max(bits - 1, 0)
    if n >= half_range:
        return n - (1 << max(bits, 0))
    return n
def get_eps_factor(lr, plot=False):
    """Estimate the scale factor between commanded and measured EPS torque.

    Scans CAN messages for steering-command frames (address 0x2e4, src 128)
    and EPS torque frames (address 0x260, src 0) while engaged, collects one
    contiguous segment of at least MIN_SAMPLES pairs, fits eps ~ k * cmd with
    a zero-intercept linear regression and returns 1/k.
    """
    engaged = False
    torque_cmd, eps_torque = None, None
    cmds, eps = [], []
    for msg in lr:
        if msg.which() != 'can':
            continue

        for m in msg.can:
            if m.address == 0x2e4 and m.src == 128:
                # Byte 0 bit 0 is the engaged flag; bytes 1-2 the torque command.
                engaged = bool(m.dat[0] & 1)
                torque_cmd = to_signed((m.dat[1] << 8) | m.dat[2], 16)
            elif m.address == 0x260 and m.src == 0:
                # Bytes 5-6 carry the measured EPS torque.
                eps_torque = to_signed((m.dat[5] << 8) | m.dat[6], 16)

        if engaged and torque_cmd is not None and eps_torque is not None:
            cmds.append(torque_cmd)
            eps.append(eps_torque)
        else:
            # Disengaged: keep the segment if long enough, otherwise restart.
            if len(cmds) > MIN_SAMPLES:
                break
            cmds, eps = [], []

    if len(cmds) < MIN_SAMPLES:
        raise Exception("too few samples found in route")

    lm = linear_model.LinearRegression(fit_intercept=False)
    lm.fit(np.array(cmds).reshape(-1, 1), eps)
    scale_factor = 1./lm.coef_[0]

    if plot:
        # Overlay rescaled measurements on the commands for a visual check.
        plt.plot(np.array(eps)*scale_factor)
        plt.plot(cmds)
        plt.show()
    return scale_factor
if __name__ == "__main__":
r = Route(sys.argv[1])
lr = MultiLogIterator(r.log_paths(), wraparound=False)
n = get_eps_factor(lr, plot="--plot" in sys.argv)
print("EPS torque factor: ", n)
|
Python
| 0.000001
|
@@ -110,16 +110,47 @@
ar_model
+ # pylint: disable=import-error
%0A%0Afrom t
|
a6f95b71030026693683588287f8c54bbd7e3ee8
|
use persistor to upload trained model to s3
|
src/trainers/spacy_sklearn_trainer.py
|
src/trainers/spacy_sklearn_trainer.py
|
import spacy
import os, datetime, json
import cloudpickle
import util
from rasa_nlu.featurizers.spacy_featurizer import SpacyFeaturizer
from rasa_nlu.classifiers.sklearn_intent_classifier import SklearnIntentClassifier
from rasa_nlu.extractors.spacy_entity_extractor import SpacyEntityExtractor
class SpacySklearnTrainer(object):
    """Trains intent classification and entity extraction on spaCy features."""

    def __init__(self):
        self.name = "spacy_sklearn"
        self.training_data = None
        # Load spaCy English with only what's needed for featurization.
        self.nlp = spacy.load('en', tagger=False, parser=False, entity=False)
        self.featurizer = SpacyFeaturizer(self.nlp)
        self.intent_classifier = SklearnIntentClassifier()
        self.entity_extractor = SpacyEntityExtractor()

    def train(self, data):
        """Train the entity extractor and intent classifier on *data*."""
        self.training_data = data
        self.entity_extractor.train(self.nlp, data.entity_examples)
        self.train_intent_classifier(data.intent_examples)

    def train_intent_classifier(self, intent_examples):
        """Fit the sklearn classifier on bag-of-words vectors of the examples."""
        labels = [e["intent"] for e in intent_examples]
        sents = [e["text"] for e in intent_examples]
        y = self.intent_classifier.transform_labels(labels)
        X = self.featurizer.create_bow_vecs(sents)
        self.intent_classifier.train(X, y)

    def persist(self, path):
        """Write all model artifacts to a timestamped directory under *path*."""
        tstamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
        dirname = os.path.join(path, "model_" + tstamp)
        os.mkdir(dirname)
        data_file = os.path.join(dirname, "training_data.json")
        classifier_file = os.path.join(dirname, "intent_classifier.pkl")
        ner_dir = os.path.join(dirname, 'ner')
        os.mkdir(ner_dir)
        entity_extractor_config_file = os.path.join(ner_dir, "config.json")
        entity_extractor_file = os.path.join(ner_dir, "model")

        # Metadata records where each artifact lives so it can be reloaded.
        metadata = {
            "trained_at": tstamp,
            "training_data": data_file,
            "backend": self.name,
            "intent_classifier": classifier_file,
            "entity_extractor": ner_dir
        }

        with open(os.path.join(dirname, 'metadata.json'), 'w') as f:
            f.write(json.dumps(metadata, indent=4))
        with open(data_file, 'w') as f:
            f.write(self.training_data.as_json(indent=2))
        with open(classifier_file, 'w') as f:
            cloudpickle.dump(self.intent_classifier, f)
        with open(entity_extractor_config_file, 'w') as f:
            json.dump(self.entity_extractor.ner.cfg, f)
        self.entity_extractor.ner.model.dump(entity_extractor_file)
        # NOTE(review): region and bucket are hard-coded here — consider
        # making them configurable.
        util.sync_to_s3(dirname, 'us-east-1', 'rasa_nlu')
|
Python
| 0
|
@@ -51,16 +51,30 @@
dpickle%0A
+from rasa_nlu
import u
@@ -78,16 +78,16 @@
t util %0A
-
from ras
@@ -1259,16 +1259,31 @@
elf,path
+,persistor=None
):%0A
@@ -2522,63 +2522,80 @@
-%0A util.sync_to_s3(dirname,'us-east-1','rasa_nlu'
+if (persistor is not None):%0A persistor.send_tar_to_s3(dirname
)%0A
|
60ebebb4cc167a010904763c5a4ffed6347c029e
|
Fix license tab.
|
lms/djangoapps/labster_course_license/tabs.py
|
lms/djangoapps/labster_course_license/tabs.py
|
"""
Registers the Labster Course License for the edX platform.
"""
from django.conf import settings
from django.utils.translation import ugettext_noop
from xmodule.tabs import CourseTab
from student.roles import CourseCcxCoachRole
class LicenseCourseTab(CourseTab):
    """
    The representation of the LTI Passport course tab
    """

    type = "course_license"
    title = ugettext_noop("License")
    view_name = "labster_license_handler"
    # Dynamic: visibility is decided per-user via is_enabled().
    is_dynamic = True

    @classmethod
    def is_enabled(cls, course, user=None):
        """
        Returns true if CCX has been enabled and the specified user is a coach
        """
        # Without a user in context the tab is considered visible.
        if not user:
            return True
        if not settings.FEATURES.get('CUSTOM_COURSES_EDX', False) or not course.enable_ccx:
            # If ccx is not enable do not show License tab.
            return False
        # NOTE(review): `has_access` is used here but not imported in this
        # module's visible imports — this would raise NameError when reached;
        # confirm the missing `from courseware.access import has_access`.
        if has_access(user, 'staff', course) or has_access(user, 'instructor', course):
            # if user is staff or instructor then he can always see License tab.
            return True
        # Otherwise only the CCX coach for this course sees the tab.
        role = CourseCcxCoachRole(course.id)
        return role.has_user(user)
|
Python
| 0
|
@@ -226,16 +226,57 @@
achRole%0A
+from courseware.access import has_access%0A
%0A%0Aclass
@@ -673,53 +673,8 @@
%22%22%22%0A
- if not user:%0A return True%0A
|
d8e872c3d2aa141c29d993c08c207c1b7994b055
|
Add missing filter decorators
|
sequere/templatetags/sequere_tags.py
|
sequere/templatetags/sequere_tags.py
|
from django import template
from sequere.registry import registry
from sequere.models import (get_followers_count, get_followings_count)
register = template.Library()
@register.filter
def identifier(instance, arg=None):
    """Template filter: return the sequere identifier for *instance*.

    `arg` is accepted for filter-call compatibility but is unused.
    Registered with the template library so it is actually usable from
    templates (previously it was defined but never registered).
    """
    return registry.get_identifier(instance)
@register.filter
def followers_count(instance, identifier=None):
    """Template filter: number of followers of *instance*.

    Registered with the template library so it is usable from templates.
    """
    return get_followers_count(instance, identifier)
@register.filter
def followings_count(instance, identifier=None):
    """Template filter: number of users *instance* is following.

    Registered with the template library so it is usable from templates.
    """
    return get_followings_count(instance, identifier)
|
Python
| 0.000001
|
@@ -160,24 +160,41 @@
Library()%0A%0A%0A
+@register.filter%0A
def identifi
@@ -260,24 +260,41 @@
instance)%0A%0A%0A
+@register.filter%0A
def follower
@@ -384,16 +384,33 @@
fier)%0A%0A%0A
+@register.filter%0A
def foll
|
90103ce492a77070a0d6e30c5247b334c803b5e7
|
check access and execute as superuser
|
mail_move_message/mail_move_message_models.py
|
mail_move_message/mail_move_message_models.py
|
from openerp import api, models, fields, SUPERUSER_ID
from openerp.tools.translate import _
class wizard(models.TransientModel):
    """Wizard to move a mail message onto another record."""
    _name = 'mail_move_message.wizard'

    message_id = fields.Many2one('mail.message', string='Message')
    message_body = fields.Html(related='message_id.body', string='Message to move', readonly=True)
    # Optional "search by name" parent message; selecting one pre-fills
    # model/res_id below.
    parent_id = fields.Many2one('mail.message', string='Search by name')
    model_id = fields.Many2one('ir.model', string='Record type')
    res_id = fields.Integer('Record ID')
    record_url = fields.Char('Link to record', readonly=True)

    @api.onchange('parent_id')
    def on_change_parent_id(self):
        # Mirror the chosen parent's model/res_id into the target fields.
        if self.parent_id and self.parent_id.model:
            self.model_id = self.env['ir.model'].search([('model', '=', self.parent_id.model)])[0]
            self.res_id = self.parent_id.res_id
        else:
            self.model_id = None
            self.res_id = None

    @api.onchange('model_id', 'res_id')
    def on_change_res(self):
        # Keep the preview link in sync with the selected target record.
        if not ( self.model_id and self.res_id ):
            self.record_url = ''
            return
        self.record_url = '/web#id=%s&model=%s' % (self.res_id, self.model_id.model)

    @api.multi
    def move(self):
        """Re-attach the message to the chosen record and open it.

        Returns a client action: either a reload of the Archive feed (when
        no target record was chosen) or a form view of the target record.
        """
        for r in self:
            if r.parent_id:
                # Drop a stale parent that no longer matches the target.
                if not (r.parent_id.model == r.model_id.model and
                        r.parent_id.res_id == r.res_id):
                    r.parent_id = None
            r.message_id.write({'parent_id': r.parent_id.id, 'res_id': r.res_id, 'model': r.model_id.model})

            if not ( r.model_id and r.res_id ):
                # No target record: reload the mail Archive feed.
                obj = self.pool.get('ir.model.data').get_object_reference(self._cr, SUPERUSER_ID, 'mail', 'mail_archivesfeeds')[1]
                return {
                    'type' : 'ir.actions.client',
                    'name' : 'Archive',
                    'tag' : 'reload',
                    'params' : {'menu_id': obj},
                }
            # Open the record the message was moved to.
            return {
                'name': _('Record'),
                'view_type': 'form',
                'view_mode': 'form',
                'res_model': r.model_id.model,
                'res_id': r.res_id,
                'views': [(False, 'form')],
                'type': 'ir.actions.act_window',
            }
class mail_message(models.Model):
    _inherit = 'mail.message'

    def name_get(self, cr, uid, ids, context=None):
        """Extend name_get with "[model] ID n" when context asks for it.

        Without context['extended_name'] this defers to the standard
        implementation.
        """
        if not (context or {}).get('extended_name'):
            return super(mail_message, self).name_get(cr, uid, ids, context=context)
        if isinstance(ids, (list, tuple)) and not len(ids):
            return []
        # Accept a single id as well as a list.  (`long` — Python 2 file.)
        if isinstance(ids, (long, int)):
            ids = [ids]
        reads = self.read(cr, uid, ids, ['record_name','model', 'res_id'], context=context)
        res = []
        for record in reads:
            name = record['record_name']
            extended_name = ' [%s] ID %s' % (record.get('model', 'UNDEF'), record.get('res_id', 'UNDEF'))
            res.append((record['id'], name + extended_name))
        return res
|
Python
| 0
|
@@ -1173,16 +1173,645 @@
model)%0A%0A
+ @api.one%0A def check_access(self):%0A cr = self._cr%0A uid = self.env.user.id%0A operation = 'write'%0A context = self._context%0A%0A if not ( self.model_id and self.res_id ):%0A return True%0A model_obj = self.pool%5Bself.model_id.model%5D%0A mids = model_obj.exists(cr, uid, %5Bself.res_id%5D)%0A if hasattr(model_obj, 'check_mail_message_access'):%0A model_obj.check_mail_message_access(cr, uid, mids, operation, context=context)%0A else:%0A self.pool%5B'mail.thread'%5D.check_mail_message_access(cr, uid, mids, operation, model_obj=model_obj, context=context)%0A%0A
@api
@@ -1817,16 +1817,16 @@
i.multi%0A
-
def
@@ -1860,16 +1860,45 @@
n self:%0A
+ r.check_access()%0A
@@ -2076,24 +2076,24 @@
_id = None%0A%0A
-
@@ -2105,16 +2105,23 @@
sage_id.
+sudo().
write(%7B'
|
99c3eba0d6384cd42c90ef347823e6d66659d6e3
|
Fix typo in division operator
|
viper/interpreter/prelude/operators.py
|
viper/interpreter/prelude/operators.py
|
from ..value import ForeignCloVal
def plus(a: int, b: int) -> int:
    """Return the sum of *a* and *b*."""
    total = a + b
    return total


def minus(a: int, b: int) -> int:
    """Return *a* minus *b*."""
    difference = a - b
    return difference


def times(a: int, b: int) -> int:
    """Return the product of *a* and *b*."""
    product = a * b
    return product


def divide(a: int, b: int) -> float:
    """Return the true-division quotient of *a* by *b*."""
    quotient = a / b
    return quotient
# Prelude environment: maps operator symbols to foreign closures.
# NOTE: `divide` performs true division (returns float), so it is bound to
# '/'; the previous key '//' wrongly used the floor-division symbol.
env = {
    '+': ForeignCloVal(plus, {}),
    '-': ForeignCloVal(minus, {}),
    '*': ForeignCloVal(times, {}),
    '/': ForeignCloVal(divide, {}),
}
|
Python
| 0.014756
|
@@ -361,17 +361,16 @@
,%0A '/
-/
': Forei
|
340e872114363ddc041b2c5cdcc5769c9b793efe
|
Add test_select_with_seed_too_small_raise_Exception
|
tests/test_bingo.py
|
tests/test_bingo.py
|
"""Unit tests for cat2cohort."""
import unittest
from bingo import bingo
class TestBingoGenerator(unittest.TestCase):
    """Test methods from bingo."""

    def test_bingo_generator_has_default_size(self):
        # `size` is the total cell count, i.e. the side length squared.
        bingo_generator = bingo.BingoGenerator()
        expected = pow(bingo.DEFAULT_SIZE, 2)
        self.assertEquals(bingo_generator.size, expected)

    def test_bingo_generator_has_given_size(self):
        bingo_generator = bingo.BingoGenerator(4)
        self.assertEquals(bingo_generator.size, 16)

    def test_select_words_should_have_the_right_size(self):
        # The selection must contain exactly size**2 entries.
        test_size = 2
        bingo_generator = bingo.BingoGenerator(size=test_size)
        seed_list = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
        bingo_generator.words = seed_list
        selection = bingo_generator.select_words()
        self.assertEquals(len(selection), pow(test_size, 2))

    def test_select_words_should_return_words_from_the_seed_list(self):
        test_size = 2
        bingo_generator = bingo.BingoGenerator(size=test_size)
        seed_list = set(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i'])
        bingo_generator.words = seed_list
        selection = set(bingo_generator.select_words())
        # NOTE(review): this only asserts that some seed words were NOT
        # selected; it does not verify the selection is a subset of the seed
        # list (selection <= seed_list may be the intent) — confirm.
        self.assertTrue(seed_list.difference(selection))
|
Python
| 0.000008
|
@@ -1261,8 +1261,338 @@
ction))%0A
+%0A def test_select_with_seed_too_small_raise_Exception(self):%0A with self.assertRaises(ValueError):%0A test_size = 2%0A bingo_generator = bingo.BingoGenerator(size=test_size)%0A seed_list = %5B'a', 'b', 'c'%5D%0A bingo_generator.words = seed_list%0A bingo_generator.select_words()%0A
|
fa26aa1651532af0f2862451c2c7a7583be0ad6f
|
fix typo
|
llvm/target.py
|
llvm/target.py
|
import llvm
from llvmpy import api, extra
from io import BytesIO
import contextlib
from llvm.passes import TargetData
#===----------------------------------------------------------------------===
# Enumerations
#===----------------------------------------------------------------------===
# Byte-order constants.
BO_BIG_ENDIAN = 0
BO_LITTLE_ENDIAN = 1

# CodeModel
CM_DEFAULT = api.llvm.CodeModel.Model.Default
CM_JITDEFAULT = api.llvm.CodeModel.Model.JITDefault
CM_SMALL = api.llvm.CodeModel.Model.Small
CM_KERNEL = api.llvm.CodeModel.Model.Kernel
CM_MEDIUM = api.llvm.CodeModel.Model.Medium
CM_LARGE = api.llvm.CodeModel.Model.Large

# Reloc
RELOC_DEFAULT = api.llvm.Reloc.Model.Default
RELOC_STATIC = api.llvm.Reloc.Model.Static
RELOC_PIC = api.llvm.Reloc.Model.PIC_
RELOC_DYNAMIC_NO_PIC = api.llvm.Reloc.Model.DynamicNoPIC
def initialize_all():
    """Initialize every target LLVM was built with (info, MC, asm
    printers/parsers and disassemblers)."""
    api.llvm.InitializeAllTargets()
    api.llvm.InitializeAllTargetInfos()
    api.llvm.InitializeAllTargetMCs()
    api.llvm.InitializeAllAsmPrinters()
    api.llvm.InitializeAllDisassemblers()
    api.llvm.InitializeAllAsmParsers()
def initialize_target(target, noraise=False):
    """Initialize a single target by name.

    It is safe to initialize the same target multiple times.  Returns True
    on success.  An unknown target raises AttributeError unless *noraise*
    is set, in which case False is returned.
    """
    components = ['Target', 'TargetInfo', 'TargetMC', 'AsmPrinter', 'AsmParser']
    init_names = ['%s%s%s' % ('LLVMInitialize', target, c) for c in components]
    try:
        for init_name in init_names:
            getattr(api, init_name)()
    except AttributeError:
        if not noraise:
            raise
        return False
    return True
def print_registered_targets():
    '''
    Note: print directly to stdout
    '''
    api.llvm.TargetRegistry.printRegisteredTargetsForVersion()
def get_host_cpu_name():
    '''return the string name of the host CPU
    '''
    return api.llvm.sys.getHostCPUName()
def get_default_triple():
    '''return the target triple of the host in str-rep
    '''
    return api.llvm.sys.getDefaultTargetTriple()
class TargetMachine(llvm.Wrapper):
    """Wrapper around an LLVM target machine with convenience constructors
    and code-emission helpers."""

    @staticmethod
    def new(triple='', cpu='', features='', opt=2, cm=CM_DEFAULT,
            reloc=RELOC_DEFAULT):
        # Build a target machine for `triple`; empty triple/cpu default to
        # the host.  Raises llvm.LLVMException on lookup/creation failure.
        if not triple:
            triple = get_default_triple()
        if not cpu:
            cpu = get_host_cpu_name()
        with contextlib.closing(BytesIO()) as error:
            target = api.llvm.TargetRegistry.lookupTarget(triple, error)
            if not target:
                raise llvm.LLVMException(error.getvalue())
            if not target.hasTargetMachine():
                raise llvm.LLVMException(target, "No target machine.")
            target_options = api.llvm.TargetOptions.new()
            tm = target.createTargetMachine(triple, cpu, features,
                                            target_options,
                                            reloc, cm, opt)
            if not tm:
                raise llvm.LLVMException("Cannot create target machine")
            return TargetMachine(tm)

    @staticmethod
    def lookup(arch, cpu='', features='', opt=2, cm=CM_DEFAULT,
               reloc=RELOC_DEFAULT):
        '''create a targetmachine given an architecture name

        For a list of architectures,
            use: `llc -help`

        For a list of available CPUs,
            use: `llvm-as < /dev/null | llc -march=xyz -mcpu=help`

        For a list of available attributes (features),
            use: `llvm-as < /dev/null | llc -march=xyz -mattr=help`
        '''
        # lookupTarget fills `triple` for the chosen architecture.
        triple = api.llvm.Triple.new()
        with contextlib.closing(BytesIO()) as error:
            target = api.llvm.TargetRegistry.lookupTarget(arch, triple, error)
            if not target:
                raise llvm.LLVMException(error.getvalue())
            if not target.hasTargetMachine():
                raise llvm.LLVMException(target, "No target machine.")
            target_options = api.llvm.TargetOptions.new()
            tm = target.createTargetMachine(str(triple), cpu, features,
                                            target_options,
                                            reloc, cm, opt)
            if not tm:
                raise llvm.LLVMException("Cannot create target machine")
            return TargetMachine(tm)

    # Shorthand constructors for common architectures.
    @staticmethod
    def x86():
        return TargetMachine.lookup('x86')

    @staticmethod
    def x86_64():
        return TargetMachine.lookup('x86-64')

    @staticmethod
    def arm():
        return TargetMachine.lookup('arm')

    @staticmethod
    def thumb():
        return TargetMachine.lookup('thumb')

    def _emit_file(self, module, cgft):
        # Run the codegen passes over `module` and collect the emitted
        # output (bytes for object files, text otherwise).
        pm = api.llvm.PassManager.new()
        os = extra.make_raw_ostream_for_printing()
        pm.add(api.llvm.DataLayout.new(str(self.target_data)))
        # NOTE(review): the return value of addPassesToEmitFile is captured
        # but never checked — confirm whether a failure here should raise.
        failed = self._ptr.addPassesToEmitFile(pm, os, cgft)
        pm.run(module)
        CGFT = api.llvm.TargetMachine.CodeGenFileType
        if cgft == CGFT.CGFT_ObjectFile:
            return os.bytes()
        else:
            return os.str()

    def emit_assembly(self, module):
        '''returns byte string of the module as assembly code of the target machine
        '''
        CGFT = api.llvm.TargetMachine.CodeGenFileType
        return self._emit_file(module._ptr, CGFT.CGFT_AssemblyFile)

    def emit_object(self, module):
        '''returns byte string of the module as native code of the target machine
        '''
        CGFT = api.llvm.TargetMachine.CodeGenFileType
        return self._emit_file(module._ptr, CGFT.CGFT_ObjectFile)

    @property
    def target_data(self):
        '''get target data of this machine
        '''
        return TargetData(self._ptr.getDataLayout())

    @property
    def target_name(self):
        return self._ptr.getTarget().getName()

    @property
    def target_short_description(self):
        return self._ptr.getTarget().getShortDescription()

    @property
    def triple(self):
        return self._ptr.getTargetTriple()

    @property
    def cpu(self):
        return self._ptr.getTargetCPU()

    @property
    def feature_string(self):
        return self._ptr.getTargetFeatureString()

    @property
    def target(self):
        return self._ptr.getTarget()

    # MC-layer accessors only exist from LLVM 3.4 onwards.
    if llvm.version >= (3, 4):
        @property
        def reg_info(self):
            # Lazily created and cached on first access.
            if not getattr(self, '_mri', False):
                self._mri = self.target.createMCRegInfo(self.triple)
            return self._mri

        @property
        def subtarget_info(self):
            return self._ptr.getSubtargetImpl()

        @property
        def asm_info(self):
            return self._ptr.getMCAsmInfo()

        @property
        def instr_info(self):
            return self._ptr.getInstrInfo()

        @property
        def instr_analysis(self):
            # Lazily created and cached on first access.
            if not getattr(self, '_mia', False):
                self._mia = self.target.getMCInstrAnalysis(self.instr_info)
            return self._mia

        @property
        def disassembler(self):
            # Lazily created and cached on first access.
            if not getattr(self, '_dasm', False):
                self._dasm = self.target.createMCDisassembler(self.subtarget_info)
            return self._dasm

    def is_little_endian(self):
        return self.asm_info.isLittleEndian()
|
Python
| 0.999991
|
@@ -6887,19 +6887,22 @@
.target.
-get
+create
MCInstrA
|
36f2376a2f23b295bba8cc2af16577efd3fe03ff
|
Add a couple of snippets.
|
utils/snippets.py
|
utils/snippets.py
|
#!/usr/bin/env python
# A hacky script to do dynamic snippets.
import sys
import os
import datetime
# Snippet keys mapped to their expansions; date/time are rendered at launch.
snippet_map = {
    'date' : datetime.datetime.now().strftime('%b %d %G %I:%M%p '),
    'time' : datetime.datetime.now().strftime('%I:%M%p '),
    'sign' : 'Best,\nSameer',
}

# Show the snippet keys in rofi and read the user's choice back.
keys = '\n'.join(snippet_map.keys())
result = os.popen('printf "%s" | rofi -dmenu ' % keys)
selected_key = result.read().strip()

# If the menu was dismissed (empty selection) or an unknown key came back,
# exit quietly instead of crashing with a KeyError.
if selected_key not in snippet_map:
    sys.exit(0)

# The small sleep lets the rofi window close before xdotool types into the
# focused application.
os.system('sleep 0.1; xdotool type --clearmodifiers "$(printf "%s")"' % str(snippet_map[selected_key]))
|
Python
| 0.000002
|
@@ -121,17 +121,16 @@
'date'
-
: dateti
@@ -188,17 +188,16 @@
'time'
-
: dateti
@@ -245,21 +245,86 @@
'
-sign' : 'Best
+best': 'Best,%5CnSameer',%0A 'cheers': 'Cheers,%5CnSameer',%0A 'thanks': 'Thanks
,%5CnS
@@ -541,16 +541,21 @@
' %25 str(
+%0A
snippet_
@@ -574,9 +574,8 @@
_key%5D))%0A
-%0A
|
7d2c4140a74fa052eda6a6a19593321056c9eb80
|
convert prints to logging in jsonparser
|
src/unix/plugins/jsonparser/jsonparser.py
|
src/unix/plugins/jsonparser/jsonparser.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
JSON agent command parser main code module
"""
import nova_agent
import logging
import anyjson
class CommandNotFoundError(Exception):
    """Raised when a requested agent command is not registered."""

    def __init__(self, cmd):
        # Keep the offending command name for the error message.
        self.cmd = cmd

    def __str__(self):
        return "No such agent command '%s'" % self.cmd
class command_metaclass(type):
    # Metaclass that auto-registers command classes.  The first class seen
    # (the `command` base itself) receives the shared registry containers;
    # every class created after that (each concrete command plugin) is
    # appended to _cmd_classes for later instantiation by
    # command.create_instances().
    def __init__(cls, cls_name, bases, attrs):
        if not hasattr(cls, '_cmd_classes'):
            cls._cmd_classes = []
            cls._cmd_instances = []
            cls._cmds = {}
        else:
            cls._cmd_classes.append(cls)
class command(object):
    """
    The class that all command classes should inherit from
    """
    # Set the metaclass (registers subclasses; see command_metaclass).
    __metaclass__ = command_metaclass

    @classmethod
    def _get_commands(cls, inst):
        """Collect the methods of *inst* marked with @command_add."""
        cmds = {}
        for objname in dir(inst):
            obj = getattr(inst, objname)
            if getattr(obj, '_is_cmd', False):
                try:
                    cmds[obj._cmd_name] = obj
                except AttributeError:
                    # skip it if there's no _cmd_name
                    pass
        return cmds

    @classmethod
    def create_instances(cls, *args, **kwargs):
        """Instantiate every registered command class and index its
        commands into the shared registry."""
        for cmd_cls in cls._cmd_classes:
            inst = cmd_cls(*args, **kwargs)
            cls._cmd_instances.append(inst)
            cls._cmds.update(cls._get_commands(inst))

    @classmethod
    def command_names(cls):
        """Return the list of registered command names."""
        return [x for x in cls._cmds]

    @classmethod
    def run_command(cls, cmd_name, arg):
        """Run the named command with *arg*.

        Raises CommandNotFoundError for unknown names.  Only the registry
        lookup is guarded: previously the try also wrapped the command call
        itself, so a KeyError raised *inside* a handler was misreported as
        a missing command.
        """
        try:
            cmd = cls._cmds[cmd_name]
        except KeyError:
            raise CommandNotFoundError(cmd_name)
        return cmd(arg)
def command_add(cmd_name):
    """
    Decorator for command classes to use to add commands.

    Marks the decorated callable so command._get_commands() can find it
    and register it under *cmd_name*.
    """
    def decorate(func):
        func._is_cmd = True
        func._cmd_name = cmd_name
        return func
    return decorate
class command_parser(nova_agent.plugin):
"""
JSON command parser plugin for nova-agent
"""
type = "parser"
def __init__(self, *args, **kwargs):
super(command_parser, self).__init__(*args, **kwargs)
__import__("plugins.jsonparser.commands")
command.create_instances()
def encode_result(self, result):
our_format = {"returncode": str(result[0]),
"message": result[1]}
return {"data": anyjson.serialize(our_format)}
def parse_request(self, request):
try:
request = anyjson.deserialize(request['data'])
except Exception, e:
# log it
print "Missing data"
print e
return None
try:
cmd_name = request['name']
except KeyError:
print "Missing command name"
return None
try:
cmd_string = request['value']
except KeyError:
cmd_string = ''
logging.info("Received command '%s' with argument: '%s'" % \
(cmd_name, cmd_string))
try:
result = command.run_command(cmd_name, cmd_string)
except CommandNotFoundError, e:
logging.warn(str(e))
return self.encode_result((404, str(e)))
logging.info("'%s' completed with code '%s', message '%s'" % \
(cmd_name, result[0], result[1]))
return self.encode_result(result)
|
Python
| 0.002846
|
@@ -3219,70 +3219,70 @@
- #
log
- it%0A print %22Missing data%22%0A print e
+ging.error(%22Request dictionary contains no 'data' key%22)%25 %5C
%0A
@@ -3396,36 +3396,57 @@
-print %22Missing command name%22
+logging.error(%22Request is missing 'name' key%22)%25 %5C
%0A
|
fc4340629c53f11aaeb77a829b81e685fc7f9b9f
|
fix test for python2.6.
|
tests/test_cbson.py
|
tests/test_cbson.py
|
#!/usr/bin/env python
#
# Copyright 2013 10gen Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Performance tests comparing cbson and bson Python modules."""
import bson
import cbson
from datetime import datetime
import sys
import threading
import timeit
# Number of threads used by the "(threaded)" benchmark variants.
N_THREADS = 12

# Smallest valid BSON document: 4-byte length prefix plus terminating NUL.
EMPTY_BSON = '\x05\x00\x00\x00\x00'

# A document nested 25 levels deep: {'o': {'o': {...}}}.
o = {}
oo = o
for i in range(25):
    oo['o'] = {}
    oo = oo['o']
_25X_BSON = bson.BSON.encode(o)

# A flat document holding ten datetime values (keys '0'..'9').
DATETIME_BSON = bson.BSON.encode(dict((str(k),datetime.utcnow()) for k in [0,1,2,3,4,5,6,7,8,9]))
def compare_modules(name, bson_func, cbson_func, number=1, thread_count=None):
    """
    Runs two performance tests. One using the bson module and one
    using the cbson module. The resulting times are output for
    comparison.

    When thread_count is given, each module's benchmark runs concurrently
    on that many threads and wall-clock time is measured; otherwise the
    single-threaded timeit result is used.
    """
    sys.stdout.write('%-42s : %6d passes : ' % (name, number))
    sys.stdout.flush()

    # Each wrapper appends its timeit result to a shared list so the
    # threaded variant can collect per-thread timings.
    bson_wrapper = lambda v: v.append(timeit.timeit(bson_func, number=number))
    cbson_wrapper = lambda v: v.append(timeit.timeit(cbson_func, number=number))

    if thread_count is not None:
        results = []
        start = datetime.utcnow()
        threads = [threading.Thread(target=bson_wrapper, args=[results]) for i in range(thread_count)]
        [t.start() for t in threads]
        [t.join() for t in threads]
        end = datetime.utcnow()
        bson_time = (end - start).total_seconds()

        results = []
        start = datetime.utcnow()
        threads = [threading.Thread(target=cbson_wrapper, args=[results]) for i in range(thread_count)]
        [t.start() for t in threads]
        [t.join() for t in threads]
        end = datetime.utcnow()
        cbson_time = (end - start).total_seconds()
    else:
        results = []
        bson_wrapper(results)
        bson_time = sum(results)

        results = []
        cbson_wrapper(results)
        cbson_time = sum(results)

    speedup = bson_time / cbson_time
    # Python 2 print-chevron syntax; this file targets Python 2.
    print >> sys.stdout, 'bson %0.4lf : cbson %0.4lf : %0.3lf x faster' % (bson_time, cbson_time, speedup)
# Benchmark bodies: each pair exercises the same operation through the
# Python `bson` module and the C `cbson` module.

def bson_generate_object_id():
    bson.ObjectId()

def cbson_generate_object_id():
    cbson.ObjectId()

def bson_decode_empty():
    bson.BSON(EMPTY_BSON).decode()

def cbson_decode_empty():
    cbson.loads(EMPTY_BSON)

def bson_decode_25x_level():
    bson.BSON(_25X_BSON).decode()

def cbson_decode_25x_level():
    cbson.loads(_25X_BSON)

def bson_decode_datetime_bson():
    bson.BSON(DATETIME_BSON).decode()

def cbson_decode_datetime_bson():
    cbson.loads(DATETIME_BSON)
if __name__ == '__main__':
    # Run every benchmark pair, single-threaded and threaded.
    compare_modules('Generate ObjectId', bson_generate_object_id, cbson_generate_object_id, number=10000)
    compare_modules('Generate ObjectId (threaded)', bson_generate_object_id, cbson_generate_object_id, number=10000, thread_count=N_THREADS)
    compare_modules('Decode Empty BSON', bson_decode_empty, cbson_decode_empty, number=10000)
    compare_modules('Decode Empty BSON (threaded)', bson_decode_empty, cbson_decode_empty, number=10000, thread_count=N_THREADS)
    # Fixed: these two rows previously benchmarked the empty-BSON functions
    # under the "25x level" label.
    compare_modules('Decode 25x level BSON', bson_decode_25x_level, cbson_decode_25x_level, number=10000)
    compare_modules('Decode 25x level BSON (threaded)', bson_decode_25x_level, cbson_decode_25x_level, number=10000, thread_count=N_THREADS)
    compare_modules('Decode Datetime BSON', bson_decode_datetime_bson, cbson_decode_datetime_bson, number=10000)
    compare_modules('Decode Datetime BSON (threaded)', bson_decode_datetime_bson, cbson_decode_datetime_bson, number=10000, thread_count=N_THREADS)
|
Python
| 0
|
@@ -1007,16 +1007,293 @@
8,9%5D))%0A%0A
+def timediff(a, b):%0A delta = b - a%0A if hasattr(delta, 'total_seconds'):%0A return delta.total_seconds()%0A ret = abs(delta.days * 86400)%0A ret += abs(delta.seconds) %0A ret += abs(delta.microseconds / 1000000.0)%0A if b %3C a:%0A return -ret%0A return ret%0A%0A
def comp
@@ -2092,36 +2092,27 @@
e =
-(end - start).total_seconds(
+timediff(start, end
)%0A%0A
@@ -2399,36 +2399,27 @@
e =
-(end - start).total_seconds(
+timediff(start, end
)%0A
|
6dcde2c4931b0b8945e235005c28f7eb344cbebc
|
build LOAFER_ROUTE based on envvars
|
loafer/conf.py
|
loafer/conf.py
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
from prettyconf import config
class Settings(object):
    # Central loafer configuration.  Every value can be overridden through
    # environment variables via prettyconf's `config`.

    # Logging
    LOAFER_LOGLEVEL = config('LOAFER_LOGLEVEL', default='WARNING')
    LOAFER_LOG_FORMAT = config('LOAFER_LOG_FORMAT',
                               default='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    # Max concurrent jobs (asyncio)
    LOAFER_MAX_JOBS = config('LOAFER_MAX_JOBS', default=10)
    # Default value are determined from the number of machine cores
    LOAFER_MAX_THREAD_POOL = config('LOAFER_MAX_THREAD_POOL', default=None)

    # Translator
    LOAFER_DEFAULT_MESSAGE_TRANSLATOR_CLASS = 'loafer.message_translator.StringMessageTranslator'

    # Routes
    LOAFER_ROUTES = [
        {'name': 'example_route',
         'source': 'route_source',
         'handler': 'loafer.example.jobs.async_example_job',
         'message_translator': LOAFER_DEFAULT_MESSAGE_TRANSLATOR_CLASS},
    ]

    # Consumer
    # Currently, only AWS is supported, references:
    # http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-long-polling.html
    # http://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_ReceiveMessage.html
    # By default, SQS does not set long-polling (WaitTimeSeconds) and the MaxNumberOfMessages is 1
    # TODO: tweak default values for acceptable performance
    LOAFER_DEFAULT_CONSUMER_CLASS = 'loafer.aws.consumer.Consumer'
    LOAFER_DEFAULT_CONSUMER_OPTIONS = {'WaitTimeSeconds': 5,  # from 1-20
                                       'MaxNumberOfMessages': 5}  # from 1-10

    # Setting LOAFER_CONSUMERS is only needed when there's more than one consumer.
    # Otherwise, all routes will use the LOAFER_DEFAULT_CONSUMER_CLASS
    # and LOAFER_DEFAULT_MESSAGE_TRANSLATOR_CLASS automatically.
    # This is an example configuration and will not match anything (probably).
    LOAFER_CONSUMERS = [
        {'route_source': {'consumer_class': LOAFER_DEFAULT_CONSUMER_CLASS,
                          'consumer_options': LOAFER_DEFAULT_CONSUMER_OPTIONS}},
    ]

    def __init__(self, **defaults):
        # Only uppercase LOAFER_-prefixed keyword overrides are accepted, so
        # arbitrary kwargs cannot clobber unrelated attributes.
        if defaults:
            safe_defaults = {k: v for k, v in defaults.items()
                             if k.isupper() and k.startswith('LOAFER_')}
            self.__dict__.update(safe_defaults)


# Module-level singleton used by the rest of the package.
settings = Settings()
|
Python
| 0
|
@@ -744,80 +744,216 @@
e':
-'example_route',%0A 'source': 'route_source',%0A 'handler':
+config('LOAFER_DEFAULT_ROUTE_NAME', default='default'),%0A 'source': config('LOAFER_DEFAULT_ROUTE_SOURCE'),%0A 'handler': config('LOAFER_DEFAULT_ROUTE_HANDLER',%0A default=
'loa
@@ -987,16 +987,17 @@
ple_job'
+)
,%0A
@@ -1980,47 +1980,45 @@
ion
-and
+that
will
-not match anything (probably).
+be available in the future:
%0A
|
8887ac66a221b443215e7ab57a2f21b1521b167b
|
move docs to readme
|
utils/workflow.py
|
utils/workflow.py
|
"""
Helpers for my evolving workflow.
draft [art] "My super article"
creates a prepared md file with all the necessary settings to work on.
publish drafts/my-super-article.md
will make the necessary adjustments and publish it in the contents.
deploy [clean]
will create a [clean] build and push it online.
"""
from __future__ import print_function
import os
import subprocess
import sys
from datetime import datetime
from string import Template
from lektor.utils import slugify
# Paths resolved relative to this utils/ directory; drafts live outside the
# published content tree.
HERE = os.path.dirname(__file__)
PROJECT_PATH = os.path.join(HERE, '..')
DRAFTS_PATH = os.path.join(PROJECT_PATH, 'drafts')
CONTENT_PATH = os.path.join(PROJECT_PATH, 'content')
def draft():
    """Create a new draft markdown file from the article blueprint.

    Usage: draft "My super article" — the title is substituted into the
    blueprint template and the file is named after its slug.
    """
    title = sys.argv[1]
    with open(os.path.join(HERE, 'article-blueprint.md')) as f:
        content = f.read()
    rep = dict(title=title)
    content = Template(content).safe_substitute(rep)
    dst = os.path.join(DRAFTS_PATH, '%s.md' % slugify(title))
    # Refuse to overwrite an existing draft.
    assert not os.path.exists(dst), dst
    with open(dst, 'w') as f:
        f.write(content)
def publish():
    """Move a draft into the content tree as <slug>/contents.lr.

    Usage: publish drafts/my-super-article.md — substitutes today's date
    into the draft, creates the content directory and removes the draft.
    """
    srcPath = sys.argv[1]
    with open(srcPath) as f:
        content = f.read()
    rep = dict(date=datetime.now().strftime('%Y-%m-%d'))
    content = Template(content).safe_substitute(rep)
    slug = os.path.splitext(os.path.basename(srcPath))[0]
    containerPath = os.path.join(CONTENT_PATH, slug)
    # Refuse to overwrite an already-published article.
    assert not os.path.exists(containerPath), containerPath
    os.mkdir(containerPath)
    dst = os.path.join(containerPath, 'contents.lr')
    with open(dst, 'w') as f:
        f.write(content)
    os.remove(srcPath)
def deploy():
    """Build the site (or clean-rebuild with `deploy clean`) and push it."""
    wants_clean = len(sys.argv) > 2 and sys.argv[2] == 'clean'
    if wants_clean:
        build_cmd = ['lektor', 'clean', '--yes']
    else:
        build_cmd = ['lektor', 'build']
    print(subprocess.check_output(build_cmd))
    print(subprocess.check_output(['lektor', 'deploy']))
|
Python
| 0
|
@@ -1,332 +1,4 @@
-%22%22%22%0AHelpers for my evolving workflow.%0A%0A draft %5Bart%5D %22My super article%22%0A%0Acreates a prepared md file with all the necessary settings to work on.%0A%0A publish drafts/my-super-article.md%0A%0Awill make the necessary adjustments and publish it in the contents.%0A%0A deploy %5Bclean%5D%0A%0Awill create a %5Bclean%5D build and push it online.%0A%22%22%22%0A
from
|
692afa4421104109037b5bafbf5e3cdba3f54d52
|
Introduce Reggie to ui
|
src/victims_web/blueprints/ui/__init__.py
|
src/victims_web/blueprints/ui/__init__.py
|
# This file is part of victims-web.
#
# Copyright (C) 2013 The Victims Project
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Main web ui.
"""
import re
from flask import (
Blueprint, current_app, escape, render_template, helpers,
url_for, request, redirect, flash)
from flask.ext import login
from victims_web.cache import cache
from victims_web.config import SUBMISSION_GROUPS
from victims_web.errors import ValidationError
from victims_web.handlers.forms import \
SUBMISSION_FORMS, ArtifactSubmit, flash_errors
from victims_web.models import Hash, CoordinateDict
from victims_web.plugin.crosstalk import indexmon
from victims_web.submissions import submit, upload
from victims_web.util import groups
# Blueprint mounted at the application root.
ui = Blueprint(
    'ui', __name__,
    template_folder='templates',
    static_folder='static',
    static_url_path='/static/')  # Last argument needed since we register on /
def _is_hash(data):
"""
Verifies the hash is a sha1 hash.
"""
if re.match('^([a-zA-Z0-9]{128})$', data):
return True
return False
@ui.route('/', methods=['GET'])
def index():
    """Landing page, rendered from cached index-monitor data."""
    _cache_key = 'view/%s/get_data' % (request.path)

    @cache.cached(key_prefix=_cache_key)
    def get_data():
        indexmon.refresh(True)
        return indexmon.get_data()

    # When the monitor reports fresh data, drop both this page's cache and
    # the memoized hash listings so they are rebuilt.
    if indexmon.refreshed_flag:
        cache.delete(_cache_key)
        # make sure cached hashes for ui.hashes are cleared
        cache.delete_memoized(hashes)
        indexmon.refreshed_flag = False
    return render_template('index.html', **get_data())
@cache.memoize()
def hashes(groups):
    # Render released hashes for the given groups; memoized per argument
    # and invalidated from ui.index when the index monitor refreshes.
    hashes = Hash.objects(
        status='RELEASED', group__in=groups
    ).only('name', 'version', 'hashes.sha512.combined')
    return render_template('hashes.html', hashes=hashes)
@ui.route('/hashes/<group>/', methods=['GET'])
def hashes_singlegroup(group):
    """Render released hashes for a single submission group."""
    if group not in groups():
        # Unknown group: flash an error and render an empty listing.  The
        # message now matches what is actually shown (the old text claimed
        # all hashes were displayed while an empty list was rendered).
        flash(
            '%s is not a known group. No hashes to display.' % (group),
            'error')
        return render_template('hashes.html', hashes=[])
    return hashes([group])
@ui.route('/hashes/', methods=['GET'])
def hashes_multigroup():
    """Render hashes for the groups named in the comma-separated ``groups``
    query argument, defaulting to every known group."""
    raw = request.args.get('groups')
    if raw is None:
        # No argument: include all groups.
        selected = groups()
    else:
        selected = [str(part.strip()) for part in raw.split(',')]
    return hashes(selected)
@ui.route('/hash/<value>', methods=['GET'])
def onehash(value):
    """Show the detail page for a single combined sha512 hash value."""
    if _is_hash(value):
        a_hash = Hash.objects.get_or_404(hashes__sha512__combined=value)
        return render_template('onehash.html', hash=a_hash)
    else:
        # Not a plausible digest: bounce back to the listing with an error.
        flash('Not a valid hash', 'error')
        return redirect(url_for('ui.hashes_multigroup'))
def process_submission(form, group=None):
    """Validate and submit an uploaded archive for the given group.

    Reads CVE ids and group-specific coordinates from *form*, uploads the
    archive and records one submission per extracted file.  Errors are
    reported to the user via flash messages rather than raised.
    """
    try:
        # CVE ids arrive as one comma-separated field.
        cves = []
        for cve in form.cves.data.split(','):
            cves.append(cve.strip())

        if group is None:
            group = form.group.data

        # Collect the coordinate fields configured for this group.
        coordinates = CoordinateDict({
            coord: form._fields.get('%s' % coord).data.strip()
            for coord in SUBMISSION_GROUPS.get(group, [])
        })

        # remove any empty values
        coordinates = dict(
            (k, v)
            for k, v in coordinates.iteritems()
            if v is not None and len(v) > 0
        )

        # if no coordinates given, make None
        if len(coordinates) == 0:
            coordinates = None

        files = upload(group, request.files.get('archive', None), coordinates)
        for (ondisk, filename, suffix) in files:
            submit(
                login.current_user.username, ondisk, group, filename, suffix,
                cves, coordinates=coordinates
            )
        # Signal the index page to rebuild its cached data.
        current_app.config['INDEX_REFRESH_FLAG'] = True
        flash('Archive Submitted for processing', 'info')
    except ValueError, ve:
        flash(escape(ve.message), 'error')
    except ValidationError, ve:
        flash(escape(ve.message), 'error')
    except OSError, oe:
        # Do not leak filesystem details to the user; log instead.
        flash('Could not upload file due to a server side error', 'error')
        current_app.logger.debug(oe)
@ui.route('/submit/<group>/', methods=['GET', 'POST'])
@login.login_required
def submit_artifact(group):
    """Show (GET) or handle (POST) the artifact submission form for *group*."""
    # Use the group-specific form if one is registered, else the generic one.
    form = SUBMISSION_FORMS.get(group, ArtifactSubmit)()
    if form.validate_on_submit():
        process_submission(form, group)
        return redirect(url_for('ui.index'))
    elif request.method == 'POST':
        # POST that failed validation: surface the field errors to the user.
        flash_errors(form)
    return render_template(
        'submit_artifact.html', form=form, group=group)
@ui.route('/<page>.html', methods=['GET'])
def static_page(page):
    """Serve one of the fixed informational pages."""
    # These are the only 'static' pages
    if page not in ('about', 'client', 'bugs'):
        return helpers.NotFound()
    return render_template('%s.html' % page)
|
Python
| 0
|
@@ -1499,16 +1499,94 @@
on /%0A%0A%0A
+_GROUP_REGEX = '%3Cregex(%22%25s%22):group%3E' %25 ('%7C'.join(SUBMISSION_GROUPS.keys()))%0A%0A%0A
def _is_
@@ -2466,25 +2466,37 @@
/hashes/
-%3Cgroup%3E/'
+%25s/' %25 (_GROUP_REGEX)
, method
@@ -4809,17 +4809,29 @@
mit/
-%3Cgroup%3E/'
+%25s/' %25 (_GROUP_REGEX)
, me
|
377f2120b3474d131b02dab90b6e51c35deb0c74
|
Add comments
|
mathphys/constants.py
|
mathphys/constants.py
|
"""Constants module."""
import math as _math
from . import base_units as _u
# temporary auxiliary derived units
_volt = (_u.kilogram * _u.meter**2) / (_u.ampere * _u.second**2)
_coulomb = _u.second * _u.ampere
_joule = _u.kilogram * _u.meter**2 / _u.second**2
_pascal = _u.kilogram / (_u.meter * _u.second**2)
# physical constants
# ==================
# --- exact --
light_speed = 299792458 * (_u.meter / _u.second)
gas_constant = 8.314462618 * (_joule / _u.mole / _u.kelvin)
boltzmann_constant = 1.380649e-23 * (_joule / _u.kelvin)
avogadro_constant = 6.02214076e23 * (1 / _u.mole)
elementary_charge = 1.602176634e-19 * (_coulomb)
reduced_planck_constant = 1.054571817e-34 * (_joule * _u.second)
# --- measured ---
# 2021-04-15 - https://physics.nist.gov/cgi-bin/cuu/Value?me|search_for=electron+mass
electron_mass = 9.1093837015e-31 * (_u.kilogram)
# 2021-04-15 - https://physics.nist.gov/cgi-bin/cuu/Value?mu0|search_for=vacuum+permeability
vacuum_permeability = 1.25663706212e-6 * \
(_volt * _u.second / _u.ampere / _u.meter)
# --- derived ---
# [Kg̣*m^2/s^2] - derived
electron_rest_energy = electron_mass * _math.pow(light_speed, 2)
# [V·s/(A.m)] - derived
vacuum_permitticity = 1.0/(vacuum_permeability * _math.pow(light_speed, 2))
# [T·m^2/(A·s)] - derived
vacuum_impedance = vacuum_permeability * light_speed
# [m] - derived
electron_radius = _math.pow(elementary_charge, 2) / \
(4*_math.pi*vacuum_permitticity*electron_rest_energy)
_joule_2_eV = _joule / elementary_charge
# [m]/[GeV]^3 - derived
rad_cgamma = 4*_math.pi*electron_radius / \
_math.pow(electron_rest_energy/elementary_charge/1.0e9, 3) / 3
# [m] - derived
Cq = (55.0/(32*_math.sqrt(3.0))) * (reduced_planck_constant) * \
light_speed / electron_rest_energy
# [m^2/(s·GeV^3)] - derived
Ca = electron_radius*light_speed / \
(3*_math.pow(electron_rest_energy*_joule_2_eV/1.0e9, 3))
|
Python
| 0
|
@@ -363,16 +363,30 @@
- exact
+by definition
--%0A%0Aligh
|
f585619f5844250e5216aaa1d6894b7c9b651d8f
|
Change function name
|
tests/test_frame.py
|
tests/test_frame.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pytest import fixture, yield_fixture
from unittest.mock import patch
import numpy as np
from space.frames.poleandtimes import ScalesDiff
from space.utils.date import Date
from space.orbits.orbit import Orbit
from space.frames.frame import *
@yield_fixture
def time():
    # Freeze the time-scale offsets (TAI-UTC etc.) so tests are deterministic.
    with patch('space.frames.poleandtimes.TimeScales.get') as mock_ts:
        mock_ts.return_value = ScalesDiff(-32.4399519, -0.4399619, 32)
        yield
@yield_fixture()
def pole_position(time):
    # Pin the Earth-orientation parameters (pole coordinates, nutation
    # corrections, length-of-day) to the values used by the reference data.
    with patch('space.frames.poleandtimes.PolePosition.get') as mock_pole:
        mock_pole.return_value = {
            'X': -0.140682,
            'Y': 0.333309,
            'dpsi': -52.195,
            'deps': -3.875,
            'LOD': 1.5563,
        }
        yield
@fixture
def ref_orbit():
    # Reference cartesian state vector in the ITRF frame
    # (position in meters, velocity in m/s).
    return Orbit(
        Date(2004, 4, 6, 7, 51, 28, 386009),
        [-1033479.383, 7901295.2754, 6380356.5958, -3225.636520, -2872.451450, 5531.924446],
        'cartesian',
        'ITRF',
        None
    )
def state_vector_testing(ref, pv, precision=(4, 6)):
    """Compare two 6-element state vectors: position (first three components)
    to ``precision[0]`` decimals, velocity (last three) to ``precision[1]``."""
    pos_digits, vel_digits = precision
    np.testing.assert_almost_equal(ref[:3], pv[:3], pos_digits)
    np.testing.assert_almost_equal(ref[3:], pv[3:], vel_digits)
# Expected state vectors in each intermediate frame (Vallado, section 3.7.3).
pef_ref = np.array([-1033475.03131, 7901305.5856, 6380344.5328,
                    -3225.632747, -2872.442511, 5531.931288])
tod_ref = np.array([5094514.7804, 6127366.4612, 6380344.5328,
                    -4746.088567, 786.077222, 5531.931288])
mod_ref = np.array([5094028.3745, 6127870.8164, 6380248.5164,
                    -4746.263052, 786.014045, 5531.790562])
gcrf_ref = np.array([5102508.958, 6123011.401, 6378136.928,
                     -4743.22016, 790.53650, 5533.75528])
eme_ref = np.array([5102509.6, 6123011.52, 6378136.3,
                    -4743.2196, 790.5366, 5533.75619])
def test_unit_change(ref_orbit, pole_position):
    """These reference data are extracted from Vallado §3.7.3.

    Walks the ITRF -> PEF -> TOD -> EME2000 chain one hop at a time and
    checks each hop (and its inverse) against the published vectors.
    """
    pv = ITRF(ref_orbit.date, ref_orbit).transform('PEF')
    state_vector_testing(pef_ref, pv)

    # Going back to ITRF
    pv2 = PEF(ref_orbit.date, pv).transform('ITRF')
    state_vector_testing(ref_orbit, pv2)

    # PEF to TOD
    pv = PEF(ref_orbit.date, pv).transform('TOD')
    state_vector_testing(tod_ref, pv)

    # Going back to PEF
    pv2 = TOD(ref_orbit.date, pv).transform("PEF")
    state_vector_testing(pef_ref, pv2)

    # TOD to EME2000 (via MOD)
    pv2 = TOD(ref_orbit.date, tod_ref).transform('EME2000')
    state_vector_testing(eme_ref, pv2)

    # # TOD to MOD
    # pv = TOD(ref_orbit.date, pv).transform('MODbis')
    # state_vector_testing(mod_ref, pv, (3, 4))

    # # MOD to GCRF
    # pv = MODbis(ref_orbit.date, pv).transform('GCRF')
    # state_vector_testing(gcrf_ref, pv, (3, 5))
def test_global_change(ref_orbit, pole_position):
    # Same conversion as test_unit_change but done as one direct transform,
    # then round-tripped back to ITRF.
    # pv = ITRF(ref_orbit.date, ref_orbit).transform('GCRF')
    # state_vector_testing(gcrf_ref, pv)

    pv = ITRF(ref_orbit.date, ref_orbit).transform('EME2000')
    state_vector_testing(eme_ref, pv)

    pv = EME2000(ref_orbit.date, pv).transform('ITRF')
    state_vector_testing(ref_orbit, pv)
|
Python
| 0.000092
|
@@ -1036,36 +1036,29 @@
)%0A%0A%0Adef
-state
+assert
_vector
-_testing
(ref, pv
@@ -2017,36 +2017,29 @@
F')%0A
-state
+assert
_vector
-_testing
(pef_ref
@@ -2126,36 +2126,29 @@
F')%0A
-state
+assert
_vector
-_testing
(ref_orb
@@ -2228,36 +2228,29 @@
D')%0A
-state
+assert
_vector
-_testing
(tod_ref
@@ -2335,36 +2335,29 @@
F%22)%0A
-state
+assert
_vector
-_testing
(pef_ref
@@ -2459,36 +2459,29 @@
0')%0A
-state
+assert
_vector
-_testing
(eme_ref
@@ -2568,36 +2568,29 @@
)%0A #
-state
+assert
_vector
-_testing
(mod_ref
@@ -2686,36 +2686,29 @@
)%0A #
-state
+assert
_vector
-_testing
(gcrf_re
@@ -2842,36 +2842,29 @@
)%0A #
-state
+assert
_vector
-_testing
(gcrf_re
@@ -2937,36 +2937,29 @@
0')%0A
-state
+assert
_vector
-_testing
(eme_ref
@@ -3028,28 +3028,21 @@
-state
+assert
_vector
-_testing
(ref
@@ -3049,12 +3049,13 @@
_orbit, pv)%0A
+%0A
|
8441b7dbd0e56fd04361df778628078406a21f19
|
fix hastily-mocked unit test
|
tests/test_guano.py
|
tests/test_guano.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from guano import GuanoFile, wavparams, parse_timestamp
class UnicodeTest(unittest.TestCase):
    """Round-trip non-ASCII metadata through parsing and .WAV file writing."""

    NOTE = u'¡GUANO is the 💩 !'
    MD = u"""GUANO|Version: 1.0\nNote: %s""" % NOTE

    def setUp(self):
        pass

    def test_from_string(self):
        """Parse a GUANO metadata block containing Unicode data"""
        g = GuanoFile.from_string(self.MD)
        self.assertEqual(self.NOTE, g['Note'])

    def test_file_roundtrip(self):
        """Write a GUANO .WAV file containing Unicode data, re-read it and confirm value is identical"""
        fname = 'test_guano.wav'

        # write a fake .WAV file
        # NOTE(review): writes into the current working directory and never
        # cleans up; also confirm `wav_data` is the attribute name that
        # GuanoFile.write() actually consumes.
        g = GuanoFile.from_string(self.MD)
        g.filename = fname
        g.wav_params = wavparams(1, 2, 500000, 2, 'NONE', None)
        g.wav_data = b'\0\0'
        g.write()

        # read it back in
        g2 = GuanoFile(fname)
        self.assertEqual(self.NOTE, g2['Note'])
class GeneralTest(unittest.TestCase):
    """Parsing, namespacing and type-coercion behaviour on a typical file."""

    MD = r'''GUANO|Version: 1.0
Timestamp: 2017-04-20T01:23:45-07:00
Note: This is a \nmultiline text note\nfor testing.
User|Haiku: five\nseven\nfive
User|Answer: 42
MSFT|Transect|Version: 1.0.16
'''

    def setUp(self):
        # Register coercion functions before parsing: ints for User|Answer,
        # literal '\n' expansion for the well-known Note field.
        GuanoFile.register('User', 'Answer', int)
        GuanoFile.register('', 'Note', lambda x: x.replace('\\n', '\n'))
        self.md = GuanoFile.from_string(self.MD)

    def test_get_namespaces(self):
        """Test that we can extract namespaces"""
        expected = {'GUANO', '', 'User', 'MSFT'}
        namespaces = set(self.md.get_namespaces())
        self.assertSetEqual(expected, namespaces)

    def test_get_types(self):
        """Test multiple ways of requesting a namespaced value"""
        self.assertEqual(42, self.md['User|Answer'])
        self.assertEqual(42, self.md['User', 'Answer'])
        self.assertEqual(42, self.md.get('User|Answer'))

    def test_multiline(self):
        """Ensure multiline string `Note` is parsed as `\n` containing string"""
        self.assertEqual(3, len(self.md['Note'].splitlines()))

    def test_parse_timestamps(self):
        """Verify that we can at least parse all timestamp formats"""
        fmts = [
            '2016-12-10T01:02:03',
            '2016-12-10T01:02:03.123',
            '2016-12-10T01:02:03.123456',
            '2016-12-10T01:02:03Z',
            '2016-12-10T01:02:03.123Z',
            '2016-12-10T01:02:03.123456Z',
            '2016-12-10T01:02:03-07:00',
            '2016-12-10T01:02:03.123-07:00',
            '2016-12-10T01:02:03.123456-07:00',
            '2016-12-10 01:02:03',  # bonus
        ]
        for fmt in fmts:
            parse_timestamp(fmt)
class BadDataTest(unittest.TestCase):
    """
    These are hacks that may go against the specification, done in the name of permissive reading.

    John Postel: "Be conservative in what you do, be liberal in what you accept from others."
    """

    def test_sb41_bad_te(self):
        """SonoBat 4.1 "optional" TE value"""
        md = '''GUANO|Version: 1.0
TE:
'''
        # Must not raise despite the empty value.
        GuanoFile.from_string(md)

    def test_sb41_bad_key(self):
        """SonoBat 4.1 disembodied colon"""
        md = '''GUANO|Version: 1.0
:
'''
        # The keyless line is dropped; only the version field survives.
        self.assertEqual(1, len(list(GuanoFile.from_string(md).items())))

    def test_sb42_bad_timestamp(self):
        """SonoBat 4.2 blank timestamp"""
        md = '''GUANO|Version: 1.0
Timestamp:
'''
        GuanoFile.from_string(md)

    def test_sb42_bad_encoding(self):
        """SonoBat 4.2 doesn't actually encode as UTF-8. At least try not to blow up when reading."""
        # SonoBat *probably* uses mac-roman on OS X and windows-1252 on Windows... in the US at least.
        md = b'GUANO|Version: 1.0\nNote: Mobile transect with mic 4\xd5 above roof.\n\x00\x00'
        GuanoFile.from_string(md)

    def test_sb42_bad_guano_version(self):
        """Some version of SonoBat 4.2 writes a GUANO|Version of "1.0:" by accident."""
        md = b'GUANO|Version: 1.0:\n1.0:\n'
        GuanoFile.from_string(md)
class StrictParsingTest(unittest.TestCase):
    """
    Test our strict/lenient parsing modes.

    Note that we are always lenient for some types of "bad data", as in :class:BadDataTest above.
    """

    def test_strict_mode(self):
        # 'TE: no' is not a valid integer and the 'Loc Position' value is
        # malformed, so strict parsing should reject this block.
        md = '''GUANO|Version: 1.0
TE: no
Loc Position: 10N 567288E 4584472N
'''
        # The original `try/except ValueError: pass` swallowed the outcome,
        # so strict mode was never actually asserted to raise.
        with self.assertRaises(ValueError):
            GuanoFile.from_string(md, strict=True)

        # Lenient mode keeps the bad values as raw strings.
        g = GuanoFile.from_string(md, strict=False)
        self.assertEqual(g.get('TE', None), 'no')
        self.assertEqual(g.get('Loc Position', None), '10N 567288E 4584472N')
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
|
Python
| 0
|
@@ -811,24 +811,25 @@
)%0A g.
+_
wav_data = b
@@ -835,11 +835,80 @@
b'%5C0
+1
%5C0
-'
+2' # faking it, don't try this at home!%0A g._wav_data_size = 2
%0A
|
c72d67fbbbf5ea67e6e449ca941ae1760a43a24a
|
fix python 3 error
|
tests/test_music.py
|
tests/test_music.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import json
from django.core.urlresolvers import reverse
from django.http import HttpResponse
import pytest
from music.decorators import json_view
from music.models import Music
from music.views import next_music
@pytest.mark.django_db
class TestMusic(object):
    """Model-level checks for the Music table."""

    def setup(self):
        # Two fixture rows created directly through the ORM.
        self.a = dict(title='music_a', author='author_a',
                      cover='http://www.example.com/a.jpg',
                      douban='http://music.douban.com/a',
                      mp3='http://www.example.com/a.mp3',
                      ogg='http://www.example.com/a.ogg'
                      )
        self.b = dict(title='music_b', author='author_b',
                      cover='http://www.example.com/b.jpg',
                      douban='http://music.douban.com/b',
                      mp3='http://www.example.com/b.mp3',
                      ogg='http://www.example.com/b.ogg'
                      )
        Music.objects.create(**self.a)
        Music.objects.create(**self.b)

    def test_music(self):
        # Round-trip the fixture data and check the str() representation.
        a = Music.objects.get(title=self.a['title'])
        b = Music.objects.get(title=self.b['title'])
        assert a.author == self.a['author']
        assert a.mp3 == self.a['mp3']
        assert b.author == self.b['author']
        assert b.mp3 == self.b['mp3']
        assert self.a['author'] in str(a)
@pytest.mark.django_db
class TestView(object):
    """View-level checks for the music JSON endpoints."""

    def setup(self):
        self.c = dict(title='music_c', author='author_c',
                      cover='http://www.example.com/c.jpg',
                      douban='http://music.douban.com/c',
                      mp3='http://www.example.com/c.mp3',
                      ogg='http://www.example.com/c.ogg'
                      )
        self.d = dict(title='music_d', author='author_d',
                      cover='http://www.example.com/d.jpg',
                      douban='http://music.douban.com/d',
                      mp3='http://www.example.com/d.mp3',
                      ogg='http://www.example.com/d.ogg'
                      )
        Music.objects.create(**self.c)
        Music.objects.create(**self.d)

    def test_next_music(self, rf):
        request = rf.get(reverse('music:next', kwargs={'next_number': 1}))
        response = next_music(request, 1)
        assert response.status_code == 200
        assert json.loads(response.content.decode())['data'] in (self.c, self.d)

    def test_next_music_index_error(self, rf):
        # An out-of-range index must not 500: the view falls back to a valid track.
        request = rf.get(reverse('music:next', kwargs={'next_number': 10000}))
        response = next_music(request, 10000)
        assert response.status_code == 200
        assert json.loads(response.content.decode())['data'] in (self.c, self.d)

    def test_random(self, client):
        response = client.get(reverse('music:random'))
        assert response.status_code == 200
        assert json.loads(response.content.decode())['data'] in (self.c, self.d)
class TestDeocrator(object):
    """Exercise the json_view decorator's three return-value contracts."""

    def test_json_view(self, rf):
        d1 = {'data': 'hello'}
        view1 = lambda request: d1
        view2 = lambda request: (d1, 400)
        view3 = lambda request: HttpResponse(str(d1))

        # dict return -> 200 JSON response
        response = json_view(view1)(rf.get('/'))
        assert isinstance(response, HttpResponse)
        assert response.status_code == 200
        assert response['Content-Type'] == 'application/json'
        assert json.loads(response.content.decode()) == d1

        # (dict, status) return -> JSON response with that status code
        response = json_view(view2)(rf.get('/'))
        assert isinstance(response, HttpResponse)
        assert response.status_code == 400
        assert response['Content-Type'] == 'application/json'
        assert json.loads(response.content.decode()) == d1

        # HttpResponse return -> passed through untouched
        response = json_view(view3)(rf.get('/'))
        assert isinstance(response, HttpResponse)
        assert response.status_code == 200
        assert response['Content-Type'] == 'text/html; charset=utf-8'
        # response.content is bytes on Python 3; the original compared it to a
        # str, which is always False -- decode first, as the other asserts do.
        assert response.content.decode() == str(d1)
|
Python
| 0.000268
|
@@ -4011,16 +4011,25 @@
.content
+.decode()
== str(
|
e732615e2e8586cc3f6a31614372ef16bae26a36
|
update tests for prices.py
|
tests/test_price.py
|
tests/test_price.py
|
from bitshares import BitShares
from bitshares.instance import set_shared_bitshares_instance
from bitshares.amount import Amount
from bitshares.price import Price
from bitshares.asset import Asset
import unittest
class Testcases(unittest.TestCase):
    """Price construction and arithmetic.

    NOTE(review): connects to a public BitShares node over websockets, so
    these tests require network access.
    """

    def __init__(self, *args, **kwargs):
        super(Testcases, self).__init__(*args, **kwargs)
        bitshares = BitShares(
            "wss://node.bitshares.eu"
        )
        set_shared_bitshares_instance(bitshares)

    def test_init(self):
        # self.assertEqual(1, 1)
        # Every supported constructor signature should be accepted.
        Price("0.315 USD/BTS")
        Price(1.0, "USD/GOLD")
        Price(0.315, base="USD", quote="BTS")
        Price(0.315, base=Asset("USD"), quote=Asset("BTS"))
        Price({
            "base": {"amount": 1, "asset_id": "1.3.0"},
            "quote": {"amount": 10, "asset_id": "1.3.106"}})
        Price({
            "receives": {"amount": 1, "asset_id": "1.3.0"},
            "pays": {"amount": 10, "asset_id": "1.3.106"},
        }, base_asset=Asset("1.3.0"))
        Price(quote="10 GOLD", base="1 USD")
        Price("10 GOLD", "1 USD")
        Price(Amount("10 GOLD"), Amount("1 USD"))

    def test_multiplication(self):
        p1 = Price(10.0, "USD/GOLD")
        p2 = Price(5.0, "USD/EUR")
        p3 = p1 * p2
        p4 = p3.as_base("GOLD")
        self.assertEqual(p4["quote"]["symbol"], "EUR")
        self.assertEqual(p4["base"]["symbol"], "GOLD")
        # 10 USD/GOLD * 0.2 EUR/USD = 2 EUR/GOLD = 0.5 GOLD/EUR
        self.assertEqual(float(p4), 0.5)

        # Inline multiplication
        p5 = p1
        p5 *= p2
        p4 = p5.as_base("GOLD")
        self.assertEqual(p4["quote"]["symbol"], "EUR")
        self.assertEqual(p4["base"]["symbol"], "GOLD")
        # 10 USD/GOLD * 0.2 EUR/USD = 2 EUR/GOLD = 0.5 GOLD/EUR
        self.assertEqual(float(p4), 0.5)

    def test_div(self):
        p1 = Price(10.0, "USD/GOLD")
        p2 = Price(5.0, "USD/EUR")

        # 10 USD/GOLD / 5 USD/EUR = 2 EUR/GOLD
        p3 = p1 / p2
        p4 = p3.as_base("EUR")
        self.assertEqual(p4["base"]["symbol"], "EUR")
        self.assertEqual(p4["quote"]["symbol"], "GOLD")
        # 10 USD/GOLD * 0.2 EUR/USD = 2 EUR/GOLD = 0.5 GOLD/EUR
        self.assertEqual(float(p4), 2)
|
Python
| 0
|
@@ -2234,8 +2234,271 @@
p4), 2)%0A
+%0A def test_div2(self):%0A p1 = Price(10.0, %22USD/GOLD%22)%0A p2 = Price(5.0, %22USD/GOLD%22)%0A%0A # 10 USD/GOLD / 5 USD/EUR = 2 EUR/GOLD%0A p3 = p1 / p2%0A self.assertTrue(isinstance(p3, (float, int)))%0A self.assertEqual(float(p3), 2.0)%0A
|
86c43cdc1bddc8cf869e293b1deadd5126631410
|
Update test_spell.py
|
tests/test_spell.py
|
tests/test_spell.py
|
# -*- coding: utf-8 -*-
import unittest
from pythainlp.spell import (
NorvigSpellChecker,
correct,
spell,
spell_sent,
correct_sent
)
class TestSpellPackage(unittest.TestCase):
    """Smoke tests for pythainlp.spell across all supported engines."""

    def test_spell(self):
        # Empty/None input yields a list containing one empty string.
        self.assertEqual(spell(None), [""])
        self.assertEqual(spell(""), [""])

        result = spell("เน้ร")
        self.assertIsInstance(result, list)
        self.assertGreater(len(result), 0)

        result = spell("เกสมร์")
        self.assertIsInstance(result, list)
        self.assertGreater(len(result), 0)

        result = spell("เน้ร", engine="phunspell")
        self.assertIsInstance(result, list)
        self.assertGreater(len(result), 0)

        result = spell("เกสมร์", engine="phunspell")
        self.assertIsInstance(result, list)
        self.assertGreater(len(result), 0)

        result = spell("เน้ร", engine="symspellpy")
        self.assertIsInstance(result, list)
        self.assertGreater(len(result), 0)

        result = spell("เกสมร์", engine="symspellpy")
        self.assertIsInstance(result, list)
        self.assertGreater(len(result), 0)

        result = spell("เน้ร", engine="tltk")
        self.assertIsInstance(result, list)
        self.assertGreater(len(result), 0)

        result = spell("เกสมร์", engine="tltk")
        self.assertIsInstance(result, list)
        self.assertGreater(len(result), 0)

    def test_word_correct(self):
        # Degenerate and numeric inputs pass through unchanged.
        self.assertEqual(correct(None), "")
        self.assertEqual(correct(""), "")
        self.assertEqual(correct("1"), "1")
        self.assertEqual(correct("05"), "05")
        self.assertEqual(correct("56"), "56")
        self.assertEqual(correct("1.01"), "1.01")

        result = correct("ทดสอง")
        self.assertIsInstance(result, str)
        self.assertNotEqual(result, "")

        result = correct("ทดสอง", engine="phunspell")
        self.assertIsInstance(result, str)
        self.assertNotEqual(result, "")

        result = correct("ทดสอง", engine="symspellpy")
        self.assertIsInstance(result, str)
        self.assertNotEqual(result, "")

    def test_norvig_spell_checker(self):
        checker = NorvigSpellChecker(dict_filter=None)
        self.assertTrue(len(checker.dictionary()) > 0)
        self.assertGreaterEqual(checker.prob("มี"), 0)

        # Custom dictionary entries are filtered on frequency, length,
        # digits, Thai-ness and non-negative counts: only one survives.
        user_dict = [
            ("การงาน", 31),  # longer than max_len
            ("กาม", 1),  # fewer than min_freq
            ("กาล0", 64),  # has digit
            ("๒๔๗๕", 64),  # has digit
            ("hello", 8),  # not Thai
            ("ลบ", -1),  # negative count
            ("การ", 42),  # OK
        ]
        checker = NorvigSpellChecker(
            custom_dict=user_dict, min_freq=2, max_len=5
        )
        self.assertEqual(len(checker.dictionary()), 1)

        # A plain word list defaults every count to an accepted frequency.
        user_dict = [
            "เอกราช",
            "ปลอดภัย",
            "เศรษฐกิจ",
            "เสมอภาค",
            "เสรีภาพ",
            "การศึกษา",
        ]
        checker = NorvigSpellChecker(custom_dict=user_dict)
        self.assertEqual(len(checker.dictionary()), len(user_dict))

        user_dict = {
            "พหลโยธิน": 1,
            "ขีตตะสังคะ": 2,
            "พนมยงค์": 3,
            "ภมรมนตรี": 4,
            "มิตรภักดี": 5,
            "ลพานุกรม": 6,
            "สิงหเสนี": 7,
        }
        checker = NorvigSpellChecker(custom_dict=user_dict)
        # "พหลโยธิน" will be removed,
        # as it has frequency less than default min_freq (2)
        self.assertEqual(len(checker.dictionary()), len(user_dict) - 1)

        # Non-string entries must be rejected.
        user_dict = [24, 6, 2475]
        with self.assertRaises(TypeError):
            checker = NorvigSpellChecker(custom_dict=user_dict)

    def test_spell_sent(self):
        self.spell_sent = ["เด็", "อินอร์เน็ต", "แรง"]
        self.assertIsNotNone(spell_sent(self.spell_sent))
        self.assertIsNotNone(spell_sent(self.spell_sent, engine="pn"))
        self.assertIsNotNone(spell_sent(self.spell_sent, engine="phunspell"))
        self.assertIsNotNone(spell_sent(self.spell_sent, engine="symspellpy"))

    def test_correct_sent(self):
        self.spell_sent = ["เด็", "อินอร์เน็ต", "แรง"]
        self.assertIsNotNone(correct_sent(self.spell_sent))
        self.assertIsNotNone(correct_sent(self.spell_sent, engine="pn"))
        self.assertIsNotNone(correct_sent(self.spell_sent, engine="phunspell"))
        self.assertIsNotNone(
            correct_sent(self.spell_sent, engine="symspellpy")
        )
|
Python
| 0.000004
|
@@ -1259,37 +1259,34 @@
esult = spell(%22%E0%B9%80
+%E0%B8%94
%E0%B8%81
-%E0%B8%AA%E0%B8%A1%E0%B8%A3%E0%B9%8C
%22, engine=%22tltk%22
|
5297e301de33304fc00742bb944ffae654db7ee6
|
fix removing other colour option
|
vehicles/forms.py
|
vehicles/forms.py
|
import requests
from django import forms
from django.core.exceptions import ValidationError
from django.db.models import Count, Q, Exists, OuterRef
from busstops.models import Operator, Service
from .models import VehicleType, VehicleFeature, Livery, Vehicle, get_text_colour
from .fields import RegField
def get_livery_choices(operator):
    """Build the livery choice list for an operator's active vehicles.

    Returns (value, rendered preview) pairs: real liveries first (most used
    first), then ad-hoc colour schemes, then an 'Other' option.
    """
    active = operator.vehicle_set.filter(withdrawn=False)
    by_key = {}

    popular = Livery.objects.filter(vehicle__in=active).annotate(popularity=Count('vehicle'))
    for livery in popular.order_by('-popularity').distinct():
        by_key[livery.id] = livery

    # Vehicles painted a plain colour scheme (no livery record) each
    # contribute a synthetic livery keyed by their colour string.
    for vehicle in active.distinct('colours'):
        if vehicle.livery_id or not vehicle.colours or vehicle.colours == 'Other':
            continue
        by_key[vehicle.colours] = Livery(colours=vehicle.colours, name=f'Like {vehicle}')

    options = [(key, livery.preview(name=True)) for key, livery in by_key.items()]
    if options:
        options.append(('Other', 'Other'))
    return options
class EditVehiclesForm(forms.Form):
    """Bulk-edit form applied to one or more of an operator's vehicles."""

    vehicle_type = forms.ModelChoiceField(queryset=VehicleType.objects, label='Type', required=False, empty_label='')
    colours = forms.ChoiceField(label='Livery', widget=forms.RadioSelect, required=False)
    other_colour = forms.CharField(widget=forms.TextInput(attrs={"type": "color"}), required=False, initial='#ffffff')
    features = forms.ModelMultipleChoiceField(queryset=VehicleFeature.objects, label='Features',
                                              widget=forms.CheckboxSelectMultiple, required=False)
    depot = forms.ChoiceField(required=False)
    withdrawn = forms.BooleanField(label='Permanently withdrawn', required=False)

    def clean_url(self):
        # Reject URLs we cannot fetch (dead links, login-walled pages).
        if self.cleaned_data['url']:
            try:
                response = requests.get(self.cleaned_data['url'], timeout=5)
                if response.ok:
                    return self.cleaned_data['url']
            except requests.RequestException:
                pass
            raise ValidationError('That URL doesn’t work for me. Maybe it’s too long, or Facebook')

    def clean_other_colour(self):
        # Only meaningful when the 'Other' livery option is selected,
        # and then it must be a colour get_text_colour can parse.
        if self.cleaned_data['other_colour']:
            if self.cleaned_data.get('colours') != 'Other':
                return
            try:
                get_text_colour(self.cleaned_data['other_colour'])
            except ValueError as e:
                raise ValidationError(str(e))
        return self.cleaned_data['other_colour']

    def has_really_changed(self):
        # Changes to 'url'/'other_colour' alone don't constitute an edit worth saving.
        if not self.has_changed():
            return False
        if all(key == 'url' or key == 'other_colour' for key in self.changed_data):
            return False
        return True

    def __init__(self, *args, operator=None, user, vehicle=None, **kwargs):
        super().__init__(*args, **kwargs)

        colours = None
        depots = None

        if operator:
            colours = get_livery_choices(operator)
            # Prepend the blank option: 'no livery' when editing one
            # vehicle, 'leave unchanged' when bulk-editing.
            if vehicle:
                colours = [('', 'None/mostly white')] + colours
            else:
                colours = [('', 'No change')] + colours
            if user.trusted:
                # Trusted users may pick any depot the operator uses.
                depots = operator.vehicle_set.distinct('data__Depot').values_list('data__Depot', flat=True)
                depots = [(depot, depot) for depot in depots if depot]
                depots.sort()
            elif vehicle and vehicle.data and 'Depot' in vehicle.data:
                depots = [(vehicle.data['Depot'], vehicle.data['Depot'])]

        # Drop fields that have no valid choices in this context.
        if colours:
            self.fields['colours'].choices = colours
        else:
            del self.fields['colours']
            del self.fields['other_colour']

        if depots:
            self.fields['depot'].choices = [('', '')] + depots
        else:
            del self.fields['depot']
class EditVehicleForm(EditVehiclesForm):
    """With some extra fields, only applicable to editing a single vehicle
    """
    fleet_number = forms.CharField(required=False, max_length=14)
    reg = RegField(label='Number plate', required=False, max_length=10)
    operator = forms.ModelChoiceField(queryset=None, label='Operator', empty_label='')
    branding = forms.CharField(label="Other branding", required=False, max_length=255)
    name = forms.CharField(label='Name', help_text="Not your name", required=False, max_length=255)
    previous_reg = RegField(required=False, max_length=14)
    depot = forms.ChoiceField(required=False)
    notes = forms.CharField(required=False, max_length=255)
    url = forms.URLField(label='URL', help_text="Optional link to a public web page (not a private Facebook group)"
                                                " or picture showing repaint", required=False, max_length=255)
    field_order = ['fleet_number', 'reg', 'operator', 'vehicle_type', 'colours', 'other_colour', 'branding', 'name',
                   'previous_reg', 'features', 'depot', 'notes']

    def __init__(self, *args, user, vehicle, **kwargs):
        super().__init__(*args, **kwargs, user=user, vehicle=vehicle)

        # Lock the fleet number when it is already dictated by the vehicle
        # code or by the tracking feed's VehicleUniqueId.
        if vehicle.fleet_code and vehicle.fleet_code in vehicle.code or str(vehicle.fleet_number) in vehicle.code:
            self.fields['fleet_number'].disabled = True
        elif vehicle.fleet_code and vehicle.latest_journey and vehicle.latest_journey.data:
            try:
                if vehicle.latest_journey.data['Extensions']['VehicleJourney']['VehicleUniqueId'] == vehicle.fleet_code:
                    self.fields['fleet_number'].disabled = True
            except KeyError:
                pass

        # Likewise lock the registration when it is embedded in the code.
        if vehicle.reg and vehicle.reg in vehicle.code.replace('_', '').replace(' ', '').replace('-', ''):
            self.fields['reg'].disabled = True

        if not user.is_staff:
            # Non-staff only see these freeform fields when already populated.
            if not vehicle.notes:
                del self.fields['notes']
            if not vehicle.branding:
                del self.fields['branding']

        if not vehicle.operator or vehicle.operator.parent:
            operators = Operator.objects
            if user.trusted and vehicle.operator:
                # any sibling operator
                operators = operators.filter(parent=vehicle.operator.parent)
                condition = Exists(Service.objects.filter(current=True, operator=OuterRef('pk')).only('id'))
                condition |= Exists(Vehicle.objects.filter(operator=OuterRef('pk')).only('id'))
            elif vehicle.latest_journey:
                # only operators whose services the vehicle has operated
                condition = Exists(
                    Service.objects.filter(
                        operator=OuterRef('pk'),
                        id=vehicle.latest_journey.service_id
                    )
                )
            else:
                del self.fields['operator']
                return
            # Always allow keeping the current operator.
            if vehicle.operator:
                condition |= Q(pk=vehicle.operator_id)
            self.fields['operator'].queryset = operators.filter(condition)
        else:
            del self.fields['operator']
|
Python
| 0
|
@@ -2940,170 +2940,8 @@
tor)
-%0A if vehicle:%0A colours = %5B('', 'None/mostly white')%5D + colours%0A else:%0A colours = %5B('', 'No change')%5D + colours
%0A%0A
@@ -3342,16 +3342,178 @@
olours:%0A
+ if vehicle:%0A colours = %5B('', 'None/mostly white')%5D + colours%0A else:%0A colours = %5B('', 'No change')%5D + colours%0A
|
a26f04bddcdb92af050c2d8237ccb6c2ef1406e5
|
Fix identation
|
jst/common/context.py
|
jst/common/context.py
|
'''
Created on Jan 18, 2015
@author: rz
'''
import configparser
import os
from os.path import expanduser
def load():
    """Assemble the jst execution context.

    Merges the per-user config (~/.jst/jst.properties) with the
    jstcontext.properties in the current directory, deriving SVN URLs,
    working-copy paths and Tomcat settings.

    Raises:
        FileNotFoundError: if either properties file is missing.
    """
    global_cfg_file = expanduser("~") + '/.jst/jst.properties'
    if not os.path.isfile(global_cfg_file):
        raise FileNotFoundError(global_cfg_file)

    cwd = os.getcwd()
    ctx_file = cwd + '/jstcontext.properties'
    if not os.path.isfile(ctx_file):
        raise FileNotFoundError(ctx_file)

    global_cfg = configparser.ConfigParser()
    global_cfg.read(global_cfg_file)
    ctx = configparser.ConfigParser()
    ctx.read(ctx_file)

    user = global_cfg['src']['user']
    ctx['src']['url_ce'] = 'svn+ssh://' + user + '@' + global_cfg['src']['url_ce'] + '/' + ctx['src']['branch_ce']
    ctx['src']['url_pro'] = 'svn+ssh://' + user + '@' + global_cfg['src']['url_pro'] + '/' + ctx['src']['branch_pro']
    ctx['src']['working_copy_ce'] = cwd + '/ce'
    ctx['src']['working_copy_pro'] = cwd + '/pro'
    ctx['tc']['distribution'] = global_cfg['tc']['distribution']
    ctx['tc']['home'] = cwd + '/tc'
    return ctx
def show(ctx):
    """Print every effective context value, one 'section.key = value' per line."""
    for section, key in (
        ('src', 'url_ce'),
        ('src', 'url_pro'),
        ('src', 'working_copy_ce'),
        ('src', 'working_copy_pro'),
        ('tc', 'home'),
        ('tc', 'distribution'),
        ('tc', 'catalina_opts'),
        ('tc', 'java_opts'),
    ):
        print(section + '.' + key + ' = ' + ctx[section][key])
|
Python
| 0.001406
|
@@ -221,32 +221,33 @@
_file)):%0A
+
raise FileNotFou
@@ -375,24 +375,25 @@
ctx_file)):%0A
+
raise
|
99bc38b7d33eef76fd99d7ce362b00080edf5067
|
Change dependencies
|
stock_shipment_management/__openerp__.py
|
stock_shipment_management/__openerp__.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Joël Grand-Guillaume
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more description.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{"name": "Transportation Plan",
"version": "0.1",
"author": "Camptocamp",
"category": "Transportation",
"license": 'AGPL-3',
'complexity': "normal",
"images" : [],
"website": "http://www.camptocamp.com",
"depends" : ["sale",
"purchase",
"stock",
],
"demo": [],
"data": ["data/tranport_plan_sequence.xml",
"data/tranport_mode_data.xml",
"view/transport_plan.xml",
"view/transport_mode.xml",
"security/ir.model.access.csv",
],
"auto_install": False,
"test": [],
'installable': True,
}
|
Python
| 0.000001
|
@@ -1170,38 +1170,16 @@
: %5B%22
-sale%22,%0A %22purchase
+delivery
%22,%0A
@@ -1197,16 +1197,30 @@
%22stock
+_route_transit
%22,%0A
|
aad19b0373f2b331ffbada431385173d2bf3e43e
|
Update cronjob.py
|
k8s/models/cronjob.py
|
k8s/models/cronjob.py
|
#!/usr/bin/env python
# -*- coding: utf-8
# Copyright 2017-2019 The FIAAS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import six
from .common import ObjectMeta, ObjectReference, Time, ListMeta
from .job import JobTemplateSpec
from ..base import Model
from ..fields import Field, ListField
class CronJobSpec(Model):
    # Mirrors the Kubernetes batch/v1beta1 CronJobSpec fields.
    concurrencyPolicy = Field(six.text_type)
    failedJobsHistoryLimit = Field(int)
    jobTemplate = Field(JobTemplateSpec)
    schedule = Field(six.text_type)  # cron-format schedule expression
    startingDeadlineSeconds = Field(int)
    successfulJobsHistoryLimit = Field(int)
    suspend = Field(bool)
class CronJobStatus(Model):
    # References to currently running Jobs and the last trigger time.
    active = ListField(ObjectReference)
    lastScheduleTime = Field(Time)
class CronJob(Model):
    # REST endpoints for the batch/v1beta1 CronJob resource.
    class Meta:
        list_url = "/apis/batch/v1beta1/cronjobs"
        url_template = "/apis/batch/v1beta1/namespaces/{namespace}/cronjobs/{name}"

    metadata = Field(ObjectMeta)
    spec = Field(CronJobSpec)
    status = Field(CronJobStatus)
class CronJobList(Model):
    # List wrapper returned by the CronJob collection endpoint.
    apiVersion = Field(six.text_type)
    items = ListField(CronJob)
    kind = Field(six.text_type)
    metadata = Field(ListMeta)
|
Python
| 0.000004
|
@@ -626,16 +626,17 @@
icense.%0A
+%0A
from __f
@@ -1525,162 +1525,4 @@
%0A%0A%0A
-class CronJobList(Model):%0A apiVersion = Field(six.text_type)%0A items = ListField(CronJob)%0A kind = Field(six.text_type)%0A metadata = Field(ListMeta)%0A
|
e7dca1dae8300dd702ecfc36110518b16c9c5231
|
change directory back to previous location (prevents following tests from pointing into the forest)
|
tests/testhelper.py
|
tests/testhelper.py
|
from contextlib import contextmanager
import tempfile
import os
import shutil
from configuration import Builder
from gitFunctions import Initializer
import configuration
@contextmanager
def mkchdir(subfolder, folderprefix="rtc2test_case"):
    """Create a temp directory, chdir into it, and clean up on exit.

    The previous working directory is restored *before* the temp folder is
    deleted, so code after the ``with`` block is not left sitting inside a
    removed directory (the original version never chdir'd back).

    Yields:
        str: path of the temporary folder.
    """
    previousdir = os.getcwd()
    tempfolder = tempfile.mkdtemp(prefix=folderprefix + subfolder)
    os.chdir(tempfolder)
    try:
        yield tempfolder
    finally:
        os.chdir(previousdir)
        shutil.rmtree(tempfolder, ignore_errors=True)  # on windows folder remains in temp, git process locks it
@contextmanager
def createrepo(reponame="test.git", folderprefix="rtc2test_case"):
    """Initialize a throwaway git repo in a temp directory and chdir into it.

    Like mkchdir, restores the previous working directory before deleting
    the repo folder, so subsequent code does not run inside a removed path.
    """
    repodir = tempfile.mkdtemp(prefix=folderprefix)
    configuration.config = Builder().setworkdirectory(repodir).setgitreponame(reponame).build()
    initializer = Initializer()
    previousdir = os.getcwd()
    os.chdir(repodir)
    initializer.initalize()
    try:
        yield
    finally:
        os.chdir(previousdir)
        shutil.rmtree(repodir, ignore_errors=True)  # on windows folder remains in temp, git process locks it
|
Python
| 0
|
@@ -299,24 +299,54 @@
subfolder)%0A
+ previousdir = os.getcwd()%0A
os.chdir
@@ -397,32 +397,62 @@
er%0A finally:%0A
+ os.chdir(previousdir)%0A
shutil.r
@@ -813,16 +813,46 @@
lizer()%0A
+ previousdir = os.getcwd()%0A
os.c
@@ -921,32 +921,62 @@
ld%0A finally:%0A
+ os.chdir(previousdir)%0A
shutil.r
|
5a8199744bf658d491721b16fea7639303e47d3f
|
Edit view pre-populates with data from user object
|
july/people/views.py
|
july/people/views.py
|
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from django.template.context import RequestContext
#from google.appengine.ext import db
from july.people.models import Commit
from gae_django.auth.models import User
from django.http import Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse
def user_profile(request, username):
user = User.all().filter("username", username).get()
if user == None:
raise Http404("User not found")
commits = Commit.all().ancestor(request.user.key())
return render_to_response('people/profile.html',
{"commits":commits},
RequestContext(request))
@login_required
def edit_profile(request, username, template_name='people/edit.html'):
from forms import EditUserForm
user = request.user
#CONSIDER FILES with no POST? Can that happen?
form = EditUserForm(request.POST or None, request.FILES or None)
if form.is_valid():
for key in form.cleaned_data:
setattr(user,key,form.cleaned_data.get(key))
user.put()
return HttpResponseRedirect(
reverse('member-profile', kwargs={'username': request.user.username})
)
if user == None:
raise Http404("User not found")
return render_to_response(template_name,
{'form':form,},
RequestContext(request))
|
Python
| 0
|
@@ -849,60 +849,8 @@
er%0A%0A
- #CONSIDER FILES with no POST? Can that happen?%0A
@@ -891,24 +891,29 @@
r None,
+user=
request.
FILES or
@@ -908,21 +908,12 @@
est.
-FILES or None
+user
)%0A
@@ -1001,12 +1001,14 @@
ser,
+
key,
+
form
@@ -1099,24 +1099,16 @@
-
reverse(
@@ -1144,17 +1144,16 @@
ername':
-
request.
@@ -1342,17 +1342,16 @@
rm':form
-,
%7D, %0A
|
8e9edf002368df0cd4bfa33975271b75af191ef0
|
fix cache expiring
|
ujt/dash_app.py
|
ujt/dash_app.py
|
""" Configuration for Dash app.
Exposes app and cache to enable other files (namely callbacks) to register callbacks and update cache.
App is actually started by ujt.py
"""
import dash
import dash_bootstrap_components as dbc
import dash_cytoscape as cyto
from flask_caching import Cache
# Initialize Dash app and Flask-Cache
cyto.load_extra_layouts()
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
cache = Cache()
cache.init_app(
app.server,
config={"CACHE_TYPE": "filesystem", "CACHE_DIR": "cache_dir"},
)
|
Python
| 0
|
@@ -478,16 +478,25 @@
config=%7B
+%0A
%22CACHE_T
@@ -514,16 +514,24 @@
system%22,
+%0A
%22CACHE_
@@ -547,13 +547,85 @@
che_dir%22
+,%0A %22CACHE_DEFAULT_TIMEOUT%22: 0,%0A %22CACHE_THRESHOLD%22: 0,%0A
%7D,%0A)%0A
|
a8e43dcdbdd00de9d4336385b3f3def1ae5c2515
|
Update UserX, with back compatibility
|
main/modelx.py
|
main/modelx.py
|
# -*- coding: utf-8 -*-
import hashlib
class BaseX(object):
@classmethod
def retrieve_one_by(cls, name, value):
cls_db_list = cls.query(getattr(cls, name) == value).fetch(1)
if cls_db_list:
return cls_db_list[0]
return None
class ConfigX(object):
@classmethod
def get_master_db(cls):
return cls.get_or_insert('master')
class UserX(object):
def avatar_url(self, size=None):
return '//gravatar.com/avatar/%(hash)s?d=identicon&r=x%(size)s' % {
'hash': hashlib.md5((self.email or self.name).encode('utf-8')).hexdigest().lower(),
'size': '&s=%d' % size if size > 0 else '',
}
|
Python
| 0
|
@@ -386,16 +386,21 @@
atar_url
+_size
(self, s
@@ -628,8 +628,52 @@
,%0A %7D%0A
+ %0A avatar_url = property(avatar_url_size)%0A
|
6ba3fc5c9fade3609695aa7f5b0498b77a8c18fa
|
revert to 0.2.7 tag
|
keras_cv/__init__.py
|
keras_cv/__init__.py
|
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv import layers
from keras_cv import metrics
from keras_cv import utils
from keras_cv import version_check
from keras_cv.core import ConstantFactorSampler
from keras_cv.core import FactorSampler
from keras_cv.core import NormalFactorSampler
from keras_cv.core import UniformFactorSampler
version_check.check_tf_version()
__version__ = "0.2.8dev"
|
Python
| 0.000001
|
@@ -936,10 +936,7 @@
0.2.
-8dev
+7
%22%0A
|
97523ce0b98d97a4ce6d9d99d5807e1f32b21077
|
correcting a typo
|
constructiveness_toxicity_crowdsource/common/crowd_data_aggregator.py
|
constructiveness_toxicity_crowdsource/common/crowd_data_aggregator.py
|
import pandas as pd
import numpy as np
import math
from crowd_data_aggregation_functions import *
class CrowdsourceAggregator:
'''
Aggregator for crowdsourced data for constructiveness and toxicity
'''
def __init__(self, input_csv):
self.df = pd.read_csv(input_csv)
def get_gold_questions(self):
return self.df.query('_golden == True')
def get_non_gold_questions(self):
return self.df.query('_golden == False')
def get_nannotators(self):
return len(self.non_gold_df['_worker_id'].unique())
def aggregate_annotations(self, df, attribs):
# Relevant columns
unit_id_col = attribs['unit_id_col']
meta_cols = attribs['meta_cols']
avg_cols = attribs['avg_cols']
nominal_cols = attribs['nominal_cols']
text_cols = attribs['text_cols']
# Replace text values with numerical values in the dataframe
#attrs = self.df[avg_cols].replace(['yes', 'no', 'partially', 'not_sure', 'noopinion'], [1, 0, 0.5, 0.5, np.nan])
attrs = self.df[avg_cols].replace(['yes', 'no', 'partially', 'not_sure', 'noopinion'], [1, 0, 0.5, 0.5, 0.5])
other_cols = unit_id_col + meta_cols + nominal_cols + text_cols
df = df[other_cols].join(attrs)
# aggregation method for each class of attributes
avg_dict = {k: 'mean' for k in avg_cols}
meta_dict = {k: 'first' for k in meta_cols}
nominal_dict = {k: list_and_sort for k in nominal_cols}
text_dict = {k: concatenate for k in text_cols}
agg_dict = {**avg_dict, **meta_dict, **nominal_dict, **text_dict}
# Aggregate the results for all workers on a particular comment
aggregated_df = df.groupby(unit_id_col).agg(agg_dict)
for col in avg_cols:
aggregated_df[col] = aggregated_df[col].apply(pd.to_numeric)
aggregated_df[col] = aggregated_df[col].apply(lambda x: round(x,2))
return aggregated_df
def write_csv(self, dframe, cols, csv_path):
dframe.to_csv(csv_path, columns = cols, index = False)
print('CSV written: ', csv_path)
if __name__=='__main__':
pass
|
Python
| 0.999886
|
@@ -535,16 +535,20 @@
en(self.
+get_
non_gold
@@ -548,18 +548,27 @@
on_gold_
-df
+questions()
%5B'_worke
@@ -2257,21 +2257,157 @@
-pass%0A%0A%0A%0A%0A
+ca = CrowdsourceAggregator('../CF_output/constructiveness/batch8/batch8_f1285429.csv')%0A print('The number of annotators: ', ca.get_nannotators())%0A
|
55a3b3a845014d0e4c4c4d057bbe088d7791d43d
|
Prepare for v1.10.0
|
src/pyckson/__init__.py
|
src/pyckson/__init__.py
|
from pyckson.decorators import *
from pyckson.json import *
from pyckson.parser import parse
from pyckson.parsers.base import Parser
from pyckson.serializer import serialize
from pyckson.serializers.base import Serializer
from pyckson.dates.helpers import configure_date_formatter, configure_explicit_nulls
from pyckson.defaults import set_defaults
__version__ = '1.9.0'
|
Python
| 0.000001
|
@@ -364,9 +364,10 @@
'1.
-9
+10
.0'%0A
|
7fcb80a43d39473001610015e92973d95c0b0267
|
Fix not track percent so it is not track percent
|
mica/web/star_hist.py
|
mica/web/star_hist.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.table import Table
from Chandra.Time import DateTime
from mica.stats import acq_stats, guide_stats
def get_acq_data(agasc_id):
"""
Fetch acquisition history from mica acq stats for an agasc id
:param agasc_id: AGASC id
:returns: list of dicts of acquisitions
"""
acq = acq_stats.get_star_stats(agasc_id)
# make list of dicts for use in light templates in kadi web app
if not len(acq):
return []
acq = Table(acq)
acq.sort('guide_start')
acq_table = []
for s in acq:
srec = {}
# Use these columns as they are named from the mica acq stats table
for col in ['type', 'obsid', 'obi', 'slot', 'mag', 'mag_obs', 'star_tracked']:
srec[col] = s[col]
# rename these columns in the dictionary
srec['date'] = s['guide_start']
srec['acq_dy'] = s['cdy']
srec['acq_dz'] = s['cdz']
srec['id'] = s['acqid']
acq_table.append(srec)
return acq_table
def get_gui_data(agasc_id):
"""
Fetch guide/track history for an agasc id
:param agasc_id: AGASC id
:returns: list of dicts of uses as guide stars
"""
gui = guide_stats.get_star_stats(agasc_id)
if not len(gui):
return []
gui = Table(gui)
gui.sort('kalman_datestart')
# make list of dicts for use in light templates in kadi web app
gui_table = []
for s in gui:
srec = {}
# Use these columns as they are named from the mica acq stats table
for col in ['type', 'obsid', 'obi', 'slot']:
srec[col] = s[col]
# rename these columns in the dictionary
srec['date'] = s['kalman_datestart']
srec['mag'] = s['mag_aca']
srec['mag_obs'] = s['aoacmag_mean']
srec['perc_not_track'] = s['f_track'] * 100.0
gui_table.append(srec)
return gui_table
def get_star_stats(agasc_id, start=None, stop=None):
"""
Fetch acq and gui history of a star
:param agasc_id: AGASC id
:param start: start of optional time filter (>=) (Chandra.Time compatible)
:param stop: stop time of optional time filter (<) (Chandra.time compatible)
:returns: 2 lists, first of acq attempts, second of guide attempts
"""
acq_table = get_acq_data(agasc_id)
gui_table = get_gui_data(agasc_id)
if start is not None:
acq_table = [s for s in acq_table if s['date'] >= DateTime(start).date]
gui_table = [s for s in gui_table if s['date'] >= DateTime(start).date]
if stop is not None:
acq_table = [s for s in acq_table if s['date'] < DateTime(stop).date]
gui_table = [s for s in gui_table if s['date'] < DateTime(stop).date]
return acq_table, gui_table
|
Python
| 0.998902
|
@@ -1846,24 +1846,29 @@
ot_track'%5D =
+ (1 -
s%5B'f_track'
@@ -1868,16 +1868,17 @@
_track'%5D
+)
* 100.0
|
6d63ab2ef50512a794948c86cf1ce834b59acd90
|
Add str method for map area
|
maps/models.py
|
maps/models.py
|
import json
from django.conf import settings
# from django.contrib.postgres.fields import JSONField
from django.db import models
JSONTextField = models.TextField
# See
# https://developers.google.com/maps/documentation/javascript/reference?hl=en#LatLngBoundsLiteral
class LatLngBounds(models.Model):
east = models.FloatField()
north = models.FloatField()
south = models.FloatField()
west = models.FloatField()
class MapArea(models.Model):
title = models.CharField(max_length=100, blank=True)
display_area = models.ForeignKey(LatLngBounds, related_name='+')
# This data should be obviously moved to a storage suitable
# for blobs. Keeping in here to ease deployment. Migration is
# an exercise for the happy future developer.
# The data stored is image in PNG or JPEG format.
contour_map_image = models.BinaryField()
# Which part of the Earth the rectangular stored
# in contour_map_image represents.
contour_map_reference = models.ForeignKey(LatLngBounds, related_name='+')
class Question(models.Model):
map_area = models.ForeignKey(MapArea)
max_duration = models.DurationField()
creator = models.ForeignKey(settings.AUTH_USER_MODEL)
# See "JSON Objects per Question Type" for more details.
type = models.TextField()
statement_data = JSONTextField()
reference_data = JSONTextField()
class QuestionSet(models.Model):
title = models.CharField(max_length=100)
creator = models.ForeignKey(settings.AUTH_USER_MODEL)
max_duration = models.DurationField()
question_ids = JSONTextField(default=json.dumps(None))
def get_questions(self):
questions = []
for question_id in json.loads(self.question_ids):
questions.append(Question.objects.get(id=question_id))
return questions
class AnswerSet(models.Model):
student = models.ForeignKey(settings.AUTH_USER_MODEL, db_index=True)
question_set = models.ForeignKey(QuestionSet, null=True)
start_time = models.DateTimeField()
end_time = models.DateTimeField(null=True)
class Answer(models.Model):
answer_set = models.ForeignKey(AnswerSet, db_index=True)
question_set = models.ForeignKey(QuestionSet, null=True)
question = models.ForeignKey(Question, null=True)
answer_data = JSONTextField(default=json.dumps(None))
scoring_data = JSONTextField(default=json.dumps(None)) # May be recalculated
duration = models.DurationField()
submission_time = models.DateTimeField()
|
Python
| 0
|
@@ -1031,16 +1031,100 @@
e='+')%0A%0A
+ def __str__(self):%0A return self.title or 'Map area #%7B%7D'.format(self.id)%0A%0A
%0Aclass Q
|
f8fde8fd984242f75e36644d2e54c1d306c1b785
|
Remove --population=default
|
keysmith/__main__.py
|
keysmith/__main__.py
|
"""Keysmith Default Interface"""
import argparse
import math
import string
import pkg_resources
import keysmith
def cli(parser=None):
"""Parse CLI arguments and options."""
if parser is None:
parser = argparse.ArgumentParser(prog=keysmith.CONSOLE_SCRIPT)
parser.add_argument(
'-d', '--delimiter',
help='a delimiter for the samples (teeth) in the key',
default=' ',
)
parser.add_argument(
'-n', '--nsamples',
help='the number of random samples to take',
type=int,
default=3,
dest='nteeth',
)
parser.add_argument(
'-p', '--population',
help='alphanumeric, default, printable, or a path',
default='default',
)
parser.add_argument(
'--stats',
help='statistics for the key',
default=False,
action='store_true',
)
parser.add_argument(
'--version',
action='version',
version='%(prog)s {0}'.format(keysmith.__version__),
)
return parser
def main(args=None):
"""Execute CLI commands."""
if args is None:
args = cli().parse_args()
words = {
'alphanumeric': string.ascii_letters + string.digits,
'printable': string.printable,
}.get(args.population)
if words is None:
if args.population == 'default':
args.population = pkg_resources.resource_filename('keysmith', 'words.txt')
with open(args.population, 'r') as f:
words = f.read().splitlines()
key = keysmith.key(
seq=words,
nteeth=args.nteeth,
delimiter=args.delimiter,
)
print(key)
if args.stats:
print('=' * len(key))
print('characters = {characters}'.format(characters=len(key)))
print(' samples = {nteeth}'.format(nteeth=args.nteeth))
print('population = {pop}'.format(pop=len(words)))
print(' entropy {sign} {bits}b'.format(
sign='<' if len(args.delimiter) < 1 else '~',
bits=round(math.log(len(words), 2) * args.nteeth, 2),
))
if __name__ == '__main__':
main()
|
Python
| 0.000009
|
@@ -671,17 +671,8 @@
ric,
- default,
pri
@@ -707,25 +707,72 @@
default=
-'defaul
+pkg_resources.resource_filename('keysmith', 'words.tx
t'
+)
,%0A )%0A
@@ -1347,136 +1347,8 @@
ne:%0A
- if args.population == 'default':%0A args.population = pkg_resources.resource_filename('keysmith', 'words.txt')%0A
|
e2883d40a5c4bd49676a91da0a9aab54231634fe
|
Fix metric name.
|
st2reactor/st2reactor/rules/worker.py
|
st2reactor/st2reactor/rules/worker.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from kombu import Connection
from st2common import log as logging
from st2common.constants.trace import TRACE_CONTEXT, TRACE_ID
from st2common.constants import triggers as trigger_constants
from st2common.util import date as date_utils
from st2common.services import trace as trace_service
from st2common.transport import consumers
from st2common.transport import utils as transport_utils
import st2reactor.container.utils as container_utils
from st2reactor.rules.engine import RulesEngine
from st2common.transport.queues import RULESENGINE_WORK_QUEUE
from st2common.metrics.base import CounterWithTimer
from st2common.metrics.base import Timer
from st2common.metrics.base import get_driver
LOG = logging.getLogger(__name__)
class TriggerInstanceDispatcher(consumers.StagedMessageHandler):
message_type = dict
def __init__(self, connection, queues):
super(TriggerInstanceDispatcher, self).__init__(connection, queues)
self.rules_engine = RulesEngine()
def pre_ack_process(self, message):
'''
TriggerInstance from message is create prior to acknowledging the message. This
gets us a way to not acknowledge messages.
'''
trigger = message['trigger']
payload = message['payload']
# Accomodate for not being able to create a TrigegrInstance if a TriggerDB
# is not found.
trigger_instance = container_utils.create_trigger_instance(
trigger,
payload or {},
date_utils.get_datetime_utc_now(),
raise_on_no_trigger=True)
return self._compose_pre_ack_process_response(trigger_instance, message)
def process(self, pre_ack_response):
trigger_instance, message = self._decompose_pre_ack_process_response(pre_ack_response)
if not trigger_instance:
raise ValueError('No trigger_instance provided for processing.')
get_driver().inc_counter('st2.trigger.%s.processed' % (trigger_instance.trigger))
try:
# Use trace_context from the message and if not found create a new context
# and use the trigger_instance.id as trace_tag.
trace_context = message.get(TRACE_CONTEXT, None)
if not trace_context:
trace_context = {
TRACE_ID: 'trigger_instance-%s' % str(trigger_instance.id)
}
# add a trace or update an existing trace with trigger_instance
trace_service.add_or_update_given_trace_context(
trace_context=trace_context,
trigger_instances=[
trace_service.get_trace_component_for_trigger_instance(trigger_instance)
]
)
container_utils.update_trigger_instance_status(
trigger_instance, trigger_constants.TRIGGER_INSTANCE_PROCESSING)
with CounterWithTimer(key='st2.rule.processed'):
with Timer(key='st2.trigger_instance.%s.processed' % (trigger_instance.id)):
self.rules_engine.handle_trigger_instance(trigger_instance)
container_utils.update_trigger_instance_status(
trigger_instance, trigger_constants.TRIGGER_INSTANCE_PROCESSED)
except:
# TODO : Capture the reason for failure.
container_utils.update_trigger_instance_status(
trigger_instance, trigger_constants.TRIGGER_INSTANCE_PROCESSING_FAILED)
# This could be a large message but at least in case of an exception
# we get to see more context.
# Beyond this point code cannot really handle the exception anyway so
# eating up the exception.
LOG.exception('Failed to handle trigger_instance %s.', trigger_instance)
return
@staticmethod
def _compose_pre_ack_process_response(trigger_instance, message):
"""
Codify response of the pre_ack_process method.
"""
return {'trigger_instance': trigger_instance, 'message': message}
@staticmethod
def _decompose_pre_ack_process_response(response):
"""
Break-down response of pre_ack_process into constituents for simpler consumption.
"""
return response.get('trigger_instance', None), response.get('message', None)
def get_worker():
with Connection(transport_utils.get_messaging_urls()) as conn:
return TriggerInstanceDispatcher(conn, [RULESENGINE_WORK_QUEUE])
|
Python
| 0.000533
|
@@ -3775,25 +3775,24 @@
'st2.trigger
-_
instance.%25s.
|
ef3e73cb7dc530a3e322225cd95a4ef3bbb319fa
|
Fix docstring for read
|
vtki/readers.py
|
vtki/readers.py
|
"""
Contains a dictionary that maps file extensions to VTK readers
"""
import os
import vtk
import vtki
READERS = {
# Standard dataset readers:
'.vtk': vtk.vtkDataSetReader,
'.vti': vtk.vtkXMLImageDataReader,
'.vtr': vtk.vtkXMLRectilinearGridReader,
'.vtu': vtk.vtkXMLUnstructuredGridReader,
'.ply': vtk.vtkPLYReader,
'.obj': vtk.vtkOBJReader,
'.stl': vtk.vtkSTLReader,
'.vts': vtk.vtkXMLStructuredGridReader,
'.vtm': vtk.vtkXMLMultiBlockDataReader,
'.vtmb': vtk.vtkXMLMultiBlockDataReader,
# Image formats:
'.bmp': vtk.vtkBMPReader,
'.dem': vtk.vtkDEMReader,
'.dcm': vtk.vtkDICOMImageReader,
'.jpeg': vtk.vtkJPEGReader,
'.jpg': vtk.vtkJPEGReader,
'.png': vtk.vtkPNGReader,
'.pnm': vtk.vtkPNMReader,
'.slc': vtk.vtkSLCReader,
'.tiff': vtk.vtkTIFFReader,
'.tif': vtk.vtkTIFFReader,
# ExodusII files:
#.g, .e, .ex2, .ex2v2, .exo, .gen, .exoII, .exii, .0, .00, .000
'.g': vtk.vtkExodusIIReader,
'.e': vtk.vtkExodusIIReader,
'.ex2': vtk.vtkExodusIIReader,
'.ex2v2': vtk.vtkExodusIIReader,
'.exo': vtk.vtkExodusIIReader,
'.gen': vtk.vtkExodusIIReader,
'.exoii': vtk.vtkExodusIIReader,
'.exii': vtk.vtkExodusIIReader,
'.0': vtk.vtkExodusIIReader,
'.00': vtk.vtkExodusIIReader,
'.000': vtk.vtkExodusIIReader,
# Other formats:
'.byu': vtk.vtkBYUReader,
'.chemml': vtk.vtkCMLMoleculeReader,
'.cml': vtk.vtkCMLMoleculeReader,
# TODO: '.csv': vtk.vtkCSVReader, # vtkTables are currently not supported
'.facet': vtk.vtkFacetReader,
'.cas': vtk.vtkFLUENTReader,
'.dat': vtk.vtkFLUENTReader,
'.cube': vtk.vtkGaussianCubeReader,
'.res': vtk.vtkMFIXReader,
'.foam': vtk.vtkOpenFOAMReader,
'.pdb': vtk.vtkPDBReader,
'.p3d': vtk.vtkPlot3DMetaReader,
'.pts': vtk.vtkPTSReader,
'.particles': vtk.vtkParticleReader,
#TODO: '.pht': vtk.vtkPhasta??????,
#TODO: '.vpc': vtk.vtkVPIC?????,
'.xyz': vtk.vtkXYZMolReader,
}
if (vtk.vtkVersion().GetVTKMajorVersion() >= 8 and
vtk.vtkVersion().GetVTKMinorVersion() >= 2):
READERS['.sgy'] = vtk.vtkSegYReader
READERS['.segy'] = vtk.vtkSegYReader
def get_ext(filename):
"""Extract the extension of the filename"""
ext = os.path.splitext(filename)[1].lower()
return ext
def get_reader(filename):
"""Gets the corresponding reader based on file extension and instantiates it
"""
ext = get_ext(filename)
return READERS[ext]() # Get and instantiate the reader
def standard_reader_routine(reader, filename, attrs=None):
"""Use a given reader from the ``READERS`` mapping in the common VTK reading
pipeline routine.
Parameters
----------
reader : vtkReader
Any instantiated VTK reader class
filename : str
The string filename to the data file to read.
attrs : dict, optional
A dictionary of attributes to call on the reader. Keys of dictionary are
the attribute/method names and values are the arguments passed to those
calls. If you do not have any attributes to call, pass ``None`` as the
value.
"""
if attrs is None:
attrs = {}
if not isinstance(attrs, dict):
raise TypeError('Attributes must be a dictionary of name and arguments.')
reader.SetFileName(filename)
# Apply any attributes listed
for name, args in attrs.items():
attr = getattr(reader, name)
if args is not None:
if not isinstance(args, (list, tuple)):
args = [args]
attr(*args)
else:
attr()
# Perform the read
reader.Update()
return vtki.wrap(reader.GetOutputDataObject(0))
def read_legacy(filename):
"""Use VTK's legacy reader to read a file"""
reader = vtk.vtkDataSetReader()
reader.SetFileName(filename)
# Ensure all data is fetched with poorly formated legacy files
reader.ReadAllScalarsOn()
reader.ReadAllColorScalarsOn()
reader.ReadAllNormalsOn()
reader.ReadAllTCoordsOn()
reader.ReadAllVectorsOn()
# Perform the read
reader.Update()
return reader.GetOutputDataObject(0)
def read(filename, attrs=None):
"""This will read any VTK file! It will figure out what reader to use
then wrap the VTK object for use in ``vtki``.
Parameters
----------
attrs :
A string list of reader attributes to call. If specified, the standard
reading routine will be used with each attribute specified called.
"""
filename = os.path.abspath(os.path.expanduser(filename))
ext = get_ext(filename)
# From the extension, decide which reader to use
if attrs is not None:
reader = get_reader(filename)
return standard_reader_routine(reader, filename, attrs=attrs)
elif ext in '.vti': # ImageData
return vtki.UniformGrid(filename)
elif ext in '.vtr': # RectilinearGrid
return vtki.RectilinearGrid(filename)
elif ext in '.vtu': # UnstructuredGrid
return vtki.UnstructuredGrid(filename)
elif ext in ['.ply', '.obj', '.stl']: # PolyData
return vtki.PolyData(filename)
elif ext in '.vts': # StructuredGrid
return vtki.StructuredGrid(filename)
elif ext in ['.vtm', '.vtmb']:
return vtki.MultiBlock(filename)
elif ext in ['.vtk']:
# Attempt to use the legacy reader...
output = vtki.wrap(read_legacy(filename))
if output is None:
raise AssertionError('No output when using VTKs legacy reader')
return output
else:
# Attempt find a reader in the readers mapping
try:
reader = get_reader(filename)
return standard_reader_routine(reader, filename)
except KeyError:
pass
raise IOError("This file was not able to be automatically read by vtki.")
def load_texture(filename):
"""Loads a ``vtkTexture`` from an image file."""
filename = os.path.abspath(os.path.expanduser(filename))
try:
# intitialize the reader using the extnesion to find it
reader = get_reader(filename)
except KeyError:
# Otherwise, use the imageio reader
return vtki.numpy_to_texture(imageio.imread(filename))
reader.SetFileName(filename)
reader.Update()
return vtki.image_to_texture(reader.GetOutputDataObject(0))
|
Python
| 0.000001
|
@@ -4371,87 +4371,104 @@
rs :
-%0A A string list of reader attributes to call. If specified, the standard
+ dict, optional%0A A dictionary of attributes to call on the reader. Keys of dictionary are
%0A
@@ -4476,73 +4476,172 @@
-reading routine will be used with each attribute specified called
+the attribute/method names and values are the arguments passed to those%0A calls. If you do not have any attributes to call, pass %60%60None%60%60 as the%0A value
.%0A
|
deaf4bac5ccc04b23cb340dcd1514ec967260db1
|
Fix session code in version.py
|
src/lesson/version.py
|
src/lesson/version.py
|
# lesson/version.py
#
# This file is part of LESSON. LESSON is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2 or later.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright (C) 2012 Jonathan Dieter <jdieter@lesbg.com>
from model.database import Version
import subprocess, os.path
class VersionCheck(object):
"""
Version is a virtual class that will check whether the database version
of a module matches the current version of the module
If the database version is less than the module version, Version will
attempt to run any update files in <script_dir> in the form of
(uuid)-update-(old version)-(new version) when auto_update is True
If the versions differ by more than one, the class will check for the
existence of intermediate update files and run them if applicable
Child classes *must* specify uuid and module version (version). Child
classes may also specify update script directory (script_dir) for automatic
database updates, as well as turning off automatic database updates by
setting auto_update to False
You do not need to actually run the check, as the backend will do so
automatically when it starts up.
"""
db = None
uuid = None
script_dir = None
version = None
auto_update = True
extension = 'sh'
def __init__(self, db, script_dir=None):
if self.__class__.__name__ == "VersionCheck":
raise ValueError("'VersionCheck' is a virtual class and should not be instantiated directly")
self.db = db
self.script_dir = script_dir
def check_file(self, ufile):
"""
Check whether update file exists. Return True if file exists, False
if it doesn't. This function may be overridden if desired
"""
if os.path.exists(ufile):
return True
return False
def run_file(self, ufile):
"""
Run an update file and return True if update was successful, False if
it wasn't successful. This function may be overridden if desired
"""
try:
messages = subprocess.check_output(ufile, stderr=subprocess.STDOUT, shell=True) #@UnusedVariable
return (True, None)
except:
return (False, u"Error running %s" % (ufile,))
def __check_file(self, start_ver, stop_ver):
ufile = os.path.join("%s" % (self.script_dir,),
"%s-update-%i-%i.%s" % (self.uuid, start_ver, stop_ver, self.extension))
# Check whether update exists for start_ver -> stop_ver
if self.check_file(ufile):
return [ufile]
# Version jump is > 1, so split version jump in half and recursively
# check for updates for each half
elif start_ver + 1 < stop_ver:
filelist = []
middle = ((stop_ver - start_ver) / 2) + start_ver
for item in self.__check_file(start_ver, middle):
if item is None:
return [None]
filelist.append(item)
for item in self.__check_file(middle, stop_ver):
if item is None:
return [None]
filelist.append(item)
return filelist
# No updates for this start_ver -> stop_ver, so return [None]
else:
return [None]
def check_version(self):
"""
Returns a tuple of (boolean, string|None) where the boolean is
whether the versions match and the string is the error message
if they don't match
"""
print "Checking..."
if self.db is None:
raise ValueError("Database variable 'db' isn't set")
if self.uuid is None:
raise ValueError("UUID variable 'uuid' isn't set")
cur_ver = self.db.session.query(Version).get(self.uuid)
if cur_ver.Version > self.version:
return (False, u"The %s version in the database is %i, while our version is %i. Please upgrade module %s" % (cur_ver.Type, cur_ver.Version, self.version, cur_ver.Type))
elif cur_ver.Version < self.version:
script_files = [None]
if self.script_dir is not None:
script_files = self.__check_file(cur_ver.Version, self.version)
if script_files == [None]:
return (False, u"The %s version in the database is %i, while our version is %i. Please manually upgrade the database for %s" % (cur_ver.Type, cur_ver.Version, self.version, cur_ver.Type))
for ufile in script_files:
retval, error = self.run_file(ufile)
if not retval:
return (False, error)
return (True, None)
class PyVersionCheck(VersionCheck):
extension = 'py'
def __init__(self, db, script_dir):
if self.__class__.__name__ == "PyVersionCheck":
raise ValueError("'PyVersionCheck' is a virtual class and should not be instantiated directly")
super(PyVersionCheck, self).__init__(db, script_dir)
def run_file(self, ufile):
import imp
uname = os.path.basename(ufile)
if ufile.endswith('.py'):
uname = uname[:-3]
try:
f = open(ufile, 'U')
except:
return (False, u"Error opening %s" % (ufile,))
try:
upgrade = imp.load_module('upgrade', f, ufile, ('.py', 'U', 1))
upgrade.upgrade(self.db)
cur_ver = self.db.session.query(Version).get(self.uuid)
if cur_ver.Version != self.version:
return (False, u"Module %s didn't upgrade version in database" % (ufile,))
return True
except:
return (False, u"Error loading update module %s" % (ufile,))
finally:
f.close()
|
Python
| 0
|
@@ -4386,39 +4386,39 @@
t set%22)%0A
-cur_ver
+session
= self.db.sessi
@@ -4404,32 +4404,67 @@
ssion = self.db.
+create_session()%0A cur_ver =
session.query(Ve
@@ -4477,32 +4477,56 @@
.get(self.uuid)%0A
+ session.close()%0A
if cur_v
@@ -6130,31 +6130,31 @@
-cur_ver
+session
= self.db.s
@@ -6152,16 +6152,55 @@
self.db.
+create_session()%0A cur_ver =
session.
@@ -6221,32 +6221,60 @@
.get(self.uuid)%0A
+ session.close()%0A
if c
|
646548dff38ea476a35462cf51ba028e3275748a
|
Fix some undefined reference and attribute errors in the deallocate simprocedure
|
simuvex/procedures/cgc/deallocate.py
|
simuvex/procedures/cgc/deallocate.py
|
import simuvex
class deallocate(simuvex.SimProcedure):
#pylint:disable=arguments-differ
def run(self, addr, length): #pylint:disable=unused-argument
# return code (see deallocate() docs)
r = self.state.se.ite_cases((
(addr % 0x1000 != 0, self.state.cgc.EINVAL),
(length == 0, self.state.cgc.EINVAL),
(self.state.cgc.addr_invalid(addr), self.state.cgc.EINVAL),
(self.state.cgc.addr_invalid(addr + length), self.state.cgc.EINVAL),
), self.state.se.BVV(0, self.state.arch.bits))
return r
|
Python
| 0.000001
|
@@ -9,16 +9,91 @@
imuvex%0A%0A
+import logging%0Al = logging.getLogger(%22simuvex.procedures.cgc.deallocate%22)%0A%0A
class de
@@ -647,16 +647,249 @@
bits))%0A%0A
+ aligned_length = ((length + 0xfff) / 0x1000) * 0x1000%0A%0A # TODO: not sure if this is valuable until we actually model CGC%0A # allocations accurately%0A # self.state.memory.unmap_region(addr, aligned_length)%0A%0A
|
70c520d3ff882b499febfe021d02108f79171773
|
Fix ST2(python26) compatibility.
|
OmniMarkupLib/Renderers/MarkdownRenderer.py
|
OmniMarkupLib/Renderers/MarkdownRenderer.py
|
from .base_renderer import *
import re
import markdown
@renderer
class MarkdownRenderer(MarkupRenderer):
FILENAME_PATTERN_RE = re.compile(r'\.(md|mkdn?|mdwn|mdown|markdown|litcoffee)$')
YAML_FRONTMATTER_RE = re.compile(r'\A---\s*\n.*?\n?^---\s*$\n?', re.DOTALL | re.MULTILINE)
def load_settings(self, renderer_options, global_setting):
super(MarkdownRenderer, self).load_settings(renderer_options, global_setting)
if 'extensions' in renderer_options:
extensions = set(renderer_options['extensions'])
else:
# Fallback to the default GFM style
extensions = {'tables', 'strikeout', 'fenced_code', 'codehilite'}
if global_setting.mathjax_enabled:
if 'mathjax' not in extensions:
extensions.add('mathjax')
if 'smartypants' in extensions:
extensions.remove('smartypants')
extensions.add('smarty')
self.extensions = list(extensions)
@classmethod
def is_enabled(cls, filename, syntax):
if syntax == "text.html.markdown":
return True
return cls.FILENAME_PATTERN_RE.search(filename) is not None
def render(self, text, **kwargs):
text = self.YAML_FRONTMATTER_RE.sub('', text)
return markdown.markdown(text, output_format='html5',
extensions=self.extensions)
|
Python
| 0.000001
|
@@ -504,12 +504,8 @@
s =
-set(
rend
@@ -530,17 +530,16 @@
nsions'%5D
-)
%0A
@@ -622,9 +622,9 @@
s =
-%7B
+%5B
'tab
@@ -673,9 +673,46 @@
ite'
-%7D
+%5D%0A extensions = set(extensions)
%0A
|
f5c9b1a31226d364426e0d435d64caaf6b432262
|
allow pickle for t-king
|
king/tking-server.py
|
king/tking-server.py
|
#!/usr/bin/python
import exceptions, sys, os, socket, rpyc, pickle
import dns.query, dns.rdatatype, dns.exception
from twisted.internet import reactor
from twisted.names import dns as twisted_dns
from twisted.names import server
from datetime import datetime, timedelta
from random import randrange
from threading import Thread
from time import sleep
from datetime import datetime
from rpyc.utils.server import ThreadedServer
from daemon import Daemon
from sysping import ping
myHostName = socket.gethostname().replace('.', '---')
myIP = socket.gethostbyname(socket.gethostname()).replace('.', '---')
myAddr = '%s---%s.nbapuns.com' % (myIP, myHostName)
outstandingQueries = {}
returnedQueries = {}
fullQueries = {}
###############
# RPC Service #
###############
class TurboKingService(rpyc.Service):
def on_connect(self):
pass
def on_disconnect(self):
pass
def exposed_test(self):
return 1
def exposed_exit(self):
exit(0)
def generate_query_id(self):
query_id = randrange(0, sys.maxint)
while query_id in outstandingQueries or query_id in returnedQueries:
query_id = randrange(0, sys.maxint)
return query_id
def exposed_get_ping(self, ip1):
code, pingTimes = ping(ip1)
if len(pingTimes) > 0:
avg_ping_rtt = str(sum(pingTimes)/len(pingTimes))
mil, mic = avg_ping_rtt.split('.')
mil = int(mil)
mic = int(mic)
ping_time = timedelta(milliseconds = mil, microseconds=mic)
return ping_time, (10 - len(pingTimes)) # number of dropped pings
else:
return None, 10 # 10 dropped pings
def exposed_get_latency(self, t1, ip1, t2, ip2):
query_id = self.generate_query_id()
outstandingQueries[query_id] = (t2, ip2)
# Start DNS Client
end_time = dnsClientQuery(query_id, ip1)
try:
print '...1'
start_time, address = returnedQueries[query_id]
print '...2'
del returnedQueries[query_id]
print '...3'
ping_time = self.exposed_get_ping(ip1)
print '...4'
return end_time, start_time, ping_time, address
except Exception, e:
print "End error:", e
return None, None, None, None
def exposed_get_k(self, t1, ip1):
query_id = self.generate_query_id()
outstandingQueries[query_id] = (t1, ip1)
dnsClientQuery(query_id, ip1, query_type="kvalue", timeout=20)
try:
k = returnedQueries[query_id]
del returnedQueries[query_id]
return k
except Exception, e:
print e
return None
def exposed_full_response(self, query_id, msg):
fullQueries[query_id] = msg
def exposed_full_test(self, t1, ip1, t2, ip2):
query_id = self.generate_query_id()
outstandingQueries[query_id] = (t2, ip2)
dnsClientQuery(query_id, ip1, query_type="full")
# Wait 10 seconds to get a response from the remote server
sleep(10)
try:
return fullQueries[query_id]
except Exception, e:
print "Did Not Recieve RPC from Last Hop"
return "Did Not Recieve RPC from Last Hop"
##########
# Client #
##########
def dnsClientQuery(query_id, target1_ip, query_type="latency", timeout=5):
addr = "%s.%i.%s" % (query_type, query_id, myAddr)
print addr
query = dns.message.make_query(addr, dns.rdatatype.A)
try:
response = dns.query.udp(query, target1_ip, timeout=timeout)
end_time = datetime.now()
print "Recieved Response:", response
except dns.exception.Timeout, e:
end_time = None
print "Error:", e
return end_time
##############
# DNS Server #
##############
class DNSServerFactory(server.DNSServerFactory):
def __init__(self, authorities=None, caches=None, clients=None, verbose=0):
print 'Starting Server'
server.DNSServerFactory.__init__(self)
def handleQuery(self, message, protocol, address):
query_time = datetime.now()
print "Recieved Query", address, message
try:
encoded_url, query_id, origin, query_type = self.processMessage(message)
query_id = int(query_id)
print encoded_url
if query_type == 'kvalue':
if query_id not in returnedQueries:
returnedQueries[query_id] = 1
else:
returnedQueries[query_id] += 1
elif query_type == 'latency' or query_type == 'full':
returnedQueries[query_id] = (query_time, address)
target2, target2_ip = outstandingQueries[query_id]
del outstandingQueries[query_id]
response = self.createReferral(encoded_url, target2, target2_ip, protocol, message, address)
return server.DNSServerFactory.gotResolverResponse(*response)
except Exception, e:
print "Bad Request", e
return None
def processMessage(self, message):
query = message.queries[0]
encoded_url = query.name.name
query_type, query_id, origin = encoded_url.split('.')[0:3]
return encoded_url, query_id, origin, query_type
def createReferral(self, encoded_url, target2, target2_ip, protocol, message, address):
NS = twisted_dns.RRHeader(name=encoded_url, type=twisted_dns.NS, cls=twisted_dns.IN, ttl=0, auth=True,
payload=twisted_dns.Record_NS(name=target2, ttl=0))
A = twisted_dns.RRHeader(name=target2, type=twisted_dns.A, cls=twisted_dns.IN, ttl=0,
payload=twisted_dns.Record_A(address=target2_ip, ttl=None))
ans = []
auth = [NS]
add = [A]
return (self, (ans, auth, add), protocol, message, address)
def startDnsServer():
# Setup DNS Server
factory = DNSServerFactory()
protocol = twisted_dns.DNSDatagramProtocol(factory)
try:
udp = reactor.listenUDP(53, protocol)
tcp = reactor.listenTCP(53, factory)
# Start DNS Server
reactor.run()
except:
print 'Could Not Bind/Start Reactor'
exit(1)
udp.stopListening()
tcp.stopListening()
print "Reactor Stopped"
class TkingServerDaemon(Daemon):
def run(self):
# Start RPYC
t = ThreadedServer(TurboKingService, hostname='localhost', port = 18861)
dnsClient = Thread(target=t.start)
dnsClient.daemon = True
dnsClient.start()
startDnsServer()
if __name__ == "__main__":
daemon = TkingServerDaemon('/tmp/tking-daemon.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
print 'Started Server'
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
elif 'normal' == sys.argv[1]:
daemon.run()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart" % sys.argv[0]
sys.exit(2)
|
Python
| 0.000001
|
@@ -471,16 +471,74 @@
t ping%0A%0A
+rpyc.core.protocol.DEFAULT_CONFIG%5B'allow_pickle'%5D = True%0A%0A
myHostNa
|
b7523a8bbac9fdce7d97afda32b9a7982f00a6d0
|
Update Exp 7_2
|
examples/sparkfun_redbot/sparkfun_experiments/Exp7_2_DriveDistance.py
|
examples/sparkfun_redbot/sparkfun_experiments/Exp7_2_DriveDistance.py
|
"""
Exp7_2_DriveDistance -- RedBot Experiment 7.2
In an earlier experiment, we used a combination of speed and time to
drive a certain distance. Using the encoders, we can me much more accurate.
In this example, we will show you how to setup your robot to drive a certain
distance regardless of the motorPower.
This sketch was written by SparkFun Electronics, with lots of help from
the Arduino community. This code is completely free for any use.
8 Oct 2013 M. Hord
Revised, 31 Oct 2014 B. Huang
"""
from pymata_aio.pymata3 import PyMata3
from pymata_aio.constants import Constants
from library.redbot import RedBotMotors,RedBotEncoder
import math
# This line "includes" the RedBot library into your sketch.
# Provides special objects, methods, and functions for the RedBot.
board = PyMata3()
encoders = RedBotEncoder(board)
motors = RedBotMotors(board)
encoder_pin_left = 16
encoder_pin_right = 10
BUTTON_PIN = 12
counts_per_rev = 192 # 4 pairs of N-S x 48:1 gearbox = 192 ticks per wheel rev
wheel_diam = 2.56 # diam = 65mm / 25.4 mm/in
wheel_circ = math.pi * wheel_diam
# variables used to store the left and right encoder counts.
left_count = 0
right_count = 0
def setup():
board.set_pin_mode(BUTTON_PIN, Constants.INPUT)
board.digital_write(BUTTON_PIN, 1) # writing pin high sets the pull-up resistor
def loop():
# wait for a button press to start driving.
if board.digital_read(BUTTON_PIN) == 0:
board.sleep(0.05)
if board.digital_read(BUTTON_PIN) == 0:
driveDistance(12, 150) # drive 12 inches at motor_power = 150
def driveDistance(distance, motor_power):
global left_count
global right_count
left_count= 0
right_count = 0
numRev = float(distance/wheel_circ)
# debug
print("drive_distance() {} inches at {} power".format(distance,motor_power))
print(numRev)
encoders.clear_enc() # clear the encoder count
motors.drive(motor_power)
# TODO: Find the 'proper' way to access these variables
iteration = 0
while right_count< numRev*counts_per_rev:
left_count = encoders.get_ticks(encoder_pin_left)
right_count = encoders.get_ticks(encoder_pin_right)
print("{} {}".format(left_count,right_count)) # stores the encoder count to a variable
# print(numRev*counts_per_rev)
board.sleep(0.01)
# if either left or right motor are more than 5 revolutions, stop
motors.brake()
if __name__ == "__main__":
setup()
while True:
loop()
board.sleep(.01)
# print("Encoder Read: {}".format(board.encoder_read(encoder_pin_right)))
|
Python
| 0
|
@@ -668,261 +668,234 @@
ath%0A
-# This line %22includes%22 the RedBot library into your sketch.%0A# Provides special objects, methods, and functions for the RedBot.%0A%0A%0Aboard = PyMata3()%0Aencoders = RedBotEncoder(board)%0Amotors = RedBotMotors(board)%0Aencoder_pin_left = 16%0Aencoder_pin_right = 10%0A
+%0ACOM_PORT = None # Use automatic com port detection (the default)%0A#COM_PORT = %22COM10%22 # Manually specify the com port (optional)%0A%0A%0Aboard = PyMata3(com_port=COM_PORT)%0Amotors = RedBotMotors(board)%0Aencoders = RedBotEncoder(board)
%0ABUT
@@ -911,23 +911,21 @@
12%0A
-%0Acounts_per_rev
+COUNT_PER_REV
= 1
@@ -992,19 +992,18 @@
rev%0A
-%0Awheel_diam
+WHEEL_DIAM
= 2
@@ -1033,26 +1033,26 @@
4 mm/in%0A
-wheel_circ
+WHEEL_CIRC
= math.
@@ -1060,110 +1060,81 @@
i *
-wheel_diam%0A%0A# variables used to store the left and right encoder counts.%0Aleft_count = 0%0Aright_count
+WHEEL_DIAM%0Aprint(WHEEL_CIRC)%0A%0AENCODER_PIN_LEFT = 16%0AENCODER_PIN_RIGHT
=
+1
0%0A%0A%0A
@@ -1285,17 +1285,16 @@
istor%0A%0A%0A
-%0A
def loop
@@ -1393,86 +1393,8 @@
0:%0A
- board.sleep(0.05)%0A if board.digital_read(BUTTON_PIN) == 0:%0A
@@ -1508,53 +1508,8 @@
r):%0A
- global left_count%0A global right_count%0A
@@ -1553,28 +1553,23 @@
num
-R
+_r
ev =
-float(
distance
/whe
@@ -1568,20 +1568,21 @@
ance
-/wheel_circ)
+ / WHEEL_CIRC
%0A%0A
@@ -1640,16 +1640,39 @@
%7B%7D power
+ for %7B:.2f%7D revolutions
%22.format
@@ -1681,16 +1681,17 @@
istance,
+
motor_po
@@ -1697,30 +1697,20 @@
ower
-))%0A%0A%0A print(numR
+, num_r
ev)
+)%0A
%0A
@@ -1792,85 +1792,8 @@
er)%0A
- # TODO: Find the 'proper' way to access these variables%0A iteration = 0
%0A
@@ -1814,33 +1814,35 @@
ount
+
%3C num
-Rev*counts_per_rev:%0A
+_rev * COUNT_PER_REV:
%0A
@@ -1882,24 +1882,24 @@
cks(
-encoder_pin_left
+ENCODER_PIN_LEFT
)%0A
@@ -1937,33 +1937,33 @@
t_ticks(
-encoder_pin_right
+ENCODER_PIN_RIGHT
)%0A
@@ -1982,16 +1982,50 @@
%7B%7D
+ stop once over %7B:.0f%7D ticks
%22.format
@@ -2036,16 +2036,17 @@
t_count,
+
right_co
@@ -2052,90 +2052,34 @@
ount
-)) # stores the encoder count to a variable%0A # print(numRev*counts_per_rev
+, num_rev * COUNT_PER_REV)
)%0A
@@ -2102,82 +2102,11 @@
p(0.
-0
1)%0A
- # if either left or right motor are more than 5 revolutions, stop
%0A
@@ -2122,16 +2122,17 @@
rake()%0A%0A
+%0A
if __nam
|
b58eaf077ff748c3604aa7520a956b03cdce6995
|
Add libevent_home command line parameter
|
site_scons/community/command_line.py
|
site_scons/community/command_line.py
|
from SCons.Script import *
## Command Line Variables
#
# Setup all of the command line variables across all of the products and
# platforms. NOTE: if a path is configurable and will be created in the
# build process then the validation MUST be PathAccept
def get_command_line_opts( host, products, VERSIONS ):
opts = Variables('omama.conf')
opts.format = '\n%s: %s\n Default: %s [ %s ]\n'
opts.AddVariables(
# Must be #install by default, otherwise when it comes to cleaning the
# install folder, can remove whole tree
PathVariable('prefix', 'Installation prefix', '#openmama_install_%s' % (VERSIONS['mama']['releaseString']),
PathVariable.PathAccept),
PathVariable('blddir', 'Object directory', '#objdir',
PathVariable.PathAccept),
PathVariable('java_home', 'JAVA Home folder', os.environ.get('JAVA_HOME',None) , PathVariable.PathAccept),
PathVariable('logfile', 'Output Log File', 'scons.log', PathVariable.PathAccept),
BoolVariable('verbose','Whether to print verbose output',True),
BoolVariable('package','Whether to tar up the installation directory',False),
BoolVariable('with_docs','Build with documentation',False),
BoolVariable('with_unittest','Build with gunit tests',False),
BoolVariable('with_testtools','Build with test tools',False),
BoolVariable('with_examples','Build with test tools',True),
BoolVariable('entitled','Whether the build is entitled or unentitled',False),
PathVariable('gtest_home','Path to Google Test home',None, PathVariable.PathIsDir),
ListVariable('middleware','Middleware(s) to be compiled in', 'avis', names = ['avis', 'qpid'] ),
)
if host['os'] == 'Windows':
opts.AddVariables(
ListVariable( 'buildtype', 'Windows Build type e.g dynamic', 'all', names = ['dynamic','dynamic-debug','static','static-debug'] ),
PathVariable('avis_home', 'Path to Avis',
'c:\\avis', PathVariable.PathAccept),
PathVariable('qpid_home', 'Path to QPID Proton Libraries',
'c:\\proton', PathVariable.PathAccept),
EnumVariable('vsver','Visual Studio Version to use', '10.0',
allowed_values=('8.0','9.0','10.0')),
EnumVariable('product', 'Product to be built', 'mamda',
allowed_values=( products )),
EnumVariable('dotnet_version', 'Dotnet Version used to determine framework directory', '2.0',
allowed_values=('1.0','2.0', '4.0')),
PathVariable('dotnet_framework', 'Path to desired dotnet framework', None,
PathVariable.PathIsDir),
)
if host['os'] == 'Linux':
opts.AddVariables(
PathVariable('avis_home','Path to Avis', '/usr/local/', PathVariable.PathIsDir),
PathVariable('qpid_home','Path to QPID Proton Libraries',
'/usr/local/', PathVariable.PathIsDir),
PathVariable('cache_dir','Path to object cache', None, PathVariable.PathIsDir),
EnumVariable('product', 'Product to be built', 'mamda',
#mamda all is a windows only build
allowed_values=( [ x for x in products if x != "mamdaall" ] )),
)
return opts
|
Python
| 0.000001
|
@@ -2720,32 +2720,172 @@
ble.PathIsDir),%0A
+ PathVariable('libevent_home', 'Path to libevent Libraries',%0A 'c:%5C%5Clibevent', PathVariable.PathAccept),%0A
)%0A%0A i
|
f0f542e0a9afb1eb849ce2f9e402006658f31f5e
|
Deshace cambios del commit anterior
|
webapp/forms.py
|
webapp/forms.py
|
from decimal import Decimal as D
from django import forms
from django.utils.translation import ugettext_lazy as _
from core import models
class CreatePerson(forms.ModelForm):
class Meta:
model = models.Person
fields = ('name', 'surname',)
widgets = {
'name': forms.TextInput(),
'surname': forms.TextInput(),
}
class EditPerson(forms.ModelForm):
class Meta:
model = models.Person
fields = (
'name', 'surname', 'birthday',
'id_number', 'ss_number',
'phone_number', 'mobile_number', 'email',
'address_street', 'address_locality', 'postal_code', 'address_region', 'address_country',
'health_warnings', 'comment',
)
widgets = {
'name': forms.TextInput(),
'surname': forms.TextInput(),
'id_number': forms.TextInput(),
'ss_number': forms.TextInput(),
'phone_number': forms.TextInput(),
'mobile_number': forms.TextInput(),
'address_locality': forms.TextInput(),
'postal_code': forms.TextInput(),
'address_region': forms.TextInput(),
'address_country': forms.TextInput(),
}
class RecipientCreate(forms.ModelForm):
class Meta:
model = models.Recipient
fields = 'person',
widgets = {
'person': forms.HiddenInput(),
}
class RecipientEdit(forms.ModelForm):
class Meta:
model = models.Recipient
fields = 'category', 'courses', 'sibling',
class VolunteerCreate(forms.ModelForm):
class Meta:
model = models.Volunteer
fields = 'person',
widgets = {
'person': forms.HiddenInput(),
}
class VolunteerEdit(forms.ModelForm):
class Meta:
model = models.Volunteer
fields = 'lack_of_sexual_offenses_date_certificate', 'comment',
widgets = {'lack_of_sexual_offenses_date_certificate': forms.DateInput(), }
class EventCreate(forms.ModelForm):
class Meta:
model = models.Event
fields = ('event_name', 'event_start', 'event_end')
widgets = {
'event_name': forms.TextInput(),
'event_start': forms.DateField(),
'event_end': forms.DateField(),
}
class EventEdit(forms.ModelForm):
class Meta:
model = models.Event
fields = ('event_name', 'event_start', 'event_end', 'comment')
widgets = {
'event_name': forms.TextInput(),
'event_start': forms.DateField(),
'event_end': forms.DateField(),
}
class MemberCreate(forms.ModelForm):
membership_fee = forms.DecimalField(label=_('Cuota de membresía'))
class Meta:
model = models.Member
fields = 'person',
widgets = {
'person': forms.HiddenInput(),
}
def save(self, *args, **kwargs):
membership_data = {
'membership_fee': self.cleaned_data['membership_fee']
}
self.instance.membership = models.Membership(**membership_data)
return super().save(*args, **kwargs)
class MemberEdit(forms.ModelForm):
class Meta:
model = models.Member
fields = ('category', 'id_card_status', 'ss_card_status', 'dpa_status', 'photo_status', 'card_status', 'bursary', 'photo')
class MembershipCreate(forms.ModelForm):
class Meta:
model = models.Membership
fields = 'type_of_membership', 'payment_status', 'membership_fee', 'membership_status',
widgets = {
'payment_status': forms.HiddenInput(),
}
class MembershipEdit(forms.ModelForm):
class Meta:
model = models.Membership
fields = ('type_of_membership', 'payment_status', 'membership_fee', 'membership_status')
class CustodianEdit(forms.ModelForm):
class Meta:
model = models.Custodian
fields = 'category', 'authorized_signature', 'emergency_contact',
widgets = {
'authorized_signature': forms.TextInput(),
'emergency_contact': forms.TextInput(),
}
class GroupCreate(forms.ModelForm):
class Meta:
model = models.Group
fields = ('group_name',)
widgets = {
'group_name': forms.TextInput(),
}
class GroupEdit(forms.ModelForm):
class Meta:
model = models.Group
fields = ('group_name',)
class ProjectCreate(forms.ModelForm):
class Meta:
model = models.Project
fields = ('project_name',)
widgets = {
'project_name': forms.TextInput(),
}
class ProjectEdit(forms.ModelForm):
class Meta:
model = models.Project
fields = ('project_name',)
class NewIndividualMember(forms.Form):
name = forms.CharField(label=_('Nombre'))
surname = forms.CharField(label=_('Apellidos'))
phone = forms.CharField(label=_('Teléfono'))
adress = forms.CharField(label=_('Dirección'))
mail = forms.EmailField(label=_('Correo electrónico'))
id_number = forms.CharField(label='DNI')
membership_fee = D('15.00')
def execute(self):
cleaned_data = self.cleaned_data
membership = models.Membership.objects.create(membership_fee=self.membership_fee)
person = models.Person.objects.create(
name=cleaned_data['name'],
surname=cleaned_data['surname'],
phone_number=cleaned_data['phone'],
address_street=cleaned_data['adress'],
id_number=cleaned_data['id_number'],
email=cleaned_data['mail'])
models.Member.objects.create(person=person, membership=membership)
return membership
class NewFamilyMember(forms.Form):
name1 = forms.CharField(label=_('Nombre'))
phone1 = forms.CharField(label=_('Teléfono'))
surname1 = forms.CharField(label=_('Apellidos'))
id_number1 = forms.CharField(label='DNI')
mail1 = forms.CharField(label=_('Correo electrónico'))
name2 = forms.CharField(label=_('Nombre'))
phone2 = forms.CharField(label=_('Teléfono'))
surname2 = forms.CharField(label=_('Apellidos'))
id_number2 = forms.CharField(label='DNI')
mail2 = forms.CharField(label=_('Correo electrónico'))
name3 = forms.CharField(label=_('Nombre'))
phone3 = forms.CharField(label=_('Teléfono'))
surname3 = forms.CharField(label=_('Apellidos'))
id_number3 = forms.CharField(label='DNI')
mail3 = forms.CharField(label=_('Correo electrónico'))
name4 = forms.CharField(label=_('Nombre'))
phone4 = forms.CharField(label=_('Teléfono'))
surname4 = forms.CharField(label=_('Apellidos'))
id_number4 = forms.CharField(label='DNI')
mail4 = forms.CharField(label=_('Correo electrónico'))
membership_fee = D('40.00')
def execute(self):
cleaned_data = self.cleaned_data
membership = models.Membership.objects.create(membership_fee=self.membership_fee)
for i in range(1, 5):
person = models.Person.objects.create(
name=cleaned_data['name' + str(i)],
surname=cleaned_data['surname' + str(i)],
phone_number=cleaned_data['phone' + str(i)],
id_number=cleaned_data['id_number' + str(i)],
email=cleaned_data['mail' + str(i)],
)
models.Member.objects.create(person=person, membership=membership)
return membership
|
Python
| 0
|
@@ -3489,76 +3489,15 @@
= '
-type_of_membership', 'payment_status', 'membership_fee', 'membership
+payment
_sta
|
686fe1f0318237e37b3c5f0fdb99cf02be3fe038
|
make server ip a variable
|
misc/build-windows.py
|
misc/build-windows.py
|
#!/usr/bin/env python
# start up windows vm
# some script should start on bootup and try to connect back to this script (through some port)
# this script should send commands for building the tree and creating the exe
# could also have a simple file transfer mechanism so I can get the exe without having to muck around with ftp or whatever
port = 15421
quit_message = '**quit**'
# higher numbers of verbose output more stuff
verbose = 1
def log_debug(str, level = 2):
if verbose >= level:
print str
def log_info(str):
log_debug(str, 1)
def log_error(str):
log_debug(str, 0)
def client_side():
def connect(address):
import socket
connection = socket.socket()
res = socket.getaddrinfo(address, port)
af, socktype, proto, canonname, socket_address = res[0]
log_info("Connecting to %s:%d.." % (address, port))
connection.connect(socket_address)
log_info("Connected!")
return connection
# execute a command
def do_command(command, connection):
import subprocess
args = command.split(' ')
if args[0] == 'cd':
import os
os.chdir(args[1])
connection.send('changed directory to ' + args[1])
else:
process = subprocess.Popen(command.split(' '), stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
stdout = process.stdout
out = stdout.readline()
while out != None and out != "":
log_debug("Sending line '%s'" % out)
connection.send(out)
out = stdout.readline()
process.wait()
def read_commands(connection):
import re
try:
line = re.compile('(.*)\n\n')
data = ""
while True:
more = connection.recv(4096)
if not more:
break
data += more
log_debug("Buffer is now '%s'" % data)
get = line.match(data)
while get != None:
command = get.group(1)
if command == quit_message:
connection.close()
return
log_debug("Got command '%s'" % command)
out = do_command(command, connection)
# chop of the command from the buffer
data = data[(len(command) + 2):]
get = line.match(data)
except Exception, e:
log_error("Got an error.. closing the connection: " + str(e))
connection.close()
def run():
read_commands(connect('192.168.90.2'))
run()
def server_side():
def start_windows_vm():
def start_virtualbox():
import subprocess
# specific to jon's setup
vm_name = "xp-dev"
executable = "VBoxSDL"
return subprocess.Popen([executable, "-startvm", vm_name])
start_virtualbox()
# returns a connection
def wait_for_connect():
import socket
server = socket.socket()
server.bind(('0.0.0.0', port))
server.listen(1)
log_info("Waiting for a connection on port %d.." % port)
(client, address) = server.accept()
log_info("Got a connection from %s!" % str(address))
return client
# write the command and two newlines
def send_command(connection, command):
connection.send(command)
connection.send("\n\n")
# gets the text output from sending commands
def send_build_commands(connection):
# send_command(connection, 'ls')
send_command(connection, 'cd c:/svn/paintown')
send_command(connection, 'svn update')
send_command(connection, 'make win')
send_command(connection, quit_message)
size = 4096
data = connection.recv(size)
while data:
print data.strip()
data = connection.recv(size)
connection.close()
def run():
# start_windows_vm()
send_build_commands(wait_for_connect())
log_info("All done")
run()
import sys
if len(sys.argv) < 2:
log_error("""valid arguments:
client - run as the client
server - run as the server
verbose=# - set verbose level. 1 is the default. higher numbers is more verbose
""")
else:
import re
verbose_arg = re.compile('verbose=(\d+)')
for arg in sys.argv[1:]:
if arg == 'client':
client_side()
elif arg == 'server':
server_side()
elif verbose_arg.match(arg) != None:
out = verbose_arg.match(arg)
verbose = int(out.group(1))
|
Python
| 0.000003
|
@@ -349,16 +349,84 @@
= 15421%0A
+# network settings in jon's virtual box %0Aserver_ip = '192.168.90.2'%0A
quit_mes
@@ -2764,30 +2764,25 @@
connect(
-'192.168.90.2'
+server_ip
))%0A%0A
|
168c80e3bf024f74fbb49184ceffbc2a09abe6c1
|
Allow empty labels
|
kk/models/hearing.py
|
kk/models/hearing.py
|
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from .base import ModifiableModel
class Label(ModifiableModel):
label = models.CharField(verbose_name=_('Label'), default='', max_length=200)
def __str__(self):
return self.label
class Hearing(ModifiableModel):
COMMENT_OPTION_DISALLOW = '1'
COMMENT_OPTION_REGISTERED = '2'
COMMENT_OPTION_ANONYMOUS = '3'
COMMENT_OPTION = (
(COMMENT_OPTION_DISALLOW, 'Disallow'),
(COMMENT_OPTION_REGISTERED, 'Registered'),
(COMMENT_OPTION_ANONYMOUS, 'Anonymous')
)
close_at = models.DateTimeField(verbose_name=_('Closing time'), default=timezone.now)
n_comments = models.IntegerField(verbose_name=_('Number of comments'), blank=True, default=0)
closed = models.BooleanField(verbose_name=_('Whether hearing is closed'), default=False)
heading = models.TextField(verbose_name=_('Heading'), blank=True, default='')
abstract = models.TextField(verbose_name=_('Abstract'), blank=True, default='')
heading = models.TextField(verbose_name=_('Content'), blank=True, default='')
borough = models.CharField(verbose_name=_('Borough to which hearing concerns'), blank=True, default='', max_length=200)
comment_option = models.CharField(verbose_name=_('Commenting option'), max_length=1, choices=COMMENT_OPTION, default='1')
servicemap_url = models.CharField(verbose_name=_('Servicemap url'), default='', max_length=255, blank=True)
latitude = models.CharField(verbose_name=_('Latitude'), max_length=20, default='', blank=True)
longitude = models.CharField(verbose_name=_('Longitude'), max_length=20, default='', blank=True)
labels = models.ManyToManyField(Label)
|
Python
| 0.998839
|
@@ -1790,10 +1790,22 @@
ld(Label
+, blank=True
)%0A
|
d58b82997d9e5d616da2f517c19c5191c43cd823
|
make membship optional, on which we revert to matching_dissim; speed improvement
|
kmodes/util/dissim.py
|
kmodes/util/dissim.py
|
"""
Dissimilarity measures for clustering
"""
import numpy as np
def matching_dissim(a, b, **_):
"""Simple matching dissimilarity function"""
return np.sum(a != b, axis=1)
def euclidean_dissim(a, b, **_):
"""Euclidean distance dissimilarity function"""
if np.isnan(a).any() or np.isnan(b).any():
raise ValueError("Missing values detected in numerical columns.")
return np.sum((a - b) ** 2, axis=1)
def ng_dissim(a, b, X, membship):
"""Ng et al.'s dissimilarity measure, as presented in
Michael K. Ng, Mark Junjie Li, Joshua Zhexue Huang, and Zengyou He, "On the
Impact of Dissimilarity Measure in k-Modes Clustering Algorithm", IEEE
Transactions on Pattern Analysis and Machine Intelligence, Vol. 29, No. 3,
January, 2007
Note that membship must be a rectangular array such that the
len(membship) = len(a) and len(membship[i]) = X.shape[1]
"""
def calcCJR(b, X, memj, idr):
"""Num objects w/ category value x_{i,r} for rth attr in jth cluster"""
xcids = np.where(np.in1d(memj.ravel(), [1]).reshape(memj.shape))
return float((np.take(X, xcids, axis=0)[0][:, idr] == b[idr]).sum(0))
def calc_dissim(b, X, memj, idr):
# Size of jth cluster
CJ = float(np.sum(memj))
return (1.0 - (calcCJR(b, X, memj, idr) / CJ)) if CJ != 0.0 else 0.0
if len(membship) != a.shape[0] and len(membship[0]) != X.shape[1]:
raise ValueError("'membship' must be a rectangular array where "
"the number of rows in 'membship' equals the "
"number of rows in 'a' and the number of "
"columns in 'membship' equals the number of rows in 'X'.")
return np.array([np.array([calc_dissim(b, X, membship[idj], idr)
if b[idr] == t else 1.0
for idr, t in enumerate(val_a)]).sum(0)
for idj, val_a in enumerate(a)])
|
Python
| 0
|
@@ -457,16 +457,21 @@
membship
+=None
):%0A %22
@@ -902,23 +902,237 @@
hape%5B1%5D%0A
- %22%22%22
+%0A In case of missing membship, this function reverts back to%0A matching dissimilarity.%0A %22%22%22%0A # Without membership, revert to matching dissimilarity%0A if membship is None:%0A return matching_dissim(a, b)%0A
%0A def
@@ -1132,27 +1132,28 @@
def calc
-CJR
+_cjr
(b, X, memj,
@@ -1268,54 +1268,17 @@
ere(
-np.in1d(memj.ravel(), %5B1%5D).reshape(memj.shape)
+memj == 1
)%0A
@@ -1430,18 +1430,18 @@
-CJ
+cj
= float
@@ -1486,11 +1486,12 @@
calc
-CJR
+_cjr
(b,
@@ -1510,18 +1510,18 @@
) /
-CJ
+cj
)) if
-CJ
+cj
!=
|
1b972c4ab088fd6566dd144992167f4a4ae62356
|
rebuild LevelRenderData after saving changed_geometries
|
src/c3nav/mapdata/models/update.py
|
src/c3nav/mapdata/models/update.py
|
from contextlib import contextmanager
from django.conf import settings
from django.core.cache import cache
from django.db import models, transaction
from django.utils.http import int_to_base36
from django.utils.timezone import make_naive
from django.utils.translation import ugettext_lazy as _
from c3nav.mapdata.tasks import delete_old_cached_tiles
class MapUpdate(models.Model):
    """
    A map update. created whenever mapdata is changed.
    """
    datetime = models.DateTimeField(auto_now_add=True, db_index=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.PROTECT)
    type = models.CharField(max_length=32)

    class Meta:
        verbose_name = _('Map update')
        verbose_name_plural = _('Map updates')
        default_related_name = 'mapupdates'
        get_latest_by = 'datetime'

    @classmethod
    def last_update(cls):
        """Return (pk, datetime) of the latest update, using a short-lived cache."""
        last_update = cache.get('mapdata:last_update', None)
        if last_update is not None:
            return last_update
        with cls.lock():
            last_update = cls.objects.latest()
            cache.set('mapdata:last_update', (last_update.pk, last_update.datetime), 900)
        return last_update.pk, last_update.datetime

    @property
    def cache_key(self):
        # pk and timestamp base36-encoded, e.g. '3f_1a2b3c' — compact and URL-safe.
        return int_to_base36(self.pk)+'_'+int_to_base36(int(make_naive(self.datetime).timestamp()))

    @classmethod
    def current_cache_key(cls):
        """Cache key of the latest update, without loading a model instance."""
        pk, dt = cls.last_update()
        return int_to_base36(pk)+'_'+int_to_base36(int(make_naive(dt).timestamp()))

    @classmethod
    @contextmanager
    def lock(cls):
        """Row-lock the earliest update inside a transaction to serialize writers."""
        with transaction.atomic():
            yield cls.objects.select_for_update().earliest()

    def save(self, **kwargs):
        # MapUpdate rows are immutable: only creation is allowed.
        if self.pk is not None:
            raise TypeError
        old_cache_key = MapUpdate.current_cache_key()

        from c3nav.mapdata.models import AltitudeArea
        AltitudeArea.recalculate()

        super().save(**kwargs)

        from c3nav.mapdata.cache import changed_geometries
        changed_geometries.save(old_cache_key, self.cache_key)

        # Rebuild the render data only AFTER the changed geometries for this
        # update have been persisted, so the rebuild sees a consistent state.
        from c3nav.mapdata.render.base import LevelRenderData
        LevelRenderData.rebuild()

        cache.set('mapdata:last_update', (self.pk, self.datetime), 900)
        delete_old_cached_tiles.apply_async(countdown=5)
|
Python
| 0
|
@@ -1925,105 +1925,8 @@
()%0A%0A
- from c3nav.mapdata.render.base import LevelRenderData%0A LevelRenderData.rebuild()%0A%0A
@@ -2076,16 +2076,113 @@
e_key)%0A%0A
+ from c3nav.mapdata.render.base import LevelRenderData%0A LevelRenderData.rebuild()%0A%0A
|
dc300cf24651036e93e94f8c40f00f1126da4a85
|
fix 404 when user want to verify payments
|
lms/djangoapps/commerce/views.py
|
lms/djangoapps/commerce/views.py
|
""" Commerce views. """
import logging
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from edxmako.shortcuts import render_to_response
from microsite_configuration import microsite
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification
from shoppingcart.processors.CyberSource2 import is_user_payment_error
from django.utils.translation import ugettext as _
from openedx.core.djangoapps.theming.helpers import is_request_in_themed_site
log = logging.getLogger(__name__)
@csrf_exempt
def checkout_cancel(_request):
    """Render the page shown when a user cancels checkout/payment."""
    support_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
    return render_to_response("commerce/checkout_cancel.html", {'payment_support_email': support_email})
@csrf_exempt
def checkout_error(_request):
    """Render the page shown when checkout/payment fails with an error."""
    support_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
    return render_to_response("commerce/checkout_error.html", {'payment_support_email': support_email})
@csrf_exempt
@login_required
def checkout_receipt(request):
    """ Receipt view.

    Renders the order receipt page. If CyberSource redirected here after a
    failed payment attempt (decision != ACCEPT), an error message is shown
    instead of the receipt.
    """
    page_title = _('Receipt')
    is_payment_complete = True
    payment_support_email = microsite.get_value('payment_support_email', settings.PAYMENT_SUPPORT_EMAIL)
    payment_support_link = '<a href=\"mailto:{email}\">{email}</a>'.format(email=payment_support_email)

    # A CyberSource postback is recognized by these three POST fields.
    is_cybersource = all(k in request.POST for k in ('signed_field_names', 'decision', 'reason_code'))
    if is_cybersource and request.POST['decision'] != 'ACCEPT':
        # Cybersource may redirect users to this view if it couldn't recover
        # from an error while capturing payment info.
        is_payment_complete = False
        page_title = _('Payment Failed')
        reason_code = request.POST['reason_code']
        # if the problem was with the info submitted by the user, we present more detailed messages.
        if is_user_payment_error(reason_code):
            error_summary = _("There was a problem with this transaction. You have not been charged.")
            error_text = _(
                "Make sure your information is correct, or try again with a different card or another form of payment."
            )
        else:
            error_summary = _("A system error occurred while processing your payment. You have not been charged.")
            error_text = _("Please wait a few minutes and then try again.")
        for_help_text = _("For help, contact {payment_support_link}.").format(payment_support_link=payment_support_link)
    else:
        # if anything goes wrong rendering the receipt, it indicates a problem fetching order data.
        error_summary = _("An error occurred while creating your receipt.")
        error_text = None  # nothing particularly helpful to say if this happens.
        for_help_text = _(
            "If your course does not appear on your dashboard, contact {payment_support_link}."
        ).format(payment_support_link=payment_support_link)

    context = {
        'page_title': page_title,
        'is_payment_complete': is_payment_complete,
        'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
        'verified': SoftwareSecurePhotoVerification.verification_valid_or_pending(request.user).exists(),
        'error_summary': error_summary,
        'error_text': error_text,
        'for_help_text': for_help_text,
        'payment_support_email': payment_support_email,
        'username': request.user.username,
        'nav_hidden': True,
        'is_request_in_themed_site': is_request_in_themed_site(),
        'subscription_course_key': settings.SUBSCRIPTION_COURSE_KEY
    }
    return render_to_response('commerce/checkout_receipt.html', context)
|
Python
| 0
|
@@ -480,86 +480,8 @@
s _%0A
-from openedx.core.djangoapps.theming.helpers import is_request_in_themed_site%0A
%0A%0Alo
@@ -3624,35 +3624,12 @@
e':
-is_request_in_themed_site()
+True
,%0A
|
7b617d289d1771cf87635b0cfb2334c02352d9d2
|
remove commented legacy code
|
kneed/knee_locator.py
|
kneed/knee_locator.py
|
import numpy as np
from scipy import interpolate
from scipy.signal import argrelextrema
class KneeLocator(object):

    def __init__(self, x, y, S=1.0, curve='concave', direction='increasing'):
        """
        Locate the knee/elbow of a curve (Satopaa et al., "Finding a
        'Kneedle' in a Haystack", 2011).

        x = x values
        y = y values
        S = Sensitivity parameter, original paper suggests default of 1.0
        curve = 'concave' detects knees, 'convex' detects elbows
        direction = {"increasing", "decreasing"}
        """
        # Step 0: Raw Input
        self.x = x
        self.y = y
        self.curve = curve
        self.direction = direction

        # parameters
        self.N = len(self.x)
        self.S = S

        # Step 1: fit a smooth line
        uspline = interpolate.interp1d(self.x, self.y)
        self.Ds_x = np.linspace(np.min(self.x), np.max(self.x), self.N)
        self.Ds_y = uspline(self.Ds_x)

        # Step 2: normalize values to the unit interval
        self.xsn = self.__normalize(self.Ds_x)
        self.ysn = self.__normalize(self.Ds_y)

        # Step 3: Calculate difference curve.  The transforms below make the
        # knee a local maximum of yd for every curve/direction combination.
        self.xd = self.xsn
        if self.curve == 'convex' and direction == 'decreasing':
            self.yd = self.ysn + self.xsn
            self.yd = 1 - self.yd
        elif self.curve == 'concave' and direction == 'decreasing':
            self.yd = self.ysn + self.xsn
        elif self.curve == 'concave' and direction == 'increasing':
            self.yd = self.ysn - self.xsn
        if self.curve == 'convex' and direction == 'increasing':
            self.yd = abs(self.ysn - self.xsn)

        # Step 4: Identify local maxima/minima of the difference curve
        # local maxima
        self.xmx_idx = argrelextrema(self.yd, np.greater)[0]
        self.xmx = self.xd[self.xmx_idx]
        self.ymx = self.yd[self.xmx_idx]

        # local minima
        self.xmn_idx = argrelextrema(self.yd, np.less)[0]
        self.xmn = self.xd[self.xmn_idx]
        self.ymn = self.yd[self.xmn_idx]

        # Step 5: Calculate thresholds (one per local maximum)
        self.Tmx = self.__threshold(self.ymx)

        # Step 6: find knee
        self.knee, self.norm_knee, self.knee_x = self.find_knee()

    def __normalize(self, a):
        """Scale array values into [0, 1]."""
        return (a - min(a)) / (max(a) - min(a))

    def __threshold(self, ymx_i):
        """
        calculates the difference threshold for a
        given difference local maximum
        """
        return ymx_i - (self.S * np.diff(self.xsn).mean())

    def find_knee(self, ):
        """
        Walk the difference curve and declare a knee once it drops below the
        threshold of the preceding local maximum.

        Returns (knee, norm_knee, knee_x); all None when no maxima exist.
        """
        if len(self.xmx_idx) == 0:
            print("No local maxima found in the distance curve\n"
                  "The line is probably not polynomial, try plotting\n"
                  "the distance curve with plt.plot(knee.xd, knee.yd)\n"
                  "Also check that you aren't mistakenly setting the curve argument")
            return None, None, None

        mxmx_iter = np.arange(self.xmx_idx[0], len(self.xsn))
        xmx_idx_iter = np.append(self.xmx_idx, len(self.xsn))

        knee_, norm_knee_, knee_x = 0.0, 0.0, None
        for mxmx_i in range(len(xmx_idx_iter)):
            # stopping criteria for exhausting array
            if mxmx_i == len(xmx_idx_iter) - 1:
                break
            # indices between maxima/minima
            idxs = (mxmx_iter > xmx_idx_iter[mxmx_i]) * \
                   (mxmx_iter < xmx_idx_iter[mxmx_i + 1])
            between_local_mx = mxmx_iter[np.where(idxs)]

            for j in between_local_mx:
                if j in self.xmn_idx:
                    # reached a minima, x indices are unique
                    # only need to check if j is a min
                    if self.yd[j + 1] > self.yd[j]:
                        self.Tmx[mxmx_i] = 0
                        knee_x = None  # reset x where yd crossed Tmx
                    elif self.yd[j + 1] <= self.yd[j]:
                        unknown_condition = ("If this is a minima, "
                                             "how would you ever get here:")
                        print(unknown_condition)
                if self.yd[j] < self.Tmx[mxmx_i] or self.Tmx[mxmx_i] < 0:
                    # declare a knee
                    if not knee_x:
                        knee_x = j
                    knee_ = self.x[self.xmx_idx[mxmx_i]]
                    norm_knee_ = self.xsn[self.xmx_idx[mxmx_i]]
        return knee_, norm_knee_, knee_x

    def plot_knee_normalized(self, ):
        """Plot the normalized curve and difference curve with the knee marked."""
        import matplotlib.pyplot as plt

        plt.figure(figsize=(8, 8))
        plt.plot(self.xsn, self.ysn)
        plt.plot(self.xd, self.yd, 'r')
        plt.xticks(np.arange(min(self.xsn), max(self.xsn) + 0.1, 0.1))
        plt.yticks(np.arange(min(self.xd), max(self.ysn) + 0.1, 0.1))

        plt.vlines(self.norm_knee, plt.ylim()[0], plt.ylim()[1])

    def plot_knee(self, ):
        """Plot the raw curve with the knee marked."""
        import matplotlib.pyplot as plt

        plt.figure(figsize=(8, 8))
        plt.plot(self.x, self.y)
        plt.vlines(self.knee, plt.ylim()[0], plt.ylim()[1])
|
Python
| 0
|
@@ -555,699 +555,35 @@
-# if (self.curve == 'concave' and direction == 'decreasing') or (self.curve == 'convex' and direction == 'increasing'):%0A # self.original_x = self.x%0A # self.original_y = self.y%0A # self.x = %5Bmax(self.x) - x_ for x_ in self.x%5D%0A # self.y = %5Bmax(self.y) - y_ for y_ in self.y%5D%0A self.direction = direction%0A%0A # if (not np.array_equal(np.array(self.x), np.sort(self.x))%0A # and self.curve == 'convex'):%0A # raise ValueError('x values must be sorted')%0A # if (not np.array_equal(np.array(self.x%5B::-1%5D), np.sort(self.x))%0A# and self.curve == 'concave'):%0A #raise ValueError('x values must be sorted')
+self.direction = direction%0A
%0A
|
d095663247585110247e709dfd9aeca503824111
|
fix start_time in shootexec
|
yandextank/plugins/ShootExec/plugin.py
|
yandextank/plugins/ShootExec/plugin.py
|
import errno
import collections
import logging
import os.path
import subprocess
import time
from ...common.interfaces import AbstractPlugin, GeneratorPlugin, AggregateResultListener, AbstractInfoWidget, \
StatsReader
from ...common.util import FileScanner
from ..Console import Plugin as ConsolePlugin
from ..Phantom import PhantomReader
# Info object returned by Plugin.get_info(); presumably mirrors the Phantom
# plugin's info structure ("phantom compatible output") — confirm against base.
_INFO = collections.namedtuple(
    "Info",
    "address, port, instances, ammo_count, loop_count, duration, steps, stat_log, rps_schedule, ammo_file"
)

_LOGGER = logging.getLogger(__name__)

_PROCESS_KILL_TIMEOUT = 10  # Kill running process after specified number of seconds
_OUTPUT_WAIT_TIMEOUT = 10  # Output files should be found after specified number of seconds
class Plugin(GeneratorPlugin):
    """Simple executor of shooting process with phantom compatible output"""

    SECTION = 'shootexec'

    def __init__(self, core, cfg, name):
        AbstractPlugin.__init__(self, core, cfg, name)
        # Readers are created lazily in get_reader()/get_stats_reader().
        self.stats_reader = None
        self.reader = None
        self.__process = None  # Popen handle of the external shooting command
        self.__stderr_file = None  # captures the command's stdout + stderr
        self.__processed_ammo_count = 0
        self.__start_time = 0
        self.opened_file = None  # handle on output_path, consumed by PhantomReader

    @staticmethod
    def get_key():
        return __file__

    def get_available_options(self):
        return ["cmd", "output_path", "stats_path"]

    def configure(self):
        # "cmd" is run through the shell; output_path/stats_path are where the
        # external process is expected to write its data.
        self.__cmd = self.get_option("cmd")
        self.__output_path = self.get_option("output_path")
        self.core.add_artifact_file(self.__output_path)
        self.__stats_path = self.get_option("stats_path")
        if self.__stats_path:
            self.core.add_artifact_file(self.__stats_path)

    def get_reader(self):
        """Lazily create a PhantomReader over the (truncated) output file."""
        if self.reader is None:
            # Touch output_path to clear it
            open(self.__output_path, "w").close()
            self.opened_file = open(self.__output_path, 'r')
            self.add_cleanup(lambda: self.opened_file.close())
            self.reader = PhantomReader(self.opened_file)
        return self.reader

    def get_stats_reader(self):
        # Fall back to a dummy reader when no stats file is configured.
        if self.stats_reader is None:
            self.stats_reader = _FileStatsReader(self.__stats_path) if self.__stats_path else _DummyStatsReader()
        return self.stats_reader

    def prepare_test(self):
        stderr_path = self.core.mkstemp(".log", "shootexec_stdout_stderr_")
        self.__stderr_file = open(stderr_path, 'w')
        _LOGGER.debug("Linking sample reader to aggregator. Reading samples from %s", self.__output_path)
        # NOTE(review): the shooting process is not started yet here; taking
        # the start time in start_test() would be more accurate — confirm.
        self.__start_time = time.time()
        self.core.job.aggregator.add_result_listener(self)
        try:
            console = self.core.get_plugin_of_type(ConsolePlugin)
        except Exception as ex:
            _LOGGER.debug("Console not found: %s", ex)
            console = None
        if console:
            widget = _InfoWidget(self)
            console.add_info_widget(widget)
            self.core.job.aggregator.add_result_listener(widget)

    def start_test(self):
        """Spawn the shooting command and wait until its output files appear."""
        _LOGGER.info("Starting shooting process: '%s'", self.__cmd)
        self.__process = subprocess.Popen(
            self.__cmd,
            shell=True,
            stderr=self.__stderr_file,
            stdout=self.__stderr_file,
            close_fds=True
        )

        # Ensure that all expected output files are ready to use
        _LOGGER.info("Waiting until output files are ready")
        waitfor = time.time() + _OUTPUT_WAIT_TIMEOUT
        while time.time() < waitfor:
            output_path_is_ready = os.path.isfile(self.__output_path)
            stats_path_is_ready = (not self.__stats_path or os.path.isfile(self.__stats_path))
            if output_path_is_ready and stats_path_is_ready:
                break
            time.sleep(0.1)
        else:
            raise Exception("Failed to wait until output resources are ready: output={}, stats={}".format(
                output_path_is_ready,
                stats_path_is_ready
            ))
        _LOGGER.info("Shooting proces is ready to use")

    def is_test_finished(self):
        # Absolute exit code once the process has finished; -1 while running.
        retcode = self.__process.poll()
        if retcode is not None:
            _LOGGER.info("Shooting process done its work with exit code: %s", retcode)
            return abs(retcode)
        else:
            return -1

    def end_test(self, retcode):
        if self.__process and self.__process.poll() is None:
            _LOGGER.warn("Terminating shooting process with PID %s", self.__process.pid)
            self.__terminate()
        else:
            _LOGGER.debug("Seems shooting process finished OK")
        return retcode

    def post_process(self, retcode):
        return retcode

    def on_aggregated_data(self, data, stats):
        # Track the number of processed samples for debugging/reporting.
        self.__processed_ammo_count += data["overall"]["interval_real"]["len"]
        _LOGGER.debug("Processed ammo count: %s", self.__processed_ammo_count)

    def get_info(self):
        """ returns info object """
        return _INFO(
            "",
            "",
            "0",
            "0",
            "0",
            time.time() - self.__start_time,
            None,
            "",
            "",
            ""
        )

    def __terminate(self):
        """Graceful termination of running process"""
        if self.__stderr_file:
            self.__stderr_file.close()

        if not self.__process:
            return

        # First ask politely with terminate(), retrying for a bounded time...
        waitfor = time.time() + _PROCESS_KILL_TIMEOUT
        while time.time() < waitfor:
            try:
                self.__process.terminate()
            except EnvironmentError as e:
                # ESRCH means the process is already gone.
                if e.errno != errno.ESRCH:
                    _LOGGER.warning("Failed to terminate process '{}': {}".format(self.__cmd, e))
                return
            time.sleep(0.1)

        # ...then kill it outright.
        try:
            self.__process.kill()
        except EnvironmentError as e:
            if e.errno != errno.ESRCH:
                _LOGGER.warning("Failed to kill process '{}': {}".format(self.__cmd, e))
            return
class _InfoWidget(AbstractInfoWidget, AggregateResultListener):
    """Console widget reporting the current state of the shooting run."""

    def __init__(self, sender):
        AbstractInfoWidget.__init__(self)
        self.owner = sender

    def get_index(self):
        # Fixed position of this widget among the console widgets.
        return 2

    def render(self, screen):
        # Nothing to display yet.
        return ""

    def on_aggregated_data(self, data, stats):
        # Aggregated samples are not used by this widget.
        pass
class _FileStatsReader(FileScanner, StatsReader):
    """
    Reads shooting statistics line by line.

    Each line has the format 'timestamp\trps\tinstances'.
    """

    def __init__(self, *args, **kwargs):
        super(_FileStatsReader, self).__init__(*args, **kwargs)
        self.__last_ts = 0

    def _read_data(self, lines):
        """
        Parse lines and return stats
        """
        parsed = []
        for line in lines:
            raw_ts, rps, instances = line.split("\t")
            # Timestamps may be floats, but the tank expects whole seconds;
            # only emit an item when the second advances.
            second = int(float(raw_ts))
            if self.__last_ts < second:
                self.__last_ts = second
                parsed.append(self.stats_item(self.__last_ts, float(rps), float(instances)))
        return parsed
class _DummyStatsReader(StatsReader):
    """Stats reader stub for shooters that provide no stats file."""

    def __init__(self):
        self.__closed = False
        self.__last_ts = 0

    def __iter__(self):
        # Emit one zero-valued item per wall-clock second until closed.
        while not self.__closed:
            now = int(time.time())
            if now > self.__last_ts:
                yield [self.stats_item(now, 0, 0)]
                self.__last_ts = now
            else:
                yield []

    def close(self):
        self.__closed = True
|
Python
| 0.000002
|
@@ -3056,16 +3056,54 @@
.__cmd)%0A
+ self.start_time = time.time()%0A
|
292f88104ecb93705e7c54f0e43382667d1d3937
|
version bump
|
jupyterthemes/__init__.py
|
jupyterthemes/__init__.py
|
"""
Juypiter theme installer
Author: miraculixx at github.com, dunovank at github.com
"""
from __future__ import print_function
from jupyter_core.paths import jupyter_config_dir, jupyter_data_dir
import os
import shutil
import argparse
from glob import glob
from tempfile import mkstemp
__version__ = 0.3.1
# Jupyter config locations (jnb_config_dir is usually ~/.jupyter).
jnb_config_dir = jupyter_config_dir()
HOME = os.path.expanduser('~')
install_path = os.path.join(jnb_config_dir, 'custom')
nbconfig_path = os.path.join(jnb_config_dir, 'nbconfig')

# Ensure all install dirs exist
if not os.path.isdir(jnb_config_dir):
    os.makedirs(jnb_config_dir)
if not os.path.isdir(install_path):
    os.makedirs(install_path)
if not os.path.isdir(nbconfig_path):
    os.makedirs(nbconfig_path)

# Bundled theme stylesheets live in <package>/styles.
package_dir = os.path.dirname(os.path.realpath(__file__))
styles_dir = os.path.join(package_dir, 'styles')

# Stock custom.css markers that install_theme() rewrites in place.
default_toolbar_string='div#maintoolbar {display: none !important;}'
default_font_string="div.CodeMirror pre {font-family: 'Hack', monospace; font-size: 11pt;}"
def get_themes():
    """ return list of available themes """
    css_files = glob('%s/*.css' % styles_dir)
    return [os.path.basename(path).replace('.css', '') for path in css_files]
def install_theme(name, toolbar=False, fontsize=12, font="'Hack'"):
    """ copy given styles/<theme name>.css --> ~/.jupyter/custom/custom.css

    toolbar: keep the notebook toolbar visible (the stock rule hiding it is
        commented out instead of removed).
    fontsize/font: substituted into the CodeCell font rule.
    """
    source_path = glob('%s/%s.css' % (styles_dir, name))[0]
    font_string="div.CodeMirror pre {font-family: %s, monospace; font-size: %dpt;}" % (font, fontsize)
    # -- install theme
    customcss_path = '%s/custom.css' % install_path
    shutil.copy(source_path, customcss_path)
    print("Installing %s at %s" % (name, install_path))

    # Rewrite custom.css line by line through a temp file, then atomically
    # replace the original.
    fh, abs_path = mkstemp()
    with open(abs_path, 'w') as cssfile:
        with open(customcss_path) as old_file:
            for line in old_file:
                if toolbar:
                    # -- enable toolbar if requested
                    restore_toolbar='/*'+default_toolbar_string+'*/'
                    line = line.replace(default_toolbar_string, restore_toolbar)
                # -- set CodeCell font and fontsize
                line = line.replace(default_font_string, font_string)
                cssfile.write(line)
    os.close(fh)
    os.remove(customcss_path)
    shutil.move(abs_path, customcss_path)
def edit_config(linewrap=False, iu=4):
    """ toggle linewrapping and set size of indent unit
    with notebook.json config file in ~/.jupyter/nbconfig/
    """
    # JSON booleans are lowercase strings, so translate the Python bool by hand.
    if linewrap:
        lw='true'
    else:
        lw='false'
    # Hand-built JSON payload; the {:<N} fields emit the indentation.
    PARAMS_string = '{{\n{:<2}"CodeCell": {{\
    \n{:<4}"cm_config": {{\
    \n{:<6}"indentUnit": {},\
    \n{:<6}"lineWrapping": {}\
    \n{:<4}}}\n{:<2}}},\
    \n{:<2}"nbext_hide_incompat": false\n}}'.format('','','', iu,'',lw,'','','')
    actual_config_path = os.path.expanduser(os.path.join(nbconfig_path))
    if not os.path.exists(actual_config_path):
        os.makedirs(actual_config_path)
    config_file_path = '%s/notebook.json' % actual_config_path
    # Overwrite any existing notebook.json wholesale.
    with open(config_file_path, 'w+') as cfile:
        cfile.write(PARAMS_string)
def reset_default():
    """ remove custom.css import"""
    jnb_cached = os.path.join(jupyter_data_dir(), 'nbextensions')
    paths = [install_path, jnb_cached]
    for fpath in paths:
        # Keep a backup as custom_old.css before removing the active theme.
        old = '%s/%s.css' % (fpath, 'custom')
        old_save = '%s/%s.css' % (fpath, 'custom_old')
        try:
            shutil.copy(old, old_save)
            os.remove(old)
            print("Reset default theme here: %s" % fpath)
        except Exception:
            # No custom.css present — already at the default theme.
            print("Already set to default theme in %s" % fpath)
            pass
def main():
    """Command-line entry point: parse arguments and install/reset a theme
    or edit the notebook config."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', "--theme", action='store',
                        help="name of the theme to install")
    parser.add_argument('-l', "--list", action='store_true',
                        help="list available themes")
    parser.add_argument('-r', "--reset", action='store_true',
                        help="reset to default theme")
    # notebook options
    parser.add_argument('-T', "--toolbar", action='store_true',
                        default=False,
                        help="if specified will enable the toolbar")
    parser.add_argument('-fs', "--fontsize", action='store',
                        default=11, help='set the CodeCell font-size')
    parser.add_argument('-f', "--font", action='store',
                        default='Hack', help='set the CodeCell font')
    # nb config options
    parser.add_argument('-lw', "--linewrap", action='store_true',
                        default=False,
                        help="if specified will enable linewrapping in code cells")
    parser.add_argument('-iu', "--indentunit", action='store',
                        default='4', help="set indent unit for code cells")
    args = parser.parse_args()

    if args.reset:
        reset_default()
        exit(0)
    if args.list:
        themes = get_themes()
        print("Available Themes")
        print('\n'.join(themes))
        exit(0)
    if args.theme:
        themes = get_themes()
        if args.theme not in themes:
            print("Theme %s not found. Available: %s"%(args.theme, ' '.join(themes)))
            exit(1)
        if args.toolbar:
            print('Enabling Toolbar')
        # Font name is wrapped in quotes so it lands quoted inside the CSS rule.
        install_theme(args.theme, toolbar=args.toolbar, fontsize=int(args.fontsize), font="'"+args.font+"'")
        exit(0)
    if args.linewrap or args.indentunit!='4':
        edit_config(linewrap=args.linewrap, iu=str(args.indentunit))
|
Python
| 0.000001
|
@@ -298,13 +298,15 @@
_ =
+'
0.3.1
+'
%0A%0Ajn
|
73b67a30495e7a6d638421ba8b9544a5e2dc4185
|
Fix task full resource
|
zou/app/resources/project/task_full.py
|
zou/app/resources/project/task_full.py
|
from flask import abort
from flask_login import login_required
from zou.app.models.task import Task
from zou.app.models.project import Project
from zou.app.models.person import Person
from zou.app.models.entity import Entity
from zou.app.models.entity_type import EntityType
from zou.app.models.task_status import TaskStatus
from zou.app.models.task_type import TaskType
from zou.app.resources.data.base import BaseModelResource
from zou.app.project import task_info
from zou.app.project.exception import TaskNotFoundException
class TaskFullResource(BaseModelResource):
    """Serve a task together with all of its related records serialized."""

    def __init__(self):
        BaseModelResource.__init__(self, Task)

    @login_required
    def get(self, instance_id):
        """
        Retrieve the task matching *instance_id* and embed its related task
        type, assigner, project, status, entity (with optional parent) and
        assignees. Aborts with 404 when the task does not exist.
        """
        try:
            task = task_info.get_task(instance_id)
        except TaskNotFoundException:
            abort(404)

        result = task.serialize()

        task_type = TaskType.get(task.task_type_id)
        result["task_type"] = task_type.serialize()

        assigner = Person.get(task.assigner_id)
        result["assigner"] = assigner.serialize()

        project = Project.get(task.project_id)
        result["project"] = project.serialize()

        task_status = TaskStatus.get(task.task_status_id)
        result["task_status"] = task_status.serialize()

        entity = Entity.get(task.entity_id)
        result["entity"] = entity.serialize()

        # Only look up the parent when one is set: top-level entities have no
        # parent_id and looking up None would fail.
        if entity.parent_id is not None:
            parent = Entity.get(entity.parent_id)
            if parent is not None:
                result["entity_parent"] = parent.serialize()

        entity_type = EntityType.get(entity.entity_type_id)
        result["entity_type"] = entity_type.serialize()

        assignees = []
        for assignee in task.assignees:
            assignees.append(assignee.serialize())
        result["persons"] = assignees

        return result, 200
|
Python
| 0.000029
|
@@ -1367,28 +1367,11 @@
-parent = Entity.get(
+if
enti
@@ -1378,25 +1378,37 @@
ty.parent_id
-)
+ is not None:
%0A if
@@ -1408,18 +1408,19 @@
-if
+
parent
is n
@@ -1415,28 +1415,46 @@
parent
-is not None:
+= Entity.get(entity.parent_id)
%0A
|
807ea3a97377ec80e47b419baeaab1c1a432fd40
|
fix for python 3
|
language_tags/tags.py
|
language_tags/tags.py
|
# -*- coding: utf-8 -*-
import os
import json
from io import open
from language_tags.Subtag import Subtag
from language_tags.Tag import Tag
parent_dir = os.path.dirname(__file__)
data_dir = '../node_modules/language-subtag-registry/data/json/'
index = json.load(open(os.path.join(parent_dir, data_dir + "index.json"), encoding='utf-8'))
registry = json.load(open(os.path.join(parent_dir, data_dir + "registry.json"), encoding='utf-8'))
class tags():

    @staticmethod
    def tag(tag):
        """
        Get a :class:`language_tags.Tag.Tag` of a string (hyphen-separated) tag.

        :param str tag: (hyphen-separated) tag.
        :return: :class:`language_tags.Tag.Tag`.
        """
        return Tag(tag)

    @staticmethod
    def check(tag):
        """
        Check if a string (hyphen-separated) tag is valid.

        :param str tag: (hyphen-separated) tag.
        :return: bool -- True if valid.
        """
        return Tag(tag).valid

    @staticmethod
    def types(subtag):
        """
        Get the types of a subtag string (excludes redundant and grandfathered).

        :param str subtag: subtag.
        :return: list of types. The return list can be empty.
        """
        # NOTE(review): "type != 'redundant' or type != 'grandfathered'" is
        # always True, so those types are NOT actually excluded; 'and' was
        # probably intended — kept as-is to preserve behavior, confirm first.
        if subtag in index:
            types = index[subtag]
            return [type for type in types.keys() if type != 'redundant' or type != 'grandfathered']
        else:
            return []

    @staticmethod
    def subtags(subtags):
        """
        Get a list of existing :class:`language_tags.Subtag.Subtag` objects given the input subtag(s).

        :param subtags: string subtag or list of string subtags.
        :return: a list of existing :class:`language_tags.Subtag.Subtag` objects. The return list can be empty.
        """
        result = []

        if not isinstance(subtags, list):
            subtags = [subtags]

        for subtag in subtags:
            for type in tags.types(subtag):
                result.append(Subtag(subtag, type))

        return result

    @staticmethod
    def filter(subtags):
        """
        Get a list of non-existing string subtag(s) given the input string subtag(s).

        :param subtags: string subtag or a list of string subtags.
        :return: list of non-existing string subtags. The return list can be empty.
        """
        if not isinstance(subtags, list):
            subtags = [subtags]
        return [subtag for subtag in subtags if len(tags.types(subtag)) == 0]

    @staticmethod
    def search(description, all=False):
        """
        Gets a list of :class:`language_tags.Subtag.Subtag` objects where the description matches.

        :param description: a string or compiled regular expression. For example: ``search(re.compile('\d{4}'))`` if the
            description of the returned subtag must contain four contiguous numerical digits.
        :type description: str or RegExp
        :param all: If set on True grandfathered and redundant tags will be included in the return
            list.
        :type all: bool, optional
        :return: list of :class:`language_tags.Subtag.Subtag` objects each including the description.
            The return list can be empty.
        """
        # If the input query is all lowercase, make a case-insensitive match.
        if isinstance(description, str):
            list_to_string = lambda l: ', '.join(l).lower() if description.lower() == description else ', '.join(l)

            def test(record):
                return description in list_to_string(record['Description'])
        elif hasattr(description.search, '__call__'):
            def test(record):
                return description.search(', '.join(record['Description'])) is not None

        # Materialize immediately: on Python 3 filter() returns a one-shot
        # iterator, which would be exhausted when iterated more than once.
        records = list(filter(lambda r: False if ('Subtag' not in r and not all) else test(r), registry))

        # Sort by matched description string length. This is a quick way to push precise matches towards the top.
        results = sorted(records, key=lambda r: min([abs(len(r_description) - len(description))
                                                     for r_description in r['Description']])) \
            if isinstance(description, str) else records

        # Return a list (not a lazy map) so callers can index and re-iterate.
        # NOTE(review): Tag(['Tag']) passes a literal list; Tag(r['Tag']) was
        # probably intended — kept as-is to preserve behavior.
        return [Subtag(r['Subtag'], r['Type']) if 'Subtag' in r else Tag(['Tag']) for r in results]

    @staticmethod
    def description(tag):
        """
        Gets a list of descriptions given the tag.

        :param str tag: (hyphen-separated) tag.
        :return: list of string descriptions. The return list can be empty.
        """
        tag_object = Tag(tag)
        results = tag_object.descriptions
        subtags = tag_object.subtags
        for subtag in subtags:
            results += subtag.description

        return results

    @staticmethod
    def languages(macrolanguage):
        """
        Get a list of :class:`language_tags.Subtag.Subtag` objects given the string macrolanguage.

        :param string macrolanguage: subtag macrolanguage.
        :return: a list of the macrolanguage :class:`language_tags.Subtag.Subtag` objects.
        :raise Exception: if the macrolanguage does not exists.
        """
        results = []

        macrolanguage = macrolanguage.lower()
        macrolanguage_data = json.load(open(os.path.join(parent_dir, data_dir + "macrolanguage.json")))
        if macrolanguage not in macrolanguage_data:
            raise Exception('\'' + macrolanguage + '\' is not a macrolanguage.')
        for registry_item in registry:
            record = registry_item
            if 'Macrolanguage' in record:
                if record['Macrolanguage'] == macrolanguage:
                    results.append(Subtag(record['Subtag'], record['Type']))

        return results

    @staticmethod
    def type(subtag, type):
        """
        Get a :class:`language_tags.Subtag.Subtag` by subtag and type. Can be None if not exists.

        :param str subtag: subtag.
        :param str type: type of the subtag.
        :return: :class:`language_tags.Subtag.Subtag` if exists, otherwise None.
        """
        subtag = subtag.lower()
        if subtag in index:
            types = index[subtag]
            if type in types:
                return Subtag(subtag, type)
        return None

    @staticmethod
    def language(subtag):
        """
        Get a language :class:`language_tags.Subtag.Subtag` of the subtag string.

        :param str subtag: subtag.
        :return: language :class:`language_tags.Subtag.Subtag` if exists, otherwise None.
        """
        return tags.type(subtag, 'language')

    @staticmethod
    def region(subtag):
        """
        Get a region :class:`language_tags.Subtag.Subtag` of the subtag string.

        :param str subtag: subtag.
        :return: region :class:`language_tags.Subtag.Subtag` if exists, otherwise None.
        """
        return tags.type(subtag, 'region')

    @staticmethod
    def date():
        """
        Get the file date of the underlying data as a string.

        :return: date as string (for example: '2014-03-27').
        """
        meta = json.load(open(os.path.join(parent_dir, data_dir + "meta.json")))
        return meta['File-Date']
|
Python
| 0.000052
|
@@ -38,16 +38,27 @@
ort json
+%0Aimport six
%0A%0Afrom i
@@ -3814,16 +3814,72 @@
gistry)%0A
+ if six.PY3:%0A records = list(records)%0A
@@ -4251,22 +4251,9 @@
urn
-map(lambda r:
+%5B
Subt
@@ -4317,17 +4317,25 @@
g'%5D)
-,
+ for r in
results
)%0A%0A
@@ -4330,17 +4330,17 @@
results
-)
+%5D
%0A%0A @s
|
d4563fe6991ee644350528a469884f697f02308d
|
Add production of very high S/N model images
|
models/make_images.py
|
models/make_images.py
|
#!/usr/bin/env python
from glob import glob
import pyfits
import sys, os
import numpy
shape = (100,100)  # output image size in pixels
bands = ['u', 'g', 'r', 'i', 'z', 'Y', 'J', 'H', 'K']
# Per-band zero points (magnitudes); noise scales as 10**(-0.4*(zp-15)).
zp = numpy.array([16.75,15.957,15.0,14.563,14.259,14.162,13.955,13.636,13.525])
def make_images(model='A', noiselevel=5,
                bandsel=['u', 'g', 'r', 'i', 'z', 'Y', 'J', 'H', 'K']):
    """Run galfit for the given model and write per-band noisy model images.

    noiselevel scales the per-band Gaussian noise (derived from the zero
    points in `zp`); only bands listed in bandsel get an output image.
    NOTE(review): bandsel is a mutable default; harmless here since it is
    only read, but a tuple would be safer.
    """
    # Per-band noise sigma derived from the photometric zero points.
    noisebands = 10**(-0.4*(zp-15.0)) * noiselevel/2.0
    noise = []
    for n in noisebands:
        noise.append(numpy.random.normal(0.0, n, shape))
    gals = glob('model%s.galfit'%model)
    for g in gals:
        # Run galfit on each model definition file, then add noise per band.
        os.system('nice galfit %s > %s.out'%(g,g))
        imgname = g.replace('.galfit', '')
        img = pyfits.open(imgname+'.fits')
        for j, b in enumerate(bands):
            if b in bandsel:
                ext = img['MODEL_'+b]
                print g, b, j, ext.name, noisebands[j]
                ext.data += noise[j]
                pyfits.writeto(imgname+'_%i%s_n%i.fits'%(j+1, b, noiselevel), ext.data, clobber=True)
if __name__ =='__main__':
    # Standard image set at the default noise level, plus high-S/N H-band
    # variants for models A and B and r-band variants for the C sub-models.
    make_images('A', 5)
    make_images('A', 50, ['H'])
    make_images('B', 5)
    make_images('B', 50, ['H'])
    for x in 'abcdefghi':
        make_images('C'+x, 5, ['r'])
    make_images('D', 5)
    make_images('E', 5)
|
Python
| 0
|
@@ -1030,16 +1030,43 @@
ain__':%0A
+ make_images('A', 0.01)%0A
make
@@ -1109,24 +1109,51 @@
50, %5B'H'%5D)%0A
+ make_images('B', 0.01)%0A
make_ima
@@ -1253,40 +1253,131 @@
+x,
-5, %5B'r'%5D)%0A make_images('D', 5
+0, %5B'r'%5D)%0A make_images('C'+x, 5, %5B'r'%5D)%0A make_images('D', 0.01)%0A make_images('D', 5)%0A make_images('E', 0.01
)%0A
|
183722bf88f04f5f8735cc61e62e8d89016bd32d
|
fix bug with user_course
|
models/user_course.py
|
models/user_course.py
|
import itertools
import mongoengine as me
import course
import professor
import rating
import review
import term
class CritiqueCourse(me.Document):
meta = {
'indexes': [
'course_id',
'professor_id',
],
}
# id = me.ObjectIdField(primary_key=True)
course_id = me.StringField(required=True)
# TODO(mack): need section_id or equiv
# course_id = me.StringField(required=True, unique_with='section_id')
# section_id = me.IntField(required=True)
professor_id = me.StringField(required=True)
term_id = me.StringField(required=True)
interest = me.EmbeddedDocumentField(rating.AggregateRating, default=rating.AggregateRating())
easiness = me.EmbeddedDocumentField(rating.AggregateRating, default=rating.AggregateRating())
overall_course = me.EmbeddedDocumentField(rating.AggregateRating, default=rating.AggregateRating())
clarity = me.EmbeddedDocumentField(rating.AggregateRating, default=rating.AggregateRating())
passion = me.EmbeddedDocumentField(rating.AggregateRating, default=rating.AggregateRating())
overall_prof = me.EmbeddedDocumentField(rating.AggregateRating, default=rating.AggregateRating())
class MenloCourse(me.Document):
meta = {
'indexes': [
'course_id',
'professor_id',
],
}
# id = me.ObjectIdField(primary_key=True)
course_id = me.StringField(required=True)
professor_id = me.StringField()
course_review = me.EmbeddedDocumentField(review.CourseReview)
professor_review = me.EmbeddedDocumentField(review.ProfessorReview)
# TODO(mack): should be UserCourseOffering?
class UserCourse(me.Document):
meta = {
'indexes': [
'user_id',
'course_id',
'professor_id',
# TODO(mack): check if below indices are necessary
#('course_id', 'professor_id'),
#('professor_id', 'course_id'),
],
}
# date review created implictly stored here
# id = me.ObjectIdField(primary_key=True)
# TODO(mack): verify this works when user_id is not required
# TODO(mack): might be better to just enforce uniqueness on
# ['course_id', 'offering_id']?
user_id = me.ObjectIdField(unique_with=['course_id', 'term_id'])
course_id = me.StringField(required=True)
term_id = me.StringField(required=True)
# TODO(mack): might not appropriate to store here, maybe should be
# calculated using combination of info about when user started
# school and term this course was taken
# eg. 3A
program_year_id = me.StringField()
# TODO(mack): add fields for grade tracking; eg. grades on assignments
# TODO(mack): should range be 0..1 or 0..100 ?
grade = me.FloatField(min_value=0.0, max_value=1.0)
professor_id = me.StringField()
course_review = me.EmbeddedDocumentField(review.CourseReview, default=review.CourseReview())
professor_review = me.EmbeddedDocumentField(review.ProfessorReview, default=review.ProfessorReview())
# TODO(mack): add section_id
# section_id = StringField()
# TODO(mack): should we have update_time?
# update_date = me.DateTimeField()
@property
def term_name(self):
return term.Term(self.term_id).name
@property
def has_reviewed(self):
return (self.course_review.comment_date is not None
or self.course_review.easiness is not None
or self.course_review.usefulness is not None
or self.professor_review.comment_date is not None
or self.professor_review.clarity is not None
or self.professor_review.passion is not None
)
def to_dict(self):
return {
'id': self.id,
'user_id': self.user_id,
# TODO(Sandy): We probably don't need to pass down term_id
'term_id': self.term_id,
'term_name': term.Term(id=self.term_id).name,
'course_id': self.course_id,
'professor_id': self.professor_id,
'course_review': self.course_review.to_dict(),
'professor_review': self.professor_review.to_dict(),
'has_reviewed': self.has_reviewed,
}
def save(self, *args, **kwargs):
# TODO(Sandy): Use transactions
# http://docs.mongodb.org/manual/tutorial/perform-two-phase-commits/
# or run nightly ratings aggregation script to fix race condtions
cur_course = course.Course.objects.with_id(self.course_id)
self.course_review.update_course_aggregate_ratings(cur_course)
cur_course.save()
if self.professor_id:
cur_professor = professor.Professor.objects.with_id(
self.professor_id)
self.professor_review.update_professor_aggregate_ratings(
cur_professor)
cur_professor.save()
super(UserCourse, self).save(*args, **kwargs)
# TODO(david): Should be static method of ProfCourse
def get_reviews_for_course_prof(course_id, prof_id):
menlo_reviews = MenloCourse.objects(
course_id=course_id,
professor_id=prof_id,
).only('professor_review', 'course_review')
user_reviews = UserCourse.objects(
course_id=course_id,
professor_id=prof_id,
).only('professor_review', 'course_review', 'user_id', 'term_id')
return itertools.chain(menlo_reviews, user_reviews)
|
Python
| 0
|
@@ -4206,16 +4206,69 @@
viewed,%0A
+ 'program_year_id': self.program_year_id,%0A
|
2417f7e3c445c7f369c9eb8cb48c83ebb4c2e43d
|
Change blueprints to be more container-like.
|
kyokai/blueprints.py
|
kyokai/blueprints.py
|
"""
Kyōkai are simply groups of routes.
They're a simpler way of grouping your routes together instead of having to import your app object manually all of
the time.
"""
from kyokai.route import Route
class Blueprint(object):
"""
A Blueprint contains one public method: `bp.route`. It acts exactly the same as a normal route method.
If you set a `url_prefix` in the constructor, this prefix will be added onto your routes.
"""
def __init__(self, name: str, url_prefix: str=""):
self._prefix = url_prefix
self._name = name
self.routes = []
self.errhandlers = {}
def _bp_get_errhandler(self, code: int):
return self.errhandlers.get(code)
def route(self, regex, methods: list = None, hard_match: bool = False):
"""
Create an incoming route for a function.
Parameters:
regex:
The regular expression to match the path to.
In standard Python `re` forme.
Group matches are automatically extracted from the regex, and passed as arguments.
methods:
The list of allowed methods, e.g ["GET", "POST"].
You can check the method with `request.method`.
hard_match:
Should we match based on equality, rather than regex?
This prevents index or lower level paths from matching 404s at higher levels.
"""
if not methods:
methods = ["GET"]
# Override hard match if it's a `/` route.
if regex == "/":
hard_match = True
regex = self._prefix + regex
r = Route(regex, methods, hard_match)
r.set_errorhandler_factory(self._bp_get_errhandler)
self.routes.append(r)
return r
def errorhandler(self, code: int):
"""
Create an error handler for the specified code.
This will wrap the function in a Route.
"""
r = Route("", [])
self.errhandlers[code] = r
return r
def _init_bp(self):
return self.routes
|
Python
| 0
|
@@ -4,16 +4,27 @@
%0AKy%C5%8Dkai
+blueprints
are simp
@@ -260,184 +260,25 @@
int
-contains one public method: %60bp.route%60. It acts exactly the same as a normal route method.%0A%0A If you set a %60url_prefix%60 in the constructor, this prefix will be added onto you
+is a container fo
r ro
@@ -324,16 +324,54 @@
me: str,
+ parent: 'Blueprint',%0A
url_pre
@@ -510,47 +510,84 @@
-def _bp_get_errhandler(self, code: int)
+ self._parent = parent%0A%0A @property%0A def parent(self) -%3E 'Blueprint'
:%0A
@@ -596,41 +596,115 @@
-return self.errhandlers.get(code)
+%22%22%22%0A Returns the parent Blueprint of the currentl Blueprint.%0A %22%22%22%0A return self._parent
%0A%0A
@@ -1655,16 +1655,22 @@
= Route(
+self,
regex, m
@@ -1693,68 +1693,8 @@
ch)%0A
- r.set_errorhandler_factory(self._bp_get_errhandler)%0A
@@ -1923,16 +1923,22 @@
= Route(
+self,
%22%22, %5B%5D)%0A
@@ -1993,55 +1993,4 @@
n r%0A
-%0A def _init_bp(self):%0A return self.routes
|
a43ada7785db136f3a5d7d96c6b64b0a686d052e
|
fix total_force missing
|
labs/lab2/analyze.py
|
labs/lab2/analyze.py
|
#!/usr/bin/env python
import re
import sys
import csv
import argparse
# This defines the patterns for extracting relevant data from the output
# files.
patterns = {
"energy": re.compile("total energy\s+=\s+([\d\.\-]+)\sRy"),
"ecut": re.compile("kinetic\-energy cutoff\s+=\s+([\d\.\-]+)\s+Ry"),
"alat": re.compile("celldm\(1\)=\s+([\d\.]+)\s"),
"nkpts": re.compile("number of k points=\s+([\d]+)"),
"total_force": re.compile("Total force =\s+([\d\.]+)")
}
def get_results(filename):
data = {}
with open(filename) as f:
for l in f:
for k, p in patterns.items():
m = p.search(l)
if m:
data[k] = float(m.group(1))
continue
return data
def analyze(filenames):
fieldnames = ['filename', 'ecut', 'nkpts', 'alat', 'energy']
with open('results.csv', 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for f in filenames:
r = get_results(f)
r["filename"] = f
writer.writerow(r)
print("Results written to results.csv!")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='''Tool for analysis of PWSCF calculations.''')
parser.add_argument(
'filenames', metavar='filenames', type=str, nargs="+",
help='Files to process. You may use wildcards, e.g., "python analyze.py *.out".')
args = parser.parse_args()
analyze(args.filenames)
|
Python
| 0.000053
|
@@ -842,16 +842,30 @@
'energy'
+,'total_force'
%5D%0A wi
|
6e4e79afc3167d0b3a2816c35aef88aaf3314335
|
Add setLogLevel as an export.
|
mininet/log.py
|
mininet/log.py
|
"Logging functions for Mininet."
import logging
from logging import Logger
import types
# Create a new loglevel, 'CLI info', which enables a Mininet user to see only
# the output of the commands they execute, plus any errors or warnings. This
# level is in between info and warning. CLI info-level commands should not be
# printed during regression tests.
OUTPUT = 25
LEVELS = { 'debug': logging.DEBUG,
'info': logging.INFO,
'output': OUTPUT,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL }
# change this to logging.INFO to get printouts when running unit tests
LOGLEVELDEFAULT = logging.WARNING
#default: '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
LOGMSGFORMAT = '%(message)s'
# Modified from python2.5/__init__.py
class StreamHandlerNoNewline( logging.StreamHandler ):
"""StreamHandler that doesn't print newlines by default.
Since StreamHandler automatically adds newlines, define a mod to more
easily support interactive mode when we want it, or errors-only logging
for running unit tests."""
def emit( self, record ):
"""Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline
[ N.B. this may be removed depending on feedback ]. If exception
information is present, it is formatted using
traceback.printException and appended to the stream."""
try:
msg = self.format( record )
fs = '%s' # was '%s\n'
if not hasattr( types, 'UnicodeType' ): # if no unicode support...
self.stream.write( fs % msg )
else:
try:
self.stream.write( fs % msg )
except UnicodeError:
self.stream.write( fs % msg.encode( 'UTF-8' ) )
self.flush()
except ( KeyboardInterrupt, SystemExit ):
raise
except:
self.handleError( record )
class Singleton( type ):
"""Singleton pattern from Wikipedia
See http://en.wikipedia.org/wiki/SingletonPattern#Python
Intended to be used as a __metaclass_ param, as shown for the class
below.
Changed cls first args to mcs to satisfy pylint."""
def __init__( mcs, name, bases, dict_ ):
super( Singleton, mcs ).__init__( name, bases, dict_ )
mcs.instance = None
def __call__( mcs, *args, **kw ):
if mcs.instance is None:
mcs.instance = super( Singleton, mcs ).__call__( *args, **kw )
return mcs.instance
class MininetLogger( Logger, object ):
"""Mininet-specific logger
Enable each mininet .py file to with one import:
from mininet.log import [lg, info, error]
...get a default logger that doesn't require one newline per logging
call.
Inherit from object to ensure that we have at least one new-style base
class, and can then use the __metaclass__ directive, to prevent this
error:
TypeError: Error when calling the metaclass bases
a new-style class can't have only classic bases
If Python2.5/logging/__init__.py defined Filterer as a new-style class,
via Filterer( object ): rather than Filterer, we wouldn't need this.
Use singleton pattern to ensure only one logger is ever created."""
__metaclass__ = Singleton
def __init__( self ):
Logger.__init__( self, "mininet" )
# create console handler
ch = StreamHandlerNoNewline()
# create formatter
formatter = logging.Formatter( LOGMSGFORMAT )
# add formatter to ch
ch.setFormatter( formatter )
# add ch to lg
self.addHandler( ch )
self.setLogLevel()
def setLogLevel( self, levelname=None ):
"""Setup loglevel.
Convenience function to support lowercase names.
levelName: level name from LEVELS"""
level = LOGLEVELDEFAULT
if levelname != None:
if levelname not in LEVELS:
raise Exception( 'unknown levelname seen in setLogLevel' )
else:
level = LEVELS.get( levelname, level )
self.setLevel( level )
self.handlers[ 0 ].setLevel( level )
# pylint: disable-msg=E0202
# "An attribute inherited from mininet.log hide this method"
# Not sure why this is occurring - this function definitely gets called.
# See /usr/lib/python2.5/logging/__init__.py; modified from warning()
def output( self, msg, *args, **kwargs ):
"""Log 'msg % args' with severity 'OUTPUT'.
To pass exception information, use the keyword argument exc_info
with a true value, e.g.
logger.warning("Houston, we have a %s", "cli output", exc_info=1)
"""
if self.manager.disable >= OUTPUT:
return
if self.isEnabledFor( OUTPUT ):
self._log( OUTPUT, msg, args, kwargs )
# pylint: enable-msg=E0202
lg = MininetLogger()
# Make things a bit more convenient by adding aliases
# (info, warn, error, debug) and allowing info( 'this', 'is', 'OK' )
# In the future we may wish to make things more efficient by only
# doing the join (and calling the function) unless the logging level
# is high enough.
def makeListCompatible( fn ):
"""Return a new function allowing fn( 'a 1 b' ) to be called as
newfn( 'a', 1, 'b' )"""
def newfn( *args ):
"Generated function. Closure-ish."
if len( args ) == 1:
return fn( *args )
args = ' '.join( [ str( arg ) for arg in args ] )
return fn( args )
# Fix newfn's name and docstring
setattr( newfn, '__name__', fn.__name__ )
setattr( newfn, '__doc__', fn.__doc__ )
return newfn
info, output, warn, error, debug = lg.info, lg.output, lg.warn, lg.error, \
lg.debug = [ makeListCompatible( f ) for f in lg.info, lg.output, lg.warn,
lg.error, lg.debug ]
|
Python
| 0
|
@@ -5930,16 +5930,23 @@
debug =
+ ( %0A
lg.info
@@ -5980,14 +5980,8 @@
ror,
- %5C%0A
lg.
@@ -5989,13 +5989,24 @@
ebug
+ )
= %5B
+%0A
make
@@ -6034,16 +6034,29 @@
or f in
+%0A
lg.info,
@@ -6075,33 +6075,16 @@
lg.warn,
-%0A
lg.erro
@@ -6097,8 +6097,38 @@
debug %5D%0A
+%0AsetLogLevel = lg.setLogLevel%0A
|
0ce8050b797b3e2c2a9b0e74cbc67fd8e31736b3
|
Remove working distros to focus on non-working ones
|
fog-aws-testing/scripts/settings.py
|
fog-aws-testing/scripts/settings.py
|
# The list of OSs.
OSs = ["debian9","centos7","rhel7","fedora29","arch","ubuntu18_04"]
dnsAddresses = ["debian9.fogtesting.cloud","centos7.fogtesting.cloud","rhel7.fogtesting.cloud","fedora29.fogtesting.cloud","arch.fogtesting.cloud","ubuntu18_04.fogtesting.cloud"]
# The list of branches to process.
branches = ["master","dev-branch"]
# The region we operate in, dictated by terraform.
theRegion = "us-east-2"
# The availibility zone, which we use just one zone.
zone = theRegion + 'a'
# For when we need to wait for something to get done while in a loop, wait this long.
wait = 1
scriptDir = "/home/admin/fog-community-scripts/fog-aws-testing/scripts"
webdir = '/tmp/webdir'
statusDir = '/tmp/statuses'
indexHtml = 'index.html'
green = "green.png"
orange = "orange.png"
red = "red.png"
s3bucket = "fogtesting2.theworkmans.us"
http = "http://"
port = ""
netdir = ""
remoteResult = "/root/result"
ssh = "/usr/bin/ssh"
scp = "/usr/bin/scp"
timeout = "/usr/bin/timeout"
s3cmd = "/usr/bin/s3cmd"
ssh_keyscan = "/usr/bin/ssh-keyscan"
sshTimeout = "15"
fogTimeout= "15m" #Time to wait for FOG installation to complete. Must end with a unit of time. s for seconds, m for minutes.
sshTime="15s" #Time to wait for small SSH commands to complete. Must end with a unit of time. s for seconds, m for minutes.
codes = {
"-1":{
"reason":"Installer did not complete within alotted time.",
"status":orange
},
"0":{
"reason":"Success.",
"status":green
},
"1":{
"reason":"Failed to call script properly.",
"status":orange
},
"2":{
"reason":"Failed to reset git.",
"status":orange
},
"3":{
"reason":"Failed to pull git.",
"status":orange
},
"4":{
"reason":"Failed to checkout git.",
"status":orange
},
"5":{
"reason":"Failed to change directory.",
"status":orange
},
"6":{
"reason":"Installation failed.",
"status":red
}
}
|
Python
| 0
|
@@ -12,16 +12,17 @@
of OSs.%0A
+#
OSs = %5B%22
@@ -81,16 +81,65 @@
18_04%22%5D%0A
+OSs = %5B%22rhel7%22,%22fedora29%22,%22arch%22,%22ubuntu18_04%22%5D%0A#
dnsAddre
@@ -308,16 +308,141 @@
.cloud%22%5D
+%0AdnsAddresses = %5B%22rhel7.fogtesting.cloud%22,%22fedora29.fogtesting.cloud%22,%22arch.fogtesting.cloud%22,%22ubuntu18_04.fogtesting.cloud%22%5D
%0A%0A# The
@@ -470,16 +470,38 @@
rocess.%0A
+branches = %5B%22master%22%5D%0A
branches
|
7f3c8fd66b95e3246901efc0795e5631aadb7d36
|
Fix factories
|
src/competition/tests/factories.py
|
src/competition/tests/factories.py
|
import factory
import random
from datetime import timedelta
from datetime import datetime
from django.contrib.auth.models import User
from competition.models import (Competition, Game, GameScore, Avatar, Team,
Organizer, OrganizerRole,
Registration, Invitation)
from competition.models import RegistrationQuestion as Question
from competition.models import RegistrationQuestionChoice as Choice
from competition.models import RegistrationQuestionResponse as Response
def now(_=None):
"""Returns the current time. Takes a single optional argument,
which gets thrown away. The argument is to make factory_boy happy.
"""
return datetime.now()
def later(_=None):
"""Returns the current time + 12 hours. Takes a single optional
argument, which gets thrown away. The argument is to make
factory_boy happy.
"""
return datetime.now() + timedelta(hours=12)
class UserFactory(factory.Factory):
FACTORY_FOR = User
username = factory.Sequence(lambda n: 'user' + n)
password = "123"
@classmethod
def _prepare(cls, create, **kwargs):
password = kwargs.pop('password', None)
user = super(UserFactory, cls)._prepare(create, **kwargs)
if password:
user.set_password(password)
if create:
user.save()
return user
class CompetitionFactory(factory.Factory):
FACTORY_FOR = Competition
name = factory.Sequence(lambda n: "MegaMinerAI: %s" % n)
start_time = factory.LazyAttribute(now)
end_time = factory.LazyAttribute(later)
cost = 8.00
min_num_team_members = 1
max_num_team_members = 3
description = "This is the best MegaMinerAI ever yay!"
class AvatarFactory(factory.Factory):
FACTORY_FOR = Avatar
class TeamFactory(factory.Factory):
FACTORY_FOR = Team
competition = factory.SubFactory(CompetitionFactory)
name = factory.Sequence(lambda n: "Team #%s" % n)
@classmethod
def _prepare(cls, create, **kwargs):
"""Register some users and add them as members of the new team"""
num_choices = int(kwargs.pop('num_members', 3))
team = super(TeamFactory, cls)._prepare(create, **kwargs)
if team.members.count() == 0:
for _i in range(num_choices):
u = UserFactory.create()
RegistrationFactory.create(user=u, competition=team.competition)
team.add_team_member(u)
return team
class GameFactory(factory.Factory):
FACTORY_FOR = Game
competition = factory.SubFactory(CompetitionFactory)
start_time = factory.LazyAttribute(now)
end_time = factory.LazyAttribute(now)
class GameScoreFactory(factory.Factory):
FACTORY_FOR = GameScore
game = factory.SubFactory(GameFactory)
team = factory.SubFactory(TeamFactory)
score = factory.LazyAttribute(lambda _: random.randint(0, 100))
class OrganizerRoleFactory(factory.Factory):
FACTORY_FOR = OrganizerRole
name = factory.Sequence(lambda n: "Role #%s" % n)
description = "Role description."
class OrganizerFactory(factory.Factory):
FACTORY_FOR = Organizer
user = factory.SubFactory(UserFactory)
competition = factory.SubFactory(CompetitionFactory)
@classmethod
def _prepare(cls, create, **kwargs):
"""Add a role to the Organizer"""
num_roles = int(kwargs.pop('num_roles', 1))
organizer = super(OrganizerFactory, cls)._prepare(create, **kwargs)
if num_roles > 0:
for _i in range(num_roles):
organizer.role.add(OrganizerRoleFactory.create())
return organizer
class RegistrationFactory(factory.Factory):
FACTORY_FOR = Registration
user = factory.SubFactory(UserFactory)
competition = factory.SubFactory(CompetitionFactory)
class RegistrationQuestionFactory(factory.Factory):
FACTORY_FOR = Question
question = factory.Sequence(lambda n: "Question #%s" % n)
question_type = factory.LazyAttribute(
lambda _: random.choice(Question.QUESTION_TYPES)[0]
)
@classmethod
def _prepare(cls, create, **kwargs):
num_choices = int(kwargs.pop('num_choices', 4))
q = super(RegistrationQuestionFactory, cls)._prepare(create, **kwargs)
if q.question_type in ('SC', 'MC'):
for _i in range(num_choices):
RegistrationQuestionChoiceFactory.create(question=q)
return q
class RegistrationQuestionChoiceFactory(factory.Factory):
FACTORY_FOR = Choice
question = factory.SubFactory(RegistrationQuestionFactory)
choice = factory.Sequence(lambda n: "Choice #%s" % n)
class RegistrationQuestionResponseFactory(factory.Factory):
FACTORY_FOR = Response
question = factory.SubFactory(RegistrationQuestionFactory)
registration = factory.SubFactory(RegistrationFactory)
class InvitationFactory(factory.Factory):
FACTORY_FOR = Invitation
team = factory.SubFactory(TeamFactory)
sender = factory.SubFactory(UserFactory)
receiver = factory.SubFactory(UserFactory)
message = factory.Sequence(lambda n: "Message #%s" % n)
|
Python
| 0.000001
|
@@ -965,32 +965,50 @@
Factory(factory.
+django.DjangoModel
Factory):%0A FA
@@ -1076,13 +1076,21 @@
user
-' + n
+%7B%7D'.format(n)
)%0A
@@ -1440,32 +1440,50 @@
Factory(factory.
+django.DjangoModel
Factory):%0A FA
@@ -1811,32 +1811,50 @@
Factory(factory.
+django.DjangoModel
Factory):%0A FA
@@ -1892,32 +1892,50 @@
Factory(factory.
+django.DjangoModel
Factory):%0A FA
@@ -2600,32 +2600,50 @@
Factory(factory.
+django.DjangoModel
Factory):%0A FA
@@ -2828,32 +2828,50 @@
Factory(factory.
+django.DjangoModel
Factory):%0A FA
@@ -3076,32 +3076,50 @@
Factory(factory.
+django.DjangoModel
Factory):%0A FA
@@ -3262,32 +3262,50 @@
Factory(factory.
+django.DjangoModel
Factory):%0A FA
@@ -3841,32 +3841,50 @@
Factory(factory.
+django.DjangoModel
Factory):%0A FA
@@ -4045,32 +4045,50 @@
Factory(factory.
+django.DjangoModel
Factory):%0A FA
@@ -4688,32 +4688,50 @@
Factory(factory.
+django.DjangoModel
Factory):%0A FA
@@ -4915,32 +4915,50 @@
Factory(factory.
+django.DjangoModel
Factory):%0A FA
@@ -5135,16 +5135,34 @@
factory.
+django.DjangoModel
Factory)
|
b72ab35056ca6ec1e48db963d61c31d89ec80161
|
fix on winsock2
|
autoconf/winsock2.py
|
autoconf/winsock2.py
|
from _external import *
winsock2 = LibWithHeaderChecker( 'winsock2',
['winsock2.h'],
'c',
name='ws2_32' )
|
Python
| 0
|
@@ -53,22 +53,20 @@
cker( 'w
-insock
+s2_3
2',%0A%09%09
@@ -115,17 +115,19 @@
%09name='w
-s2_3
+insock
2' )%0A
|
0b499f01d517775fb03294c1c785318ca6224874
|
Bump to v0.0.5
|
backache/__init__.py
|
backache/__init__.py
|
from . core import *
from . antioxidant import celerize # flake8: noqa
from . errors import *
__version__ = (0, 0, 4)
|
Python
| 0.000001
|
@@ -114,7 +114,7 @@
0,
-4
+5
)%0A
|
db0dd0dd97f2370eac5173c6b000589ceaa7458f
|
put euare show_user debug methods in try catch blocks
|
testcases/cloud_admin/create_resources.py
|
testcases/cloud_admin/create_resources.py
|
#!/usr/bin/python
# Software License Agreement (BSD License)
#
# Copyright (c) 2009-2011, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms, with or
# without modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: vic.iglesias@eucalyptus.com
'''
Create resources (keypairs,groups, volumes,snapshots, buckets) for each user in the cloud.
'''
from eucaops import Eucaops
import re
import string
from eutester.euinstance import EuInstance
from eutester.eutestcase import EutesterTestCase
class ResourceGeneration(EutesterTestCase):
def __init__(self):
self.setuptestcase()
self.setup_parser()
self.parser.add_argument("--no-cleanup", action='store_true')
self.get_args()
# Setup basic eutester object
self.tester = Eucaops( credpath=self.args.credpath, config_file=self.args.config, password=self.args.password)
self.testers = []
def clean_method(self):
if not self.args.no_cleanup:
for tester in self.testers:
tester.show_euare_whoami()
tester.cleanup_artifacts()
def CreateResources(self):
users = self.tester.get_all_users()
self.testers.append(self.tester)
self.tester.show_all_users()
for user in users:
user_name = user['user_name']
user_account = user['account_name']
if not re.search("eucalyptus", user_account ):
self.tester.debug("Creating access key for " + user_name + " in account " + user_account)
keys = self.tester.create_access_key(user_name=user_name, delegate_account=user_account)
access_key = keys['access_key_id']
secret_key = keys['secret_access_key']
self.tester.debug("Creating Eucaops object with access key " + access_key + " and secret key " + secret_key)
new_tester = Eucaops(aws_access_key_id=access_key, aws_secret_access_key=secret_key, ec2_ip=self.tester.ec2.host, s3_ip=self.tester.s3.host,username=user_name, account=user_account)
self.testers.append(new_tester)
self.tester.debug("Created a total of " + str(len(self.testers)) + " testers" )
self.tester.show_all_users()
for resource_tester in self.testers:
import random
assert isinstance(resource_tester, Eucaops)
resource_tester.show_euare_whoami()
zone = random.choice(resource_tester.get_zones())
keypair = resource_tester.add_keypair(resource_tester.id_generator())
group = resource_tester.add_group(resource_tester.id_generator())
resource_tester.authorize_group_by_name(group_name=group.name)
resource_tester.authorize_group_by_name(group_name=group.name, port=-1, protocol="icmp" )
reservation = resource_tester.run_instance(keypair=keypair.name,group=group.name,zone=zone)
instance = reservation.instances[0]
assert isinstance(instance, EuInstance)
if not instance.ip_address == instance.private_ip_address:
address = resource_tester.allocate_address()
resource_tester.associate_address(instance=instance, address=address)
resource_tester.disassociate_address_from_instance(instance)
if not self.args.no_cleanup:
resource_tester.release_address(address)
self.tester.sleep(5)
instance.update()
instance.reset_ssh_connection()
volume = resource_tester.create_volume(size=1, zone=zone)
instance.attach_volume(volume)
snapshot = resource_tester.create_snapshot(volume_id=volume.id)
volume_from_snap = resource_tester.create_volume(snapshot=snapshot, zone=zone)
bucket = resource_tester.create_bucket(resource_tester.id_generator(12, string.ascii_lowercase + string.digits))
key = resource_tester.upload_object(bucket_name= bucket.name, key_name= resource_tester.id_generator(12, string.ascii_lowercase + string.digits), contents= resource_tester.id_generator(200))
if not self.args.no_cleanup:
resource_tester.terminate_instances(reservation)
if __name__ == "__main__":
testcase = ResourceGeneration()
### Either use the list of tests passed from config/command line to determine what subset of tests to run
list = testcase.args.tests or [ "CreateResources"]
### Convert test suite methods to EutesterUnitTest objects
unit_list = [ ]
for test in list:
unit_list.append( testcase.create_testunit_by_name(test) )
### Run the EutesterUnitTest objects
result = testcase.run_test_case_list(unit_list,clean_on_exit=True)
exit(result)
|
Python
| 0
|
@@ -2257,33 +2257,87 @@
t
-ester.show_euare_whoami()
+ry:%0A tester.show_euare_whoami()%0A except: pass
%0A
@@ -2493,16 +2493,33 @@
tester)%0A
+ try:%0A
@@ -2539,32 +2539,53 @@
how_all_users()%0A
+ except: pass%0A
for user
@@ -3519,24 +3519,41 @@
testers%22 )%0A
+ try:%0A
self
@@ -3573,24 +3573,44 @@
all_users()%0A
+ except: pass
%0A for
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.