| commit (string, 40 chars) | subject (string, 1–3.25k chars) | old_file (string, 4–311 chars) | new_file (string, 4–311 chars) | old_contents (string, 0–26.3k chars) | lang (string, 3 classes) | proba (float64, 0–1) | diff (string, 0–7.82k chars) |
|---|---|---|---|---|---|---|---|
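The `diff` cells below store their payload percent-encoded (`%0A` for newline, `%5B`/`%5D` for square brackets, `%22` for double quotes, and so on). A minimal decoding sketch, assuming standard URL percent-encoding; the fragment is a hypothetical cell excerpt, not a full patch:

```python
from urllib.parse import unquote

# Recover the literal text of a percent-encoded diff fragment.
print(unquote("-Optional%5Bint%5D = 5"))  # -> -Optional[int] = 5
print(repr(unquote("'%0A")))             # -> "'\n"
```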
c54fb802932717b417ed2e189da85b500c8e06b8
|
Update rasa_core/policies/mapping_policy.py
|
rasa_core/policies/mapping_policy.py
|
rasa_core/policies/mapping_policy.py
|
import logging
import os
from typing import Any, List, Text, Optional
from rasa_core.actions.action import (ACTION_LISTEN_NAME, ACTION_RESTART_NAME,
ACTION_BACK_NAME)
from rasa_core import utils
from rasa_core.domain import Domain
from rasa_core.policies.policy import Policy
from rasa_core.trackers import DialogueStateTracker
logger = logging.getLogger(__name__)
class MappingPolicy(Policy):
"""Policy which maps intents directly to actions.
Intents can be assigned actions in the domain file which are to be
executed whenever the intent is detected. This policy takes precedence over
any other policy."""
def __init__(self, priority: Optional[int] = 5) -> None:
"""Create a new Mapping policy."""
super(MappingPolicy, self).__init__(priority=priority)
def train(self, *args, **kwargs) -> None:
"""Does nothing. This policy is deterministic."""
pass
def predict_action_probabilities(self, tracker: DialogueStateTracker,
domain: Domain) -> List[float]:
"""Predicts the assigned action.
If the current intent is assigned to an action that action will be
predicted with the highest probability of all policies. If it is not
the policy will predict zero for every action."""
prediction = [0.0] * domain.num_actions
if tracker.latest_action_name == ACTION_LISTEN_NAME:
intent = tracker.latest_message.intent.get('name')
action = domain.intent_properties.get(intent, {}).get('triggers')
if action:
idx = domain.index_for_action(action)
if idx is None:
logger.warning("MappingPolicy tried to predict unkown "
"action '{}'.".format(action))
else:
prediction[idx] = 1
elif tracker.latest_message.intent.get('name') == 'restart':
idx = domain.index_for_action(ACTION_RESTART_NAME)
prediction[idx] = 1
elif tracker.latest_message.intent.get('name') == 'back':
idx = domain.index_for_action(ACTION_BACK_NAME)
prediction[idx] = 1
return prediction
def persist(self, *args) -> None:
"""Does nothing since there is no data to be saved."""
pass
@classmethod
def load(cls, *args) -> 'MappingPolicy':
"""Just returns the class since there is no data to be loaded."""
return cls()
|
Python
| 0
|
@@ -699,21 +699,11 @@
ty:
-Optional%5B
int
-%5D
= 5
|
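The prediction logic above emits a one-hot vector over the domain's actions. A toy restatement with made-up sizes, so the shape is clear without rasa_core installed:

```python
num_actions = 5    # stand-in for domain.num_actions
triggered_idx = 2  # stand-in for domain.index_for_action(action)

prediction = [0.0] * num_actions
prediction[triggered_idx] = 1
assert prediction == [0.0, 0.0, 1, 0.0, 0.0]
```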
e7c6d70fccf709e2d84a8e63fb01ce15aa6aff92
|
An Thing
|
django_www/common_settings.py
|
django_www/common_settings.py
|
# Settings common to www.djangoproject.com and docs.djangoproject.com
import json
import os
import platform
from unipath import FSPath as Path
### Utilities
# The full path to the repository root.
BASE = Path(__file__).absolute().ancestor(2)
# Far too clever trick to know if we're running on the deployment server.
PRODUCTION = ('DJANGOPROJECT_DEBUG' not in os.environ)
# It's a secret to everybody
with open(BASE.child('secrets.json')) as handle:
SECRETS = json.load(handle)
### Django settings
ADMINS = (
('Adrian Holovaty', 'holovaty@gmail.com'),
('Jacob Kaplan-Moss', 'jacob@jacobian.org'),
)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
} if PRODUCTION else {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache'
},
}
CACHE_MIDDLEWARE_SECONDS = 60 * 5 # 5 minutes
CSRF_COOKIE_SECURE = PRODUCTION
CSRF_COOKIE_HTTPONLY = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'djangoproject',
'USER': 'djangoproject'
'HOST': SECRETS.get('db_host', 'localhost'),
'PASSWORD': SECRETS.get('db_password', ''),
},
}
DEBUG = not PRODUCTION
DEFAULT_FROM_EMAIL = "noreply@djangoproject.com"
if not PRODUCTION:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
"simple": {"format": "[%(name)s] %(levelname)s: %(message)s"},
"full": {"format": "%(asctime)s [%(name)s] %(levelname)s: %(message)s"},
},
"filters": {
"require_debug_false": {
"()": "django.utils.log.RequireDebugFalse",
},
},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ['require_debug_false'],
"class": "django.utils.log.AdminEmailHandler",
},
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "simple",
},
},
"loggers": {
"django.request": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": False,
},
}
}
if PRODUCTION:
LOGGING["handlers"]["logfile"] = {
"formatter": "full",
"level": "DEBUG",
"class": "logging.handlers.TimedRotatingFileHandler",
"filename": "/var/log/django_website/website.log",
"when": "D",
"interval": 7,
"backupCount": 5,
}
LOGGING["loggers"]["django.request"]["handlers"].append("logfile")
MANAGERS = (
('Jacob Kaplan-Moss', 'jacob@jacobian.org'),
)
MEDIA_ROOT = BASE.child('media')
MEDIA_URL = '/m/'
SECRET_KEY = str(SECRETS['secret_key'])
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTOCOL", "https")
SERVER_EMAIL = "root@djangoproject.com"
SESSION_COOKIE_SECURE = PRODUCTION
SESSION_COOKIE_HTTPONLY = True
STATICFILES_DIRS = [BASE.child('static')]
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.CachedStaticFilesStorage'
STATIC_ROOT = BASE.child('static_root')
STATIC_URL = '/s/'
TEMPLATE_DIRS = [BASE.child('templates')]
TIME_ZONE = 'America/Chicago'
USE_I18N = False
USE_L10N = False
USE_TZ = False
### django-secure settings
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_HSTS_SECONDS = 600
|
Python
| 0.999997
|
@@ -1121,16 +1121,17 @@
project'
+,
%0A
|
71f22b00c3a643a0615c54764db4d6a88291e1c1
|
Version bump
|
djangocms_snippet/__init__.py
|
djangocms_snippet/__init__.py
|
__version__ = '1.0.1'
|
Python
| 0.000001
|
@@ -12,11 +12,11 @@
= '1.0.
-1
+2
'%0A
|
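The `@@ -12,11 +12,11 @@` header above counts characters, not lines, which matches the diff-match-patch patch format rather than a unified diff. If that reading is right, the hunk can be applied to `old_contents` as sketched below; this assumes the `diff-match-patch` package, and the patch text re-adds the leading-space prefix on context lines that the dump appears to have stripped:

```python
from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
# Reconstructed patch text for the version-bump hunk shown above.
patch_text = "@@ -12,11 +12,11 @@\n  = '1.0.\n-1\n+2\n '%0A\n"
patches = dmp.patch_fromText(patch_text)
new_text, results = dmp.patch_apply(patches, "__version__ = '1.0.1'\n")
assert results == [True] and new_text == "__version__ = '1.0.2'\n"
```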
673b123b147b99f49357b02c227b1d34ae653485
|
Set up some realistic defaults.
|
bootstrap.py
|
bootstrap.py
|
"""This script will bootstrap the database to a minimal level for usage.
You will be left with the following buildings:
* Town Hall (homely).
* Farm (requires Town Hall).
* Stable (requires Farm)
You will be left with the following land features:
* Mine (provides gold)
* Quarry (provides stone)
* Lake (provides water)
* Forest (provides wood)
* Field (provides food)
You will be left with the following mobiles:
* Peasant (provided by Town Hall).
Peasants can build town halls and farms, and can exploit wood and gold.
* Farmer (provided by Farm)
Farmers can build farms and stables, and can exploit food, water, and wood.
* Scout (provided by Stable)
"""
import os.path
from server.db import BuildingType, FeatureType, MobileType, dump
from server.db.util import _filename as fn
def main():
if os.path.isfile(fn):
return print('Refusing to continue with existing database file.')
town_hall = BuildingType(name='Town Hall', homely=True)
farm = BuildingType(name='Farm', depends=town_hall)
stable = BuildingType(name='Stable', depends=farm)
for thing in (town_hall, farm, stable):
thing.save()
peasant = MobileType(name='Peasant', wood=1, gold=1)
peasant.save()
for t in (town_hall, farm):
t.builders.append(peasant)
town_hall.add_recruit(peasant, food=1, water=1, gold=1).save()
farmer = MobileType(name='Farmer', food=1, water=1, wood=1)
farmer.save()
for t in (farm, stable):
t.builders.append(farmer)
farm.add_recruit(farmer, food=2, gold=2, water=2)
scout = MobileType(name='Scout', stone=1)
scout.save()
stable.add_recruit(scout, food=4, water=5, gold=3)
FeatureType(name='Mine', gold=1).save()
FeatureType(name='Quarry', stone=1).save()
FeatureType(name='Lake', water=1).save()
FeatureType(name='Forest', wood=1).save()
FeatureType(name='Field', food=1).save()
dump()
print('Done.')
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -921,24 +921,33 @@
uildingType(
+%0A
name='Town H
@@ -963,16 +963,49 @@
ely=True
+, gold=15, wood=30, stone=10%0A
)%0A fa
@@ -1018,24 +1018,33 @@
uildingType(
+%0A
name='Farm',
@@ -1061,16 +1061,46 @@
own_hall
+, gold=5, wood=5, stone=1%0A
)%0A st
@@ -1115,24 +1115,33 @@
uildingType(
+%0A
name='Stable
@@ -1155,16 +1155,49 @@
nds=farm
+, wood=30, stone=15, gold=30%0A
)%0A fo
@@ -1320,21 +1320,104 @@
-peasant.save(
+farmer = MobileType(name='Farmer', food=1, water=1)%0A scout = MobileType(name='Scout', stone=1
)%0A
@@ -1427,64 +1427,128 @@
or t
+hing
in (
-town_hall, farm):%0A t.builders.append(peasant
+peasant, farmer, scout):%0A thing.save()%0A peasant.add_building(town_hall)%0A peasant.add_building(farm
)%0A
@@ -1598,25 +1598,25 @@
ter=1, gold=
-1
+3
).save()%0A
@@ -1626,141 +1626,26 @@
rmer
- = MobileType(name='Farmer', food=1, water=1, wood=1)%0A farmer.save()%0A for t in (farm, stable):%0A t.builders.append
+.add_building
(farm
-er
)%0A
@@ -1684,17 +1684,17 @@
2, gold=
-2
+4
, water=
@@ -1700,71 +1700,8 @@
=2)%0A
- scout = MobileType(name='Scout', stone=1)%0A scout.save()%0A
@@ -1748,17 +1748,17 @@
5, gold=
-3
+6
)%0A Fe
|
96afec8d7a8adc60547dca4591dd9bc36ba31712
|
Move instance list generator into class
|
botox/aws.py
|
botox/aws.py
|
# If developing this further, see the following e-buddies to collaborate as they
# also have internal/unreleased solutions and have indicated interest:
#
# * Gavin McQuillan (gmcquillan)
# * Travis Swicegood (tswicegood)
# * Christopher Groskopf (onyxfish)
import os
import pprint
import sys
import time
from boto.ec2 import regions as _ec2_regions
from boto.ec2.connection import EC2Connection as _EC2
from boto.ec2 import instance
from boto.exception import EC2ResponseError as _ResponseError
from prettytable import PrettyTable as _Table
from .utils import puts
BLANK = '-'
# Monkeypatching
@property
def _instance_name(self):
return self.tags.get('Name', BLANK)
instance.Instance.name = _instance_name
class AWS(object):
def __init__(self, verbose=False, **kwargs):
"""
Set up AWS connection with the following possible parameters:
* ``access_key_id``: AWS key ID. Will check your shell's
``$AWS_ACCESS_KEY_ID`` value if nothing is given at the Python level.
* ``secret_access_key``: AWS secret key. Defaults to
``$AWS_SECRET_ACCESS_KEY``.
* ``ami``: EC2 AMI ID (e.g. ``"ami-4b4ba522"``.) Default:
``$AWS_AMI``.
* ``size``: EC2 size ID (e.g. ``"m1.large"``.) Default: ``$AWS_SIZE``.
* ``region``: AWS region ID (e.g. ``"us-east-1"``.) Default:
``$AWS_REGION``.
* ``zone``: AWS zone ID (e.g. ``"us-east-1d"``.). Default:
``$AWS_ZONE``.
* ``keypair``: EC2 login authentication keypair name. Default:
``$AWS_KEYPAIR``.
* ``security_groups``: EC2 security groups instances should default to.
Default: ``$AWS_SECURITY_GROUPS``.
Other behavior-controlling options:
* ``verbose``: Whether or not to print out detailed info about what's
going on.
"""
# Merge values from kwargs/shell env
required = "access_key_id secret_access_key region".split()
optional = "ami zone size keypair security_groups".split()
for var in required + optional:
env_value = os.environ.get("AWS_%s" % var.upper())
setattr(self, var, kwargs.get(var, env_value))
# Handle other kwargs
self.verbose = verbose
# Must at least have credentials + region
missing = filter(lambda x: not getattr(self, x), required)
if missing:
msg = ", ".join(missing)
raise ValueError("Missing required parameters: %s" % msg)
# Auth creds
boto_args = {
'aws_access_key_id': self.access_key_id,
'aws_secret_access_key': self.secret_access_key,
}
# Obtain our default region
regions = _ec2_regions(**boto_args)
region = filter(lambda x: x.name == self.region, regions)[0]
boto_args.update(region=region)
# Get a connection to that region
self.connection = _EC2(**boto_args)
def __getattr__(self, name):
return getattr(self.connection, name)
def get_image(self, name):
return self.get_all_images([name])[0]
def log(self, *args, **kwargs):
"""
If ``self.verbose`` is True, acts as a proxy for ``utils.puts``.
Otherwise, this function is a no-op.
"""
if self.verbose:
return puts(*args, **kwargs)
def create(self, hostname, **kwargs):
"""
Create new EC2 instance named ``hostname``.
Available keyword arguments follow. All values will default to the
attributes set when initializing this AWS object, e.g. if ``size`` is
omitted, it will fall back to ``self.size``.
* ``size``
* ``ami``
* ``keypair``
* ``zone``
* ``security_groups``
This method returns a ``boto.EC2.instance.Instance`` object.
Example usage::
AWS(credentials).create(
hostname='web1.example.com',
size='m1.large',
ami='abc123'
)
"""
# Parameter handling
params = {
'ami': "an AMI to use",
'size': "an instance size",
'keypair': "an access keypair name",
'zone': "a zone ID",
'security_groups': "security groups",
}
for var, msg in params.iteritems():
kwargs[var] = kwargs.get(var, getattr(self, var))
missing = filter(lambda x: not kwargs[x], params.keys())
if missing:
msgs = ", ".join([params[x] for x in missing])
raise ValueError("Missing the following parameters: %s" % msgs)
# Create instance
self.log("Creating '%s' (a %s instance of %s)..." % (
hostname, kwargs['size'], kwargs['ami']))
image = self.get_image(kwargs['ami'])
groups = self.get_all_security_groups(kwargs['security_groups'])
instance = image.run(
instance_type=kwargs['size'],
key_name=kwargs['keypair'],
placement=kwargs['zone'],
security_groups=groups
).instances[0]
self.log("done.\n")
# Name it
self.log("Tagging as '%s'..." % hostname)
try:
instance.add_tag('Name', hostname)
except _ResponseError:
time.sleep(1)
instance.add_tag('Name', hostname)
self.log("done.\n")
# Wait for it to finish booting
self.log("Waiting for boot: ")
tick = 5
while instance.state != 'running':
self.log(".")
time.sleep(tick)
instance.update()
self.log("done.\n")
return instance
|
Python
| 0.000001
|
@@ -3338,24 +3338,294 @@
**kwargs)%0A%0A
+ @property%0A def instances(self):%0A %22%22%22%0A Generator yielding all instances in this connection's account.%0A %22%22%22%0A for reservation in self.get_all_instances():%0A for instance in reservation.instances:%0A yield instance%0A%0A
def crea
|
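The hunk above inserts an `instances` property that flattens boto reservations into one generator. A self-contained restatement of that pattern; `_Reservation` is a hypothetical stub standing in for boto's reservation objects:

```python
class _Reservation:
    """Stub exposing the one attribute the generator reads."""
    def __init__(self, instances):
        self.instances = instances

def iter_instances(reservations):
    # Same shape as the new property: every instance of every reservation.
    for reservation in reservations:
        for instance in reservation.instances:
            yield instance

assert list(iter_instances([_Reservation([1, 2]), _Reservation([3])])) == [1, 2, 3]
```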
7d60136a49fc908e0f1912d35b7154e15f548ba8
|
Remove redundant lines
|
bqx/query.py
|
bqx/query.py
|
import textwrap
import bqx.parts
import bqx.abstract
from copy import deepcopy
Column = bqx.parts.Column
Table = bqx.parts.Table
Alias = bqx.abstract.Alias
class Query:
def __init__(self, udf=[], indent=True, auto_alias=False):
self.partial = True
self.applied_c = []
self.alias_name = None
self.udf_funcs = udf
self.indent = indent
self.auto_alias = auto_alias
self.selected = False
def __getattr__(self, item):
if self.alias_name:
return Column('%s.%s' % (self.alias_name, str(item)))
else:
raise Exception("Attribute/Function %s is not found. Call AS or register UDF funcs." % item)
def __deepcopy__(self, memo):
copied = type(self)()
for k, v in self.__dict__.items():
setattr(copied, k, deepcopy(v, memo))
return copied
def SELECT(self, *args, **kwargs):
if self.selected:
self.selected = False
q = self.SELECT(*args, **kwargs)
q = q.FROM(self)
q.applied_c = q.applied_c[-2:]
return q
else:
self.selected = True
col = []
for arg in args:
if isinstance(arg, str):
col.append(arg)
elif isinstance(arg, Column) or isinstance(arg, Case):
col.append(arg.as_claus(auto_alias=self.auto_alias))
elif arg == self._Special.ALL:
col.append('*')
col += [Column(real).AS(alias).as_claus() for alias, real in kwargs.items()]
return self._apply('SELECT %s' % ', '.join(col))
def FROM(self, arg):
t = self._as_claus(arg)
if self.indent:
t = textwrap.indent(t, ' ').lstrip()
return self._apply('FROM %s' % t)
def WHERE(self, cond):
if self._is_next_to('FROM'):
return self._apply('WHERE %s' % cond)
else:
raise Exception('WHERE clause is put in wrong place.')
def ON(self, cond):
if self._is_next_to('JOIN'):
return self._apply('ON %s' % cond)
else:
raise Exception('ON clause is put in wrong place. Last clause: %s' % self.applied_c[-1])
def ORDER_BY(self, row):
s = 'ORDER BY %s' % str(row)
return self._apply(s)
def ASC(self):
return self._add_decorator('ORDER BY', 'ASC')
def DESC(self):
return self._add_decorator('ORDER BY', 'DESC')
def GROUP_BY(self, *rows):
return self._apply('GROUP BY %s' % ', '.join([str(x) for x in rows]))
def _JOIN(self, type, table):
t = self._as_claus(table)
return self._apply('%s JOIN %s' % (type, t))
def INNER_JOIN(self, table):
return self._JOIN('INNER', table)
def LEFT_OUTER_JOIN(self, table):
return self._JOIN('LEFT OUTER', table)
def CROSS_JOIN(self, table):
return self._JOIN('CROSS', table)
def EACH(self):
if self._is_next_to('JOIN'):
q = self.applied_c[-1].replace('JOIN', 'JOIN EACH')
return self._replace(-1, q)
elif self._is_next_to('GROUP BY'):
q = self.applied_c[-1].replace('GROUP', 'GROUP EACH')
return self._replace(-1, q)
else:
raise Exception('Not allowed to place EACH here.')
def UDF(self, func):
if func in self.udf_funcs:
last_q = self.applied_c[-1]
if last_q.startswith('FROM '):
last_q = last_q[5:].strip('()')
func_name = func.upper()
s = 'FROM %s(%s)' % (func_name, last_q)
return self._replace(-1, s)
else:
raise Exception("Can't apply func other than FROM clause.")
else:
raise Exception('%s is not registered as an UDF.' % func)
def LIMIT(self, limit):
return self._apply('LIMIT %d' % limit)
def AS(self, alias_name):
self.alias_name = alias_name
return self
def getq(self, end='\n', bracket=False):
if bracket:
s = '(%s)'
else:
s = '%s'
return s % end.join(self.applied_c)
def _apply(self, clause):
newself = deepcopy(self)
newself.applied_c.append(clause)
return newself
def _replace(self, index, new_clause):
newself = deepcopy(self)
newself.applied_c[index] = new_clause
return newself
def _is_next_to(self, last_claus):
try:
if self.applied_c[-1].find(last_claus) >= 0:
return True
except IndexError:
pass
return False
def _as_claus(self, arg):
if isinstance(arg, Table):
t = arg.as_claus()
elif isinstance(arg, Query):
t = arg.getq(bracket=True)
if arg.alias_name:
t = '%s AS %s' % (t, arg.alias_name)
else:
t = arg
return t
def _add_decorator(self, last_clause, deco):
if self._is_next_to(last_clause):
return self._apply(deco)
else:
raise Exception("Can't add decorator %s here." % deco)
class Case(Alias):
def __init__(self):
super().__init__(self)
self.conds = []
def __str__(self):
if self.alias_name:
return self.alias_name
else:
return self.real_name
def WHEN(self, cond):
self.conds.append([cond, 0])
return self
def THEN(self, val):
self.conds[-1][1] = val
return self
def END(self):
template = '\nCASE %s\nEND'
conds_str = ['WHEN {cond} THEN {val}'.format(cond=c[0], val=repr(str(c[1]))) for c in self.conds]
self.real_name = template % '\n'.join(conds_str)
return self
|
Python
| 0.999999
|
@@ -1404,83 +1404,8 @@
s))%0A
- elif arg == self._Special.ALL:%0A col.append('*')%0A
@@ -5215,110 +5215,33 @@
-if self.alias_name:%0A return self.alias_name%0A else:%0A return self.real_name
+raise NotImplementedError
%0A%0A
|
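Every clause method above funnels through `_apply`, which deep-copies the query before appending, so a partially built query can be branched without mutation. A hedged usage sketch, assuming the `bqx` package is importable as in the module above; table and column names are hypothetical:

```python
q_base = Query().SELECT('a', 'b').FROM('some_table')
q1 = q_base.WHERE('a > 1')
q2 = q_base.WHERE('b < 2')  # q_base itself is unchanged
print(q1.getq())            # SELECT a, b / FROM some_table / WHERE a > 1
```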
208b6cf99d90494df9a0f6d66a0ea3669ff5fe66
|
remove get, add ls and rm
|
dog/ext/config.py
|
dog/ext/config.py
|
import logging
from discord.ext import commands
from dog import Cog
log = logging.getLogger(__name__)
class Config(Cog):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.permitted_keys = [
'woof_response'
]
@commands.group()
@commands.guild_only()
@commands.has_permissions(manage_guild=True)
async def config(self, ctx):
""" Manages server-specific configuration for the bot. """
@config.command(name='set')
async def config_set(self, ctx, name: str, value: str):
""" Sets a config field for this server. """
if name not in self.permitted_keys:
await ctx.send('That configuration value is not allowed.')
return
await self.bot.redis.set(f'{ctx.guild.id}:{name}', value)
await ctx.send('\N{OK HAND SIGN}')
@config.command(name='permitted')
async def config_permitted(self, ctx):
""" Views permitted configuration keys. """
await ctx.send(', '.join(self.permitted_keys))
@config.command(name='is_set')
async def config_is_set(self, ctx, name: str):
""" Checks if a configuration key is set. """
is_set = await self.bot.config_is_set(ctx.guild, name)
await ctx.send('Yes, it is set.' if is_set else 'No, it is not set.')
@config.command(name='get')
async def config_get(self, ctx, name: str):
""" Fetches a config field for this server. """
result = await self.bot.redis.get(f'{ctx.guild.id}:{name}')
if result is not None:
result = result.decode()
else:
result = '`<nothing>`'
await ctx.send(f'`{name}`: {result}')
def setup(bot):
bot.add_cog(Config(bot))
|
Python
| 0.000002
|
@@ -1362,12 +1362,29 @@
me='
-get'
+list', aliases=%5B'ls'%5D
)%0A
@@ -1402,18 +1402,19 @@
config_
-ge
+lis
t(self,
@@ -1412,35 +1412,24 @@
st(self, ctx
-, name: str
):%0A %22
@@ -1435,30 +1435,36 @@
%22%22%22
-Fetches a config field
+Lists set configuration keys
for
@@ -1481,25 +1481,24 @@
er. %22%22%22%0A
-%0A
result =
@@ -1489,24 +1489,57 @@
-result =
+keys = %5Bk.decode().split(':')%5B1%5D for k in
await s
@@ -1552,19 +1552,20 @@
t.redis.
-get
+keys
(f'%7Bctx.
@@ -1578,17 +1578,12 @@
id%7D:
-%7Bname%7D')%0A
+*')%5D
%0A
@@ -1591,117 +1591,311 @@
-if result is not None:%0A result = result.decode()%0A else:%0A result = '%60%3Cnothing%3E%60'%0A
+await ctx.send('Set configuration keys in this server: ' + ', '.join(keys))%0A%0A @config.command(name='remove', aliases=%5B'rm', 'del'%5D)%0A async def config_remove(self, ctx, name: str):%0A %22%22%22 Removes a config field for this server. %22%22%22%0A await self.bot.redis.delete(f'%7Bctx.guild.id%7D:%7Bname%7D')
%0A
@@ -1918,27 +1918,24 @@
end(
-f'%60%7Bname%7D%60: %7Bresult
+'%5CN%7BOK HAND SIGN
%7D')%0A
|
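The replacement `list` subcommand above scans guild-scoped Redis keys of the form `<guild_id>:<name>`. A minimal illustration of its key-name extraction; the byte keys are made up, where real ones come from `redis.keys(f'{guild_id}:*')`:

```python
raw_keys = [b'1234:woof_response', b'1234:another_key']  # hypothetical keys
names = [k.decode().split(':')[1] for k in raw_keys]
assert names == ['woof_response', 'another_key']
```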
786cedc72c913d6026c45a75947a3f7195f6b118
|
Check isn't necessary
|
purchase_partial_invoicing/wizard/po_line_invoice.py
|
purchase_partial_invoicing/wizard/po_line_invoice.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from __future__ import division
from openerp import models, fields, api, exceptions
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
class PurchaseLineInvoice(models.TransientModel):
_inherit = 'purchase.order.line_invoice'
line_ids = fields.One2many('purchase.order.line_invoice.line',
'wizard_id', string='Lines')
@api.model
def default_get(self, fields):
ctx = self.env.context.copy()
po_ids = ctx.get('active_ids', [])
po_line_obj = self.env['purchase.order.line']
lines = []
for po_line in po_line_obj.browse(po_ids):
lines.append({
'po_line_id': po_line.id,
'product_qty': po_line.product_qty - po_line.invoiced_qty,
'invoiced_qty': po_line.product_qty - po_line.invoiced_qty,
'price_unit': po_line.price_unit,
})
defaults = super(PurchaseLineInvoice, self).default_get(fields)
defaults['line_ids'] = lines
return defaults
@api.multi
def makeInvoices(self):
self.ensure_one()
ctx = self.env.context.copy()
changed_lines = {}
ctx['active_ids'] = []
not_invoiced_lines = self.env['purchase.order.line']
invoiced_lines = self.env['purchase.order.line']
for line in self.line_ids:
if line.invoiced_qty > line.product_qty:
raise exceptions.Warning(
_("""Quantity to invoice is greater
than available quantity"""))
ctx['active_ids'].append(line.po_line_id.id)
changed_lines[
line.po_line_id.id
] = line.invoiced_qty
if line.po_line_id.fully_invoiced:
invoiced_lines += line.po_line_id
else:
not_invoiced_lines += line.po_line_id
if not_invoiced_lines.ids:
not_invoiced_lines.write({'invoiced': False})
if invoiced_lines.ids:
invoiced_lines.write({'invoiced': True})
ctx.update({'partial_quantity_lines': changed_lines})
res = super(PurchaseLineInvoice, self.with_context(ctx))\
.makeInvoices()
po_lines = self.env['purchase.order.line'].browse(changed_lines.keys())
for po_line in po_lines:
if po_line.invoiced_qty != po_line.product_qty:
po_line.invoiced = False
return res
class PurchaseLineInvoiceLine(models.TransientModel):
_name = 'purchase.order.line_invoice.line'
po_line_id = fields.Many2one('purchase.order.line', 'Purchase order line',
readonly=True)
product_qty = fields.Float(
'Quantity', digits_compute=dp.get_precision('Product Unit of Measure'),
readonly=True)
price_unit = fields.Float(related='po_line_id.price_unit',
string='Unit Price', readonly=True)
invoiced_qty = fields.Float(
string='Quantity to invoice',
digits_compute=dp.get_precision('Product Unit of Measure'))
wizard_id = fields.Many2one('purchase.order.line_invoice', 'Wizard')
|
Python
| 0
|
@@ -2915,47 +2915,8 @@
_id%0A
- if not_invoiced_lines.ids:%0A
@@ -2969,43 +2969,8 @@
e%7D)%0A
- if invoiced_lines.ids:%0A
|
546f1188444365a365dc1dd7a81c2ffc974cf8b2
|
change of documentation in the param vector class
|
ParamVector.py
|
ParamVector.py
|
class ParamVector(object):
"""
A ParamVector represents a vector that defines a firewall.
We have indicator functions that take parameters; let's call these functions g1...gn.
For each gi there is a vector (ai1,...,aim) of scalars, so we can represent every firewall Fl
as the sum of Fi^gi where Fi is a mutate function.
So we can think of a vector of different-sized vectors, where every vector i is:
fi, ai1,...aim
"""
# the functions that can be used to mutate a ParamVector, instances of ProbabilityFunction
mutate_functions = []
@staticmethod
def generate_random_data():
"""
creates a ParamVector with random data
:return: an instance of ParamVector that is defined using random data
"""
pass
|
Python
| 0
|
@@ -399,17 +399,17 @@
sum of
-F
+f
i%5Egi wh
@@ -412,17 +412,17 @@
where
-F
+f
i is a m
|
edd054533e122d67b1d60cf51d366591044d443e
|
add docstring for remove [ticket:1715]
|
lib/sqlalchemy/orm/scoping.py
|
lib/sqlalchemy/orm/scoping.py
|
# scoping.py
# Copyright (C) the SQLAlchemy authors and contributors
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import sqlalchemy.exceptions as sa_exc
from sqlalchemy.util import ScopedRegistry, ThreadLocalRegistry, \
to_list, get_cls_kwargs, deprecated
from sqlalchemy.orm import (
EXT_CONTINUE, MapperExtension, class_mapper, object_session
)
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.orm.session import Session
__all__ = ['ScopedSession']
class ScopedSession(object):
"""Provides thread-local management of Sessions.
Usage::
Session = scoped_session(sessionmaker(autoflush=True))
... use session normally.
"""
def __init__(self, session_factory, scopefunc=None):
self.session_factory = session_factory
if scopefunc:
self.registry = ScopedRegistry(session_factory, scopefunc)
else:
self.registry = ThreadLocalRegistry(session_factory)
self.extension = _ScopedExt(self)
def __call__(self, **kwargs):
if kwargs:
scope = kwargs.pop('scope', False)
if scope is not None:
if self.registry.has():
raise sa_exc.InvalidRequestError("Scoped session is already present; no new arguments may be specified.")
else:
sess = self.session_factory(**kwargs)
self.registry.set(sess)
return sess
else:
return self.session_factory(**kwargs)
else:
return self.registry()
def remove(self):
if self.registry.has():
self.registry().close()
self.registry.clear()
@deprecated("Session.mapper is deprecated. "
"Please see http://www.sqlalchemy.org/trac/wiki/UsageRecipes/SessionAwareMapper "
"for information on how to replicate its behavior.")
def mapper(self, *args, **kwargs):
"""return a mapper() function which associates this ScopedSession with the Mapper.
DEPRECATED.
"""
from sqlalchemy.orm import mapper
extension_args = dict((arg, kwargs.pop(arg))
for arg in get_cls_kwargs(_ScopedExt)
if arg in kwargs)
kwargs['extension'] = extension = to_list(kwargs.get('extension', []))
if extension_args:
extension.append(self.extension.configure(**extension_args))
else:
extension.append(self.extension)
return mapper(*args, **kwargs)
def configure(self, **kwargs):
"""reconfigure the sessionmaker used by this ScopedSession."""
self.session_factory.configure(**kwargs)
def query_property(self, query_cls=None):
"""return a class property which produces a `Query` object against the
class when called.
e.g.::
Session = scoped_session(sessionmaker())
class MyClass(object):
query = Session.query_property()
# after mappers are defined
result = MyClass.query.filter(MyClass.name=='foo').all()
Produces instances of the session's configured query class by
default. To override and use a custom implementation, provide
a ``query_cls`` callable. The callable will be invoked with
the class's mapper as a positional argument and a session
keyword argument.
There is no limit to the number of query properties placed on
a class.
"""
class query(object):
def __get__(s, instance, owner):
try:
mapper = class_mapper(owner)
if mapper:
if query_cls:
# custom query class
return query_cls(mapper, session=self.registry())
else:
# session's configured query class
return self.registry().query(mapper)
except orm_exc.UnmappedClassError:
return None
return query()
def instrument(name):
def do(self, *args, **kwargs):
return getattr(self.registry(), name)(*args, **kwargs)
return do
for meth in Session.public_methods:
setattr(ScopedSession, meth, instrument(meth))
def makeprop(name):
def set(self, attr):
setattr(self.registry(), name, attr)
def get(self):
return getattr(self.registry(), name)
return property(get, set)
for prop in ('bind', 'dirty', 'deleted', 'new', 'identity_map', 'is_active', 'autoflush'):
setattr(ScopedSession, prop, makeprop(prop))
def clslevel(name):
def do(cls, *args, **kwargs):
return getattr(Session, name)(*args, **kwargs)
return classmethod(do)
for prop in ('close_all', 'object_session', 'identity_key'):
setattr(ScopedSession, prop, clslevel(prop))
class _ScopedExt(MapperExtension):
def __init__(self, context, validate=False, save_on_init=True):
self.context = context
self.validate = validate
self.save_on_init = save_on_init
self.set_kwargs_on_init = True
def validating(self):
return _ScopedExt(self.context, validate=True)
def configure(self, **kwargs):
return _ScopedExt(self.context, **kwargs)
def instrument_class(self, mapper, class_):
class query(object):
def __getattr__(s, key):
return getattr(self.context.registry().query(class_), key)
def __call__(s):
return self.context.registry().query(class_)
def __get__(self, instance, cls):
return self
if not 'query' in class_.__dict__:
class_.query = query()
if self.set_kwargs_on_init and class_.__init__ is object.__init__:
class_.__init__ = self._default__init__(mapper)
def _default__init__(ext, mapper):
def __init__(self, **kwargs):
for key, value in kwargs.iteritems():
if ext.validate:
if not mapper.get_property(key, resolve_synonyms=False,
raiseerr=False):
raise sa_exc.ArgumentError(
"Invalid __init__ argument: '%s'" % key)
setattr(self, key, value)
return __init__
def init_instance(self, mapper, class_, oldinit, instance, args, kwargs):
if self.save_on_init:
session = kwargs.pop('_sa_session', None)
if session is None:
session = self.context.registry()
session._save_without_cascade(instance)
return EXT_CONTINUE
def init_failed(self, mapper, class_, oldinit, instance, args, kwargs):
sess = object_session(instance)
if sess:
sess.expunge(instance)
return EXT_CONTINUE
def dispose_class(self, mapper, class_):
if hasattr(class_, 'query'):
delattr(class_, 'query')
|
Python
| 0.000004
|
@@ -1690,32 +1690,98 @@
f remove(self):%0A
+ %22%22%22Dispose of the current contextual session.%22%22%22%0A %0A
if self.
|
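The added docstring documents `remove()` as disposing of the current contextual session. A hedged end-to-end sketch against SQLAlchemy's public `scoped_session` factory, using a throwaway in-memory database:

```python
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker

engine = create_engine('sqlite://')  # in-memory database
Session = scoped_session(sessionmaker(bind=engine))

session = Session()              # first call in this scope creates the session
assert Session() is session      # subsequent calls return the same one
Session.remove()                 # close it and clear the registry
assert Session() is not session  # a fresh session is created afterwards
```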
1c7317ea85206541c8d518a3fc6cb338ad6873d3
|
Fix requires_auth decorator
|
fickle/api.py
|
fickle/api.py
|
import os
from functools import wraps
import flask
from flask import request, json
USERNAME = 'fickle'
def Response(data, status = 200):
body = json.dumps(data)
return flask.Response(body, status = status, mimetype = 'application/json')
def SuccessResponse(dataset_id = None):
return Response({ 'success': True, 'id': dataset_id })
def ErrorResponse(status = 400):
return Response({ 'success': False }, status = status)
def check_auth(username, password):
setting = os.environ.get('FICKLE_PASSWORD')
if setting:
return username == USERNAME and password == setting
else:
return True
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return ErrorResponse(403)
return f(*args, **kwargs)
return decorated
def API(name, backend):
app = flask.Flask(name)
app.config.from_object(name)
@app.route('/')
@requires_auth
def api_root():
return SuccessResponse(backend.dataset_id)
@app.route('/load', methods=['POST'])
@requires_auth
def api_load():
backend.load(request.json)
return SuccessResponse(backend.dataset_id)
@app.route('/fit', methods=['POST'])
@requires_auth
def api_fit():
if not backend.loaded():
return ErrorResponse()
backend.fit()
return SuccessResponse(backend.dataset_id)
@app.route('/validate', methods=['POST'])
@requires_auth
def api_validate():
if not backend.loaded():
return ErrorResponse()
data = backend.validate()
return Response(data)
@app.route('/predict', methods=['POST'])
@requires_auth
def api_predict():
if not backend.trained():
return ErrorResponse()
data = backend.predict(request.json).tolist()
return Response(data)
return app
|
Python
| 0.000001
|
@@ -648,16 +648,85 @@
uth(f):%0A
+ if not bool(os.environ.get('FICKLE_PASSWORD')):%0A return f%0A
@wra
|
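The fix above short-circuits `requires_auth` at decoration time: when no `FICKLE_PASSWORD` is configured, the view is returned unwrapped and auth is skipped entirely. A minimal standalone restatement of the pattern:

```python
import os
from functools import wraps

def requires_auth(view):
    if not os.environ.get('FICKLE_PASSWORD'):
        return view  # auth disabled: pass the view through untouched
    @wraps(view)
    def decorated(*args, **kwargs):
        # ...credential check goes here, as in the module above...
        return view(*args, **kwargs)
    return decorated
```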
de09ac74af398e30f41acea106235cdafdc55c7d
|
Fix typo
|
grip/assets.py
|
grip/assets.py
|
from __future__ import print_function, unicode_literals
import errno
import os
import posixpath
import re
import sys
import shutil
from abc import ABCMeta, abstractmethod
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
import requests
from flask import safe_join
from .constants import (
STYLE_URLS_SOURCE, STYLE_URLS_RE, STYLE_ASSET_URLS_RE,
STYLE_ASSET_URLS_SUB_FORMAT)
from .vendor.six import add_metaclass
@add_metaclass(ABCMeta)
class ReadmeAssetManager(object):
"""
Manages the style and font assets rendered with Readme pages.
Set cache_path to None to disable caching.
"""
def __init__(self, cache_path, style_urls=None, quiet=None):
super(ReadmeAssetManager, self).__init__()
self.cache_path = cache_path
self.style_urls = list(style_urls) if style_urls else []
self.styles = []
self.quiet = quiet
def _stip_url_params(self, url):
return url.rsplit('?', 1)[0].rsplit('#', 1)[0]
def clear(self):
"""
Clears the asset cache.
"""
if self.cache_path and os.path.exists(self.cache_path):
shutil.rmtree(self.cache_path)
def cache_filename(self, url):
"""
Gets a suitable relative filename for the specified URL.
"""
# FUTURE: Use url exactly instead of flattening it here
url = posixpath.basename(url)
return self._stip_url_params(url)
@abstractmethod
def retrieve_styles(self, asset_url_path):
"""
Get style URLs from the source HTML page and specified cached asset
URL path.
"""
pass
class GitHubAssetManager(ReadmeAssetManager):
"""
Reads the styles used for rendering Readme pages.
Set cache_path to None to disable caching.
"""
def __init__(self, cache_path, style_urls=None, quiet=None):
super(GitHubAssetManager, self).__init__(cache_path, style_urls, quiet)
def _get_style_urls(self, asset_url_path):
"""
Gets the specified resource and parses all style URLs and their
assets in the form of the specified patterns.
"""
# Check cache
if self.cache_path:
cached = self._get_cached_style_urls(asset_url_path)
# Skip fetching styles if there's any already cached
if cached:
return cached
# Find style URLs
r = requests.get(STYLE_URLS_SOURCE)
if not 200 <= r.status_code < 300:
print('Warning: retrieving styles gave status code',
r.status_code, file=sys.stderr)
urls = re.findall(STYLE_URLS_RE, r.text)
# Cache the styles and their assets
if self.cache_path:
is_cached = self._cache_contents(urls, asset_url_path)
if is_cached:
urls = self._get_cached_style_urls(asset_url_path)
return urls
def _get_cached_style_urls(self, asset_url_path):
"""
Gets the URLs of the cached styles.
"""
try:
cached_styles = os.listdir(self.cache_path)
except IOError as ex:
if ex.errno != errno.ENOENT and ex.errno != errno.ESRCH:
raise
return []
except OSError:
return []
return [posixpath.join(asset_url_path, style)
for style in cached_styles
if style.endswith('.css')]
def _cache_contents(self, style_urls, asset_url_path):
"""
Fetches the given URLs and caches their contents
and their assets in the given directory.
"""
files = {}
asset_urls = []
for style_url in style_urls:
if not self.quiet:
print(' * Downloading style', style_url, file=sys.stderr)
r = requests.get(style_url)
if not 200 <= r.status_code < 300:
print(' -> Warning: Style request responded with',
r.status_code, file=sys.stderr)
files = None
continue
asset_content = r.text
# Find assets and replace their base URLs with the cache directory
for url in re.findall(STYLE_ASSET_URLS_RE, asset_content):
asset_urls.append(urljoin(style_url, url))
contents = re.sub(
STYLE_ASSET_URLS_RE,
STYLE_ASSET_URLS_SUB_FORMAT.format(asset_url_path.rstrip('/')),
asset_content)
# Prepare cache
if files is not None:
filename = self.cache_filename(style_url)
files[filename] = contents.encode('utf-8')
for asset_url in asset_urls:
if not self.quiet:
print(' * Downloading asset', asset_url, file=sys.stderr)
# Retrieve binary file and show message
r = requests.get(asset_url, stream=True)
if not 200 <= r.status_code < 300:
print(' -> Warning: Asset request responded with',
r.status_code, file=sys.stderr)
files = None
continue
# Prepare cache
if files is not None:
filename = self.cache_filename(asset_url)
files[filename] = r.raw.read(decode_content=True)
# Skip caching if something went wrong to try again next time
if not files:
return False
# Cache files if all downloads were successful
cache = {}
for relname in files:
cache[safe_join(self.cache_path, relname)] = files[relname]
if not os.path.exists(self.cache_path):
os.makedirs(self.cache_path)
for filename in cache:
with open(filename, 'wb') as f:
f.write(cache[filename])
if not self.quiet:
print(' * Cached all downloads in', self.cache_path, file=sys.stderr)
return True
def retrieve_styles(self, asset_url_path):
"""
Get style URLs from the source HTML page and specified cached
asset base URL.
"""
if not asset_url_path.endswith('/'):
asset_url_path += '/'
self.style_urls.extend(self._get_style_urls(asset_url_path))
|
Python
| 0.999999
|
@@ -933,16 +933,17 @@
def _st
+r
ip_url_p
@@ -1450,16 +1450,17 @@
self._st
+r
ip_url_p
|
9437e024b1e1630e06d1b05972eb9049af442be0
|
fix bad copy/paste
|
build_all.py
|
build_all.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import subprocess
# list of projects
progs = [
{'path': 'apps/EHD', 'travis': True},
{'path': 'apps/fractal/cpp', 'travis': False},
{'path': 'apps/GenMAI', 'travis': True},
{'path': 'apps/md5', 'travis': True},
{'path': 'apps/minibarreTE', 'travis': True},
{'path': 'cmake/findGMM', 'travis': True},
{'path': 'cmake/findMKL', 'travis': False},
{'path': 'langtests/cpp11', 'travis': True},
{'path': 'langtests/exporttpl', 'travis': True},
{'path': 'langtests/singleton', 'travis': True},
{'path': 'metafor/arbre', 'travis': True},
{'path': 'metafor/drmeta', 'travis': True},
{'path': 'metafor/mailsph', 'travis': False},
{'path': 'sandbox/fortran', 'travis': True},
{'path': 'sandbox/fortranc', 'travis': True},
{'path': 'student/dcm1', 'travis': False},
{'path': 'student/dcm2', 'travis': True},
{'path': 'student/lejeune', 'lejeune': True},
{'path': 'student/mico', 'travis': True},
{'path': 'student/ndh', 'travis': True},
]
def getArgs():
# parse args
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--travis", help="run only travis tests",
action="store_true")
args = parser.parse_args()
return args
def build_one(basedir, p):
"""build project 'p'
"""
args = getArgs()
fullpath = os.path.join(basedir, *(p['path'].split('/')))
if(p['travis'] or not args.travis):
print '=> running build.py in', fullpath
os.chdir(fullpath)
subprocess.call(['python', 'build.py'])
def build_all(basedir):
"""build everything in 'basedir'
"""
for p in progs:
build_one(basedir, p)
def rm_builds(basedir):
"""remove all 'build' directories in 'basedir'
"""
import shutil
for path, subdirs, files in os.walk(basedir):
for name in subdirs:
if name == 'build':
fullname = os.path.join(path, name)
print 'removing', fullname
shutil.rmtree(fullname)
if __name__ == "__main__":
basedir = os.path.abspath(os.path.dirname(__file__))
rm_builds(basedir)
build_all(basedir)
|
Python
| 0.000009
|
@@ -949,23 +949,22 @@
eune', '
-lejeune
+travis
': True%7D
|
a8cb15b1983c48547edfeb53bfb63245f7e7c892
|
Revert "log integrations with zabbix through pyzabbix"
|
dbaas_zabbix/__init__.py
|
dbaas_zabbix/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import sys
from dbaas_zabbix.dbaas_api import DatabaseAsAServiceApi
from dbaas_zabbix.provider_factory import ProviderFactory
from pyzabbix import ZabbixAPI
stream = logging.StreamHandler(sys.stdout)
stream.setLevel(logging.DEBUG)
log = logging.getLogger('pyzabbix')
log.addHandler(stream)
log.setLevel(logging.DEBUG)
def factory_for(**kwargs):
databaseinfra = kwargs['databaseinfra']
credentials = kwargs['credentials']
del kwargs['databaseinfra']
del kwargs['credentials']
zabbix_api = ZabbixAPI
if kwargs.get('zabbix_api'):
zabbix_api = kwargs.get('zabbix_api')
del kwargs['zabbix_api']
dbaas_api = DatabaseAsAServiceApi(databaseinfra, credentials)
return ProviderFactory.factory(dbaas_api, zabbix_api=zabbix_api, **kwargs)
|
Python
| 0
|
@@ -43,35 +43,8 @@
-*-%0A
-import logging%0Aimport sys%0A%0A
from
@@ -190,169 +190,8 @@
PI%0A%0A
-stream = logging.StreamHandler(sys.stdout)%0Astream.setLevel(logging.DEBUG)%0Alog = logging.getLogger('pyzabbix')%0Alog.addHandler(stream)%0Alog.setLevel(logging.DEBUG)%0A
%0Adef
|
81c1f1609428690ff4ba951c7454054dbd5f9c54
|
add new channelsection files to the gyp file
|
ircd.gyp
|
ircd.gyp
|
{
'variables':
{
'python-includes': 'python -c "from distutils import sysconfig; import ntpath; print sysconfig.get_python_inc().replace(ntpath.sep, \'/\')"',
'python-version': 'python -c "from distutils import sysconfig; print sysconfig.get_config_var(\\"VERSION\\")"',
'python-libpath': 'python -c "from distutils import sysconfig; print sysconfig.get_config_var(\\"LIBPL\\")"',
},
'targets':
[{
'target_name': 'ircd',
'dependencies':
[
'jsoncpp/jsoncpp.gyp:jsoncpp',
'libuv/uv.gyp:uv',
],
'type': 'executable',
'include_dirs':
[
'include',
'<!@(<(python-includes))',
'c:\openssl\include',
],
'sources':
[
'common.gypi',
'etc/example.conf',
'etc/default.msgs',
'include/baseclient.h',
'include/channel.h',
'include/client.h',
'include/command.h',
'include/config.h',
'include/configsection.h',
'include/connection.h',
'include/event.h',
'include/generalsection.h',
'include/ircstring.h',
'include/listener.h',
'include/listenersection.h',
'include/logging.h',
'include/loggingsection.h',
'include/module.h',
'include/modulesection.h',
'include/murmurhash3.h',
'include/nuhmask.h',
'include/numeric.h',
'include/parser.h',
'include/server.h',
'include/ssl.h',
'include/sslconnection.h',
'include/stdinc.h',
'include/system.h',
'include/python/pbool.h',
'include/python/pchannel.h',
'include/python/pclient.h',
'include/python/pclist.h',
'include/python/pcollection.h',
'include/python/pcmap.h',
'include/python/pctype.h',
'include/python/pdict.h',
'include/python/pevent.h',
'include/python/pexception.h',
'include/python/pint.h',
'include/python/pmembership.h',
'include/python/pmethod.h',
'include/python/pnuhmask.h',
'include/python/pparser.h',
'include/python/pobject.h',
'include/python/pstring.h',
'include/python/ptuple.h',
'include/python/ptupleitem.h',
'include/python/pythonloader.h',
'include/python/pythonutil.h',
'src/baseclient.cc',
'src/channel.cc',
'src/client.cc',
'src/command.cc',
'src/config.cc',
'src/connection.cc',
'src/generalsection.cc',
'src/listener.cc',
'src/listenersection.cc',
'src/logging.cc',
'src/loggingsection.cc',
'src/module.cc',
'src/modulesection.cc',
'src/murmur3.cc',
'src/main.cc',
'src/nuhmask.cc',
'src/numeric.cc',
'src/parser.cc',
'src/server.cc',
'src/ssl.cc',
'src/sslconnection.cc',
'src/system.cc',
'src/python/pbool.cc',
'src/python/pchannel.cc',
'src/python/pclient.cc',
'src/python/pctype.cc',
'src/python/pdict.cc',
'src/python/pevent.cc',
'src/python/pexception.cc',
'src/python/pint.cc',
'src/python/pmembership.cc',
'src/python/pnuhmask.cc',
'src/python/pobject.cc',
'src/python/pparser.cc',
'src/python/pstring.cc',
'src/python/ptuple.cc',
'src/python/ptupleitem.cc',
'src/python/pythonloader.cc',
'src/python/pythonutil.cc',
'modules/python/ircd/__init__.py',
'modules/python/ircd/channel.py',
'modules/python/ircd/msflags.py',
'modules/python/ircd/numerics.py',
'modules/python/ircd/user.py',
'modules/python/ircclient.py'
],
'conditions':
[
[ 'OS=="win"',
{
'variables':
{
'python-binlibdest': 'python -c "from distutils import sysconfig; print sysconfig.get_config_var(\\"BINLIBDEST\\")"',
},
'msvs_settings':
{
'VCLinkerTool':
{
'AdditionalLibraryDirectories': '<!@(<(python-binlibdest))s;c:\openssl\lib',
},
},
'defines':
[
'_WIN32_WINNT=0x0600',
'_GNU_SOURCE',
],
'include_dirs':
[
'c:\openssl\include',
],
'libraries':
[
'python<!@(<(python-version))_d.lib',
'libeay32.lib',
'ssleay32.lib',
],
},
{
'xcode_settings':
{
'WARNING_CFLAGS':
[
'-Wall',
'-Wextra',
'-Wno-long-long',
'-Wno-unused-parameter',
'-Wno-deprecated-declarations',
'-Wno-newline-eof',
],
'OTHER_CFLAGS':
[
'-std=c++0x',
'-stdlib=libc++',
],
},
'cflags':
[
'-std=c++0x',
'-Wall',
'-Wextra',
'-pedantic',
'-Wno-long-long',
'-Wno-unused-parameter',
'-Wno-missing-field-initializers',
],
'defines':
[
'_GNU_SOURCE'
],
'libraries':
[
'-lpython<!@(<(python-version))',
'-lssl',
'-lcrypto',
],
'conditions':
[
['OS == "mac"',
{
'libraries': [ '-lc++' ],
}],
['OS == "freebsd"',
{
'cflags+': [ '-stdlib=libc++' ],
'libraries': [ '-lc++' ],
'ldflags': [ '-L<!@(<(python-libpath))' ],
}],
]
}
],
],
}]
}
|
Python
| 0
|
@@ -820,32 +820,66 @@
ude/channel.h',%0A
+ 'include/channelsection.h',%0A
'include/c
@@ -2288,32 +2288,63 @@
rc/channel.cc',%0A
+ 'src/channelsection.cc',%0A
'src/clien
|
b3c1b3b66d1c720172e731d1bfc44cfb44c992a3
|
Revert of [Android] Re-enable content_browsertests on main waterfall. (https://codereview.chromium.org/132403005/)
|
build/android/pylib/gtest/gtest_config.py
|
build/android/pylib/gtest/gtest_config.py
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Configuration file for android gtest suites."""
# Add new suites here before upgrading them to the stable list below.
EXPERIMENTAL_TEST_SUITES = [
'content_gl_tests',
]
# Do not modify this list without approval of an android owner.
# This list determines which suites are run by default, both for local
# testing and on android trybots running on commit-queue.
STABLE_TEST_SUITES = [
'android_webview_unittests',
'base_unittests',
'cc_unittests',
'components_unittests',
'content_unittests',
'gl_tests',
'gpu_unittests',
'ipc_tests',
'media_unittests',
'net_unittests',
'sql_unittests',
'sync_unit_tests',
'ui_unittests',
'unit_tests',
'webkit_compositor_bindings_unittests',
'webkit_unit_tests',
'breakpad_unittests',
'sandbox_linux_unittests',
'content_browsertests',
]
WEBRTC_CHROMIUM_TEST_SUITES = [
'content_browsertests',
]
WEBRTC_NATIVE_TEST_SUITES = [
'audio_decoder_unittests',
'common_audio_unittests',
'common_video_unittests',
'modules_tests',
'modules_unittests',
'neteq_unittests',
'system_wrappers_unittests',
'test_support_unittests',
'tools_unittests',
'video_engine_core_unittests',
'voice_engine_unittests',
]
|
Python
| 0.000251
|
@@ -303,32 +303,60 @@
TEST_SUITES = %5B%0A
+ 'content_browsertests',%0A
'content_gl_
@@ -1020,36 +1020,8 @@
s',%0A
- 'content_browsertests',%0A
%5D%0A%0AW
|
3812403655153e86a8b0e1ac68c9b15e69d6a4e3
|
Update BUILD_OSS to 4770.
|
src/data/version/mozc_version_template.bzl
|
src/data/version/mozc_version_template.bzl
|
# Copyright 2010-2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
MAJOR = 2
MINOR = 28
# BUILD number used for the OSS version.
BUILD_OSS = 4750
# Number to be increased. This value may be replaced by other tools.
BUILD = BUILD_OSS
# Represent the platform and release channel.
REVISION = 100
REVISION_MACOS = REVISION + 1
# This version represents the version of Mozc IME engine (converter, predictor,
# etc.). This version info is included both in the Mozc server and in the Mozc
# data set file so that the Mozc server can accept only the compatible version
# of data set file. The engine version must be incremented when:
# * POS matcher definition and/or conversion models were changed,
# * New data are added to the data set file, and/or
# * Any changes that lose data compatibility are made.
ENGINE_VERSION = 24
# This version is used to manage the data version and is included only in the
# data set file. DATA_VERSION can be incremented without updating
# ENGINE_VERSION as long as it's compatible with the engine.
# This version should be reset to 0 when ENGINE_VERSION is incremented.
DATA_VERSION = 11
|
Python
| 0
|
@@ -1609,9 +1609,9 @@
= 47
-5
+7
0%0A%0A#
|
c3d610472708cd95054c4cb3447023051e2e0479
|
rename hash/chksum -> md5sum in more places
|
dogen/plugins/cct.py
|
dogen/plugins/cct.py
|
import os
import yaml
import subprocess
import shutil
from dogen.plugin import Plugin
class CCT(Plugin):
@staticmethod
def info():
return "cct", "Support for configuring images via cct"
def __init__(self, dogen):
super(CCT, self).__init__(dogen)
def extend_schema(self, parent_schema):
"""
Read in a schema definition for our part of the config and hook it
into the parent schema at the cct: top-level key.
"""
schema_path = os.path.join(self.dogen.pwd, "plugins", "cct", "cct_schema.yaml")
schema = {}
with open(schema_path, 'r') as fh:
schema = yaml.safe_load(fh)
parent_schema['map']['cct'] = schema
def prepare(self, cfg):
"""
Create the cct changes YAML file for the image.yaml template descriptor.
Requires a cct-aware template.jinja file.
"""
if os.path.exists(self.output + '/cct/'):
shutil.rmtree(self.output + '/cct/')
if 'modules' in cfg['cct']:
self._prepare_modules(cfg)
cfg['cct']['run'] = ['cct.yaml']
cfg_file_dir = os.path.join(self.output, "cct")
if not os.path.exists(cfg_file_dir):
os.makedirs(cfg_file_dir)
cfg_file = os.path.join(cfg_file_dir, "cct.yaml")
with open(cfg_file, 'w') as f:
yaml.dump(cfg['cct']['configure'], f)
def _prepare_modules(self, cfg):
for module in cfg['cct']['modules']:
name = None
if 'name' in module:
name = module['name']
elif module['path'][-1] == '/':
name = os.path.basename(module['path'][0:-1])
elif len(module['path']) > 4 and module['path'][-4:] == ".git":
name = os.path.basename(module['path'][0:-4])
else:
name = os.path.basename(module['path'])
descriptor_dir = os.path.dirname(self.descriptor) + '/cct/'
# check if the module exists in the cct dir next to the descriptor
if os.path.exists(descriptor_dir + name):
# path exists - I'll just copy it
shutil.copytree(descriptor_dir + name,
self.output + '/cct/' + name)
self.log.info("Copied cct module %s." % name)
else:
# clone it to target dir if not exists
self.clone_repo(module['path'], self.output + '/cct/' + name)
self.log.info("Cloned cct module %s." % name)
try:
self.append_sources(name, cfg)
except Exception as ex:
self.log.info("cannot process sources for module %s" % name)
self.log.debug("exception: %s" % ex)
def clone_repo(self, url, path):
try:
if not os.path.exists(path):
subprocess.check_call(["git", "clone", url, path])
except Exception as ex:
self.log.error("cannot clone repo %s into %s: %s", url, path, ex)
def append_sources(self, module, cfg):
"""
Extract sources defined within the module, if provided, and merge
them with Dogen's master sources list.
"""
sources_path = os.path.join(self.output, "cct", module, "sources.yaml")
if not os.path.exists(sources_path):
self.log.debug("no sources defined for module %s" % module)
return
source_prefix = os.getenv("DOGEN_CCT_SOURCES_PREFIX") or ""
if not source_prefix:
self.log.debug("DOGEN_CCT_SOURCES_PREFIX variable is not defined")
cct_sources = []
with open(sources_path) as f:
cct_sources = yaml.load(f)
dogen_sources = []
for source in cct_sources:
dogen_source = {}
dogen_source['url'] = source_prefix + source['name']
dogen_source['hash'] = source['chksum']
dogen_sources.append(dogen_source)
try:
cfg['sources'].extend(dogen_sources)
except:
cfg['sources'] = dogen_sources
|
Python
| 0
|
@@ -3898,12 +3898,14 @@
ce%5B'
-hash
+md5sum
'%5D =
@@ -3917,11 +3917,11 @@
ce%5B'
-chk
+md5
sum'
|
3c223248a80043b12e700b09f1e32ebf54f6ebd3
|
Support Path objects for DataLibrary.register_filename
|
openmc/data/library.py
|
openmc/data/library.py
|
import os
import xml.etree.ElementTree as ET
import h5py
from openmc.mixin import EqualityMixin
from openmc._xml import clean_indentation
from openmc.checkvalue import check_type
class DataLibrary(EqualityMixin):
"""Collection of cross section data libraries.
Attributes
----------
libraries : list of dict
List in which each item is a dictionary summarizing cross section data
from a single file. The dictionary has keys 'path', 'type', and
'materials'.
"""
def __init__(self):
self.libraries = []
def get_by_material(self, name):
"""Return the library dictionary containing a given material.
Parameters
----------
name : str
Name of material, e.g. 'Am241'
Returns
-------
library : dict or None
Dictionary summarizing cross section data from a single file;
the dictionary has keys 'path', 'type', and 'materials'.
"""
for library in self.libraries:
if name in library['materials']:
return library
return None
def register_file(self, filename):
"""Register a file with the data library.
Parameters
----------
filename : str
Path to the file to be registered.
"""
# Support pathlib
# TODO: Remove when support is Python 3.6+ only
filename = str(filename)
with h5py.File(filename, 'r') as h5file:
filetype = h5file.attrs['filetype'].decode()[5:]
materials = list(h5file)
library = {'path': filename, 'type': filetype, 'materials': materials}
self.libraries.append(library)
def export_to_xml(self, path='cross_sections.xml'):
"""Export cross section data library to an XML file.
Parameters
----------
path : str
Path to file to write. Defaults to 'cross_sections.xml'.
"""
root = ET.Element('cross_sections')
# Determine common directory for library paths
common_dir = os.path.dirname(os.path.commonprefix(
[lib['path'] for lib in self.libraries]))
if common_dir == '':
common_dir = '.'
if os.path.relpath(common_dir, os.path.dirname(str(path))) != '.':
dir_element = ET.SubElement(root, "directory")
dir_element.text = os.path.realpath(common_dir)
for library in self.libraries:
lib_element = ET.SubElement(root, "library")
lib_element.set('materials', ' '.join(library['materials']))
lib_element.set('path', os.path.relpath(library['path'], common_dir))
lib_element.set('type', library['type'])
# Clean the indentation to be user-readable
clean_indentation(root)
# Write XML file
tree = ET.ElementTree(root)
tree.write(str(path), xml_declaration=True, encoding='utf-8',
method='xml')
@classmethod
def from_xml(cls, path=None):
"""Read cross section data library from an XML file.
Parameters
----------
path : str, optional
Path to XML file to read. If not provided, the
`OPENMC_CROSS_SECTIONS` environment variable will be used.
Returns
-------
data : openmc.data.DataLibrary
Data library object initialized from the provided XML
"""
data = cls()
# If path is None, get the cross sections from the
# OPENMC_CROSS_SECTIONS environment variable
if path is None:
path = os.environ.get('OPENMC_CROSS_SECTIONS')
# Check to make sure there was an environmental variable.
if path is None:
raise ValueError("Either path or OPENMC_CROSS_SECTIONS "
"environmental variable must be set")
# Convert to string to support pathlib
# TODO: Remove when support is Python 3.6+ only
path = str(path)
tree = ET.parse(path)
root = tree.getroot()
if root.find('directory') is not None:
directory = root.find('directory').text
else:
directory = os.path.dirname(path)
for lib_element in root.findall('library'):
filename = os.path.join(directory, lib_element.attrib['path'])
filetype = lib_element.attrib['type']
materials = lib_element.attrib['materials'].split()
library = {'path': filename, 'type': filetype,
'materials': materials}
data.libraries.append(library)
return data
|
Python
| 0
|
@@ -37,16 +37,31 @@
ee as ET
+%0Aimport pathlib
%0A%0Aimport
@@ -1277,32 +1277,40 @@
filename : str
+ or Path
%0A Pat
@@ -1366,113 +1366,134 @@
-# Support pathlib%0A # TODO: Remove when support is Python 3.6+ only%0A filename = str(
+if not isinstance(filename, pathlib.Path):%0A path = pathlib.Path(filename)%0A else:%0A path =
filename
)%0A%0A
@@ -1488,17 +1488,16 @@
filename
-)
%0A%0A
@@ -1513,24 +1513,20 @@
py.File(
-filename
+path
, 'r') a
@@ -1653,36 +1653,37 @@
rary = %7B'path':
-file
+path.
name, 'type': fi
|
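A quick sketch of what this change enables (illustration only; the file names are hypothetical):
import pathlib
lib = DataLibrary()
lib.register_file(pathlib.Path('nuclides') / 'U235.h5')  # Path objects now accepted
lib.register_file('nuclides/U238.h5')                    # plain strings still work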
b6d747599661f3ce19b4d2f6ea9f80ec9839a2d8
|
Update couchm.reactor.py
|
resources/reactors/couchm.reactor.py
|
resources/reactors/couchm.reactor.py
|
#!/usr/bin/python
import argparse
import mosquitto
#from pushover import PushoverClient
import os, sys
import urllib2
import json, base64
#Posting data to couchDB
def post(doc):
global config
url = 'http://%(server)s/%(database)s/_design/energy_data/_update/measurement' % config
# print url
request = urllib2.Request(url, data=json.dumps(doc))
auth = base64.encodestring('%(user)s:%(password)s' % config).replace('\n', '')
request.add_header('Authorization', 'Basic ' + auth)
request.add_header('Content-Type', 'application/json')
request.get_method = lambda: 'POST'
urllib2.urlopen(request, timeout=1)
return
if __name__ == '__main__':
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-h', dest='host', default="localhost", help='MQTT host send results to')
parser.add_argument('-t', dest='topic', default="", help='MQTT topic to process')
parser.add_argument('-m', dest='message', default="", help='MQTT message to process')
args = parser.parse_args()
#Where am I
path = os.path.abspath(os.path.dirname(sys.argv[0]))
#Load config file...
ConfigFile = path + "/couchm.cfg"
try:
f = open(ConfigFile,"r")
f.close()
except:
print "Please provide a valid config file! In the same folder as the couchDB script!"
exit(1)
#Read config file.
parser = ConfigParser.RawConfigParser(allow_no_value=True)
parser.read(ConfigFile)
#Load basic config.
config = {}
config["user"] = parser.get("CouchDB","user")
config["password"] = parser.get("CouchDB","password")
config["server"] = parser.get("CouchDB","server")
config["database"] = parser.get("CouchDB","database")
source = parser.get("CouchM","source")
if args.message[0] == '"':
args.message = args.message[1:]
if args.message[-1] == '"':
args.message = args.message[:-1]
data = json.loads(args.message)
#Post data to couchm
post({
"source": source,
"timestamp": str(data["time"]),
"ElectricPower": str(data["value"]),
"ElectricEnergy": str(0),
"PowerThreshold": str(1),
"ElectricPowerUnoccupied": "0",
"ElectricEnergyOccupied": "0",
"ElectricEnergyUnoccupied": "0"
})
|
Python
| 0
|
@@ -132,16 +132,36 @@
, base64
+%0Aimport ConfigParser
%0A%0A#Posti
|
ad5df2624e19779f264443f9ec779ab8a018baaa
|
Add socket based fallback approach in get_local_ip
|
catt/util.py
|
catt/util.py
|
import ipaddress
import json
import tempfile
import time
from pathlib import Path
import click
import ifaddr
def warning(msg):
click.secho("Warning: ", fg="red", nl=False, err=True)
click.echo("{}.".format(msg), err=True)
def echo_json(data_dict):
click.echo(json.dumps(data_dict, indent=4, default=str))
def guess_mime(path):
# source: https://developers.google.com/cast/docs/media
extension = Path(path).suffix.lower()
extensions = {
".mp4": "video/mp4",
".m4a": "audio/mp4",
".mp3": "audio/mp3",
".mpa": "audio/mpeg",
".webm": "video/webm",
".mkv": "video/x-matroska",
".bmp": "image/bmp",
".jpg": "image/jpeg",
".gif": "image/gif",
".png": "image/png",
".webp": "image/web",
}
return extensions.get(extension, "video/mp4")
def hunt_subtitles(video):
"""Searches for subtitles in the current folder"""
video_path = Path(video)
video_path_stem_lower = video_path.stem.lower()
for entry_path in video_path.parent.iterdir():
if entry_path.is_dir():
continue
if entry_path.stem.lower().startswith(video_path_stem_lower) and entry_path.suffix.lower() in [".vtt", ".srt"]:
return str(entry_path.resolve())
return None
def create_temp_file(content):
with tempfile.NamedTemporaryFile(mode="w+b", suffix=".vtt", delete=False) as tfile:
tfile.write(content.encode())
return tfile.name
def human_time(seconds: int):
return time.strftime("%H:%M:%S", time.gmtime(seconds))
def get_local_ip(host):
for adapter in ifaddr.get_adapters():
for adapter_ip in adapter.ips:
aip = adapter_ip.ip[0] if isinstance(adapter_ip.ip, tuple) else adapter_ip.ip
try:
if not isinstance(ipaddress.ip_address(host), type(ipaddress.ip_address(aip))):
raise ValueError
except ValueError:
continue
ipt = [(ip, adapter_ip.network_prefix) for ip in (aip, host)]
catt_net, cc_net = [ipaddress.ip_network("{0}/{1}".format(*ip), strict=False) for ip in ipt]
if catt_net == cc_net:
return aip
else:
continue
def is_ipaddress(device):
try:
ipaddress.ip_address(device)
except ValueError:
return False
else:
return True
|
Python
| 0
|
@@ -22,16 +22,30 @@
rt json%0A
+import socket%0A
import t
@@ -1610,24 +1610,512 @@
l_ip(host):%0A
+ %22%22%22%0A The primary ifaddr based approach, tries to guess the local ip from the cc ip,%0A by comparing the subnet of ip-addresses of all the local adapters to the subnet of the cc ip.%0A This should work on all platforms, but requires the catt box and the cc to be on the same subnet.%0A As a fallback we use a socket based approach, that does not suffer from this limitation, but%0A might not work on all platforms.%0A %22%22%22%0A%0A host_ipversion = type(ipaddress.ip_address(host))%0A
for adap
@@ -2140,24 +2140,24 @@
adapters():%0A
-
for
@@ -2349,46 +2349,28 @@
ess(
-host), type(ipaddress.ip_address(aip))
+aip), host_ipversion
):%0A
@@ -2392,24 +2392,16 @@
-raise ValueError
+continue
%0A
@@ -2704,32 +2704,32 @@
else:%0A
-
@@ -2738,16 +2738,178 @@
ntinue%0A%0A
+ return %5B%0A (s.connect((%228.8.8.8%22, 53)), s.getsockname()%5B0%5D, s.close())%0A for s in %5Bsocket.socket(socket.AF_INET, socket.SOCK_DGRAM)%5D%0A %5D%5B0%5D%5B1%5D%0A%0A
%0Adef is_
|
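Aside: the list-expression fallback added above is equivalent to this plainer form (a sketch, assuming the same 8.8.8.8:53 probe target):
import socket
def _local_ip_fallback():
    # a UDP connect() sends no packets; it only selects the outgoing
    # interface, whose address getsockname() then reports
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 53))
        return s.getsockname()[0]
    finally:
        s.close()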
1453c4c7fa7283e461b9870372679ac5df94c834
|
remove extraneous print statement
|
update.py
|
update.py
|
# class to help update local file geodatabase data from SDE
from os.path import join
import arcpy
from datetime import datetime
from numpy.testing import assert_almost_equal
from itertools import izip
changes = []
def updateFGDBfromSDE(fgdb, sde, logger=None):
"""
fgdb: file geodatabase
sde: sde geodatabase connection
logger: agrc.logging.Logger (optional)
returns: String[] - the list of errors
Loops through the file geodatabase feature classes and looks for
matches in the SDE database. If there is a match, it does a schema check
and then updates the data.
"""
global changes
def log(msg):
if logger:
logger.logMsg(msg)
else:
print msg
def updateData():
passed = False  # defined up front so the except path can still return it
try:
# validate that there was not a schema change
arcpy.env.workspace = fgdb
layer = sdeFC + '_Layer'
arcpy.MakeFeatureLayer_management(sdeFC, layer, '1 = 2')
try:
arcpy.Append_management(layer, f, 'TEST')
log('schema test passed')
passed = True
except arcpy.ExecuteError as e:
if '000466' in e.message:
log(e.message)
msg = 'schema change detected'
msg += '\n\n{0}'.format(getFieldDifferences(sdeFC, f))
errors.append('{}: {}'.format(f, msg))
log(msg)
passed = False
return passed
else:
raise e
arcpy.Delete_management(layer)
log('checking for changes...')
if checkForChanges(f, sdeFC) and passed:
log('updating data...')
arcpy.TruncateTable_management(f)
# edit session required for data that participates in relationships
editSession = arcpy.da.Editor(fgdb)
editSession.startEditing(False, False)
editSession.startOperation()
fields = [fld.name for fld in arcpy.ListFields(f)]
fields = filter_fields(fields)
fields.append('SHAPE@')
with arcpy.da.InsertCursor(f, fields) as icursor, arcpy.da.SearchCursor(sdeFC, fields, sql_clause=(None, 'ORDER BY OBJECTID')) as cursor:
for row in cursor:
print(row[0])
icursor.insertRow(row)
editSession.stopOperation()
editSession.stopEditing(True)
changes.append(f.upper())
else:
log('no changes found')
return passed
except:
errors.append('Error updating: {}'.format(f))
if logger:
logger.logError()
return passed
log('** Updating {} from {}'.format(fgdb, sde))
errors = []
# loop through local feature classes
arcpy.env.workspace = fgdb
fcs = arcpy.ListFeatureClasses()
totalFcs = len(fcs)
i = 0
for f in fcs:
i = i + 1
log('{} of {} | {}'.format(i, totalFcs, f))
found = False
# search for match in stand-alone feature classes
arcpy.env.workspace = sde
matches = arcpy.ListFeatureClasses('*.{}'.format(f))
if matches is not None and len(matches) > 0:
match = matches[0]
sdeFC = join(sde, match)
found = True
else:
# search in feature datasets
datasets = arcpy.ListDatasets()
if len(datasets) > 0:
# loop through datasets
for ds in datasets:
matches = arcpy.ListFeatureClasses('*.{}'.format(f), None, ds)
if matches is not None and len(matches) > 0:
match = matches[0]
sdeFC = join(sde, match)
found = True
break
if not found:
msg = 'no match found in sde'
errors.append("{}: {}".format(f, msg))
log(msg)
continue
updateData()
return (errors, changes)
def wasModifiedToday(fcname, fgdb):
return fcname.upper() in changes
def filter_fields(lst):
newFields = []
for fld in lst:
if 'SHAPE' not in fld.upper() and fld.upper() not in ['GLOBAL_ID', 'GLOBALID']:
newFields.append(fld)
return newFields
def getFieldDifferences(ds1, ds2):
def getFields(ds):
flds = arcpy.ListFields(ds)
returnFlds = []
for f in flds:
returnFlds.append(f.name)
returnFlds.sort()
return returnFlds
ds1Flds = getFields(ds1)
ds2Flds = getFields(ds2)
return "{} Fields: \n{}\n{} Fields: \n{}".format(ds1, ds1Flds, ds2, ds2Flds)
def checkForChanges(f, sde):
"""
returns False if there are no changes
"""
# try simple feature count first
fCount = int(arcpy.GetCount_management(f).getOutput(0))
sdeCount = int(arcpy.GetCount_management(sde).getOutput(0))
if fCount != sdeCount:
return True
fields = [fld.name for fld in arcpy.ListFields(f)]
# filter out shape fields
fields = filter_fields(fields)
d = arcpy.Describe(f)
shapeType = d.shapeType
if shapeType == 'Polygon':
shapeToken = 'SHAPE@AREA'
elif shapeType == 'Polyline':
shapeToken = 'SHAPE@LENGTH'
elif shapeType == 'Point':
shapeToken = 'SHAPE@XY'
else:
shapeToken = 'SHAPE@JSON'
fields.append(shapeToken)
def parseShape(shapeValue):
if shapeValue is None:
return 0
elif shapeType in ['Polygon', 'Polyline']:
return shapeValue
elif shapeType == 'Point':
if shapeValue[0] is not None and shapeValue[1] is not None:
return shapeValue[0] + shapeValue[1]
else:
return 0
else:
return shapeValue
changed = False
with arcpy.da.SearchCursor(f, fields, sql_clause=(None, 'ORDER BY OBJECTID')) as fCursor, arcpy.da.SearchCursor(sde, fields, sql_clause=(None, 'ORDER BY OBJECTID')) as sdeCursor:
for fRow, sdeRow in izip(fCursor, sdeCursor):
if fRow != sdeRow:
# check shapes first
if fRow[-1] != sdeRow[-1]:
if not shapeType in ['Polygon', 'Polyline', 'Point']:
changed = True
break
fShape = parseShape(fRow[-1])
sdeShape = parseShape(sdeRow[-1])
try:
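# decimal=-1 tolerates an absolute difference up to 1.5 * 10**1,
# presumably to absorb precision drift between file and SDE geometries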
assert_almost_equal(fShape, sdeShape, -1)
# trim off shapes
fRow = list(fRow[:-1])
sdeRow = list(sdeRow[:-1])
except AssertionError:
changed = True
break
# trim microseconds since they can be off by one between file and sde databases
for i in range(len(fRow)):
if type(fRow[i]) is datetime:
fRow = list(fRow)
sdeRow = list(sdeRow)
fRow[i] = fRow[i].replace(microsecond=0)
sdeRow[i] = sdeRow[i].replace(microsecond=0)
# compare all values except OBJECTID
if fRow[1:] != sdeRow[1:]:
changed = True
break
return changed
|
Python
| 0.999999
|
@@ -2384,46 +2384,8 @@
or:%0A
- print(row%5B0%5D)%0A
|
7a2fd7bbdaed3ffda3cb8740d38e5f3e88dd8ce8
|
add name for the thread
|
update.py
|
update.py
|
from time import time
from app import db
import argparse
from jobs import update_registry
from util import elapsed
# needs to be imported so the definitions get loaded into the registry
import jobs_defs
"""
examples of calling this:
# update everything
python update.py Person.refresh --limit 10 --chunk 5 --rq
# update one thing not using rq
python update.py Package.test --id 0000-1111-2222-3333
"""
def parse_update_optional_args(parser):
# just for updating lots
parser.add_argument('--limit', "-l", nargs="?", type=int, help="how many jobs to do")
parser.add_argument('--chunk', "-ch", nargs="?", default=10, type=int, help="how many to take off db at once")
parser.add_argument('--after', nargs="?", type=str, help="minimum id or id start, ie 0000-0001")
parser.add_argument('--rq', action="store_true", default=False, help="do jobs in this thread")
parser.add_argument('--order', action="store_true", default=True, help="order them")
parser.add_argument('--append', action="store_true", default=False, help="append, dont' clear queue")
# just for updating one
parser.add_argument('--id', nargs="?", type=str, help="id of the one thing you want to update")
parser.add_argument('--doi', nargs="?", type=str, help="doi of the one thing you want to update")
# parse and run
parsed_args = parser.parse_args()
return parsed_args
def run_update(parsed_args):
update = update_registry.get(parsed_args.fn)
start = time()
#convenience method for handling a DOI
if parsed_args.doi:
from publication import Crossref
from util import clean_doi
my_pub = db.session.query(Crossref).filter(Crossref.id==clean_doi(parsed_args.doi)).first()
parsed_args.id = my_pub.id
print u"Got database hit for this doi: {}".format(my_pub.id)
update.run(**vars(parsed_args))
db.session.remove()
print "finished update in {} secconds".format(elapsed(start))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run stuff.")
# for everything
parser.add_argument('fn', type=str, help="what function you want to run")
parsed_args = parse_update_optional_args(parser)
run_update(parsed_args)
|
Python
| 0
|
@@ -1070,16 +1070,99 @@
queue%22)
+%0A parser.add_argument('--name', nargs=%22?%22, type=str, help=%22name for the thread%22)
%0A%0A #
|
ce625cb3c6769e859d47b3bcc90bc772b7d92a3e
|
use default widget many2one in tree view
|
partner_multi_relation_tabs/tablib/tab.py
|
partner_multi_relation_tabs/tablib/tab.py
|
# Copyright 2014-2018 Therp BV <https://therp.nl>.
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
import logging
from lxml import etree
from odoo import _
from odoo.osv.orm import transfer_modifiers_to_node
_logger = logging.getLogger(__name__) # pylint: disable=invalid-name
NAME_PREFIX = 'relation_ids_tab'
class Tab(object):
"""Encapsulate the information on a tab in the database."""
def __init__(self, tab_record):
"""Create tab from tab_record.
In this version tab_record can be assumed to be a partner.relation.tab.
"""
self.tab_record = tab_record
self.name = tab_record.code
def get_fieldname(self):
return '%s_%s' % (NAME_PREFIX, self.tab_record.id)
def get_visible_fieldname(self):
return '%s_visible' % self.get_fieldname()
def create_page(self):
tab_page = etree.Element('page')
self._set_page_attrs(tab_page)
field = etree.Element(
'field',
name=self.get_fieldname(),
context='{'
'"default_this_partner_id": id,'
'"default_tab_id": %d,'
'"active_test": False}' % self.tab_record.id)
tab_page.append(field)
tree = etree.Element('tree', editable='bottom')
field.append(tree)
# Now add fields for the editable tree view in the tab.
type_field = etree.Element(
'field',
name='type_selection_id',
widget='many2one_clickable')
type_field.set('domain', repr([('tab_id', '=', self.tab_record.id)]))
type_field.set('options', repr({'no_create': True}))
tree.append(type_field)
other_partner_field = etree.Element(
'field',
string=_('Partner'),
name='other_partner_id',
widget='many2one_clickable')
other_partner_field.set('options', repr({'no_create': True}))
tree.append(other_partner_field)
tree.append(etree.Element('field', name='date_start'))
tree.append(etree.Element('field', name='date_end'))
return tab_page
def _set_page_attrs(self, tab_page):
tab_page.set('string', self.tab_record.name)
attrs = {'invisible': [(self.get_visible_fieldname(), '=', False)]}
tab_page.set('attrs', repr(attrs))
transfer_modifiers_to_node(attrs, tab_page)
def compute_visibility(self, partner):
"""Compute visibility, dependent on partner and conditions."""
tab = self.tab_record
if tab.partner_ids:
return partner in tab.partner_ids
if tab.contact_type:
is_company_tab = tab.contact_type == 'c'
if partner.is_company != is_company_tab:
return False
if tab.partner_category_id:
if tab.partner_category_id not in partner.category_id:
return False
return True
|
Python
| 0.000001
|
@@ -1519,34 +1519,24 @@
et='many2one
-_clickable
')%0A t
@@ -1865,18 +1865,8 @@
2one
-_clickable
')%0A
|
0b54c244e6e4b745a678fe69fc1be7c16850203d
|
Fix a mistake.
|
python/distutils/example_without_dependency/setup.py
|
python/distutils/example_without_dependency/setup.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# PyNurseryRhymesDemo
# The MIT License
#
# Copyright (c) 2010,2015 Jeremie DECOCK (http://www.jdhp.org)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from nursery_rhymes import __version__ as VERSION
from distutils.core import setup
# See : http://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = ['Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX :: OS Independent',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries']
PACKAGES = ['nursery_rhymes']
README_FILE = 'README.rst'
def get_long_description():
with open(README_FILE, 'r') as fd:
desc = fd.read()
return desc
setup(author='Jeremie DECOCK',
author_email='jd.jdhp@gmail.com',
maintainer='Jeremie DECOCK',
maintainer_email='jd.jdhp@gmail.com',
name='nursery_rhymes',
description='A snippet to show how to install a project with setuptools',
long_description=get_long_description(),
url='http://www.jdhp.org/',
download_url='http://www.jdhp.org/',# where the package may be downloaded
scripts = ["rowyourboat"],
classifiers=CLASSIFIERS,
#license='MIT license', # Useless if license is already in CLASSIFIERS
packages=PACKAGES,
version=VERSION)
|
Python
| 0.003448
|
@@ -1607,17 +1607,8 @@
m ::
- POSIX ::
OS
|
d0a42e06baa46f1f9455fbfbe16a0ba3f16b4b61
|
Fix CellView has no set_completion method
|
gaphor/adapters/profiles/metaclasseditor.py
|
gaphor/adapters/profiles/metaclasseditor.py
|
"""
Metaclass item editors.
"""
from builtins import object
from zope import component
from gi.repository import Gtk
from zope.interface import implementer
from gaphor import UML
from gaphor.adapters.propertypages import create_hbox_label, EventWatcher
from gaphor.core import _, transactional
from gaphor.diagram import items
from gaphor.ui.interfaces import IPropertyPage
def _issubclass(c, b):
try:
return issubclass(c, b)
except TypeError:
return False
@implementer(IPropertyPage)
class MetaclassNameEditor(object):
"""
Metaclass name editor. Provides an editable combo box entry with a
predefined list of UML class names.
"""
order = 10
NAME_LABEL = _("Name")
CLASSES = list(
sorted(
n
for n in dir(UML)
if _issubclass(getattr(UML, n), UML.Element) and n != "Stereotype"
)
)
def __init__(self, item):
self.item = item
self.size_group = Gtk.SizeGroup.new(Gtk.SizeGroupMode.HORIZONTAL)
self.watcher = EventWatcher(item.subject)
def construct(self):
page = Gtk.VBox()
subject = self.item.subject
if not subject:
return page
hbox = create_hbox_label(self, page, self.NAME_LABEL)
model = Gtk.ListStore(str)
for c in self.CLASSES:
model.append([c])
cb = Gtk.ComboBox(model=model)
completion = Gtk.EntryCompletion()
completion.set_model(model)
completion.set_minimum_key_length(1)
completion.set_text_column(0)
cb.get_child().set_completion(completion)
entry = cb.get_child()
entry.set_text(subject and subject.name or "")
hbox.pack_start(cb, True, True, 0)
page.default = entry
# monitor subject.name attribute
changed_id = entry.connect("changed", self._on_name_change)
def handler(event):
if event.element is subject and event.new_value is not None:
entry.handler_block(changed_id)
entry.set_text(event.new_value)
entry.handler_unblock(changed_id)
self.watcher.watch("name", handler).register_handlers()
entry.connect("destroy", self.watcher.unregister_handlers)
page.show_all()
return page
@transactional
def _on_name_change(self, entry):
self.item.subject.name = entry.get_text()
component.provideAdapter(
MetaclassNameEditor, adapts=[items.MetaclassItem], name="Properties"
)
# vim:sw=4:et:ai
|
Python
| 0
|
@@ -1397,15 +1397,34 @@
oBox
-(model=
+.new_with_model_and_entry(
mode
|
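Background for the fix (GTK3 behavior, not spelled out in the record): a Gtk.ComboBox built without an entry packs a Gtk.CellView child, which has no set_completion(); constructing it with an entry yields a Gtk.Entry child instead. A sketch (set_entry_text_column is an extra step not in the diff):
model = Gtk.ListStore(str)
cb = Gtk.ComboBox.new_with_model_and_entry(model)
cb.set_entry_text_column(0)          # column shown in the entry
entry = cb.get_child()               # now a Gtk.Entry
entry.set_completion(completion)     # so this call succeeds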
e129577665a9d476888a8d3ad949d5754cd7744b
|
remove cosines from args
|
gary/coordinates/tests/test_propermotion.py
|
gary/coordinates/tests/test_propermotion.py
|
# coding: utf-8
"""
Test conversions in propermotion.py
"""
from __future__ import absolute_import, division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
import tempfile
import numpy as np
import astropy.coordinates as coord
import astropy.units as u
from ..propermotion import *
_txt = """ # from here: http://www.astrostudio.org/xhipreadme.html
# HIPID ra dec pmra pmdec pml pmb
1 0.00091185 1.08901332 -4.58 -1.61 -4.85 0.29
2 0.00379738 -19.49883738 179.70 1.40 104.99 -145.85
3 0.00500794 38.85928608 4.28 -3.42 3.44 -4.26
10241 32.92989805 -31.39849677 40.91 18.19 -24.11 37.72
10242 32.93085553 3.45271681 18.94 -1.75 17.12 8.29
10243 32.93296084 60.71236365 0.85 -1.23 1.18 -0.91
19265 61.93883644 80.97646929 -7.44 -2.57 -3.31 -7.14
19266 61.93960267 4.31970083 -7.87 -3.75 -1.68 -8.55
19267 61.94490325 2.72570815 11.89 -33.83 34.41 -10.08
"""
class TestPMConvert(object):
def setup(self):
with tempfile.NamedTemporaryFile() as temp:
temp.write(_txt)
temp.flush()
temp.seek(0)
self.data = np.genfromtxt(temp, names=True, skiprows=1)
def test_pm_gal_to_icrs(self):
# test a single entry
row = self.data[0]
c = coord.SkyCoord(ra=row['ra']*u.deg, dec=row['dec']*u.deg)
muad = [row['pmra'],row['pmdec']]*u.mas/u.yr
mulb = [row['pml'],row['pmb']]*u.mas/u.yr
trans_muad = pm_gal_to_icrs(c, mulb, cosb=True)[:,0]
assert np.allclose(muad, trans_muad, atol=1E-2)
# multiple entries
c = coord.SkyCoord(ra=self.data['ra']*u.deg, dec=self.data['dec']*u.deg)
muad = np.vstack((self.data['pmra'],self.data['pmdec']))*u.mas/u.yr
mulb = np.vstack((self.data['pml'],self.data['pmb']))*u.mas/u.yr
trans_muad = pm_gal_to_icrs(c, mulb, cosb=True)
assert np.allclose(muad, trans_muad, atol=1E-2)
def test_pm_icrs_to_gal(self):
# test a single entry
row = self.data[0]
c = coord.SkyCoord(ra=row['ra']*u.deg, dec=row['dec']*u.deg)
muad = [row['pmra'],row['pmdec']]*u.mas/u.yr
mulb = [row['pml'],row['pmb']]*u.mas/u.yr
trans_mulb = pm_icrs_to_gal(c, muad, cosdec=True)[:,0]
assert np.allclose(mulb, trans_mulb, atol=1E-2)
# multiple entries
c = coord.SkyCoord(ra=self.data['ra']*u.deg, dec=self.data['dec']*u.deg)
muad = np.vstack((self.data['pmra'],self.data['pmdec']))*u.mas/u.yr
mulb = np.vstack((self.data['pml'],self.data['pmb']))*u.mas/u.yr
trans_mulb = pm_icrs_to_gal(c, muad, cosdec=True)
assert np.allclose(mulb, trans_mulb, atol=1E-2)
|
Python
| 0.004387
|
@@ -1548,26 +1548,15 @@
mulb
-, cosb=True
)%5B:,0%5D%0A
+
@@ -1913,19 +1913,8 @@
mulb
-, cosb=True
)%0A
@@ -2277,29 +2277,16 @@
(c, muad
-, cosdec=True
)%5B:,0%5D%0A
@@ -2591,32 +2591,32 @@
%5D))*u.mas/u.yr%0A%0A
+
trans_mu
@@ -2646,21 +2646,8 @@
muad
-, cosdec=True
)%0A
|
6c11b9cc9b213928e32d883d4f557f7421da6802
|
Add kamerstukken to dossier API
|
document/api.py
|
document/api.py
|
from rest_framework import serializers, viewsets
from document.models import Document, Kamerstuk, Dossier
class DossierSerializer(serializers.HyperlinkedModelSerializer):
documents = serializers.HyperlinkedRelatedField(read_only=True,
view_name='document-detail',
many=True)
class Meta:
model = Dossier
fields = ('id', 'dossier_id', 'title', 'documents')
class DossierViewSet(viewsets.ModelViewSet):
queryset = Dossier.objects.all()
serializer_class = DossierSerializer
class DocumentSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Document
fields = ('id', 'dossier', 'raw_type', 'raw_title', 'publisher', 'date_published', 'document_url')
class DocumentViewSet(viewsets.ModelViewSet):
queryset = Document.objects.all()
serializer_class = DocumentSerializer
class KamerstukSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Kamerstuk
fields = ('id', 'document', 'id_main', 'id_sub', 'type_short', 'type_long')
class KamerstukViewSet(viewsets.ModelViewSet):
queryset = Kamerstuk.objects.all()
serializer_class = KamerstukSerializer
|
Python
| 0
|
@@ -380,16 +380,233 @@
=True)%0A%0A
+ kamerstukken = serializers.HyperlinkedRelatedField(read_only=True,%0A view_name='kamerstuk-detail',%0A many=True)%0A%0A
clas
@@ -683,16 +683,32 @@
'title',
+ 'kamerstukken',
'docume
|
4f8c2e6ccab17f220a46d9b8e22bf72dffceb135
|
Improve Unit Tests for Dashboard
|
test/test_dashboard.py
|
test/test_dashboard.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2017:
# Matthieu Estrada, ttamalfor@gmail.com
#
# This file is part of (AlignakApp).
#
# (AlignakApp) is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# (AlignakApp) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with (AlignakApp). If not, see <http://www.gnu.org/licenses/>.
import sys
import unittest2
from alignak_app.core.utils import init_config
from alignak_app.dashboard.app_dashboard import Dashboard
try:
__import__('PyQt5')
from PyQt5.QtWidgets import QApplication
except ImportError:
from PyQt4.Qt import QApplication
class TestNotification(unittest2.TestCase):
"""
This file tests the Dashboard class and, at the same time, the DashboardFactory
"""
init_config()
@classmethod
def setUpClass(cls):
"""Create QApplication"""
try:
cls.app = QApplication(sys.argv)
except:
pass
def test_initialize_dashboard(self):
"""Initialize Dashboard"""
under_test = Dashboard()
self.assertIsNone(under_test.dashboard_type)
self.assertIsNotNone(under_test.dashboard_factory)
# Create all the label
under_test.initialize()
self.assertEqual('state', under_test.dashboard_type.objectName())
self.assertIsNotNone(under_test.dashboard_factory)
def test_display_dashboard(self):
"""Display Dashboard"""
under_test = Dashboard()
under_test.initialize()
self.assertEqual('', under_test.dashboard_type.text())
# Simulate dicts of states
synthesis = {
'hosts': {
'up': 1,
'down': 2,
'unreachable': 3,
'acknowledge': 4,
'downtime': 5,
},
'services': {
'ok': 4,
'warning': 5,
'critical': 6,
'unknown': 7,
'unreachable': 8,
'acknowledge': 9,
'downtime': 10,
}
}
# Send a CRITICAL dashboard
changes = {
'hosts': {
'up': 0,
'down': 0,
'unreachable': 0,
'acknowledge': 0,
'downtime': 0
},
'services': {
'ok': 0,
'warning': 0,
'critical': 0,
'unknown': 0,
'unreachable': 0,
'acknowledge': 0,
'downtime': 0
}
}
under_test.update_dashboard(synthesis, changes, True)
self.assertEqual('CRITICAL', under_test.dashboard_type.text())
self.assertEqual(
under_test.dashboard_factory.state_data['hosts_up']['state_number'].text(),
'1'
)
self.assertEqual(
under_test.dashboard_factory.state_data['hosts_up']['progress_bar'].value(),
6
)
self.assertEqual(
under_test.dashboard_factory.state_data['hosts_up']['diff'].text(),
''
)
assert 'Background-color: #e74c3c;' in under_test.styleSheet()
def test_get_style_sheet(self):
"""Get Style Sheet according to States"""
ok_css = "Background-color: #27ae60;"
warning_css = "Background-color: #e67e22;"
critical_css = "Background-color: #e74c3c;"
none_css = "Background-color: #EEE;"
under_test = Dashboard()
css = {
'OK': ok_css,
'WARNING': warning_css,
'CRITICAL': critical_css,
'NONE': none_css,
}
states = ('OK', 'WARNING', 'CRITICAL', 'NONE')
for state in states:
expected_css = css[state]
under_test.set_style_sheet(state)
current_css = under_test.styleSheet()
assert expected_css in current_css
def test_set_position(self):
"""Dashboard Position change"""
under_test = Dashboard()
initial_position = under_test.app_widget.pos()
under_test.set_position()
self.assertNotEqual(under_test.app_widget.pos(), initial_position)
|
Python
| 0
|
@@ -882,16 +882,32 @@
t_config
+, set_app_config
%0Afrom al
@@ -4582,24 +4582,130 @@
dget.pos()%0A%0A
+ # Test set_position affect Dashboard%0A set_app_config('Dashboard', 'position', 'top:right')%0A
unde
@@ -4726,17 +4726,16 @@
ition()%0A
-%0A
@@ -4796,12 +4796,255 @@
al_position)
+%0A%0A # Test new configuration not affect Dashbard position%0A new_position = under_test.app_widget.pos()%0A set_app_config('Dashboard', 'position', 'top:left')%0A self.assertEqual(under_test.app_widget.pos(), new_position)%0A
|
2642128696fd0448edac6623d57b5964775c339a
|
add reset command
|
dog/ext/time.py
|
dog/ext/time.py
|
import logging
import datetime
import discord
import pycountry
import pytz
from discord.ext.commands import group
from lifesaver.bot import Cog, Context
from lifesaver.bot.storage import AsyncJSONStorage
log = logging.getLogger(__name__)
class Time(Cog):
def __init__(self, bot):
super().__init__(bot)
self.timezones = AsyncJSONStorage('timezones.json', loop=bot.loop)
def get_time_for(self, user: discord.User):
timezone = self.timezones.get(user.id)
if not timezone:
return None
their_time = datetime.datetime.now(pytz.timezone(timezone))
return their_time.strftime('%B %d, %Y %H:%M:%S (%I:%M:%S %p)')
@group(invoke_without_command=True, aliases=['t'])
async def time(self, ctx: Context, *, who: discord.User = None):
"""Views the time for another user."""
who = who or ctx.author
formatted_time = self.get_time_for(who)
if not formatted_time:
await ctx.send(
f'{who.display_name} has not set their time. You can tell them to set their time with '
f'`{ctx.prefix}time set`.'
)
return
await ctx.send(f'{who.display_name}: {formatted_time}')
@time.command(name='set')
async def time_set(self, ctx: Context):
"""Sets your current timezone interactively."""
target = ctx.author
# TODO: refactor, improve, and build upon ask and prompt into a generic interface. should be usable by others.
# it's currently too trashy and narrow the way it stands.
ask_aborted = object() # sentinel that indicates abort
async def ask(prompt, *, determiner, on_fail="Invalid response. Please try again."):
embed = discord.Embed(color=discord.Color.green(), title='Timezone wizard', description=prompt)
await target.send(embed=embed)
while True:
message = await self.bot.wait_for('message', check=lambda m: not m.guild and m.author == target)
if message.content == 'cancel':
return ask_aborted
try:
value = determiner(message.content)
if not value:
continue
else:
return value
except:
await target.send(on_fail)
continue
async def prompt(message):
embed = discord.Embed(color=discord.Color.gold(), title='Confirmation', description=message)
confirmation: discord.Message = await target.send(embed=embed)
emoji = ['\N{WHITE HEAVY CHECK MARK}', '\N{NO ENTRY SIGN}']
for e in emoji:
await confirmation.add_reaction(e)
while True:
def _check(_r, u):
return u == target
reaction, user = await self.bot.wait_for('reaction_add', check=_check)
if reaction.emoji in emoji:
return reaction.emoji == emoji[0]
try:
embed = discord.Embed(
title='Timezone wizard',
description="Hello! I'll be helping you pick your timezone. By setting your timezone, other people "
"will be able to see what time it is for you, and other cool stuff."
)
await target.send(embed=embed)
except discord.HTTPException:
await ctx.send("I can't DM you.")
return
log.debug('%d: Timezone wizard started.', target.id)
while True:
country = await ask(
'Please send me the name of the country you live in.\n\n'
'Something like "USA", "United States", or even the two-letter country code like "US".\n'
"Send 'cancel' to abort.",
determiner=pycountry.countries.lookup,
on_fail="Sorry, that didn't seem like a country to me. Please try again, or send 'cancel' to abort."
)
if country is ask_aborted:
await target.send('Operation cancelled. Sorry about that!')
return
log.debug('%d: Provided country: %s', target.id, country)
if await prompt(f'Do you live in **{country.official_name}**?\n'
'Click \N{WHITE HEAVY CHECK MARK} to continue.'):
break
code = country.alpha_2
log.debug('%d: Lives in %s (%s)', target.id, code, country)
try:
timezones = pytz.country_timezones[code]
except KeyError:
await ctx.send(f"Sorry, but I couldn't find any designated timezones for **{country.official_name}** "
"in my database. I still love you, though. \N{HEAVY BLACK HEART}")
log.warning('%d: Failed to find any timezones for %s (%s)', target.id, code, country)
return
embed = discord.Embed(
title='Timezone wizard',
description='Which timezone are you living in?\n\n',
color=discord.Color.green()
)
for timezone in timezones:
pytz_timezone = pytz.timezone(timezone)
now_in_timezone: datetime.datetime = datetime.datetime.now(pytz_timezone)
time_now = now_in_timezone.strftime('%H:%M (%I:%M %p)')
embed.description += f'\N{BULLET} {timezone} {time_now}\n'
embed.description += '\nPlease send the timezone code.'
if len(embed.description) > 2048:
await target.send(
"Hmm. Looks like there's so many timezones in that region, I can't display them all. "
"Sorry about that."
)
return
await target.send(embed=embed)
while True:
response = await self.bot.wait_for('message', check=lambda m: not m.guild and m.author == target)
if response.content == 'cancel':
await target.send('Aborted. Sorry.')
return
if response.content not in timezones:
await target.send("That's not a timezone code that was listed above. Send 'cancel' to abort.")
else:
user_timezone = response.content
break
log.debug('%d: Timezone is: %s', target.id, user_timezone)
await self.timezones.put(target.id, user_timezone)
embed = discord.Embed(title='You made it!', color=discord.Color.magenta(),
description=f'Your timezone is now {user_timezone}. Thanks for putting up with me.')
await target.send(embed=embed)
def setup(bot):
bot.add_cog(Time(bot))
|
Python
| 0.000042
|
@@ -1223,24 +1223,457 @@
ed_time%7D')%0A%0A
+ @time.command(name='reset')%0A async def time_reset(self, ctx: Context):%0A %22%22%22Resets your timezone.%22%22%22%0A if await ctx.confirm(title='Are you sure?', message='Your timezone will be removed.'):%0A try:%0A await self.timezones.delete(ctx.author.id)%0A except KeyError:%0A pass%0A await ctx.send('Done.')%0A else:%0A await ctx.send('Okay, cancelled.')%0A%0A
@time.co
|
68cdb1264ba77367f10f880103aaf558498a0ed8
|
Return None on poll of "starting" container
|
dockerspawner/swarmspawner.py
|
dockerspawner/swarmspawner.py
|
"""
A Spawner for JupyterHub that runs each user's server in a separate docker service
"""
from pprint import pformat
from textwrap import dedent
from docker.types import (
ContainerSpec, TaskTemplate, Resources, EndpointSpec, Mount, DriverConfig
)
from docker.errors import APIError
from tornado import gen
from traitlets import Dict, Unicode, default
from .dockerspawner import DockerSpawner
class SwarmSpawner(DockerSpawner):
"""A Spawner for JupyterHub that runs each user's server in a separate docker service"""
object_type = "service"
object_id_key = "ID"
@property
def service_id(self):
"""alias for object_id"""
return self.object_id
@property
def service_name(self):
"""alias for object_name"""
return self.object_name
@default("network_name")
def _default_network_name(self):
# no default network for swarm
# use internal networking by default
return ""
extra_resources_spec = Dict(
config=True,
help="""
Keyword arguments to pass to the Resources spec
""",
)
extra_container_spec = Dict(
config=True,
help="""
Keyword arguments to pass to the ContainerSpec constructor
""",
)
extra_task_spec = Dict(
config=True,
help="""
Keyword arguments to pass to the TaskTemplate constructor
""",
)
extra_endpoint_spec = Dict(
config=True,
help="""
Keyword arguments to pass to the Endpoint constructor
""",
)
volume_driver = Unicode(
"",
config=True,
help=dedent(
"""
Use this driver for mounting the notebook volumes.
Note that this driver must support multiple hosts in order for it to work across the swarm.
For a list of possible drivers, see https://docs.docker.com/engine/extend/legacy_plugins/#volume-plugins
"""
),
)
volume_driver_options = Dict(
config=True,
help=dedent(
"""
Configuration options for the multi-host volume driver.
"""
),
)
# container-removal cannot be disabled for services
remove = True
@property
def mount_driver_config(self):
return DriverConfig(
name=self.volume_driver, options=self.volume_driver_options or None
)
@property
def mounts(self):
if len(self.volume_binds):
driver = self.mount_driver_config
return [
Mount(
target=vol["bind"],
source=host_loc,
type="bind",
read_only=vol["mode"] == "ro",
driver_config=driver,
)
for host_loc, vol in self.volume_binds.items()
]
else:
return []
@gen.coroutine
def poll(self):
"""Check for my id in `docker ps`"""
service = yield self.get_task()
if not service:
self.log.warning("Service %s not found", self.service_name)
return 0
service_state = service["Status"]
self.log.debug(
"Service %s status: %s", self.service_id[:7], pformat(service_state)
)
if service_state["State"] in {"running", "pending", "preparing"}:
return None
else:
return pformat(service_state)
@gen.coroutine
def get_task(self):
self.log.debug("Getting task of service '%s'", self.service_name)
if self.get_object() is None:
return None
try:
tasks = yield self.docker(
"tasks",
filters={"service": self.service_name, "desired-state": "running"},
)
if len(tasks) == 0:
return None
elif len(tasks) > 1:
raise RuntimeError(
"Found more than one running notebook task for service '{}'".format(
self.service_name
)
)
task = tasks[0]
except APIError as e:
if e.response.status_code == 404:
self.log.info("Task for service '%s' is gone", self.service_name)
task = None
else:
raise
return task
@gen.coroutine
def create_object(self):
"""Start the single-user server in a docker service."""
container_kwargs = dict(
image=self.image,
env=self.get_env(),
args=(yield self.get_command()),
mounts=self.mounts,
)
container_kwargs.update(self.extra_container_spec)
container_spec = ContainerSpec(**container_kwargs)
resources_kwargs = dict(
mem_limit=self.mem_limit,
mem_reservation=self.mem_guarantee,
cpu_limit=int(self.cpu_limit * 1e9) if self.cpu_limit else None,
cpu_reservation=int(
self.cpu_guarantee * 1e9
) if self.cpu_guarantee else None,
)
resources_kwargs.update(self.extra_resources_spec)
resources_spec = Resources(**resources_kwargs)
task_kwargs = dict(
container_spec=container_spec,
resources=resources_spec,
networks=[self.network_name] if self.network_name else [],
)
task_kwargs.update(self.extra_task_spec)
task_spec = TaskTemplate(**task_kwargs)
endpoint_kwargs = {}
if not self.use_internal_ip:
endpoint_kwargs["ports"] = {None: (self.port, "tcp")}
endpoint_kwargs.update(self.extra_endpoint_spec)
endpoint_spec = EndpointSpec(**endpoint_kwargs)
create_kwargs = dict(
task_template=task_spec, endpoint_spec=endpoint_spec, name=self.service_name
)
create_kwargs.update(self.extra_create_kwargs)
return (yield self.docker("create_service", **create_kwargs))
@gen.coroutine
def remove_object(self):
self.log.info("Removing %s %s", self.object_type, self.object_id)
# remove the container, as well as any associated volumes
yield self.docker("remove_" + self.object_type, self.object_id)
@gen.coroutine
def start_object(self):
"""Nothing to do here
There is no separate start action for services
"""
pass
@gen.coroutine
def stop_object(self):
"""Nothing to do here
There is no separate stop action for services
"""
pass
@gen.coroutine
def get_ip_and_port(self):
"""Queries Docker daemon for service's IP and port.
If you are using network_mode=host, you will need to override
this method as follows::
@gen.coroutine
def get_ip_and_port(self):
return self.host_ip, self.port
You will need to make sure host_ip and port
are correct, which depends on the route to the service
and the port it opens.
"""
if self.use_internal_ip:
ip = self.service_name
port = self.port
else:
# discover published ip, port
ip = self.host_ip
service = yield self.get_object()
for port_config in service["Endpoint"]["Ports"]:
if port_config.get("TargetPort") == self.port:
port = port_config["PublishedPort"]
break
else:
self.log.error(
"Couldn't find PublishedPort for %s in %s",
self.port,
service["Endpoint"]["Ports"],
)
raise RuntimeError(
"Couldn't identify port for service %s", self.service_name
)
return ip, port
|
Python
| 0.000241
|
@@ -3373,16 +3373,28 @@
unning%22,
+ %22starting%22,
%22pendin
|
e0db7a514982a040b0d5d75faae8db5e531a7789
|
Fix missed quotes
|
dimod/core/initialized.py
|
dimod/core/initialized.py
|
# Copyright 2020 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Some samplers require or accept an initial starting point in sample-space.
See the source code for :class:`.IdentitySampler` for an example of using
this abstract base class in a sampler.
"""
from __future__ import annotations
import abc
from collections import namedtuple
from numbers import Integral
from typing import Optional, Tuple
import numpy as np
from dimod.sampleset import as_samples, infer_vartype, SampleSet
from dimod.vartypes import Vartype
from dimod.binary.binary_quadratic_model import BinaryQuadraticModel
from dimod.typing import SampleLike
__all__ = ['Initialized']
ParsedInputs = namedtuple('ParsedInputs',
['initial_states',
'initial_states_generator',
'num_reads',
'seed'])
class Initialized(abc.ABC):
# Allows new generators to be registered
_generators = {}
# dev note: if this function is updated, make sure to also update
# IdentitySampler
def parse_initial_states(self, bqm: BinaryQuadraticModel,
initial_states = None,
initial_states_generator: str = 'random',
num_reads: Optional[int] = None,
seed: Optional[int] = None,
copy_always: bool = False) -> Tuple[SampleLike, str, int, int]:
"""Parse or generate initial states for an initialized sampler.
Args:
bqm:
Binary quadratic model.
initial_states (samples-like):
One or more samples, each defining an initial state for all the
problem variables. Initial states are given one per read, but
if fewer than ``num_reads`` initial states are defined,
additional values are generated as specified by
``initial_states_generator``. See func:`.as_samples` for a
description of "samples-like".
initial_states_generator:
Defines the expansion of ``initial_states`` if fewer than
``num_reads`` are specified:
* "none":
If the number of initial states specified is smaller than
``num_reads``, raises ValueError.
* "tile":
Reuses the specified initial states if fewer than
``num_reads`` or truncates if greater.
* "random":
Expands the specified initial states with randomly
generated states if fewer than ``num_reads`` or truncates if
greater.
num_reads:
Number of reads. Defaults to the number of initial states, if
``initial_states`` is specified, or to 1, if not.
seed:
32-bit unsigned integer seed to use for the PRNG. Specifying a
particular seed with a constant set of parameters produces
identical results. If not provided, a random seed is chosen.
copy_always:
If True, ``initial_states`` is always copied; otherwise it is
copied only if necessary.
Returns:
A named tuple with `['initial_states', 'initial_states_generator',
'num_reads', 'seed']` as generated by this function.
"""
num_variables = len(bqm)
# validate/initialize initial_states
if initial_states is None:
initial_states_array = np.empty((0, num_variables), dtype=np.int8)
initial_states_variables = list(bqm.variables)
initial_states_vartype = bqm.vartype
else:
# confirm that the vartype matches and/or make it match
if isinstance(initial_states, SampleSet):
initial_states_vartype = initial_states.vartype
else:
# check based on values, defaulting to match the current bqm
initial_states_vartype = infer_vartype(initial_states) or bqm.vartype
if not copy_always:
# only copy if there's a vartype mismatch
copy_always = initial_states_vartype != bqm.vartype
initial_states_array, initial_states_variables = \
as_samples(initial_states, copy=copy_always)
# confirm that the variables match
if bqm.variables ^ initial_states_variables:
raise ValueError("mismatch between variables in "
"'initial_states' and 'bqm'")
# match the vartype of the initial_states to the bqm
if initial_states_vartype is Vartype.SPIN and bqm.vartype is Vartype.BINARY:
initial_states_array += 1
initial_states_array //= 2
elif initial_states_vartype is Vartype.BINARY and bqm.vartype is Vartype.SPIN:
initial_states_array *= 2
initial_states_array -= 1
# validate num_reads and/or infer them from initial_states
if num_reads is None:
num_reads = len(initial_states_array) or 1
if not isinstance(num_reads, Integral):
raise TypeError("'num_reads' should be a positive integer")
if num_reads < 1:
raise ValueError("'num_reads' should be a positive integer")
# fill/generate the initial states as needed
if initial_states_generator not in self._generators:
raise ValueError("unknown value for 'initial_states_generator'")
extrapolate = self._generators[initial_states_generator]
initial_states_array = extrapolate(initial_states=initial_states_array,
num_reads=num_reads,
num_variables=num_variables,
seed=seed,
vartype=bqm.vartype)
initial_states_array = self._truncate_filter(initial_states_array, num_reads)
sampleset = SampleSet.from_samples_bqm((initial_states_array,
initial_states_variables),
bqm)
return ParsedInputs(sampleset, initial_states_generator, num_reads,
seed)
@staticmethod
def _truncate_filter(initial_states, num_reads):
if len(initial_states) > num_reads:
initial_states = initial_states[:num_reads]
return initial_states
def _none_generator(initial_states, num_reads, *args, **kwargs):
if len(initial_states) < num_reads:
raise ValueError("insufficient number of initial states given")
return initial_states
Initialized._generators.update(none=_none_generator)
def _tile_generator(initial_states, num_reads, *args, **kwargs):
if len(initial_states) < 1:
raise ValueError("cannot tile an empty sample set of initial states")
if len(initial_states) >= num_reads:
return initial_states
reps, rem = divmod(num_reads, len(initial_states))
initial_states = np.tile(initial_states, (reps, 1))
initial_states = np.vstack((initial_states, initial_states[:rem]))
return initial_states
Initialized._generators.update(tile=_tile_generator)
def _random_generator(initial_states, num_reads, num_variables, vartype, seed=None):
rem = max(0, num_reads - len(initial_states))
np_rand = np.random.RandomState(seed)
# sort vartype so that seed is reproducible
values = np.asarray(sorted(vartype.value), dtype=np.int8)
# takes dtype from values
random_states = np_rand.choice(values, size=(rem, num_variables))
# handle zero-length array of input states
if len(initial_states):
initial_states = np.vstack((initial_states, random_states))
else:
initial_states = random_states
return initial_states
Initialized._generators.update(random=_random_generator)
|
Python
| 0
|
@@ -2548,16 +2548,21 @@
e func:%60
+dimod
.as_samp
@@ -3915,16 +3915,17 @@
le with
+%60
%60%5B'initi
@@ -3997,16 +3997,17 @@
'seed'%5D%60
+%60
as gene
|
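To make the generator registry concrete, a small illustration (not part of the record) of expanding two seed states to num_reads=4:
import numpy as np
seeds = np.array([[0, 1], [1, 0]], dtype=np.int8)
_tile_generator(seeds, 4)   # -> [[0,1],[1,0],[0,1],[1,0]], seeds repeated
_none_generator(seeds, 4)   # -> raises ValueError: 2 states for 4 reads
# _random_generator pads with rows drawn from the sorted vartype values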
51e35e88597d2c34905222cd04a46a2a840c0d92
|
Refactor Poly ABC
|
dimod/core/polysampler.py
|
dimod/core/polysampler.py
|
# Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ============================================================================
import abc
from six import add_metaclass
__all__ = 'PolySampler',
@add_metaclass(abc.ABCMeta)
class PolySampler:
"""Sampler/Composite supports binary polynomials.
Binary polynomials are an extension of binary quadratic models that allow
higher-order interactions.
"""
@abc.abstractmethod
def sample_poly(self, polynomial, **kwargs):
"""Sample from a higher-order polynomial."""
pass
|
Python
| 0
|
@@ -727,16 +727,118 @@
aclass%0A%0A
+from dimod.core.composite import Composite%0Afrom dimod.higherorder.polynomial import BinaryPolynomial%0A%0A
__all__
@@ -853,16 +853,38 @@
ampler',
+ 'ComposedPolySampler'
%0A%0A%0A@add_
@@ -943,18 +943,8 @@
pler
-/Composite
sup
@@ -969,20 +969,16 @@
omials.%0A
-
%0A Bin
@@ -1079,20 +1079,16 @@
ctions.%0A
-
%0A %22%22%22
@@ -1223,12 +1223,312 @@
pass%0A
+%0A def sample_hising(self, h, J, **kwargs):%0A return self.sample_poly(BinaryPolynomial.from_hising(h, J), **kwargs)%0A%0A def sample_hubo(self, H, **kwargs):%0A return self.sample_poly(BinaryPolynomial.from_hubo(H), **kwargs)%0A%0A%0Aclass ComposedPolySampler(PolySampler, Composite):%0A pass%0A
|
e9806e906ae8a15dceb5aeb77cdacb753af3943c
|
Make interpolation compatibility optional
|
Lib/fontmake/font_project.py
|
Lib/fontmake/font_project.py
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from os import path
from time import time
from cu2qu import fonts_to_quadratic
from glyphs2ufo.glyphslib import build_masters, build_instances
from robofab.world import OpenFont
from ufo2ft import compileOTF, compileTTF
class FontProject:
"""Provides methods for building fonts."""
def __init__(self, src_dir, out_dir):
self.src_dir = src_dir
self.out_dir = out_dir
def build_masters(self, glyphs_path):
"""Build master UFOs from Glyphs source."""
return build_masters(
glyphs_path, self.src_dir, 'Italic' in glyphs_path)
def build_instances(self, glyphs_path):
"""Build instance UFOs from Glyphs source."""
out_dir = self._output_dir('ufo')
return build_instances(
glyphs_path, self.src_dir, out_dir, 'Italic' in glyphs_path)
def save_otf(self, ufo):
"""Build OTF from UFO."""
otf_path = self._output_path(ufo, 'otf')
otf = compileOTF(ufo)
otf.save(otf_path)
def save_ttf(self, ufo):
"""Build TTF from UFO."""
ttf_path = self._output_path(ufo, 'ttf')
ttf = compileTTF(ufo)
ttf.save(ttf_path)
def run_all(self, glyphs_path, fea_path=None, interpolate=False):
"""Run toolchain from Glyphs source to OpenType binaries."""
if interpolate:
print '>> Interpolating master UFOs from Glyphs source'
ufos = self.build_instances(glyphs_path)
else:
print '>> Loading master UFOs from Glyphs source'
ufos = self.build_masters(glyphs_path)
for ufo in ufos:
print '>> Saving OTF for ' + ufo.info.postscriptFullName
self.save_otf(ufo)
print '>> Converting curves to quadratic'
start_t = time()
print fonts_to_quadratic(ufos, compatible=True)
t = time() - start_t
print '[took %f seconds]' % t
for ufo in ufos:
name = ufo.info.postscriptFullName
print '>> Saving TTF for ' + name
self.save_ttf(ufo)
def _output_dir(self, ext):
"""Generate an output directory."""
return path.join(self.out_dir, ext.lower())
def _output_path(self, ufo, ext):
"""Generate output path for a UFO with given directory and extension."""
family = ufo.info.familyName.replace(' ', '')
style = ufo.info.styleName.replace(' ', '')
out_dir = self._output_dir(ext)
if not path.exists(out_dir):
os.makedirs(out_dir)
return path.join(out_dir, '%s-%s.%s' % (family, style, ext))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('glyphs_path', metavar='GLYPHS_PATH')
parser.add_argument('-i', '--interpolate', action='store_true')
parser.add_argument('-f', '--fea-path')
args = parser.parse_args()
project = FontProject('src', 'out')
project.run_all(args.glyphs_path, args.fea_path, args.interpolate)
if __name__ == '__main__':
main()
|
Python
| 0.000002
|
@@ -1807,16 +1807,25 @@
run_all(
+%0A
self, gl
@@ -1849,16 +1849,34 @@
th=None,
+ compatible=False,
interpo
@@ -2432,16 +2432,43 @@
time()%0A
+ if compatible:%0A
@@ -2496,29 +2496,102 @@
tic(
+*
ufos
-, compatible=True
+)%0A else:%0A for ufo in ufos:%0A print fonts_to_quadratic(ufo
)%0A
@@ -3456,16 +3456,83 @@
_PATH')%0A
+ parser.add_argument('-c', '--compatible', action='store_true')%0A
pars
@@ -3727,16 +3727,25 @@
run_all(
+%0A
args.gly
@@ -3764,24 +3764,41 @@
gs.fea_path,
+ args.compatible,
args.interp
|
efc4826bf692937129f57a046c5d06c02c0dc911
|
Update pd.DataFrame.read_excel parameters
|
vdd/io.py
|
vdd/io.py
|
import re
import collections
import itertools
import warnings
import numpy as np
try:
import pandas as pd
import xlrd
except ImportError:
warnings.warn('`pandas` and `xlrd` packages required for '
'spreadsheet support.')
class ExcelParser(object):
# 20 characteristic definitions are supported ((4*26)/5 cols)
_MAX_COL = 'CZ'
_NCOLS_CHAR = 4
def __init__(self, path):
self.path = path
@property
def df(self):
"""DataFrame of requirement-characteristic relationships."""
try:
return self._df
except AttributeError:
df = self._df = pd.read_excel(self.path, skiprows=[0,1])
return df
@property
def cdf(self):
"""DataFrame of characteristic definitions.
This augments the regular DataFrame.
"""
try:
return self._cdf
except AttributeError:
df = pd.read_excel(
self.path,
parse_cols="C:{}".format(self._MAX_COL)
)[:1]
return self._cdf_base(df)
def _cdf_base(self, df):
dd = collections.defaultdict(list)
for i, s in enumerate(df.columns):
ridx = i % self._NCOLS_CHAR # Relative index
if ridx == 0:
# Initial column of group; begin construct.
dd['name'].append(s)
elif ridx == 1:
dd['min'].append(df.loc[0, s])
elif ridx == 3:
# Final column of group; add construct to list.
dd['max'].append(df.loc[0, s])
self._cdf = tdf = pd.DataFrame.from_dict(dd)
return tdf
def get_characteristics(self):
"""Returns a 3-tuple: (<name>, <minvalue>, <maxvalue>).
If the minimum or maximum values are omitted, NaN(s) will be
returned.
"""
l = []
for rec in self.cdf.to_records():
if re.match(r'^(Unnamed: \d+|Characteristic \d+)$',
rec['name']) is not None:
warnings.warn("Picked up a default column name")
else:
l.append((rec['name'], rec['min'], rec['max']))
return l
def get_relationships(self):
"""Get relationships defined a 4/5-tuple.
Size of tuple depends on the type of relationship.
"""
# TODO: Yeah I know, variable return type.
reqts = [tup[0] for tup in self.get_requirements()]
chars = [tup[0] for tup in self.get_characteristics()]
return self._parse_row(reqts, chars)
def _parse_row(self, reqts, chars):
n = self._NCOLS_CHAR
df = self.df.loc[:,'Correlation':]
relationships = []
for (i, r), (j, c) in itertools.product(enumerate(reqts),
enumerate(chars)):
row = df.loc[i,:].values
base_tup = (r, c, row[j*n+1], row[j*n+0], row[j*n+2])
if np.isnan(base_tup[4]):
# The target value is always a quantity.
continue
if base_tup[2] == 'opt':
tup = base_tup + (row[j*n+3],)
else:
tup = base_tup
relationships.append(tup)
return relationships
def get_requirements(self):
cols = ('Weighting', 'Requirements')
return [tuple(reversed(tuple(rec)[1:])) # Exclude idx
for rec in self.df.loc[:,cols].to_records()]
class CompactExcelParser(ExcelParser):
_NCOLS_CHAR = 3
def _cdf_base(self, df):
dd = collections.defaultdict(list)
for i, s in enumerate(df.columns):
ridx = i % self._NCOLS_CHAR # Relative index
if ridx == 0:
# Initial column of group; begin construct.
dd['name'].append(s)
elif ridx == 1:
dd['min'].append(df.loc[0, s])
elif ridx == 2:
# Final column of group; add construct to list.
dd['max'].append(df.loc[0, s])
self._cdf = tdf = pd.DataFrame.from_dict(dd)
return tdf
def _parse_row(self, reqts, chars):
n = self._NCOLS_CHAR
df = self.df.loc[:,'Relationship Type':]
relationships = []
for (i, r), (j, c) in itertools.product(enumerate(reqts),
enumerate(chars)):
row = df.loc[i,:].values
rel = row[j*n]
try:
type_ = {'+': 'max', 'o': 'opt', '-': 'min'}[rel[0]]
except (TypeError, IndexError):
# rel is not a recognised string.
continue
base_tup = (r, c, type_, rel, row[j*n+1])
if np.isnan(base_tup[4]):
# The target value is always a quantity.
continue
if base_tup[2] == 'opt':
tup = base_tup + (row[j*n+2],)
else:
tup = base_tup
relationships.append(tup)
return relationships
|
Python
| 0
|
@@ -995,22 +995,19 @@
-par
+u
se
-_
cols=%22C:
|
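The diff above tracks a pandas API rename: `read_excel`'s `parse_cols` keyword was deprecated in favor of `usecols` (around pandas 0.21) and removed in a later release. `usecols` accepts the same Excel-style column strings, so it is a drop-in replacement; the workbook name below is a placeholder:

```python
import pandas as pd

# usecols accepts Excel-style column letters and ranges, e.g. "C:CZ" or "A,C,E:F".
df = pd.read_excel("requirements.xlsx", usecols="C:CZ")  # placeholder workbook
print(df.head())
```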
81cd0b74e611532f8421d0dfb22266cd789a5a6a
|
add in oauth_keys to dev (bug 858813)
|
solitude/settings/sites/dev/db.py
|
solitude/settings/sites/dev/db.py
|
"""private_base will be populated from puppet and placed in this directory"""
import logging
import dj_database_url
import private_base as private
from solitude.settings import base
from django_sha2 import get_password_hashers
ADMINS = ()
ALLOWED_HOSTS = ['payments-dev.allizom.org', 'localhost']
DATABASES = {}
DATABASES['default'] = dj_database_url.parse(private.DATABASES_DEFAULT_URL)
DATABASES['default']['ENGINE'] = 'django.db.backends.mysql'
DATABASES['default']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DEBUG = False
DEBUG_PROPAGATE_EXCEPTIONS = False
HMAC_KEYS = private.HMAC_KEYS
PASSWORD_HASHERS = get_password_hashers(base.BASE_PASSWORD_HASHERS, HMAC_KEYS)
LOG_LEVEL = logging.DEBUG
SECRET_KEY = private.SECRET_KEY
SENTRY_DSN = private.SENTRY_DSN
STATSD_HOST = private.STATSD_HOST
STATSD_PORT = private.STATSD_PORT
STATSD_PREFIX = private.STATSD_PREFIX
SYSLOG_TAG = 'http_app_payments_dev'
TEMPLATE_DEBUG = DEBUG
# Solitude specific settings.
AES_KEYS = private.AES_KEYS
CLEANSED_SETTINGS_ACCESS = True
CLIENT_JWT_KEYS = private.CLIENT_JWT_KEYS
PAYPAL_PROXY = private.PAYPAL_PROXY
PAYPAL_URL_WHITELIST = ('https://marketplace-dev.allizom.org',)
BANGO_PROXY = private.BANGO_PROXY
SITE_URL = 'https://payments-dev.allizom.org'
|
Python
| 0
|
@@ -1045,27 +1045,29 @@
True%0ACLIENT_
-JWT
+OAUTH
_KEYS = priv
@@ -1081,11 +1081,13 @@
ENT_
-JWT
+OAUTH
_KEY
|
a8972158ab12ec3aef3a224264f1145d5ed910e4
|
Move intervention to corner nav and outwork to footer
|
derrida/outwork/migrations/0002_initial_pages.py
|
derrida/outwork/migrations/0002_initial_pages.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-17 20:12
from __future__ import unicode_literals
from collections import OrderedDict
from attrdict import AttrDict
from django.conf import settings
from django.db import migrations
from django.utils import timezone
from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
initial_homepage_content = '''
<section>
<q>And yet did we not know that... only in the book, coming back to it unceasingly, drawing all our resources from it, could we indefinitely designate the writing beyond the book?</q>
<p class="quote-cite">Jacques Derrida, <i>Writing and Difference</i></p>
<p>“Derrida’s Margins” unpacks the library contained within each of Derrida’s published works, starting with the landmark 1967 text <i>De la grammatologie</i>. Additional texts will be added as the project continues.</p>
<p>This scholarly tool enables researchers to approach the development of deconstruction in an unprecedented way by exploring the relationship between Derrida’s thought and his reading practices. Users may browse or search: Derrida’s personal copies of books that are referenced in <i>De la grammatologie</i>; the nearly one thousand <strong>references</strong> (quotations, citations, footnotes, or epigraphs) found in the pages of <i>De la grammatologie</i>; and the <strong>interventions</strong> Derrida made in his books (annotations, marginalia, bookmarks, tipped-in pages, notes, etc.) that correspond to each reference. The website also provides data <strong>visualizations</strong> of Derrida’s references.
<p>The Library of Jacques Derrida is housed in Princeton University Library’s Rare Books and Special Collections.</p>
</section>
<section class="credits">
<ul class="credits__group">
<li class="credits__role">Project Director</li>
<li class="credits__name">Katie Chenoweth</li>
</ul>
<ul class="credits__group">
<li class="credits__role">Project Manager</li>
<li class="credits__name">Alex Raiffe</li>
</ul>
<ul class="credits__group">
<li class="credits__role">Consultant</li>
<li class="credits__name">Jean Bauer</li>
</ul>
<ul class="credits__group">
<li class="credits__role">Graduate Research Assistants</li>
<li class="credits__name"><span>Renée Altergott</span></li>
<li class="credits__name"><span>Chad Córdova</span></li>
<li class="credits__name"><span>Austin Hancock</span></li>
<li class="credits__name">Chloé Vettier</li>
</ul>
<ul class="credits__group">
<li class="credits__role">Advisors</li>
<li class="credits__name">Avital Ronell</li>
<li class="credits__name">Eduardo Cadava</li>
</ul>
</section>
'''
#: primary nav (also in footer)
NAV_PAGES = OrderedDict([
('library', {'title': 'Derrida\'s Library',
'description': 'Browse Derrida’s personal copies of the books referenced in his published works.'}),
('references', {'title': 'Reference List',
'description': 'Explore the quotations and references in Derrida’s works.'}),
('references/histogram/de-la-grammatologie', {'title': 'Visualization',
'description': 'Explore all quotations and references in Derrida’s works.',
'meta_title': 'References by Section ‖ Visualization'}),
('outwork', {'title': 'Outwork'})
])
#: footer nav
FOOTER_PAGES = OrderedDict([
('/', {'title': 'Derrida\'s Margins',
'description': 'An online research tool for Derrida’s annotations that provides a behind-the-scenes look at his reading practices and the philosophy of deconstruction',
'content': initial_homepage_content}),
('cite', {'title': 'How to Cite'}),
('contact', {'title': 'Contact'}),
])
OTHER_PAGES = OrderedDict([
('interventions', {'title': 'Interventions',
'description': 'Explore the traces of Derrida’s reading.'}),
('references/histogram', {'title': 'Visualization by Author',
'description': 'Explore all quotations and references in Derrida’s works.',
'meta_title': 'References by Author ‖ Visualization'})
])
def create_pages(apps, schema_editor):
RichTextPage = apps.get_model('pages', 'RichTextPage')
Site = apps.get_model('sites', 'Site')
site = Site.objects.get_or_create(pk=settings.SITE_ID)[0]
now = timezone.now()
def create_page(slug, info, index, menus=''):
RichTextPage.objects.create(slug=slug, title=info.title,
_meta_title=info.get('meta_title', ''),
titles=info.title, created=now, updated=now,
description=info.get('description', ''),
gen_description=not info.get('description', ''),
status=CONTENT_STATUS_PUBLISHED,
content=info.get('content', '[placeholder]'),
site=site, publish_date=now, in_menus=menus,
content_model="richtextpage", _order=index)
index = 1
for slug, info in FOOTER_PAGES.items():
create_page(slug, AttrDict(info), index, menus="3")
index += 1
for slug, info in NAV_PAGES.items():
create_page(slug, AttrDict(info), index, menus="1,3")
index += 1
for slug, info in OTHER_PAGES.items():
create_page(slug, AttrDict(info), index)
index += 1
def remove_pages(apps, schema_editor):
RichTextPage = apps.get_model('pages', 'RichTextPage')
slugs = list(FOOTER_PAGES.keys()) + list(NAV_PAGES.keys()) \
+ list(OTHER_PAGES.keys())
RichTextPage.objects.filter(slug__in=slugs).delete()
class Migration(migrations.Migration):
dependencies = [
('sites', '0002_alter_domain_unique'),
('outwork', '0001_initial'),
]
operations = [
migrations.RunPython(create_pages, reverse_code=remove_pages),
]
|
Python
| 0
|
@@ -3313,23 +3313,29 @@
,%0A ('
-outwork
+interventions
', %7B'tit
@@ -3344,18 +3344,92 @@
': '
-Outwork
+Interventions',%0A 'description': 'Explore the traces of Derrida%E2%80%99s reading.
'%7D)
+,
%0A%5D)%0A
@@ -3734,24 +3734,63 @@
_content%7D),%0A
+ ('outwork', %7B'title': 'Outwork'%7D),%0A
('cite',
@@ -3892,126 +3892,8 @@
t(%5B%0A
- ('interventions', %7B'title': 'Interventions',%0A 'description': 'Explore the traces of Derrida%E2%80%99s reading.'%7D),%0A
|
6e3ad1b462a95a5d51ab7b56e475b334fed32260
|
add "--style=symbol_index" for outputting symbox index
|
shotglass/app/management/commands/show.py
|
shotglass/app/management/commands/show.py
|
import collections
import os
import sys
from django.core.management.base import BaseCommand
from django.db.models import Avg, Max, Sum
from app.models import SourceFile, Symbol
def show_file_index(projects):
FORMAT = '{name:20} {path:50} {num_lines:>5}'
print FORMAT.format(name='NAME', path='PATH', num_lines="LINES")
for project in projects:
# pylint: disable=no-member
files = SourceFile.objects.filter(
project=project).order_by('path')
for file_ in files:
print FORMAT.format(**vars(file_))
def show_dir_index(projects):
"""
for each directory, output total number of source lines
"""
FORMAT = '{dir_path:50} {num_lines:>5}'
for project in projects:
data = SourceFile.objects.filter(
project=project).values_list('path', 'num_lines')
count = collections.Counter()
for path,num_lines in data:
count[os.path.dirname(path)] = num_lines
for dir_path,num_lines in sorted(count.iteritems()):
print FORMAT.format(**locals())
# XX V1
def show_symbol_index(projects):
FORMAT = '{name:30} {path}:{line_number}'
def fun_symbol(sym):
return sym.name[0] != '_'
for project in projects:
# pylint: disable=no-member
symbols = SourceLine.objects.filter(
project=project).order_by('name')
for symbol in filter(fun_symbol, symbols):
print FORMAT.format(**symbol.__dict__)
def show_summary(projects):
HEADER = '{:30} {:>9} {:>6} {:>7} {:>10} {:>9}'.format(
'project', 'files', 'avglen', 'maxlen', 'total', 'symbols')
FORMAT = (
'{project:30}'
' {num_files:9,}'
' {avg_length:6,}'
' {max_length:7,}'
' {total_length:10,}'
' {num_symbols:9,}')
print HEADER
for project in projects:
proj_qs = SourceFile.objects.filter(project=project)
num_files = proj_qs.count()
avg_length = int(proj_qs.aggregate(Avg('num_lines')).values()[0])
max_length = proj_qs.aggregate(Max('num_lines')).values()[0]
total_length = proj_qs.aggregate(Sum('num_lines')).values()[0]
proj_symbols = Symbol.objects.filter(source_file__project=project)
num_symbols = proj_symbols.count()
print FORMAT.format(**locals())
class Command(BaseCommand):
help = 'beer'
def add_arguments(self, parser):
parser.add_argument('projects', nargs='*')
parser.add_argument('--style', default='summary')
# if 0: # XX V1
# parser.add_argument('--index', action="store_true")
# else:
# parser.add_argument('--dirindex',
# action="store_true",
# help="show source lines per directory")
# parser.add_argument('--index', default=True)
def handle(self, *args, **options):
all_projects = SourceFile.projects()
projects = options['projects']
if not projects:
print('PROJECTS: {}'.format(', '.join(all_projects)))
print('or "all"')
return
if projects == ['all']:
projects = all_projects
try:
style_fname = 'show_{}'.format(options['style'])
infofunc = globals()[style_fname]
except KeyError:
sys.exit("{}: unknown style".format(options['style']))
infofunc(projects)
# if options['dirindex']:
# show_dir_index(projects)
# elif options['index']:
# show_file_index(projects)
# else:
# show_summary(projects)
|
Python
| 0.000487
|
@@ -1072,24 +1072,17 @@
als())%0A%0A
-# XX V1
%0A
+
def show
@@ -1125,36 +1125,20 @@
= '%7B
-name
+0
:30%7D %7B
-path%7D:%7Bline_number
+1%7D:%7B2
%7D'%0A%0A
@@ -1270,16 +1270,21 @@
+proj_
symbols
@@ -1290,17 +1290,13 @@
= S
-ourceLine
+ymbol
.obj
@@ -1312,32 +1312,45 @@
er(%0A
+source_file__
project=project)
@@ -1341,32 +1341,98 @@
project=project)
+%0A fun_symbols = proj_symbols.exclude(label__startswith='_')
.order_by('name'
@@ -1426,20 +1426,21 @@
der_by('
-name
+label
')%0A
@@ -1456,23 +1456,16 @@
mbol in
-filter(
fun_symb
@@ -1470,18 +1470,9 @@
mbol
-, symbols)
+s
:%0A
@@ -1505,25 +1505,65 @@
mat(
-**
symbol.
-__dict__
+label, symbol.source_file.path, symbol.line_number
)%0A%0A%0A
|
701bb76a49ec88ba1352ca85e75c43e5f204ab73
|
remove commented code committed by mistake.
|
depsolver/solver/core.py
|
depsolver/solver/core.py
|
import six
from depsolver.errors \
import \
DepSolverError
from depsolver.compat \
import \
OrderedDict
from depsolver.bundled.traitlets \
import \
HasTraits, Instance
from depsolver.solver.decisions \
import \
DecisionsSet
from depsolver.operations \
import \
Install, Remove, Update
from depsolver.pool \
import \
Pool
from depsolver.repository \
import \
Repository
from depsolver.request \
import \
Request
from depsolver.solver.policy \
import \
DefaultPolicy
from depsolver.solver.rules_generator \
import \
RulesGenerator
# FIXME: the php model for this class is pretty broken as many attributes are
# initialized outside the ctor. Fix this.
class Solver(HasTraits):
policy = Instance(DefaultPolicy)
pool = Instance(Pool)
installed_repository = Instance(Repository)
def __init__(self, pool, installed_repository, **kw):
policy = DefaultPolicy()
super(Solver, self).__init__(self, policy=policy, pool=pool,
installed_repository=installed_repository, **kw)
def solve(self, request):
        decisions, rules = self._prepare_solver(request)
self._make_assertion_rules_decisions(decisions, rules)
return decisions
def _setup_install_map(self, jobs):
installed_map = OrderedDict()
for package in self.installed_repository.iter_packages():
installed_map[package.id] = package
for job in jobs:
if job.job_type == "update":
raise NotImplementedError()
elif job.job_type == "upgrade":
raise NotImplementedError()
elif job.job_type == "install":
if len(job.packages) < 1:
raise NotImplementedError()
return installed_map
def _prepare_solver(self, request):
installed_map = self._setup_install_map(request.jobs)
decisions = DecisionsSet(self.pool)
watch_graph = RulesWatchGraph()
rules_generator = RulesGenerator(self.pool, request, installed_map)
rules = list(rules_generator.iter_rules())
return decisions, rules
def _make_assertion_rules_decisions(self, decisions, rules):
decision_start = len(decisions) - 1
rule_index = 0
while rule_index < len(rules):
rule = rules[rule_index]
rule_index += 1
if not rule.is_assertion or not rule.enabled:
#print "\trule {} is an assertion or disabled".format(rule)
continue
literals = rule.literals
literal = literals[0]
#print "\tlooking at literal {}".format(literal)
if not decisions.is_decided(abs(literal)):
decisions.decide(literal, 1, rule)
                continue
if decisions.satisfy(literal):
continue
if rule.rule_type == "learnt":
rule.enable = False
continue
raise NotImplementedError()
|
Python
| 0
|
@@ -2510,84 +2510,8 @@
ed:%0A
- #print %22%5Ctrule %7B%7D is an assertion or disabled%22.format(rule)%0A
@@ -2608,69 +2608,8 @@
0%5D%0A%0A
- #print %22%5Ctlooking at literal %7B%7D%22.format(literal)%0A
|
b068180ef61b3e865bc0eb325e7722ddffa72bce
|
Add OTP_VERIFY_URL default value
|
droll/settings.py
|
droll/settings.py
|
"""
Django settings for droll project.
Generated by 'django-admin startproject' using Django 1.8.13.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
import sys
import random
import string
import dj_database_url
from . import utils
env = utils.Env()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.get('SECRET_KEY') or ''.join(random.choice(
''.join([string.ascii_letters,
string.digits,
string.punctuation])) for _ in range(50))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.get_bool('DEBUG')
TESTING = 'test' in sys.argv
PRODUCTION = not DEBUG and not TESTING
if PRODUCTION:
MAILTO = env.get('MAILTO')
if MAILTO:
ADMINS = (('Admin', MAILTO), )
DEFAULT_FROM_EMAIL = env.get('DEFAULT_FROM_EMAIL') or 'webmaster@localhost'
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'access',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
if DEBUG and not TESTING:
INSTALLED_APPS += (
'django_extensions',
)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
ROOT_URLCONF = 'droll.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'droll.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config()
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = env.get('LANGUAGE_CODE') or 'en-us'
TIME_ZONE = env.get('TIME_ZONE') or 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'access.User'
OTP_SESSION_FLAG_NAME = 'otp_verified'
|
Python
| 0.000005
|
@@ -3402,16 +3402,49 @@
ss.User'
+%0A%0AOTP_VERIFY_URL = '/access/otp/'
%0AOTP_SES
|
3422c60553c6cd1a746e7c6e39a3e2ac707b0cf7
|
return only mean predictions for cost and omit variance
|
robo/acquisition/EnvEntropySearch.py
|
robo/acquisition/EnvEntropySearch.py
|
'''
Created on Jun 8, 2015
@author: Aaron Klein
'''
import emcee
import numpy as np
from robo.acquisition.LogEI import LogEI
from robo.acquisition.EntropyMC import EntropyMC
from scipy import stats
class EnvEntropySearch(EntropyMC):
'''
classdocs
'''
def __init__(self, model, cost_model, X_lower, X_upper, compute_incumbent, is_env_variable, n_representer=10, n_hals_vals=100, n_func_samples=100, **kwargs):
self.cost_model = cost_model
self.n_dims = X_lower.shape[0]
self.is_env_variable = is_env_variable
super(EnvEntropySearch, self).__init__(model, X_lower, X_upper, compute_incumbent, Nb=n_representer, Nf=n_func_samples, Np=n_hals_vals)
def update(self, model, cost_model):
self.cost_model = cost_model
super(EnvEntropySearch, self).update(model)
def compute(self, X, derivative=False):
# Predict the costs for this configuration
cost = self.cost_model.predict(X)
# Compute fantasized pmin
new_pmin = self.change_pmin_by_innovation(X, self.f)
# Compute acquisition value
H_old = np.sum(np.multiply(self.pmin, (self.logP + self.lmb)))
H_new = np.sum(np.multiply(new_pmin, (np.log(new_pmin) + self.lmb)))
loss = np.array([[-H_new + H_old]])
acquisition_value = loss / cost
return acquisition_value
def update_representer_points(self):
        #TODO: We might want to start the sampling of the representer points from the incumbent here? Or maybe from a Sobol grid?
super(EnvEntropySearch, self).update_representer_points()
# Project representer points to subspace
self.zb[:, self.is_env_variable == 1] = self.X_upper[self.is_env_variable == 1]
|
Python
| 0.000001
|
@@ -835,23 +835,24 @@
def
-compute
+__call__
(self, X
@@ -965,16 +965,19 @@
edict(X)
+%5B0%5D
%0A%0A
@@ -1295,16 +1295,17 @@
_old%5D%5D)%0A
+%0A
@@ -1659,16 +1659,16 @@
ubspace%0A
+
@@ -1747,9 +1747,8 @@
e == 1%5D%0A
-%0A
|
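Context for the `[0]` indexing the diff introduces: GP-style model wrappers typically return a `(mean, variance)` pair from `predict`, and the acquisition value here only needs the mean cost. A toy stand-in (not RoBO's actual model class) makes the shapes explicit:

```python
import numpy as np

class ToyCostModel:
    """Stand-in for a GP wrapper whose predict() returns (mean, variance)."""

    def predict(self, X):
        mean = np.ones(X.shape[0])
        variance = 0.1 * np.ones(X.shape[0])
        return mean, variance

X = np.zeros((4, 2))
cost = ToyCostModel().predict(X)[0]  # keep the means, drop the variances
print(cost)
```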
2d8c8cce8885b24ac1766912ee7bd1897900ae0c
|
fix up Comment model
|
dwitter/models.py
|
dwitter/models.py
|
from django.db import models
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.db.models.signals import pre_delete
def get_sentinel_user():
users = get_user_model().objects
return users.get_or_create(username='[deleted]', is_active=False)[0]
@receiver(pre_delete, sender=User)
def soft_delete_user_dweets(instance, **kwargs):
for dweet in Dweet.objects.filter(_author=instance):
dweet.delete()
class NotDeletedDweetManager(models.Manager):
def get_queryset(self):
base_queryset = super(NotDeletedDweetManager, self).get_queryset()
return base_queryset.filter(deleted=False)
class Dweet(models.Model):
code = models.TextField()
posted = models.DateTimeField()
reply_to = models.ForeignKey("self", on_delete=models.DO_NOTHING,
null=True, blank=True)
likes = models.ManyToManyField(User, related_name="liked")
hotness = models.FloatField(default=1.0)
deleted = models.BooleanField(default=False)
_author = models.ForeignKey(User, on_delete=models.SET_NULL,
null=True, blank=True)
@property
def author(self):
return self._author or get_sentinel_user()
@author.setter
def author(self, value):
self._author = value
objects = NotDeletedDweetManager()
with_deleted = models.Manager()
def delete(self):
self.deleted = True
self.save()
def __unicode__(self):
return 'd/' + str(self.id) + ' (' + self.author.username + ')'
class Meta:
ordering = ('-posted',)
class Comment(models.Model):
text = models.TextField()
posted = models.DateTimeField()
reply_to = models.ForeignKey(Dweet, on_delete=models.CASCADE,
related_name="comments")
author = models.ForeignKey(User, on_delete=models.CASCADE)
def __unicode__(self):
return ('c/' +
str(self.id) +
' (' +
self.author.username +
') to ' +
str(self.reply_to))
class Meta:
ordering = ('-posted',)
|
Python
| 0.000001
|
@@ -1889,16 +1889,17 @@
s%22)%0A
+_
author =
@@ -1950,16 +1950,159 @@
SCADE)%0A%0A
+ @property%0A def author(self):%0A return self._author%0A%0A @author.setter%0A def author(self, value):%0A self._author = value%0A%0A
def
|
142a893a006d3226d8d4caa512c8fdfffd9acb31
|
Fix CLI dispatch to work with previous refactoring
|
dotfiles/cli.py
|
dotfiles/cli.py
|
# -*- coding: utf-8 -*-
"""
dotfiles.cli
This module provides the CLI interface to dotfiles.
"""
import os
from . import core
import ConfigParser
from optparse import OptionParser, OptionGroup
DEFAULT_REPO = "~/Dotfiles"
def method_list(object):
return [method for method in dir(object)
if callable(getattr(object, method))]
def parse_args():
parser = OptionParser(usage="%prog ACTION [OPTION...] [FILE...]")
parser.set_defaults(config=os.path.expanduser("~/.dotfilesrc"))
parser.set_defaults(ignore=[])
parser.set_defaults(externals={})
parser.add_option("-v", "--version", action="store_true",
dest="show_version", default=False,
help="show version number and exit")
parser.add_option("-f", "--force", action="store_true", dest="force",
default=False, help="ignore unmanaged dotfiles (use with --sync)")
# OptionParser expands ~ constructions
parser.add_option("-R", "--repo", type="string", dest="repo",
help="set repository location (default is %s)" % DEFAULT_REPO)
parser.add_option("-p", "--prefix", type="string", dest="prefix",
help="set prefix character (default is None)")
parser.add_option("-C", "--config", type="string", dest="config",
help="set configuration file location (default is ~/.dotfilesrc)")
action_group = OptionGroup(parser, "Actions")
action_group.add_option("-a", "--add", action="store_const", dest="action",
const="add", help="add dotfile(s) to the repository")
action_group.add_option("-c", "--check", action="store_const",
dest="action", const="check", help="check dotfiles repository")
action_group.add_option("-l", "--list", action="store_const",
dest="action", const="list",
help="list currently managed dotfiles")
action_group.add_option("-r", "--remove", action="store_const",
dest="action", const="remove",
help="remove dotfile(s) from the repository")
action_group.add_option("-s", "--sync", action="store_const",
dest="action", const="sync", help="update dotfile symlinks")
parser.add_option_group(action_group)
(opts, args) = parser.parse_args()
# Skip checking if the repository exists here. The user may have specified
# a command line argument or a configuration file, which will be examined
# next.
return (opts, args)
def main():
(opts, args) = parse_args()
if opts.show_version:
print 'dotfiles v%s' % core.__version__
exit(0)
config_defaults = {
'repository': opts.repo,
'prefix': opts.prefix,
'ignore': opts.ignore,
'externals': opts.externals}
parser = ConfigParser.SafeConfigParser(config_defaults)
parser.read(opts.config)
if 'dotfiles' in parser.sections():
if not opts.repo and parser.get('dotfiles', 'repository'):
opts.repo = os.path.expanduser(parser.get('dotfiles', 'repository'))
if not opts.prefix and parser.get('dotfiles', 'prefix'):
opts.prefix = parser.get('dotfiles', 'prefix')
if not opts.ignore and parser.get('dotfiles', 'ignore'):
opts.ignore = eval(parser.get('dotfiles', 'ignore'))
if not opts.externals and parser.get('dotfiles', 'externals'):
opts.externals = eval(parser.get('dotfiles', 'externals'))
if not opts.repo:
opts.repo = os.path.expanduser(DEFAULT_REPO)
if not opts.prefix:
opts.prefix = ''
if not os.path.exists(opts.repo):
if opts.repo == os.path.expanduser(DEFAULT_REPO):
print "Error: Could not find dotfiles repository \"%s\"" % DEFAULT_REPO
missing_default_repo()
else:
print "Error: Could not find dotfiles repository \"%s\"" % opts.repo
exit(-1)
if not opts.action:
print "Error: An action is required. Type 'dotfiles -h' to see detailed usage information."
exit(-1)
getattr(core.Dotfiles(location=opts.repo,
prefix=opts.prefix,
ignore=opts.ignore,
externals=opts.externals,
force=opts.force), opts.action)(files=args)
def missing_default_repo():
"""Print a helpful message when the default repository is missing."""
print """
If this is your first time running dotfiles, you must first create
a repository. By default, dotfiles will look for '{0}'. Something like:
$ mkdir {0}
is all you need to do. If you don't like the default, you can put your
repository wherever you like. You have two choices once you've created your
repository. You can specify the path to the repository on the command line
using the '-R' flag. Alternatively, you can create a configuration file at
'~/.dotfilesrc' and place the path to your repository in there. The contents
would look like:
[dotfiles]
repository = {0}
Type 'dotfiles -h' to see detailed usage information.""".format(DEFAULT_REPO)
|
Python
| 0
|
@@ -4049,24 +4049,27 @@
1)%0A%0A
-getattr(
+dotfiles =
core.Dot
@@ -4078,240 +4078,652 @@
les(
-location=opts.repo,%0A prefix=opts.prefix,%0A ignore=opts.ignore,%0A externals=opts.externals,%0A force=opts.force), opts.action)(files=args
+home='~/', repo=opts.repo, prefix=opts.prefix,%0A ignore=opts.ignore, externals=opts.externals)%0A%0A if opts.action in %5B'list', 'check'%5D:%0A getattr(dotfiles, opts.action)()%0A%0A elif opts.action in %5B'add', 'remove'%5D:%0A getattr(dotfiles, opts.action)(args)%0A%0A elif opts.action == 'sync':%0A dotfiles.sync(opts.force)%0A%0A elif opts.action == 'move':%0A if len(args) %3E 1:%0A print %22Error: Move cannot handle multiple targets.%22%0A exit(-1)%0A if opts.repo != args%5B0%5D:%0A dotfiles.move(args%5B0%5D)%0A%0A else:%0A print %22Error: Something truly terrible has happened.%22%0A exit(-1
)%0A%0A%0A
|
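The diff above replaces a single `getattr` dispatch with explicit branches because the actions no longer share one call signature. A table-driven alternative keeps the differing signatures visible without the if/elif ladder; this is a sketch with a dummy class, not the dotfiles code itself:

```python
class Dotfiles:
    def list(self):
        print("listing")

    def add(self, files):
        print("adding", files)

    def sync(self, force):
        print("syncing, force =", force)


# One adapter per action, so each action's argument handling stays explicit.
DISPATCH = {
    "list": lambda d, opts: d.list(),
    "add": lambda d, opts: d.add(opts["files"]),
    "sync": lambda d, opts: d.sync(opts["force"]),
}

DISPATCH["sync"](Dotfiles(), {"files": [], "force": True})
```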
3c87476c4d0861638ff7c3d6950377c75d3057dd
|
read true positions
|
streams/io/core.py
|
streams/io/core.py
|
# coding: utf-8
""" Code for helping to select stars from the nearby Sgr wraps. """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os, sys
import gc
# Third-party
import h5py
import numpy as np
import numexpr
import astropy.units as u
from astropy.io import ascii
from astropy.table import vstack, Table, Column
import astropy.coordinates as coord
# Project
from ..coordinates.frame import heliocentric, galactocentric
from ..dynamics import Particle, ObservedParticle, Orbit
__all__ = ["read_table", "read_hdf5"]
def read_table(filename, expr=None, N=None):
_table = np.genfromtxt(filename, names=True)
if expr is not None:
idx = numexpr.evaluate(str(expr), _table)
_table = _table[idx]
if N is not None and N > 0:
np.random.shuffle(_table)
_table = _table[:min(N,len(_table))]
return _table
def read_hdf5(h5file):
""" Read particles and satellite from a given HDF5 file. """
return_dict = dict()
with h5py.File(h5file, "r") as f:
try:
ptcl = f["particles"]
satl = f["satellite"]
except KeyError:
raise ValueError("Invalid HDF5 file. Missing 'particles' or "
"'satellite' group.")
if "error" in ptcl.keys():
p = ObservedParticle(ptcl["data"].value.T, ptcl["error"].value.T,
frame=heliocentric,
units=[u.Unit(x) for x in ptcl["units"]])
p.tub = ptcl["tub"].value
else:
p = Particle(ptcl["data"].value.T,
frame=heliocentric,
units=[u.Unit(x) for x in ptcl["units"]])
p.tub = ptcl["tub"].value
return_dict["particles"] = p
if "error" in satl.keys():
s = ObservedParticle(satl["data"].value.T, satl["error"].value.T,
frame=heliocentric,
units=[u.Unit(x) for x in satl["units"]])
else:
s = Particle(satl["data"].value.T,
frame=heliocentric,
units=[u.Unit(x) for x in satl["units"]])
s.m = satl["m"].value
s.v_disp = satl["v_disp"].value
return_dict["satellite"] = s
if "simulation" in f.keys():
return_dict["t1"] = float(f["simulation"]["t1"].value)
return_dict["t2"] = float(f["simulation"]["t2"].value)
return return_dict
|
Python
| 0.000938
|
@@ -1574,32 +1574,280 @@
cl%5B%22tub%22%5D.value%0A
+ return_dict%5B%22true_particles%22%5D = Particle(ptcl%5B%22true_data%22%5D.value.T,%0A frame=heliocentric,%0A units=%5Bu.Unit(x) for x in ptcl%5B%22units%22%5D%5D)%0A
else:%0A
@@ -2312,32 +2312,281 @@
satl%5B%22units%22%5D%5D)%0A
+ return_dict%5B%22true_satellite%22%5D = Particle(satl%5B%22true_data%22%5D.value.T,%0A frame=heliocentric,%0A units=%5Bu.Unit(x) for x in satl%5B%22units%22%5D%5D)%0A%0A
else:%0A
|
51522b68b55a0a5a8a879ef6592960b3d8a465bb
|
Add raise_for_status to scraper responses
|
scraper.py
|
scraper.py
|
import csv
import sys
import time
import lxml.html
import progressbar
import requests
from cache import Cache
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_0) ' \
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.80 ' \
'Safari/537.36'
RATE_LIMIT_SLEEP = 0.01
CACHE = Cache()
def find_ship_url(mmsi):
"""Find the URL of a ship with this MMSI."""
if CACHE.has('url', mmsi):
return CACHE.get('url', mmsi)
print('>', 'Finding ship:', mmsi)
url = 'https://www.vesselfinder.com/vessels/livesearch'
params = {'term': str(mmsi)}
headers = {'User-Agent': USER_AGENT}
response = requests.get(url, params=params, headers=headers)
json = response.json()
def find_result():
for result in json['list']:
if result['MMSI'] == str(mmsi):
return result
data = find_result()
if data is None:
url = None
else:
name = data['NAME'] \
.replace(' & ', '-') \
.replace(' ', '-') \
.replace(':', '-') \
.replace('+', '-') \
.replace('.', '') \
.replace('!', '') \
.replace('(', '') \
.replace(')', '') \
.replace('*', '') \
.replace(',', '') \
.replace('/', '') \
.replace('\\', '') \
.replace("'", '') \
.replace('"', '') \
.replace('#', '') \
.replace('=', '') \
.replace('[', '') \
.replace(']', '') \
.replace('^', '') \
.replace(';', '') \
.replace('?', '') \
.replace('>', '') \
.replace('<', '')
url = 'https://www.vesselfinder.com/vessels/{0}-IMO-{1}-MMSI-{2}' \
.format(name, data['IMO'], data['MMSI'])
CACHE.set('url', mmsi, url)
time.sleep(RATE_LIMIT_SLEEP) # rate limiting
return url
def scrape_information(mmsi, imo, name):
"""Scrape information about a particular ship."""
if CACHE.has('ship', mmsi):
return CACHE.get('ship', mmsi)
url = find_ship_url(mmsi)
if url is None:
return None
print('>', 'Scraping:', url)
response = requests.get(url, headers={'User-Agent': USER_AGENT})
content = response.content
page = lxml.html.fromstring(content)
vehicle = page.xpath('//article[@itemtype="http://schema.org/Vehicle"]')[0]
vesselfinder_name = vehicle.xpath('//h1[@itemprop="name"]')[0].text_content()
gross_tonnage = vehicle.xpath('//*[@itemprop="weight"]')[0].text_content()
net_tonnage = vehicle.xpath('//*[@itemprop="cargoVolume"]')[0].text_content()
imo = None if imo == 0 else imo
gross_tonnage = None if gross_tonnage == 'N/A' else int(gross_tonnage[:-2])
net_tonnage = None if net_tonnage == 'N/A' else int(net_tonnage[:-2])
info = (mmsi, imo, name, vesselfinder_name, gross_tonnage, net_tonnage)
CACHE.set('ship', mmsi, info)
time.sleep(RATE_LIMIT_SLEEP) # rate limiting
return info
def main():
"""Run the script."""
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('input_csv')
parser.add_argument('output_csv')
args = parser.parse_args()
with open(args.input_csv) as infile, open(args.output_csv, 'w') as outfile:
reader = csv.reader(infile)
writer = csv.writer(outfile)
rows = list(reader)
progress = progressbar.ProgressBar(redirect_stdout=True)
for row in progress(rows):
mmsi = int(row[0])
imo = int(row[1])
name = row[2].strip()
info = scrape_information(mmsi, imo, name)
if info is None:
print('!', mmsi, 'not found.')
else:
writer.writerow(info)
sys.stdout.flush()
if __name__ == '__main__':
main()
|
Python
| 0.000001
|
@@ -2289,16 +2289,49 @@
AGENT%7D)%0A
+ response.raise_for_status()%0A%0A
cont
|
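The one-line addition above is the standard requests guard: `raise_for_status()` turns 4xx/5xx responses into a `requests.HTTPError` instead of letting the scraper parse an error page as HTML. Minimal usage, with a placeholder URL:

```python
import requests

resp = requests.get("https://example.com/vessels", timeout=10)
resp.raise_for_status()  # raises requests.HTTPError on 4xx/5xx responses
content = resp.content
```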
c98662bf577afa1dcf1b847193dd2e856a90e864
|
Fix flopped windows comment
|
examples/app-two-programs.py
|
examples/app-two-programs.py
|
# -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 Nicolas P. Rougier. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
import numpy as np
from glumpy import gl, app, gloo
vertex = """
attribute vec2 a_position;
void main() {
gl_Position = vec4(a_position, 0.0, 1.0);
gl_PointSize = 30.0;
}
"""
fragment1 = """
void main() {
gl_FragColor = vec4(0.0, 0.0, 1.0, 1.0);
}
"""
fragment2 = """
void main() {
gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
}
"""
program1 = gloo.Program(vertex, fragment1) # blue on the left
program1['a_position'] = np.zeros((1,2),dtype=np.float32) + 0.5
program2 = gloo.Program(vertex, fragment2) # red on the right
program2['a_position'] = np.zeros((1,2),dtype=np.float32) - 0.5
window = app.Window()
@window.event
def on_draw(dt):
window.clear()
program1.draw(gl.GL_POINTS)
program2.draw(gl.GL_POINTS)
app.run()
|
Python
| 0
|
@@ -676,11 +676,12 @@
the
-lef
+righ
t%0Apr
@@ -791,36 +791,35 @@
) # red on the
-righ
+lef
t%0Aprogram2%5B'a_po
|
818f1431fe67120967f385ee090d06c1038e48c4
|
Add project level imports so users don't have to worry about the module names.
|
dimensionful/__init__.py
|
dimensionful/__init__.py
|
Python
| 0
|
@@ -0,0 +1,105 @@
+from units import Unit%0Afrom quantity import Quantity%0A%0Afrom common_units import *%0Afrom constants import *%0A
|
|
d511bd84295a8d07f886ca8482cfea25dfca0519
|
fix comment
|
skimage/transform/tests/test_geometric.py
|
skimage/transform/tests/test_geometric.py
|
import numpy as np
from numpy.testing import assert_equal, assert_array_almost_equal
from skimage.transform._geometric import _stackcopy
from skimage.transform import (estimate_transform, SimilarityTransform,
AffineTransform, ProjectiveTransform,
PolynomialTransform)
SRC = np.array([
[-12.3705, -10.5075],
[-10.7865, 15.4305],
[8.6985, 10.8675],
[11.4975, -9.5715],
[7.8435, 7.4835],
[-5.3325, 6.5025],
[6.7905, -6.3765],
[-6.1695, -0.8235],
])
DST = np.array([
[0, 0],
[0, 5800],
[4900, 5800],
[4900, 0],
[4479, 4580],
[1176, 3660],
[3754, 790],
[1024, 1931],
])
def test_stackcopy():
layers = 4
x = np.empty((3, 3, layers))
y = np.eye(3, 3)
_stackcopy(x, y)
for i in range(layers):
assert_array_almost_equal(x[..., i], y)
def test_similarity_estimation():
# exact solution
tform = estimate_transform('similarity', SRC[:2, :], DST[:2, :])
assert_array_almost_equal(tform(SRC[:2, :]), DST[:2, :])
assert_equal(tform._matrix[0, 0], tform._matrix[1, 1])
assert_equal(tform._matrix[0, 1], - tform._matrix[1, 0])
# over-determined
tform2 = estimate_transform('similarity', SRC, DST)
assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC)
assert_equal(tform2._matrix[0, 0], tform2._matrix[1, 1])
assert_equal(tform2._matrix[0, 1], - tform2._matrix[1, 0])
# via estimate method
tform3 = SimilarityTransform()
tform3.estimate(SRC, DST)
assert_array_almost_equal(tform3._matrix, tform2._matrix)
def test_similarity_init():
# init with implicit parameters
scale = 0.1
rotation = 1
translation = (1, 1)
tform = SimilarityTransform(scale=scale, rotation=rotation,
translation=translation)
assert_array_almost_equal(tform.scale, scale)
assert_array_almost_equal(tform.rotation, rotation)
assert_array_almost_equal(tform.translation, translation)
# init with transformation matrix
tform2 = SimilarityTransform(tform._matrix)
assert_array_almost_equal(tform2.scale, scale)
assert_array_almost_equal(tform2.rotation, rotation)
assert_array_almost_equal(tform2.translation, translation)
def test_affine_estimation():
# exact solution
tform = estimate_transform('affine', SRC[:3, :], DST[:3, :])
assert_array_almost_equal(tform(SRC[:3, :]), DST[:3, :])
# over-determined
tform2 = estimate_transform('affine', SRC, DST)
assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC)
# via estimate method
tform3 = AffineTransform()
tform3.estimate(SRC, DST)
assert_array_almost_equal(tform3._matrix, tform2._matrix)
def test_affine_init():
# init with implicit parameters
scale = (0.1, 0.13)
rotation = 1
shear = 0.1
translation = (1, 1)
tform = AffineTransform(scale=scale, rotation=rotation, shear=shear,
translation=translation)
assert_array_almost_equal(tform.scale, scale)
assert_array_almost_equal(tform.rotation, rotation)
assert_array_almost_equal(tform.shear, shear)
assert_array_almost_equal(tform.translation, translation)
# init with transformation matrix
tform2 = AffineTransform(tform._matrix)
assert_array_almost_equal(tform2.scale, scale)
assert_array_almost_equal(tform2.rotation, rotation)
assert_array_almost_equal(tform2.shear, shear)
assert_array_almost_equal(tform2.translation, translation)
def test_projective_estimation():
# exact solution
tform = estimate_transform('projective', SRC[:4, :], DST[:4, :])
assert_array_almost_equal(tform(SRC[:4, :]), DST[:4, :])
# over-determined
tform2 = estimate_transform('projective', SRC, DST)
assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC)
# via estimate method
tform3 = ProjectiveTransform()
tform3.estimate(SRC, DST)
assert_array_almost_equal(tform3._matrix, tform2._matrix)
def test_projective_init():
tform = estimate_transform('projective', SRC, DST)
# init with transformation matrix
tform2 = ProjectiveTransform(tform._matrix)
assert_array_almost_equal(tform2._matrix, tform._matrix)
def test_polynomial_estimation():
# over-determined
tform = estimate_transform('polynomial', SRC, DST, order=10)
assert_array_almost_equal(tform(SRC), DST, 6)
# via estimate method
tform2 = PolynomialTransform()
tform2.estimate(SRC, DST, order=10)
assert_array_almost_equal(tform2._params, tform._params)
def test_polynomial_init():
tform = estimate_transform('polynomial', SRC, DST, order=10)
# init with transformation matrix
tform2 = PolynomialTransform(tform._params)
assert_array_almost_equal(tform2._params, tform._params)
def test_union():
tform1 = SimilarityTransform(scale=0.1, rotation=0.3)
tform2 = SimilarityTransform(scale=0.1, rotation=0.9)
tform3 = SimilarityTransform(scale=0.1 ** 2, rotation=0.3 + 0.9)
tform = tform1 + tform2
assert_array_almost_equal(tform._matrix, tform3._matrix)
if __name__ == "__main__":
from numpy.testing import run_module_suite
run_module_suite()
|
Python
| 0
|
@@ -4593,17 +4593,9 @@
ms)%0A
- %0A
+%0A
%0Adef
@@ -4707,38 +4707,42 @@
transformation
-matrix
+parameters
%0A tform2 = Po
|
c713aa4953063cb6e64ecaaaed464f2a441482bb
|
Add testing scaffolding.
|
lilkv/testsuite/test_basic.py
|
lilkv/testsuite/test_basic.py
|
# -*- coding: utf-8 -*-
"""
lilkv.testsuite.basic
Test lilkv basic functionality.
"""
from lilkv.testsuite import LilKVTestCase
from lilkv.keyspace import Keyspace
from lilkv.columnfamily import ColumnFamily
from lilkv.column import Column
class BasicTests(LilKVTestCase):
"""Baseclass for testing out the application.
"""
def test_keyspace_creation(self):
ks = Keyspace("Test Keyspace")
self.assert_in("Test Keyspace", Keyspace.KEYSPACES)
def test_columnfamily_creation(self):
ks = Keyspace("Test Keyspace")
ks.create_columnfamily("daily_visitors")
self.assert_in("daily_visitors", ks.columnfamilies)
|
Python
| 0
|
@@ -667,8 +667,149 @@
milies)%0A
+%0A def test_adding_data(self):%0A pass%0A%0A def test_reading_data(self):%0A pass%0A%0A def test_deleting_data(self):%0A pass%0A
|
f9e4d580eb01d16a2ed48d64b7413dedb1c3bdb1
|
make requested changes
|
cupy/indexing/insert.py
|
cupy/indexing/insert.py
|
import numpy
import cupy
from cupy import core
def place(arr, mask, vals):
"""Change elements of an array based on conditional and input values.
This function uses the first N elements of `vals`, where N is the number
of true values in `mask`.
Args:
arr (cupy.ndarray): Array to put data into.
mask (array-like): Boolean mask array. Must have the same size as `a`.
vals (array-like): Values to put into `a`. Only the first
N elements are used, where N is the number of True values in
`mask`. If `vals` is smaller than N, it will be repeated, and if
elements of `a` are to be masked, this sequence must be non-empty.
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
.. warning::
This function may synchronize the device.
.. seealso:: :func:`numpy.place`
"""
# TODO(niboshi): Avoid nonzero which may synchronize the device.
mask = cupy.asarray(mask)
if arr.size != mask.size:
raise ValueError('Mask and data must be the same size.')
vals = cupy.asarray(vals)
mask_indices = mask.ravel().nonzero()[0] # may synchronize
if mask_indices.size == 0:
return
if vals.size == 0:
raise ValueError('Cannot insert from an empty array.')
arr.put(mask_indices, vals, mode='wrap')
def put(a, ind, v, mode='wrap'):
"""Replaces specified elements of an array with given values.
Args:
a (cupy.ndarray): Target array.
ind (array-like): Target indices, interpreted as integers.
v (array-like): Values to place in `a` at target indices.
If `v` is shorter than `ind` it will be repeated as necessary.
mode (str): How out-of-bounds indices will behave. Its value must be
either `'raise'`, `'wrap'` or `'clip'`. Otherwise,
:class:`TypeError` is raised.
.. note::
Default `mode` is set to `'wrap'` to avoid unintended performance drop.
If you need NumPy's behavior, please pass `mode='raise'` manually.
.. seealso:: :func:`numpy.put`
"""
a.put(ind, v, mode=mode)
_putmask_kernel = core._kernel.ElementwiseKernel(
'Q mask, raw S values, uint64 len_vals', 'T out',
'''
if (mask) out = (T) values[i % len_vals];
''',
'putmask_kernel'
)
def putmask(a, mask, values):
"""
Changes elements of an array inplace, based on conditional mask and
input values.
Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
If `values` is not the same size as `a` and `mask` then it will repeat.
Args
a (cupy.ndarray): Target array.
mask (cupy.ndarray): Boolean mask array. It has to be
the same shape as `a`.
values (cupy.ndarray or scalar): Values to put into `a` where `mask`
is True. If `values` is smaller than `a`, then it will be
repeated.
Examples
--------
>>> x = cupy.arange(6).reshape(2, 3)
>>> cupy.putmask(x, x>2, x**2)
>>> x
array([[ 0, 1, 2],
[ 9, 16, 25]])
If `values` is smaller than `a` it is repeated:
>>> x = cupy.arange(6)
>>> cupy.putmask(x, x>2, [-33, -44])
>>> x
array([ 0, 1, 2, -44, -33, -44])
.. seealso:: :func:`numpy.putmask`
"""
if not isinstance(a, cupy.ndarray):
raise ValueError('`a` should be of type cupy.ndarray')
if not isinstance(mask, cupy.ndarray):
raise ValueError('`mask` should be of type cupy.ndarray')
if not (cupy.isscalar(values) or isinstance(values, cupy.ndarray)):
raise ValueError('`values` should be of type cupy.ndarray')
if not a.shape == mask.shape:
raise ValueError('mask and data must be the same size')
if mask.dtype.kind == 'c':
mask = mask.astype(bool)
if cupy.isscalar(values):
a[mask] = values
elif not numpy.can_cast(values.dtype, a.dtype):
raise TypeError('Cannot cast array data from'
' {} to {} according to the rule \'safe\''
.format(values.dtype, a.dtype))
elif a.shape == values.shape:
a[mask] = values[mask]
else:
values = values.ravel()
_putmask_kernel(mask, values, len(values), a)
def fill_diagonal(a, val, wrap=False):
"""Fills the main diagonal of the given array of any dimensionality.
For an array `a` with ``a.ndim > 2``, the diagonal is the list of
locations with indices ``a[i, i, ..., i]`` all identical. This function
modifies the input array in-place, it does not return a value.
Args:
a (cupy.ndarray): The array, at least 2-D.
val (scalar): The value to be written on the diagonal.
Its type must be compatible with that of the array a.
wrap (bool): If specified, the diagonal is "wrapped" after N columns.
This affects only tall matrices.
Examples
--------
>>> a = cupy.zeros((3, 3), int)
>>> cupy.fill_diagonal(a, 5)
>>> a
array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5]])
.. seealso:: :func:`numpy.fill_diagonal`
"""
# The followings are imported from the original numpy
if a.ndim < 2:
raise ValueError('array must be at least 2-d')
end = None
if a.ndim == 2:
step = a.shape[1] + 1
if not wrap:
end = a.shape[1] * a.shape[1]
else:
if not numpy.alltrue(numpy.diff(a.shape) == 0):
raise ValueError('All dimensions of input must be of equal length')
step = 1 + numpy.cumprod(a.shape[:-1]).sum()
a.flat[:end:step] = val
|
Python
| 0
|
@@ -2255,16 +2255,8 @@
ore.
-_kernel.
Elem
@@ -2493,32 +2493,34 @@
place, based on
+a
conditional mask
@@ -2704,16 +2704,17 @@
Args
+:
%0A
@@ -3444,36 +3444,35 @@
:%0A raise
-Valu
+Typ
eError('%60a%60 shou
@@ -3549,36 +3549,35 @@
:%0A raise
-Valu
+Typ
eError('%60mask%60 s
@@ -3686,36 +3686,35 @@
:%0A raise
-Valu
+Typ
eError('%60values%60
|
34fa0d9a3285c8161a34bcf39dae0474c4fd34c4
|
allow members to be specified on instantiation
|
salt/states/libcloud_loadbalancer.py
|
salt/states/libcloud_loadbalancer.py
|
# -*- coding: utf-8 -*-
'''
Apache Libcloud Load Balancer State
===================================
Manage load balancers using libcloud
:codeauthor: :email:`Anthony Shaw <anthonyshaw@apache.org>`
Apache Libcloud load balancer management. For a full list
of supported clouds, see http://libcloud.readthedocs.io/en/latest/loadbalancer/supported_providers.html
Clouds include Amazon ELB, ALB, Google, Aliyun, CloudStack, Softlayer
.. versionadded:: Oxygen
:configuration:
This module uses a configuration profile for one or multiple Cloud providers
.. code-block:: yaml
libcloud_loadbalancer:
profile_test1:
driver: gce
key: GOOG0123456789ABCXYZ
secret: mysecret
profile_test2:
driver: alb
key: 12345
secret: mysecret
:depends: apache-libcloud
'''
# Import Python Libs
from __future__ import absolute_import
import logging
# Import salt libs
import salt.utils
import salt.utils.compat
log = logging.getLogger(__name__)
def __virtual__():
return True
def __init__(opts):
salt.utils.compat.pack_dunder(__name__)
def state_result(result, message, name, changes=None):
if changes is None:
changes = {}
return {'result': result,
'comment': message,
'name': name,
'changes': changes}
def balancer_present(name, port, protocol, profile, algorithm=None):
'''
Ensures a load balancer is present.
:param name: Load Balancer name
:type name: ``str``
:param port: Port the load balancer should listen on, defaults to 80
:type port: ``str``
:param protocol: Loadbalancer protocol, defaults to http.
:type protocol: ``str``
:param profile: The profile key
:type profile: ``str``
:param algorithm: Load balancing algorithm, defaults to ROUND_ROBIN. See Algorithm type
in Libcloud documentation for a full listing.
:type algorithm: ``str``
'''
balancers = __salt__['libcloud_loadbalancer.list_balancers'](profile)
match = [z for z in balancers if z['name'] == name]
if len(match) > 0:
return state_result(True, "Balancer already exists", name)
else:
balancer = __salt__['libcloud_loadbalancer.create_balancer'](name, port, protocol, profile, algorithm=algorithm)
return state_result(True, "Created new load balancer", name, balancer)
def balancer_absent(name, profile):
'''
Ensures a load balancer is absent.
:param name: Load Balancer name
:type name: ``str``
:param profile: The profile key
:type profile: ``str``
'''
balancers = __salt__['libcloud_loadbalancer.list_balancers'](profile)
match = [z for z in balancers if z['name'] == name]
if len(match) == 0:
return state_result(True, "Balancer already absent", name)
else:
result = __salt__['libcloud_loadbalancer.delete_balancer'](match['id'], profile)
return state_result(result, "Deleted load balancer", name)
def member_present(ip, port, balancer_id, profile):
'''
Ensure a load balancer member is present
:param ip: IP address for the new member
:type ip: ``str``
:param port: Port for the new member
:type port: ``int``
:param balancer_id: id of a load balancer you want to attach the member to
:type balancer_id: ``str``
:param profile: The profile key
:type profile: ``str``
'''
existing_members = __salt__['libcloud_loadbalancer.list_balancer_members'](balancer_id, profile)
for member in existing_members:
if member['ip'] == ip and member['port'] == port:
return state_result(True, "Member already present", balancer_id)
member = __salt__['libcloud_loadbalancer.balancer_attach_member'](balancer_id, ip, port, profile)
return state_result(True, "Member added to balancer, id: {0}".format(member['id']), balancer_id, member)
def member_absent(ip, port, balancer_id, profile):
'''
Ensure a load balancer member is absent, based on IP and Port
:param ip: IP address for the member
:type ip: ``str``
:param port: Port for the member
:type port: ``int``
:param balancer_id: id of a load balancer you want to detach the member from
:type balancer_id: ``str``
:param profile: The profile key
:type profile: ``str``
'''
existing_members = __salt__['libcloud_loadbalancer.list_balancer_members'](balancer_id, profile)
for member in existing_members:
if member['ip'] == ip and member['port'] == port:
result = __salt__['libcloud_loadbalancer.balancer_detach_member'](balancer_id, member['id'], profile)
return state_result(result, "Member removed", balancer_id)
return state_result(True, "Member already absent", balancer_id)
|
Python
| 0
|
@@ -1439,27 +1439,41 @@
gorithm=None
+, members=None
):%0A
-
'''%0A
@@ -1993,32 +1993,157 @@
gorithm: %60%60str%60%60
+%0A%0A :param members: An optional list of members to create on deployment%0A :type members: %60%60list%60%60 of %60%60dict%60%60 (ip, port)
%0A '''%0A bal
@@ -2357,32 +2357,245 @@
name)%0A else:%0A
+ starting_members = None%0A if members is not None:%0A starting_members = %5B%5D%0A for m in members:%0A starting_members.append(Member(id=None, ip=m%5B'ip'%5D, port=m%5B'port'%5D))%0A
balancer
@@ -2647,16 +2647,29 @@
ancer'%5D(
+%0A
name, po
@@ -2673,32 +2673,44 @@
port, protocol,
+%0A
profile, algori
@@ -2722,16 +2722,54 @@
lgorithm
+,%0A members=starting_members
)%0A
|
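One caveat about the diff above: it constructs `Member` objects, but the old contents shown never import that class, so the full commit presumably also added the import. For reference, a hedged sketch of the libcloud type it relies on (the sample dict is illustrative):

```python
from libcloud.loadbalancer.base import Member

# id=None lets the driver assign an id; ip and port come from the state dicts.
members = [Member(id=None, ip=m["ip"], port=m["port"])
           for m in [{"ip": "10.0.0.5", "port": 8080}]]
print(members[0].ip, members[0].port)
```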
573d1e2498467da357a79bb865683e162e16eb14
|
increment version to 0.13.1
|
gym/version.py
|
gym/version.py
|
VERSION = '0.13.0'
|
Python
| 0.000683
|
@@ -9,11 +9,11 @@
= '0.13.
-0
+1
'%0A
|
d83e8f8702755766c1c15a35297b40d25051d55e
|
Bump version
|
gym/version.py
|
gym/version.py
|
VERSION = '0.4.8'
|
Python
| 0
|
@@ -12,7 +12,7 @@
0.4.
-8
+9
'%0A
|
3f6d5aa5ee18462ff3a9a0e366ea48f508c93a58
|
move to target
|
character.py
|
character.py
|
import pyglet # noqa
from pyglet.gl import * # noqa
from utility import load_image, mainbatches, window_width, window_height, calc_vel_xy # noqa
from collide import * # noqa
import random
import math
# from controller import Controller
def mean(inp):
return sum(inp) / float(len(inp))
green_sprite = pyglet.image.SolidColorImagePattern(color=(30, 255, 30, 255))
blue_sprite = pyglet.image.SolidColorImagePattern(color=(30, 30, 255, 255))
class Character(object):
def __init__(self, assets):
self.assets = assets
self.sprites = [pyglet.sprite.Sprite(pyglet.image.create(10, 10, green_sprite),
500, 500, batch=mainbatches[2])
]
for i in range(9):
self.make_sprite()
# self.collision = SpriteCollision(self.sprite)
# self.controller = Controller(self)
def update(self):
self.random_move()
m = self.sprite_mean()
for i in self.sprites:
if math.hypot(m[0] - i.x, m[1] - i.y) > 10:
ret = calc_vel_xy(m[0], m[1], i.x, i.y, 3)
i.x += ret[0]
i.y += ret[1]
def cleanup(self):
try:
self.sprite.delete()
except:
pass
try:
self.assets.modules['characters'].remove(self)
except:
pass
try:
del self
except:
pass
def random_move(self):
v = random.choice(self.sprites)
v.x += random.randint(-5, 5)
v.y += random.randint(-5, 5)
def sprite_mean(self):
x = mean([m.x for m in self.sprites])
y = mean([m.x for m in self.sprites])
return (x, y)
def make_sprite(self):
m = self.sprite_mean()
self.sprites.append(
pyglet.sprite.Sprite(pyglet.image.create(10, 10, green_sprite),
m[0], m[1], batch=mainbatches[2]) # noqa
)
|
Python
| 0
|
@@ -722,24 +722,51 @@
ke_sprite()%0A
+ self.target = None%0A
# se
@@ -869,32 +869,400 @@
f update(self):%0A
+ if not self.target:%0A self.target = (random.randint(50, window_width - 50), random.randint(50, window_height - 50))%0A print self.target%0A print self.sprite_mean()%0A print %22###%22%0A i = random.choice(self.sprites)%0A ret = calc_vel_xy(self.target%5B0%5D, self.target%5B1%5D, i.x, i.y, 3)%0A i.x += ret%5B0%5D%0A i.y += ret%5B1%5D%0A
self.ran
|
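The new movement logic above leans on `calc_vel_xy`, which is imported from `utility` but never shown in this record. A plausible reconstruction, offered as an assumption rather than the repo's actual helper: scale the unit vector toward the target by the speed.

```python
import math

def calc_vel_xy(tx, ty, x, y, speed):
    """Velocity components moving (x, y) toward (tx, ty) at `speed` per tick.

    Illustrative guess at utility.calc_vel_xy, matching how update() calls it.
    """
    dist = math.hypot(tx - x, ty - y)
    if dist == 0:
        return 0.0, 0.0
    return speed * (tx - x) / dist, speed * (ty - y) / dist

print(calc_vel_xy(100, 100, 0, 0, 3))  # approximately (2.121, 2.121)
```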
bb9aafe090d71c2a25eb3f3a6d591a205dbb7e5e
|
bump to 0.9.5
|
dvc/__init__.py
|
dvc/__init__.py
|
"""
DVC
----
Make your data science projects reproducible and shareable.
"""
import os
VERSION = '0.9.4'
if os.getenv('APPVEYOR_REPO_TAG', '').lower() != 'true' and os.getenv('TRAVIS_TAG', '') == '':
# Dynamically update version
try:
import git
repo = git.Repo(os.curdir, search_parent_directories=True)
sha = repo.head.object.hexsha
short_sha = repo.git.rev_parse(sha, short=6)
dirty = '.mod' if repo.is_dirty() else ''
VERSION = '{}+{}{}'.format(VERSION, short_sha, dirty)
except:
pass
__version__ = VERSION
|
Python
| 0
|
@@ -100,9 +100,9 @@
0.9.
-4
+5
'%0A%0Ai
|
3b65181e01aeaa884f3386f131aa2fcf0b09a3c7
|
remove unneeded <??:?? for unknown ETA
|
dvc/progress.py
|
dvc/progress.py
|
"""Manages progress bars for DVC repo."""
import logging
import sys
from threading import RLock
from funcy import merge
from tqdm import tqdm
from dvc.utils import env2bool
logger = logging.getLogger(__name__)
tqdm.set_lock(RLock())
class Tqdm(tqdm):
"""
maximum-compatibility tqdm-based progressbars
"""
BAR_FMT_DEFAULT = (
"{percentage:3.0f}% {desc}|{bar}|"
"{n_fmt}/{total_fmt}"
" [{elapsed}<{remaining}, {rate_fmt:>11}{postfix}]"
)
# nested bars should have fixed bar widths to align nicely
BAR_FMT_DEFAULT_NESTED = (
"{percentage:3.0f}%|{bar:10}|{desc:{ncols_desc}.{ncols_desc}}"
"{n_fmt}/{total_fmt}"
" [{elapsed}<{remaining}, {rate_fmt:>11}{postfix}]"
)
BAR_FMT_NOTOTAL = (
"{desc:{ncols_desc}.{ncols_desc}}{n_fmt}"
" [{elapsed}<??:??, {rate_fmt:>11}{postfix}]"
)
BYTES_DEFAULTS = dict(
unit="B", unit_scale=True, unit_divisor=1024, miniters=1
)
def __init__(
self,
iterable=None,
disable=None,
level=logging.ERROR,
desc=None,
leave=False,
bar_format=None,
bytes=False, # pylint: disable=W0622
file=None,
total=None,
**kwargs
):
"""
bytes : shortcut for
`unit='B', unit_scale=True, unit_divisor=1024, miniters=1`
desc : persists after `close()`
level : effective logging level for determining `disable`;
used only if `disable` is unspecified
disable : If (default: None), will be determined by logging level.
May be overridden to `True` due to non-TTY status.
Skip override by specifying env var `DVC_IGNORE_ISATTY`.
kwargs : anything accepted by `tqdm.tqdm()`
"""
kwargs = kwargs.copy()
if bytes:
kwargs = merge(self.BYTES_DEFAULTS, kwargs)
else:
kwargs.setdefault("unit_scale", total > 999 if total else True)
if file is None:
file = sys.stderr
self.desc_persist = desc
# auto-disable based on `logger.level`
if disable is None:
disable = logger.getEffectiveLevel() > level
# auto-disable based on TTY
if (
not disable
and not env2bool("DVC_IGNORE_ISATTY")
and hasattr(file, "isatty")
):
disable = not file.isatty()
super().__init__(
iterable=iterable,
disable=disable,
leave=leave,
desc=desc,
bar_format="!",
lock_args=(False,),
total=total,
**kwargs
)
if bar_format is None:
if self.__len__():
self.bar_format = (
self.BAR_FMT_DEFAULT_NESTED
if self.pos
else self.BAR_FMT_DEFAULT
)
else:
self.bar_format = self.BAR_FMT_NOTOTAL
else:
self.bar_format = bar_format
self.refresh()
def update_desc(self, desc, n=1):
"""
Calls `set_description_str(desc)` and `update(n)`
"""
self.set_description_str(desc, refresh=False)
self.update(n)
def update_to(self, current, total=None):
if total:
self.total = total # pylint: disable=W0613,W0201
self.update(current - self.n)
def close(self):
if self.desc_persist is not None:
self.set_description_str(self.desc_persist, refresh=False)
for fmt in [
"<??:??", # unknown ETA
"<{remaining}", # 00:00 ETA
]:
self.bar_format = self.bar_format.replace(fmt, "")
for fmt in [
"|{bar:10}|", # completed progressbar
]:
self.bar_format = self.bar_format.replace(fmt, " ")
super().close()
@property
def format_dict(self):
"""inject `ncols_desc` to fill the display width (`ncols`)"""
d = super().format_dict
ncols = d["ncols"] or 80
ncols_desc = ncols - len(self.format_meter(ncols_desc=1, **d)) + 1
ncols_desc = max(ncols_desc, 0)
if ncols_desc:
d["ncols_desc"] = ncols_desc
else:
# work-around for zero-width description
d["ncols_desc"] = 1
d["prefix"] = ""
return d
|
Python
| 0.999655
|
@@ -835,22 +835,16 @@
elapsed%7D
-%3C??:??
, %7Brate_
|
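The diff above deletes the hard-coded "<??:??" placeholder from the no-total format string; close() already strips any leftover ETA fragment with plain string replacement, so the literal was redundant. A minimal sketch of that cleanup step (the format string is illustrative, not tqdm's exact default):

def strip_eta(bar_format):
    # Remove both the unknown-ETA literal and the templated ETA fragment.
    for fragment in ("<??:??", "<{remaining}"):
        bar_format = bar_format.replace(fragment, "")
    return bar_format

assert strip_eta("{n_fmt} [{elapsed}<{remaining}]") == "{n_fmt} [{elapsed}]"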
14d1466400ee37c41b5b57146666913a4feb5254
|
Add device information to solarlog integration (#43680)
|
homeassistant/components/solarlog/sensor.py
|
homeassistant/components/solarlog/sensor.py
|
"""Platform for solarlog sensors."""
import logging
from urllib.parse import ParseResult, urlparse
from requests.exceptions import HTTPError, Timeout
from sunwatcher.solarlog.solarlog import SolarLog
from homeassistant.const import CONF_HOST
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from .const import SCAN_INTERVAL, SENSOR_TYPES
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the solarlog platform."""
_LOGGER.warning(
"Configuration of the solarlog platform in configuration.yaml is deprecated "
"in Home Assistant 0.119. Please remove entry from your configuration"
)
async def async_setup_entry(hass, entry, async_add_entities):
"""Add solarlog entry."""
host_entry = entry.data[CONF_HOST]
device_name = entry.title
url = urlparse(host_entry, "http")
netloc = url.netloc or url.path
path = url.path if url.netloc else ""
url = ParseResult("http", netloc, path, *url[3:])
host = url.geturl()
try:
api = await hass.async_add_executor_job(SolarLog, host)
_LOGGER.debug("Connected to Solar-Log device, setting up entries")
except (OSError, HTTPError, Timeout):
_LOGGER.error(
"Could not connect to Solar-Log device at %s, check host ip address", host
)
return
# Create solarlog data service which will retrieve and update the data.
data = await hass.async_add_executor_job(SolarlogData, hass, api, host)
# Create a new sensor for each sensor type.
entities = []
for sensor_key in SENSOR_TYPES:
sensor = SolarlogSensor(entry.entry_id, device_name, sensor_key, data)
entities.append(sensor)
async_add_entities(entities, True)
return True
class SolarlogSensor(Entity):
"""Representation of a Sensor."""
def __init__(self, entry_id, device_name, sensor_key, data):
"""Initialize the sensor."""
self.device_name = device_name
self.sensor_key = sensor_key
self.data = data
self.entry_id = entry_id
self._state = None
self._json_key = SENSOR_TYPES[self.sensor_key][0]
self._label = SENSOR_TYPES[self.sensor_key][1]
self._unit_of_measurement = SENSOR_TYPES[self.sensor_key][2]
self._icon = SENSOR_TYPES[self.sensor_key][3]
@property
def unique_id(self):
"""Return the unique id."""
return f"{self.entry_id}_{self.sensor_key}"
@property
def name(self):
"""Return the name of the sensor."""
return f"{self.device_name} {self._label}"
@property
def unit_of_measurement(self):
"""Return the state of the sensor."""
return self._unit_of_measurement
@property
def icon(self):
"""Return the sensor icon."""
return self._icon
@property
def state(self):
"""Return the state of the sensor."""
return self._state
def update(self):
"""Get the latest data from the sensor and update the state."""
self.data.update()
self._state = self.data.data[self._json_key]
class SolarlogData:
"""Get and update the latest data."""
def __init__(self, hass, api, host):
"""Initialize the data object."""
self.api = api
self.hass = hass
self.host = host
self.update = Throttle(SCAN_INTERVAL)(self._update)
self.data = {}
def _update(self):
"""Update the data from the SolarLog device."""
try:
self.api = SolarLog(self.host)
response = self.api.time
_LOGGER.debug(
"Connection to Solarlog successful. Retrieving latest Solarlog update of %s",
response,
)
except (OSError, Timeout, HTTPError):
_LOGGER.error("Connection error, Could not retrieve data, skipping update")
return
try:
self.data["TIME"] = self.api.time
self.data["powerAC"] = self.api.power_ac
self.data["powerDC"] = self.api.power_dc
self.data["voltageAC"] = self.api.voltage_ac
self.data["voltageDC"] = self.api.voltage_dc
self.data["yieldDAY"] = self.api.yield_day / 1000
self.data["yieldYESTERDAY"] = self.api.yield_yesterday / 1000
self.data["yieldMONTH"] = self.api.yield_month / 1000
self.data["yieldYEAR"] = self.api.yield_year / 1000
self.data["yieldTOTAL"] = self.api.yield_total / 1000
self.data["consumptionAC"] = self.api.consumption_ac
self.data["consumptionDAY"] = self.api.consumption_day / 1000
self.data["consumptionYESTERDAY"] = self.api.consumption_yesterday / 1000
self.data["consumptionMONTH"] = self.api.consumption_month / 1000
self.data["consumptionYEAR"] = self.api.consumption_year / 1000
self.data["consumptionTOTAL"] = self.api.consumption_total / 1000
self.data["totalPOWER"] = self.api.total_power
self.data["alternatorLOSS"] = self.api.alternator_loss
self.data["CAPACITY"] = round(self.api.capacity * 100, 0)
self.data["EFFICIENCY"] = round(self.api.efficiency * 100, 0)
self.data["powerAVAILABLE"] = self.api.power_available
self.data["USAGE"] = self.api.usage
_LOGGER.debug("Updated Solarlog overview data: %s", self.data)
except AttributeError:
_LOGGER.error("Missing details data in Solarlog response")
|
Python
| 0
|
@@ -344,16 +344,24 @@
t import
+ DOMAIN,
SCAN_IN
@@ -3025,16 +3025,263 @@
_state%0A%0A
+ @property%0A def device_info(self):%0A %22%22%22Return the device information.%22%22%22%0A return %7B%0A %22identifiers%22: %7B(DOMAIN, self.entry_id)%7D,%0A %22name%22: self.device_name,%0A %22manufacturer%22: %22Solar-Log%22,%0A %7D%0A%0A
def
|
3aac2716972c49eb3b1b688cb1fad89ce690ca58
|
fix incorrect empty list condition
|
filter_log.py
|
filter_log.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 ciel <ciel@cieldeMBP>
#
# Distributed under terms of the MIT license.
"""
filter git log
"""
import codecs
from call_cmd import call
import config
def filter_log(last_commit):
commit_valid = call('git -C {} cat-file -e '.format(config.config_dic['project_path']) + last_commit)[0]
if commit_valid != 0:
return '无'
git_logs_cmd = '''git -C {} log --pretty=\"%s\" {}..HEAD'''.format(config.config_dic['project_path'], last_commit)
logs = call(git_logs_cmd)
log_has_prefix = []
prefix = config.config_dic['filter_log']['prefix']
if not prefix:
prefix = '['
for line in logs[1].split("\n"):
if line.startswith(prefix):
log_has_prefix.append(line)
if log_has_prefix.count:
return '无'
log_file = '{}log.txt'.format(config.config_dic['builds_path'])
with codecs.open(log_file, 'w', 'UTF-8') as f:
for line in log_has_prefix:
f.write('{}\n'.format(line))
with codecs.open(log_file, 'r+', 'UTF-8') as f:
flip_cmd = "sed '1!G;h;$!d' " + log_file
res = call(flip_cmd)
f.write(res[1])
with codecs.open(log_file, 'r+', 'UTF-8') as f:
add_num_cmd = """awk '{printf NR"."" "}1' """ + log_file
res = call(add_num_cmd)
f.write(res[1])
with codecs.open(log_file, 'r', 'UTF-8') as f:
return f.read()
def msg_with_intall_info(last_commit, build):
build_info = config.config_dic['build'][build]
log = filter_log(last_commit)
msg = '更新日志:' + '\n\n' + log + '\n\n' + '安装地址:' + build_info['download_url']
return msg
|
Python
| 0.999117
|
@@ -804,16 +804,20 @@
%0A if
+not
log_has_
@@ -826,14 +826,8 @@
efix
-.count
:%0A
|
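The bug fixed above is a truthiness trap: log_has_prefix.count without parentheses is a bound method object, which is always truthy, so the early return fired even when prefixed log lines existed. The fix tests the list itself. A two-line demonstration:

items = []
print(bool(items.count))  # True  - a bound method object is always truthy
print(bool(items))        # False - an empty list is falsy
if not items:             # idiomatic emptiness check used by the fix
    print("list is empty")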
ee795da3215374f30005c9daa42de6f9d581580f
|
Make finglonger output a bit easier to read
|
finglonger.py
|
finglonger.py
|
#!/usr/bin/python
import os
import sys
import subprocess
import tempfile
import yaml
def validate_config(config):
environment = config.get('environment')
if environment is None:
print "No environment set, set one in config.yaml "
sys.exit(1)
def validate_environment(config):
if os.path.isfile("envs/" + config['environment'] + "/tasks.yaml"):
pass
else:
print "Tasks file not found, are you in the right directory?"
sys.exit(1)
def git_cmd(command):
p = subprocess.Popen(command.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
return out, err
def process_task(task):
print "Finglongering..."
print task['name']
temp, temp_name = tempfile.mkstemp()
print temp_name
f = os.fdopen(temp, 'w')
f.write(task['shell'])
f.close()
os.chmod(temp_name, 0755)
p = subprocess.Popen(["/bin/bash", temp_name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
print out
print err
if __name__ == "__main__":
config_file = os.environ['HOME'] + "/.config/finglonger/config.yaml"
if os.path.isfile(config_file):
with open(config_file) as f:
config = yaml.load(f.read())
else:
print "Config file not found: {0}".format(config_file)
sys.exit(1)
validate_config(config)
validate_environment(config)
git_cmd('git checkout master')
with open("envs/" + config['environment'] + "/tasks.yaml") as f:
master_tasks = yaml.load(f.read())
git_cmd('git checkout done')
with open("envs/" + config['environment'] + "/tasks.yaml") as f:
done_tasks = yaml.load(f.read())
git_cmd('git checkout master')
print len(master_tasks)
print len(done_tasks)
for i in done_tasks:
master_tasks.remove(i)
for task in master_tasks:
process_task(task['task'])
git_cmd('git checkout done')
git_cmd('git merge master')
git_cmd('git push origin done')
git_cmd('git checkout master')
|
Python
| 0.000005
|
@@ -1854,24 +1854,43 @@
)%0A%0A print
+ %22Tasks on master%22,
len(master_
@@ -1905,16 +1905,94 @@
print
+ %22Tasks on done%22, len(done_tasks)%0A print %22Tasks to do%22, len(master_tasks) -
len(don
|
cb4f022fb1fe0780eb2e37c8fdc8ff6a4409115c
|
Test and implementation for !ptr+offset loading
|
ostester/yamlreader.py
|
ostester/yamlreader.py
|
import collections.abc
from io import StringIO
import yaml
import ast
def parse(file):
return yaml.safe_load(file)
def parse_from_string(string):
return parse(StringIO(string))
class Zeros(collections.abc.Sequence):
"""
Represents a zeroed region of memory in C
>>> yaml.load("!zeros 5")
Zeros(5)
>>> yaml.dump(Zeros(3))
"!zeros '3'\\n"
>>> list(Zeros(7))
[0, 0, 0, 0, 0, 0, 0]
>>> Zeros(3)[-3]
0
>>> Zeros(3)[-2]
0
>>> Zeros(4)[1:3]
[0, 0]
"""
yaml_tag='!zeros'
def __init__(self, len):
self.len = len
@staticmethod
def from_yaml_loader(loader, node):
return Zeros(int(node.value))
@staticmethod
def yaml_representer(dumper, data):
return dumper.represent_scalar(Zeros.yaml_tag, str(data.len))
def __getitem__(self, key):
if isinstance(key, slice):
return [0 for key in range(*key.indices(self.len))]
elif key > self.len-1 or key < -self.len:
raise IndexError('Zeros index out of range')
return 0
def __len__(self):
return self.len
def __repr__(self):
return 'Zeros({})'.format(repr(self.len))
yaml.add_representer(Zeros, Zeros.yaml_representer)
yaml.add_constructor(Zeros.yaml_tag, Zeros.from_yaml_loader)
class Pointer():
"""
Represents a pointer into an array.
>>> yaml.load('!ptr value')
Pointer('value')
>>> yaml.dump(Pointer("value"))
"!ptr 'value'\\n"
"""
yaml_tag = '!ptr'
def __init__(self, data, offset=0):
self.data = data
self.offset = offset
@staticmethod
def from_yaml_loader(loader, node):
return Pointer(node.value)
@staticmethod
def yaml_representer(dumper, data):
return dumper.represent_scalar(Pointer.yaml_tag, data.data)
def __repr__(self):
return 'Pointer({})'.format(repr(self.data))
yaml.add_representer(Pointer, Pointer.yaml_representer)
yaml.add_constructor(Pointer.yaml_tag, Pointer.from_yaml_loader)
def transform(yaml):
pass
|
Python
| 0
|
@@ -1426,24 +1426,89 @@
er('value')%0A
+ %3E%3E%3E yaml.load('!ptr array+3')%0A Pointer('array', offset=3)%0A
%3E%3E%3E yaml
@@ -1531,16 +1531,16 @@
alue%22))%0A
-
%22!pt
@@ -1671,22 +1671,27 @@
ffset =
+int(
offset
+)
%0A%0A @s
@@ -1734,32 +1734,85 @@
(loader, node):%0A
+ args = map(str.strip, node.value.split('+'))%0A
return P
@@ -1818,26 +1818,21 @@
Pointer(
-node.value
+*args
)%0A%0A @
@@ -1989,28 +1989,156 @@
-return 'Pointer(%7B%7D)'
+if not self.offset:%0A format_str = 'Pointer(%7B%7D)'%0A else:%0A format_str = 'Pointer(%7B%7D, offset=%7B%7D)'%0A return format_str
.for
@@ -2156,16 +2156,29 @@
lf.data)
+, self.offset
)%0A%0Ayaml.
|
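The diff above teaches the !ptr constructor to accept an optional "+offset" suffix by splitting the scalar on '+' and stripping whitespace. The parsing step in isolation, independent of PyYAML (function name is illustrative):

def parse_ptr(scalar):
    # Split '!ptr' payloads like 'array+3' into (name, offset).
    parts = [p.strip() for p in scalar.split('+')]
    offset = int(parts[1]) if len(parts) > 1 else 0
    return parts[0], offset

assert parse_ptr("value") == ("value", 0)
assert parse_ptr("array + 3") == ("array", 3)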
fe9e6e0335716333c7cbdf6d11a737125551dc5f
|
Use get_terminal_size directly from shutil on newer Python version.
|
halo/_utils.py
|
halo/_utils.py
|
# -*- coding: utf-8 -*-
"""Utilities for Halo library.
"""
import codecs
import platform
import six
from backports.shutil_get_terminal_size import get_terminal_size
from colorama import init
from termcolor import colored
init(autoreset=True)
def is_supported():
"""Check whether operating system supports main symbols or not.
Returns
-------
boolean
Whether operating system supports main symbols or not
"""
os_arch = platform.system()
if os_arch != 'Windows':
return True
return False
def get_environment():
"""Get the environment in which halo is running
Returns
-------
str
Environment name
"""
try:
from IPython import get_ipython
except ImportError:
return 'terminal'
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell': # Jupyter notebook or qtconsole
return 'jupyter'
elif shell == 'TerminalInteractiveShell': # Terminal running IPython
return 'ipython'
else:
return 'terminal' # Other type (?)
except NameError:
return 'terminal'
def colored_frame(frame, color):
"""Color the frame with given color and returns.
Parameters
----------
frame : str
Frame to be colored
color : str
Color to be applied
Returns
-------
str
Colored frame
"""
return colored(frame, color, attrs=['bold'])
def is_text_type(text):
"""Check if given parameter is a string or not
Parameters
----------
text : *
Parameter to be checked for text type
Returns
-------
bool
Whether parameter is a string or not
"""
if isinstance(text, six.text_type) or isinstance(text, six.string_types):
return True
return False
def decode_utf_8_text(text):
"""Decode the text from utf-8 format
Parameters
----------
text : str
String to be decoded
Returns
-------
str
Decoded string
"""
try:
return codecs.decode(text, 'utf-8')
except:
return text
def get_terminal_columns():
"""Determine the amount of available columns in the terminal
Returns
-------
int
Terminal width
"""
terminal_size = get_terminal_size()
# If column size is 0 either we are not connected
# to a terminal or something else went wrong. Fallback to 80.
if terminal_size.columns == 0:
return 80
else:
return terminal_size.columns
|
Python
| 0
|
@@ -93,16 +93,123 @@
ort six%0A
+from sys import version_info%0Aif version_info %3E= (3, 3):%0A from shutil import get_terminal_size%0Aelse:%0A
from bac
|
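The patch above selects the import at module load time: shutil.get_terminal_size is part of the standard library from Python 3.3 onward, so the backport is only imported on older interpreters. The guard in isolation:

from sys import version_info

if version_info >= (3, 3):
    from shutil import get_terminal_size
else:  # only reached on Python 2 / very early 3.x, where the backport is required
    from backports.shutil_get_terminal_size import get_terminal_size

print(get_terminal_size().columns)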
3329b260fbea858dcfe3f6f6a9ff365467352d1f
|
optimize sum_lines to reduce time consumption
|
fileprocess/filesline.py
|
fileprocess/filesline.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
r"""
# .---. .-----------
# / \ __ / ------
# / / \( )/ ----- (`-') _ _(`-') <-. (`-')_
# ////// '\/ ` --- ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // : : --- (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / / / `\/ '-- | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# // //..\\ (| '--. | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
# '//||\\` | `---. | '-' / ' '-' ' | | \ | `-/ /`
# ''`` `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - edonyzpc@gmail.com
#
# twitter : @edonyzpc
#
# Last modified: 2017-09-15 15:33
#
# Filename: filesline.py
#
# Description: All Rights Are Reserved
#
"""
import sys
import os
from dirlist import DirList
class FilesLine(DirList):
"""generate the line number of files located in directory
"""
def __init__(self, directory):
super(FilesLine, self).__init__(directory)
self.filesline = 0
def sum_lines(self):
pass
if __name__ == "__main__":
tmp = DirList('/Users/edony/coding/toolkitem')
#print(tmp.dirlist)
#print(sys.path)
#print(os.path.split(os.path.realpath(__file__)))
tmp1 = FilesLine('/Users/edony/coding/toolkitem')
print(tmp1.dirlist)
|
Python
| 0.012633
|
@@ -1346,12 +1346,421 @@
-pass
+# TODO(edony): optimize algorithm of sum_lines method%0A filesname = %5B%5D%0A for item_dir in self.dirlist.keys():%0A for item_file in self.dirlist%5Bitem_dir%5D%5B1%5D:%0A filesname.append(item_dir + '/' + item_file)%0A for filename in filesname:%0A with open(filename, 'rb') as filebuf:%0A self.filesline += len(filebuf.readlines())%0A%0A return self.filesline
%0A%0Aif
@@ -1784,16 +1784,32 @@
ain__%22:%0A
+ import time%0A
tmp
@@ -2000,24 +2000,24 @@
toolkitem')%0A
-
print(tm
@@ -2020,16 +2020,83 @@
t(tmp1.dirlist)%0A
+ print(time.time())%0A tmp1.sum_lines()%0A print(time.time())%0A
|
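The sum_lines implementation added above counts lines with len(filebuf.readlines()), which materialises every file in memory; the TODO in the diff acknowledges there is room to optimise. One memory-flat alternative, sketched under the assumption that a streaming count is acceptable:

def count_lines(path):
    # Iterate the file object itself so only one line is held at a time.
    with open(path, 'rb') as handle:
        return sum(1 for _ in handle)

print(count_lines(__file__))  # e.g. count the lines of this script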
e1f1f0ca797b639a730e8804dbd5595ad0f395e0
|
Add docstring for module.py
|
package_name/module.py
|
package_name/module.py
|
import numpy as np
def cubic_rectification(x):
'''
Returns the rectified value of the cube of X.
    If X is positive, this is the cube of X; if X is negative, it is 0.
'''
return np.maximum(0, x**3)
|
Python
| 0.000001
|
@@ -1,8 +1,72 @@
+%22%22%22%0AModule provides a simple cubic_rectification function.%0A%22%22%22%0A%0A
import n
|
d71c0745a4032ce60dd506e91665e46c4c98271f
|
Update forwarder_ZMQ_Server.py
|
ProBot_Server/Midi_Device/forwarder_ZMQ_Server.py
|
ProBot_Server/Midi_Device/forwarder_ZMQ_Server.py
|
#!/usr/bin/python
import zmq
def main():
print "\nServer for ProBot is running..."
try:
context = zmq.Context(1)
# Socket facing clients
frontend = context.socket(zmq.SUB)
frontend.bind("tcp://*:5559")
frontend.setsockopt(zmq.SUBSCRIBE, "")
# Socket facing services
backend = context.socket(zmq.PUB)
backend.bind("tcp://*:5560")
zmq.device(zmq.FORWARDER, frontend, backend)
except Exception, e:
print e
print "bringing down zmq device"
finally:
pass
frontend.close()
backend.close()
context.term()
if __name__ == "__main__":
main()
|
Python
| 0.000001
|
@@ -52,25 +52,27 @@
%22%5Cn
-Server for ProBot
+ProBot's ZMQ Server
is
@@ -83,16 +83,17 @@
ing...%22%0A
+%0A
try:
|
788cc159e4d734b972e22ccf06dbcd8ed8f94885
|
Update DictStack implementation from jaraco.collections 3.5.1
|
distutils/_collections.py
|
distutils/_collections.py
|
import collections
import itertools
# from jaraco.collections 3.5
class DictStack(list, collections.abc.Mapping):
"""
A stack of dictionaries that behaves as a view on those dictionaries,
giving preference to the last.
>>> stack = DictStack([dict(a=1, c=2), dict(b=2, a=2)])
>>> stack['a']
2
>>> stack['b']
2
>>> stack['c']
2
>>> stack.push(dict(a=3))
>>> stack['a']
3
>>> set(stack.keys()) == set(['a', 'b', 'c'])
True
>>> set(stack.items()) == set([('a', 3), ('b', 2), ('c', 2)])
True
>>> dict(**stack) == dict(stack) == dict(a=3, c=2, b=2)
True
>>> d = stack.pop()
>>> stack['a']
2
>>> d = stack.pop()
>>> stack['a']
1
>>> stack.get('b', None)
>>> 'c' in stack
True
"""
def __iter__(self):
dicts = list.__iter__(self)
return iter(set(itertools.chain.from_iterable(c.keys() for c in dicts)))
def __getitem__(self, key):
for scope in reversed(self):
if key in scope:
return scope[key]
raise KeyError(key)
push = list.append
def __contains__(self, other):
return collections.abc.Mapping.__contains__(self, other)
|
Python
| 0
|
@@ -60,17 +60,19 @@
ions 3.5
+.1
%0A
-
class Di
@@ -356,32 +356,57 @@
tack%5B'c'%5D%0A 2%0A
+ %3E%3E%3E len(stack)%0A 3%0A
%3E%3E%3E stack.pu
@@ -1017,21 +1017,43 @@
eversed(
+tuple(list.__iter__(
self)
+))
:%0A
@@ -1197,16 +1197,16 @@
other):%0A
-
@@ -1262,8 +1262,69 @@
other)%0A
+%0A def __len__(self):%0A return len(list(iter(self)))%0A
|
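After the 3.5.1 update, __iter__ yields each distinct key once and __len__ is derived from that iteration, so len(stack) reports the number of distinct keys across all stacked dicts rather than the number of dicts. A quick check of that behaviour, assuming the patched DictStack class above is in scope:

stack = DictStack([dict(a=1, c=2), dict(b=2, a=2)])
assert len(stack) == 3               # distinct keys: a, b, c
assert set(stack) == {'a', 'b', 'c'}
assert stack['a'] == 2               # lookups prefer the last dict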
3ff2ecfd26097b37832a397a43db6121a0bc3627
|
Remove superfluous comment.
|
djadyen/management/commands/adyen_maintenance.py
|
djadyen/management/commands/adyen_maintenance.py
|
from datetime import timedelta
from django.apps import apps
from django.core.management.base import BaseCommand
from django.utils import timezone
from djadyen import settings
from djadyen.choices import Status
from djadyen.models import AdyenNotification
class Command(BaseCommand):
help = "Process the adyen notifications that are not processed yet."
def handle(self, *args, **options):
order_models = [apps.get_model(model) for model in settings.ADYEN_ORDER_MODELS]
#
# N.B. In our implementations there use to be a limit at how far back in the past we
# would go to process notifications. I'm not sure why it existed, so i've removed it.
#
# Process notifications which have been sent by Adyen.
for notification in AdyenNotification.objects.filter(is_processed=False):
notification_data = notification.get_notification_data()
reference = notification_data.get('merchantReference')
for order_model in order_models:
orders = order_model.objects.filter(reference=reference)
for order in orders:
order.process_notification(notification)
# After five days of an Order having status 'Pending', move them to 'Error'
five_days_ago = timezone.now() - timedelta(days=5)
for order_model in order_models:
for obj in order_model.objects.filter(
status=Status.Pending,
created_on__lte=five_days_ago
):
obj.status = Status.Error
obj.save()
|
Python
| 0.000001
|
@@ -487,216 +487,8 @@
S%5D%0A%0A
- #%0A # N.B. In our implementations there use to be a limit at how far back in the past we%0A # would go to process notifications. I'm not sure why it existed, so i've removed it.%0A #%0A%0A
|
32eba84ec5527f1afc82998e98f5d15035e311c1
|
Allow forced loading. Contemplating changing the default too.
|
chef/base.py
|
chef/base.py
|
from chef.api import ChefAPI
class DelayedAttribute(object):
"""Descriptor that calls ._populate() before access to implement lazy loading."""
def __init__(self, attr):
self.attr = attr
def __get__(self, instance, owner):
if instance is None:
return self
if not getattr(instance, '_populated', False):
instance._populate()
instance._populated = True
return getattr(instance, '_'+self.attr)
class ChefObjectMeta(type):
"""Metaclass for ChefObject to implement lazy attributes."""
def __init__(cls, name, bases, d):
for attr in cls.attributes:
setattr(cls, attr, DelayedAttribute(attr))
class ChefObject(object):
"""A base class for Chef API objects."""
__metaclass__ = ChefObjectMeta
url = ''
attributes = []
def __init__(self, name, api=None):
self.name = name
self.api = api or ChefAPI.get_global()
self.url = self.__class__.url + '/' + self.name
@classmethod
def list(cls, api=None):
api = api or ChefAPI.get_global()
for name, url in api[cls.url].iteritems():
yield cls(name, api=api)
def save(self, api=None):
api = api or ChefAPI.get_global()
api.api_request('PUT', self.url, data=self)
def delete(self, api=None):
api = api or ChefAPI.get_global()
api.api_request('DELETE', self.url)
def _populate(self):
data = self.api[self.url]
for attr in self.__class__.attributes:
setattr(self, '_'+attr, data[attr])
|
Python
| 0
|
@@ -865,32 +865,43 @@
, name, api=None
+, lazy=True
):%0A self.
@@ -1014,16 +1014,66 @@
elf.name
+%0A if not lazy:%0A self._populate()
%0A%0A @c
|
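The change above keeps lazy loading as the default but lets callers pass lazy=False to force a fetch in the constructor. The control flow in isolation, with the Chef API call stubbed out (class and method names are illustrative):

class Record(object):
    def __init__(self, name, lazy=True):
        self.name = name
        if not lazy:
            self._populate()  # eager: fetch immediately instead of on first access

    def _populate(self):
        print('fetching %s ...' % self.name)  # stand-in for the real API request

Record('node1')              # lazy: nothing fetched yet
Record('node2', lazy=False)  # eager: fetches inside the constructor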
6ba2dc8cf06efd74cae941c370e75ccddcf1d25c
|
fix broken arg of DnnL2Pool2DNode
|
treeano/sandbox/nodes/l2_pool.py
|
treeano/sandbox/nodes/l2_pool.py
|
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
fX = theano.config.floatX
@treeano.register_node("l2_pool")
class L2PoolNode(treeano.Wrapper1NodeImpl):
"""
node that takes the L2 norm of the pooled over region
"""
hyperparameter_names = ("pool_size",)
def architecture_children(self):
nodes = [
tn.SqrNode(self.name + "_sqr"),
self._children.children,
# convert mean pool to sum pool by multiplying by pool size
tn.MultiplyConstantNode(self.name + "_mul"),
tn.SqrtNode(self.name + "_sqrt"),
]
return [tn.SequentialNode(self.name + "_sequential", nodes)]
def init_state(self, network):
super(L2PoolNode, self).init_state(network)
pool_size = network.find_hyperparameter(["pool_size"])
network.set_hyperparameter(self.name + "_mul",
"value",
# cast to float, to not trigger
# warn_float64
float(np.prod(pool_size)))
def L2Pool2DNode(name, **kwargs):
l2_kwargs = {}
if "pool_size" in kwargs:
l2_kwargs["pool_size"] = kwargs.pop("pool_size")
return L2PoolNode(
name,
tn.MeanPool2DNode(name + "_pool", **kwargs),
**l2_kwargs)
def DnnL2Pool2DNode(name, pool_size, **kwargs):
l2_kwargs = {}
if "pool_size" in kwargs:
l2_kwargs["pool_size"] = kwargs.pop("pool_size")
return L2PoolNode(
name,
tn.DnnMeanPoolNode(name + "_pool", **kwargs),
**l2_kwargs)
|
Python
| 0.000004
|
@@ -1424,19 +1424,8 @@
ame,
- pool_size,
**k
|
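The fix above works because the explicit pool_size parameter captured the argument before it could land in **kwargs, so the "pool_size" in kwargs branch never ran and l2_kwargs stayed empty. A minimal reproduction of that capture behaviour:

def broken(name, pool_size, **kwargs):
    return "pool_size" in kwargs   # always False: bound to the named parameter

def fixed(name, **kwargs):
    return "pool_size" in kwargs   # True whenever the caller supplies it

assert broken("n", pool_size=(2, 2)) is False
assert fixed("n", pool_size=(2, 2)) is True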
539038ba1135b68786adb44d7660c82a96794971
|
Remove logging
|
imgproc.py
|
imgproc.py
|
from SimpleCV import *
import numpy
import cv2
def process_image(obj, img, config, each_blob=None):
"""
:param obj: Object we're tracking
:param img: Input image
:param config: Controls
:param each_blob: function, taking a SimpleCV.Blob as an argument, that is called for every candidate blob
:return: Mask with candidates
"""
hsv_image = img.toHSV()
print([config.min_hue, config.min_sat, config.min_val])
print([config.max_hue, config.max_sat, config.max_val])
segmented = Image(cv2.inRange(hsv_image.rotate90().getNumpyCv2(),
numpy.array([config.min_hue, config.min_sat, config.min_val]),
numpy.array([config.max_hue, config.max_sat, config.max_val])))
segmented = segmented.dilate(2)
blobs = segmented.findBlobs()
if blobs:
for b in blobs:
if b.radius() > 10:
rect_width = b.minRectWidth()
rect_height = b.minRectHeight()
aspect_ratio = rect_width / rect_height
square_error = abs(obj.aspect_ratio - aspect_ratio) / abs(aspect_ratio)
if square_error < 0.1:
if not each_blob: # default to just outlining
# minRectX and minRectY actually give the center point, not the minX and minY, so we shift by 1/2
rect_ctr_x = b.minRectX()
mrX = rect_ctr_x-rect_width/2
mrY = b.minRectY()-rect_height/2
segmented.drawRectangle(mrX, mrY, rect_width,
rect_height, color=Color.GREEN, width=6)
# px * (px/cm) = cm
offset = int(round((rect_ctr_x - segmented.width/2) * (obj.width / rect_width)))
segmented.drawText('Offset %s cm' % offset, mrX, mrY, Color.RED, 64)
else:
each_blob(b)
# Give the result mask
return segmented.applyLayers()
|
Python
| 0.000001
|
@@ -382,128 +382,8 @@
V()%0A
- print(%5Bconfig.min_hue, config.min_sat, config.min_val%5D)%0A print(%5Bconfig.max_hue, config.max_sat, config.max_val%5D)%0A
|
372ce38d1ddcf2fd65d83df2499d97d4fc2128e6
|
Fix issue in cbb.py
|
ed2d/physics/cbb.py
|
ed2d/physics/cbb.py
|
from ed2d.physics.collisiondata import*
from ed2d.glmath import vector
# Circle Bounding Box
class CBB(object):
def __init__(self, radius, center):
'''Creates a circle bounding box object to be used with the physics engine. Takes in a float for the radius and an array for the center.'''
self.radius = radius
self.center = vector.Vector(3, data=center)
def intersectCBB(self, oCBB):
tempDistance = self.center - oCBB.center
distanceCenters = tempDistance.magnitude()
distanceRadii = self.radius + oCBB.radius
# Collision happens when the distance between the two centers is less than the sum of the radii
state = distanceCenters < distanceRadii
# Calculate the depth penetration
depthPenetration = distanceCenters - (distanceRadii)
return CollisionData(state, tempDistance, depthPenetration)
def getCenter(self):
return center
def getRadius(self):
return radius
|
Python
| 0.000001
|
@@ -735,17 +735,16 @@
etration
-
%0A %09de
@@ -901,16 +901,21 @@
return
+self.
center%0A%0A
@@ -954,14 +954,20 @@
return
+self.
radius
+%0A
|
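The two fixes above are the classic missing-self bug: inside a method, bare center and radius are looked up as module globals, so getCenter() and getRadius() raised NameError even though the instance attributes existed. A minimal reproduction:

class Circle(object):
    def __init__(self, radius):
        self.radius = radius

    def get_radius_broken(self):
        return radius       # NameError: Python looks for a global 'radius'

    def get_radius(self):
        return self.radius  # correct: reads the instance attribute

print(Circle(2.0).get_radius())  # 2.0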
1451d199833b405929105f939f57b4d4faf50fa2
|
Use new py.test to generate vector-vs-scalar tests
|
skyfield/tests/test_vectorization.py
|
skyfield/tests/test_vectorization.py
|
"""Determine whether arrays work as well as individual inputs."""
import pytest
from numpy import array
from ..constants import T0
from ..planets import earth, mars
from ..timescales import JulianDate, julian_date
dates = array([
julian_date(1969, 7, 20, 20. + 18. / 60.),
T0,
julian_date(2012, 12, 21),
julian_date(2027, 8, 2, 10. + 7. / 60. + 50. / 3600.),
])
deltas = array([39.707, 63.8285, 66.8779, 72.])
def generate_planetary_position(ut1, delta_t):
jd = JulianDate(ut1=ut1, delta_t=delta_t)
yield jd.ut1
yield jd.tt
yield jd.tdb
observer = earth(jd)
yield observer.position
yield observer.velocity
yield observer.jd.ut1
yield observer.jd.tt
yield observer.jd.tdb
astrometric = observer.observe(mars)
yield astrometric.position
yield astrometric.velocity
ra, dec, distance = astrometric.radec()
yield ra.hours()
yield dec.degrees()
yield distance
@pytest.fixture(params=[generate_planetary_position])
def gradual_computation(request):
return request.param
def test_gradual_computations(gradual_computation):
vector_results = list(gradual_computation(dates, deltas))
correct_length = len(dates)
for vector_value in vector_results:
assert vector_value.shape[-1] == correct_length
for i, (date, delta) in enumerate(zip(dates, deltas)):
scalar_results = list(gradual_computation(date, delta))
for vector_value, scalar_value in zip(vector_results, scalar_results):
assert (vector_value.T[i] == scalar_value).all()
|
Python
| 0
|
@@ -64,21 +64,34 @@
%22%22%0A%0A
+from itertools
import
-pytest
+izip
%0Afro
@@ -444,22 +444,21 @@
%5D)%0A%0Adef
-genera
+compu
te_plane
@@ -963,173 +963,310 @@
ce%0A%0A
-@pytest.fixture(params=%5Bgenerate_planetary_position%5D)%0Adef gradual_computation(request):%0A return request.param%0A%0Adef test_gradual_computations(gradual_computation):
+def generate_comparisons(computation):%0A %22%22%22Set up comparisons between vector and scalar outputs of %60computation%60.%0A%0A The %60computation%60 should be a generator that accepts both vector and%0A scalar input, and that yields a series of values whose shape%0A corresponds to its input's shape.%0A%0A %22%22%22
%0A
@@ -1284,32 +1284,24 @@
ults = list(
-gradual_
computation(
@@ -1320,45 +1320,116 @@
s))%0A
-%0A
-correct_length = len(dates
+for i, (date, delta_t) in enumerate(zip(dates, deltas)):%0A g = computation(date, delta_t
)%0A
+
for
@@ -1434,26 +1434,33 @@
r vector
-_value in
+, scalar in izip(
vector_r
@@ -1465,16 +1465,20 @@
_results
+, g)
:%0A
@@ -1483,267 +1483,462 @@
-assert vector_value.shape%5B-1%5D == correct_length%0A%0A for i, (date, delta) in enumerate(zip(dates, deltas)):%0A scalar_results = list(gradual_computation(date, delta))%0A for vector_value, scalar_value in zip(vector_results, scalar_results):%0A
+ f = g.gi_frame%0A location = '%7B%7D:%7B%7D'.format(f.f_code.co_filename, f.f_lineno)%0A yield location, vector, i, scalar%0A%0Adef pytest_generate_tests(metafunc):%0A if 'vector_vs_scalar' in metafunc.fixturenames:%0A metafunc.parametrize('vector_vs_scalar',%0A list(generate_comparisons(compute_planetary_position))%0A )%0A%0Adef test_vector_vs_scalar(vector_vs_scalar):%0A location, vector, i, scalar = vector_vs_scalar%0A
@@ -1951,22 +1951,16 @@
(vector
-_value
.T%5Bi%5D ==
@@ -1970,18 +1970,84 @@
alar
-_value
).all(
+), (%0A '%7B%7D:%5Cn %7B%7D%5B%7B%7D%5D != %7B%7D'.format(location, vector.T, i, scalar)
)%0A
|
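The rewrite above replaces a single fixture-driven test with pytest's collection hook, so every vector/scalar comparison becomes its own test item with its own failure report. The shape of that hook in isolation (parameter values are illustrative):

def pytest_generate_tests(metafunc):
    # Runs at collection time; each tuple becomes an independent test case.
    if 'pair' in metafunc.fixturenames:
        metafunc.parametrize('pair', [(1, 1), (2, 2), (3, 3)])

def test_pair(pair):
    vector, scalar = pair
    assert vector == scalar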
0ea687403b01dbc6268c15550f0caf45a54e9106
|
Fix Joust picking with multiple minions in the deck
|
fireplace/cards/utils.py
|
fireplace/cards/utils.py
|
import random
from hearthstone.enums import CardClass, CardType, GameTag, Race, Rarity
from ..actions import *
from ..aura import Refresh
from ..dsl import *
from ..events import *
from ..utils import custom_card
# For buffs which are removed when the card is moved to play (eg. cost buffs)
# This needs to be Summon, because of Summon from the hand
REMOVED_IN_PLAY = Summon(PLAYER, OWNER).after(Destroy(SELF))
RandomCard = lambda **kw: RandomCardPicker(**kw)
RandomCollectible = lambda **kw: RandomCardPicker(collectible=True, **kw)
RandomMinion = lambda **kw: RandomCollectible(type=CardType.MINION, **kw)
RandomBeast = lambda **kw: RandomMinion(race=Race.BEAST)
RandomMurloc = lambda **kw: RandomMinion(race=Race.MURLOC)
RandomSpell = lambda **kw: RandomCollectible(type=CardType.SPELL, **kw)
RandomTotem = lambda **kw: RandomCardPicker(race=Race.TOTEM)
RandomWeapon = lambda **kw: RandomCollectible(type=CardType.WEAPON, **kw)
RandomSparePart = lambda **kw: RandomCardPicker(spare_part=True, **kw)
class RandomEntourage(RandomCardPicker):
def pick(self, source):
self._cards = source.entourage
return super().pick(source)
class RandomID(RandomCardPicker):
def pick(self, source):
self._cards = self.args
return super().pick(source)
Freeze = lambda target: SetTag(target, (GameTag.FROZEN, ))
Stealth = lambda target: SetTag(target, (GameTag.STEALTH, ))
Unstealth = lambda target: UnsetTag(target, (GameTag.STEALTH, ))
Taunt = lambda target: SetTag(target, (GameTag.TAUNT, ))
GiveCharge = lambda target: SetTag(target, (GameTag.CHARGE, ))
GiveDivineShield = lambda target: SetTag(target, (GameTag.DIVINE_SHIELD, ))
GiveWindfury = lambda target: SetTag(target, (GameTag.WINDFURY, ))
CLEAVE = Hit(TARGET_ADJACENT, Attr(SELF, GameTag.ATK))
COINFLIP = RandomNumber(0, 1) == 1
EMPTY_HAND = Count(FRIENDLY_HAND) == 0
HOLDING_DRAGON = Find(FRIENDLY_HAND + DRAGON)
JOUST = Joust(FRIENDLY_DECK + MINION, ENEMY_DECK + MINION)
def SET(amt):
return lambda self, i: amt
# Buff helper
def buff(atk=0, health=0, **kwargs):
buff_tags = {}
if atk:
buff_tags[GameTag.ATK] = atk
if health:
buff_tags[GameTag.HEALTH] = health
for tag in GameTag:
if tag.name.lower() in kwargs.copy():
buff_tags[tag] = kwargs.pop(tag.name.lower())
if "immune" in kwargs:
value = kwargs.pop("immune")
buff_tags[GameTag.CANT_BE_DAMAGED] = value
buff_tags[GameTag.CANT_BE_TARGETED_BY_OPPONENTS] = value
if kwargs:
raise NotImplementedError(kwargs)
class Buff:
tags = buff_tags
return Buff
|
Python
| 0
|
@@ -1888,16 +1888,23 @@
= Joust(
+RANDOM(
FRIENDLY
@@ -1917,18 +1917,26 @@
+ MINION
+)
,
+RANDOM(
ENEMY_DE
@@ -1947,16 +1947,17 @@
MINION)
+)
%0A%0A%0Adef S
|
42462135cec040d17f8ce4488c1ee6bb3b59f406
|
Bump mono-basic to @mono/mono-basic/b8011b2f274606323da0927214ed98336465f467
|
packages/mono-basic.py
|
packages/mono-basic.py
|
GitHubTarballPackage ('mono', 'mono-basic', '3.0', '0d0440feccf648759f7316f93ad09b1e992ea13a',
configure = './configure --prefix="%{prefix}"',
override_properties = { 'make': 'make' }
)
|
Python
| 0.000001
|
@@ -42,55 +42,57 @@
', '
-3
+4
.0
+.1
', '
-0d0440feccf648759f7316f93ad09b1e992ea13a
+b8011b2f274606323da0927214ed98336465f467
',%0A%09
|
3ddddbd24bb37c30df80233ec4c70c38b6c29e82
|
Update leaflet request to be over https
|
emstrack/forms.py
|
emstrack/forms.py
|
from django.contrib.gis.forms import widgets
class LeafletPointWidget(widgets.BaseGeometryWidget):
template_name = 'leaflet/leaflet.html'
class Media:
css = {
'all': ('https://cdn.leafletjs.com/leaflet/v0.7.7/leaflet.css',
'leaflet/css/location_form.css',
'leaflet/css/LeafletWidget.css')
}
js = (
'https://cdn.leafletjs.com/leaflet/v0.7.7/leaflet.js',
'leaflet/js/LeafletWidget.js'
)
def render(self, name, value, attrs=None):
# add point
if value:
attrs.update({ 'point': { 'x': value.x,
'y': value.y,
'z': value.z,
'srid': value.srid }
})
return super().render(name, value, attrs)
|
Python
| 0
|
@@ -198,38 +198,51 @@
'https://cdn
-.leafletjs.com
+js.cloudflare.com/ajax/libs
/leaflet/v0.
@@ -418,22 +418,35 @@
/cdn
-.leafletjs.com
+js.cloudflare.com/ajax/libs
/lea
|
984140261620ae275aaedebe044502f2be185b0f
|
Remove legacy_api.utils.filter_version() as it's not used at all
|
src/olympia/legacy_api/utils.py
|
src/olympia/legacy_api/utils.py
|
import re
from django.conf import settings
from django.utils.html import strip_tags
from olympia import amo
from olympia.amo.helpers import absolutify
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import urlparams, epoch
from olympia.tags.models import Tag
from olympia.versions.compare import version_int
# For app version major.minor matching.
m_dot_n_re = re.compile(r'^\d+\.\d+$')
def addon_to_dict(addon, disco=False, src='api'):
"""
Renders an addon in JSON for the API.
"""
def url(u, **kwargs):
return settings.SITE_URL + urlparams(u, **kwargs)
v = addon.current_version
if disco:
learnmore = settings.SERVICES_URL + reverse('discovery.addons.detail',
args=[addon.slug])
learnmore = urlparams(learnmore, src='discovery-personalrec')
else:
learnmore = url(addon.get_url_path(), src=src)
d = {
'id': addon.id,
'name': unicode(addon.name) if addon.name else None,
'guid': addon.guid,
'status': amo.STATUS_CHOICES_API[addon.status],
'type': amo.ADDON_SLUGS_UPDATE[addon.type],
'authors': [{'id': a.id, 'name': unicode(a.name),
'link': absolutify(a.get_url_path(src=src))}
for a in addon.listed_authors],
'summary': (
strip_tags(unicode(addon.summary)) if addon.summary else None),
'description': strip_tags(unicode(addon.description)),
'icon': addon.icon_url,
'learnmore': learnmore,
'reviews': url(addon.reviews_url),
'total_dls': addon.total_downloads,
'weekly_dls': addon.weekly_downloads,
'adu': addon.average_daily_users,
'created': epoch(addon.created),
'last_updated': epoch(addon.last_updated),
'homepage': unicode(addon.homepage) if addon.homepage else None,
'support': unicode(addon.support_url) if addon.support_url else None,
}
if addon.is_persona():
d['theme'] = addon.persona.theme_data
if v:
d['version'] = v.version
d['platforms'] = [unicode(a.name) for a in v.supported_platforms]
d['compatible_apps'] = [
{unicode(amo.APP_IDS[obj.application].pretty): {
'min': unicode(obj.min), 'max': unicode(obj.max)}}
for obj in v.compatible_apps.values()]
if addon.eula:
d['eula'] = unicode(addon.eula)
if addon.developer_comments:
d['dev_comments'] = unicode(addon.developer_comments)
if addon.takes_contributions:
contribution = {
'link': url(addon.contribution_url, src=src),
'meet_developers': url(addon.meet_the_dev_url(), src=src),
'suggested_amount': addon.suggested_amount,
}
d['contribution'] = contribution
if addon.type == amo.ADDON_PERSONA:
d['previews'] = [addon.persona.preview_url]
else:
d['previews'] = [p.as_dict(src=src) for p in addon.all_previews]
return d
def extract_from_query(term, filter, regexp, end_of_word_boundary=True):
"""
This pulls out a keyword filter from a search term and returns the value
for the filter and a new term with the filter removed.
E.g. term="yslow version:3", filter='version', regexp='\w+' will result in
a return value of: (yslow, 3).
"""
re_string = r'\b%s:\s*(%s)' % (filter, regexp)
if end_of_word_boundary:
re_string += r'\b'
match = re.search(re_string, term)
if match:
term = term.replace(match.group(0), '').strip()
value = match.group(1)
else:
value = None
return (term, value)
def extract_filters(term, opts=None):
"""
Pulls all the filtering options out of the term and returns a cleaned term
and a dictionary of filter names and filter values. Term filters override
filters found in opts.
"""
opts = opts or {}
filters = {}
params = {}
# Type filters.
term, addon_type = extract_from_query(term, 'type', '\w+')
addon_type = addon_type or opts.get('addon_type')
if addon_type:
try:
atype = int(addon_type)
if atype in amo.ADDON_SEARCH_TYPES:
filters['type'] = atype
except ValueError:
# `addon_type` is not a digit.
# Try to find it in `ADDON_SEARCH_SLUGS`.
atype = amo.ADDON_SEARCH_SLUGS.get(addon_type.lower())
if atype:
filters['type'] = atype
# Platform and version filters.
# We don't touch the filters dict for platform and version: that filtering
# is (sadly) done by the view after ES has returned results, using
# addon.compatible_version().
term, platform = extract_from_query(term, 'platform', '\w+')
params['platform'] = platform or opts.get('platform')
term, version = extract_from_query(term, 'version', '[0-9.]+')
params['version'] = version or opts.get('version')
# Tag filters.
term, tag = extract_from_query(term, 'tag', '\w+')
if tag:
tag = Tag.objects.filter(tag_text=tag).values_list('tag_text',
flat=True)
if tag:
filters['tags__in'] = list(tag)
return (term, filters, params)
def filter_version(version, app_id):
"""
Returns filters that can be sent to ES for app version ranges.
If the version is a alpha, beta, or pre-release this does an exact match.
Otherwise it will query where max >= M.Na and min <= M.N.
"""
low = version_int(version)
return {'current_version.compatible_apps.%s.min__lte' % app_id: low}
|
Python
| 0
|
@@ -278,57 +278,8 @@
Tag%0A
-from olympia.versions.compare import version_int%0A
%0A%0A#
@@ -5265,371 +5265,4 @@
ms)%0A
-%0A%0Adef filter_version(version, app_id):%0A %22%22%22%0A Returns filters that can be sent to ES for app version ranges.%0A%0A If the version is a alpha, beta, or pre-release this does an exact match.%0A Otherwise it will query where max %3E= M.Na and min %3C= M.N.%0A %22%22%22%0A low = version_int(version)%0A return %7B'current_version.compatible_apps.%25s.min__lte' %25 app_id: low%7D%0A
|
612698f37ab726fb77aa1f284c97d01d1d726abf
|
Bump version
|
django_anyvcs/__init__.py
|
django_anyvcs/__init__.py
|
# Copyright (c) 2014-2016, Clemson University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Clemson University nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__version__ = '2.4.0'
|
Python
| 0
|
@@ -1561,13 +1561,13 @@
__ = '2.
-4
+5
.0'%0A
|
e5e3afeb61f0c6da8a8f2cf7ce6e8c822517ba8d
|
refactor unsupported parameter
|
ensembl/models.py
|
ensembl/models.py
|
#
# Author : Manuel Bernal Llinares
# Project : trackhub-creator
# Timestamp : 03-07-2017 16:50
# ---
# © 2017 Manuel Bernal Llinares <mbdebian@gmail.com>
# All rights reserved.
#
"""
This module contains Ensembl models used as Entities and DAO/Services
"""
import config_manager
class Species:
def __init__(self, ensembl_species_entry):
self.__ensembl_species_entry = ensembl_species_entry
self.__logger = config_manager.get_app_config_manager().get_logger_for(__name__)
def _get_logger(self):
return self.__logger
def _get_value_for_key_or_default(self, key, default='-not_available-'):
if key in self.get_ensembl_species_entry():
return self.get_ensembl_species_entry()[key]
return default
def get_ensembl_species_entry(self):
return self.__ensembl_species_entry
def get_division(self):
return self._get_value_for_key_or_default('division')
def get_ncbi_taxonomy_id(self):
return self._get_value_for_key_or_default('taxon_id')
def get_name(self):
return self._get_value_for_key_or_default('name')
def get_ensembl_release(self):
return self._get_value_for_key_or_default('release')
def get_display_name(self):
return self._get_value_for_key_or_default('display_name')
def get_assembly_accession(self):
return self._get_value_for_key_or_default('accession')
def get_strain_collection(self):
if self._get_value_for_key_or_default('strain_collection', 'null') == 'null':
return None
return self._get_value_for_key_or_default('strain_collection')
def get_common_name(self):
return self._get_value_for_key_or_default('common_name')
def get_strain(self):
return self._get_value_for_key_or_default('strain')
def get_aliases(self):
return self._get_value_for_key_or_default('aliases', [])
def get_groups(self):
return self._get_value_for_key_or_default('groups', [])
def get_assembly(self):
return self._get_value_for_key_or_default('assembly')
class SpeciesService:
def __init__(self, species_data):
self.__logger = config_manager.get_app_config_manager().get_logger_for(__name__)
        # I've changed this: we store the original species data, and then we offer two different views
self.__species_data = species_data
self.__index_by_taxonomy_id = None
@staticmethod
def __index_data_for_property(data, property_getter):
"""
Given an iterable data container, and a property getter to run on every object of that container, it returns a
dictionary where the key is the property value for a particular data object part of the data collection
:param data: iterable of data objects to index
:param property_getter: property on which the index should be created
:return: a dictionary of the given data objects where the key is the indexed property
"""
return {property_getter(data_item): data_item for data_item in data}
def _get_logger(self):
return self.__logger
def _get_species_from_species_data(self):
return self.get_species_data()['species']
def _get_index_taxonomy_id(self):
"""
Build the index for species data by taxonomy ID
:return: ensembl species data indexed by taxonomy ID
"""
if self.__index_by_taxonomy_id is None:
self.__index_by_taxonomy_id = \
self.__index_data_for_property(self._get_species_from_species_data(), Species.get_ncbi_taxonomy_id)
return self.__index_by_taxonomy_id
def get_species_data(self):
return self.__species_data
def get_species_entry_for_taxonomy_id(self, taxonomy_id):
"""
Given a taxonomy ID, get its Ensembl species entry
:param taxonomy_id: taxonomy ID
:return: the species entry or None if not found
"""
if taxonomy_id in self._get_index_taxonomy_id():
return self._get_index_taxonomy_id()[taxonomy_id]
return None
def count_ensembl_species(self):
return len(self._get_species_from_species_data())
if __name__ == '__main__':
print("ERROR: This script is part of a pipeline collection and it is not meant to be run in stand alone mode")
|
Python
| 0.000007
|
@@ -610,16 +610,21 @@
default
+Value
='-not_a
@@ -767,16 +767,21 @@
default
+Value
%0A%0A de
|
9674a0869c2a333f74178e305677259e7ac379c3
|
Make the Websocket's connection header value case-insensitive
|
examples/ignore_websocket.py
|
examples/ignore_websocket.py
|
# This script makes mitmproxy switch to passthrough mode for all HTTP
# responses with "Connection: Upgrade" header. This is useful to make
# WebSockets work in untrusted environments.
#
# Note: Chrome (and possibly other browsers), when explicitly configured
# to use a proxy (i.e. mitmproxy's regular mode), send a CONNECT request
# to the proxy before they initiate the websocket connection.
# To make WebSockets work in these cases, supply
# `--ignore :80$` as an additional parameter.
# (see http://mitmproxy.org/doc/features/passthrough.html)
from libmproxy.protocol.http import HTTPRequest
from libmproxy.protocol.tcp import TCPHandler
from libmproxy.protocol import KILL
from libmproxy.script import concurrent
def start(context, argv):
HTTPRequest._headers_to_strip_off.remove("Connection")
HTTPRequest._headers_to_strip_off.remove("Upgrade")
def done(context):
HTTPRequest._headers_to_strip_off.append("Connection")
HTTPRequest._headers_to_strip_off.append("Upgrade")
@concurrent
def response(context, flow):
if flow.response.headers.get_first("Connection", None) == "Upgrade":
# We need to send the response manually now...
flow.client_conn.send(flow.response.assemble())
# ...and then delegate to tcp passthrough.
TCPHandler(flow.live.c, log=False).handle_messages()
flow.reply(KILL)
|
Python
| 0.005274
|
@@ -1042,10 +1042,15 @@
-if
+value =
flo
@@ -1101,20 +1101,51 @@
one)
- == %22Upgrade
+%0A if value and value.upper() == %22UPGRADE
%22:%0A
|
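The patch above reads the header once and compares it case-insensitively, since "upgrade" and "Upgrade" are equivalent header values on the wire; the truthiness check also guards the missing-header case, where get_first returns None. The guard in isolation:

def is_upgrade(value):
    # True for any casing of a Connection: Upgrade value; False for None.
    return bool(value) and value.upper() == "UPGRADE"

assert is_upgrade("Upgrade") and is_upgrade("upgrade")
assert not is_upgrade(None) and not is_upgrade("keep-alive")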
7c90e73d3ffa2a8209a751b01c7cd8bd3122b13b
|
Use actual feature values instead of binary for making pivot predictions
|
scripts/build_pivot_training_data.py
|
scripts/build_pivot_training_data.py
|
#!/usr/bin/env python
from os.path import join, dirname
from sklearn.datasets import load_svmlight_file, dump_svmlight_file
import numpy as np
import scipy.sparse
import sys
from uda_common import read_feature_groups
def main(args):
if len(args) < 3:
sys.stderr.write("Three required arguments: <pivot file> <data file> <output directory>\n")
sys.exit(-1)
pivot_file = args[0]
model_dir = dirname(pivot_file)
group_name = join(model_dir, 'reduced-feature-groups.txt')
group_map = read_feature_groups(group_name)
domain_inds = group_map['Domain']
out_dir = args[2]
sys.stderr.write("Reading in data files\n")
all_X, all_y = load_svmlight_file(args[1])
all_X = all_X.tolil()
## Zero out domain-indicator variables (not needed for this step)
all_X[:,domain_inds[0]] = 0
all_X[:,domain_inds[1]] = 0
num_instances, num_feats = all_X.shape
sys.stderr.write("Reading in pivot files and creating pivot labels dictionary\n")
## Read pivots file into dictionary:
pivots = []
pivot_labels = {}
for line in open(pivot_file, 'r'):
pivot = int(line.strip())
pivots.append(pivot)
pivot_labels[pivot] = np.zeros((num_instances,1))
pivot_labels[pivot] += np.round(all_X[:,pivot] > 0).astype('int').toarray()
sys.stderr.write("Creating pivot matrices for each feature group\n")
#ind_groups = [None] * num_feats
for group_key,group_inds in group_map.items():
group_inds = np.array(group_inds)
group_X = scipy.sparse.lil_matrix(np.zeros((num_instances, num_feats)))
group_X += (all_X > 0).astype('int')
group_X[:, group_inds] = 0
group_X[:, pivots] = 0
for group_ind in group_inds:
if group_ind in pivots:
out_file = join(out_dir, 'pivot_%s-training.liblinear' % group_ind)
print('Writing file %s ' % out_file)
sys.stderr.write('.')
dump_svmlight_file(group_X, pivot_labels[group_ind][:,0], out_file)
sys.stderr.write('\n')
if __name__ == '__main__':
args = sys.argv[1:]
main(args)
|
Python
| 0
|
@@ -1621,33 +1621,13 @@
+=
-(
all_X
- %3E 0).astype('int')
%0A
|
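The one-line change above stops binarising the accumulated matrix: group_X previously summed presence indicators from (all_X > 0).astype('int'), and now sums the raw feature values, as the commit subject states. A toy dense illustration of the difference (the script itself operates on scipy sparse matrices):

import numpy as np

X = np.array([[0.0, 2.5, 0.0],
              [1.2, 0.0, 3.0]])
print((X > 0).astype('int'))  # [[0 1 0] [1 0 1]] - presence/absence only
print(X)                      # actual magnitudes are preserved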
58d754c3904f53c2fbb527a8fd7d9abf19c37a52
|
clear cache after switching
|
cms/views.py
|
cms/views.py
|
import os
import pygit2
import shutil
from beaker.cache import cache_region
from cms import models as cms_models, utils
from gitmodel.workspace import Workspace
from pyramid.view import view_config
from pyramid.renderers import get_renderer
from pyramid.decorator import reify
from pyramid.httpexceptions import HTTPFound
CACHE_TIME = 'long_term'
class CmsViews(object):
def __init__(self, request):
self.request = request
self.repo_path = os.path.join(
self.request.registry.settings['git.path'], '.git')
def get_repo_models(self):
repo = pygit2.Repository(self.repo_path)
ws = Workspace(repo.path, repo.head.name)
return ws.import_models(cms_models)
@reify
def global_template(self):
renderer = get_renderer("templates/base.pt")
return renderer.implementation().macros['layout']
@cache_region(CACHE_TIME)
def get_categories(self):
models = self.get_repo_models()
return [c.to_dict() for c in models.Category().all()]
@cache_region(CACHE_TIME)
def get_category(self, slug):
models = self.get_repo_models()
return models.Category().get(slug).to_dict()
@cache_region(CACHE_TIME)
def get_pages_for_category(self, category_slug):
models = self.get_repo_models()
category = models.Category().get(category_slug)
return [
p.to_dict()
for p in models.Page().filter(primary_category=category)
]
@cache_region(CACHE_TIME)
def get_page(self, id):
models = self.get_repo_models()
return models.Page().get(id).to_dict()
@view_config(route_name='home', renderer='templates/home.pt')
def home(self):
return {'categories': self.get_categories()}
@view_config(route_name='categories', renderer='templates/categories.pt')
def categories(self):
return {'categories': self.get_categories()}
@view_config(route_name='category', renderer='cms:templates/category.pt')
def category(self):
category_slug = self.request.matchdict['category']
category = self.get_category(category_slug)
pages = self.get_pages_for_category(category_slug)
return {'category': category, 'pages': pages}
@view_config(route_name='content', renderer='cms:templates/content.pt')
def content(self):
return {'page': self.get_page(self.request.matchdict['id'])}
class AdminViews(object):
def __init__(self, request):
self.request = request
def get_ws(self):
repo_path = self.request.registry.settings['git.path']
repo = pygit2.Repository(repo_path)
if repo.is_empty:
return Workspace(repo.path)
return Workspace(repo.path, repo.head.name)
@view_config(route_name='configure', renderer='cms:templates/admin/configure.pt')
def configure(self):
repo_path = self.request.registry.settings['git.path']
ws = self.get_ws()
branches = utils.getall_branches(ws.repo)
errors = []
if self.request.method == 'POST':
url = self.request.POST.get('url')
if url:
if ws.repo.is_empty:
shutil.rmtree(repo_path)
pygit2.clone_repository(url, repo_path)
self.get_ws().sync_repo_index()
else:
errors.append('Url is required')
return {
'repo': ws.repo,
'errors': errors,
'branches': [b.shorthand for b in branches],
'current': ws.repo.head.shorthand if not ws.repo.is_empty else None
}
@view_config(route_name='configure_switch')
def configure_switch(self):
if self.request.method == 'POST':
branch = self.request.POST.get('branch')
if branch:
self.get_ws().sync_repo_index()
utils.checkout_branch(self.get_ws().repo, branch)
self.get_ws().sync_repo_index()
return HTTPFound(location=self.request.route_url('configure'))
|
Python
| 0.000001
|
@@ -69,16 +69,32 @@
e_region
+, cache_managers
%0Afrom cm
@@ -334,16 +334,17 @@
PFound%0A%0A
+%0A
CACHE_TI
@@ -4004,32 +4004,154 @@
ync_repo_index()
+%0A%0A # clear caches%0A for _cache in cache_managers.values():%0A _cache.clear()
%0A return
|
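The fix above empties every Beaker cache region after a branch switch, because the long-term cached category and page lookups would otherwise keep serving content from the previously checked-out branch. The clearing step in isolation, assuming Beaker is installed (cache_managers is Beaker's module-level registry of Cache objects, as imported in the diff):

from beaker.cache import cache_managers

def clear_all_caches():
    # Walk the registry and clear each cache region.
    for _cache in cache_managers.values():
        _cache.clear()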
187bd62a74d9b1bc995085a90cbcfa700e2454a7
|
Bump to 2.0.1.
|
wessex.py
|
wessex.py
|
import hashlib
import hmac
import posixpath
import urllib
import urlparse
import requests
__all__ = ['Harold', 'connect_harold']
__version__ = '2.0.0'
class Harold(object):
def __init__(self, url, secret, timeout=3):
self.scheme, self.netloc, self.path, query, fragment = urlparse.urlsplit(url)
assert not query, "harold url may not contain query parameters."
assert not fragment, "harold url may not contain fragments."
self.secret = secret
self.timeout = timeout
self.session = requests.Session()
def _post_to_harold(self, path, data):
combined_path = posixpath.join(self.path, "harold", path)
url = urlparse.urlunsplit((
self.scheme,
self.netloc,
combined_path,
None,
None,
))
body = urllib.urlencode(data)
hash = hmac.new(self.secret, body, hashlib.sha1)
resp = self.session.post(
url,
data=body,
timeout=self.timeout,
headers={
"User-Agent": "/".join((__name__, __version__)),
"Content-Type": "application/x-www-form-urlencoded",
"X-Hub-Signature": "sha1=" + hash.hexdigest(),
},
)
resp.raise_for_status()
def alert(self, tag, message):
self._post_to_harold("alert", {
"tag": tag,
"message": message,
})
def heartbeat(self, tag, interval):
self._post_to_harold("heartbeat", {
"tag": tag,
"interval": interval,
})
def get_irc_channel(self, channel):
return IrcChannel(self, channel)
def get_deploy(self, id):
return Deploy(self, id)
class IrcChannel(object):
def __init__(self, harold, channel):
self.harold = harold
self.channel = channel
def message(self, message):
self.harold._post_to_harold("message", {
"channel": self.channel,
"message": message,
})
def set_topic(self, topic):
self.harold._post_to_harold("topic/set", {
"channel": self.channel,
"topic": topic,
})
class Deploy(object):
def __init__(self, harold, id):
self.harold = harold
self.id = id
def begin(self, who, args, log_path, host_count):
self.harold._post_to_harold("deploy/begin", {
"id": self.id.encode('utf-8'),
"who": who,
"args": args,
"log_path": log_path.encode('utf-8'),
"count": host_count
})
def end(self):
self.harold._post_to_harold("deploy/end", {
"id": self.id.encode('utf-8'),
})
def error(self, error):
self.harold._post_to_harold("deploy/error", {
"id": self.id.encode('utf-8'),
"error": error,
})
def abort(self, reason):
self.harold._post_to_harold("deploy/abort", {
"id": self.id.encode('utf-8'),
"reason": reason,
})
def progress(self, host, index):
self.harold._post_to_harold("deploy/progress", {
"id": self.id.encode('utf-8'),
"host": host,
"index": index,
})
def connect_harold(config="/etc/harold.ini"):
"""Creates a Harold object based on configuration in the given
configuration file"""
import ConfigParser
parser = ConfigParser.RawConfigParser({
"timeout": 3,
})
files_read = parser.read(config)
if not files_read:
raise IOError("No config file found in: %r" % config)
url = parser.get("harold", "url")
secret = parser.get("harold", "hmac_secret")
timeout = parser.getint("harold", "timeout")
return Harold(url, secret, timeout)
def harold_irc():
import os
import sys
import argparse
parser = argparse.ArgumentParser(description="Send a message to an IRC channel via Harold.")
parser.add_argument("channel", nargs=1, help="IRC channel to send message to")
parser.add_argument("message", nargs="*", help="Message to send.")
args = parser.parse_args()
try:
harold = connect_harold()
channel = harold.get_irc_channel(args.channel[0])
if args.message:
channel.message(" ".join(args.message))
else:
while True:
line = sys.stdin.readline()
if not line:
break
channel.message(line)
except Exception, e:
print "%s: %s" % (os.path.basename(sys.argv[0]), e)
return 1
return 0
|
Python
| 0
|
@@ -144,17 +144,17 @@
= '2.0.
-0
+1
'%0A%0A%0Aclas
|
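The client above signs each POST body with HMAC-SHA1 and sends the digest as "X-Hub-Signature: sha1=<hexdigest>". A hedged receiver-side sketch (Python 3; the secret is assumed to be the same shared value passed to Harold()):

import hashlib
import hmac

def verify_signature(secret: bytes, body: bytes, header: str) -> bool:
    # Recompute the digest exactly as _post_to_harold does, then compare
    # in constant time to avoid timing leaks.
    expected = "sha1=" + hmac.new(secret, body, hashlib.sha1).hexdigest()
    return hmac.compare_digest(expected, header)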
d981caff4b6710a3779f25fd8955fd111d9ea0cf
|
fix export error in dj 19
|
django_tablib/datasets.py
|
django_tablib/datasets.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from .base import BaseDataset
class SimpleDataset(BaseDataset):
def __init__(self, queryset, headers=None, encoding='utf-8'):
self.queryset = queryset
self.encoding = encoding
if headers is None:
# We'll set the queryset to include all fields including calculated
# aggregates using the same names as a values() queryset:
v_qs = queryset.values()
headers = []
headers.extend(v_qs.query.extra_select)
headers.extend(v_qs.field_names)
headers.extend(v_qs.query.aggregate_select)
self.header_list = headers
self.attr_list = headers
elif isinstance(headers, dict):
self.header_dict = headers
self.header_list = self.header_dict.keys()
self.attr_list = self.header_dict.values()
elif isinstance(headers, (tuple, list)):
self.header_list = headers
self.attr_list = headers
super(SimpleDataset, self).__init__()
|
Python
| 0
|
@@ -569,32 +569,217 @@
ct)%0A
+try:%0A field_names = v_qs.query.values_select%0A except AttributeError:%0A # django %3C 1.9%0A field_names = v_qs.field_names%0A
headers.extend(v
@@ -777,21 +777,16 @@
.extend(
-v_qs.
field_na
|
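The diff guards against a Django 1.9 rename: a values() queryset exposed its plain-field names as field_names before 1.9 and as query.values_select from 1.9 on. The shim, lifted out as a standalone sketch:

def values_field_names(values_qs):
    """Return the plain-field names of a values() queryset across versions."""
    try:
        return list(values_qs.query.values_select)  # Django >= 1.9
    except AttributeError:
        return list(values_qs.field_names)          # Django < 1.9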
82de88c784018a0a1403a117a5fbb2b386f8a41f
|
Comment added
|
ThreeDeeBFS.py
|
ThreeDeeBFS.py
|
"""Author:Dom Modica
The purpose of this program is to solve the problem described @ http://www.reddit.com/r/dailyprogrammer/comments/2o5tb7/2014123_challenge_191_intermediate_space_probe/
Essentially one is to create a 2D NxN matrix along with various obstacles denoted as gravity wells and asteroids and implement a BFS to find a path from S(start) to E(end)
This instance however solves it for a 3D NxNxN matrix.
"""
import math
import random
import pprint
from copy import copy,deepcopy
class Node: #Allows me to give each node in the NxNxN matrix properties such as value, posistion, and whether or not this particular node has been visited or not.
def __init__(self, val, pos):
self.val = val
self.pos = pos
self.visited = False
def __repr__(self):
return repr(self.pos)
def __getitem__(self, index):
return self.pos[index]
class Galaxy(): #Creates an instance of NxNxN matrix and populates it with asteroids and gravity wells
def __init__(self,n,start,end): # all the properties for the space graph are created her on a per 'instance' basis
self.n=n
self.size=n*n*n
self.asteroidspop= math.floor(self.size*0.10)
self.gravitywellspop=math.floor(self.size*0.10)
self.matrix=[[["." for x in range (n)] for x in range(n)]for x in range(n)] #Let it be noted a "." represents a non-obstacle and non start or end node.
self.start=start
self.end=end
self.starta,self.startb,self.startc=self.start[0],self.start[1],self.start[2]
def createobstacles(self):
starta,startb,startc=self.start[0],self.start[1],self.start[2] #Unpack start and end tuples
enda,endb,endc=self.end[0],self.end[1],self.end[2]
self.matrix[starta][startb][startc]='S' #sets start and end points on matrix
self.matrix[enda][endb][endc]='E'
for item in range(self.asteroidspop): #asteroid creation
x,y,z=random.randint(0,self.n-1),random.randint(0,self.n-1),random.randint(0,self.n-1)
while self.matrix[x][y][z] in ['A','S','E']: #after a random coordinate is chosen it makes sure its not already a create obstacle if it is it rerolls that random
x,y,z=random.randint(0,self.n-1),random.randint(0,self.n-1),random.randint(0,self.n-1)
self.matrix[x][y][z]='A'
for item in range(self.gravitywellspop): #gravity well creation
x,y,z=random.randint(0,self.n-1),random.randint(0,self.n-1),random.randint(0,self.n-1) #same logic applies from before
while self.matrix[x][y][z] in ['A','S','E','G']:
x,y,z=random.randint(0,self.n-1),random.randint(0,self.n-1),random.randint(0,self.n-1)
self.matrix[x][y][z]='G'
def custombfs(graph,start):
todo=[[start]]
if start.val == 'E': #checks to see if start point is end point
return [start]
start.visited=True
length=len(graph) #needs to be stored to aid in deciphering legal/illegal moves
moves = [(-1, 0,0), (1,0,0), (0, -1,0), (0, 1,0),(0,0,1),(0,0,-1)]
while todo:
path = todo.pop(0) #deques first element in todo list
node = path[-1]
pos = node.pos #gets the posistion for which we will apply all possible legal "moves"
for move in moves:
if not (0 <= pos[0] + move[0] < length and 0 <= pos[1] + move[1] < length and 0 <= pos[2]+move[2]<length): #cycles through moves list and decides whether legal or illegal
continue
neighbor = graph[pos[0] + move[0]] [pos[1] + move[1]][pos[2]+move[2]]
if neighbor.val == 'E':
return path + [neighbor]
elif neighbor.val == '.' and not neighbor.visited:
neighbor.visited = True
todo.append(path+[neighbor]) # creates copy of list
else:
pass
raise Exception('Path not found!')
def main():
Space=Galaxy(10,(0,0,0),(9,9,9)) #send in size,start,end coordinates (in that order)
Space.createobstacles()
tempmaster=deepcopy(Space.matrix) #ran into problems converting the space object to have the node "properties" so I had to make a deep copy of the object and work with that
for q in range(Space.n):
for r in range(Space.n):
for z in range(Space.n):
tempmaster[q][r][z]=Node(tempmaster[q][r][z],(q,r,z)) #converts all coordinates to nodes, is inefficient but works none the less
solution=custombfs(tempmaster,tempmaster[Space.starta][Space.startb][Space.startc])
print(solution) #prints solutions. in the even no solution is found custombfs with raise an exception and say path not found
if __name__=="__main__":
main()
|
Python
| 0
|
@@ -3364,16 +3364,55 @@
n moves:
+ #iterates through the bank of %22moves%22
%0A
@@ -3693,16 +3693,17 @@
move%5B2%5D%5D
+
%0A
|
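One detail worth noting in custombfs above: todo.pop(0) on a list is O(n) per dequeue. The usual fix is collections.deque; a self-contained sketch of the same frontier handling, with generic callables standing in for the Galaxy matrix and move logic:

from collections import deque

def bfs(start, neighbours, is_goal):
    # Same search structure as custombfs, but with an O(1) popleft frontier.
    todo = deque([[start]])
    seen = {start}
    while todo:
        path = todo.popleft()
        for nxt in neighbours(path[-1]):
            if nxt in seen:
                continue
            if is_goal(nxt):
                return path + [nxt]
            seen.add(nxt)
            todo.append(path + [nxt])
    raise Exception('Path not found!')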
a43b62c60b00233fa84c66bf4a332410903476eb
|
fix typo
|
django_fabric/fabfile.py
|
django_fabric/fabfile.py
|
# -*- coding: utf8 -*-
from fabric.api import local, run, cd
from fabric.operations import sudo
from fabric import colors
from fabric.context_managers import settings
from fabric.contrib.console import confirm
from fabric.contrib import django
from fabric.utils import abort
class App():
project_paths = {}
project_package = None
test_settings = None
def __init__(self, project_paths, project_package, test_settings=None):
self.project_paths = project_paths
self.project_package = project_package
self.test_settings = None
django.project(project_package)
def local_management_command(self, command, *args, **kwargs):
return local("venv/bin/python manage.py %s" % command, *args, **kwargs)
def run_management_command(self, instance, command):
code_dir = self.project_paths[instance]
with cd(code_dir):
return run("venv/bin/python manage.py %s" % command)
def test(self, is_deploying=True):
with settings(warn_only=True):
print(colors.yellow("Running tests, please wait!"))
if settings is None:
command = "test --settings=%s" % \
self.test_settings
else:
command = "test"
result = self.local_management_command(command, capture=True)
if result.failed:
print(colors.red("Tests failed"))
if is_deploying:
if not confirm('Do you really want to deploy?'):
abort('')
else:
print(colors.green("All tests ok"))
def run_server_updates(self, instance):
code_dir = self.project_paths[instance]
with cd(code_dir):
run("git fetch")
run("git reset --hard origin/master")
run("venv/bin/pip install -r requirements.txt")
from django.conf import settings
if 'south' in settings.INSTALLED_APPS:
self.run_management_command(instance,
"syncdb --noinput --migrate")
else:
self.run_management_command(instance, "syncdb --noinput")
if 'djangobower' in settings.INSTALLED_APPS:
self.run_management_command(instance, "bower_install")
self.run_management_command(instance, "collectstatic --noinput")
def restart_app(self, instance):
raise NotImplementedError
def deploy(self, instance):
self.run_server_updates(instance)
self.restart_app(instance)
def deploy_dev(self):
if confirm("Do you want to run tests before deploying?"):
self.test(is_deploying=True)
self.deploy('dev')
def deploy_prod(self, run_test=True):
if run_test:
self.test(is_deploying=True)
self.deploy('prod')
class UwsgiApp(App):
ini_files = {}
def __init__(self, ini_files, *args, **kwargs):
super(UwsgiApp, self).__init__(*args, **kwargs)
self.ini_files = ini_files
def restart_app(self, instance):
sudo("touch %s" % self.ini_files['instance'])
|
Python
| 0.999991
|
@@ -3125,17 +3125,16 @@
les%5B
-'
instance
'%5D)%0A
@@ -3129,12 +3129,11 @@
instance
-'
%5D)%0A
|
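The "typo" being fixed is a one-character dict-key bug: quoting instance made restart_app look up the literal string "instance" instead of the method's parameter. Minimal illustration with made-up paths:

ini_files = {'dev': '/etc/uwsgi/dev.ini', 'prod': '/etc/uwsgi/prod.ini'}
instance = 'prod'
# ini_files['instance'] would raise KeyError: the quotes make it a literal key
print(ini_files[instance])  # /etc/uwsgi/prod.ini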
a5add45a7f4fb1f9651e49fb5f20fe1c9953c0b8
|
Assert expected dates for A1
|
esios/archives.py
|
esios/archives.py
|
# -*- coding: utf-8 -*-
from datetime import datetime
from libsaas import http, parsers, port
from libsaas.services import base
from esios.utils import translate_param, serialize_param
LIQUICOMUN_PRIORITY = [
'C7', 'A7', 'C6', 'A6', 'C5', 'A5', 'C4', 'A4', 'C3', 'A3', 'C2', 'A2',
'C1', 'A1'
]
def parser_none(body, code, headers):
return body
class Archive(base.RESTResource):
path = 'archives'
def get_filename(self):
return self.__class__.__name__
def order_key_function(self, param):
return param['name']
def validate_dates(self, start, end):
return True
@base.apimethod
def get(self, start_date, end_date, taxonomy_terms=None):
assert isinstance(start_date, datetime)
assert isinstance(end_date, datetime)
if taxonomy_terms is None:
taxonomy_terms = []
assert isinstance(taxonomy_terms, (list, tuple))
assert self.validate_dates(start_date, end_date), "Dates are not in the expected range for the requested version"
date_type = 'datos'
start_date = start_date.isoformat()
end_date = end_date.isoformat()
locale = 'en'
param_list = ('locale', 'start_date', 'end_date', 'date_type')
if taxonomy_terms:
param_list += ('taxonomy_terms',)
params = base.get_params(
param_list,
locals(),
translate_param=translate_param,
serialize_param=serialize_param,
)
request = http.Request('GET', self.get_url(), params)
return request, parsers.parse_json
@base.apimethod
def download(self, start_date, end_date, taxonomy_terms=None):
assert isinstance(start_date, datetime)
assert isinstance(end_date, datetime)
if taxonomy_terms is None:
taxonomy_terms = []
assert isinstance(taxonomy_terms, (list, tuple))
# gets filename from class name
filename = self.get_filename()
body = self.get(start_date, end_date, taxonomy_terms)
regs = [a for a in body['archives'] if filename in a['name']]
sorted_list = sorted(regs, key=self.order_key_function)
# gets last (better) file
url = sorted_list[0]['download']['url']
request = http.Request('GET', self.parent.get_url() + url)
return request, parser_none
class Liquicomun(Archive):
def get_filename(self):
return super(Liquicomun, self).get_filename().lower()
def order_key_function(self, param):
return LIQUICOMUN_PRIORITY.index(param['name'][:2])
def get(self, start_date, end_date, taxonomy_terms=None):
if taxonomy_terms is None:
taxonomy_terms = []
taxonomy_terms.append('Settlements')
return super(Liquicomun, self).get(start_date, end_date, taxonomy_terms)
class A1_liquicomun(Archive):
""" This month and future """
## Validate dates in A1 period (this month & future)
def order_key_function(self, param):
print (param)
if type(param) == list:
param = param[0]
name = (param['name'])
assert name == "A1_liquicomun"
return LIQUICOMUN_PRIORITY.index(name[:2])
class A2_liquicomun(Liquicomun):
""" Just previous month """
pass
|
Python
| 0.999994
|
@@ -46,16 +46,51 @@
datetime
+%0Afrom dateutil import relativedelta
%0A%0Afrom l
@@ -605,21 +605,21 @@
alidate_
-dates
+range
(self, s
@@ -981,21 +981,21 @@
alidate_
-dates
+range
(start_d
@@ -2956,64 +2956,569 @@
%22%22%22%0A
- ## Validate dates in A1 period (this month & future)
+%0A def validate_range(self, start, end):%0A ## Validate range for A1 period (this month & future)%0A ### toDo acotar future%0A%0A today = datetime.today()%0A try:%0A first_day_current_month = datetime(today.year, today.month, 1)%0A assert start %3E= first_day_current_month%0A%0A last_day_current_month = first_day_current_month + relativedelta.relativedelta(months=1) - relativedelta.relativedelta(days=1)%0A assert end %3C= last_day_current_month%0A except:%0A return False%0A%0A return True%0A
%0A%0A
|
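The decoded diff renames validate_dates to validate_range and gives A1_liquicomun a real implementation: A1 files only cover the current month, so both dates must fall inside it. A standalone sketch of that check (dateutil is assumed installed, as the diff imports relativedelta from it):

from datetime import datetime
from dateutil import relativedelta

def in_current_month(start, end):
    # Both endpoints must lie within the current calendar month.
    today = datetime.today()
    first_day = datetime(today.year, today.month, 1)
    last_day = (first_day
                + relativedelta.relativedelta(months=1)
                - relativedelta.relativedelta(days=1))
    return first_day <= start and end <= last_day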
2aaf601b3ad1bbce9252b10e709e76995b00845c
|
Use thousand separator in numbers
|
django_zoook/settings.py
|
django_zoook/settings.py
|
# -*- coding: utf-8 -*-
############################################################################################
#
# Zoook e-sale for OpenERP, Open Source Management Solution
# Copyright (C) 2011 Zikzakmedia S.L. (<http://www.zikzakmedia.com>). All Rights Reserved
# $Id$
#
# Module Modified: 2012-06-07
# Author: Mariano Ruiz <mrsarm@gmail.com>,
# Enterprise Objects Consulting (<http://www.eoconsulting.com.ar>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################################
import sys
from django_zoook.config import *
import os
import re
USER_ADD_APP = [
{'app':'django_zoook.content.content','url':'/content/add/','string':'Add Content'},
{'app':'django_zoook.cms.modules','url':'/cms/modules/list/','string':'All Modules'},
] + PROJECT_USER_ADD_APP
PAGINATOR_TOTAL = 9
PAGINATOR_ITEMS = [9,18,36]
PAGINATOR_ORDER_TOTAL = 5
PAGINATOR_INVOICE_TOTAL = 5
CATALOG_ORDERS = ['price','name']
USER_LENGHT = 8
KEY_LENGHT = 6
LOGIN_URL = '/partner/'
LOGIN_REDIRECT_URL = '/'
ADMINS = (
('Enterprise Objects Consulting','info@eoconsulting.com.ar'),
)
MANAGERS = ADMINS
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
PROJECT_PATH = os.path.dirname(__file__)
PATH = os.path.abspath(os.path.dirname(__file__).decode("utf-8"))
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PATH, "static")
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
STATIC_URL = '/media/'
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'YOUR_SECRET_KEY'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'localeurl.middleware.LocaleURLMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'maintenancemode.middleware.MaintenanceModeMiddleware',
'django_zoook.middleware.threadlocals.ThreadLocals', #local middleware project
'django.middleware.cache.UpdateCacheMiddleware',
# 'django.middleware.cache.FetchFromCacheMiddleware',
'django_zoook.trace.TraceMiddleware',
)
TEMPLATE_DIRS = (
os.path.join(PATH, "templates"+"/"+BASE_TEMPLATE),
os.path.join(PROJECT_PATH,'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
# 'django.core.context_processors.auth',
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django_zoook.tools.cms.context_processors.site_configuration',
'django_zoook.tools.cms.context_processors.theme',
)
INSTALLED_APPS = (
'localeurl',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.sitemaps',
#'haystack',
'inplaceeditform',
'pagination',
'django_zoook.base',
'django_zoook.partner',
'django_zoook.content',
'django_zoook.contact',
'django_zoook.catalog',
'django_zoook.search',
'django_zoook.tag',
'django_zoook.account',
'django_zoook.sale',
'django_zoook.tools.filemanager',
'django_zoook.tools.cms',
'filebrowser',
) + PROJECT_APPS
AUTH_PROFILE_MODULE = "partner.AuthProfile"
LOCALE_INDEPENDENT_PATHS = (
re.compile('^/static/'),
re.compile('^/media/'),
re.compile('^/manager/'),
re.compile('^/filemanager/'),
re.compile('^/filebrowser/'),
re.compile('^/inplaceeditform/'),
) + PROJECT_LOCALE_INDEPENDENT_PATHS
MAINTENANCE_IGNORE_URLS = (
r'^/static/*',
)
"""Search Engine"""
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': os.path.join(os.path.dirname(__file__), 'whoosh_index'),
},
}
|
Python
| 0.00002
|
@@ -1612,16 +1612,47 @@
AL = 5%0A%0A
+USE_THOUSAND_SEPARATOR = True%0A%0A
CATALOG_
|
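For context on the one-line change: USE_THOUSAND_SEPARATOR only takes effect together with localized formatting (USE_L10N = True, which this settings file already sets). A hedged standalone demonstration of the resulting number formatting; the value is illustrative:

import django
from django.conf import settings

settings.configure(USE_I18N=True, USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
django.setup()

from django.utils.formats import number_format
# Under the default en-us locale this prints '1,234,567.50'
print(number_format(1234567.5, decimal_pos=2, use_l10n=True))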
4c500ce1995da97861e37647b61efaf14c6b08d0
|
Load saved RDD
|
code/main.py
|
code/main.py
|
from spark_model import SparkModel
import socket
from document import Document
from pyspark import SparkContext, SparkConf
from boto.s3.connection import S3Connection
from pyspark import SparkConf, SparkContext
import json
import sys
from datetime import datetime
def log_results(model_type, start_time, end_time, score, n_subs, clean_n_subs):
with open('../logs/log.txt', 'a') as f:
f.write('-'*40+'\n')
duration = str(end_time - start_time).split('.')[0]
f.write('Model: %s\n' % model_type)
f.write('Number of subs: %s\n' % n_subs)
f.write('Percentage subs parsed: %.1f%%\n' % (100*float(clean_n_subs) / n_subs))
f.write('Time to run: %s\n' % duration)
f.write('Accuracy: %.2f\n' % score)
if __name__ == '__main__':
with open('/root/.aws/credentials.json') as f:
CREDENTIALS = json.load(f)
# sc = SparkContext()
APP_NAME = 'spark_model'
conf = (SparkConf()
.setAppName(APP_NAME)
.set("spark.executor.cores", 4)
.setMaster('spark://ec2-54-173-173-223.compute-1.amazonaws.com:7077'))
sc = SparkContext(conf=conf, pyFiles=['document.py'])
conn = S3Connection(CREDENTIALS['ACCESS_KEY'], CREDENTIALS['SECRET_ACCESS_KEY'])
model_type = sys.argv[1] if len(sys.argv) > 1 else 'naive_bayes'
start_time = datetime.now()
sm = SparkModel(sc, conn, model_type=model_type)
sm.preprocess()
subs, clean_subs = sm.n_subs, len(sm.labeled_paths)
sm.train()
score = sm.eval_score()
sm.RDD.saveAsPickleFile('rdd.pkl')
end_time = datetime.now()
log_results(model_type, start_time, end_time, score, subs, clean_subs)
sc.stop()
|
Python
| 0.000001
|
@@ -266,32 +266,39 @@
def log_results(
+saved,
model_type, star
@@ -345,16 +345,16 @@
_subs):%0A
-
with
@@ -745,16 +745,47 @@
score)%0A
+ f.write('Saved.'*saved)%0A
%0A%0Aif __n
@@ -1463,16 +1463,26 @@
process(
+'rdd3.pkl'
)%0A su
@@ -1583,41 +1583,127 @@
s
-m.RDD.saveAsPickleFile('rdd.pkl')
+aved = True%0A try:%0A sm.labeled_points.saveAsPickleFile('labeled_points.pkl')%0A except:%0A saved = False
%0A
@@ -1725,24 +1725,24 @@
etime.now()%0A
-
log_resu
@@ -1745,16 +1745,23 @@
results(
+saved,
model_ty
|
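The companion to the save shown in the diff is loading: the preprocess('rdd3.pkl') call suggests reading a previously pickled RDD back, which PySpark does with SparkContext.pickleFile. A hedged sketch; the path and app name are illustrative:

from pyspark import SparkContext

sc = SparkContext(appName='load_saved_rdd')
labeled_points = sc.pickleFile('labeled_points.pkl')  # written by saveAsPickleFile
print(labeled_points.count())
sc.stop()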
74158ab0decf443dcb8222e1220deba664bb26c9
|
Use cls.objects when we check for objects attribute, fixes issue #6
|
djmoney/models/fields.py
|
djmoney/models/fields.py
|
from django.db import models
from django.utils.encoding import smart_unicode
from exceptions import Exception
from moneyed import Money, Currency, DEFAULT_CURRENCY
from djmoney import forms
from decimal import Decimal
__all__ = ('MoneyField', 'currency_field_name', 'NotSupportedLookup')
currency_field_name = lambda name: "%s_currency" % name
SUPPORTED_LOOKUPS = ('exact', 'lt', 'gt', 'lte', 'gte')
class NotSupportedLookup(Exception):
def __init__(self, lookup):
self.lookup = lookup
def __str__(self):
return "Lookup '%s' is not supported for MoneyField" % self.lookup
class MoneyFieldProxy(object):
def __init__(self, field):
self.field = field
self.currency_field_name = currency_field_name(self.field.name)
def _money_from_obj(self, obj):
return Money(obj.__dict__[self.field.name], obj.__dict__[self.currency_field_name])
def __get__(self, obj, type=None):
if obj is None:
raise AttributeError('Can only be accessed via an instance.')
if not isinstance(obj.__dict__[self.field.name], Money):
obj.__dict__[self.field.name] = self._money_from_obj(obj)
return obj.__dict__[self.field.name]
def __set__(self, obj, value):
if isinstance(value, Money):
obj.__dict__[self.field.name] = value.amount
setattr(obj, self.currency_field_name, smart_unicode(value.currency))
else:
if value: value = str(value)
obj.__dict__[self.field.name] = self.field.to_python(value)
class CurrencyField(models.CharField):
def __init__(self, verbose_name=None, name=None, default=DEFAULT_CURRENCY, **kwargs):
if isinstance(default, Currency):
default = default.code
kwargs['max_length'] = 3
super(CurrencyField, self).__init__(verbose_name, name, default=default, **kwargs)
def get_internal_type(self):
return "CharField"
class MoneyField(models.DecimalField):
def __init__(self, verbose_name=None, name=None,
max_digits=None, decimal_places=None,
default=Decimal("0.0"), default_currency=DEFAULT_CURRENCY, **kwargs):
if isinstance(default, Money):
self.default_currency = default.currency
# Avoid giving the user hard-to-debug errors if they miss required attributes
if max_digits is None:
raise Exception("You have to provide a max_digits attribute to Money fields.")
if decimal_places is None:
raise Exception("You have to provide a decimal_places attribute to Money fields.")
self.default_currency = default_currency
super(MoneyField, self).__init__(verbose_name, name, max_digits, decimal_places, default=default, **kwargs)
def to_python(self, value):
if isinstance(value, Money):
value = value.amount
return super(MoneyField, self).to_python(value)
def get_internal_type(self):
return "DecimalField"
def contribute_to_class(self, cls, name):
c_field_name = currency_field_name(name)
c_field = CurrencyField(max_length=3, default=self.default_currency, editable=False)
c_field.creation_counter = self.creation_counter
cls.add_to_class(c_field_name, c_field)
super(MoneyField, self).contribute_to_class(cls, name)
setattr(cls, self.name, MoneyFieldProxy(self))
from managers import money_manager
if hasattr(cls, '_default_manager'):
cls._default_manager = money_manager(cls._default_manager)
elif hasattr(cls, 'objects'):
cls.objects = money_manager(cls._default_manager)
else:
cls.objects = money_manager(models.Manager)
def get_db_prep_save(self, value):
if isinstance(value, Money):
value = value.amount
return super(MoneyField, self).get_db_prep_save(value)
def get_db_prep_lookup(self, lookup_type, value):
if not lookup_type in SUPPORTED_LOOKUPS:
raise NotSupportedLookup(lookup_type)
value = self.get_db_prep_save(value)
return super(MoneyField, self).get_db_prep_lookup(lookup_type, value)
def get_default(self):
if isinstance(self.default, Money):
return self.default
else:
return super(MoneyField, self).get_default()
def formfield(self, **kwargs):
defaults = {'form_class': forms.MoneyField}
defaults.update(kwargs)
return super(MoneyField, self).formfield(**defaults)
## South support
try:
from south.modelsinspector import add_introspection_rules
rules = [
((MoneyField,),
[], # No positional args
{'default_currency':('default_currency',{})}),
((CurrencyField,),
[], # No positional args
{}), # No new keyword args
]
add_introspection_rules(rules, ["^djmoney\.models"])
except ImportError:
pass
|
Python
| 0
|
@@ -3834,32 +3834,23 @@
ger(cls.
-_default_manager
+objects
)%0D%0A
|
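The decoded diff is a one-line fix in contribute_to_class: the branch that runs when a model has an objects attribute but no _default_manager must wrap cls.objects, since cls._default_manager would raise AttributeError there. Minimal illustration with a stand-in money_manager:

def money_manager(manager):
    # Stand-in for djmoney.managers.money_manager; just tags the manager.
    return ('money-wrapped', manager)

class FakeModel:
    objects = 'plain-manager'   # deliberately no _default_manager

def attach_manager(cls):
    if hasattr(cls, '_default_manager'):
        cls._default_manager = money_manager(cls._default_manager)
    elif hasattr(cls, 'objects'):
        cls.objects = money_manager(cls.objects)  # fixed: was cls._default_manager

attach_manager(FakeModel)
print(FakeModel.objects)  # ('money-wrapped', 'plain-manager')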
9ec2382de5a3d5377fee03a6151e5afbf36f8e71
|
add doc link
|
code/mode.py
|
code/mode.py
|
def mode(array):
count = {}
for elem in array:
try:
count[elem] += 1
except (KeyError):
count[elem] = 1
# get max count
maximum = 0
modeKey = 0
for key in count.keys():
if count[key] > maximum:
maximum = count[key]
modeKey = key
return modeKey
|
Python
| 0
|
@@ -1,8 +1,159 @@
+# an in-depth rundown of this program%0A# can be found at:%0A# https://github.com/joshhartigan/learn-programming/blob/master/Most%2520Frequent%2520Integer.md%0A%0A
def mode
|
ae2284fa85e1ef7be43792b72480018729b1c2ba
|
Bump PEP version for __version__ comment
|
fluent_blogs/__init__.py
|
fluent_blogs/__init__.py
|
# following PEP 386
__version__ = "1.0"
# Fix for internal messy imports.
# When base_models is imported before models/__init__.py runs, there is a circular import:
# base_models -> models/managers.py -> invoking models/__init__.py -> models/db.py -> base_models.py
#
# This doesn't occur when the models are imported first.
|
Python
| 0.000001
|
@@ -13,11 +13,11 @@
PEP
-386
+440
%0A__v
|
ac249c24c2f72764a8618a0f2e9cd1909d50d1d5
|
Allow to specify custom options for EscapeCode preprocessor.
|
foliant/backends/base.py
|
foliant/backends/base.py
|
from importlib import import_module
from shutil import copytree
from datetime import date
from logging import Logger
from foliant.utils import spinner
class BaseBackend(object):
'''Base backend. All backends must inherit from this one.'''
targets = ()
required_preprocessors_before = ()
required_preprocessors_after = ()
def __init__(self, context: dict, logger: Logger, quiet=False, debug=False):
self.project_path = context['project_path']
self.config = context['config']
self.context = context
self.logger = logger
self.quiet = quiet
self.debug = debug
self.working_dir = self.project_path / self.config['tmp_dir']
def get_slug(self) -> str:
'''Generate a slug from the project title and version and the current date.
Spaces in title are replaced with underscores, then the version and the current date
are appended.
'''
if 'slug' in self.config:
return self.config['slug']
components = []
components.append(self.config['title'].replace(' ', '_'))
version = self.config.get('version')
if version:
components.append(str(version))
components.append(str(date.today()))
return '-'.join(components)
def apply_preprocessor(self, preprocessor: str or dict):
'''Apply preprocessor.
:param preprocessor: Preprocessor name or a dict of the preprocessor name and its options
'''
if isinstance(preprocessor, str):
preprocessor_name, preprocessor_options = preprocessor, {}
elif isinstance(preprocessor, dict):
(preprocessor_name, preprocessor_options), = (*preprocessor.items(),)
with spinner(
f'Applying preprocessor {preprocessor_name}',
self.logger,
self.quiet,
self.debug
):
try:
preprocessor_module = import_module(f'foliant.preprocessors.{preprocessor_name}')
preprocessor_module.Preprocessor(
self.context,
self.logger,
self.quiet,
self.debug,
preprocessor_options
).apply()
except ModuleNotFoundError:
raise ModuleNotFoundError(f'Preprocessor {preprocessor_name} is not installed')
except Exception as exception:
raise type(exception)(
f'Failed to apply preprocessor {preprocessor_name}: {exception}'
)
def preprocess_and_make(self, target: str) -> str:
'''Apply preprocessors required by the selected backend and defined in the config file,
then run the ``make`` method.
:param target: Output format: pdf, docx, html, etc.
:returns: Result as returned by the ``make`` method
'''
src_path = self.project_path / self.config['src_dir']
copytree(src_path, self.working_dir)
common_preprocessors = (
*self.required_preprocessors_before,
*self.config.get('preprocessors', ()),
*self.required_preprocessors_after
)
if self.config.get('escape_code', False):
preprocessors = (
'escapecode',
*common_preprocessors,
'unescapecode'
)
else:
preprocessors = (
*common_preprocessors,
'_unescape'
)
for preprocessor in preprocessors:
self.apply_preprocessor(preprocessor)
return self.make(target)
def make(self, target: str) -> str:
'''Make the output from the source. Must be implemented by every backend.
:param target: Output format: pdf, docx, html, etc.
:returns: Typically, the path to the output file, but in general any string
'''
raise NotImplementedError
|
Python
| 0
|
@@ -3270,32 +3270,108 @@
e):%0A
+if isinstance(self.config%5B'escape_code'%5D, dict):%0A escapecode_
preprocessors =
@@ -3370,42 +3370,255 @@
ssor
-s
=
-(%0A 'escapecode'
+%7B%0A 'escapecode': self.config%5B'escape_code'%5D.get('options', %7B%7D)%0A %7D%0A%0A else:%0A escapecode_preprocessor = 'escapecode'%0A%0A preprocessors = (%0A escapecode_preprocessor
,%0A
|
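After this change, escape_code in the project config may be either a bare boolean or a mapping whose options key is forwarded to the escapecode preprocessor (apply_preprocessor above already unpacks dict entries into name/options pairs). A hedged sketch of the resolution logic; the option key in the second example is illustrative, not a documented escapecode option:

def resolve_escapecode(config):
    # Mirrors the branch the diff adds to preprocess_and_make().
    value = config.get('escape_code', False)
    if isinstance(value, dict):
        return {'escapecode': value.get('options', {})}
    return 'escapecode'

print(resolve_escapecode({'escape_code': True}))
# -> 'escapecode'
print(resolve_escapecode({'escape_code': {'options': {'cache_dir': '.escaped'}}}))
# -> {'escapecode': {'cache_dir': '.escaped'}}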
ef9cd0033ccfd314592be7987c262a61d0ec2fba
|
fix thing I apparently never tested
git add light.py
|
light.py
|
light.py
|
import RPi.GPIO as GPIO
class Light:
def __init__(self, pin):
self.pin = pin
self.status = False
GPIO.setup(pin, GPIO.OUT)
def toggle(self):
self.status = not self.status
self.do()
def on(self):
self.status = True
self.do()
def off(self):
self.status = False
self.do()
def do(self):
GPIO.output(light.pin, light.status)
if light.status:
logging.debug("illuminating pin #%(pinNum)d" % {'pinNum': light.pin})
|
Python
| 0
|
@@ -347,24 +347,22 @@
put(
-light
+self
.pin,
-light
+self
.sta
@@ -373,21 +373,20 @@
%0A if
-light
+self
.status:
@@ -454,13 +454,12 @@
m':
-light
+self
.pin
|
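For readability, the class as it stands after the diff, plus the logging import that the original module also never declares (RPi.GPIO assumes actual Raspberry Pi hardware):

import logging
import RPi.GPIO as GPIO

class Light:
    def __init__(self, pin):
        self.pin = pin
        self.status = False
        GPIO.setup(pin, GPIO.OUT)

    def toggle(self):
        self.status = not self.status
        self.do()

    def do(self):
        # The diff's fix: the method must use self, not the module-level
        # name 'light' that only existed at the call site.
        GPIO.output(self.pin, self.status)
        if self.status:
            logging.debug("illuminating pin #%d", self.pin)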
31387ec27be3757ee7dceac7cc4d196fc74fdd65
|
Allow parameter server to be killed
|
worker.py
|
worker.py
|
#!/usr/bin/env python
import cv2
import go_vncdriver
import tensorflow as tf
import argparse
import logging
import os
import universe.utils
from a3c import A3C
from envs import create_env
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Disables write_meta_graph argument, which freezes entire process and is mostly useless.
class FastSaver(tf.train.Saver):
def save(self, sess, save_path, global_step=None, latest_filename=None,
meta_graph_suffix="meta", write_meta_graph=True):
super(FastSaver, self).save(sess, save_path, global_step, latest_filename,
meta_graph_suffix, False)
def run(args, server):
env = create_env(args.env_id, client_id=str(args.task), remotes=args.remotes)
trainer = A3C(env, args.task)
# Variable names that start with "local" are not saved in checkpoints.
variables_to_save = [v for v in tf.global_variables() if not v.name.startswith("local")]
init_op = tf.variables_initializer(variables_to_save)
init_all_op = tf.global_variables_initializer()
saver = FastSaver(variables_to_save)
def init_fn(ses):
logger.info("Initializing all parameters.")
ses.run(init_all_op)
config = tf.ConfigProto(device_filters=["/job:ps", "/job:worker/task:{}/cpu:0".format(args.task)])
logdir = os.path.join(args.log_dir, 'train')
summary_writer = tf.summary.FileWriter(logdir + "_%d" % args.task)
logger.info("Events directory: %s_%s", logdir, args.task)
sv = tf.train.Supervisor(is_chief=(args.task == 0),
logdir=logdir,
saver=saver,
summary_op=None,
init_op=init_op,
init_fn=init_fn,
summary_writer=summary_writer,
ready_op=tf.report_uninitialized_variables(variables_to_save),
global_step=trainer.global_step,
save_model_secs=30,
save_summaries_secs=30)
num_global_steps = 100000000
logger.info(
"Starting session. If this hangs, we're mostly likely waiting to connect to the parameter server. " +
"One common cause is that the parameter server DNS name isn't resolving yet, or is misspecified.")
with sv.managed_session(server.target, config=config) as sess, sess.as_default():
trainer.start(sess, summary_writer)
global_step = sess.run(trainer.global_step)
logger.info("Starting training at step=%d", global_step)
while not sv.should_stop() and (not num_global_steps or global_step < num_global_steps):
trainer.process(sess)
global_step = sess.run(trainer.global_step)
# Ask for all the services to stop.
sv.stop()
logger.info('reached %s steps. worker stopped.', global_step)
def cluster_spec(num_workers, num_ps):
"""
More tensorflow setup for data parallelism
"""
cluster = {}
port = 12222
all_ps = []
host = '127.0.0.1'
for _ in range(num_ps):
all_ps.append('{}:{}'.format(host, port))
port += 1
cluster['ps'] = all_ps
all_workers = []
for _ in range(num_workers):
all_workers.append('{}:{}'.format(host, port))
port += 1
cluster['worker'] = all_workers
return cluster
def main(_):
"""
Setting up Tensorflow for data parallel work
"""
parser = argparse.ArgumentParser(description=None)
parser.add_argument('-v', '--verbose', action='count', dest='verbosity', default=0, help='Set verbosity.')
parser.add_argument('--task', default=0, type=int, help='Task index')
parser.add_argument('--job-name', default="worker", help='worker or ps')
parser.add_argument('--num-workers', default=1, type=int, help='Number of workers')
parser.add_argument('--log-dir', default="/tmp/pong", help='Log directory path')
parser.add_argument('--env-id', default="PongDeterministic-v3", help='Environment id')
parser.add_argument('-r', '--remotes', default=None,
help='References to environments to create (e.g. -r 20), '
'or the address of pre-existing VNC servers and '
'rewarders to use (e.g. -r vnc://localhost:5900+15900,vnc://localhost:5901+15901)')
args = parser.parse_args()
spec = cluster_spec(args.num_workers, 1)
cluster = tf.train.ClusterSpec(spec).as_cluster_def()
universe.utils.exit_on_signal()
if args.job_name == "worker":
server = tf.train.Server(cluster, job_name="worker", task_index=args.task,
config=tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=2))
run(args, server)
else:
server = tf.train.Server(cluster, job_name="ps", task_index=args.task,
config=tf.ConfigProto(device_filters=["/job:ps"]))
server.join()
if __name__ == "__main__":
tf.app.run()
|
Python
| 0.000001
|
@@ -101,16 +101,28 @@
logging%0A
+import time%0A
import o
@@ -5008,25 +5008,24 @@
:ps%22%5D))%0A
-%0A
server.j
@@ -5020,20 +5020,47 @@
-server.join(
+while True:%0A time.sleep(1000
)%0A%0Ai
|
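Rationale for swapping server.join() for a sleep loop: join() blocks indefinitely inside native TensorFlow code, so the signal handlers installed by universe.utils.exit_on_signal() never get a chance to run on the parameter server process; sleeping in slices keeps the interpreter responsive. A generic sketch of the same idea:

import signal
import sys
import time

def handle_term(signum, frame):
    sys.exit(0)   # allows cleanup handlers to run

signal.signal(signal.SIGTERM, handle_term)

while True:
    time.sleep(1000)  # sleep is interrupted by signals, unlike a blocking join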
f0d4b430b627fb9e2b18ba3f82c936698fac6430
|
Update to version 1.3
|
__openerp__.py
|
__openerp__.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Account Report CSV, for OpenERP
# Copyright (C) 2013 XCG Consulting (http://odoo.consulting)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Account Report CSV",
"version": "1.2",
"author": "XCG Consulting",
"website": "http://www.openerp-experts.com",
"category": 'Accounting',
"description": """
Export reports as CSV:
- General Ledger
- Trial Balance
Provides the usual filters (by account, period, currency, etc).
""",
"depends": [
'account_report_webkit',
'analytic_structure',
],
"data": [
'wizard/general_ledger_csv_wizard_view.xml',
'wizard/trial_balance_csv_wizard_view.xml',
'csv_menu.xml',
],
'demo_xml': [],
'test': [],
'installable': True,
'active': False,
}
|
Python
| 0
|
@@ -1022,17 +1022,17 @@
on%22: %221.
-2
+3
%22,%0A %22
|
7b176d1e775ddec384a76d6de9c121e114a8738e
|
load ACL
|
__openerp__.py
|
__openerp__.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Account Analytic Online, for OpenERP
# Copyright (C) 2013 XCG Consulting (www.xcg-consulting.fr)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Analytic Structure",
"version" : "0.1",
"author" : "XCG Consulting",
"category": 'Dependency',
"description": """
This module allows to use several analytic dimensions through a structure related
to an object model.
==================================================================================
""",
'website': 'http://www.openerp-experts.com',
"depends" : ['base'],
"data": [
'analytic_dimension.xml',
],
#'demo_xml': [],
'test': [],
'installable': True,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Python
| 0.000002
|
@@ -1402,16 +1402,56 @@
ata%22: %5B%0A
+ 'security/ir.model.access.csv',%0A
|
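The manifest now loads security/ir.model.access.csv before the views. For reference, that file follows OpenERP's fixed ACL column layout; a hedged example row (the identifiers are illustrative, only the header is fixed by convention):

id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink
access_analytic_dimension_user,analytic.dimension.user,model_analytic_dimension,base.group_user,1,0,0,0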
febfb4c9a5ec5ddfe1f13067c1bc63533e58b09b
|
DEBUG = False
|
elgassia/settings.py
|
elgassia/settings.py
|
"""
Django settings for elgassia project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g*!b+gf-k1j53qo9&uaoz^$j6x4g2^8pzpyf5gjqf7%tam#e@q'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
# 'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
'main.context_processors.theme',
)
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'jquery',
'main',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'elgassia.urls'
WSGI_APPLICATION = 'elgassia.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
try:
import dj_database_url
DATABASES['default'] = dj_database_url.config()
except ImportError:
pass
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Warsaw'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
Python
| 0.000001
|
@@ -715,35 +715,36 @@
uction!%0ADEBUG =
-Tru
+Fals
e%0A%0ATEMPLATE_DEBU
@@ -747,19 +747,20 @@
DEBUG =
-Tru
+Fals
e%0A%0ATEMPL
@@ -1202,16 +1202,55 @@
me',%0A)%0A%0A
+# should be changed in local_settings%0A%0A
ALLOWED_
@@ -1258,16 +1258,25 @@
OSTS = %5B
+%0A '*'%0A
%5D%0A%0A%0A# Ap
@@ -2269,16 +2269,58 @@
%7D%0A%7D%0A%0A
+# use dj_database_url as default default%0A%0A
try:%0A
@@ -2779,8 +2779,98 @@
tatic')%0A
+STATICFILES_DIRS = ()%0A%0Atry:%0A from local_settings import *%0Aexcept ImportError:%0A pass%0A
|
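The decoded diff pairs DEBUG = False with a trailing "from local_settings import *" guarded by ImportError, the common pattern for per-machine overrides. A hedged example of the companion local_settings.py; every value is illustrative and the file is kept out of version control:

# local_settings.py -- not committed; overrides the safe defaults above
DEBUG = True                        # only on a development machine
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['localhost']       # narrower than the '*' default
STATICFILES_DIRS = ('/srv/elgassia/static',)  # hypothetical path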
08fddbdc0ac70a549bac82131771218107186def
|
add description
|
__openerp__.py
|
__openerp__.py
|
# -*- coding: utf-8 -*-
{
'name': "Account Discount",
'summary': """
Apply Discount model to taxes""",
'description': """
The purpose is to apply discount record for the same tax model
""",
'author': "Khaled Hamed",
'website': "http://www.grandtk.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/openerp/addons/base/module/module_data.xml
# for the full list
'category': 'Accounting',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base', 'account'],
# always loaded
'data': [
'account_discount_view.xml'
],
'installable': True,
'price': 5,
'currency': 'EUR',
}
|
Python
| 0.000952
|
@@ -83,37 +83,43 @@
-Apply Discount model to taxes
+Use Tax model for discounts as well
%22%22%22,
@@ -155,70 +155,508 @@
-The purpose is to apply discount record for the same tax model
+%0A Odoo OpenERP Account Discount from Tax%0A %0AThis module adds new concept to use tax model as discount model and print both taxes and discounts separetly.%0A%0AThe steps to perform are very easy:%0A%0A First you define new tax with negative amount (e.g Name: Discount 10%25, Amount: -0.10).%0A Enable Is Discount Checkbox.%0A Then add this dicount from the Taxes/Discounts column per invoice line.%0A%0AThis way, you can separate and analyze discounts using different account/analytic account as well.
%0A
|
6d83f2150f7c6177385b9f2d8abbe48cd2979130
|
Add staleness to MonthCache Admin display
|
events/admin.py
|
events/admin.py
|
from django.contrib import admin
from .models import Calendar,MonthCache
# Register your models here.
@admin.register(Calendar)
class CalendarAdmin(admin.ModelAdmin):
list_display = ('name','remote_id','css_class')
@admin.register(MonthCache)
class MonthCacheAdmin(admin.ModelAdmin):
list_display = ('calendar','month','data_cached_on')
|
Python
| 0
|
@@ -341,11 +341,28 @@
ched_on'
+,'is_cache_stale'
)%0A%0A
|
d308bbd0200e1b4783bf63cafda03650579b9351
|
change help text
|
ynr/apps/official_documents/models.py
|
ynr/apps/official_documents/models.py
|
import os
from django.db import models
from django.urls import reverse
from django_extensions.db.models import TimeStampedModel
DOCUMENT_UPLOADERS_GROUP_NAME = "Document Uploaders"
def document_file_name(instance, filename):
return os.path.join(
"official_documents", str(instance.ballot.ballot_paper_id), filename
)
class OfficialDocument(TimeStampedModel):
NOMINATION_PAPER = "Nomination paper"
DOCUMENT_TYPES = (
(NOMINATION_PAPER, "Nomination paper", "Nomination papers"),
)
document_type = models.CharField(
blank=False,
choices=[(d[0], d[1]) for d in DOCUMENT_TYPES],
max_length=100,
)
uploaded_file = models.FileField(
upload_to=document_file_name, max_length=800
)
ballot = models.ForeignKey(
"candidates.Ballot", null=False, on_delete=models.CASCADE
)
source_url = models.URLField(
help_text="The page that links to this document", max_length=1000
)
relevant_pages = models.CharField(
"The pages containing information about this ballot",
max_length=50,
default="",
)
class Meta:
get_latest_by = "modified"
def __str__(self):
return "{} ({})".format(self.ballot.ballot_paper_id, self.source_url)
def get_absolute_url(self):
return reverse(
"ballot_paper_sopn",
kwargs={"ballot_id": self.ballot.ballot_paper_id},
)
@property
def locked(self):
"""
Is this post election locked?
"""
return self.ballot.candidates_locked
@property
def lock_suggested(self):
"""
Is there a suggested lock for this document?
"""
return self.ballot.suggestedpostlock_set.exists()
def get_pages(self):
if self.relevant_pages and not self.relevant_pages == "all":
pages = self.relevant_pages.split(",")
return sorted(int(p) for p in pages)
@property
def first_page_number(self):
if self.get_pages():
return self.get_pages()[0]
@property
def last_page_number(self):
if self.get_pages():
return self.get_pages()[-1]
|
Python
| 0.000029
|
@@ -923,26 +923,14 @@
The
-page that links to
+URL of
thi
|
cedae39716587fcc0459a05e74acc43b190d7457
|
split download
|
example-era5.py
|
example-era5.py
|
#!/usr/bin/env python
# (C) Copyright 2018 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
import cdsapi
c = cdsapi.Client()
r = c.retrieve("reanalysis-era5-pressure-levels",
{
"variable": "temperature",
"pressure_level": "250",
"product_type": "reanalysis",
"date": "2017-12-01/2017-12-31",
"time": "12:00",
"format": "grib"
})
r.download("dowload.grib")
print(r)
r.delete()
|
Python
| 0.000005
|
@@ -779,24 +779,5 @@
b%22)%0A
-print(r)%0Ar.delete()
%0A
|