| commit (string, 40 chars) | subject (string, 1-3.25k chars) | old_file (string, 4-311 chars) | new_file (string, 4-311 chars) | old_contents (string, 0-26.3k chars) | lang (string, 3 classes) | proba (float64, 0-1) | diff (string, 0-7.82k chars) |
|---|---|---|---|---|---|---|---|
6e05b20f070e31948c3cea1b9ec0ab9e2432f1e4
|
Fix failing s3 test
|
s3backup/clients/s3.py
|
s3backup/clients/s3.py
|
# -*- coding: utf-8 -*-
import collections
import datetime
import fnmatch
import json
import logging
import os
import zlib
from botocore.exceptions import ClientError
import magic
from s3backup.clients import SyncClient, SyncObject
logger = logging.getLogger(__name__)
S3Uri = collections.namedtuple('S3Uri', ['bucket', 'key'])
def parse_s3_uri(uri):
if not uri.startswith('s3://'):
return None
tokens = uri.replace('s3://', '').split('/')
if len(tokens) < 2:
return None
bucket = tokens[0]
key = '/'.join(tokens[1:])
return S3Uri(bucket, key)
def to_timestamp(dt):
epoch = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
return (dt - epoch) / datetime.timedelta(seconds=1)
class S3SyncClient(SyncClient):
def __init__(self, boto, bucket, prefix):
self.boto = boto
self.bucket = bucket
self.prefix = prefix
self.index = self.load_index()
self.reload_ignore_files()
def __repr__(self):
return 'S3SyncClient<{}, {}>'.format(self.bucket, self.prefix)
def get_uri(self):
return 's3://{}/{}'.format(self.bucket, self.prefix)
def index_path(self):
return os.path.join(self.prefix, '.index')
def put(self, key, sync_object, callback=None):
self.boto.upload_fileobj(
Bucket=self.bucket,
Key=os.path.join(self.prefix, key),
Fileobj=sync_object.fp,
Callback=callback,
)
self.set_remote_timestamp(key, sync_object.timestamp)
def get(self, key):
try:
resp = self.boto.get_object(
Bucket=self.bucket,
Key=os.path.join(self.prefix, key),
)
return SyncObject(
resp['Body'],
resp['ContentLength'],
to_timestamp(resp['LastModified']),
)
except ClientError:
return None
def delete(self, key):
resp = self.boto.delete_objects(
Bucket=self.bucket,
Delete={
'Objects': [{'Key': os.path.join(self.prefix, key)}]
}
)
return 'Deleted' in resp
def load_index(self):
try:
resp = self.boto.get_object(
Bucket=self.bucket,
Key=self.index_path(),
)
body = resp['Body'].read()
content_type = magic.from_buffer(body, mime=True)
if content_type == 'text/plain':
logger.debug('Detected plain text encoding for index')
return json.loads(body.decode('utf-8'))
elif content_type == 'application/zlib':
logger.debug('Detected zlib encoding for index')
body = zlib.decompress(body)
return json.loads(body.decode('utf-8'))
elif content_type == 'application/x-empty':
return {}
else:
raise ValueError('Unknown content type for index', content_type)
except (ClientError):
return {}
def reload_index(self):
self.index = self.load_index()
def flush_index(self, compressed=True):
data = json.dumps(self.index).encode('utf-8')
if compressed:
logger.debug('Using zlib encoding for writing index')
data = zlib.compress(data)
else:
logger.debug('Using plain text encoding for writing index')
self.boto.put_object(
Bucket=self.bucket,
Key=self.index_path(),
Body=data,
)
def get_local_keys(self):
results = []
resp = self.boto.list_objects_v2(
Bucket=self.bucket,
Prefix=self.prefix,
)
if 'Contents' not in resp:
return results
for obj in resp['Contents']:
key = os.path.relpath(obj['Key'], self.prefix)
if not any(fnmatch.fnmatch(key, pattern) for pattern in self.ignore_files):
results.append(key)
else:
logger.debug('Ignoring %s', key)
return results
def get_real_local_timestamp(self, key):
try:
response = self.boto.head_object(
Bucket=self.bucket,
Key=os.path.join(self.prefix, key),
)
return to_timestamp(response['LastModified'])
except ClientError:
return None
def get_index_keys(self):
return self.index.keys()
def get_index_local_timestamp(self, key):
return self.index.get(key, {}).get('local_timestamp')
def set_index_local_timestamp(self, key, timestamp):
if key not in self.index:
self.index[key] = {}
self.index[key]['local_timestamp'] = timestamp
def get_remote_timestamp(self, key):
return self.index.get(key, {}).get('remote_timestamp')
def set_remote_timestamp(self, key, timestamp):
if key not in self.index:
self.index[key] = {}
self.index[key]['remote_timestamp'] = timestamp
def get_all_real_local_timestamps(self):
result = {}
resp = self.boto.list_objects_v2(
Bucket=self.bucket,
Prefix=self.prefix,
)
for obj in resp.get('Contents', []):
key = os.path.relpath(obj['Key'], self.prefix)
if not any(fnmatch.fnmatch(key, pattern) for pattern in self.ignore_files):
result[key] = to_timestamp(obj['LastModified'])
return result
def get_all_remote_timestamps(self):
return {key: value['remote_timestamp'] for key, value in self.index.items()}
def get_all_index_local_timestamps(self):
return {key: value['local_timestamp'] for key, value in self.index.items()}
def reload_ignore_files(self):
self.ignore_files = ['.index']
try:
response = self.boto.get_object(
Bucket=self.bucket,
Key=os.path.join(self.prefix, '.syncignore')
)
data = response['Body'].read()
data = data.decode('utf8')
ignore_list = data.split('\n')
self.ignore_files.extend(ignore_list)
except ClientError:
pass
|
Python
| 0.000056
|
@@ -3927,38 +3927,113 @@
ix)
-if not
+key_parts = key.split('/')
+
+ for part in key_parts:
+ if
any(fnmatch.fnm
@@ -4029,35 +4029,36 @@
fnmatch.fnmatch(
-key
+part
, pattern) for p
@@ -4107,27 +4107,70 @@
-results.append(key)
+ logger.debug('Ignoring %s', key)
+ break

@@ -4204,36 +4204,23 @@
-logger.debug('Ignoring %s',
+results.append(
key)
|
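Decoded, the diff above rewrites `get_local_keys` so that ignore patterns are matched against each `/`-separated component of a key instead of the whole key. A minimal standalone sketch of that filtering idea (the function name and sample data are illustrative, not from the repository):

```python
import fnmatch

def filter_ignored(keys, ignore_files):
    """Keep keys none of whose '/'-separated components match an ignore pattern."""
    results = []
    for key in keys:
        # A pattern such as '.git' now also ignores nested paths
        # like '.git/config', because each component is tested.
        if any(fnmatch.fnmatch(part, pattern)
               for part in key.split('/')
               for pattern in ignore_files):
            continue
        results.append(key)
    return results

assert filter_ignored(['a.txt', '.git/config'], ['.git']) == ['a.txt']
```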
a766bfa315f7c91f672f59bdd1b606d50467c332
|
Bump version.
|
src/flask_components/_version.py
|
src/flask_components/_version.py
|
# -*- coding: utf-8 -*-
__version_info__ = (0, 1, 0)
__version__ = '.'.join(map(str, __version_info__))
|
Python
| 0
|
@@ -43,17 +43,17 @@
(0, 1,
-0
+1
)
__vers
|
780cdf506060ad550355e4b8928743944198ac74
|
Test config
|
tests/test.py
|
tests/test.py
|
import json
from unittest import TestCase
import requests
from PIL import Image
from httmock import urlmatch, HTTMock
from redis import Redis
from app import app
import os
@urlmatch(netloc=r'(.*\.)?test.server\.com$')
def get_image_mock(url, request):
return open('tests/test_resources/heman.png', 'r').read()
class ImageResizeTest(TestCase):
def setUp(self):
self.r = Redis()
self.app = app.test_client()
self.test_image_name = 'test_image.jpg'
self.base_url = '/v1/resizer/{}'
self.get_image_url = self.base_url.format(
'?width={}&height={}&file={}'.format(
0, 100, 'http://test.server.com/heman.png'
)
)
self.bad_argument_url = self.base_url.format('?file=http://test.server.com/heman.png')
def tearDown(self):
self.r.delete('heman.png_0_100')
try:
os.remove(self.test_image_name)
except OSError:
pass
class TestResizeImage(ImageResizeTest):
def make_request(self, width, height):
with HTTMock(get_image_mock):
response = self.app.get(self.get_image_url)
image = self.r.get('heman.png_0_100')
with open(self.test_image_name, 'w') as f:
f.write(image)
width, height = Image.open(self.test_image_name).size
assert response.status_code == 200
assert 'image/jpeg' in response.content_type
assert 'heman.png_0_100' in self.r.keys('heman.png_0_100')
assert image is not None
return width, height
def test_file_exists_already(self):
with HTTMock(get_image_mock):
response = self.app.get(self.get_image_url)
with HTTMock(get_image_mock):
response_2 = self.app.get(self.get_image_url)
assert response_2.status_code == 200
def test_height(self):
width, height = self.make_request(0, 100)
assert height is not None
assert width is not None
def test_width(self):
width, height = self.make_request(100, 0)
assert width is not None
assert height is not None
def test_bad_argument(self):
with HTTMock(get_image_mock):
response = self.app.get(self.bad_argument_url)
assert response.status_code == 400
class TestUtils(TestCase):
def setUp(self):
self.app = app.test_client()
def test_ping(self):
response = self.app.get('/v1/utils/ping')
assert response.status_code == 200
assert response.get_data() == 'pong'
|
Python
| 0.000001
|
@@ -2628,12 +2628,765 @@
) == 'pong'
+
+import config
+
+
+class TestConfig(TestCase):
+ def setUp(self):
+ self.statsd_config = 'statsd_config'
+ self._create_statsd_config()
+
+ def tearDown(self):
+ try:
+ os.remove(self.statsd_config)
+ except OSError:
+ pass
+
+ def _create_statsd_config(self):
+ config = {
+ "statsd_host": "localhost",
+ "statsd_port": 8125
+ }
+ with open(self.statsd_config, 'w') as f:
+ f.write(json.dumps(config))
+
+ def test_statsd_config(self):
+ config._load_statsd_config(app, self.statsd_config)
+ statsd = app.config['STATSD']
+ assert isinstance(statsd, dict)
+ assert statsd['host'] == 'localhost'
+ assert statsd['port'] == 8125
|
9a24e9e99b4060a1e0fc83af22bc2772733eb15d
|
oopses. fixed decode
|
cypher/cypher.py
|
cypher/cypher.py
|
"""Use Caesar shift to encode or decode your messages!"""
import os
import discord
from .utils.dataIO import fileIO, dataIO
from discord.ext import commands
from __main__ import send_cmd_help
from .utils import checks
__author__ = "FwiedWice"
__version__ = "V1.0.0"
class Code:
def __init__(self, chars):
self.chars = list(chars)
async def code(self, number, en_or_de, text):
to_process = list(text)
length = len(self.chars)
i = 0
result = []
if en_or_de == "de":
number = 0 - number
while i < len(to_process):
if to_process[i].lower() in self.chars:
index = self.chars.index(to_process[i].lower())
index += number
if index < 0:
index += length
if index >= length:
index -= length
letter = self.chars[index]
if to_process[i].lower() != to_process[i]:
letter = letter.upper()
result.append(letter)
else:
result.append(to_process[i])
i += 1
return "".join(result)
class Cypher:
def __init__(self, bot):
self.bot = bot
self.profile = "data/cypher/charset.json"
self.riceCog = dataIO.load_json(self.profile)
self.chars = "abcdefghijklmnopqrstuvwxyz"
@commands.group(pass_context=True)
async def cypher(self, ctx):
if not ctx.invoked_subcommand:
await send_cmd_help(ctx)
@checks.admin_or_permissions(manage_server=True)
@cypher.command(pass_context=True)
async def delmsg(self, ctx):
"""Toggle whether to delete the message that needs to
be encoded/decoded"""
server = ctx.message.server
do_del = True
if server.id not in self.riceCog:
self.riceCog[server.id] = {}
if 'del' in self.riceCog[server.id]:
do_del = self.riceCog[server.id]['del']
do_del = not do_del
self.riceCog[server.id]['del'] = do_del
dataIO.save_json(self.profile, self.riceCog)
if do_del:
await self.bot.say("Message will be deleted.")
else:
await self.bot.say("Message will not be deleted.")
@checks.admin_or_permissions(manage_server=True)
@cypher.command(pass_context=True)
async def reset(self, ctx, chars = None):
"""Reset server settings"""
server = ctx.message.server
if server.id in self.riceCog:
del self.riceCog[server.id]
dataIO.save_json(self.profile, self.riceCog)
await self.bot.say("Server settings deleted.")
@checks.admin_or_permissions(manage_server=True)
@cypher.command(pass_context=True)
async def setchar(self, ctx, chars = None):
"""Set character set for [p]encode/decode commands"""
server = ctx.message.server
if not chars:
msg = ("```Set a character set. Example:\n\n"
"*abcdefghijklmnopqrstuvwxyz*\n\n Those will be the "
"characters that [p]encode/decode will work with.")
await self.bot.say(msg)
return
if len(chars) > 100:
await self.bot.say("```Hold up a second... you don't need more than"
" 100 characters.```")
return
if server.id not in self.riceCog:
self.riceCog[server.id] = {}
self.riceCog[server.id]['charset'] = chars
dataIO.save_json(self.profile, self.riceCog)
await self.bot.say("```The character set is now:\n\n*{}*```".format(chars))
@commands.command(pass_context=True)
async def encode(self, ctx, how_much : int, *, message):
"""Encode a message!
how_much is the amount you want to shift/encode your message by.
"""
server = ctx.message.server
channel = ctx.message.channel
chars = self.chars
do_del = False
if server.id in self.riceCog:
if 'charset' in self.riceCog[server.id]:
chars = self.riceCog[server.id]['charset']
if 'del' in self.riceCog[server.id]:
do_del = self.riceCog[server.id]['del']
if do_del:
try:
await self.bot.delete_message(ctx.message)
except discord.errors.Forbidden:
await self.bot.say("Tried to delete the message but can't! \n"
"Lacking permissions.")
tool = Code(chars)
result = await tool.code(how_much, 'en', message)
await self.bot.say("```You're encoded message is:\n\n{}```".format(result))
@commands.command(pass_context=True)
async def decode(self, ctx, how_much : int, *, message):
"""Decode a message!
how_much is the amount you want to shift/decode your message by.
"""
server = ctx.message.server
channel = ctx.message.channel
chars = self.chars
do_del = False
if server.id in self.riceCog:
if 'chars' in self.riceCog[server.id]:
chars = self.riceCog[server.id]['charset']
if 'del' in self.riceCog[server.id]:
do_del = self.riceCog[server.id]['del']
if do_del:
try:
await self.bot.delete_message(ctx.message)
except discord.errors.Forbidden:
await self.bot.say("Tried to delete the message but can't! \n"
"Lacking permissions.")
tool = Code(chars)
result = await tool.code(how_much, 'en', message)
await self.bot.say("```You're decoded message is:\n\n{}```".format(result))
def check_folder():
if not os.path.exists("data/cypher"):
print("Creating data/cypher/server.id folder")
os.makedirs("data/cypher")
def check_file():
data = {}
f = "data/cypher/charset.json"
if not dataIO.is_valid_json(f):
print("Creating data/cypher/charset.json")
dataIO.save_json(f, data)
def setup(bot):
check_folder()
check_file()
bot.add_cog(Cypher(bot))
|
Python
| 0.999999
|
@@ -5651,34 +5651,34 @@
code(how_much, '
+d
e
-n
', message)
|
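The one-character fix above changes `tool.code(how_much, 'en', message)` to `tool.code(how_much, 'de', message)` in `decode`, so decoding actually negates the shift. A self-contained sketch of the same Caesar-shift logic, with decoding expressed as a negative shift (simplified from the `Code.code` method; names are illustrative):

```python
def caesar(text, shift, chars="abcdefghijklmnopqrstuvwxyz"):
    """Shift characters found in `chars` by `shift`, preserving case;
    anything else passes through unchanged."""
    out = []
    for ch in text:
        low = ch.lower()
        if low in chars:
            shifted = chars[(chars.index(low) + shift) % len(chars)]
            out.append(shifted.upper() if ch != low else shifted)
        else:
            out.append(ch)
    return ''.join(out)

encoded = caesar("Hello, world!", 3)           # 'Khoor, zruog!'
assert caesar(encoded, -3) == "Hello, world!"  # decode = negative shift
```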
f84aa449780f2645a89c3fb015a2235389937ec5
|
Clean up mongo fixtures a bit
|
blaze/tests/test_mongo.py
|
blaze/tests/test_mongo.py
|
from __future__ import absolute_import, division, print_function
import pytest
pymongo = pytest.importorskip('pymongo')
try:
pymongo.MongoClient()
except pymongo.errors.ConnectionFailure:
pytest.importorskip('fhskjfdskfhsf')
from datashape import discover, dshape
from contextlib import contextmanager
from toolz.curried import get
from blaze import drop, into
conn = pymongo.MongoClient()
db = conn.test_db
@contextmanager
def collection(data=None):
if data is None:
data = []
coll = db.tmp_collection
if data:
coll = into(coll, data)
try:
yield coll
finally:
coll.drop()
bank = [{'name': 'Alice', 'amount': 100},
{'name': 'Alice', 'amount': 200},
{'name': 'Bob', 'amount': 100},
{'name': 'Bob', 'amount': 200},
{'name': 'Bob', 'amount': 300}]
def test_discover():
with collection(bank) as coll:
assert discover(coll) == dshape('5 * {amount: int64, name: string}')
def test_into():
with collection([]) as coll:
key = get(['name', 'amount'])
assert set(into([], into(coll, bank), columns=['name', 'amount'])) ==\
set([('Alice', 100), ('Alice', 200), ('Bob', 100),
('Bob', 200), ('Bob', 300)])
@pytest.yield_fixture
def mongo():
pymongo = pytest.importorskip('pymongo')
conn = pymongo.MongoClient()
db = conn.test_db
db.tmp_collection.insert(bank)
yield conn
conn.close()
def test_drop(mongo):
db = mongo.test_db
drop(db.tmp_collection)
assert db.tmp_collection.count() == 0
|
Python
| 0
|
@@ -271,76 +271,8 @@
hape
-
-from contextlib import contextmanager
-from toolz.curried import get

fr
@@ -297,16 +297,30 @@
op, into
+, create_index

conn =
@@ -365,211 +365,250 @@
db

-
-@contextmanager
-def collection(data=None):
- if data is None:
- data = []
- coll = db.tmp_collection
- if data:
- coll = into(coll, data)
-
- try:
- yield coll
- finally:
+from pymongo import ASCENDING, DESCENDING
+
+
+@pytest.yield_fixture
+def empty_collec():
+ yield db.tmp_collection
+ db.tmp_collection.drop()
+
+
+@pytest.yield_fixture
+def bank_collec():
+ coll = into(db.tmp_collection, bank)
+ yield coll
@@ -826,16 +826,17 @@
300}]

+
def test
@@ -849,50 +849,22 @@
ver(
-):
- with collection(bank) as coll:
+bank_collec):
@@ -879,20 +879,27 @@
iscover(
+bank_
coll
+ec
) == dsh
@@ -959,96 +959,32 @@
nto(
-):
- with collection([]) as coll:
- key = get(['name', 'amount'])
- assert
+empty_collec):
+ lhs =
set
@@ -998,20 +998,28 @@
], into(
+empty_
coll
+ec
, bank),
@@ -1051,28 +1051,18 @@
']))
- ==\

-
+rhs =
set
@@ -1108,23 +1108,31 @@
', 100),
-
+ ('Bob', 200),
@@ -1150,28 +1150,36 @@
b',
-200), ('Bob', 300)])
+300)])
+ assert lhs == rhs


@
|
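The refactor above replaces the hand-rolled `collection` context manager with `pytest.yield_fixture`s (`empty_collec`, `bank_collec`) that tests receive as arguments. A minimal, self-contained illustration of the pattern with generic data (on modern pytest, plain `@pytest.fixture` supports `yield` directly):

```python
import pytest

# Code before `yield` is setup; code after it is teardown, and the
# teardown runs even when the test fails. Recent pytest spells this
# plain @pytest.fixture; the diff uses the older @pytest.yield_fixture.
@pytest.fixture
def tmp_records():
    records = ['Alice', 'Bob']   # setup
    yield records                # the test body runs here
    records.clear()              # teardown

def test_records(tmp_records):
    assert len(tmp_records) == 2
```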
36408b92a74b8f9963686d215b26de57b429cd6c
|
Fix test_table.py record syntax.
|
blaze/tests/test_table.py
|
blaze/tests/test_table.py
|
from blaze import dshape
from blaze import NDTable, Table, NDArray, Array
def test_arrays():
# Assert that the pretty pritner works for all of the
# toplevel structures
expected_ds = dshape('3, int')
a = NDArray([1,2,3])
str(a)
repr(a)
a.datashape._equal(expected_ds)
a = Array([1,2,3])
str(a)
repr(a)
a.datashape._equal(expected_ds)
def test_record():
expected_ds = dshape('1, {x: int32; y: float32}')
t = NDTable([(1, 2.1), (2, 3.1)], dshape='1, {x: int32; y: float32}')
t.datashape._equal(expected_ds)
str(t)
repr(t)
def test_record_consume():
expected_ds = dshape("4, {i: int64, f: float64}")
d = {
'i' : [1, 2, 3, 4],
'f' : [4., 3., 2., 1.]
}
t = NDTable(d)
t.datashape._equal(expected_ds)
def test_record_consume2():
d = {
'a' : ["foo", "bar"],
'b' : [4., 3., 2., 1.]
}
table = NDTable(d)
|
Python
| 0.000002
|
@@ -651,17 +651,17 @@
i: int64
-,
+;
f: floa
|
3ffe0a9f5a863c9647279167f94076d878683622
|
Add FUZZING_LANGUAGE to build step. (#3835)
|
infra/gcb/build_lib.py
|
infra/gcb/build_lib.py
|
"""Utility module for Google Cloud Build scripts."""
import base64
import collections
import os
import requests
import sys
import time
import urllib
import urlparse
from oauth2client.service_account import ServiceAccountCredentials
BUILD_TIMEOUT = 12 * 60 * 60
# Needed for reading public target.list.* files.
GCS_URL_BASENAME = 'https://storage.googleapis.com/'
GCS_UPLOAD_URL_FORMAT = '/{0}/{1}/{2}'
# Where corpus backups can be downloaded from.
CORPUS_BACKUP_URL = ('/{project}-backup.clusterfuzz-external.appspot.com/'
'corpus/libFuzzer/{fuzzer}/latest.zip')
# Cloud Builder has a limit of 100 build steps and 100 arguments for each step.
CORPUS_DOWNLOAD_BATCH_SIZE = 100
TARGETS_LIST_BASENAME = 'targets.list'
EngineInfo = collections.namedtuple(
'EngineInfo',
['upload_bucket', 'supported_sanitizers', 'supported_architectures'])
ENGINE_INFO = {
'libfuzzer':
EngineInfo(upload_bucket='clusterfuzz-builds',
supported_sanitizers=['address', 'memory', 'undefined'],
supported_architectures=['x86_64', 'i386']),
'afl':
EngineInfo(upload_bucket='clusterfuzz-builds-afl',
supported_sanitizers=['address'],
supported_architectures=['x86_64']),
'honggfuzz':
EngineInfo(upload_bucket='clusterfuzz-builds-honggfuzz',
supported_sanitizers=['address'],
supported_architectures=['x86_64']),
'dataflow':
EngineInfo(upload_bucket='clusterfuzz-builds-dataflow',
supported_sanitizers=['dataflow'],
supported_architectures=['x86_64']),
'none':
EngineInfo(upload_bucket='clusterfuzz-builds-no-engine',
supported_sanitizers=['address'],
supported_architectures=['x86_64']),
}
def get_targets_list_filename(sanitizer):
return TARGETS_LIST_BASENAME + '.' + sanitizer
def get_targets_list_url(bucket, project, sanitizer):
filename = get_targets_list_filename(sanitizer)
url = GCS_UPLOAD_URL_FORMAT.format(bucket, project, filename)
return url
def _get_targets_list(project_name):
# libFuzzer ASan is the default configuration, get list of targets from it.
url = get_targets_list_url(ENGINE_INFO['libfuzzer'].upload_bucket,
project_name, 'address')
url = urlparse.urljoin(GCS_URL_BASENAME, url)
response = requests.get(url)
if not response.status_code == 200:
sys.stderr.write('Failed to get list of targets from "%s".\n' % url)
sys.stderr.write('Status code: %d \t\tText:\n%s\n' %
(response.status_code, response.text))
return None
return response.text.split()
def get_signed_url(path, method='PUT', content_type=''):
timestamp = int(time.time() + BUILD_TIMEOUT)
blob = '{0}\n\n{1}\n{2}\n{3}'.format(method, content_type, timestamp, path)
creds = ServiceAccountCredentials.from_json_keyfile_name(
os.environ['GOOGLE_APPLICATION_CREDENTIALS'])
client_id = creds.service_account_email
signature = base64.b64encode(creds.sign_blob(blob)[1])
values = {
'GoogleAccessId': client_id,
'Expires': timestamp,
'Signature': signature,
}
return ('https://storage.googleapis.com{0}?'.format(path) +
urllib.urlencode(values))
def download_corpora_steps(project_name):
"""Returns GCB steps for downloading corpora backups for the given project.
"""
fuzz_targets = _get_targets_list(project_name)
if not fuzz_targets:
sys.stderr.write('No fuzz targets found for project "%s".\n' % project_name)
return None
steps = []
# Split fuzz targets into batches of CORPUS_DOWNLOAD_BATCH_SIZE.
for i in range(0, len(fuzz_targets), CORPUS_DOWNLOAD_BATCH_SIZE):
download_corpus_args = []
for binary_name in fuzz_targets[i:i + CORPUS_DOWNLOAD_BATCH_SIZE]:
qualified_name = binary_name
qualified_name_prefix = '%s_' % project_name
if not binary_name.startswith(qualified_name_prefix):
qualified_name = qualified_name_prefix + binary_name
url = get_signed_url(CORPUS_BACKUP_URL.format(project=project_name,
fuzzer=qualified_name),
method='GET')
corpus_archive_path = os.path.join('/corpus', binary_name + '.zip')
download_corpus_args.append('%s %s' % (corpus_archive_path, url))
steps.append({
'name': 'gcr.io/oss-fuzz-base/base-runner',
'entrypoint': 'download_corpus',
'args': download_corpus_args,
'volumes': [{
'name': 'corpus',
'path': '/corpus'
}],
})
return steps
def http_upload_step(data, signed_url, content_type):
"""Returns a GCB step to upload data to the given URL via GCS HTTP API."""
step = {
'name':
'gcr.io/cloud-builders/curl',
'args': [
'-H',
'Content-Type: ' + content_type,
'-X',
'PUT',
'-d',
data,
signed_url,
],
}
return step
def gsutil_rm_rf_step(url):
"""Returns a GCB step to recursively delete the object with given GCS url."""
step = {
'name': 'gcr.io/cloud-builders/gsutil',
'entrypoint': 'sh',
'args': [
'-c',
'gsutil -m rm -rf %s || exit 0' % url,
],
}
return step
def project_image_steps(name, image, language):
"""Returns GCB steps to build OSS-Fuzz project image."""
steps = [{
'args': [
'clone',
'https://github.com/google/oss-fuzz.git',
],
'name': 'gcr.io/cloud-builders/git',
}, {
'name': 'gcr.io/cloud-builders/docker',
'args': [
'build',
'-t',
image,
'.',
],
'dir': 'oss-fuzz/projects/' + name,
}, {
'name':
image,
'args': [
'bash', '-c',
'srcmap > /workspace/srcmap.json && cat /workspace/srcmap.json'
],
'env': [
'OSSFUZZ_REVISION=$REVISION_ID',
'FUZZING_LANGUAGE=%s' % language,
],
}]
return steps
|
Python
| 0
|
@@ -5818,16 +5818,65 @@
+ name,
+ 'env': ['FUZZING_LANGUAGE=%s' % language],
}, {
|
a5c4e829b8e6fba1ab2cda6c8ae27b9292a38af0
|
Implement DefinitionContainer::getValue
|
UM/Settings/DefinitionContainer.py
|
UM/Settings/DefinitionContainer.py
|
# Copyright (c) 2016 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import json
import collections
import copy
from UM.Resources import Resources
from UM.PluginObject import PluginObject
from . import ContainerInterface
from . import SettingDefinition
class InvalidDefinitionError(Exception):
pass
class IncorrectDefinitionVersionError(Exception):
pass
class InvalidOverrideError(Exception):
pass
## A container for SettingDefinition objects.
#
#
class DefinitionContainer(ContainerInterface.ContainerInterface, PluginObject):
Version = 1
## Constructor
#
# \param container_id A unique, machine readable/writable ID for this container.
def __init__(self, container_id, i18n_catalog = None, *args, **kwargs):
super().__init__(*args, **kwargs)
self._id = container_id
self._name = container_id
self._metadata = {}
self._definitions = []
self._i18n_catalog = i18n_catalog
## Reimplement __setattr__ so we can make sure the definition remains unchanged after creation.
def __setattr__(self, name, value):
super().__setattr__(name, value)
#raise NotImplementedError()
## \copydoc ContainerInterface::getId
#
# Reimplemented from ContainerInterface
def getId(self):
return self._id
## \copydoc ContainerInterface::getName
#
# Reimplemented from ContainerInterface
def getName(self):
return self._name
## \copydoc ContainerInterface::getMetaData
#
# Reimplemented from ContainerInterface
def getMetaData(self):
return self._metadata
## \copydoc ContainerInterface::getMetaDataEntry
#
# Reimplemented from ContainerInterface
def getMetaDataEntry(self, entry, default = None):
return self._metadata.get(entry, default)
## \copydoc ContainerInterface::getValue
#
# Reimplemented from ContainerInterface
def getValue(self, key):
return None
## \copydoc ContainerInterface::serialize
#
# Reimplemented from ContainerInterface
def serialize(self):
return ""
## \copydoc ContainerInterface::deserialize
#
# Reimplemented from ContainerInterface
def deserialize(self, serialized):
parsed = json.loads(serialized, object_pairs_hook=collections.OrderedDict)
self._verifyJson(parsed)
# Pre-process the JSON data to include inherited data and overrides
if "inherits" in parsed:
inherited = self._resolveInheritance(parsed["inherits"])
parsed = self._mergeDicts(inherited, parsed)
if "overrides" in parsed:
for key, value in parsed["overrides"].items():
setting = self._findInDict(parsed["settings"], key)
setting.update(value)
# If we do not have metadata or settings the file is invalid
if not "metadata" in parsed:
raise InvalidDefinitionError("Missing required metadata section")
if not "settings" in parsed:
raise InvalidDefinitionError("Missing required settings section")
# Update properties with the data from the JSON
self._name = parsed["name"]
self._metadata = parsed["metadata"]
for key, value in parsed["settings"].items():
definition = SettingDefinition(key, self, None, self._i18n_catalog)
definition.deserialize(value)
self._definitions.append(definition)
## Find definitions matching certain criteria.
#
# \param criteria \type{dict} A dictionary containing key-value pairs which should match properties of the definition.
def findDefinitions(self, criteria):
definitions = []
for definition in self._definitions:
definitions.extend(definition.findDefinitions(criteria))
return definitions
# protected:
# Load a file from disk, used to handle inheritance and includes
def _loadFile(self, file_name):
path = Resources.getPath(Resources.Definitions, file_name + ".json")
contents = {}
with open(path) as f:
contents = json.load(f, object_pairs_hook=collections.OrderedDict)
return contents
# Recursively resolve loading inherited files
def _resolveInheritance(self, file_name):
result = {}
json = self._loadFile(file_name)
self._verifyJson(json)
if "inherits" in json:
inherited = self._resolveInheritance(json["inherits"])
json = self._mergeDicts(inherited, json)
print(json)
return json
# Verify that a loaded json matches our basic expectations.
def _verifyJson(self, json):
if not "version" in json:
raise InvalidDefinitionError("Missing required property 'version'")
if not "name" in json:
raise InvalidDefinitionError("Missing required property 'name'")
if json["version"] != self.Version:
raise IncorrectDefinitionVersionError("Definition uses version {0} but expected version {1}".format(json["version"], self.Version))
# Recursively find a key in a dicationary
def _findInDict(self, dict, key):
if key in dict:
return dict
result = None
for dict_key, value in dict.items():
self._findInDict(value, key)
return result
# Recursively merge two dictionaries, returning a new dictionary
def _mergeDicts(self, first, second):
result = copy.deepcopy(first)
for key, value in second.items():
if key in result:
if isinstance(value, dict):
result[key] = self._mergeDicts(result[key], value)
else:
result[key] = value
else:
result[key] = value
return result
|
Python
| 0
|
@@ -2006,18 +2006,152 @@
-return Non
+definitions = self.findDefinitions({"key": key})
+ if not definitions:
+ return None
+
+ return definitions[0].default_valu
e
|
3f90d0ec25491eb64f164180139d4baf9ff238a9
|
Sort the context list in alphabetical order
|
libravatar/context_processors.py
|
libravatar/context_processors.py
|
# Copyright (C) 2010 Jonathan Harker <jon@jon.geek.nz>
#
# This file is part of Libravatar
#
# Libravatar is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Libravatar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Libravatar. If not, see <http://www.gnu.org/licenses/>.
import settings
"""
Default useful variables for the base page template.
"""
def basepage(request):
context = {}
context["site_name"] = settings.SITE_NAME
context["libravatar_version"] = settings.LIBRAVATAR_VERSION
context["avatar_url"] = settings.AVATAR_URL
context["secure_avatar_url"] = settings.SECURE_AVATAR_URL
context["media_url"] = settings.MEDIA_URL
context["site_url"] = settings.SITE_URL
context["disable_signup"] = settings.DISABLE_SIGNUP
context["analytics_propertyid"] = settings.ANALYTICS_PROPERTYID
context['support_email'] = settings.SUPPORT_EMAIL
return context
|
Python
| 1
|
@@ -874,41 +874,63 @@
ext[
-"site_name"] = settings.SITE_NAME
+'analytics_propertyid'] = settings.ANALYTICS_PROPERTYID

@@ -942,28 +942,20 @@
ext[
-"libr
+'
avatar_
-version"
+url'
] =
@@ -967,26 +967,18 @@
ngs.
-LIBR
AVATAR_
-VERSION
+URL

@@ -990,105 +990,115 @@
ext[
-"avatar_url"] = settings.AVATAR_URL
- context["secure_avatar_url"] = settings.SECURE_AVATAR_URL
+'disable_signup'] = settings.DISABLE_SIGNUP
+ context['libravatar_version'] = settings.LIBRAVATAR_VERSION

@@ -1106,17 +1106,17 @@
context[
-"
+'
media_ur
@@ -1112,25 +1112,25 @@
t['media_url
-"
+'
] = settings
@@ -1156,18 +1156,27 @@
ext[
-"site
+'secure_avatar
_url
-"
+'
] =
@@ -1189,11 +1189,20 @@
gs.S
-ITE
+ECURE_AVATAR
_URL
@@ -1218,119 +1218,85 @@
ext[
-"disable_signup"] = settings.DISABLE_SIGNUP
- context["analytics_propertyid"] = settings.ANALYTICS_PROPERTYID
+'site_name'] = settings.SITE_NAME
+ context['site_url'] = settings.SITE_URL
|
b22c41f7c4047f576420feec7419e6a6141a77a5
|
Set is_agency_admin to false when deactivating an agency
|
app/agency/utils.py
|
app/agency/utils.py
|
from datetime import datetime
from flask_login import current_user
from app.lib.utils import eval_request_bool
from app.lib.db_utils import (
update_object,
create_object
)
from app.models import (
Agencies,
Events,
AgencyUsers
)
from app.constants.event_type import (
AGENCY_ACTIVATED,
AGENCY_DEACTIVATED,
AGENCY_USER_DEACTIVATED
)
from app.admin.utils import get_agency_active_users
def update_agency_active_status(agency_ein, is_active):
"""
Update the active status of an agency.
:param agency_ein: String identifier for agency (4 characters)
:param is_active: Boolean value for agency active status (True = Active)
:return: Boolean value (True if successfully changed active status)
"""
agency = Agencies.query.filter_by(ein=agency_ein).first()
is_valid_agency = agency is not None
if is_active is not None and is_valid_agency:
update_object(
{'is_active': eval_request_bool(is_active)},
Agencies,
agency_ein
)
if is_active == "true":
create_object(
Events(
request_id=None,
user_guid=current_user.guid,
auth_user_type=current_user.auth_user_type,
type_=AGENCY_ACTIVATED,
previous_value={"ein": agency_ein, "is_active": "False"},
new_value={"ein": agency_ein, "is_active": "True"},
timestamp=datetime.utcnow()
)
)
# create request documents
for request in agency.requests:
request.es_create()
return True
if is_active == "false":
create_object(
Events(
request_id=None,
user_guid=current_user.guid,
auth_user_type=current_user.auth_user_type,
type_=AGENCY_DEACTIVATED,
previous_value={"ein": agency_ein, "is_active": "True"},
new_value={"ein": agency_ein, "is_active": "False"},
timestamp=datetime.utcnow()
)
)
# remove requests from index
for request in agency.requests:
request.es_delete()
# deactivate agency users
active_users = get_agency_active_users(agency_ein)
for user in active_users:
update_object(
{"is_agency_active": "False"},
AgencyUsers,
(user.guid, user.auth_user_type, agency_ein)
)
create_object(
Events(
request_id=None,
user_guid=current_user.guid,
auth_user_type=current_user.auth_user_type,
type_=AGENCY_USER_DEACTIVATED,
previous_value={"user_guid": user.guid,
"auth_user_type": user.auth_user_type,
"ein": agency_ein,
"is_active": "True"},
new_value={"user_guid": user.guid,
"auth_user_type": user.auth_user_type,
"ein": agency_ein,
"is_active": "False"},
timestamp=datetime.utcnow()
)
)
return True
return False
def get_agency_feature(agency_ein, feature):
"""
Retrieve the specified agency feature for the specified agency.
:param agency_ein: String identifier for agency (4 characters)
:param feature: Feature specified. See app/lib/constants/agency_features.py for possible values (String)
:return: JSON Object
"""
agency_features = get_agency_features(agency_ein)
if agency_features is not None and feature in agency_features:
return {feature: agency_features[feature]}
return None
def get_agency_features(agency_ein):
"""
Retrieve the agency features JSON object for the specified agency.
:param agency_ein: String identifier for agency (4 characters)
:return: JSON Object
"""
is_valid_agency = Agencies.query.filter_by(ein=agency_ein).first() is not None
if is_valid_agency:
agency_features = Agencies.query.filter_by(ein=agency_ein).first().agency_features
return agency_features
return None
|
Python
| 0.000359
|
@@ -2518,32 +2518,81 @@
_agency_active":
+ "False",
+ "is_agency_admin":
"False"},
@@ -3328,37 +3328,32 @@
-
"auth_user_type"
@@ -3402,37 +3402,32 @@
-
"ein": agency_ei
@@ -3421,37 +3421,32 @@
n": agency_ein,
-
|
34dc1c775e4808664dcdb5824b8f2ed5f12e94a1
|
add jsonp renderer and route for graph build status
|
app/app/__init__.py
|
app/app/__init__.py
|
from pyramid.config import Configurator
from sqlalchemy import engine_from_config
from .models import (
DBSession,
Base,
)
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
Base.metadata.bind = engine
config = Configurator(settings=settings)
config.add_static_view('static', 'static', cache_max_age=3600)
config.add_route('site_graph', '/site/{code}')
config.add_route('entity_graph', '/entity/{id}')
config.scan()
return config.make_wsgi_app()
|
Python
| 0
|
@@ -33,16 +33,52 @@
gurator
+from pyramid.renderers import JSONP
from sql
@@ -441,16 +441,79 @@
ttings)
+ config.add_renderer('jsonp', JSONP(param_name='callback'))
conf
@@ -671,24 +671,66 @@
tity/{id}')
+ config.add_route('status', '/status')
config.s
|
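The diff above registers Pyramid's JSONP renderer and a `status` route. A view attached to that route would typically opt into the renderer by name; a hedged sketch (the view body is hypothetical, the row does not show the project's actual view code):

```python
from pyramid.view import view_config

@view_config(route_name='status', renderer='jsonp')
def build_status(request):
    # A returned dict is rendered as JSON, or wrapped as
    # callback({...}) when the request carries ?callback=...
    return {'status': 'ok'}   # hypothetical payload
```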
06ba84a8d6bd31060d6ea3a6f7991f8a1b438476
|
Add the ability to search for *_requires argument of setup() in setup.py
|
pyp2rpmlib/metadata_extractors.py
|
pyp2rpmlib/metadata_extractors.py
|
import functools
import os
from pyp2rpmlib.package_data import PypiData, LocalData
from pyp2rpmlib import settings
def memoize_by_args(func):
memory = {}
@functools.wraps(func)
def memoized(*args):
if not args in memory.keys():
value = func(*args)
memory[args] = value
return memory[args]
return memoized
class MetadataExtractor(object):
def __init__(self, local_file, name, version):
self.local_file = local_file
self.name = name
self.version = version
def extract_data(self):
raise NotImplementedError('Whoops, do_extraction method not implemented by %s.' % self.__class__)
def get_extractor_cls(self, suffix):
file_cls = None
# only catches ".gz", even from ".tar.gz"
if suffix in ['.tar', '.gz', '.bz2']:
from tarfile import TarFile
file_cls = TarFile
elif suffix in ['.zip']:
from zipfile import ZipFile
file_cls = ZipFile
else:
pass
# TODO: log that file has unextractable archive suffix and we can't look inside the archive
return file_cls
@memoize_by_args
def get_content_of_file_from_archive(self, name): # TODO: extend to be able to match whole path in archive
suffix = os.path.splitext(self.local_file)[1]
extractor = self.get_extractor_cls(suffix)
if extractor:
with extractor.open(name = self.local_file) as opened_file:
for member in opened_file.getmembers():
if os.path.basename(member.name) == name:
extracted = opened_file.extractfile(member)
return extracted.read()
return None
def find_setup_argument(self, setup_argument):
pass
def requires_from_setup_py(self): # install_requires
pass
def build_requires_from_setup_py(self): # setup_requires
pass
def has_file_with_suffix(self, suffixes):
name, suffix = os.path.splitext(self.local_file)
extractor = self.get_extractor_cls(suffix)
has_file = False
if extractor:
with extractor.open(name = self.local_file) as opened_file:
for member in opened_file.getmembers():
if os.path.splitext(member.name)[1] in suffixes:
has_file = True
def has_bundled_egg_info(self):
return self.has_file_with_suffix('.egg-info')
def has_extension(self):
return self.has_file_with_suffix(settings.EXTENSION_SUFFIXES)
class PypiMetadataExtractor(MetadataExtractor):
def __init__(self, local_file, name, version, client):
super(PypiMetadataExtractor, self).__init__(local_file, name, version)
self.client = client
def extract_data(self):
release_urls = self.client.release_urls(self.name, self.version)[0]
release_data = self.client.release_data(self.name, self.version)
data = PypiData(self.local_file, self.name, self.version, release_urls['md5_digest'], release_urls['url'])
for data_field in settings.PYPI_USABLE_DATA:
setattr(data, data_field, release_data.get(data_field, None))
# if license is not known, try to extract if from trove classifiers
if data.license in [None, 'UNKNOWN']:
data.license = []
for classifier in release_data['classifiers']:
if classifier.find('License') != -1:
data.license.append(settings.TROVE_LICENSES.get(classifier, 'UNKNOWN'))
data.license = ' AND '.join(data.license)
data.has_extension = self.has_extension()
data.has_bundled_egg_info = self.has_bundled_egg_info()
return data
class LocalMetadataExtractor(MetadataExtractor):
def __init__(self, local_file, name, version):
super(LocalMetadataExtractor, self).__init__(local_file, name, version)
|
Python
| 0
|
@@ -19,16 +19,26 @@
mport os
+
+import re

from p
@@ -1785,24 +1785,33 @@
find_setup_
+requires_
argument(sel
@@ -1829,29 +1829,686 @@
gument):
-
- pass
+ # very stupid method to find one of *_requires arguments
+ setup_py = self.get_content_of_file_from_archive('setup.py')
+ if not setup_py: return ""
+
+ argument = []
+ start_braces = end_braces = 0
+ cont = False
+
+ for line in setup_py.splitlines():
+ if line.find(setup_argument) != -1 or cont:
+ start_braces += line.count('[')
+ end_braces += line.count(']')
+
+ cont = True
+ argument.append(line)
+ if start_braces == end_braces:
+ break
+
+ argument[-1] = argument[-1].rstrip().rstrip(',')
+ return ' '.join(argument)

de
|
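Decoded, the added `find_setup_requires_argument` method scans `setup.py` line by line and collects a `*_requires` argument by counting `[` and `]` until they balance. A self-contained sketch of that brace-counting idea (like the original, it only handles list-valued arguments; names and sample input are illustrative):

```python
def find_requires_argument(setup_py_text, setup_argument):
    """Collect the lines of a list-valued setup() argument by counting
    '[' and ']' until they balance, as the method above does."""
    argument = []
    start_braces = end_braces = 0
    cont = False
    for line in setup_py_text.splitlines():
        if setup_argument in line or cont:
            start_braces += line.count('[')
            end_braces += line.count(']')
            cont = True
            argument.append(line.strip())
            if start_braces == end_braces:
                break
    return ' '.join(argument).rstrip(',')

text = "setup(\n    install_requires=[\n        'requests',\n        'six',\n    ],\n)"
print(find_requires_argument(text, 'install_requires'))
# -> install_requires=[ 'requests', 'six', ]
```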
afe3534c29a8c2d566324adf7d1e95e5d0395627
|
Define websocket resources at module level
|
app/dermshare-ws.py
|
app/dermshare-ws.py
|
#!/usr/bin/env python
#
# DermShare -- WebSocket server for mobile devices
#
# Copyright (c) 2015 Carnegie Mellon University
# All rights reserved.
#
# This software is distributed under the terms of the Eclipse Public
# License, Version 1.0 which can be found in the file named LICENSE.
# ANY USE, REPRODUCTION OR DISTRIBUTION OF THIS SOFTWARE CONSTITUTES
# RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT
#
from argparse import ArgumentParser
import base64
from cStringIO import StringIO
from geventwebsocket import WebSocketServer, WebSocketApplication, Resource
import json
import os
import qrcode
import random
from urlparse import urljoin
try:
from collections import OrderedDict
except ImportError:
# Python 2.6
from ordereddict import OrderedDict
class _WSError(Exception):
def __init__(self, code, msg):
self.code = code
self.msg = msg
class _ImageRelayConnection(WebSocketApplication):
ORIGINS = []
def __init__(self, *args, **kwargs):
WebSocketApplication.__init__(self, *args, **kwargs)
self.peer = None
def on_open(self):
if self.ws.origin not in self.ORIGINS:
print 'Origin prohibited: {0}'.format(self.ws.origin)
self.close(1008, 'Origin prohibited')
return
self._on_open()
def on_message(self, msg):
if msg is None:
return
try:
self._on_message(msg)
except (TypeError, KeyError, ValueError):
self.close(1002, 'Protocol error')
except _WSError, e:
self.close(e.code, e.msg)
def on_close(self, _reason):
if self.peer:
self.peer.unpeer()
self._on_close()
def send_msg(self, type, **kwargs):
msg = {
'type': type,
}
msg.update(kwargs)
self.ws.send(json.dumps(msg))
def send_blob(self, data):
self.ws.send(data, binary=True)
def unpeer(self):
pass
def close(self, code, msg):
self.ws.close(code, msg)
def _on_open(self):
pass
def _on_message(self, msg):
raise ValueError('Invalid message')
def _on_close(self):
pass
class ImageClientConnection(_ImageRelayConnection):
BASE_URL = None
_connections = {} # token -> ImageClientConnection
def __init__(self, *args, **kwargs):
_ImageRelayConnection.__init__(self, *args, **kwargs)
self.token = base64.urlsafe_b64encode(os.urandom(24))
self.verifier = None
@classmethod
def peer(cls, token, peer):
conn = cls._connections.pop(token, None)
if conn is None:
return None
conn.peer = peer
conn.verifier = '{0:04}'.format(random.randint(0, 9999))
conn.send_msg('peer', verifier=conn.verifier)
return conn
def _allow_peering(self):
self._connections[self.token] = self
def unpeer(self):
self.peer = None
self._allow_peering()
self.send_msg('unpeer')
def _on_open(self):
url = urljoin(self.BASE_URL, self.token)
barcode = qrcode.make(url, box_size=4, border=0,
error_correction=qrcode.constants.ERROR_CORRECT_L)
buf = StringIO()
barcode.save(buf, 'png')
self.send_blob(buf.getvalue())
self._allow_peering()
def _on_message(self, msg):
msg = json.loads(msg)
if msg['type'] == 'ack':
if self.peer:
self.peer.send_msg('ack')
elif msg['type'] == 'kick':
if self.peer:
self.peer.close(1000, 'Connection terminated by client')
else:
raise ValueError('Invalid message')
def _on_close(self):
self._connections.pop(self.token, None)
class ImageMobileConnection(_ImageRelayConnection):
STATE_AUTHENTICATING = 0
STATE_RUNNING = 1
STATE_SENDING_IMAGE = 2
def __init__(self, *args, **kwargs):
_ImageRelayConnection.__init__(self, *args, **kwargs)
self.state = self.STATE_AUTHENTICATING
def unpeer(self):
self.peer = None
self.close(1000, 'Session closed')
def _on_message(self, msg):
if self.state == self.STATE_AUTHENTICATING:
msg = json.loads(msg)
if msg['type'] != 'auth':
raise _WSError(1002, 'Authentication required')
self.peer = ImageClientConnection.peer(msg['token'], self)
if not self.peer:
raise _WSError(1008, 'Authentication failed')
self.state = self.STATE_RUNNING
self.send_msg('hello', verifier=self.peer.verifier)
elif self.state == self.STATE_RUNNING:
msg = json.loads(msg)
if msg['type'] == 'image':
self.state = self.STATE_SENDING_IMAGE
else:
raise ValueError('Invalid message')
elif self.state == self.STATE_SENDING_IMAGE:
if self.peer:
self.peer.send_msg('image')
self.peer.send_blob(msg)
self.state = self.STATE_RUNNING
else:
raise ValueError('Invalid state')
def _main():
parser = ArgumentParser(description='DermShare Remote websocket server.',
fromfile_prefix_chars='@',
epilog='Pass @<filename> to load command-line arguments from a file.')
parser.convert_arg_line_to_args = lambda s: s.split()
parser.add_argument('-l', '--listen', metavar='ADDRESS',
default='127.0.0.1',
help='address to listen on [127.0.0.1]')
parser.add_argument('-o', '--origin', metavar='ORIGIN', action='append',
required=True,
help='permit specified HTTP origin (may be repeated)')
parser.add_argument('-p', '--port', metavar='PORT',
type=int, default=5003,
help='port to listen on [5003]')
parser.add_argument('-u', '--mobile-url', metavar='URL',
help='URL of mobile site [auto]')
args = parser.parse_args()
_ImageRelayConnection.ORIGINS.extend(args.origin)
ImageClientConnection.BASE_URL = (args.mobile_url or
urljoin(args.origin[0], '/remote/'))
resources = OrderedDict()
resources['/ws/client'] = ImageClientConnection
resources['/ws/mobile'] = ImageMobileConnection
WebSocketServer((args.listen, args.port),
Resource(resources)).serve_forever()
if __name__ == '__main__':
_main()
|
Python
| 0
|
@@ -5141,16 +5141,143 @@
ate')


+resources = Resource(OrderedDict((
+ ('/ws/client', ImageClientConnection),
+ ('/ws/mobile', ImageMobileConnection),
+)))
+
+
def _mai
@@ -6297,142 +6297,8 @@
))

- resources = OrderedDict()
- resources['/ws/client'] = ImageClientConnection
- resources['/ws/mobile'] = ImageMobileConnection
@@ -6342,30 +6342,9 @@
rt),
-
- Resource(
+
reso
@@ -6349,17 +6349,16 @@
sources)
-)
.serve_f
|
55cfe1b3ce4c55eaeadbcedeba942cf6ed40f134
|
revert changes in message lib
|
intelmq/lib/message.py
|
intelmq/lib/message.py
|
import json
import hashlib
class Event(object):
def __init__(self, event=None):
if event:
self.event = event
else:
self.event = dict()
def add(self, key, value):
if not value or key in self.event:
return False
self.event[key] = value
return True
def update(self, key, value):
if not value:
return False
self.event[key] = value
return True
def discard(self, key, value):
self.clear(key)
def clear(self, key):
if key in self.event:
return self.event.pop(key)
else:
return None
def value(self, key):
if key in self.event:
return self.event[key]
else:
return None
def keys(self):
return self.event.keys()
def items(self):
return self.event.items()
def contains(self, key):
if key in self.event:
return self.event[key]
else:
return None
def to_dict(self):
return dict(self.event)
def to_unicode(self):
return unicode(json.dumps(self.event))
@staticmethod
def from_unicode(event_string):
return Event(json.loads(event_string))
def __hash__(self):
evhash = hashlib.sha1()
for key, value in sorted(self.items()):
evhash.update(key.encode("utf-8"))
evhash.update("\xc0")
if type(value) != float and type(value) != int:
evhash.update(value.encode("utf-8"))
else:
evhash.update("%s"%value)
evhash.update("\xc0")
return int(evhash.hexdigest(), 16) # FIXME: the int stuff should be done by cache
#return hash(self.event)
def __eq__(self, event2):
return self.event == event2
def __unicode__(self):
return self.to_unicode()
def __repr__(self):
return repr(self.event)
def __str__(self):
return str(self.event)
|
Python
| 0
|
@@ -1628,170 +1628,43 @@
-if type(value) != float and type(value) != int:
- evhash.update(value.encode("utf-8"))
- else:
- evhash.update("%s"%value
+evhash.update(value.encode("utf-8")
)
|
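The revert above makes `__hash__` call `value.encode("utf-8")` unconditionally, dropping the special case for numeric values. A sketch of the underlying order-independent hashing scheme (it uses `str(value)` so the example stays runnable for non-string values, which the reverted code would not be):

```python
import hashlib

def event_hash(event):
    """Digest a flat dict independent of insertion order: feed sorted
    key/value pairs, separated by a sentinel byte, into SHA-1."""
    h = hashlib.sha1()
    for key, value in sorted(event.items()):
        h.update(key.encode('utf-8'))
        h.update(b'\xc0')
        h.update(str(value).encode('utf-8'))  # original assumes str values
        h.update(b'\xc0')
    return int(h.hexdigest(), 16)

assert event_hash({'a': 1, 'b': 2}) == event_hash({'b': 2, 'a': 1})
```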
0b45a9191c9acb3947babe564c3c3bbda9b7394f
|
Return page_id in logs/serializer for wiki.
|
api/logs/serializers.py
|
api/logs/serializers.py
|
from rest_framework import serializers as ser
from api.base.serializers import (
JSONAPISerializer,
RelationshipField,
RestrictedDictSerializer,
LinksField,
)
from website.project.model import Node
class NodeLogIdentifiersSerializer(RestrictedDictSerializer):
doi = ser.CharField(read_only=True)
ark = ser.CharField(read_only=True)
class NodeLogInstitutionSerializer(RestrictedDictSerializer):
id = ser.CharField(read_only=True)
name = ser.CharField(read_only=True)
class NodeLogFileParamsSerializer(RestrictedDictSerializer):
materialized = ser.CharField(read_only=True)
url = ser.URLField(read_only=True)
addon = ser.CharField(read_only=True)
node_url = ser.URLField(read_only=True, source='node.url')
node_title = ser.URLField(read_only=True, source='node.title')
class NodeLogParamsSerializer(RestrictedDictSerializer):
addon = ser.CharField(read_only=True)
bucket = ser.CharField(read_only=True)
data_set = ser.CharField(read_only=True, source='dataset')
figshare_title = ser.CharField(read_only=True, source='figshare.title')
forward_url = ser.CharField(read_only=True)
github_user = ser.CharField(read_only=True, source='github.user')
github_repo = ser.CharField(read_only=True, source='github.repo')
filename = ser.CharField(read_only=True)
folder = ser.CharField(read_only=True)
folder_name = ser.CharField(read_only=True)
identifiers = NodeLogIdentifiersSerializer(read_only=True)
params_node = ser.SerializerMethodField(read_only=True)
old_page = ser.CharField(read_only=True)
page = ser.CharField(read_only=True)
path = ser.CharField(read_only=True)
params_project = ser.SerializerMethodField(read_only=True)
source = NodeLogFileParamsSerializer(read_only=True)
destination = NodeLogFileParamsSerializer(read_only=True)
view_url = ser.SerializerMethodField(read_only=True)
study = ser.CharField(read_only=True)
tag = ser.CharField(read_only=True)
tags = ser.CharField(read_only=True)
target = NodeLogFileParamsSerializer(read_only=True)
title_new = ser.CharField(read_only=True)
title_original = ser.CharField(read_only=True)
updated_fields = ser.ListField(read_only=True)
version = ser.CharField(read_only=True)
citation_name = ser.CharField(read_only=True, source='citation.name')
institution = NodeLogInstitutionSerializer(read_only=True)
previous_institution = NodeLogInstitutionSerializer(read_only=True)
def get_view_url(self, obj):
urls = obj.get('urls', None)
if urls:
view = urls.get('view', None)
if view:
return view
return {}
def get_params_node(self, obj):
node_id = obj.get('node', None)
if node_id:
node = Node.load(node_id)
return {'id': node_id, 'title': node.title}
return {}
def get_params_project(self, obj):
project_id = obj.get('project', None)
if project_id:
node = Node.load(project_id)
return {'id': project_id, 'title': node.title}
return {}
class NodeLogSerializer(JSONAPISerializer):
filterable_fields = frozenset(['action', 'date'])
non_anonymized_fields = [
'id',
'date',
'action',
]
id = ser.CharField(read_only=True, source='_id')
date = ser.DateTimeField(read_only=True)
action = ser.CharField(read_only=True)
params = NodeLogParamsSerializer(read_only=True)
links = LinksField({'self': 'get_absolute_url'})
class Meta:
type_ = 'logs'
nodes = RelationshipField(
related_view='logs:log-nodes',
related_view_kwargs={'log_id': '<pk>'},
)
user = RelationshipField(
related_view='users:user-detail',
related_view_kwargs={'user_id': '<user._id>'},
)
contributors = RelationshipField(
related_view='logs:log-contributors',
related_view_kwargs={'log_id': '<pk>'},
)
# This would be a node_link, except that data isn't stored in the node log params
linked_node = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<params.pointer.id>'}
)
template_node = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<params.template_node.id>'}
)
def get_absolute_url(self, obj):
return obj.absolute_url
|
Python
| 0
|
@@ -1633,24 +1633,68 @@
_only=True)
+ page_id = ser.CharField(read_only=True)
path = s
@@ -2728,34 +2728,36 @@
return
-{}
+None

def get_pa
|
4eecc8b2de7ad39ce796da8fc7862300ed5db759
|
Change HP reference to HPE for ProLiant server
|
examples/simple-proliant.py
|
examples/simple-proliant.py
|
# coding=utf-8
""" Simple example to use python-redfish on HP Proliant servers """
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
from builtins import str
import os
import sys
import json
import redfish
standard_library.install_aliases()
# Get $HOME environment.
HOME = os.getenv('HOME')
if HOME == '':
print("$HOME environment variable not set, please check your system")
sys.exit(1)
try:
with open(HOME + "/.redfish/inventory") as json_data:
config = json.load(json_data)
json_data.close()
except IOError as e:
print("Please create a json configuration file")
print(e)
sys.exit(1)
URL = config["Managers"]["default"]["url"]
USER_NAME = config["Managers"]["default"]["login"]
PASSWORD = config["Managers"]["default"]["password"]
''' remote_mgmt is a redfish.RedfishConnection object '''
try:
remote_mgmt = redfish.connect(URL,
USER_NAME,
PASSWORD,
simulator=False,
verify_cert=False)
except redfish.exception.RedfishException as e:
sys.stderr.write(str(e.message))
sys.stderr.write(str(e.advices))
sys.exit(1)
print("Redfish API version : %s \n" % remote_mgmt.get_api_version())
# Uncomment following line to reset the blade !!!
# remote_mgmt.Systems.systems_dict["1"].reset_system()
print("Bios version : {}\n".format(
remote_mgmt.Systems.systems_dict["1"].get_bios_version()))
print("Serial Number : {}\n".format(
remote_mgmt.Systems.systems_dict["1"].get_serial_number()))
print("Power State : {}\n".format(
remote_mgmt.Systems.systems_dict["1"].get_power()))
print("Parameter 'SystemType' : {}\n".format(
remote_mgmt.Systems.systems_dict["1"].get_parameter("SystemType")))
print("Get bios parameters : {}\n".format(
remote_mgmt.Systems.systems_dict["1"].bios.get_parameters()))
print("Get boot parameters : {}\n".format(
remote_mgmt.Systems.systems_dict["1"].bios.boot.get_parameters()))
# print("Get bios parameter 'AdminPhone' : {}\n".format(
# remote_mgmt.Systems.systems_dict["1"].bios.get_parameter("AdminPhone")))
# print("Set bios parameter 'AdminPhone' to '' : {}\n".format(
# remote_mgmt.Systems.systems_dict["1"].bios.set_parameter("AdminPhone","")))
# Boot server with script
# remote_mgmt.Systems.systems_dict["1"].bios.set_parameter("Dhcpv4","Enabled")
remote_mgmt.Systems.systems_dict["1"].bios.set_parameter(
"PreBootNetwork", "Auto")
remote_mgmt.Systems.systems_dict["1"].bios.set_parameter(
"UefiShellStartup", "Enabled")
remote_mgmt.Systems.systems_dict["1"].bios.set_parameter(
"UefiShellStartupLocation", "NetworkLocation")
remote_mgmt.Systems.systems_dict["1"].bios.set_parameter(
"UefiShellStartupUrl", "http://10.3.222.88/deploy/startup.nsh")
# remote_mgmt.Systems.systems_dict["1"].set_parameter_json(
# '{"Boot": {"BootSourceOverrideTarget": "UefiShell"}}')
# remote_mgmt.Systems.systems_dict["1"].set_parameter_json(
# '{"Boot": {"BootSourceOverrideEnabled" : "Continuous"}}')
# remote_mgmt.Systems.systems_dict["1"].set_parameter_json(
# '{"Boot": {"BootSourceOverrideEnabled" : "Once"}}')
mySystem = remote_mgmt.Systems.systems_dict["1"]
mySystem.set_boot_source_override("None", "Disabled")
# Uncomment the next line to reset the server
# mySystem.reset_system()
print("Get manager firmware version : {}\n".format(
remote_mgmt.Managers.managers_dict["1"].get_firmware_version()))
print("Get system Bios version : {}\n".format(
remote_mgmt.Systems.systems_dict["1"].get_bios_version()))
# Reset of the system is required to apply the changes
# remote_mgmt.Systems.systems_dict["1"].reset_system()
remote_mgmt.logout()
|
Python
| 0
|
@@ -55,16 +55,17 @@
sh on HP
+E
Prolian
|
17eb885097da7b2b2418f909e2f23058245be72c
|
Update spotify example (#276)
|
examples/spotify_example.py
|
examples/spotify_example.py
|
"""
Example on how to use the Spotify Controller.
NOTE: You need to install the spotipy and spotify-token dependencies.
This can be done by running the following:
pip install spotify-token
pip install git+https://github.com/plamere/spotipy.git
"""
import pychromecast
from pychromecast.controllers.spotify import SpotifyController
import spotify_token as st
import spotipy
chromecasts = pychromecast.get_chromecasts()
cast = chromecasts[0]
cast.start()
CAST_NAME = "My Chromecast"
device_id = None
if cast.name == CAST_NAME:
data = st.start_session("SPOTIFY_USERNAME", "SPOTIFY_PASSWORD")
access_token = data[0]
client = spotipy.Spotify(auth=access_token)
sp = SpotifyController(access_token)
cast.register_handler(sp)
sp.launch_app()
devices_available = client.devices()
for device in devices_available['devices']:
if device['name'] == CAST_NAME and device['type'] == 'CastVideo':
device_id = device['id']
break
client.start_playback(device_id=device_id, uris=["spotify:track:3Zwu2K0Qa5sT6teCCHPShP"])
|
Python
| 0.000001
|
@@ -242,16 +242,43 @@
git
"""
+import logging
+import sys
+
import p
@@ -399,161 +399,324 @@
py

-chromecasts = pychromecast.get_chromecasts()
-cast = chromecasts[0]
-cast.start()
-
-CAST_NAME = "My Chromecast"
-device_id = None
-
-if cast.name == CAST_NAME:
+CAST_NAME = "My Chromecast"
+
+debug = '--show-debug' in sys.argv
+if debug:
+ logging.basicConfig(level=logging.DEBUG)
+
+chromecasts = pychromecast.get_chromecasts()
+cast = None
+for _cast in chromecasts:
+ if _cast.name == CAST_NAME:
+ cast = _cast
+ break
+
+if cast:
+ cast.wait()
+ device_id = None

@@ -1082,42 +1082,8 @@
NAME
- and device['type'] == 'CastVideo'
:
|
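Decoded from the %-encoded hunks above, the rewritten region of the example looks roughly like this (a sketch of the changed part only; the token/session and playback code that follows is unchanged apart from being re-indented under `if cast:`):

import logging
import sys

CAST_NAME = "My Chromecast"

# optional debug logging, toggled from the command line
debug = '--show-debug' in sys.argv
if debug:
    logging.basicConfig(level=logging.DEBUG)

chromecasts = pychromecast.get_chromecasts()
cast = None
for _cast in chromecasts:
    if _cast.name == CAST_NAME:  # pick the named device instead of blindly taking chromecasts[0]
        cast = _cast
        break

if cast:
    cast.wait()  # wait() replaces the old cast.start()
    device_id = None

The final hunk additionally drops the `and device['type'] == 'CastVideo'` condition from the device-matching loop.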
d609e8895c27d9c862098718140b54bbbdb77f98
|
fix yet another typo......
|
FBRank/parse/League.py
|
FBRank/parse/League.py
|
# -*- coding:utf-8 -*-
"""
parse web content about leagues; currently includes: Premier League, Liga BBVA
"""
import re
from collections import defaultdict
import requests
from bs4 import BeautifulSoup
from prettytable import PrettyTable
from FBRank.utils.exceptions import IllegalArgumentException, NotSupprotedYetException
from FBRank.utils.utils import league_news_pattern, PY2
if PY2:
input = raw_input
league_news_pattern_target = re.compile(league_news_pattern)
def parse_league_rank(url, index=0):
"""
    :param url: web URL which contains certain league rank information
    :return: PrettyTable object containing the league rank
    tips:
    - the official Premier League website can be unstable
    - Tencent Sports returns JSON data, but it cannot be displayed directly (without a front-end)
        url = "http://matchweb.sports.qq.com/team/rank"
        payloads = {
            'callback': 'recommendlist',
            'competitionId': '8',
            'from': 'sporthp',
            '_': str(randint(1, 100000))
        }
        response = requests.get(url, params=payloads).content.decode('utf-8')
"""
cur = 1
    table = PrettyTable(["Rank", "Team", "Played", "Won", "Drawn", "Lost", "Goals For", "Goals Against",
                         "Goal Diff", "Goals/Game", "Conceded/Game", "Diff/Game", "Points/Game", "Points"])
soup = BeautifulSoup(requests.get(url).content.decode("utf-8"), "lxml")
for t in soup.find_all("tr", class_=re.compile(r"trbg[red|yellow|blue|grey]")):
club = [c.get_text() for c in t.find_all("td") if c.get_text()]
if (cur == index):
return club
cur += 1
table.add_row(club)
if index != 0:
raise IllegalArgumentException(
"index out of range,the max is {} /排名超出范围,最大是 {}".format(cur, cur))
return table
def parse_league_news(url):
"""
    :param url: web URL which contains certain league news information
:return: dict {index:[title,url]}
"""
if url.endswith('bundesliga/'):
        raise NotSupprotedYetException("Bundesliga news is not yet supported, wait for the next version :-D")
news_dict = defaultdict(list)
web_news = BeautifulSoup(requests.get(url).content.decode('utf-8'), 'lxml')
key = 1
for news in web_news.find('div', class_='banner_list slide').find_all('h3')[:-1]:
# still to be optimized
title = re.search(r'title="(.*?)"', str(news)).group(1)
url = re.search(r'href="(.*?)"', str(news)).group(1)
            # use str, because user input will be str
news_dict[str(key)].extend([url, title])
key += 1
return news_dict
def show_news(news_dict):
"""
:param news_dict: news_dict returned from parse_league_news
:return: None,just print
"""
table = PrettyTable(["ID", "链接"])
for id, (_, title) in news_dict.items():
table.add_row([id, title])
print(table)
while True:
        prompt = input('------ enter an id to view that news item, or press q to quit ------\n')
        if prompt == 'q':
            return 'quit'
        elif prompt not in news_dict.keys():
            print('please enter one of the ids listed above')
else:
print(get_news_from_index(news_dict[prompt][0]))
def get_news_from_index(url):
"""
    get web news from a certain URL
    :param url: news URL
:return: news content,plain text
"""
headers = {
'Accept': 'image/webp,image/*,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'zh',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
}
try:
soup = BeautifulSoup(requests.get(url).content.decode("gb2312"), "lxml")
return (soup.find("dl").find_all("dd")[4].get_text())
except AttributeError as ex:
return "sorry the news content counld not be founded"
|
Python
| 0.000004
|
@@ -3829,17 +3829,16 @@
tent cou
-n
ld not b
|
fd460c1b987354b01d306e2e96ab5c74f6b0d06f
|
add socket close call.
|
echo_server.py
|
echo_server.py
|
#!/usr/bin/env python
from __future__ import print_function
import socket
import email.utils
def server_socket_function():
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_IP)
server_socket.bind(('127.0.0.1', 50000))
server_socket.listen(1)
try:
while True:
conn, addr = server_socket.accept()
message = conn.recv(32)
if message:
conn.sendall("I recieved your message. Stop talking to me. You are annoying.")
except KeyboardInterrupt:
conn.close()
def response_ok():
first_line = 'HTTP/1.1 200 OK'
timestamp = email.utils.formatdate(usegmt=True)
content_header = 'Content-Type: text/plain'
crlf = '<CRLF>'
response = ('{}\nDate: {}\n{}\n{}').format(
first_line, timestamp, content_header, crlf)
return response
def response_error():
error_code = '404'
error_text = 'Not Found'
first_line = 'HTTP/1.1 {} {}'.format(error_code, error_text)
timestamp = email.utils.formatdate(usegmt=True)
content_header = 'Content-Type: text/plain'
crlf = '<CRLF>'
response = ('{}\nDate: {}\n{}\n{}').format(
first_line, timestamp, content_header, crlf)
return response
def parse_request():
return
print(response_ok())
# if __name__ == '__main__':
# server_socket_function()
|
Python
| 0
|
@@ -564,16 +564,46 @@
close()%0A
+ server_socket.close()%0A
%0A%0Adef re
|
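Decoded, the one-line fix makes the KeyboardInterrupt handler close the listening socket as well, so the bound port is released on shutdown:

    except KeyboardInterrupt:
        conn.close()
        server_socket.close()

(Note that `conn` is still unbound if the interrupt arrives before the first accept(); the hunk shown here leaves that as-is.)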
2e28fdf8c6413443313e98f6ddf258758f0b6bbf
|
Fix check for WIN_CUSTOM_TOOLCHAIN (issue #1470).
|
tools/gclient_hook.py
|
tools/gclient_hook.py
|
# Copyright (c) 2011 The Chromium Embedded Framework Authors.
# Portions copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gclient_util import *
import os, sys
# The CEF directory is the parent directory of _this_ script.
cef_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
# The src directory is the parent directory of the CEF directory.
src_dir = os.path.abspath(os.path.join(cef_dir, os.pardir))
print "\nGenerating CEF version header file..."
gyper = [ 'python', 'tools/make_version_header.py',
'--header', 'include/cef_version.h',
'--cef_version', 'VERSION',
'--chrome_version', '../chrome/VERSION',
'--cpp_header_dir', 'include' ]
RunAction(cef_dir, gyper)
print "\nPatching build configuration and source files for CEF..."
patcher = [ 'python', 'tools/patcher.py',
'--patch-config', 'patch/patch.cfg' ]
RunAction(cef_dir, patcher)
print "\nGenerating CEF project files..."
# depot_tools currently bundles VS2013 Express Update 1 which causes linker
# errors with Debug builds (see issue #1304). Don't use the bundled version
# unless explicitly requested.
if not 'DEPOT_TOOLS_WIN_TOOLCHAIN' in os.environ.keys():
os.environ['DEPOT_TOOLS_WIN_TOOLCHAIN'] = '0'
# By default GYP+Ninja on Windows expects Visual Studio to be installed on the
# local machine. To build when Visual Studio is extracted to a directory but not
# installed (e.g. via a custom toolchain) you have two options:
#
# 1. Set up the environment using only environment variables:
# set WIN_CUSTOM_TOOLCHAIN=1
# set VS_ROOT=<VS root directory>
# set SDK_ROOT=<Platform SDK root directory>
# set INCLUDE=<VS include paths>
# set PATH=<VS executable paths>
# set LIB=<VS library paths>
#
# 2. Set up the environment using a combination of environment variables and the
# "%GYP_MSVS_OVERRIDE_PATH%\VC\vcvarsall.bat" script:
# set GYP_MSVS_OVERRIDE_PATH=<VS root directory>
# set GYP_DEFINES="windows_sdk_path=<Platform SDK root directory>"
#
# The following environment variables must also be set:
# set DEPOT_TOOLS_WIN_TOOLCHAIN=0
# set GYP_MSVS_VERSION=<VS version>
# set CEF_VCVARS=<empty .bat file>
custom_toolchain = False
if 'WIN_CUSTOM_TOOLCHAIN' in os.environ.keys() and \
os.environ['WIN_CUSTOM_TOOLCHAIN'] == '1':
required_vars = [
'GYP_MSVS_VERSION',
'VS_ROOT',
'SDK_ROOT',
'INCLUDE',
'PATH',
'LIB',
]
for var in required_vars:
if not var in os.environ.keys():
raise Exception('%s environment variable must be set' % var)
custom_toolchain = True
# Set windows_sdk_path via GYP_DEFINES.
gyp_defines = ''
if 'GYP_DEFINES' in os.environ.keys():
gyp_defines = os.environ['GYP_DEFINES'] + ' '
gyp_defines = gyp_defines + \
'windows_sdk_path=' + os.environ['SDK_ROOT'].replace('\\', '/')
os.environ['GYP_DEFINES'] = gyp_defines
# Necessary to return correct VS version information via GetVSVersion in
# src/tools/gyp/pylib/gyp/msvs_emulation.py.
os.environ['GYP_MSVS_OVERRIDE_PATH'] = os.environ['VS_ROOT']
# Generate environment files (environment.x64, environment.x86) in each
# build output directory.
# When using the default toolchain this is done by GenerateEnvironmentFiles
# in src/tools/gyp/pylib/gyp/msvs_emulation.py.
setup_script = \
os.path.join(src_dir, 'build/toolchain/win/setup_toolchain.py')
win_tool_script = os.path.join(src_dir, 'tools/gyp/pylib/gyp/win_tool.py')
out_dirs = ['Debug', 'Debug_x64', 'Release', 'Release_x64']
for out_dir in out_dirs:
out_dir_abs = os.path.join(src_dir, 'out', out_dir)
if not os.path.exists(out_dir_abs):
os.makedirs(out_dir_abs)
cmd = ['python', setup_script,
os.environ['VS_ROOT'], win_tool_script, os.environ['SDK_ROOT']]
RunAction(out_dir_abs, cmd)
os.environ['CEF_DIRECTORY'] = os.path.basename(cef_dir)
gyper = [ 'python', '../build/gyp_chromium', 'cef.gyp', '-I', 'cef.gypi' ]
if custom_toolchain:
# Disable GYP's auto-detection of the VS install.
gyper.extend(['-G', 'ninja_use_custom_environment_files'])
if 'GYP_ARGUMENTS' in os.environ.keys():
gyper.extend(os.environ['GYP_ARGUMENTS'].split(' '))
RunAction(cef_dir, gyper)
|
Python
| 0
|
@@ -2335,73 +2335,32 @@
%0Aif
-'WIN_CUSTOM_TOOLCHAIN' in os.environ.keys() and %5C%0A os.environ%5B
+bool(int(os.environ.get(
'WIN
@@ -2381,16 +2381,16 @@
AIN'
-%5D == '1'
+, '0')))
:%0A
|
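The decoded replacement collapses the two-line membership-plus-equality check into a single truthiness test with a default, so an unset variable reads as '0':

# before
if 'WIN_CUSTOM_TOOLCHAIN' in os.environ.keys() and \
   os.environ['WIN_CUSTOM_TOOLCHAIN'] == '1':
    ...

# after
if bool(int(os.environ.get('WIN_CUSTOM_TOOLCHAIN', '0'))):
    ...

One observable difference: the new form accepts any non-zero integer string, not just '1', and would raise ValueError on a non-numeric value.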
e916658d3bba66b0d8fa28fc3fdd24abed6b5697
|
Add l10n-slovenia
|
tools/oca_projects.py
|
tools/oca_projects.py
|
# -*- coding: utf-8 -*-
"""
Data about OCA Projects, with a few helper functions.
OCA_PROJECTS: dictionary of OCA Projects mapped to the list of related
repository names, based on
https://community.odoo.com/page/website.projects_index
OCA_REPOSITORY_NAMES: list of OCA repository names
"""
from github_login import login
ALL = ['OCA_PROJECTS', 'OCA_REPOSITORY_NAMES', 'url']
OCA_PROJECTS = {
'accounting': ['account-analytic',
'account-budgeting',
'account-closing',
'account-consolidation',
'account-financial-tools',
'account-financial-reporting',
'account-invoice-reporting',
'account-invoicing',
'account-fiscal-rule',
'operating-unit',
],
# 'backport': ['OCB',
# ],
'banking': ['bank-payment',
'bank-statement-reconcile',
'bank-statement-import',
'account-payment',
],
'community': ['maintainer-tools',
'maintainer-quality-tools',
'runbot-addons',
],
'connector': ['connector',
'connector-ecommerce',
],
'connector AccountEdge': ['connector-accountedge'],
'connector LIMS': ['connector-lims'],
'connector CMIS': ['connector-cmis'],
'connector Magento': ['connector-magento'],
'connector Prestashop': ['connector-prestashop'],
'connector Redmine': ['connector-redmine'],
'connector Sage': ['connector-sage'],
'connector Salesforce': ['connector-salesforce'],
'connector WooCommerce': ['connector-woocommerce'],
'crm sales marketing': ['sale-workflow',
'crm',
'partner-contact',
'sale-financial',
'sale-reporting',
'commission',
'event',
'survey',
],
'document': ['knowledge'],
'ecommerce': ['e-commerce'],
'financial control': ['margin-analysis'],
'geospatial': ['geospatial'],
'hr': ['hr-timesheet',
'hr',
'department',
],
'connector-odoo2odoo': ['connector-odoo2odoo'],
'multi-company': ['multi-company'],
'l10n-argentina': ['l10n-argentina'],
'l10n-belgium': ['l10n-belgium'],
'l10n-brazil': ['l10n-brazil'],
'l10n-canada': ['l10n-canada'],
'l10n-china': ['l10n-china'],
'l10n-colombia': ['l10n-colombia'],
'l10n-costa-rica': ['l10n-costa-rica'],
'l10n-ecuador': ['l10n-ecuador'],
'l10n-finland': ['l10n-finland'],
'l10n-france': ['l10n-france'],
'l10n-germany': ['l10n-germany'],
'l10n-india': ['l10n-india'],
'l10n-iran': ['l10n-iran'],
'l10n-ireland': ['l10n-ireland'],
'l10n-italy': ['l10n-italy'],
'l10n-luxemburg': ['l10n-luxemburg'],
'l10n-mexico': ['l10n-mexico'],
'l10n-morocco': ['l10n-morocco'],
'l10n-netherlands': ['l10n-netherlands'],
'l10n-norway': ['l10n-norway'],
'l10n-peru': ['l10n-peru'],
'l10n-portugal': ['l10n-portugal'],
'l10n-romania': ['l10n-romania'],
'l10n-spain': ['l10n-spain'],
'l10n-switzerland': ['l10n-switzerland'],
'l10n-taiwan': ['l10n-taiwan'],
'l10n-usa': ['l10n-usa'],
'l10n-united-kingdom': ['l10n-united-kingdom'],
'l10n-venezuela': ['l10n-venezuela'],
'logistics': ['carrier-delivery',
'stock-logistics-barcode',
'stock-logistics-workflow',
'stock-logistics-tracking',
'stock-logistics-warehouse',
'stock-logistics-reporting',
'rma',
],
'manufacturing': ['manufacture',
'manufacture-reporting',
],
'management system': ['management-system'],
'purchase': ['purchase-workflow',
'purchase-reporting',
],
'product': ['product-attribute',
'product-kitting',
'product-variant',
],
'project / services': ['project-reporting',
'project-service',
'contract',
'program',
],
'tools': ['reporting-engine',
'report-print-send',
'webkit-tools',
'server-tools',
'community-data-files',
'webhook',
],
'vertical association': ['vertical-association'],
'vertical hotel': ['vertical-hotel'],
'vertical ISP': ['vertical-isp'],
'vertical edition': ['vertical-edition'],
'vertical education': ['vertical-education'],
'vertical medical': ['vertical-medical'],
'vertical NGO': ['vertical-ngo',
# XXX
],
'vertical construction': ['vertical-construction'],
'vertical travel': ['vertical-travel'],
'web': ['web'],
}
def get_repositories():
ignored = {
'odoo-community.org',
'contribute-md-template',
'maintainer-tools',
'maintainer-quality-tools',
'odoo-sphinx-autodoc',
'openupgradelib',
'connector-magento-php-extension',
'OCB',
'OpenUpgrade',
'pylint-odoo',
}
gh = login()
all_repos = [repo.name for repo in gh.iter_user_repos('OCA')
if repo.name not in ignored]
return all_repos
try:
OCA_REPOSITORY_NAMES = get_repositories()
except Exception as exc:
print exc
OCA_REPOSITORY_NAMES = []
for repos in OCA_PROJECTS.itervalues():
OCA_REPOSITORY_NAMES += repos
OCA_REPOSITORY_NAMES.sort()
_OCA_REPOSITORY_NAMES = set(OCA_REPOSITORY_NAMES)
_URL_MAPPINGS = {'git': 'git@github.com:%s/%s.git',
'https': 'https://github.com/%s/%s.git',
}
def url(project_name, protocol='git', org_name='OCA'):
"""get the URL for an OCA project repository"""
if project_name not in _OCA_REPOSITORY_NAMES:
raise ValueError('Unknown project', project_name)
return _URL_MAPPINGS[protocol] % (org_name, project_name)
|
Python
| 0.000004
|
@@ -3279,24 +3279,64 @@
-romania'%5D,%0A
+ 'l10n-slovenia': %5B'l10n-slovenia'%5D,%0A
'l10n-sp
|
70ccfa544ca590693343ca280ba26a0f528b8336
|
make css_class a property, since it's just a getter.
|
lib/djtables/column.py
|
lib/djtables/column.py
|
#!/usr/bin/env python
# vim: et ts=4 sw=4
import datetime
from django.template import defaultfilters
class Column(object):
"""
This class represents a table column. It is responsible for holding
    metadata, and rendering table cells. Like Django model/fields, columns
are usually created within the table class which they are bound to.
"""
creation_counter = 0
def __init__(self, name=None, value=None, link=None, sortable=True, css_class=None):
self._name = name
self._value = value
self._link = link
self._css_class = css_class
self.sortable = sortable
# like django fields, keep track of the order which columns are
# created, so they can be sorted later. (unfortunately, python
# attrs are passed to __new__ as an unsorted dict, so we must
# keep track of this manually to avoid random column order.)
self.creation_counter = Column.creation_counter
Column.creation_counter += 1
self.bound_to = None
def __lt__(self, other):
"""Allow columns to be sorted by order of creation."""
return self.creation_counter < other.creation_counter
def __unicode__(self):
return self.name
def bind_to(self, table, name):
"""
Bind this column to a table, and assign it a name. This method
can only be called once per instance, because a Column cannot be
bound to multiple tables. (The sort order would be ambiguous.)
"""
if self.bound_to is not None:
raise AttributeError(
"Column is already bound to '%s' as '%s'" %\
self.bound_to)
self.bound_to = (table, name)
@property
def is_bound(self):
"""Return true if this column is bound to a table."""
return (self.bound_to is not None)
@property
def name(self):
"""Return the column name, whether explicit or implicit."""
return self._name or self.bound_to[1]
def value(self, cell):
"""
Extract the value of ``cell``, ready to be rendered.
If this Column was instantiated with a ``value`` attribute, it
is called here to provide the value. (For example, to provide a
calculated value.) Otherwise, ``cell.value`` is returned.
"""
if self._value is not None:
return self._value(cell)
else:
return cell.value
def render(self, cell):
"""
Render ``cell``, ready for display. The default behavior is to
simply cast its value to unicode, but this may be overridden by
child classes to do something more useful.
"""
return unicode(self.value(cell))
@property
def has_link(self):
"""Return True if this column contains links."""
return self._link is not None
def link(self, cell):
"""
Return the URL which ``cell`` should link to, or None if this
column does not contain links.
If this Column was instantiated with a ``link`` attribute, it is
called here (with a single parameter of ``cell``), to provide
the value. Otherwise, None is returned.
"""
if self.has_link:
return self._link(cell)
return None
@property
def has_css_class(self):
"""Return True if a CSS class is defined for this column."""
return self._css_class is not None
def css_class(self):
"""Return the CSS class for this column."""
return self._css_class
class DateColumn(Column):
"""
This class provides a simple way to render a Date field, using the
Django 'date' template filter. The ``format`` argument specifies the
    string in `Django date format`_, **not** `Python date format`_.
    If ``format`` is None the ``DATE_FORMAT`` setting is used.

    .. _`Django date format`: http://docs.djangoproject.com/en/dev/ref/templates/builtins/#ttag-now
    .. _`Python date format`: http://docs.python.org/library/datetime.html#strftime-strptime-behavior
"""
def __init__(self, format=None, *args, **kwargs):
super(DateColumn, self).__init__(*args, **kwargs)
self._format = format
def render(self, cell):
return defaultfilters.date(
self.value(cell),
self._format)
class WrappedColumn(object):
"""
This class wraps a Column instance, and binds it to a Table instance
to provide useful properties to the template. This represents a per-
render instance of the column, containing its volatile state, such
as sort order. (More properties, such as visibility, filtering, and
grouping may come later.)
All of the attributes (and methods) of the wrapped Column can be
accessed via this class, with help from some __getattr__ magic.
"""
def __init__(self, table, column):
self.table = table
self.column = column
@property
def sort_url(self):
"""
Return the URL to sort the linked table by this column. If the
table is already sorted by this column, the order is reversed.
        Since there is no canonical URL for a table, the current URL (via
the HttpRequest linked to the Table instance) is reused, and any
unrelated parameters will be included in the output.
"""
prefix = (self.sort_direction == "asc") and "-" or ""
return self.table.get_url(order_by=prefix + self.name)
@property
def is_sorted(self):
return self.sort_direction is not None
@property
def sort_direction(self):
"""
        Return the direction in which the linked table is sorted by
        this column ("asc" or "desc"), or None if this column is unsorted.
"""
if self.table._meta.order_by == self.name:
return "asc"
elif self.table._meta.order_by == ("-" + self.name):
return "desc"
else:
return None
def __unicode__(self):
return unicode(self.column)
def __getattr__(self, name):
return getattr(self.column, name)
|
Python
| 0
|
@@ -3460,32 +3460,46 @@
ss is not None%0A%0A
+ @property%0A
def css_clas
|
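Decoded, the change simply stacks a `@property` decorator on the existing getter, matching the neighbouring `name`, `has_link`, and `has_css_class` properties:

    @property
    def css_class(self):
        """Return the CSS class for this column."""
        return self._css_class

Templates (and Python callers) can then read `column.css_class` as an attribute instead of calling it, which is also what `WrappedColumn.__getattr__` passes through.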
a5c6705313a31cf095fc2771bddbc037eec30cea
|
Allow numbers in JIRA issue names
|
lib/google_calendar.py
|
lib/google_calendar.py
|
from __future__ import print_function
import httplib2
import re
import os
import sys
import datetime
from functools import total_ordering
import dateutil.parser
import pytz
from jira.exceptions import JIRAError
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
class WorklogParseError(RuntimeError):
pass
def import_worklogs(jira, worklogconfig, calendar_name, from_day, to_day):
"""
    Imports worklogs using the Google Calendar API and submits them to JIRA.
    Calendar entries must start with JIRA issue IDs optionally followed by
':' and comments. Returns total hours logged as timedelta.
"""
if from_day >= to_day:
print('Start date must be before end date, start:', from_day, 'end:', to_day)
return 0
from_day = _convert_to_datestring(from_day, worklogconfig)
to_day = _convert_to_datestring(to_day, worklogconfig)
service = _get_calendar_service(worklogconfig)
calendarId = _get_calendar_id(service, calendar_name)
eventsResult = service.events().list(calendarId=calendarId, timeMin=from_day, timeMax=to_day,
maxResults=1000, singleEvents=True, orderBy='startTime').execute()
events = eventsResult.get('items', [])
if not events:
print('No events found in calendar', calendar_name, 'during', from_day, '-', to_day)
return 0
durations = []
for event in events:
try:
gcal_worklog = Worklog.from_gcal(event)
jira_worklogs = [Worklog.from_jira(w) for w in jira.worklogs(gcal_worklog.issue)]
if (jira_worklogs and gcal_worklog in jira_worklogs):
jira_worklog = next(w for w in jira_worklogs if w == gcal_worklog)
if gcal_worklog.duration != jira_worklog.duration:
raise WorklogParseError('Google worklog for issue %s '
'starting at %s: duration %s differs from JIRA duration %s'
% (gcal_worklog.issue, gcal_worklog.start,
gcal_worklog.duration, jira_worklog.duration))
print(gcal_worklog.duration, 'hours starting', gcal_worklog.start,
'already logged for', gcal_worklog.issue)
else:
print('Logging', gcal_worklog.duration, 'hours starting', gcal_worklog.start, 'for', gcal_worklog.issue)
# Dates in JIRA use JIRA server timezone, tzinfo is ignored
# and the offset has to be manually subtracted - what a mess
jira_tz = pytz.timezone(worklogconfig.JIRA_TIMEZONE)
started = gcal_worklog.start - jira_tz.utcoffset(gcal_worklog.start.replace(tzinfo=None))
jira.add_worklog(issue=gcal_worklog.issue,
timeSpentSeconds=gcal_worklog.duration.seconds,
started=started,
comment=gcal_worklog.comment)
durations.append(gcal_worklog.duration)
except WorklogParseError as e:
print(e)
except JIRAError as e:
print("Issue '" + gcal_worklog.issue + "' does not exist (or other JIRA error):", e)
return sum(durations, datetime.timedelta(0))
JIRA_ISSUE_REGEX = re.compile('[A-Z]+-\d+')
@total_ordering
class Worklog(object):
@staticmethod
def from_gcal(event):
start = _parse_iso_date(event['start'].get('dateTime'))
end = _parse_iso_date(event['end'].get('dateTime'))
duration = end - start
        summary = event['summary'].split(':', 1)
issue = summary[0].strip()
if not JIRA_ISSUE_REGEX.match(issue):
raise WorklogParseError("'%s' is not a JIRA issue ID" %
issue.encode('utf-8'))
comment = summary[1].strip() if len(summary) > 1 else ''
return Worklog(start, duration, issue, comment)
@staticmethod
def from_jira(jira_worklog):
start = _parse_iso_date(jira_worklog.started)
duration = datetime.timedelta(seconds=jira_worklog.timeSpentSeconds)
return Worklog(start, duration)
def __init__(self, start, duration, issue=None, comment=None):
self.start = start
self.duration = duration
self.issue = issue
self.comment = comment
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self.start == other.start
def __lt__(self, other):
return NotImplemented
def _convert_to_datestring(datestr, conf):
return datetime.datetime.strptime(datestr, '%Y-%m-%d').isoformat() + conf.TIMEZONE
def _get_calendar_service(conf):
credentials = _get_credentials(conf)
http = credentials.authorize(httplib2.Http())
service = discovery.build('calendar', 'v3', http=http)
return service
def _get_calendar_id(service, calendar_name):
calendars = service.calendarList().list().execute().get('items', [])
calendarId = next((c['id'] for c in calendars
if c['summary'] == calendar_name), None)
if calendarId is None:
raise RuntimeError("Calendar '%s' not found" % calendar_name)
return calendarId
def _parse_iso_date(datestr):
return dateutil.parser.parse(datestr)
def _get_credentials(conf):
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
store = Storage(conf.CREDENTIAL_FILE)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(conf.CLIENT_SECRET_FILE, conf.SCOPES)
flow.user_agent = conf.APPLICATION_NAME
# avoid mess with argparse
sys.argv = [sys.argv[0]]
credentials = tools.run_flow(flow, store)
print('Storing Google Calendar credentials to', conf.CREDENTIAL_FILE)
return credentials
|
Python
| 0.000001
|
@@ -3419,16 +3419,18 @@
le('%5BA-Z
+%5Cd
%5D+-%5Cd+')
|
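The decoded change widens the character class so digits are allowed inside the project key. A minimal sketch of the effect (raw-string form of the same pattern; the example issue keys are made up):

import re

JIRA_ISSUE_REGEX = re.compile(r'[A-Z\d]+-\d+')

assert JIRA_ISSUE_REGEX.match('ABC-123')    # matched before and after the fix
assert JIRA_ISSUE_REGEX.match('P2P-42')     # digit in the key: only matches after the fix
assert not JIRA_ISSUE_REGEX.match('abc-1')  # lowercase keys are still rejected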
91e30ce69e6715f2fcd5b0258d6a9e2f6ae87c96
|
move redeemed_at field from coupon to couponuser
|
coupons/models.py
|
coupons/models.py
|
import random
from django.conf import settings
from django.db import IntegrityError
from django.db import models
from django.dispatch import Signal
from django.utils.encoding import python_2_unicode_compatible
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from .settings import (
COUPON_TYPES,
CODE_LENGTH,
CODE_CHARS,
SEGMENTED_CODES,
SEGMENT_LENGTH,
SEGMENT_SEPARATOR,
)
try:
user_model = settings.AUTH_USER_MODEL
except AttributeError:
from django.contrib.auth.models import User as user_model
redeem_done = Signal(providing_args=["coupon"])
class CouponManager(models.Manager):
def create_coupon(self, type, value, user=None, valid_until=None, prefix="", campaign=None):
coupon = self.create(
value=value,
code=Coupon.generate_code(prefix),
type=type,
user=user,
valid_until=valid_until,
campaign=campaign,
)
try:
coupon.save()
except IntegrityError:
# Try again with other code
return Coupon.objects.create_coupon(type, value, user, valid_until, prefix, campaign)
else:
return coupon
def create_coupons(self, quantity, type, value, valid_until=None, prefix="", campaign=None):
coupons = []
for i in range(quantity):
coupons.append(self.create_coupon(type, value, None, valid_until, prefix, campaign))
return coupons
def used(self):
return self.exclude(redeemed_at=None)
def unused(self):
return self.filter(redeemed_at=None)
def expired(self):
return self.filter(valid_until__lt=timezone.now())
@python_2_unicode_compatible
class Coupon(models.Model):
value = models.IntegerField(_("Value"), help_text=_("Arbitrary coupon value"))
code = models.CharField(
_("Code"), max_length=30, unique=True, blank=True,
help_text=_("Leaving this field empty will generate a random code."))
type = models.CharField(_("Type"), max_length=20, choices=COUPON_TYPES)
users = models.ManyToManyField(
user_model, verbose_name=_("Users"), null=True, blank=True, through='CouponUser',
help_text=_("You may specify a list of users you want to restrict this coupon to."))
users_limit = models.PositiveIntegerField(_("User limit"), default=1)
created_at = models.DateTimeField(_("Created at"), auto_now_add=True)
redeemed_at = models.DateTimeField(_("Redeemed at"), blank=True, null=True)
valid_until = models.DateTimeField(
_("Valid until"), blank=True, null=True,
help_text=_("Leave empty for coupons that never expire"))
campaign = models.ForeignKey('Campaign', verbose_name=_("Campaign"), blank=True, null=True, related_name='coupons')
objects = CouponManager()
class Meta:
ordering = ['created_at']
verbose_name = _("Coupon")
verbose_name_plural = _("Coupons")
def __str__(self):
return self.code
def save(self, *args, **kwargs):
if not self.code:
self.code = Coupon.generate_code()
super(Coupon, self).save(*args, **kwargs)
def expired(self):
return self.valid_until is not None and self.valid_until < timezone.now()
@classmethod
def generate_code(cls, prefix="", segmented=SEGMENTED_CODES):
code = "".join(random.choice(CODE_CHARS) for i in range(CODE_LENGTH))
if segmented:
code = SEGMENT_SEPARATOR.join([code[i:i + SEGMENT_LENGTH] for i in range(0, len(code), SEGMENT_LENGTH)])
return prefix + code
else:
return prefix + code
def redeem(self, user=None):
self.redeemed_at = timezone.now()
self.user = user
self.save()
redeem_done.send(sender=self.__class__, coupon=self)
@python_2_unicode_compatible
class Campaign(models.Model):
name = models.CharField(_("Name"), max_length=255, unique=True)
description = models.TextField(_("Description"), blank=True)
class Meta:
ordering = ['name']
verbose_name = _("Campaign")
verbose_name_plural = _("Campaigns")
def __str__(self):
return self.name
@python_2_unicode_compatible
class CouponUser(models.Model):
coupon = models.ForeignKey(Coupon)
user = models.ForeignKey(user_model, null=True, blank=True)
|
Python
| 0
|
@@ -2474,88 +2474,8 @@
ue)%0A
- redeemed_at = models.DateTimeField(_(%22Redeemed at%22), blank=True, null=True)%0A
@@ -4308,20 +4308,100 @@
l=True, blank=True)%0A
+ redeemed_at = models.DateTimeField(_(%22Redeemed at%22), blank=True, null=True)%0A
|
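Decoded, the field is deleted from `Coupon` and appended to the through model, so each user's redemption gets its own timestamp:

@python_2_unicode_compatible
class CouponUser(models.Model):
    coupon = models.ForeignKey(Coupon)
    user = models.ForeignKey(user_model, null=True, blank=True)
    redeemed_at = models.DateTimeField(_("Redeemed at"), blank=True, null=True)

Note that `CouponManager.used()`/`unused()` and `Coupon.redeem()` in this sample still reference `redeemed_at` on `Coupon`; the hunks shown here do not touch them.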
dd80e81732afd5b39f7120c5951a53c218723998
|
Fix importing of field "number"
|
apps/curia_vista/management/commands/update_councillors.py
|
apps/curia_vista/management/commands/update_councillors.py
|
from xml.etree import ElementTree
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
import requests
from apps.curia_vista.models import Councillor
class Command(BaseCommand):
help = 'Import councillors from parlament.ch'
@transaction.atomic
def handle(self, *args, **options):
source_base = 'http://ws.parlament.ch/councillors?format=xml&lang=de&pagenumber='
headers = {'User-Agent': 'Mozilla'}
cur_page = 1
while True:
source = source_base + str(cur_page)
cur_page += 1
try:
self.stdout.write("Starting importing from {}".format(source))
response = requests.get(source, headers=headers)
except Exception as e:
raise CommandError("Could not fetch file from {}".format(source))
councillors = ElementTree.fromstring(response.content)
if not councillors:
raise CommandError("Not a valid XML file: {}".format(source))
more_pages = False
for councillor in councillors:
councillor_id = councillor.find('id').text
councillor_updated = councillor.find('updated').text
councillor_active = councillor.find('active').text == 'true'
councillor_code = councillor.find('code').text
councillor_first_name = councillor.find('firstName').text
councillor_last_name = councillor.find('lastName').text
councillor_number = councillor.find('number').text if 'number' in councillor else None
councillor_official_denomination = councillor.find('officialDenomination').text
councillor_salutation_letter = councillor.find('salutationLetter').text
councillor_salutation_title = councillor.find('salutationTitle').text
if councillor.find('hasMorePages') is not None:
more_pages = 'true' == councillor.find('hasMorePages').text
councillor_model, created = Councillor.objects.update_or_create(id=councillor_id,
defaults={
'updated': councillor_updated,
'active': councillor_active,
'code': councillor_code,
'first_name': councillor_first_name,
'last_name': councillor_last_name,
'number': councillor_number,
'official_denomination': councillor_official_denomination,
'salutation_letter': councillor_salutation_letter,
'salutation_title': councillor_salutation_title})
councillor_model.full_clean()
councillor_model.save()
print(councillor_model)
self.stdout.write("Finished importing from {}".format(source))
if not more_pages:
break
self.stdout.write("Done")
|
Python
| 0.000002
|
@@ -1604,24 +1604,40 @@
text if
+councillor.find(
'number'
in coun
@@ -1628,30 +1628,29 @@
'number'
+)
i
-n councillor
+s not None
else No
|
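The decoded line replaces a membership test with an explicit `find()` check:

councillor_number = councillor.find('number').text if councillor.find('number') is not None else None

This matters because with `xml.etree.ElementTree`, `'number' in councillor` iterates the element's children, which are `Element` objects; comparing them against a string is always False, so the old expression always yielded None. `find()` is the correct way to look up a child tag.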
0f6b0592e6f74f99289c4e35f85fc2b600b79492
|
Revert SmoothLine in example
|
examples/canvas/lines.py
|
examples/canvas/lines.py
|
'''
Line (SmoothLine) Experiment
============================
This demonstrates the experimental and unfinished SmoothLine feature
for fast line drawing. You should see a multi-segment
path at the top of the screen, and sliders and buttons along the bottom.
You can click to add new points to the segment, change the transparency
and width of the line, or hit 'Animate' to see a set of sine and cosine
animations. The Cap and Joint buttons don't work: SmoothLine has not
implemented these features yet.
'''
from kivy.app import App
from kivy.properties import OptionProperty, NumericProperty, ListProperty, \
BooleanProperty
from kivy.uix.floatlayout import FloatLayout
from kivy.lang import Builder
from kivy.clock import Clock
from math import cos, sin
Builder.load_string('''
<LinePlayground>:
canvas:
Color:
rgba: .4, .4, 1, root.alpha
SmoothLine:
points: self.points
joint: self.joint
cap: self.cap
width: self.linewidth
close: self.close
Color:
rgba: .8, .8, .8, root.alpha_controlline
SmoothLine:
points: self.points
close: self.close
Color:
rgba: 1, .4, .4, root.alpha
SmoothLine:
points: self.points2
joint: self.joint
cap: self.cap
width: self.linewidth
close: self.close
GridLayout:
cols: 2
size_hint: 1, None
height: 44 * 5
GridLayout:
cols: 2
Label:
text: 'Alpha'
Slider:
value: root.alpha
on_value: root.alpha = float(args[1])
min: 0.
max: 1.
Label:
text: 'Alpha Control Line'
Slider:
value: root.alpha_controlline
on_value: root.alpha_controlline = float(args[1])
min: 0.
max: 1.
Label:
text: 'Width'
Slider:
value: root.linewidth
on_value: root.linewidth = args[1]
min: 1
max: 40
Label:
text: 'Cap'
GridLayout:
rows: 1
ToggleButton:
group: 'cap'
text: 'none'
on_press: root.cap = self.text
ToggleButton:
group: 'cap'
text: 'round'
on_press: root.cap = self.text
ToggleButton:
group: 'cap'
text: 'square'
on_press: root.cap = self.text
Label:
text: 'Joint'
GridLayout:
rows: 1
ToggleButton:
group: 'joint'
text: 'none'
on_press: root.joint = self.text
ToggleButton:
group: 'joint'
text: 'round'
on_press: root.joint = self.text
ToggleButton:
group: 'joint'
text: 'miter'
on_press: root.joint = self.text
ToggleButton:
group: 'joint'
text: 'bevel'
on_press: root.joint = self.text
Label:
text: 'Close'
ToggleButton:
text: 'Close line'
on_press: root.close = self.state == 'down'
AnchorLayout:
GridLayout:
cols: 1
size_hint: None, None
size: self.minimum_size
ToggleButton:
size_hint: None, None
size: 100, 44
text: 'Animate'
on_state: root.animate(self.state == 'down')
Button:
size_hint: None, None
size: 100, 44
text: 'Clear'
on_press: root.points = root.points2 = []
''')
class LinePlayground(FloatLayout):
alpha_controlline = NumericProperty(1.0)
alpha = NumericProperty(0.5)
close = BooleanProperty(False)
points = ListProperty([(500, 500),
[300, 300, 500, 300],
[500, 400, 600, 400]])
points2 = ListProperty([])
joint = OptionProperty('none', options=('round', 'miter', 'bevel', 'none'))
cap = OptionProperty('none', options=('round', 'square', 'none'))
linewidth = NumericProperty(10.0)
dt = NumericProperty(0)
_update_points_animation_ev = None
def on_touch_down(self, touch):
if super(LinePlayground, self).on_touch_down(touch):
return True
touch.grab(self)
self.points.append(touch.pos)
return True
def on_touch_move(self, touch):
if touch.grab_current is self:
self.points[-1] = touch.pos
return True
return super(LinePlayground, self).on_touch_move(touch)
def on_touch_up(self, touch):
if touch.grab_current is self:
touch.ungrab(self)
return True
return super(LinePlayground, self).on_touch_up(touch)
def animate(self, do_animation):
if do_animation:
self._update_points_animation_ev = Clock.schedule_interval(
self.update_points_animation, 0)
elif self._update_points_animation_ev is not None:
self._update_points_animation_ev.cancel()
def update_points_animation(self, dt):
cy = self.height * 0.6
cx = self.width * 0.1
w = self.width * 0.8
step = 20
points = []
points2 = []
self.dt += dt
for i in range(int(w / step)):
x = i * step
points.append(cx + x)
points.append(cy + cos(x / w * 8. + self.dt) * self.height * 0.2)
points2.append(cx + x)
points2.append(cy + sin(x / w * 8. + self.dt) * self.height * 0.2)
self.points = points
self.points2 = points2
class TestLineApp(App):
def build(self):
return LinePlayground()
if __name__ == '__main__':
TestLineApp().run()
|
Python
| 0
|
@@ -868,38 +868,32 @@
t.alpha%0A
-Smooth
Line:%0A
@@ -1102,38 +1102,32 @@
rolline%0A
-Smooth
Line:%0A
@@ -1241,22 +1241,16 @@
-Smooth
Line:%0A
|
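Decoded, the three `SmoothLine:` canvas instructions in the kv rule (inside the existing Builder.load_string payload) revert to the stable `Line:`; nothing else changes. One of the three, after the revert:

        Color:
            rgba: .4, .4, 1, root.alpha
        Line:
            points: self.points
            joint: self.joint
            cap: self.cap
            width: self.linewidth
            close: self.close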
b42554fcc895da17a5b4f6c3552436462a968a77
|
Remove now obsolete environment variable - this has been unified and moved into new ST2_ACTION_AUTH_TOKEN environment variable.
|
st2actions/st2actions/runners/fabric_runner.py
|
st2actions/st2actions/runners/fabric_runner.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import abc
from fabric.api import (env, execute)
from oslo.config import cfg
import six
from st2actions.runners import ActionRunner
from st2actions.runners import ShellRunnerMixin
from st2common import log as logging
from st2common.exceptions.actionrunner import ActionRunnerPreRunError
from st2common.exceptions.fabricrunner import FabricExecutionFailureException
from st2common.constants.action import LIVEACTION_STATUS_SUCCEEDED, LIVEACTION_STATUS_FAILED
from st2common.constants.runners import FABRIC_RUNNER_DEFAULT_ACTION_TIMEOUT
# Replace with container call to get logger.
LOG = logging.getLogger(__name__)
# Fabric environment level settings.
# XXX: Note fabric env is a global singleton.
env.parallel = True # By default, execute things in parallel. Uses multiprocessing under the hood.
env.user = cfg.CONF.system_user.user
ssh_key_file = cfg.CONF.system_user.ssh_key_file
if ssh_key_file:
ssh_key_file = os.path.expanduser(ssh_key_file)
if ssh_key_file and os.path.exists(ssh_key_file):
env.key_filename = ssh_key_file
env.timeout = 10 # Timeout for connections (in seconds)
env.command_timeout = FABRIC_RUNNER_DEFAULT_ACTION_TIMEOUT # timeout for commands (in seconds)
env.combine_stderr = False
env.group = 'staff'
env.abort_exception = FabricExecutionFailureException
# constants to lookup in runner_parameters.
RUNNER_HOSTS = 'hosts'
RUNNER_USERNAME = 'username'
RUNNER_PASSWORD = 'password'
RUNNER_PRIVATE_KEY = 'private_key'
RUNNER_PARALLEL = 'parallel'
RUNNER_SUDO = 'sudo'
RUNNER_ON_BEHALF_USER = 'user'
RUNNER_REMOTE_DIR = 'dir'
RUNNER_COMMAND = 'cmd'
RUNNER_CWD = 'cwd'
RUNNER_ENV = 'env'
RUNNER_KWARG_OP = 'kwarg_op'
RUNNER_TIMEOUT = 'timeout'
@six.add_metaclass(abc.ABCMeta)
class BaseFabricRunner(ActionRunner, ShellRunnerMixin):
def __init__(self, runner_id):
super(BaseFabricRunner, self).__init__(runner_id=runner_id)
self._hosts = None
self._parallel = True
self._sudo = False
self._on_behalf_user = None
self._username = None
self._password = None
self._private_key = None
self._kwarg_op = '--'
self._cwd = None
self._env = None
self._timeout = None
def pre_run(self):
LOG.debug('Entering FabricRunner.pre_run() for liveaction_id="%s"',
self.liveaction_id)
LOG.debug(' runner_parameters = %s', self.runner_parameters)
hosts = self.runner_parameters.get(RUNNER_HOSTS, '').split(',')
self._hosts = [h.strip() for h in hosts if len(h) > 0]
if len(self._hosts) < 1:
raise ActionRunnerPreRunError('No hosts specified to run action for action %s.',
self.liveaction_id)
self._username = self.runner_parameters.get(RUNNER_USERNAME, cfg.CONF.system_user.user)
self._username = self._username or cfg.CONF.system_user.user
self._password = self.runner_parameters.get(RUNNER_PASSWORD, None)
self._private_key = self.runner_parameters.get(RUNNER_PRIVATE_KEY, None)
self._parallel = self.runner_parameters.get(RUNNER_PARALLEL, True)
self._sudo = self.runner_parameters.get(RUNNER_SUDO, False)
self._sudo = self._sudo if self._sudo else False
self._on_behalf_user = self.context.get(RUNNER_ON_BEHALF_USER, env.user)
self._cwd = self.runner_parameters.get(RUNNER_CWD, None)
self._env = self.runner_parameters.get(RUNNER_ENV, {})
self._kwarg_op = self.runner_parameters.get(RUNNER_KWARG_OP, '--')
self._timeout = self.runner_parameters.get(RUNNER_TIMEOUT,
FABRIC_RUNNER_DEFAULT_ACTION_TIMEOUT)
LOG.info('[FabricRunner="%s", liveaction_id="%s"] Finished pre_run.',
self.runner_id, self.liveaction_id)
def _run(self, remote_action):
LOG.info('Executing action via FabricRunner :%s for user: %s.',
self.runner_id, remote_action.get_on_behalf_user())
LOG.info(('[Action info] name: %s, Id: %s, command: %s, on behalf user: %s, '
'actual user: %s, sudo: %s'),
remote_action.name, remote_action.action_exec_id, remote_action.get_command(),
remote_action.get_on_behalf_user(), remote_action.get_user(),
remote_action.is_sudo())
results = execute(remote_action.get_fabric_task(), hosts=remote_action.hosts)
return results
def _get_env_vars(self):
"""
:rtype: ``dict``
"""
env_vars = {}
if self.auth_token:
env_vars['st2_auth_token'] = self.auth_token.token
if self._env:
env_vars.update(self._env)
# Include common st2 env vars
st2_env_vars = self._get_common_action_env_variables()
env_vars.update(st2_env_vars)
return env_vars
@staticmethod
def _get_result_status(result, allow_partial_failure):
success = not allow_partial_failure
for r in six.itervalues(result):
r_succeess = r.get('succeeded', False) if r else False
if allow_partial_failure:
success |= r_succeess
if success:
return LIVEACTION_STATUS_SUCCEEDED
else:
success &= r_succeess
if not success:
return LIVEACTION_STATUS_FAILED
return LIVEACTION_STATUS_SUCCEEDED if success else LIVEACTION_STATUS_FAILED
|
Python
| 0
|
@@ -5352,100 +5352,8 @@
%7B%7D%0A%0A
- if self.auth_token:%0A env_vars%5B'st2_auth_token'%5D = self.auth_token.token%0A%0A
|
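After the decoded deletion, `_get_env_vars` no longer injects the legacy `st2_auth_token` entry; per the commit message, the token is expected to arrive via the new `ST2_ACTION_AUTH_TOKEN` variable included by `_get_common_action_env_variables()`:

    def _get_env_vars(self):
        """
        :rtype: ``dict``
        """
        env_vars = {}

        if self._env:
            env_vars.update(self._env)

        # Include common st2 env vars
        st2_env_vars = self._get_common_action_env_variables()
        env_vars.update(st2_env_vars)

        return env_vars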
add0af524dafa241d7bab64093ed45c857c66c0d
|
Rename cfg to settings
|
statsSend/teamCity/teamCityStatisticsSender.py
|
statsSend/teamCity/teamCityStatisticsSender.py
|
#!/usr/bin/env python3
from dateutil import parser
from statsSend.teamCity.teamCityConnection import TeamCityConnection
from statsSend.teamCity.teamCityUrlBuilder import TeamCityUrlBuilder
from statsSend.teamCity.teamCityProject import TeamCityProject
class TeamCityStatisticsSender:
def __init__(self, cfg, reporter):
self.page_size = int(cfg['page_size'])
connection = TeamCityConnection(cfg['user'], cfg['password'])
url_builder = TeamCityUrlBuilder(cfg['server_url'], cfg['api_url_prefix'])
self.project = TeamCityProject(cfg['project_id'], connection, url_builder, self.page_size)
self.since_timestamp = parser.parse(cfg['since_timestamp']).strftime('%Y%m%dT%H%M%S%z')
self.reporter = reporter
async def send(self):
if ("report_categories" in dir(self.reporter)):
categories = [build_configuration.toCategory() async for build_configuration in self.project.retrieve_build_configurations()]
self.reporter.report_categories(categories)
async for build_configuration in self.project.retrieve_build_configurations():
async for build_run in build_configuration.retrieve_build_runs_since_timestamp(self.since_timestamp):
job = build_run.toJob()
self.reporter.report_job(job)
|
Python
| 0.001188
|
@@ -303,19 +303,24 @@
_(self,
-cfg
+settings
, report
@@ -353,19 +353,24 @@
e = int(
-cfg
+settings
%5B'page_s
@@ -416,19 +416,24 @@
nection(
-cfg
+settings
%5B'user'%5D
@@ -434,19 +434,24 @@
user'%5D,
-cfg
+settings
%5B'passwo
@@ -497,19 +497,24 @@
Builder(
-cfg
+settings
%5B'server
@@ -521,19 +521,24 @@
_url'%5D,
-cfg
+settings
%5B'api_ur
@@ -588,19 +588,24 @@
Project(
-cfg
+settings
%5B'projec
@@ -701,11 +701,16 @@
rse(
-cfg
+settings
%5B'si
|
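The decoded hunks are a pure rename of the constructor parameter, `cfg` to `settings`, with no behavioural change:

    def __init__(self, settings, reporter):
        self.page_size = int(settings['page_size'])
        connection = TeamCityConnection(settings['user'], settings['password'])
        url_builder = TeamCityUrlBuilder(settings['server_url'], settings['api_url_prefix'])
        self.project = TeamCityProject(settings['project_id'], connection, url_builder, self.page_size)
        self.since_timestamp = parser.parse(settings['since_timestamp']).strftime('%Y%m%dT%H%M%S%z')
        self.reporter = reporter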
032b208df7a78f731093314d7e8a75cacc447da6
|
Update elmo_example
|
examples/elmo_example.py
|
examples/elmo_example.py
|
"""
Example from training to saving.
"""
import argparse
import os
import numpy as np
from anago.utils import load_data_and_labels, load_glove, filter_embeddings
from anago.models import ELModel
from anago.preprocessing import ELMoTransformer
from anago.trainer import Trainer
def main(args):
print('Loading dataset...')
x_train, y_train = load_data_and_labels(args.train_data)
x_valid, y_valid = load_data_and_labels(args.valid_data)
x_test, y_test = load_data_and_labels(args.test_data)
x_train = np.r_[x_train, x_valid]
y_train = np.r_[y_train, y_valid]
print('Transforming datasets...')
p = ELMoTransformer()
p.fit(x_train, y_train)
print('Loading word embeddings...')
embeddings = load_glove(EMBEDDING_PATH)
embeddings = filter_embeddings(embeddings, p._word_vocab.vocab, 100)
print('Building a model.')
model = ELModel(char_embedding_dim=args.char_emb_size,
word_embedding_dim=args.word_emb_size,
char_lstm_size=args.char_lstm_units,
word_lstm_size=args.word_lstm_units,
char_vocab_size=p.char_vocab_size,
word_vocab_size=p.word_vocab_size,
num_labels=p.label_size,
embeddings=embeddings,
dropout=args.dropout)
model, loss = model.build()
model.compile(loss=loss, optimizer='adam')
print('Training the model...')
trainer = Trainer(model, preprocessor=p)
trainer.train(x_train, y_train, x_test, y_test)
print('Saving the model...')
model.save(args.weights_file, args.params_file)
p.save(args.preprocessor_file)
if __name__ == '__main__':
DATA_DIR = os.path.join(os.path.dirname(__file__), '../data/conll2003/en/ner')
EMBEDDING_PATH = os.path.join(os.path.dirname(__file__), '../data/glove.6B/glove.6B.100d.txt')
parser = argparse.ArgumentParser(description='Training a model')
parser.add_argument('--train_data', default=os.path.join(DATA_DIR, 'train.txt'), help='training data')
parser.add_argument('--valid_data', default=os.path.join(DATA_DIR, 'valid.txt'), help='validation data')
parser.add_argument('--test_data', default=os.path.join(DATA_DIR, 'test.txt'), help='test data')
parser.add_argument('--weights_file', default='weights.h5', help='weights file')
parser.add_argument('--params_file', default='params.json', help='parameter file')
# Training parameters
parser.add_argument('--optimizer', default='adam', help='optimizer')
parser.add_argument('--max_epoch', type=int, default=15, help='max epoch')
parser.add_argument('--batch_size', type=int, default=32, help='batch size')
parser.add_argument('--checkpoint_path', default=None, help='checkpoint path')
parser.add_argument('--log_dir', default=None, help='log directory')
parser.add_argument('--early_stopping', action='store_true', help='early stopping')
# Model parameters
parser.add_argument('--char_emb_size', type=int, default=25, help='character embedding size')
parser.add_argument('--word_emb_size', type=int, default=100, help='word embedding size')
parser.add_argument('--char_lstm_units', type=int, default=25, help='num of character lstm units')
parser.add_argument('--word_lstm_units', type=int, default=100, help='num of word lstm units')
parser.add_argument('--dropout', type=float, default=0.5, help='dropout rate')
args = parser.parse_args()
main(args)
|
Python
| 0.000001
|
@@ -1634,16 +1634,18 @@
ile)%0A
+ #
p.save(
@@ -2433,24 +2433,100 @@
eter file')%0A
+ parser.add_argument('--preprocessor_file', default='preprocessor.json')%0A
# Traini
|
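Decoded, the update comments out the preprocessor save call:

    print('Saving the model...')
    model.save(args.weights_file, args.params_file)
    # p.save(args.preprocessor_file)

and, further down, registers the matching CLI flag so `args.preprocessor_file` still resolves:

    parser.add_argument('--params_file', default='params.json', help='parameter file')
    parser.add_argument('--preprocessor_file', default='preprocessor.json')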
daba0d7eb4b77e40790624e23938b2ebb6d04fca
|
fix notify loop
|
examples/multiworker2.py
|
examples/multiworker2.py
|
# -*- coding: utf-8 -
#
# This file is part of pistil released under the MIT license.
# See the NOTICE for more information.
import time
import urllib2
from pistil.arbiter import Arbiter
from pistil.worker import Worker
from pistil.tcp.sync_worker import TcpSyncWorker
from pistil.tcp.arbiter import TcpArbiter
from http_parser.http import HttpStream
from http_parser.reader import SocketReader
class MyTcpWorker(TcpSyncWorker):
def handle(self, sock, addr):
p = HttpStream(SocketReader(sock))
path = p.path()
data = "welcome wold"
sock.send("".join(["HTTP/1.1 200 OK\r\n",
"Content-Type: text/html\r\n",
"Content-Length:" + str(len(data)) + "\r\n",
"Connection: close\r\n\r\n",
data]))
class UrlWorker(Worker):
def run(self):
print "ici"
while self.alive:
time.sleep(0.1)
f = urllib2.urlopen("http://localhost:5000")
print f.read()
self.notify
class MyPoolArbiter(TcpArbiter):
def on_init(self, conf):
TcpArbiter.on_init(self, conf)
# we return a spec
return (MyTcpWorker, 30, "worker", {}, "http_welcome",)
if __name__ == '__main__':
conf = {"num_workers": 3, "address": ("127.0.0.1", 5000)}
specs = [
(MyPoolArbiter, 30, "supervisor", {}, "tcp_pool"),
(UrlWorker, 30, "worker", {}, "grabber")
]
arbiter = Arbiter(conf, specs)
arbiter.run()
|
Python
| 0.000001
|
@@ -1051,16 +1051,18 @@
f.notify
+()
%0A%0Aclass
|
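The decoded fix adds the missing call parentheses. A bare `self.notify` is just an attribute lookup and does nothing; presumably (as in gunicorn-style arbiters, which pistil follows) the worker must call `notify()` periodically so the master does not treat it as hung. The corrected loop, in the sample's Python 2 style:

    def run(self):
        while self.alive:
            time.sleep(0.1)
            f = urllib2.urlopen("http://localhost:5000")
            print f.read()
            self.notify()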
2af6d265834805f5fa11bf237de124f468220f3e
|
oopsie missing comma
|
ircd.gyp
|
ircd.gyp
|
{
'variables':
{
'python-includes': 'python -c "from distutils import sysconfig; import ntpath; print sysconfig.get_python_inc().replace(ntpath.sep, \'/\')"',
'python-version': 'python -c "from distutils import sysconfig; print sysconfig.get_config_var(\\"VERSION\\")"',
},
'targets':
[{
'target_name': 'ircd',
'dependencies':
[
'jsoncpp/jsoncpp.gyp:jsoncpp',
'libuv/uv.gyp:uv',
],
'type': 'executable',
'include_dirs':
[
'include',
'<!@(<(python-includes))',
'c:\openssl\include',
],
'sources':
[
'common.gypi',
'etc/example.conf',
'etc/default.msgs',
'include/client.h',
'include/command.h',
'include/config.h',
'include/configsection.h',
'include/connection.h',
'include/generalsection.h',
'include/listener.h',
'include/listenersection.h',
'include/logging.h',
'include/loggingsection.h',
'include/module.h',
'include/modulesection.h',
'include/numeric.h',
'include/parser.h',
'include/ssl.h',
'include/sslconnection.h',
'include/stdinc.h',
'include/system.h',
'include/python/clientwrap.h',
'include/python/pythonloader.h',
'include/python/pythonwrap.h',
'include/python/parserwrap.h',
'src/client.cc',
'src/command.cc',
'src/config.cc',
'src/connection.cc',
'src/generalsection.cc',
'src/listener.cc',
'src/listenersection.cc',
'src/logging.cc',
'src/loggingsection.cc',
'src/module.cc',
'src/modulesection.cc',
'src/main.cc',
'src/numeric.cc',
'src/parser.cc',
'src/ssl.cc',
'src/sslconnection.cc',
'src/system.cc',
'src/python/clientwrap.cc',
'src/python/pythonloader.cc',
'src/python/pythonwrap.cc',
'src/python/parserwrap.cc'
'modules/python/ircd/__init__.py',
'modules/python/ircclient.py'
],
'conditions':
[
[
'OS=="win"',
{
'variables':
{
'python-binlibdest': 'python -c "from distutils import sysconfig; print sysconfig.get_config_var(\\"BINLIBDEST\\")"',
},
'msvs_settings':
{
'VCLinkerTool':
{
'AdditionalLibraryDirectories': '<!@(<(python-binlibdest))s;c:\openssl\lib',
},
},
'defines':
[
'_WIN32_WINNT=0x0600',
'_GNU_SOURCE',
],
'include_dirs':
[
'c:\openssl\include',
],
'libraries':
[
'python<!@(<(python-version))_d.lib',
'libeay32.lib',
'ssleay32.lib',
],
},
{
'xcode_settings': {
'WARNING_CFLAGS': [
'-Wall',
'-Wextra',
'-pedantic',
'-Wno-long-long',
'-Wno-unused-parameter',
'-Wno-deprecated-declarations',
'-Wno-newline-eof',
],
'OTHER_CFLAGS': [
'-std=c++0x',
'-stdlib=libc++',
],
},
'cflags':
[
'-std=c++0x',
'-stdlib=libc++',
'-Wall',
'-Wextra',
'-pedantic',
'-Wno-long-long',
'-Wno-unused-parameter'
],
'defines':
[
'_GNU_SOURCE'
],
'libraries':
[
'-lpython<!@(<(python-version))',
'-lssl',
'-lcrypto',
'-lc++',
],
}
],
],
}]
}
|
Python
| 0.999098
|
@@ -1900,16 +1900,17 @@
wrap.cc'
+,
%0A '
|
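The decoded fix restores the comma after 'src/python/parserwrap.cc'. GYP input files are parsed as Python-style literals, so without it the two adjacent string literals concatenate silently, as this sketch shows (list contents are illustrative):

sources = [
    'src/python/parserwrap.cc'
    'modules/python/ircd/__init__.py',
]
# Adjacent literals merge into one bogus entry, and a source file is lost:
assert sources == ['src/python/parserwrap.ccmodules/python/ircd/__init__.py']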
aa66df3587453874f64f776dce66606b78520215
|
Improve migration string formatting
|
website/database/migrate.py
|
website/database/migrate.py
|
import re
from warnings import warn
from database import db
from helpers.commands import got_permission
def add_column(engine, table_name, definition):
sql = f'ALTER TABLE `{table_name}` ADD {definition}'
engine.execute(sql)
def drop_column(engine, table_name, column_name):
sql = f'ALTER TABLE `{table_name}` DROP `{column_name}`'
engine.execute(sql)
def update_column(engine, table_name, column_definition):
sql = f'ALTER TABLE `{table_name}` MODIFY COLUMN {column_definition}'
engine.execute(sql)
def set_foreign_key_checks(engine, active=True):
sql = f'SET FOREIGN_KEY_CHECKS={1 if active else 0};'
if engine.dialect.name == 'sqlite':
        warn('Sqlite foreign key checks management is not supported')
return False
engine.execute(sql)
return True
def set_autocommit(engine, active=True):
sql = f'SET AUTOCOMMIT={1 if active else 0};'
if engine.dialect.name == 'sqlite':
        warn('Sqlite autocommit management is not supported')
return False
engine.execute(sql)
return True
def set_unique_checks(engine, active=True):
sql = f'SET UNIQUE_CHECKS={1 if active else 0};'
if engine.dialect.name == 'sqlite':
        warn('Sqlite unique key checks management is not supported')
return False
engine.execute(sql)
return True
def get_column_names(table):
return set((i.name for i in table.c))
def basic_auto_migrate_relational_db(app, bind):
"""Inspired by http://stackoverflow.com/questions/2103274/"""
from sqlalchemy import Table
from sqlalchemy import MetaData
print('Performing auto-migration in', bind, 'database...')
db.session.commit()
db.reflect()
db.session.commit()
db.create_all(bind=bind)
with app.app_context():
engine = db.get_engine(app, bind)
tables = db.get_tables_for_bind(bind=bind)
metadata = MetaData()
metadata.engine = engine
ddl = engine.dialect.ddl_compiler(engine.dialect, None)
for table in tables:
db_table = Table(
table.name, metadata, autoload=True, autoload_with=engine
)
db_columns = get_column_names(db_table)
columns = get_column_names(table)
new_columns = columns - db_columns
unused_columns = db_columns - columns
existing_columns = columns.intersection(db_columns)
for column_name in new_columns:
column = getattr(table.c, column_name)
if column.constraints:
print(f'Column {column_name} skipped due to existing constraints.')
continue
print(f'Creating column: {column_name}')
definition = ddl.get_column_specification(column)
add_column(engine, table.name, definition)
if engine.dialect.name == 'mysql':
sql = f'SHOW CREATE TABLE `{table.name}`'
table_definition = engine.execute(sql)
columns_definitions = {}
to_replace = {
'TINYINT(1)': 'BOOL', # synonymous for MySQL and SQLAlchemy
'INT(11)': 'INTEGER',
'DOUBLE': 'FLOAT(53)',
' DEFAULT NULL': ''
}
for definition in table_definition.first()[1].split('\n'):
match = re.match('\s*`(?P<name>.*?)` (?P<definition>[^,]*),?', definition)
if match:
name = match.group('name')
definition_string = match.group('definition').upper()
for mysql_explicit_definition, implicit_sqlalchemy in to_replace.items():
definition_string = definition_string.replace(mysql_explicit_definition, implicit_sqlalchemy)
columns_definitions[name] = name + ' ' + definition_string
columns_to_update = []
for column_name in existing_columns:
column = getattr(table.c, column_name)
old_definition = columns_definitions[column_name]
new_definition = ddl.get_column_specification(column)
if old_definition != new_definition:
columns_to_update.append([column_name, old_definition, new_definition])
if columns_to_update:
print(
'\nFollowing columns in `%s` table differ in definitions '
f'from those specified in models: {table.name}'
)
for column, old_definition, new_definition in columns_to_update:
agreed = got_permission(
'Column: `%s`\n'
'Old definition: %s\n'
'New definition: %s\n'
'Update column definition?'
% (column, old_definition, new_definition)
)
if agreed:
update_column(engine, table.name, new_definition)
print(f'Updated {column} column definition')
else:
print(f'Skipped {column} column')
if unused_columns:
print(
'\nFollowing columns in `%s` table are no longer used '
'and can be safely removed:' % table.name
)
for column in unused_columns:
if got_permission(f'Column: `{column}` - remove?'):
drop_column(engine, table.name, column)
print(f'Removed column {column}.')
else:
print(f'Keeping column {column}.')
print('Auto-migration of', bind, 'database completed.')
|
Python
| 0.000164
|
@@ -4474,32 +4474,33 @@
+f
'%5CnFollowing col
@@ -4500,34 +4500,44 @@
ing columns in %60
-%25s
+%7Btable.name%7D
%60 table differ i
@@ -4611,25 +4611,32 @@
odels: %7B
-table.nam
+columns_to_updat
e%7D'%0A
@@ -4803,16 +4803,17 @@
+f
'Column:
@@ -4814,18 +4814,24 @@
olumn: %60
-%25s
+%7Bcolumn%7D
%60%5Cn'%0A
@@ -4851,16 +4851,17 @@
+f
'Old def
@@ -4861,34 +4861,48 @@
Old definition:
-%25s
+%7Bold_definition%7D
%5Cn'%0A
@@ -4913,16 +4913,17 @@
+f
'New def
@@ -4935,53 +4935,13 @@
on:
-%25s%5Cn'%0A 'Update column
+%7Bnew_
defi
@@ -4946,17 +4946,19 @@
finition
-?
+%7D%5Cn
'%0A
@@ -4979,50 +4979,35 @@
-%25 (column, old_definition, new_definition)
+'Update column definition?'
%0A
@@ -5358,16 +5358,17 @@
+f
'%5CnFollo
@@ -5388,10 +5388,20 @@
in %60
-%25s
+%7Btable.name%7D
%60 ta
@@ -5445,16 +5445,17 @@
+f
'and can
@@ -5477,22 +5477,26 @@
ved:
-' %25 table.name
+ %7Bunused_columns%7D'
%0A
|
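Decoded from the percent-encoded hunks above, the commit converts the %-style status messages to f-strings and swaps `{table.name}` into the header of the first message while printing `{columns_to_update}` after the colon. A sketch of the post-commit lines, reconstructed from the diff rather than the upstream file:

if columns_to_update:
    print(
        f'\nFollowing columns in `{table.name}` table differ in definitions '
        f'from those specified in models: {columns_to_update}'
    )
    for column, old_definition, new_definition in columns_to_update:
        agreed = got_permission(
            f'Column: `{column}`\n'
            f'Old definition: {old_definition}\n'
            f'New definition: {new_definition}\n'
            'Update column definition?'
        )
# ... unchanged lines elided ...
if unused_columns:
    print(
        f'\nFollowing columns in `{table.name}` table are no longer used '
        f'and can be safely removed: {unused_columns}'
    )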
ff8a9eada9ceac6e704c477d341896f247b30ae9
|
fix tag limit
|
website/project/settings.py
|
website/project/settings.py
|
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_ROOT = os.path.realpath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(BASE_DIR, "apps"))
ALLOWED_HOSTS = ['*',]
SITE_ID = 1
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'gunicorn',
'django_extensions',
'djangobower',
'compressor',
'sekizai',
'tastypie',
'lineage',
'taggit',
'taggit_templatetags2',
'el_pagination',
'easy_thumbnails',
'adminsortable2',
'fileupload',
'uploader',
'filer',
'mptt',
'blog',
'projects',
'pages',
'gallery'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.template.context_processors.request',
# sekizai
'sekizai.context_processors.sekizai',
],
'loaders': [
# admin tools
# 'admin_tools.template_loaders.Loader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader'
],
},
},
]
ROOT_URLCONF = 'project.urls'
WSGI_APPLICATION = 'project.wsgi.application'
# i18n
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Zurich'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# static configuration
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'djangobower.finders.BowerFinder',
'compressor.finders.CompressorFinder',
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'site-static'),
)
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
FILES_ROOT = os.path.join(BASE_DIR, 'media/files')
FILES_URL = '/media/files/'
THUMBNAIL_BASEDIR = 'files'
# filer
FILER_STORAGES = {
'public': {
'main': {
'ENGINE': 'filer.storage.PublicFileSystemStorage',
'OPTIONS': {
'location': os.path.join(BASE_DIR, 'media/files'),
'base_url': '/media/files/',
},
'UPLOAD_TO': 'filer.utils.generate_filename.randomized',
'UPLOAD_TO_PREFIX': 'public',
},
'thumbnails': {
'ENGINE': 'filer.storage.PublicFileSystemStorage',
'OPTIONS': {
'location': os.path.join(BASE_DIR, 'media/files'),
'base_url': '/media/files/',
},
},
},
}
# tastypie
API_VERSION = 'v1'
TASTYPIE_ALLOW_MISSING_SLASH = True
# bower
BOWER_COMPONENTS_ROOT = os.path.join(BASE_DIR, 'site-static')
BOWER_INSTALLED_APPS = (
'backbone#1.2.1',
'csshake',
'underscore#1.8.3',
'jquery-placeholder#2.0.8',
'foundation#5.5.2',
'modernizr#2.8.3',
'fastclick#1.0.3',
'jquery#2.1.3',
'jquery.cookie#1.4.1',
'three.js#0.71.0'
)
# pagination
ENDLESS_PAGINATION_PER_PAGE = 3
# load local_settings
try:
from local_settings import *
except ImportError:
pass
|
Python
| 0.000002
|
@@ -4111,16 +4111,57 @@
GE = 3%0A%0A
+# taggit tag cloud%0ATAGGIT_LIMIT = 10000%0A%0A
# load l
|
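The hunk decodes to a three-line insertion between the pagination setting and the local_settings import. Resulting settings fragment, reconstructed from the diff (that TAGGIT_LIMIT is read by taggit_templatetags2 for its tag cloud is an inference from the installed apps list; 10000 effectively lifts the cap):

# pagination
ENDLESS_PAGINATION_PER_PAGE = 3

# taggit tag cloud
TAGGIT_LIMIT = 10000

# load local_settings
try:
    from local_settings import *
except ImportError:
    pass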
5f5141b169e61a5b6912146a995917f5d862ee9c
|
version bump
|
exemelopy/__version__.py
|
exemelopy/__version__.py
|
__author__ = 'Phillip B Oldham'
__author_email__ = 'phillip.oldham@gmail.com'
__version__ = '0.0.8'
__licence__ = 'MIT'
|
Python
| 0.000001
|
@@ -94,9 +94,9 @@
0.0.
-8
+9
'%0A__
|
878811a673625f9dbe0f41dd0196887f612ecf2e
|
Set default file extension to empty string
|
expand_region_handler.py
|
expand_region_handler.py
|
import re
try:
import javascript
import html
except:
from . import javascript
from . import html
def expand(string, start, end, extension=None):
if(re.compile("html|htm|xml").search(extension)):
return html.expand(string, start, end)
return javascript.expand(string, start, end)
|
Python
| 0.000005
|
@@ -145,12 +145,10 @@
ion=
-None
+%22%22
):%0A%0A
|
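The one-line change swaps the `None` default for an empty string: `re.compile(...).search(extension)` raises `TypeError: expected string or bytes-like object` when `extension` is `None`, so callers that omit the argument now fall through to the JavaScript expander instead of crashing. Post-commit signature, per the diff:

def expand(string, start, end, extension=""):
    if(re.compile("html|htm|xml").search(extension)):
        return html.expand(string, start, end)
    return javascript.expand(string, start, end)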
58d3e0712a35052d0016fa3c3b3ffda1ba56b305
|
Add some locks
|
lightcontrol/server.py
|
lightcontrol/server.py
|
#!/usr/bin/env python3
import RPi.GPIO as GPIO
import time
import threading
import logging
from tzlocal import get_localzone
from flask import Flask, render_template, url_for, request, make_response
from lightcontrol.config import lights
from os.path import expanduser
import os.path
import json
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s')
GPIO.setmode(GPIO.BCM)
logger = logging.getLogger(__name__)
app = Flask("lightcontrol")
home_dir = expanduser('~')
class Preferences:
def __init__(self, filename):
self.filename = filename
def read(self):
if os.path.exists(self.filename):
try:
with open(self.filename, 'rb') as f:
return json.loads(f.read().decode('utf-8'))
except:
logger.exception("Error reading JSON. Resetting preferences")
return dict()
else:
return dict()
def write(self, d):
with open(self.filename, 'wb') as f:
return f.write(json.dumps(d).encode('utf-8'))
def update(self, key, value):
p = self.read()
p[key] = value
self.write(p)
pref = Preferences(filename=home_dir + '/.lightcontrol')
def toggle_switch(light_name, onoff):
line = lights[light_name][0 if onoff else 1]
GPIO.setup(line, GPIO.OUT)
GPIO.output(line, GPIO.HIGH)
time.sleep(0.5)
GPIO.output(line, GPIO.LOW)
pref.update(light_name, onoff)
@app.route("/")
def index():
return render_template("index.html", config=lights)
@app.route("/lights/<room_name>/<onoff>", methods=["POST"])
def control(room_name, onoff):
onoff = onoff == "on"
toggle_switch(room_name, onoff)
return make_response(str(onoff), 200)
@app.route("/lights/<room_name>/status", methods=["GET"])
def status(room_name):
stat = pref.read().get(room_name, False)
# update
#toggle_switch(room_name, stat)
return "1" if stat else "0"
#for name, val in pref.read().items():
# toggle_switch(name, val)
#import IPython
#IPython.embed()
|
Python
| 0.000018
|
@@ -85,16 +85,50 @@
logging%0A
+from threading import RLock, Lock%0A
from tzl
@@ -677,24 +677,80 @@
-def read(self):%0A
+ self.lock = RLock()%0A def read(self):%0A with self.lock:%0A
@@ -803,13 +803,21 @@
+
+
try:%0A
+
@@ -881,24 +881,28 @@
+
return json.
@@ -949,16 +949,20 @@
+
+
except:%0A
@@ -957,16 +957,20 @@
except:%0A
+
@@ -1047,32 +1047,36 @@
+
+
return dict()%0A
@@ -1085,14 +1085,22 @@
+
else:%0A
+
@@ -1145,16 +1145,44 @@
lf, d):%0A
+ with self.lock:%0A
@@ -1226,24 +1226,28 @@
+
+
return f.wri
@@ -1310,24 +1310,52 @@
ey, value):%0A
+ with self.lock:%0A
p =
@@ -1374,16 +1374,20 @@
+
p%5Bkey%5D =
@@ -1393,16 +1393,20 @@
= value%0A
+
@@ -1478,16 +1478,38 @@
trol')%0A%0A
+switch_lock = Lock()%0A%0A
def togg
@@ -1534,24 +1534,89 @@
me, onoff):%0A
+ with switch_lock:%0A pref.update(light_name, onoff)%0A
line = l
@@ -1652,16 +1652,20 @@
else 1%5D%0A
+
GPIO
@@ -1687,16 +1687,20 @@
IO.OUT)%0A
+
GPIO
@@ -1724,16 +1724,20 @@
O.HIGH)%0A
+
time
@@ -1748,16 +1748,20 @@
ep(0.5)%0A
+
GPIO
@@ -1787,43 +1787,8 @@
LOW)
-%0A pref.update(light_name, onoff)
%0A%0A@a
|
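Assembled from the hunks: the commit imports `RLock` and `Lock` from `threading`, guards every `Preferences` method with a per-instance re-entrant lock, and serialises the GPIO pulse behind a module-level `switch_lock`, moving the trailing `pref.update` to the top of that block. A sketch of the post-commit code; the re-entrancy matters because `update()` calls `read()` and `write()`, which re-acquire the same lock and would deadlock on a plain `Lock`:

from threading import RLock, Lock

class Preferences:
    def __init__(self, filename):
        self.filename = filename
        self.lock = RLock()

    def read(self):
        with self.lock:
            if os.path.exists(self.filename):
                try:
                    with open(self.filename, 'rb') as f:
                        return json.loads(f.read().decode('utf-8'))
                except:
                    logger.exception("Error reading JSON. Resetting preferences")
                    return dict()
            else:
                return dict()

    def write(self, d):
        with self.lock:
            with open(self.filename, 'wb') as f:
                return f.write(json.dumps(d).encode('utf-8'))

    def update(self, key, value):
        with self.lock:  # RLock: the nested read()/write() re-acquire safely
            p = self.read()
            p[key] = value
            self.write(p)

switch_lock = Lock()

def toggle_switch(light_name, onoff):
    with switch_lock:
        pref.update(light_name, onoff)
        line = lights[light_name][0 if onoff else 1]
        GPIO.setup(line, GPIO.OUT)
        GPIO.output(line, GPIO.HIGH)
        time.sleep(0.5)
        GPIO.output(line, GPIO.LOW)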
c31a3c5fcbdda0075ac50dbcfb0162dc83a4268e
|
support for extra_vars
|
Flansible/flansible/run_ansible_playbook.py
|
Flansible/flansible/run_ansible_playbook.py
|
import os
from flask_restful import Resource, Api
from flask_restful_swagger import swagger
from flask_restful import reqparse
from flansible import app
from flansible import api, app, celery, auth, ansible_default_inventory, get_inventory_access, task_timeout
from ModelClasses import AnsibleCommandModel, AnsiblePlaybookModel, AnsibleRequestResultModel, AnsibleExtraArgsModel
import celery_runner
from flansible_git import FlansibleGit
class RunAnsiblePlaybook(Resource):
@swagger.operation(
notes='Run Ansible Playbook',
nickname='ansibleplaybook',
responseClass=AnsibleRequestResultModel.__name__,
parameters=[
{
"name": "body",
"description": "Inut object",
"required": True,
"allowMultiple": False,
"dataType": AnsiblePlaybookModel.__name__,
"paramType": "body"
}
],
responseMessages=[
{
"code": 200,
"message": "Ansible playbook started"
},
{
"code": 400,
"message": "Invalid input"
}
]
)
@auth.login_required
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('playbook_dir', type=str, help='folder where playbook file resides', required=True)
parser.add_argument('playbook', type=str, help='name of the playbook', required=True)
parser.add_argument('inventory', type=str, help='path to inventory', required=False,)
parser.add_argument('extra_vars', type=dict, help='extra vars', required=False)
parser.add_argument('forks', type=int, help='forks', required=False)
parser.add_argument('verbose_level', type=int, help='verbose level, 1-4', required=False)
parser.add_argument('become', type=bool, help='run with become', required=False)
parser.add_argument('update_git_repo', type=bool, help='Set to true to update git repo prior to executing', required=False)
args = parser.parse_args()
playbook_dir = args['playbook_dir']
playbook = args['playbook']
become = args['become']
inventory = args['inventory']
do_update_git_repo = args['update_git_repo']
if do_update_git_repo is True:
result = FlansibleGit.update_git_repo(playbook_dir)
task = celery_runner.do_long_running_task.AsyncResult(result.id)
while task.state == "PENDING" or task.state == "PROGRESS":
#waiting for finish
task = celery_runner.do_long_running_task.AsyncResult(result.id)
if task.result['returncode'] != 0:
#git update failed
resp = app.make_response((str.format("Failed to update git repo: {0}", playbook_dir), 404))
return resp
curr_user = auth.username()
playbook_full_path = playbook_dir + "/" + playbook
playbook_full_path = playbook_full_path.replace("//","/")
if not os.path.exists(playbook_dir):
resp = app.make_response((str.format("Directory not found: {0}", playbook_dir), 404))
return resp
if not os.path.isdir(playbook_dir):
resp = app.make_response((str.format("Not a directory: {0}", playbook_dir), 404))
return resp
if not os.path.exists(playbook_full_path):
resp = app.make_response((str.format("Playbook not found in folder. Path does not exist: {0}", playbook_full_path), 404))
return resp
if not inventory:
inventory = ansible_default_inventory
has_inv_access = get_inventory_access(curr_user, inventory)
if not has_inv_access:
resp = app.make_response((str.format("User does not have access to inventory {0}", inventory), 403))
return resp
inventory = str.format(" -i {0}", inventory)
if become:
become_string = ' --become'
else:
become_string = ''
command = str.format("cd {0};ansible-playbook {1}{2}{3}", playbook_dir, playbook, become_string, inventory)
task_result = celery_runner.do_long_running_task.apply_async([command], soft=task_timeout, hard=task_timeout)
result = {'task_id': task_result.id}
return result
api.add_resource(RunAnsiblePlaybook, '/api/ansibleplaybook')
|
Python
| 0
|
@@ -2227,16 +2227,56 @@
ntory'%5D%0A
+ extra_vars = args%5B'extra_vars'%5D%0A
@@ -4108,16 +4108,505 @@
g = ''%0A%0A
+ extra_vars_string = ''%0A if extra_vars:%0A counter = 1%0A extra_vars_string += ' -e%22'%0A for key in extra_vars.keys():%0A if counter %3C len(extra_vars):%0A spacer = %22 %22%0A else:%0A spacer = %22%22%0A opt_string = str.format(%22%7B0%7D=%7B1%7D%7B2%7D%22,key,extra_vars%5Bkey%5D, spacer)%0A extra_vars_string += opt_string%0A counter += 1%0A extra_vars_string += '%22'%0A
%0A
@@ -4661,16 +4661,19 @@
1%7D%7B2%7D%7B3%7D
+%7B4%7D
%22, playb
@@ -4707,32 +4707,51 @@
tring, inventory
+, extra_vars_string
)%0A task_r
@@ -4922,17 +4922,16 @@
result%0A%0A
-%0A
api.add_
|
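Reassembled from the hunks, the commit reads `extra_vars` from the parsed arguments and flattens the dict into a single -e"k1=v1 k2=v2" option appended to the ansible-playbook command:

extra_vars = args['extra_vars']

# ... later, after become_string is set ...
extra_vars_string = ''
if extra_vars:
    counter = 1
    extra_vars_string += ' -e"'
    for key in extra_vars.keys():
        # no trailing space after the final key=value pair
        if counter < len(extra_vars):
            spacer = " "
        else:
            spacer = ""
        opt_string = str.format("{0}={1}{2}", key, extra_vars[key], spacer)
        extra_vars_string += opt_string
        counter += 1
    extra_vars_string += '"'

command = str.format("cd {0};ansible-playbook {1}{2}{3}{4}", playbook_dir,
                     playbook, become_string, inventory, extra_vars_string)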
3c5738242e472641eb9029fe8b84c11d2d693047
|
fix radar regex
|
maps2json.py
|
maps2json.py
|
#!/usr/bin/env python3
# Description: Loops through a directory of map pk3s and outputs JSON with map information
# Author: Tyler "-z-" Mulligan
import zipfile, os, re, hashlib, json
from datetime import datetime
def main():
packs_maps = []
packs_other = []
packs_corrupt = []
path = './packages/'
for file in sorted(os.listdir(path)):
if file.endswith('.pk3'):
print('Processing ' + file)
data = {}
data['pk3'] = file
data['shasum'] = hash_file(path + file)
data['filesize'] = os.path.getsize(path + file)
data['date'] = os.path.getmtime(path + file)
data['bsp'] = []
data['mapshot'] = []
data['mapinfo'] = []
data['waypoints'] = []
data['map'] = []
data['radar'] = []
data['title'] = False
data['description'] = False
data['gametypes'] = []
data['author'] = False
data['license'] = False
try:
zip = zipfile.ZipFile(path + file)
filelist = zip.namelist()
# Get the bsp name(s)
for member in filelist:
if re.search('^maps/.*bsp$', member):
bsp_info = zip.getinfo(member)
# this is coming back as a float
epoch = int(datetime(*bsp_info.date_time).timestamp())
data['date'] = epoch
data['bsp'].append(member.replace('maps/','').replace('.bsp',''))
if len(data['bsp']):
# Find out which of the important files exist in the package
for member in filelist:
for bsp in data['bsp']:
rbsp = re.escape(bsp)
if re.search('^maps/' + rbsp + '\.(jpg|tga|png)$', member):
data['mapshot'].append(member)
if re.search('^maps/' + rbsp + '\.mapinfo$', member):
mapinfofile = member
data['mapinfo'].append(member)
if re.search('^maps/' + rbsp + '\.waypoints$', member):
data['waypoints'].append(member)
if re.search('^maps/' + rbsp + '\.map$', member):
data['map'].append(member)
if re.search('^gfx/' + rbsp + '\.(radar|mini)\.(jpg|tga|png)$', member):
data['radar'].append(member)
if re.search('^maps/(LICENSE|COPYING|gpl.txt)$', member):
data['license'] = True
# If the mapinfo file exists, try and parse it
if len(data['mapinfo']):
mapinfo = zip.open(mapinfofile)
for line in mapinfo:
line = line.decode('unicode_escape').rstrip()
if re.search('^title.*$', line):
data['title'] = line.partition(' ')[2]
elif re.search('^author.*', line):
data['author'] = line.partition(' ')[2]
elif re.search('^description.*', line):
data['description'] = line.partition(' ')[2]
elif re.search('^(type|gametype).*', line):
data['gametypes'].append(line.partition(' ')[2].partition(' ')[0])
if len(data['bsp']):
packs_maps.append(data)
else:
packs_other.append(file)
except zipfile.BadZipfile:
print('Corrupt file: ' + file)
packs_corrupt.append(file)
pass
if len(packs_other) != 0:
e_no_map = 'One or more archives did not contain a map'
print('\n' + e_no_map)
dt = datetime.now().strftime('%Y/%m/%d %H:%M:%S')
fo = open('error.log', 'a')
fo.write('\n' + dt + ' - ' + e_no_map + ':\n')
fo.write('\n'.join(packs_other) + '\n')
fo.close()
if len(packs_corrupt) != 0:
e_corrupt = 'One or more archives were corrupt'
print('\n' + e_corrupt)
dt = datetime.now().strftime('%Y/%m/%d %H:%M:%S')
fo = open('error.log', 'a')
fo.write('\n' + dt + ' - ' + e_corrupt + ':\n')
fo.write('\n'.join(packs_corrupt) + '\n')
fo.close()
output = {}
output['data'] = packs_maps
# for debugging
#print(json.dumps(output, sort_keys=True, indent=4, separators=(',', ': ')))
fo = open('data/maps.json', 'w')
fo.write(json.dumps(output))
fo.close()
def hash_file(filename):
""""This function returns the SHA-1 hash
of the file passed into it"""
# make a hash object
h = hashlib.sha1()
# open file for reading in binary mode
with open(filename,'rb') as file:
# loop till the end of the file
chunk = 0
while chunk != b'':
# read only 1024 bytes at a time
chunk = file.read(1024)
h.update(chunk)
# return the hex representation of digest
return h.hexdigest()
if __name__ == "__main__":
main()
|
Python
| 0.000788
|
@@ -2579,18 +2579,17 @@
rbsp + '
-%5C.
+_
(radar%7Cm
|
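The two-character hunk replaces the escaped dot with an underscore, so radar and minimap images named gfx/<bsp>_radar.<ext> or gfx/<bsp>_mini.<ext> (underscore, not dot, before the suffix) are now detected. Post-commit line, per the diff:

if re.search('^gfx/' + rbsp + '_(radar|mini)\.(jpg|tga|png)$', member):
    data['radar'].append(member)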
d31adbfd0485579c94e92b9c2950230d00fdf309
|
update flaskapp.wsgi
|
FlaskApp/flaskapp.wsgi
|
FlaskApp/flaskapp.wsgi
|
#!/usr/bin/python
import sys
import logging
logging.basicConfig(stream=sys.stderr)
sys.path.insert(0,"/var/www/FlaskApp/")
from FlaskApp import app as application
application.secret_key = 'secretkeyhere'
|
Python
| 0.000001
|
@@ -104,16 +104,36 @@
var/www/
+RiotAPIChallenge2.0/
FlaskApp
|
2c64c4fd5a81537d891aadafe01a4da96fcb7ab4
|
Update ipc_lista1.6.py
|
lista1/ipc_lista1.6.py
|
lista1/ipc_lista1.6.py
|
#ipc_lista1.6
#Professor: Jucimar Junior
#Any Mendes Carvalho -
#
#
#
#
#Faça um programa que peça o raio de um círculo, calcule e mostre sua área
raio = 0
area = 0
raio = input("Entre com o valor do raio: "
|
Python
| 0
|
@@ -205,9 +205,10 @@
raio: %22
+)
%0A
|
1cf097d30d5966456c01e4f2e678213c04f8e334
|
Update ipc_lista1.6.py
|
lista1/ipc_lista1.6.py
|
lista1/ipc_lista1.6.py
|
#ipc_lista1.6
#Professor: Jucimar Junior
#Any Mendes Carvalho -
#
#
#
#
#Faça um programa que peça o raio de um círculo, calcule e mostre sua área
raio = 0
area = 0
raio = input("Entre com o valor do raio: ")
area = 3.14 * raio*raio
print "Valor
|
Python
| 0
|
@@ -245,9 +245,11 @@
%22Valor
+da
%0A
|
8804091fb22ef0a7682ea402ff22750261fc38a7
|
Update ipc_lista1.6.py
|
lista1/ipc_lista1.6.py
|
lista1/ipc_lista1.6.py
|
#ipc_lista1.6
#Professor: Jucimar Junior
#Any Mendes Carvalho -
#
#
#
#
#Faça um programa que peça o raio de um círculo, calcule e mostre sua área
raio =
|
Python
| 0
|
@@ -151,11 +151,12 @@
%0Araio =
+0
%0A%0A%0A
|
463fa2a169984879aa5904d00f154492cf1c240d
|
move extension check after the imgur check
|
Functions/URLFollow.py
|
Functions/URLFollow.py
|
from IRCMessage import IRCMessage
from IRCResponse import IRCResponse, ResponseType
from Function import Function
import WebUtils
import re
import HTMLParser
class Instantiate(Function):
Help = 'automatic function that follows urls and grabs information about the resultant webpage'
htmlParser = HTMLParser.HTMLParser()
def GetResponse(self, message):
if message.Type != 'PRIVMSG':
return
match = re.search('(?P<url>https?://[^\s]+)', message.MessageString, re.IGNORECASE)
if not match:
return
if re.search('\.(jpe?g|gif|png|bmp)$', match.group('url')):
return
youtubeMatch = re.search('(www\.youtube\.com/watch.+v=|youtu\.be/)(?P<videoID>[^&#]+)', match.group('url'))
imgurMatch = re.search('(i\.)?imgur.com/(?P<imgurID>[^\.]+)', match.group('url'))
if youtubeMatch:
return self.FollowYouTube(youtubeMatch.group('videoID'), message)
elif imgurMatch:
return self.FollowImgur(imgurMatch.group('imgurID'), message)
else:
return self.FollowStandard(match.group('url'), message)
def FollowYouTube(self, videoID, message):
url = 'https://gdata.youtube.com/feeds/api/videos/%s?v=2&key=AI39si4LaIHfBlDmxNNRIqZjXYlDgVTmUVa7p8dSE8_bI45a9leskPQKauV7qi-qmAqjf6zjTdhwAfJxOfkxNcYOmloh8B1X9Q' % videoID
webPage = WebUtils.FetchURL(url)
titleMatch = re.search('<title>(?P<title>[^<]+?)</title><content', webPage.Page)
if titleMatch:
lengthMatch = re.search("<yt:duration seconds='(?P<length>[0-9]+?)'/>", webPage.Page)
descMatch = re.search("<media:description type='plain'>(?P<desc>[^<]+?)</media:description>", webPage.Page)
title = titleMatch.group('title')
title = self.htmlParser.unescape(title)
length = lengthMatch.group('length')
m, s = divmod(int(length), 60)
h, m = divmod(m, 60)
if h > 0:
length = '{0:02d}:{1:02d}:{2:02d}'.format(h,m,s)
else:
length = '{0:02d}:{1:02d}'.format(m,s)
description = descMatch.group('desc')
description = re.sub('<[^<]+?>', '', description)
description = self.htmlParser.unescape(description)
description = re.sub('\n+', ' ', description)
description = re.sub('\s+', ' ', description)
if len(description) > 200:
description = description[:197] + '...'
return IRCResponse(ResponseType.Say, '{0} | {1} | {2}'.format(title, length, description), message.ReplyTo)
return
def FollowImgur(self, id, message):
clientID = 'cc2c410cd122a79'
clientSecret = '501db78ba87c47393db86c4a557073ee97efdc88'
url = 'https://api.imgur.com/3/image/{0}'.format(id)
headers = [('Authorization', 'Client-ID {0}'.format(clientID))]
webPage = WebUtils.FetchURL(url, headers)
print webPage.Page
return
def FollowStandard(self, url, message):
webPage = WebUtils.FetchURL(url)
if webPage is None:
return
match = re.search('<title\s*>\s*(?P<title>.*?)</title\s*>', webPage.Page, re.IGNORECASE | re.DOTALL)
if match:
title = match.group('title')
title = re.sub('(\n|\r)+', '', title)
title = title.strip()
title = re.sub('\s+', ' ', title)
title = re.sub('<[^<]+?>', '', title)
title = self.htmlParser.unescape(title)
return IRCResponse(ResponseType.Say, '{0} (at {1})'.format(title, webPage.Domain), message.ReplyTo)
return
|
Python
| 0
|
@@ -602,107 +602,8 @@
%0D%0A
- if re.search('%5C.(jpe?g%7Cgif%7Cpng%7Cbmp)$', match.group('url')):%0D%0A return%0D%0A %0D%0A
@@ -756,24 +756,25 @@
'(i%5C.)?imgur
+%5C
.com/(?P%3Cimg
@@ -1017,34 +1017,94 @@
age)%0D%0A el
-se
+if not re.search('%5C.(jpe?g%7Cgif%7Cpng%7Cbmp)$', match.group('url'))
:%0D%0A r
|
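Decoded, the commit drops the early bail-out on image extensions, escapes the dot in the imgur pattern (`imgur\.com`), and re-applies the extension check only on the fallback branch, so direct image links can still reach FollowImgur while plain images skip the title scrape. Resulting dispatch, per the diff:

imgurMatch = re.search('(i\.)?imgur\.com/(?P<imgurID>[^\.]+)', match.group('url'))

if youtubeMatch:
    return self.FollowYouTube(youtubeMatch.group('videoID'), message)
elif imgurMatch:
    return self.FollowImgur(imgurMatch.group('imgurID'), message)
elif not re.search('\.(jpe?g|gif|png|bmp)$', match.group('url')):
    return self.FollowStandard(match.group('url'), message)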
c6abbd5b8176943ec02d0a03852e1992f62950a1
|
Update ipc_lista1.9.py
|
lista1/ipc_lista1.9.py
|
lista1/ipc_lista1.9.py
|
#ipc_lista1.9
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
#Faça um programa que peça a temperatura em graus Fahrenheit, transforme e mostre
|
Python
| 0
|
@@ -157,9 +157,23 @@
e mostre
+ a temperatura
%0A
|
89571a6caf877f8ff5ff0b983548b926dec87f8d
|
Update ipc_lista1.9.py
|
lista1/ipc_lista1.9.py
|
lista1/ipc_lista1.9.py
|
#ipc_lista1.9
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
#Faça um programa que peça a temperatura em graus
|
Python
| 0
|
@@ -125,9 +125,20 @@
em graus
+ Fahrenheit
%0A
|
c5ed4acaeeb03c9935f01e8a1fed84136b5f3e0c
|
Revert overwritten changes
|
GEOparse/downloader.py
|
GEOparse/downloader.py
|
import os
import requests
from tqdm import tqdm
from ftplib import FTP
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from .logger import geoparse_logger as logger
class Downloader(object):
"""Downloader class."""
def __init__(self, url, outdir, filename=None, silent=False):
self.url = url
if outdir is None:
outdir = os.getcwd()
if filename is None:
filename = self._get_filename()
self.silent = silent
@property
def destination(self):
"""Get the destination path.
This is the property should be calculated every time it is used because
a user could change the outdir and filename dynamically.
"""
return os.path.join(os.path.abspath(self.outdir), self.filename)
def download(self):
"""Download from URL."""
logger.info("Downloading %s to %s" % (self.url, self.destination))
if self.url.startswith("http"):
self._download_http()
elif self.url.startswith("ftp"):
self._download_ftp()
else:
raise ValueError("Invalid URL %s" % self.url)
def _get_filename(self):
filename = os.path.basename(urlparse(self.url).path).strip(" \n\t.")
if len(filename) == 0:
raise Exception("Cannot parse filename from %s" % self.url)
return filename
def _download_ftp(self):
parsed_url = urlparse(self.url)
try:
ftp = FTP(parsed_url.netloc)
ftp.login()
total_size = ftp.size(parsed_url.path)
if total_size is None:
total_size = 0
with open(destination, 'wb') as f:
with tqdm(total=total_size,
unit="B",
unit_scale=True,
unit_divisor=1024,
leave=True) as pbar:
def _write(data):
pbar.update(len(data))
f.write(data)
ftp.retrbinary("RETR %s" % parsed_url.path, _write)
ftp.quit()
except Exception:
try:
ftp.quit()
                logger.error("Error when trying to retrieve %s." % self.url,
exc_info=True)
except Exception:
                logger.error("Error when quitting FTP server.", exc_info=True)
def _download_http(self):
r = requests.get(self.url, stream=True)
# Total size in bytes.
total_size = int(r.headers.get('content-length', 0))
chunk_size = 1024
with open(destination, 'wb') as f:
if self.silent:
for chunk in r.iter_content(chunk_size):
if chunk:
f.write(chunk)
else:
with tqdm(
total=total_size,
unit="B",
unit_scale=True,
unit_divisor=1024,
leave=True) as pbar:
for chunk in r.iter_content(chunk_size):
if chunk:
f.write(chunk)
pbar.update(len(chunk))
|
Python
| 0.000001
|
@@ -392,16 +392,21 @@
+self.
outdir =
@@ -418,16 +418,63 @@
etcwd()%0A
+ else:%0A self.outdir = outdir%0A
@@ -498,32 +498,37 @@
ne:%0A
+self.
filename = self.
@@ -543,16 +543,67 @@
ename()%0A
+ else:%0A self.filename = filename%0A
@@ -623,20 +623,16 @@
silent%0A
-
%0A @pr
@@ -702,24 +702,16 @@
n path.%0A
-
%0A
@@ -1290,25 +1290,8 @@
rl)%0A
- %0A
%0A
@@ -1799,32 +1799,37 @@
with open(
+self.
destination, 'wb
@@ -2784,16 +2784,21 @@
th open(
+self.
destinat
|
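The revert makes `__init__` actually store its arguments and prefixes the bare `destination` lookups with `self.`; as written above, the constructor discards `outdir` and `filename`, and both `_download_ftp` and `_download_http` raise `NameError` on `open(destination, ...)`. Restored constructor, per the diff:

def __init__(self, url, outdir, filename=None, silent=False):
    self.url = url
    if outdir is None:
        self.outdir = os.getcwd()
    else:
        self.outdir = outdir
    if filename is None:
        self.filename = self._get_filename()
    else:
        self.filename = filename
    self.silent = silent

# and in both download paths:
with open(self.destination, 'wb') as f:
    ...  # unchanged chunk-writing body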
470a3e226619ae08983ba47c11e7a1609a365d85
|
Fix potential data loss in upgrade. Closes #2512.
|
trac/upgrades/db13.py
|
trac/upgrades/db13.py
|
sql = [
#-- Add ticket_type to 'ticket', remove the unused 'url' column
"""CREATE TEMP TABLE ticket_old AS SELECT * FROM ticket;""",
"""DROP TABLE ticket;""",
"""CREATE TABLE ticket (
id integer PRIMARY KEY,
type text, -- the nature of the ticket
time integer, -- the time it was created
changetime integer,
component text,
severity text,
priority text,
owner text, -- who is this ticket assigned to
reporter text,
cc text, -- email addresses to notify
version text, --
milestone text, --
status text,
resolution text,
summary text, -- one-line summary
description text, -- problem description (long)
keywords text
);""",
"""INSERT INTO ticket(id, type, time, changetime, component, severity, priority,
owner, reporter, cc, version, milestone, status, resolution,
summary, description, keywords)
SELECT id, 'defect', time, changetime, component, severity, priority, owner,
reporter, cc, version, milestone, status, resolution, summary,
description, keywords FROM ticket_old
WHERE severity <> 'enhancement';""",
"""INSERT INTO ticket(id, type, time, changetime, component, severity, priority,
owner, reporter, cc, version, milestone, status, resolution,
summary, description, keywords)
SELECT id, 'enhancement', time, changetime, component, 'normal', priority,
owner, reporter, cc, version, milestone, status, resolution, summary,
description, keywords FROM ticket_old
WHERE severity = 'enhancement';""",
"""INSERT INTO enum (type, name, value) VALUES ('ticket_type', 'defect', '1');""",
"""INSERT INTO enum (type, name, value) VALUES ('ticket_type', 'enhancement', '2');""",
"""INSERT INTO enum (type, name, value) VALUES ('ticket_type', 'task', '3');""",
"""DELETE FROM enum WHERE type = 'severity' AND name = 'enhancement';""",
"""DROP TABLE ticket_old;""",
]
def do_upgrade(env, ver, cursor):
for s in sql:
cursor.execute(s)
# -- upgrade reports (involve a rename)
cursor.execute("SELECT id,sql FROM report")
reports = {}
for id, rsql in cursor:
reports[id] = rsql
for id, rsql in reports.items():
parts = rsql.split('ORDER BY', 1)
ending = len(parts)>1 and 'ORDER BY'+parts[1] or ''
cursor.execute("UPDATE report SET sql=%s WHERE id=%s",
(parts[0].replace('severity,',
't.type AS type, severity,') + ending,
id))
|
Python
| 0
|
@@ -1383,24 +1383,33 @@
WHERE
+COALESCE(
severity
%3C%3E 'enh
@@ -1400,16 +1400,20 @@
severity
+,'')
%3C%3E 'enh
|
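The fix wraps the first INSERT's filter in COALESCE. In SQL, NULL <> 'enhancement' evaluates to NULL rather than true, so tickets with a NULL severity matched neither INSERT and were silently dropped during the upgrade; coalescing to the empty string routes them into the defect branch. Post-commit clause, per the diff:

WHERE COALESCE(severity,'') <> 'enhancement';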
c168529f52927f13f3ff0b3474b0d0c4e5a71ae4
|
Make dict comprehension more readable
|
locust/distribution.py
|
locust/distribution.py
|
import math
from itertools import combinations_with_replacement
from operator import attrgetter
from typing import (
Dict,
List,
Type,
)
from locust import User
def weight_users(
user_classes: List[Type[User]],
user_count: int,
) -> Dict[str, int]:
"""
Compute the desired state of users using the weight of each user class.
:param user_classes: the list of user class
:param user_count: total number of users
:return: the set of users to run
"""
assert user_count >= 0
if len(user_classes) == 0:
return {}
user_classes = sorted(user_classes, key=attrgetter("__name__"))
user_classes_count = {user_class.__name__: 0 for user_class in user_classes}
# If the number of users is less than the number of user classes, at most one user of each user class
# is chosen. User classes with higher weight are chosen first.
if user_count <= len(user_classes):
user_classes_count.update(
{
user_class.__name__: 1
for user_class in sorted(
user_classes,
key=attrgetter("weight"),
reverse=True,
)[:user_count]
}
)
return user_classes_count
# If the number of users is greater than or equal to the number of user classes, at least one user of each
# user class will be chosen. The greater number of users is, the better the actual distribution
# of users will match the desired one (as dictated by the weight attributes).
weights = list(map(attrgetter("weight"), user_classes))
user_classes_count = {
user_class.__name__: round(relative_weight * user_count) or 1
for user_class, relative_weight in zip(user_classes, (weight / sum(weights) for weight in weights))
}
if sum(user_classes_count.values()) == user_count:
return user_classes_count
else:
user_classes_count = _find_ideal_users_to_add_or_remove(
user_classes,
user_count - sum(user_classes_count.values()),
user_classes_count,
)
assert sum(user_classes_count.values()) == user_count
return user_classes_count
def _find_ideal_users_to_add_or_remove(
user_classes: List[Type[User]],
user_count_to_add_or_remove: int,
user_classes_count: Dict[str, int],
) -> Dict[str, int]:
sign = -1 if user_count_to_add_or_remove < 0 else 1
user_count_to_add_or_remove = abs(user_count_to_add_or_remove)
assert user_count_to_add_or_remove <= len(user_classes), user_count_to_add_or_remove
# Formula for combination with replacement
# (https://www.tutorialspoint.com/statistics/combination_with_replacement.htm)
number_of_combinations = math.factorial(len(user_classes) + user_count_to_add_or_remove - 1) / (
math.factorial(user_count_to_add_or_remove) * math.factorial(len(user_classes) - 1)
)
# If the number of combinations with replacement is above this threshold, we simply add/remove
# users for the first "number_of_users_to_add_or_remove" users. Otherwise, computing the best
# distribution is too expensive in terms of computation.
max_number_of_combinations_threshold = 1000
if number_of_combinations <= max_number_of_combinations_threshold:
user_classes_count_candidates: Dict[float, Dict[str, int]] = {}
for user_classes_combination in combinations_with_replacement(user_classes, user_count_to_add_or_remove):
# Copy in order to not mutate `user_classes_count` for the parent scope
user_classes_count_candidate = user_classes_count.copy()
for user_class in user_classes_combination:
user_classes_count_candidate[user_class.__name__] += sign
distance = distance_from_desired_distribution(
user_classes,
user_classes_count_candidate,
)
if distance not in user_classes_count_candidates:
user_classes_count_candidates[distance] = user_classes_count_candidate.copy()
return user_classes_count_candidates[min(user_classes_count_candidates.keys())]
else:
# Copy in order to not mutate `user_classes_count` for the parent scope
user_classes_count_candidate = user_classes_count.copy()
for user_class in user_classes[:user_count_to_add_or_remove]:
user_classes_count_candidate[user_class.__name__] += sign
return user_classes_count_candidate
def distance_from_desired_distribution(
user_classes: List[Type[User]],
user_classes_count: Dict[str, int],
) -> float:
actual_ratio_of_user_class = {
user_class: user_class_count / sum(user_classes_count.values())
for user_class, user_class_count in user_classes_count.items()
}
expected_ratio_of_user_class = {
user_class.__name__: user_class.weight / sum(map(attrgetter("weight"), user_classes))
for user_class in user_classes
}
differences = [
actual_ratio_of_user_class[user_class] - expected_ratio
for user_class, expected_ratio in expected_ratio_of_user_class.items()
]
return math.sqrt(math.fsum(map(lambda x: x ** 2, differences)))
|
Python
| 0.000323
|
@@ -1613,24 +1613,93 @@
r_classes))%0A
+ relative_weights = %5Bweight / sum(weights) for weight in weights%5D%0A
user_cla
@@ -1848,53 +1848,25 @@
es,
-(weight / sum(weights) for weight in
+relative_
weights)
)%0A
@@ -1861,17 +1861,16 @@
weights)
-)
%0A %7D%0A%0A
|
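The refactor binds the inline generator to a named list, `relative_weights`, before the dict comprehension; behaviour is unchanged. Post-commit fragment, per the diff:

weights = list(map(attrgetter("weight"), user_classes))
relative_weights = [weight / sum(weights) for weight in weights]
user_classes_count = {
    user_class.__name__: round(relative_weight * user_count) or 1
    for user_class, relative_weight in zip(user_classes, relative_weights)
}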
ed1ae0c787c89ca9fce46cd9ae8b6e06c14e766e
|
Update record.py
|
record.py
|
record.py
|
#!/usr/bin/env python
import numpy as np
import os
import shutil
import wx
import matplotlib
matplotlib.use('WXAgg')
from datetime import datetime
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigCanvas
from utils import Screenshot, XboxController
IDLE_SAMPLE_RATE = 1500
SAMPLE_RATE = 200
class MainWindow(wx.Frame):
""" Main frame of the application
"""
title = 'Data Acquisition'
def __init__(self):
wx.Frame.__init__(self, None, title=self.title, size=(660,330))
# Init controller
self.controller = XboxController()
# Create GUI
self.create_main_panel()
# Timer
self.timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_timer, self.timer)
self.rate = SAMPLE_RATE
self.idle_rate = IDLE_SAMPLE_RATE
self.timer.Start(self.idle_rate)
self.recording = False
self.t = 0
def create_main_panel(self):
# Panels
self.img_panel = wx.Panel(self)
self.joy_panel = wx.Panel(self)
self.record_panel = wx.Panel(self)
# Images
img = wx.Image(320,240)
self.image_widget = wx.StaticBitmap(self.img_panel, wx.ID_ANY, wx.Bitmap(img))
# Joystick
self.init_plot()
self.PlotCanvas = FigCanvas(self.joy_panel, wx.ID_ANY, self.fig)
# Recording
self.txt_outputDir = wx.TextCtrl(self.record_panel, wx.ID_ANY, pos=(5,0), size=(320,30))
uid = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
self.txt_outputDir.ChangeValue("samples/" + uid)
self.btn_record = wx.Button(self.record_panel, wx.ID_ANY, label="Record", pos=(335,0), size=(100,30))
self.Bind(wx.EVT_BUTTON, self.on_btn_record, self.btn_record)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_btn_record, self.btn_record)
# sizers
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(self.img_panel, 0, wx.ALL, 5)
sizer.Add(self.joy_panel, 0, wx.ALL, 5)
mainSizer_v = wx.BoxSizer(wx.VERTICAL)
mainSizer_v.Add(sizer, 0 , wx.ALL, 5)
mainSizer_v.Add(self.record_panel, 0 , wx.ALL, 5)
# finalize layout
self.SetAutoLayout(True)
self.SetSizer(mainSizer_v)
self.Layout()
def init_plot(self):
self.plotMem = 50 # how much data to keep on the plot
self.plotData = [[0] * (5)] * self.plotMem # mem storage for plot
self.fig = Figure((4,3))
self.axes = self.fig.add_subplot(111)
def on_timer(self, event):
self.poll()
# stop drawing if recording to avoid slow downs
if self.recording == False:
self.draw()
def poll(self):
self.bmp = self.take_screenshot()
self.controller_data = self.controller.read()
self.update_plot()
if self.recording == True:
self.save_data()
def take_screenshot(self):
screen = wx.ScreenDC()
bmp = wx.Bitmap(Screenshot.IMG_W, Screenshot.IMG_H)
mem = wx.MemoryDC(bmp)
mem.Blit(0, 0, Screenshot.IMG_W, Screenshot.IMG_H, screen, Screenshot.OFFSET_X, Screenshot.OFFSET_Y)
return bmp
def update_plot(self):
self.plotData.append(self.controller_data) # adds to the end of the list
self.plotData.pop(0) # remove the first item in the list, ie the oldest
def save_data(self):
image_file = self.outputDir+'/'+'img_'+str(self.t)+'.png'
self.bmp.SaveFile(image_file, wx.BITMAP_TYPE_PNG)
# make / open outfile
outfile = open(self.outputDir+'/'+'data.csv', 'a')
# write line
outfile.write( image_file + ',' + ','.join(map(str, self.controller_data)) + '\n' )
outfile.close()
self.t += 1
def draw(self):
# Image
img = self.bmp.ConvertToImage()
img = img.Rescale(320,240)
self.image_widget.SetBitmap( img.ConvertToBitmap() )
# Joystick
x = np.asarray(self.plotData)
self.axes.plot(range(0,self.plotMem), x[:,0], 'r')
self.axes.hold(True)
self.axes.plot(range(0,self.plotMem), x[:,1], 'b')
self.axes.plot(range(0,self.plotMem), x[:,2], 'g')
self.axes.plot(range(0,self.plotMem), x[:,3], 'k')
self.axes.plot(range(0,self.plotMem), x[:,4], 'y')
self.axes.hold(False)
self.PlotCanvas.draw()
def on_update_btn_record(self, event):
label = "Stop" if self.recording else "Record"
self.btn_record.SetLabel(label)
def on_btn_record(self, event):
# pause timer
self.timer.Stop()
# switch state
self.recording = not self.recording
if self.recording:
self.start_recording()
# un pause timer
if self.recording:
self.timer.Start(self.rate)
else:
self.timer.Start(self.idle_rate)
def start_recording(self):
# check that a dir has been specified
if self.txt_outputDir.IsEmpty():
msg = wx.MessageDialog(self, 'Specify the Output Directory', 'Error', wx.OK | wx.ICON_ERROR)
msg.ShowModal() == wx.ID_YES
msg.Destroy()
self.recording = False
else: # a directory was specified
self.outputDir = self.txt_outputDir.GetValue()
self.t = 0
# check if path exists - ie may be saving over data
if os.path.exists(self.outputDir):
msg = wx.MessageDialog(self, 'Output Directory Exists - Overwrite Data?', 'Yes or No', wx.YES_NO | wx.ICON_QUESTION)
result = msg.ShowModal() == wx.ID_YES
msg.Destroy()
# overwrite the data
if result == True:
# delete the dir
shutil.rmtree(self.outputDir)
# re-make dir
os.mkdir(self.outputDir)
# do not overwrite the data
else: # result == False
self.recording = False
self.txt_outputDir.SetFocus()
# no directory so make one
else:
os.mkdir(self.outputDir)
def on_exit(self, event):
self.Destroy()
if __name__ == '__main__':
app = wx.App()
app.frame = MainWindow()
app.frame.Show()
app.MainLoop()
|
Python
| 0.000001
|
@@ -3016,35 +3016,35 @@
tmap(Screenshot.
-IMG
+SRC
_W, Screenshot.I
@@ -3042,19 +3042,19 @@
eenshot.
-IMG
+SRC
_H)%0A
@@ -3114,19 +3114,19 @@
eenshot.
-IMG
+SRC
_W, Scre
@@ -3136,11 +3136,11 @@
hot.
-IMG
+SRC
_H,
|
822317912f8fe2de65edeaf311c9ee7c267f1fa0
|
Use Attr in module sachsen
|
modules/sachsen/pages.py
|
modules/sachsen/pages.py
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2014 Florent Fourcot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.browser2.page import HTMLPage, method, ListElement, ItemElement
from weboob.tools.browser2.filters import Env, CleanText, Regexp, Field, Date, Map
from weboob.capabilities.gauge import Gauge, GaugeMeasure, GaugeSensor
from weboob.capabilities.base import NotAvailable, NotLoaded
import re
__all__ = ['ListPage', 'HistoryPage']
class ListPage(HTMLPage):
@method
class get_rivers_list(ListElement):
item_xpath = ".//a[@onmouseout='pegelaus()']"
class item(ItemElement):
klass = Gauge
forecasts = {'pf_gerade.png': u'stable',
'pf_unten.png': u'Go down',
'pf_oben.png': u'Go up',
}
alarmlevel = {"as1.gif": u"Alarmstufe 1", "as2.gif": u"Alarmstufe 2",
"as3.gif": u"Alarmstufe 3", "as4.gig": u"Alarmstufe 4",
"qua_grau.gif": u"No alarm function", "p_gruen.gif": u"",
"qua_weiss.gif": u"no data", "as0.gif": u"",
"MNW.gif": u""}
obj_id = CleanText(Env('id'))
obj_name = CleanText(Env('name'), "'")
obj_city = Regexp(Field('name'), '^([^\s]+).*')
obj_object = Env('object')
def parse(self, el):
div = el.getparent()
img = div.find('.//img').attrib['src'].split('/')[1]
data = unicode(el.attrib['onmouseover']) \
.strip('pegelein(').strip(')').replace(",'", ",").split("',")
self.env['id'] = data[7].strip()
self.env['name'] = data[0]
self.env['object'] = data[1]
self.env['datetime'] = data[2]
self.env['levelvalue'] = data[3]
self.env['flowvalue'] = data[4]
self.env['forecast'] = data[5]
self.env['alarm'] = img
def add_sensor(self, sensors, name, unit, value, forecast, alarm, date):
sensor = GaugeSensor("%s-%s" % (self.obj.id, name.lower()))
sensor.name = name
sensor.unit = unit
sensor.forecast = forecast
lastvalue = GaugeMeasure()
lastvalue.alarm = alarm
try:
lastvalue.level = float(value)
except ValueError:
lastvalue.level = NotAvailable
lastvalue.date = date
sensor.lastvalue = lastvalue
sensor.history = NotLoaded
sensor.gaugeid = self.obj.id
sensors.append(sensor)
def obj_sensors(self):
sensors = []
lastdate = Date(Regexp(Env('datetime'), r'(\d+)\.(\d+)\.(\d+) (\d+):(\d+)', r'\3-\2-\1 \4:\5', default=NotAvailable))(self)
forecast = Map(Env('forecast'), self.forecasts, default=NotAvailable)(self)
alarm = Map(Env('alarm'), self.alarmlevel, default=u'')(self)
self.add_sensor(sensors, u"Level", u"cm", self.env['levelvalue'], forecast, alarm, lastdate)
self.add_sensor(sensors, u"Flow", u"m3/s", self.env['flowvalue'], forecast, alarm, lastdate)
return sensors
class HistoryPage(HTMLPage):
@method
class iter_history(ListElement):
item_xpath = '//table[@width="215"]/tr'
class item(ItemElement):
klass = GaugeMeasure
verif = re.compile("\d\d.\d\d.\d+ \d\d:\d\d")
def condition(self):
return self.verif.match(self.el[0].text_content())
obj_id = None
obj_date = Date(Regexp(CleanText('.'), r'(\d+)\.(\d+)\.(\d+) (\d+):(\d+)', r'\3-\2-\1 \4:\5'))
sensor_types = [u'Level', u'Flow']
def obj_level(self):
index = self.sensor_types.index(self.env['sensor'].name) + 1
try:
return float(self.el[index].text_content())
except ValueError:
return NotAvailable
# TODO: history.alarm
|
Python
| 0
|
@@ -894,16 +894,22 @@
ate, Map
+, Attr
%0Afrom we
@@ -2115,54 +2115,63 @@
g =
-div.find('.//img').attrib%5B'src'%5D.split('/')%5B1%5D
+Regexp(Attr('.//img', 'src'), %22(.*?)/(.*)%22, %22%5C%5C2%22)(div)
%0A
|
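Decoded, `parse()` swaps the raw lxml attribute chain for weboob's declarative filters: `Attr('.//img', 'src')` fetches the attribute and `Regexp(..., "(.*?)/(.*)", "\\2")` keeps everything after the first slash, replacing the `split('/')[1]` chain. Post-commit lines reconstructed from the diff:

from weboob.tools.browser2.filters import Env, CleanText, Regexp, Field, Date, Map, Attr

def parse(self, el):
    div = el.getparent()
    img = Regexp(Attr('.//img', 'src'), "(.*?)/(.*)", "\\2")(div)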
325304d58105b8b40490bee51a1d47bd22213979
|
Fix timestring parsing issue
|
merge_har.py
|
merge_har.py
|
#!/usr/bin/env python
#https://wiki.python.org/moin/WorkingWithTime
import json, argparse, logging, sys
import calendar, iso8601
def convert_enddate_to_ms(ts):
"""Takes ISO 8601 format(string) and converts into epoch time."""
dt = iso8601.parse_date(ts)
ms = calendar.timegm(dt.timetuple())*1000 + dt.microsecond/1000.0
return ms
def execute(args):
with open(args.harfile, 'r') as f:
har = json.load(f)
logging.info('HAR file loaded')
if args.file:
with open(args.file, 'r') as f:
tcpTime = json.load(f)
else:
tcpTime = json.load(sys.stdin)
logging.info('tcpdump timing file loaded')
for i in range(len(har['log']['entries'])):
entry = har['log']['entries'][i]
url = entry['request']['url']
timestamp = convert_enddate_to_ms(entry['startedDateTime'])
if url not in tcpTime:
logging.warning('No record found in tcpdump for %s', url)
continue
else:
if len(tcpTime[url]) > 1:
logging.warning("multiple records found for %s, TODO", url)
timings = entry['timings']
timings['dataArrivals'] = []
timings['reset'] = []
for index in range(len(tcpTime[url])):
timedata = tcpTime[url][index]['data']
timings = entry['timings']
# time in tcp timing is computed based on the timestamp when request is sent
# time in HAR is computed based on the timestamp when the url of the object is parsed
# the gap needs to be closed
gap = max(timings['blocked'], 0) + max(timings['dns'], 0)\
+ max(timings['connect'], 0) + max(timings['send'], 0)
shift = tcpTime[url][index]['request']*1000 - (timestamp + gap)
logging.debug("%s has time shift %f", url, shift)
if abs(shift) > args.threshold:
logging.warning('Big time shift %.3f ms in request sent time for %s', shift, url)
#continue
dataTimestamps = []
for d in timedata:
dataTimestamps.append({'timestamp': d + gap + shift})
timings['dataArrivals'] += dataTimestamps
if 'reset' in tcpTime[url][index]:
timings['reset'].append(tcpTime[url][index]['reset'] + gap + shift)
entry['timings'] = timings
har['log']['entries'][i] = entry
if args.output:
with open(args.output, 'w') as outfile:
json.dump(har, outfile, indent=4)
else:
print json.dumps(har, indent=4)
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\
description='Merge the output from HTTP analyzer to HAR file')
parser.add_argument('-f', '--file', default=None, help='Read from a tcp timing file in JSON format instead of STDIN')
parser.add_argument('harfile', help='Path to the HAR file')
parser.add_argument('-o', '--output', default=None, help='Output file path instead of STDOUT')
parser.add_argument('-t', '--threshold', type=float, default=10.0, help='The threshold (ms) for matching objects.\
If the difference of timestamps of the same object in HAR and tcp timing is\
beyond the threshold, the record is discarded.')
parser.add_argument('-q', '--quiet', action='store_true', default=False, help='Only print errors')
parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Print debug info.')
args = parser.parse_args()
if args.quiet:
level = logging.ERROR
elif args.verbose:
level = logging.DEBUG
else:
level = logging.WARNING
logging.basicConfig(
format = "%(levelname)s:%(message)s",
level = level
)
execute(args)
if __name__ == '__main__':
main()
|
Python
| 0.000097
|
@@ -256,16 +256,71 @@
ate(ts)%0A
+ dtUtc = (dt - dt.utcoffset()).replace(tzinfo=None)%0A
ms =
@@ -338,16 +338,19 @@
imegm(dt
+Utc
.timetup
@@ -364,16 +364,19 @@
000 + dt
+Utc
.microse
|
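`calendar.timegm` interprets a struct_time as UTC, but `iso8601.parse_date` returns an offset-aware datetime whose timetuple still carries local wall-clock fields, so non-UTC timestamps came out shifted by their offset. The fix normalises to naive UTC first. Post-commit function, per the diff:

def convert_enddate_to_ms(ts):
    """Takes ISO 8601 format(string) and converts into epoch time."""
    dt = iso8601.parse_date(ts)
    dtUtc = (dt - dt.utcoffset()).replace(tzinfo=None)
    ms = calendar.timegm(dtUtc.timetuple())*1000 + dtUtc.microsecond/1000.0
    return ms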
d6672e2da113e2fdcfec147619ed03d5410ad014
|
Fix cleanup at exit in Escalator. Remove socket.
|
onitu/escalator/server/__main__.py
|
onitu/escalator/server/__main__.py
|
import argparse
import zmq
from logbook import Logger
from logbook import StderrHandler
from logbook.queues import ZeroMQHandler
from .databases import Databases
from .worker import Worker
back_uri = 'inproc://workers'
logger = Logger('Escalator')
def main(logger):
proxy = zmq.devices.ThreadDevice(
device_type=zmq.QUEUE, in_type=zmq.DEALER, out_type=zmq.ROUTER
)
proxy.bind_out(args.bind)
proxy.bind_in(back_uri)
proxy.start()
logger.info("Starting on '{}'", args.bind)
databases = Databases('dbs')
nb_workers = 8
workers = []
for i in range(nb_workers):
worker = Worker(databases, back_uri, logger)
worker.daemon = True
worker.start()
workers.append(worker)
while proxy.launcher.isAlive():
try:
# If we join the process without a timeout we never
# get the chance to handle the exception
proxy.join(100)
except KeyboardInterrupt:
break
logger.info("Exiting")
databases.close()
parser = argparse.ArgumentParser("escalator")
parser.add_argument(
'--bind', default='tcp://127.0.0.1:4224',
help="Address to bind escalator server"
)
parser.add_argument(
'--log-uri',
help="The URI of the ZMQ handler listening to the logs"
)
args = parser.parse_args()
if args.log_uri:
handler = ZeroMQHandler(args.log_uri, multi=True)
else:
handler = StderrHandler()
with handler.applicationbound():
main(logger)
|
Python
| 0
|
@@ -1,12 +1,36 @@
+import os%0Aimport signal%0A
import argpa
@@ -424,25 +424,24 @@
ind_out(
-args.
bind
+_uri
)%0A pr
@@ -533,42 +533,8 @@
d)%0A%0A
- databases = Databases('dbs')%0A%0A
@@ -986,57 +986,489 @@
ak%0A%0A
- logger.info(%22Exiting%22)%0A databases.close()%0A
+%0Adef cleanup(*args, **kwargs):%0A databases.close()%0A%0A if bind_uri.startswith(%22ipc://%22):%0A # With ZMQ %3C 4.1 (which isn't released yet), we can't%0A # close the device in a clean way.%0A # This will be possible with ZMQ 4.1 by using%0A # zmq_proxy_steerable.%0A # In the meantime, we must delete the Unix socket by hand.%0A sock_file = bind_uri%5B6:%5D%0A%0A try:%0A os.unlink(sock_file)%0A except OSError:%0A pass%0A%0A exit()
%0A%0Apa
@@ -1753,16 +1753,156 @@
args()%0A%0A
+bind_uri = args.bind%0Adatabases = Databases('dbs')%0A%0Afor s in (signal.SIGINT, signal.SIGTERM, signal.SIGQUIT):%0A signal.signal(s, cleanup)%0A%0A
if args.
@@ -2043,16 +2043,30 @@
main(logger)%0A
+ cleanup()%0A
|
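Assembled from the hunks: `databases` moves to module scope, `proxy.bind_out` takes the new `bind_uri` alias, and a `cleanup()` handler closes the databases, unlinks a leftover ipc:// socket file (ZMQ < 4.1 has no clean way to stop the proxy device; zmq_proxy_steerable only arrives in 4.1), then exits. It is registered for SIGINT/SIGTERM/SIGQUIT and also called after `main()` returns. Sketch of the post-commit wiring:

import os
import signal

def cleanup(*args, **kwargs):
    databases.close()

    if bind_uri.startswith("ipc://"):
        # Delete the Unix socket by hand, since the device
        # cannot be closed cleanly with ZMQ < 4.1.
        sock_file = bind_uri[6:]
        try:
            os.unlink(sock_file)
        except OSError:
            pass

    exit()

bind_uri = args.bind
databases = Databases('dbs')

for s in (signal.SIGINT, signal.SIGTERM, signal.SIGQUIT):
    signal.signal(s, cleanup)

with handler.applicationbound():
    main(logger)
    cleanup()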
7b37d39b4c86317c327f2e057c3ff0e86a4b5ce8
|
fix retries and delay
|
reddit.py
|
reddit.py
|
#
#
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
#
import os.path
import urllib
import simplejson
import sys
import time
import re
import HTMLParser
from urlparse import urlparse
import logging
cj = None
ClientCookie = None
cookielib = None
try: # Let's see if cookielib is available
import cookielib
except ImportError:
pass
else:
import urllib2
urlopen = urllib2.urlopen
#cj = cookielib.LWPCookieJar() # This is a subclass of FileCookieJar that has useful load and save methods
cj = cookielib.CookieJar() # This is a subclass of FileCookieJar that has useful load and save methods
Request = urllib2.Request
if not cookielib: # If importing cookielib fails let's try ClientCookie
try:
import ClientCookie
except ImportError:
import urllib2
urlopen = urllib2.urlopen
Request = urllib2.Request
else:
urlopen = ClientCookie.urlopen
#cj = ClientCookie.LWPCookieJar()
cj = ClientCookie.CookieJar()
Request = ClientCookie.Request
class Reddit:
""" r e d d i t """
# the path and filename that you want to use to save your cookies in
COOKIEFILE = 'cookies.lwp'
throttle = 2.5
last_request_time = None
log = None
num_retries = 3
retry_delay_sec = 15
#
def __init__(self, config):
self.log = logging.getLogger('cssbot.reddit')
self.throttle = config.getfloat("reddit", "throttle")
if config.get('reddit', 'num_retries'):
num_retries = config.getint('reddit', 'num_retries')
if config.get('reddit', 'retry_delay_sec'):
num_retries = config.getfloat('reddit', 'retry_delay_sec')
# now we have to install our CookieJar so that it is used as the default CookieProcessor in the default opener handler
if cj != None:
# if os.path.isfile(self.COOKIEFILE):
# cj.load(self.COOKIEFILE)
if cookielib:
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
urllib2.install_opener(opener)
else:
opener = ClientCookie.build_opener(ClientCookie.HTTPCookieProcessor(cj))
ClientCookie.install_opener(opener)
def login(self, user, passwd):
#
self.user = user
uri = "http://www.reddit.com/api/login"
params = urllib.urlencode(dict(api_type='json', user=self.user, passwd=passwd))
j = self.make_request_json(uri, params)
#FIXME: check errors.
errors = j['json']['errors']
if errors:
self.log.error( "Login failed: [%s] %s", errors[0][0], errors[0][1])
sys.exit(1)
self.modhash = j['json']['data']['modhash']
self.cookie = j['json']['data']['cookie']
self.log.debug( "modhash = %s", self.modhash )
self.log.debug( "cookie = %s", self.cookie )
self.log.debug( "cj = %s", cj )
return True
#
#
def make_request_json(self, uri, params=None):
#
parts = urlparse(uri)
# scheme://netloc/path;parameters?query#fragment
_uri = parts.scheme + "://" + parts.netloc + parts.path
if not parts.path.endswith('.json'):
_uri += ".json"
if parts.params:
_uri += ";" + parts.params
if parts.query:
_uri += "?" + parts.query
if parts.fragment:
_uri += "#" + parts.fragment
#
content = self.make_request(_uri, params)
return simplejson.loads(content)
#
#
def make_request(self, uri, params=None):
#
now = time.time()
if self.last_request_time is not None:
if now < (self.last_request_time + self.throttle):
duration = (self.last_request_time + self.throttle) - now
self.log.debug("delaying %s seconds until next request", duration)
time.sleep(duration)
#
attempts = 1
while attempts <= self.num_retries:
try:
self.log.debug("open uri: %s", uri)
req = Request(uri, params)
handle = urlopen(req)
data = handle.read()
self.last_request_time = now
return data
except IOError, e:
self.log.warn("failed to open uri: %s", uri)
if hasattr(e, 'code'):
self.log.warn('We failed with error code - %s.', e.code)
#
if attempts > self.num_retries:
self.log.error('attempt to open uri %s failed %d times, exiting.' % (uri, attempts))
# alternatively, re-throw the error to catch at a higher level.
sys.exit(1)
#
attempts += 1
time.sleep(retry_delay_sec)
def get_stylesheet(self, sub):
contents = self.make_request('http://www.reddit.com/r/%s/about/stylesheet' % sub)
p = re.compile('<textarea rows="20" cols="20" id="stylesheet_contents" name="stylesheet_contents" >(.*?)</textarea>')
m = p.search(contents)
css = m.group(1)
h = HTMLParser.HTMLParser()
return h.unescape(css)
def save_stylesheet(self, sub, css):
d = dict( id='#subreddit_stylesheet',
op='save',
r=sub,
renderstyle='html',
stylesheet_contents=css,
uh=self.modhash )
params = urllib.urlencode(d)
return self.make_request("http://www.reddit.com/api/subreddit_stylesheet", params)
def get_comments(self, thing_id):
uri = "http://www.reddit.com/comments/%s" % thing_id
return self.make_request_json(uri)
def get_r_new(self, subreddit):
uri = "http://www.reddit.com/r/%s/new/?sort=new" % (subreddit)
return self.make_request_json(uri)
def get_r_new_before(self, subreddit, t3):
uri = "http://www.reddit.com/r/%s/new/?sort=new&before=%s" % (subreddit, t3)
return self.make_request_json(uri)
def get_r_new_after(self, subreddit, t3):
uri = "http://www.reddit.com/r/%s/new/?sort=new&after=%s" % (subreddit, t3)
return self.make_request_json(uri)
|
Python
| 0
|
@@ -1637,32 +1637,37 @@
ies'):%0A
+self.
num_retries = co
@@ -1763,27 +1763,36 @@
-num_retries
+self.retry_delay_sec
= confi
@@ -4851,16 +4851,21 @@
e.sleep(
+self.
retry_de
|
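Two of the three config reads above assign to bare locals (and the second clobbers `num_retries` with the delay value), so the class-level defaults are never updated and `time.sleep(retry_delay_sec)` in the retry loop raises `NameError`. Post-commit, per the diff:

if config.get('reddit', 'num_retries'):
    self.num_retries = config.getint('reddit', 'num_retries')
if config.get('reddit', 'retry_delay_sec'):
    self.retry_delay_sec = config.getfloat('reddit', 'retry_delay_sec')

# ... and in make_request's retry loop:
time.sleep(self.retry_delay_sec)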
af7647f0d20ca850fb7942706bc8fee5985b5ec7
|
fix missing imports
|
mint/jobs.py
|
mint/jobs.py
|
#
# Copyright (c) 2005-2006 rPath, Inc.
#
# All rights reserved
#
from mint import database
from mint.mint_error import MintError
class JobMissing(MintError):
def __str__(self):
return "the requested job does not exist"
class FileMissing(MintError):
def __str__(self):
return "the requested file does not exist"
class DuplicateJob(MintError):
def __str__(self):
return "a conflicting job is already in progress"
class JobsTable(database.KeyedTable):
name = 'Jobs'
key = 'jobId'
createSQL = """
CREATE TABLE Jobs (
jobId %(PRIMARYKEY)s,
buildId INT,
groupTroveId INT,
owner BIGINT,
userId INT,
status INT,
statusMessage TEXT,
timeSubmitted DOUBLE,
timeStarted DOUBLE,
timeFinished DOUBLE)"""
fields = ['jobId', 'buildId', 'groupTroveId', 'owner', 'userId',
'status', 'statusMessage', 'timeSubmitted',
'timeStarted', 'timeFinished']
indexes = {"JobsBuildIdx": """CREATE INDEX JobsBuildIdx
ON Jobs(buildId)""",
"JobsGroupTroveIdx": """CREATE INDEX JobsGroupTroveIdx
ON Jobs(groupTroveId)""",
"JobsUserIdx": "CREATE INDEX JobsUserIdx ON Jobs(userId)"}
def versionCheck(self):
dbversion = self.getDBVersion()
if dbversion != self.schemaVersion:
if dbversion == 5 and not self.initialCreation:
cu = self.db.cursor()
cu.execute("ALTER TABLE Jobs ADD COLUMN groupTroveId INT")
if dbversion == 11 and not self.initialCreation:
cu = self.db.cursor()
cu.execute("ALTER TABLE Jobs ADD COLUMN owner BIGINT")
if dbversion == 12 and not self.initialCreation:
cu = self.db.cursor()
cu.execute("ALTER TABLE Jobs ADD COLUMN timeSubmitted DOUBLE")
if dbversion == 19:
cu = self.db.cursor()
cu.execute("ALTER TABLE Jobs ADD COLUMN buildId INT")
cu.execute('UPDATE Jobs SET buildId = releaseId')
if self.db.driver == 'mysql':
cu.execute("ALTER TABLE Jobs DROP COLUMN releaseId")
else:
cu.execute("DROP TABLE Jobs")
cu.execute(self.createSQL % self.db.keywords)
cu.execute('DELETE FROM JobData')
return dbversion >= 19
return True
def get(self, id):
res = database.KeyedTable.get(self, id)
del res['owner']
res['status'] = int(res['status'])
return res
class Job(database.TableObject):
__slots__ = JobsTable.fields
# alias for releaseId
releaseId = property(lambda self: self.buildId)
def getItem(self, id):
# newer clients must call getJob2 to maintain backwards
# compatibility with older jobservers
return self.server.getJob2(id)
def getId(self):
return self.id
def getBuildId(self):
return self.buildId
def getGroupTroveId(self):
return self.groupTroveId
def getUserId(self):
return self.userId
def getStatus(self):
return self.status
def getStatusMessage(self):
return self.statusMessage
def setStatus(self, status, statusMessage):
return self.server.setJobStatus(self.id, status, statusMessage)
def getTimeSubmitted(self):
return self.timeSubmitted
def getTimeStarted(self):
return self.timeStarted
def getTimeFinished(self):
return self.timeFinished
def setDataValue(self, name, value, dataType):
return self.server.setJobDataValue(self.id, name, value, dataType)
def getDataValue(self, name):
isPresent, val = self.server.getJobDataValue(self.getId(), name)
if not isPresent:
val = None
return val
class BuildFilesTable(database.KeyedTable):
name = 'BuildFiles'
key = 'fileId'
# Nota Bummer: the filename column is deprecated, so don't use it.
# We need to get rid of it once we adopt a migration scheme that
# doesn't produce different results from InitialCreation vs. Migration.
createSQL = """
CREATE TABLE BuildFiles (
fileId %(PRIMARYKEY)s,
buildId INT,
idx INT,
filename VARCHAR(255),
title CHAR(255) DEFAULT '',
size BIGINT,
sha1 CHAR(40)
);"""
fields = ['fileId', 'buildId', 'idx', 'title', 'size', 'sha1' ]
indexes = {"BuildFilesBuildIdx": """CREATE INDEX BuildFilesBuildIdx
ON BuildFiles(buildId)"""}
def versionCheck(self):
dbversion = self.getDBVersion()
if dbversion != self.schemaVersion:
if dbversion == 1 and not self.initialCreation:
sql = """ALTER TABLE BuildFiles ADD COLUMN title STR DEFAULT ''"""
cu = self.db.cursor()
cu.execute(sql)
if dbversion == 21 and not self.initialCreation:
cu = self.db.cursor()
cu.execute("ALTER TABLE BuildFiles ADD COLUMN size BIGINT")
cu.execute("ALTER TABLE BuildFiles ADD COLUMN sha1 CHAR(40)")
# migrate data over to FilesUrls
cu.execute("SELECT fileId, filename FROM BuildFiles ORDER BY fileId")
results = cu.fetchall()
for row in results:
fileId = row[0]
                    # keep only the last three path components
                    relativePath = '/'.join(row[1].split('/')[-3:])
cu.execute("INSERT INTO FilesUrls VALUES(NULL,?,?)",
urltypes.LOCAL, relativePath)
urlId = cu.lastrowid
cu.execute("INSERT INTO BuildFilesUrlsMap VALUES(?,?)",
fileId, urlId)
return dbversion >= 21
return True
class BuildFilesUrlsMapTable(database.KeyedTable):
name = 'BuildFilesUrlsMap'
key = 'fileId'
createSQL = """
CREATE TABLE BuildFilesUrlsMap (
fileId INT,
urlId INT,
CONSTRAINT bfum_f_fk FOREIGN KEY(fileId)
REFERENCES BuildFiles (fileId) ON DELETE CASCADE,
CONSTRAINT bfum_u_fk FOREIGN KEY(urlId)
REFERENCES FilesUrls(urlId) ON DELETE CASCADE
);"""
fields = ['fileId', 'urlId']
class FilesUrlsTable(database.KeyedTable):
name = 'FilesUrls'
key = 'urlId'
createSQL = """
CREATE TABLE FilesUrls (
urlId %(PRIMARYKEY)s,
urlType SMALLINT,
url VARCHAR(255)
);"""
fields = ['urlId', 'urlType', 'url']
|
Python
| 0.962368
|
@@ -85,16 +85,42 @@
atabase%0A
+from mint import urltypes%0A
from min
|
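The hunk above decodes to adding "from mint import urltypes" at module level. Without it, the dbversion == 21 branch of BuildFilesTable.versionCheck would raise a NameError the first time the migration ran, because Python resolves names inside a function body only at call time. A minimal sketch of that failure mode (the bare urltypes name here is deliberately undefined):

def migrate():
    return urltypes.LOCAL  # the name is looked up only when migrate() runs

try:
    migrate()
except NameError as exc:
    print(exc)  # name 'urltypes' is not defined
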
da12486a207e1ade8c7b49379613e4aadec23794
|
add check to see if rename is needed
|
misc/misc.py
|
misc/misc.py
|
import discord
from discord.ext import commands
from .utils import checks
from __main__ import send_cmd_help
import asyncio
class misc:
"""Misc commands"""
def __init__(self, bot):
self.bot = bot
@commands.command(hidden=True)
async def summon(self):
await self.bot.say("Who dares summon me?")
    async def rename_orun(self):
while self is self.bot.get_cog("misc"):
serverid = "294578270064869377"
userid = "202429404503212034"
server = self.bot.get_server(serverid)
user = server.get_member(userid)
print(user.nick)
nickname = "Orun"
try:
await self.bot.change_nickname(user, nickname)
print("Renamed Orun")
except discord.Forbidden:
print("I cannot do that, I lack the "
"\"Manage Nicknames\" permission.")
await asyncio.sleep(30)
def setup(bot):
n = misc(bot)
loop = asyncio.get_event_loop()
loop.create_task(n.rename_orun())
bot.add_cog(n)
|
Python
| 0.000001
|
@@ -548,17 +548,31 @@
%0A%09%09%09
-nickname
+try:%0A%09%09%09%09if user.nick !
= %22O
@@ -577,23 +577,16 @@
%22Orun%22%0A%09
-%09%09try:%0A
%09%09%09%09awai
@@ -628,24 +628,25 @@
ckname)%0A%09%09%09%09
+%09
print(%22Renam
@@ -655,16 +655,56 @@
Orun%22)%0A
+%09%09%09%09else%0A%09%09%09%09%09print(%22No rename needed%22)%0A
%09%09%09excep
|
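Decoded, the hunk above guards the rename behind a nickname comparison so the bot only hits the API when something would change. A hedged sketch of that guard as a standalone helper, reusing the change_nickname call from the snippet (the old discord.py 0.x API shown in the original file):

async def ensure_nick(bot, member, nickname):
    # Skip the API round-trip when the nickname already matches.
    if member.nick == nickname:
        print("No rename needed")
        return
    await bot.change_nickname(member, nickname)
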
2711f6a3ab53a654bc5de6234251d8d7714529ca
|
Add markers to the plot
|
mlp/mnist.py
|
mlp/mnist.py
|
import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
from keras.utils import np_utils
from keras.utils.visualize_util import plot
# MNIST digit classification
# Reference:
# https://github.com/fchollet/keras/blob/master/examples/mnist_mlp.py
def build_multilayer_perceptron():
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))
return model
def plot_history(history):
# print(history.history.keys())
    # Plot the accuracy history
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend(['acc', 'val_acc'], loc='lower right')
plt.show()
    # Plot the loss history
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(['loss', 'val_loss'], loc='lower right')
plt.show()
if __name__ == "__main__":
batch_size = 128
nb_classes = 10
nb_epoch = 100
    # Load the MNIST data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
    # Flatten each image into a 1-D array
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
    # Scale pixel values to the range 0.0-1.0
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# one-hot-encoding
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
    # Build the multilayer neural network model
model = build_multilayer_perceptron()
    # Display a summary of the model
model.summary()
plot(model, show_shapes=True, show_layer_names=True, to_file='model.png')
    # Compile the model
model.compile(loss='categorical_crossentropy',
optimizer=Adam(),
metrics=['accuracy'])
# Early-stopping
early_stopping = EarlyStopping(patience=0, verbose=1)
    # Train the model
history = model.fit(X_train, Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
verbose=1,
validation_split=0.1,
callbacks=[early_stopping])
    # Plot the training history
plot_history(history)
    # Evaluate the model
loss, acc = model.evaluate(X_test, Y_test, verbose=0)
print('Test loss:', loss)
print('Test acc:', acc)
|
Python
| 0.000001
|
@@ -864,24 +864,36 @@
story%5B'acc'%5D
+, marker='.'
)%0A plt.pl
@@ -921,16 +921,28 @@
al_acc'%5D
+, marker='.'
)%0A pl
@@ -1010,32 +1010,47 @@
bel('accuracy')%0A
+ plt.grid()%0A
plt.legend(%5B
@@ -1152,24 +1152,36 @@
tory%5B'loss'%5D
+, marker='.'
)%0A plt.pl
@@ -1210,16 +1210,28 @@
l_loss'%5D
+, marker='.'
)%0A pl
@@ -1268,32 +1268,32 @@
xlabel('epoch')%0A
-
plt.ylabel('
@@ -1295,24 +1295,39 @@
bel('loss')%0A
+ plt.grid()%0A
plt.lege
|
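The hunks add marker='.' to every plt.plot call and a plt.grid() before each legend. A self-contained sketch of the resulting style, with toy history values standing in for the Keras output:

import matplotlib.pyplot as plt

history = {'acc': [0.91, 0.95, 0.97], 'val_acc': [0.90, 0.93, 0.94]}  # toy data
plt.plot(history['acc'], marker='.')
plt.plot(history['val_acc'], marker='.')
plt.grid()
plt.legend(['acc', 'val_acc'], loc='lower right')
plt.show()
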
c69ad06b785c874685def9e10107f266d685948e
|
fix #5 Pretty awesome
|
lokingyql/lokingyql.py
|
lokingyql/lokingyql.py
|
import requests
import errors
import pdb
class LokingYQL(object):
'''Yet another Python Yahoo! Query Language Wrapper
'''
default_url = 'https://query.yahooapis.com/v1/public/yql'
def __init__(self, table=None, url=default_url, format='json'):
self.url = url
self.table = table
self.format = format
self._query = None # used to build query when using methods such as <select>, <insert>, ...
def __repr__(self):
'''Returns information on the current instance
'''
return "<url>: '{0}' - <table>: '{1}' - <format> : '{2}' ".format(self.url, self.table, self.format)
def payloadBuilder(self, query, format='json'):
'''Build the payload'''
payload = {
'q' : query,
'callback' : '', #This is not javascript
'diagnostics' : 'true', # always true
'format' : format
}
return payload
def rawQuery(self, query, format='json', pretty=True):
'''Executes a YQL query and returns a response
>>>...
>>> resp = yql.rawQuery('select * from weather.forecast where woeid=2502265')
>>>
'''
payload = self.payloadBuilder(query, format)
response = self.executeQuery(payload)
#if pretty :
# response = self.buildResponse(response)
return response
def executeQuery(self, payload):
        '''Execute the query and return a response'''
response = requests.get(self.url, params= payload)
return response
def clauseFormatter(self, cond):
'''Formats conditions
args is a list of ['column', 'operator', 'value']
'''
cond[2] = "'{0}'".format(cond[2])
return ''.join(cond)
def buildResponse(self, response):
'''Try to return a pretty formatted response object
'''
try:
r = response.json()
result = r['query']['results']['table']
response = {
'num_result': len(result) if isinstance(result, list) else 0 ,
'result': result
}
except Exception, e:
print(e)
return response.content
return response
    def buildSelectQuery(self, conditions):
        '''Builds the query for the select method (stub, not yet implemented)'''
        raise NotImplementedError
######################################################
#
# ORM METHODS
#
#####################################################
def use(self, url):
'''Changes the data provider
>>> yql.use('http://myserver.com/mytables.xml')
'''
self.url = url
return self.url
def select(self, table=None, items=[]):
'''This method simulate a select on a table
>>> yql.select('table')
'''
try:
self.table = table
if not items:
items = ['*']
self._query = "select {1} from {0}".format(self.table, ','.join(items))
except Exception, e:
print(e)
return self
def where(self, *args):
''' This method simulates a where condition. Use as follow:
>>>yql.select('mytable').where([('name', '=', 'alain'), ('location', '!=', 'paris')])
'''
if not self.table:
raise errors.NoTableSelectedError('No Table Selected')
clause = []
self._query += ' where '
for x in args:
x = self.clauseFormatter(x)
clause.append(x)
self._query += ' and '.join(clause)
return self._query
def showTables(self, format='json'):
        '''Return list of all available tables'''
query = 'SHOW TABLES'
payload = self.payloadBuilder(query, format)
response = self.executeQuery(payload)
return response
|
Python
| 0
|
@@ -415,16 +415,91 @@
rt%3E, ...
+%0A self.diagnostics = True # Who knows, someone would like to turn it off
%0A%0A def
@@ -851,14 +851,24 @@
' :
-'true'
+self.diagnostics
, #
@@ -2590,27 +2590,24 @@
a table%0A
-
%3E%3E%3E yql.sele
@@ -2616,16 +2616,87 @@
('table'
+)%0A %3E%3E%3E yql.select('social.profile', %5B'guuid', 'givenName', 'gender'%5D
)%0A ''
@@ -3347,33 +3347,534 @@
use)
+%0A
%0A
-return self._query%0A
+payload = self.payloadBuilder(self._query)%0A response = self.executeQuery(payload)%0A%0A return response%0A%0A ######################################################%0A #%0A # HELPERS%0A #%0A #####################################################%0A%0A def getGUID(self, username):%0A '''Returns the guid of the username provided%0A %3E%3E%3E guid = self.getGUID('josue_brunel')%0A %3E%3E%3E guid%0A '''%0A response = self.select('yahoo.identity').where(%5B'yid', '=', username%5D)%0A %0A return response%0A
%0A d
|
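After this change, where() executes the built query and returns the response instead of returning the raw query string, and a getGUID helper chains the two ORM methods. A usage sketch taken from the docstrings in the diff (table and column names come from the diff itself):

yql = LokingYQL()
# select() stores the query; where() now sends it and returns a response
response = yql.select('yahoo.identity').where(['yid', '=', 'josue_brunel'])
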
2ef9e9229a7497d861438a162986862f7a48062b
|
Check if host IPv6 resolves (#1421)
|
modules/sfp_tldsearch.py
|
modules/sfp_tldsearch.py
|
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_tldsearch
# Purpose: SpiderFoot plug-in for identifying the existence of this target
# on other TLDs.
#
# Author: Steve Micallef <steve@binarypool.com>
#
# Created: 31/08/2013
# Copyright: (c) Steve Micallef 2013
# Licence: GPL
# -------------------------------------------------------------------------------
import random
import threading
import time
import dns.resolver
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_tldsearch(SpiderFootPlugin):
meta = {
'name': "TLD Searcher",
'summary': "Search all Internet TLDs for domains with the same name as the target (this can be very slow.)",
'flags': ["slow"],
'useCases': ["Footprint"],
'categories': ["DNS"]
}
# Default options
opts = {
'activeonly': False, # Only report domains that have content (try to fetch the page)
'skipwildcards': True,
'_maxthreads': 50
}
# Option descriptions
optdescs = {
'activeonly': "Only report domains that have content (try to fetch the page)?",
"skipwildcards": "Skip TLDs and sub-TLDs that have wildcard DNS.",
"_maxthreads": "Maximum threads"
}
# Internal results tracking
results = None
# Track TLD search results between threads
tldResults = dict()
lock = None
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = self.tempStorage()
self.__dataSource__ = "DNS"
self.lock = threading.Lock()
for opt in list(userOpts.keys()):
self.opts[opt] = userOpts[opt]
# What events is this module interested in for input
def watchedEvents(self):
return ["INTERNET_NAME"]
# What events this module produces
# This is to support the end user in selecting modules based on events
# produced.
def producedEvents(self):
return ["SIMILARDOMAIN"]
def tryTld(self, target, tld):
resolver = dns.resolver.Resolver()
resolver.timeout = 1
resolver.lifetime = 1
resolver.search = list()
if self.opts.get('_dnsserver', "") != "":
resolver.nameservers = [self.opts['_dnsserver']]
if self.opts['skipwildcards'] and self.sf.checkDnsWildcard(tld):
return
try:
# TODO: Support IPv6
addrs = self.sf.resolveHost(target)
if not addrs:
with self.lock:
self.tldResults[target] = False
else:
with self.lock:
self.tldResults[target] = True
except Exception:
with self.lock:
self.tldResults[target] = False
def tryTldWrapper(self, tldList, sourceEvent):
self.tldResults = dict()
running = True
i = 0
t = []
# Spawn threads for scanning
self.sf.info("Spawning threads to check TLDs: " + str(tldList))
for pair in tldList:
(domain, tld) = pair
tn = 'thread_sfp_tldsearch_' + str(random.SystemRandom().randint(0, 999999999))
t.append(threading.Thread(name=tn, target=self.tryTld, args=(domain, tld,)))
t[i].start()
i += 1
# Block until all threads are finished
while running:
found = False
for rt in threading.enumerate():
if rt.name.startswith("thread_sfp_tldsearch_"):
found = True
if not found:
running = False
time.sleep(0.1)
for res in self.tldResults:
if self.tldResults[res] and res not in self.results:
self.sendEvent(sourceEvent, res)
# Store the result internally and notify listening modules
def sendEvent(self, source, result):
self.sf.info("Found a TLD with the target's name: " + result)
self.results[result] = True
# Inform listening modules
if self.opts['activeonly']:
if self.checkForStop():
return
pageContent = self.sf.fetchUrl('http://' + result,
timeout=self.opts['_fetchtimeout'],
useragent=self.opts['_useragent'],
noLog=True,
verify=False)
if pageContent['content'] is not None:
evt = SpiderFootEvent("SIMILARDOMAIN", result, self.__name__, source)
self.notifyListeners(evt)
else:
evt = SpiderFootEvent("SIMILARDOMAIN", result, self.__name__, source)
self.notifyListeners(evt)
# Search for similar sounding domains
def handleEvent(self, event):
eventData = event.data
if eventData in self.results:
return
self.results[eventData] = True
keyword = self.sf.domainKeyword(eventData, self.opts['_internettlds'])
if not keyword:
self.sf.error(f"Failed to extract keyword from {eventData}")
return
self.sf.debug(f"Keyword extracted from {eventData}: {keyword}")
if keyword in self.results:
return
self.results[keyword] = True
# Look through all TLDs for the existence of this target keyword
targetList = list()
for tld in self.opts['_internettlds']:
if type(tld) != str:
tld = str(tld.strip(), errors='ignore')
else:
tld = tld.strip()
if tld.startswith("//") or len(tld) == 0:
continue
if tld.startswith("!") or tld.startswith("*") or tld.startswith(".."):
continue
if tld.endswith(".arpa"):
continue
tryDomain = keyword + "." + tld
if self.checkForStop():
return
if len(targetList) <= self.opts['_maxthreads']:
targetList.append([tryDomain, tld])
else:
self.tryTldWrapper(targetList, event)
targetList = list()
# Scan whatever may be left over.
if len(targetList) > 0:
self.tryTldWrapper(targetList, event)
# End of sfp_tldsearch class
|
Python
| 0
|
@@ -2461,101 +2461,79 @@
-# TODO: Support IPv6%0A addrs = self.sf.resolveHost(target)%0A if not addrs
+if not self.sf.resolveHost(target) and not self.sf.resolveHost6(target)
:%0A
|
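The fix counts a host as resolving when either resolveHost (IPv4) or resolveHost6 (IPv6) succeeds. For readers without the SpiderFoot helpers, an equivalent dual-stack check using only the standard library:

import socket

def host_resolves(name):
    # A host "resolves" if it has either an A (IPv4) or AAAA (IPv6) answer.
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            socket.getaddrinfo(name, None, family)
            return True
        except socket.gaierror:
            continue
    return False

print(host_resolves('localhost'))  # True on most systems
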
8141a191f063025cb25bd57b48d57a1a475e3c8d
|
improve consumer trend summaries
|
munge/spending_import.py
|
munge/spending_import.py
|
import os.path
import config
from csv_util import unicode_csv_reader, import_csv
from sa_util import swap_tables, summary, build_view
f = 'spending/spending_by_nuts1.csv'
fields = [
'ct_code',
'nuts1_code',
'factor:double precision',
]
summary_data = [
{
'name': 's_population_by_nuts1',
'sql': '''
SELECT
l.nuts1_code, p.population,
p.population::float / t.population::float AS percent
FROM "{t1}" p
RIGHT JOIN "{t2}" l ON l.nuts1_ons_code = la_code
LEFT outer JOIN "{t1}" t ON t.la_code = 'K02000001';
''',
'tables': ['population_by_la', 'l_nuts1_ons'],
},
{
'name': 's_consumer_spend_by_nuts1',
'sql': '''
SELECT
f.nuts1_code,
ct.ct_code,
ct.amount nation_spend,
ct.amount * p.percent area_spend,
(ct.amount * p.percent * f.factor)::numeric(15,2) adjusted_area_spend,
(ct.amount * p.percent * f.factor / p.population)::numeric(11,2)
AS adjusted_spend_per_capita
FROM "{t1}" ct
LEFT OUTER JOIN "{t2}" f ON f.ct_code = ct.ct_code
LEFT OUTER JOIN "{t3}" p ON p.nuts1_code = f.nuts1_code
WHERE ct.amount is not null
''',
'tables': [
'v_consumer_trend_latest',
'nuts1_ct_spending_factor',
's_population_by_nuts1'
],
},
]
views_data = [
{
'name': 'v_consumer_trend_latest',
'sql': '''
CREATE VIEW "{name}" AS
SELECT ct_code, amount * 1000000 as amount
FROM "{t1}"
WHERE date = '2014';
''',
'tables': ['consumer_trend_yearly'],
},
]
def build_summaries(verbose=False):
for info in summary_data:
summary(
config.TEMP_TABLE_STR + info['name'],
info['sql'],
info['tables'],
verbose=verbose
)
def build_views(verbose=False):
for info in views_data:
build_view(
info['name'],
info['sql'],
info['tables'],
verbose=verbose
)
def hei1_reader(filename):
f = os.path.join(config.DATA_PATH, filename)
reader = unicode_csv_reader(f)
first = True
for row in reader:
if first:
nuts = row[1:]
first = False
continue
ct_code = row[0]
for i, nut in enumerate(nuts):
out = [ct_code, nut, row[i + 1]]
yield out
def import_spending(verbose=False):
if verbose:
print('importing spending')
reader = hei1_reader(f)
import_csv(
reader,
'nuts1_ct_spending_factor',
fields=fields,
verbose=verbose
)
build_views(verbose=verbose)
build_summaries(verbose=verbose)
swap_tables(verbose=verbose)
|
Python
| 0.000039
|
@@ -689,16 +689,477 @@
%7D,%0A%0A
+ %7B%0A 'name': 's_consumer_spend_national',%0A 'sql': '''%0A SELECT%0A ct.ct_code,%0A ct.amount nation_spend,%0A (ct.amount / t.population)::numeric(11,2)%0A AS spend_per_capita%0A FROM %22%7Bt1%7D%22 ct%0A LEFT outer JOIN %22%7Bt2%7D%22 t ON t.la_code = 'K02000001'%0A WHERE ct.amount is not null%0A ''',%0A 'tables': %5B%0A 'v_consumer_trend_latest',%0A 'population_by_la',%0A %5D,%0A %7D,%0A%0A
%7B%0A
@@ -1408,25 +1408,35 @@
ic(15,2)
- adjusted
+%0A AS adj
_area_sp
@@ -1535,13 +1535,8 @@
adj
-usted
_spe
@@ -1540,32 +1540,188 @@
spend_per_capita
+,%0A (100 * (((ct.amount * p.percent * f.factor / p.population)%0A / n.spend_per_capita) - 1))::numeric(11,2)%0A AS percent_from_national
%0A FROM %22%7B
@@ -1847,24 +1847,83 @@
.nuts1_code%0A
+ LEFT OUTER JOIN %22%7Bt4%7D%22 n ON n.ct_code = ct.ct_code%0A
WHER
@@ -2093,16 +2093,58 @@
y_nuts1'
+,%0A 's_consumer_spend_national',
%0A
@@ -2154,17 +2154,16 @@
%0A %7D,%0A
-%0A
%5D%0A%0Aviews
|
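The new s_consumer_spend_national summary adds a national per-capita figure, and percent_from_national in the regional summary computes 100 * (regional_per_capita / national_per_capita - 1). A toy check of that arithmetic, with invented values:

def percent_from_national(region_per_capita, national_per_capita):
    # Positive means the region spends above the national per-capita rate.
    return round(100 * (region_per_capita / national_per_capita - 1), 2)

print(percent_from_national(12.0, 10.0))  # 20.0
print(percent_from_national(9.0, 10.0))   # -10.0
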
b637f466f2a49235975e15e46b05a873ad6caa65
|
Update anime1_me.py
|
my-ACG/util/anime1_me.py
|
my-ACG/util/anime1_me.py
|
import argparse
import html
import logging
import re
import pywikibot
import requests
from bs4 import BeautifulSoup
class Anime1Me:
def getData(self, url):
data = {
'episodes': 0,
'end': False,
}
text = requests.get(url).text
soup = BeautifulSoup(text, 'html.parser')
title = soup.find('h1', {'class': 'page-title'}).text
# print('title', title)
text = requests.get('https://anime1.me').text
text = html.unescape(text)
m = re.search(r'>{}</a></td><td class=\"column-2\">(.+?)</td>'.format(re.escape(title)), text)
if m:
episodes = m.group(1)
data['episodes'], data['end'] = self._parse_episodes(episodes)
else:
# print('Not match')
pass
return data
def _parse_episodes(self, episodes):
if episodes == '劇場版':
return 1, True
m = re.match(r'^連載中\((\d+)(?:正式版|AT-X)?\)$', episodes)
if m:
return int(m.group(1)), False
m = re.match(r'^連載中\((\d+)\.5\)$', episodes)
if m:
return int(m.group(1)) + 1, False
m = re.match(r'^1-(\d+)$', episodes)
if m:
return int(m.group(1)), True
m = re.match(r'^1-(\d+)\+SP$', episodes)
if m:
return int(m.group(1)) + 1, True
m = re.match(r'^1-(\d+)\+OVA1$', episodes)
if m:
return int(m.group(1)) + 1, True
m = re.match(r'^1-(\d+)\+(\d+)$', episodes)
if m:
if int(m.group(1)) + 1 == int(m.group(2)):
return int(m.group(2)), True
        raise Exception('Unknown episodes format: {}'.format(episodes))
def updateItem(self, datasite, item):
itemlabel = item.get()['labels']['zh-tw']
logging.info('%s %s', item.id, itemlabel)
claims = item.get()['claims']
if 'P38' not in claims:
logging.error('\t No anime1 claims')
return
url = claims['P38'][0].getTarget()
data = self.getData(url)
        # Total episode count
if 'episodes' in data:
new_episodes = data['episodes']
episodesValue = claims['P27'][0].getTarget()
old_episodes = episodesValue.amount
if new_episodes > old_episodes:
episodesValue.amount = new_episodes
logging.info('\t Update episodes from %s to %s', old_episodes, new_episodes)
claims['P27'][0].changeTarget(episodesValue, summary='更新總集數')
else:
new_claim = pywikibot.page.Claim(datasite, 'P27')
new_claim.setTarget(pywikibot.WbQuantity(new_episodes, site=datasite))
logging.info('\t Add new episodes %s', new_episodes)
item.addClaim(new_claim, summary='新增總集數')
        # Airing status
if 'P31' in claims:
if data['end']:
if claims['P31'][0].getTarget().id != 'Q58':
logging.info('\t Update status to end')
                    statusValue = pywikibot.ItemPage(datasite, 'Q58')  # finished airing
claims['P31'][0].changeTarget(statusValue, summary='更新播放狀態')
elif claims['P31'][0].getTarget().id == 'Q57':
logging.info('\t Update status to playing')
                statusValue = pywikibot.ItemPage(datasite, 'Q56')  # currently airing
claims['P31'][0].changeTarget(statusValue, summary='更新播放狀態')
else:
itemid = 'Q56'
if data['end']:
itemid = 'Q58'
new_claim = pywikibot.page.Claim(datasite, 'P31')
new_claim.setTarget(pywikibot.ItemPage(datasite, itemid))
logging.info('\t Add new status')
item.addClaim(new_claim, summary='新增播放狀態')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('url')
args = parser.parse_args()
logging.info(Anime1Me().getData(args.url))
|
Python
| 0
|
@@ -964,16 +964,23 @@
%E6%AD%A3%E5%BC%8F%E7%89%88%7CAT-X
+%7CAT-X%E7%84%A1%E4%BF%AE
)?%5C)$',
|
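The hunk widens the "currently airing" alternation to (?:正式版|AT-X|AT-X無修)?. Because Python's re module backtracks through alternatives, the longer AT-X無修 branch still matches even though AT-X is listed first. A quick check of the widened pattern:

import re

pattern = r'^連載中\((\d+)(?:正式版|AT-X|AT-X無修)?\)$'
m = re.match(pattern, '連載中(12AT-X無修)')
print(m.group(1))  # 12
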
66696feb49da76965b5d413e39e63625358dfe1f
|
Fix queue import for Python 3
|
IPython/nbconvert/preprocessors/execute.py
|
IPython/nbconvert/preprocessors/execute.py
|
"""Module containing a preprocessor that removes the outputs from code cells"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import sys
from Queue import Empty
from IPython.kernel import KernelManager
from IPython.nbformat.current import reads, NotebookNode, writes
from .base import Preprocessor
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class ExecutePreprocessor(Preprocessor):
"""
Executes all the cells in a notebook
"""
def __init__(self, extra_arguments=[], **kwargs):
"""
        Start a kernel to run the Python code
"""
super(ExecutePreprocessor, self).__init__(**kwargs)
        self.extra_arguments = extra_arguments
def _create_client(self):
self.km = KernelManager()
self.km.start_kernel(extra_arguments=self.extra_arguments, stderr=open(os.devnull, 'w'))
self.kc = self.km.client()
self.kc.start_channels()
self.iopub = self.kc.iopub_channel
self.shell = self.kc.shell_channel
self.shell.kernel_info()
def _shutdown_client(self):
self.kc.stop_channels()
self.km.shutdown_kernel()
del self.km
def preprocess(self, nb, resources):
self._create_client()
nb, resources = super(ExecutePreprocessor, self).preprocess(nb, resources)
self._shutdown_client()
return nb, resources
def preprocess_cell(self, cell, resources, cell_index):
"""
Apply a transformation on each code cell. See base.py for details.
"""
if cell.cell_type != 'code':
return cell, resources
try:
outputs = self.run_cell(self.shell, self.iopub, cell)
except Exception as e:
self.log.error("failed to run cell: " + repr(e))
self.log.error(str(cell.input))
sys.exit(1)
cell.outputs = outputs
return cell, resources
@staticmethod
def run_cell(shell, iopub, cell):
# print cell.input
shell.execute(cell.input)
# wait for finish, maximum 20s
shell.get_msg(timeout=20)
outs = []
while True:
try:
msg = iopub.get_msg(timeout=0.2)
except Empty:
break
msg_type = msg['msg_type']
if msg_type in ('status', 'pyin'):
continue
elif msg_type == 'clear_output':
outs = []
continue
content = msg['content']
# print msg_type, content
out = NotebookNode(output_type=msg_type)
if msg_type == 'stream':
out.stream = content['name']
out.text = content['data']
elif msg_type in ('display_data', 'pyout'):
out['metadata'] = content['metadata']
for mime, data in content['data'].iteritems():
attr = mime.split('/')[-1].lower()
# this gets most right, but fix svg+html, plain
attr = attr.replace('+xml', '').replace('plain', 'text')
setattr(out, attr, data)
if msg_type == 'pyout':
out.prompt_number = content['execution_count']
elif msg_type == 'pyerr':
out.ename = content['ename']
out.evalue = content['evalue']
out.traceback = content['traceback']
else:
self.log.error("unhandled iopub msg: " + msg_type)
outs.append(out)
return outs
|
Python
| 0.000223
|
@@ -367,16 +367,81 @@
rt sys%0A%0A
+try:%0A from queue import Empty # Py 3%0Aexcept ImportError:%0A
from Que
@@ -455,16 +455,25 @@
rt Empty
+ # Py 2%0A
%0Afrom IP
|
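Decoded, the hunk is the standard two-way import shim: try the Python 3 module name first and fall back to the Python 2 one:

try:
    from queue import Empty  # Py 3
except ImportError:
    from Queue import Empty  # Py 2
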
0ccf067330b702a58d55b7fcd80c13a941142c1e
|
Fix wrong variable name in 'CachedWorld.delete_component'
|
esper/world.py
|
esper/world.py
|
from functools import lru_cache
class World:
def __init__(self):
"""A World object keeps track of all Entities, Components and Processors.
A World contains a database of all Entity/Component assignments. It also
handles calling the process method on any Processors assigned to it.
"""
self._processors = []
self._next_entity_id = 0
self._components = {}
self._entities = {}
def clear_database(self):
"""Remove all entities and components from the world."""
self._components.clear()
self._entities.clear()
self._next_entity_id = 0
def add_processor(self, processor_instance, priority=0):
"""Add a Processor instance to the world.
:param processor_instance: An instance of a Processor,
subclassed from the Processor class
:param priority: A higher number is processed first.
"""
# TODO: raise an exception if the same type Processor already exists.
# TODO: check that the processor is a subclass of esper.Processor.
processor_instance.priority = priority
processor_instance.world = self
self._processors.append(processor_instance)
self._processors.sort(key=lambda processor: -processor.priority)
def remove_processor(self, processor_type):
"""Remove a Processor from the world, by type.
:param processor_type: The class type of the Processor to remove.
"""
for processor in self._processors:
if type(processor) == processor_type:
processor.world = None
self._processors.remove(processor)
def create_entity(self):
"""Create a new Entity.
        This method returns an Entity ID, which is just a plain integer.
:return: The next Entity ID in sequence.
"""
self._next_entity_id += 1
return self._next_entity_id
def delete_entity(self, entity):
"""Delete an Entity from the World.
Delete an Entity from the World. This will also delete any Component
instances that are assigned to the Entity.
:param entity: The Entity ID you wish to delete.
"""
for component_type in self._entities.get(entity, []):
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
try:
del self._entities[entity]
return entity
except KeyError:
pass
def component_for_entity(self, entity, component_type):
"""Retrieve a specific Component instance for an Entity.
:param entity: The Entity to retrieve the Component for.
:param component_type: The Component instance you wish to retrieve.
:return: A Component instance, *if* it exists for the Entity.
"""
try:
return self._entities[entity][component_type]
except KeyError:
pass
def add_component(self, entity, component_instance):
"""Add a new Component instance to an Entity.
If a Component of the same type is already assigned to the Entity,
it will be replaced with the new one.
:param entity: The Entity to associate the Component with.
:param component_instance: A Component instance.
"""
component_type = type(component_instance)
if component_type not in self._components:
self._components[component_type] = set()
self._components[component_type].add(entity)
if entity not in self._entities:
self._entities[entity] = {}
self._entities[entity][component_type] = component_instance
def delete_component(self, entity, component_type):
"""Delete a Component instance from an Entity, by type.
        A Component instance can be deleted by providing its type.
For example: world.delete_component(enemy_a, Velocity) will remove
the Velocity instance from the Entity enemy_a.
:param entity: The Entity to delete the Component from.
:param component_type: The type of the Component to remove.
"""
try:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
except KeyError:
pass
try:
del self._entities[entity][component_type]
if not self._entities[entity]:
del self._entities[entity]
return entity
except KeyError:
pass
def get_component(self, component_type):
"""Get an iterator for Entity, Component pairs.
:param component_type: The Component type to retrieve.
:return: An iterator for (Entity, Component) tuples.
"""
entity_db = self._entities
for entity in self._components.get(component_type, []):
yield entity, entity_db[entity][component_type]
def get_components(self, *component_types):
"""Get an iterator for Entity and multiple Component sets.
:param component_types: Two or more Component types.
:return: An iterator for (Entity, Component1, Component2, etc)
tuples.
"""
entity_db = self._entities
comp_db = self._components
try:
entity_set = set.intersection(*[comp_db[ct] for ct in component_types])
for entity in entity_set:
yield entity, [entity_db[entity][ct] for ct in component_types]
except KeyError:
pass
def process(self, *args):
"""Process all Systems, in order of their priority."""
for processor in self._processors:
processor.process(*args)
class CachedWorld(World):
def __init__(self, cache_size=128):
"""A sub-class of World using an LRU cache for Entity lookups."""
super().__init__()
self.set_cache_size(cache_size)
def set_cache_size(self, size):
"""Set the maximum size of the LRU cache for Entity lookup.
Replaces the existing cache.
"""
wrapped = self._get_entities.__wrapped__.__get__(self, World)
self._get_entities = lru_cache(size)(wrapped)
def clear_database(self):
"""Remove all Entities and Components from the world."""
super().clear_database()
self._get_entities.cache_clear()
def delete_entity(self, entity):
"""Delete an Entity from the World."""
if super().delete_entity(entity) is not None:
self._get_entities.cache_clear()
return entity
def add_component(self, entity, component_instance):
"""Add a new Component instance to an Entity."""
super().add_component(entity, component_instance)
self._get_entities.cache_clear()
def delete_component(self, entity, component_type):
"""Delete a Component instance from an Entity, by type."""
if super().delete_component(entity, component_instance) is not None:
self._get_entities.cache_clear()
return entity
@lru_cache()
def _get_entities(self, component_types):
"""Return set of Entities having all given Components."""
comp_db = self._components
return set.intersection(*[comp_db[ct] for ct in component_types])
def get_components(self, *component_types):
"""Get an iterator for Entity and multiple Component sets."""
entity_db = self._entities
try:
for entity in self._get_entities(component_types):
yield entity, [entity_db[entity][ct] for ct in component_types]
except KeyError:
pass
|
Python
| 0.000302
|
@@ -7104,23 +7104,19 @@
mponent_
-instanc
+typ
e) is no
|
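The one-token fix swaps component_instance, which is not a parameter of CachedWorld.delete_component, for component_type, which is. The corrected override, as the diff leaves it (relies on the World base class above):

class CachedWorld(World):
    def delete_component(self, entity, component_type):
        """Delete a Component instance from an Entity, by type."""
        # Forward the parameter this method actually receives.
        if super().delete_component(entity, component_type) is not None:
            self._get_entities.cache_clear()
            return entity
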
ecb00947dc7c8d33f3c759d7ff704b970a4e32ce
|
Support non-PNG output formats in visualization tool - auto-detect format from file name
|
lusmu/visualization.py
|
lusmu/visualization.py
|
"""Tools for visualizing a lusmu graph
Copyright 2013 Eniram Ltd. See the LICENSE file at the top-level directory of
this distribution and at https://github.com/akaihola/lusmu/blob/master/LICENSE
"""
# pylint: disable=W0212
# Allow access to protected members of client classes
# pylint: disable=W0142
# Allow * and ** magic
from __future__ import print_function, unicode_literals
from lusmu.core import Input, Node
import subprocess
def collect_nodes(collected_nodes, *args):
"""Collect all nodes belonging to the same graph
Walks dependent Nodes and inputs recursively.
"""
if not args:
return
node = args[0]
if node in collected_nodes:
return
rest = args[1:]
collect_nodes(collected_nodes, *rest)
collected_nodes.add(node)
collect_nodes(collected_nodes, *node._dependents)
if isinstance(node, Node):
collect_nodes(collected_nodes, *node._iterate_inputs())
def get_action_name(action):
"""Try to return a good representation of the name of an action callable"""
if hasattr(action, 'name'):
return action.name
if hasattr(action, '__name__'):
return action.__name__
if hasattr(action, 'func_name'):
return action.func_name
return action.__class__.__name__
def graphviz_lines(nodes, node_filter):
"""Generate source lines for a Graphviz graph definition"""
all_nodes = set()
collect_nodes(all_nodes, *nodes)
if node_filter:
all_nodes = [n for n in all_nodes if node_filter(n)]
all_nodes = sorted(all_nodes, key=id)
input_nodes = [n for n in all_nodes if isinstance(n, Input)]
yield 'digraph gr {'
yield ' rankdir = LR;'
yield ' { rank = source;'
for node in input_nodes:
yield ' n{};'.format(id(node))
yield ' }'
for node in all_nodes:
yield (' n{node} [label="[{name}]{action}"];'
.format(node=id(node),
name=node.name.replace(':', r'\n'),
action='\\n\\n{}'.format(get_action_name(node._action))
if isinstance(node, Node)
else ''))
yield ' edge [color=blue];'
for other in node._dependents:
if other in all_nodes:
yield (' n{node} -> n{other};'
.format(node=id(node), other=id(other)))
yield '}'
def visualize_graph(nodes, filename, node_filter=lambda node: True):
"""Saves a visualization of given nodes in a PNG file"""
graphviz = subprocess.Popen(['dot', '-Tpng', '-o', filename],
stdin=subprocess.PIPE)
source = '\n'.join(graphviz_lines(nodes, node_filter))
graphviz.communicate(source.encode('utf-8'))
return source
|
Python
| 0
|
@@ -2513,12 +2513,15 @@
in a
- PNG
+n image
fil
@@ -2533,52 +2533,189 @@
-graphviz = subprocess.Popen(%5B'dot', '-Tpng',
+image_format = filename.split('.')%5B-1%5D.lower()%0A graphviz = subprocess.Popen(%5B'dot',%0A '-T%7B%7D'.format(image_format),%0A
'-o
|
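The change derives Graphviz's -T output flag from the target file's extension instead of hard-coding -Tpng. The detection, reduced to a sketch (assumes the dot binary is on PATH):

import subprocess

def render(source, filename):
    # 'graph.svg' -> '-Tsvg', 'graph.png' -> '-Tpng', and so on.
    image_format = filename.split('.')[-1].lower()
    graphviz = subprocess.Popen(['dot', '-T{}'.format(image_format), '-o', filename],
                                stdin=subprocess.PIPE)
    graphviz.communicate(source.encode('utf-8'))
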
0d32f515b7a7cc31f263c61f8605f730259b8fa9
|
Update views.py
|
pdf/views.py
|
pdf/views.py
|
"""
RenderPDF helper class
"""
import os
import datetime
import cStringIO as StringIO
import ho.pisa as pisa
from cgi import escape
from django.conf import settings
from django.http import HttpResponse
from django.template.loader import get_template
from django.template import Context
from django.views.generic.base import TemplateView
class RenderPDF(object):
"""
class based view to render template in PDF format.
"""
template_name = 'django_pdf/hello_world.html'
assets_url = settings.MEDIA_ROOT
    def fetch_resources(self, uri, rel=''):
        """
        Method returns the absolute path to resources.
"""
absolute_path = os.path.join(settings.MEDIA_ROOT,
uri.replace(self.assets_url, ""))
return absolute_path
def render_to_response(self, context, **response_kwargs):
context.update(response_kwargs)
return self.render_to_pdf(context)
def render_to_pdf(self, context):
"""
renders pdf file
"""
template = get_template(self.template_name)
template_context = Context(context)
html = template.render(template_context)
result = StringIO.StringIO()
pdf = pisa.pisaDocument(StringIO.StringIO(html.encode("UTF-8")),
result, link_callback=self.fetch_resources)
if not pdf.err:
return HttpResponse(result.getvalue(), mimetype='application/pdf')
return HttpResponse('We had some errors<pre>%s</pre>' % escape(html))
|
Python
| 0
|
@@ -133,16 +133,31 @@
t escape
+%0Aimport logging
%0A%0Afrom d
@@ -356,16 +356,54 @@
teView%0A%0A
+logger = logging.getLogger(__name__)%0A%0A
%0Aclass R
@@ -819,16 +819,52 @@
l, %22%22))%0A
+ logger.debug(absolute_path)%0A
|
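The hunks add a module-level logger and a logger.debug(absolute_path) call inside fetch_resources. The pattern in isolation (the media root below is hypothetical):

import logging
import os

logger = logging.getLogger(__name__)

def fetch_resources(uri, rel=''):
    absolute_path = os.path.join('/srv/media', uri)  # hypothetical media root
    logger.debug(absolute_path)
    return absolute_path
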
e2e279e32b8366293d6cb99bfd8744ab47c6d36d
|
update stringcleaning app for new rapidsms. also replace '.' with ' ' instead of just deleting them.
|
lib/rapidsms/contrib/stringcleaning/app.py
|
lib/rapidsms/contrib/stringcleaning/app.py
|
import rapidsms
import re
class App (rapidsms.App):
def parse (self, message):
''' Cleans up messages by removing punctuation, and replacing intended
numbers with numerals, for example:
original: "hello"., 2lli.o .2.o.i. d.s. 12.1. 'l3oii user4' "o0,oo", o0.oo,"o0. oo", oo0. ooo0"
shiny new: hello 2111.0 2.0.1 ds 12.1 13011 user4 00 oo 00.00 00 oo 000 ooo0'''
# dont mess with the real message text until the end
msgtxt = message.text
# remove leading/trailing whitespace
# get out your featherduster
msgtxt = msgtxt.strip()
# replace separation marks with a space
separators = [',', '/', ';', '*', '+', '-']
for mark in separators:
msgtxt = msgtxt.replace(mark, ' ')
# remove other marks (we'll deal with . later)
junk = ['\'', '\"', '`', '(', ')']
for mark in junk:
msgtxt = msgtxt.replace(mark, '')
#remove trailing period (.)
if msgtxt[-1:] == '.':
msgtxt = msgtxt[:-1]
# split the text into chunks
blobs = msgtxt.split(" ")
clean_blobs = []
for blob in blobs:
clean_blob = blob
for n in range(3):
# clean up blobs only if they have a digit in the first few
# characters -- so we don't clean up things like user1
try:
if blob[n].isdigit():
clean_blob = self.letters_for_numbers(blob)
break
except IndexError:
# if the blob doesnt have the first few characters,
# and there is no digit yet, move on
break
# add the cleaned blob (or untouched blob) to a running list
clean_blobs.append(clean_blob)
# reconstruct msgtxt with clean blobs
msgtxt = " ".join(clean_blobs)
# remove periods, keep decimal points
msgtxt = self.period_vs_decimal(msgtxt)
self.info("string cleaning! featherduster!")
self.info("original: " + message.text)
self.info("shiny new: " + msgtxt)
# give the message clean text
message.text = msgtxt
def period_vs_decimal(self, str):
'''Removes .'s unless they are between two digits'''
txt = str
# marker for wayfinding within the string
marker = 0
for p in range(txt.count('.')):
# move the marker to each . by adding the previous marker to the
# location of the next . within the substring beyond the marker
marker = marker + txt[marker:].index('.')
if txt[marker-1].isdigit() and txt[marker + 1].isdigit():
# if the . is between two digits, move the marker to the
# next character and find another .
marker += 1
continue
else:
# save the slice up to and the slice beyond this . and move on
# (leave out the .)
txt = txt[:marker] + txt[marker + 1:]
marker += 1
return txt
def letters_for_numbers(self, str):
# dict of letters and the numerals they are intended to be
gaffes = {'i': '1', 'l': '1', 'o': '0'}
# don't worry about case
numeralized = str.lower()
for g in gaffes.iterkeys():
try:
# replace each of the letters with its appropriate numeral
numeralized = numeralized.replace(g, gaffes[g])
except Exception, e:
print e
# return the string once all gaffes have been replaced
return numeralized
|
Python
| 0.000001
|
@@ -5,24 +5,47 @@
rt r
-apidsms%0A
+e%0Afrom rapidsms.apps.base
import
-r
+AppBas
e%0A%0Ac
@@ -58,20 +58,15 @@
pp (
-rapidsms.App
+AppBase
):%0A%0A
@@ -2974,176 +2974,177 @@
se:%0A
- # save the slice up to and the slice beyond this . and move on%0A # (leave out the .)%0A txt = txt%5B:marker%5D + txt%5Bmarker + 1:%5D
+%09%09# if '.' not between 2 digits, replace with a space%0A txt_list = list(txt)%0A txt_list%5Bmarker%5D = ' '%0A txt = ''.join(txt_list)
%0A
@@ -3739,20 +3739,21 @@
return numeralized
+%0A
|
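Instead of slicing the '.' out of the string, the new code replaces it with a space by round-tripping through a list, since Python strings are immutable. The technique in isolation:

def replace_char_at(txt, marker, ch=' '):
    # Strings cannot be mutated in place; edit a list copy and rejoin.
    txt_list = list(txt)
    txt_list[marker] = ch
    return ''.join(txt_list)

print(replace_char_at('12.ab', 2))  # '12 ab'
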
81ced1c9642fa8c364ce9a840adecde633c96b42
|
Remove disable pylint for paginator error and fix syntax
|
openedx/core/lib/api/paginators.py
|
openedx/core/lib/api/paginators.py
|
""" Paginatator methods for edX API implementations."""
from django.http import Http404
from django.utils.translation import ugettext as _
from django.core.paginator import Paginator, InvalidPage
def paginate_search_results(object_class, search_results, page_size, page):
"""
Takes edx-search results and returns a Page object populated
with db objects for that page.
:param object_class: Model class to use when querying the db for objects.
:param search_results: edX-search results.
:param page_size: Number of results per page.
:param page: Page number.
:return: Paginator object with model objects
"""
paginator = Paginator(search_results['results'], page_size)
    # This code is taken from within the GenericAPIView#paginate_queryset method.
    # It is common code, but not directly reusable from that view class.
try:
page_number = paginator.validate_number(page)
except InvalidPage:
if page == 'last':
page_number = paginator.num_pages
else:
raise Http404(_("Page is not 'last', nor can it be converted to an int."))
try:
paged_results = paginator.page(page_number)
except InvalidPage as e: # pylint: disable=invalid-name
raise Http404(_('Invalid page (%(page_number)s): %(message)s') % {
'page_number': page_number,
'message': str(e)
})
search_queryset_pks = [item['data']['pk'] for item in paged_results.object_list]
queryset = object_class.objects.filter(pk__in=search_queryset_pks)
def ordered_objects(primary_key):
""" Returns database object matching the search result object"""
for obj in queryset:
if obj.pk == primary_key:
return obj
# map over the search results and get a list of database objects in the same order
object_results = map(ordered_objects, search_queryset_pks)
paged_results.object_list = object_results
return paged_results
|
Python
| 0
|
@@ -86,59 +86,8 @@
404%0A
-from django.utils.translation import ugettext as _%0A
from
@@ -964,18 +964,16 @@
Http404(
-_(
%22Page is
@@ -1021,17 +1021,16 @@
n int.%22)
-)
%0A%0A tr
@@ -1115,41 +1115,17 @@
as e
-: # pylint: disable=invalid-name
+xception:
%0A
@@ -1147,11 +1147,22 @@
404(
-_('
+%0A %22
Inva
@@ -1174,11 +1174,9 @@
age
-(%25(
+%7B
page
@@ -1186,30 +1186,29 @@
mber
-)s): %25(message)s') %25 %7B
+%7D: %7Bmessage%7D%22.format(
%0A
@@ -1216,17 +1216,20 @@
-'
+
page_num
@@ -1231,19 +1231,17 @@
e_number
-':
+=
page_num
@@ -1261,24 +1261,33 @@
+
-'
+
message
-':
+=
str(e
+xception
)%0A
@@ -1292,17 +1292,30 @@
+
-%7D
+ )%0A
)%0A%0A s
|
09967b21638b423faa8d7de8c57cc00cdf6aec49
|
Update catalytic_potential.py
|
KaSaAn/scripts/catalytic_potential.py
|
KaSaAn/scripts/catalytic_potential.py
|
#!/usr/bin/env python3
import argparse
import sys
from KaSaAn.functions import get_potential_of_folder
def main(args=None):
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(description='Out of a series of snapshots from a simulation, obtain the catalytic'
'potential of each snapshot, i.e. each state. Each molecular species'
                                                 'has a catalytic potential, defined as the product of the number of '
'bound enzyme agents, times the number of bound substrate agents,'
'times the abundance of that species. The catalytic potential of a'
'state is the sum of the catalytic potentials over all the constituent'
'species.')
parser.add_argument('-d', '--directory', type=str, default='./',
help='The directory containing the snapshots to be analyzed.')
parser.add_argument('-e', '--enzyme_name', type=str, required=True,
help='The name of the agent acting as an enzyme; e.g. <GSK(ARM, FTZ, ser3{ph})> would be simply'
' <GSK>.')
parser.add_argument('-s', '--substrate_name', type=str, required=True,
help='The name of the agent acting as a substrate; e.g. <APC(ARM, OD)> would be simply <APC>.')
parser.add_argument('-v', '--verbose', action='store_true',
help='If set, print additional information, like number of snapshots found, and current '
'snapshot being parsed.')
parser.add_argument('-o', '--output_file', type=str,
help='The name of the file where the list of catalytic potentials should be saved; one value'
'per line, in the same order as the snapshots. If not specified, the list will be printed'
'to the console.')
parser.add_argument('-p', '--snapshot_prefix', type=str, required=True,
help='The prefix by which the snapshots are named; e.g. <snap_4.ka> would have <snap_>.')
args = parser.parse_args()
q = get_potential_of_folder(args.directory, args.enzyme_name, args.substrate_name, args.verbose, args.snapshot_prefix)
if args.output_file:
with open(args.output_file, 'w') as out_file:
for item in q:
out_file.write('%s\n' % item)
else:
print(q)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -340,16 +340,17 @@
'
+
potentia
@@ -460,16 +460,17 @@
'
+
has a ca
@@ -527,17 +527,16 @@
umber of
-
'%0A
@@ -579,16 +579,17 @@
'
+
bound en
@@ -696,16 +696,17 @@
'
+
times th
@@ -814,16 +814,17 @@
'
+
state is
@@ -936,16 +936,17 @@
'
+
species.
@@ -1707,17 +1707,16 @@
current
-
'%0A
@@ -1739,16 +1739,17 @@
'
+
snapshot
@@ -1970,16 +1970,17 @@
'
+
per line
@@ -2091,16 +2091,17 @@
'
+
to the c
@@ -2666,16 +2666,16 @@
ain__':%0A
-
main
@@ -2676,8 +2676,9 @@
main()
+%0A
|
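Most hunks here add or move a single leading space inside adjacent string literals. Python concatenates adjacent literals at compile time, so a missing space silently fuses words in the --help output. A two-line demonstration:

fused = ('obtain the catalytic'
         'potential of each snapshot')
fixed = ('obtain the catalytic'
         ' potential of each snapshot')
print(fused)  # ...catalyticpotential...
print(fixed)  # ...catalytic potential...
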
43b090d70cc6707367af0439d0dfda7c3a531524
|
Fix weird applescript escaping issues Conflicts: cactus/browser.py
|
cactus/browser.py
|
cactus/browser.py
|
import subprocess
import platform
s1 = """
tell application "Google Chrome"
set windowsList to windows as list
repeat with currWindow in windowsList
set tabsList to currWindow's tabs as list
repeat with currTab in tabsList
if "%s" is in currTab's URL then execute currTab javascript "%s"
end repeat
end repeat
end tell
"""
s2 = """
tell application "Safari"
if (count of windows) is greater than 0 then
set windowsList to windows as list
repeat with currWindow in windowsList
set tabsList to currWindow's tabs as list
repeat with currTab in tabsList
if "%s" is in currTab's URL then
tell currTab to do JavaScript "%s"
end if
end repeat
end repeat
end if
end tell
"""
def applescript(script):
# Bail if we're not on mac os for now
if platform.system() != "Darwin":
return
command = "osascript<<END%sEND" % script
return subprocess.check_output(command, shell = True)
def _insertJavascript(urlMatch, js):
apps = appsRunning(['Safari', 'Google Chrome'])
if apps['Google Chrome']:
try:
applescript(s1 % (urlMatch, js))
except Exception, e:
pass
if apps['Safari']:
try:
applescript(s2 % (urlMatch, js))
except Exception, e:
pass
def browserReload(url):
_insertJavascript(url, "window.location.reload()")
def browserReloadCSS(url):
_insertJavascript(url,
"var links = document.getElementsByTagName('link'); for (var i = 0; i < links.length;i++) { var link = links[i]; if (link.rel === 'stylesheet') {link.href += '?'; }}")
def appsRunning(l):
psdata = subprocess.check_output(['ps aux'], shell = True)
retval = {}
for app in l: retval[app] = app in psdata
return retval
|
Python
| 0.000246
|
@@ -493,36 +493,32 @@
as list%0A
-
repeat with curr
@@ -685,17 +685,16 @@
URL then
-
%0A
@@ -835,34 +835,1538 @@
%22%22%0A%0A
-%0Adef applescript(scrip
+s3 = %22%22%22%0Awindow.location.reload()%0A%22%22%22%0A%0As4 = %22%22%22%0A(function() %7B%0A function updateQueryStringParameter(uri, key, value) %7B%0A%0A%0A // console.log('updateQueryStringParameter')%0A // console.log(uri)%0A // console.log(key)%0A // console.log(value)%0A%0A var re = new RegExp('(%5B?%7C&%5D)' + key + '=.*?(&%7C$)', 'i');%0A separator = uri.indexOf('?') !== -1 ? '&' : '?';%0A%0A if (uri.match(re)) %7B%0A return uri.replace(re, '$1' + separator + key + '=' + value + '$2');%0A %7D else %7B%0A return uri + separator + key + '=' + value;%0A %7D%0A %7D%0A%0A var links = document.getElementsByTagName('link');%0A%0A for (var i = 0; i %3C links.length;i++) %7B%0A%0A var link = links%5Bi%5D;%0A%0A console.log('inspect', link);%0A%0A if (link.rel === 'stylesheet') %7B%0A%0A // Don't reload external urls, they likely did not change%0A if (link.href.indexOf('127.0.0.1') == -1 && link.href.indexOf('localhost') == -1) %7B%0A continue;%0A %7D%0A%0A var updatedLink = updateQueryStringParameter(link.href, 'cactus.reload', new Date().getTime());%0A%0A // This is really hacky, but needed because the regex gets magically broken by piping it%0A // through applescript. This replaces the first occurence of ? with & if there was no &.%0A if (updatedLink.indexOf('?') == -1) %7B%0A updatedLink = updatedLink.replace('&', '?');%0A %7D%0A%0A link.href = updatedLink;%0A %7D%0A %7D%0A%7D)()%0A%22%22%22%0A%0Adef applescript(inpu
t):%0A
+%0A
@@ -2495,21 +2495,20 @@
sEND%22 %25
-scrip
+inpu
t%0A re
@@ -2554,19 +2554,16 @@
hell
- =
+=
True)%0A%0A
-%0A
def
@@ -2595,16 +2595,17 @@
h, js):%0A
+%0A
apps
@@ -2687,36 +2687,24 @@
try:
-%0A
applescript
@@ -2749,36 +2749,24 @@
xception, e:
-%0A
pass%0A%0A i
@@ -2795,28 +2795,16 @@
try:
-%0A
applesc
@@ -2861,28 +2861,15 @@
, e:
-%0A
pass%0A%0A
-%0A
def
@@ -2919,36 +2919,11 @@
rl,
-%22window.location.reload()%22)%0A
+s3)
%0A%0Ade
@@ -2977,199 +2977,12 @@
url,
-%0A %22var links = document.getElementsByTagName('link'); for (var i = 0; i %3C links.length;i++) %7B var link = links%5Bi%5D; if (link.rel === 'stylesheet') %7Blink.href += '?'; %7D%7D%22)%0A
+ s4)
%0A%0Ade
@@ -3053,19 +3053,17 @@
%5D, shell
- =
+=
True)%0A
@@ -3139,9 +3139,8 @@
n retval
-%0A
|
c775b159b310afd323945afcb9dba771731a382b
|
use repr for log serialization if json fails
|
src/ekklesia_portal/__init__.py
|
src/ekklesia_portal/__init__.py
|
import eliot
import logging
import sys
from eliot.stdlib import EliotHandler
logging.getLogger().addHandler(EliotHandler())
logging.getLogger().setLevel(logging.DEBUG)
eliot.to_file(sys.stdout)
logging.captureWarnings(True)
logg = logging.getLogger(__name__)
logging.getLogger("parso").setLevel(logging.WARN)
logg.info("init")
|
Python
| 0.000001
|
@@ -70,16 +70,245 @@
Handler%0A
+from eliot.json import EliotJSONEncoder%0A%0Aclass MyEncoder(EliotJSONEncoder):%0A def default(self, obj):%0A%0A try:%0A return EliotJSONEncoder.default(self, obj)%0A except TypeError:%0A return repr(obj)%0A%0A
%0Alogging
@@ -391,16 +391,17 @@
.DEBUG)%0A
+%0A
eliot.to
@@ -416,18 +416,38 @@
s.stdout
+, encoder=MyEncoder
)%0A
+%0A
logging.
@@ -505,16 +505,16 @@
ame__)%0A%0A
+
logging.
@@ -559,23 +559,4 @@
RN)%0A
-%0Alogg.info(%22init%22)%0A
|
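The added MyEncoder subclasses Eliot's JSON encoder and falls back to repr() when serialization raises TypeError. The same idea shown with only the standard library (json.JSONEncoder stands in for EliotJSONEncoder so the sketch runs anywhere):

import json

class ReprFallbackEncoder(json.JSONEncoder):
    def default(self, obj):
        # The base default() raises TypeError for types it cannot serialize.
        try:
            return json.JSONEncoder.default(self, obj)
        except TypeError:
            return repr(obj)

print(json.dumps({'x': object()}, cls=ReprFallbackEncoder))
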
a3e05e67b4907f6f97462aa958e4f344f92d1e72
|
add a new compare method
|
library/module_utils/network/f5/compare.py
|
library/module_utils/network/f5/compare.py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
def cmp_simple_list(want, have):
if want is None:
return None
if have is None and want in ['', 'none']:
return None
if have is not None and want in ['', 'none']:
return []
if have is None:
return want
if set(want) != set(have):
return want
return None
|
Python
| 0.000001
|
@@ -538,24 +538,199 @@
rn want%0A return None%0A
+%0A%0Adef cmp_str_with_none(want, have):%0A if want is None:%0A return None%0A if have is None and want == '':%0A return None%0A if want != have:%0A return want%0A
|
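The added cmp_str_with_none returns None both when no value was requested and when an empty "want" meets an absent "have"; it returns the wanted value only on a real difference. Its edge-case behavior, with the function transcribed from the decoded hunk:

def cmp_str_with_none(want, have):
    if want is None:
        return None
    if have is None and want == '':
        return None
    if want != have:
        return want

print(cmp_str_with_none(None, 'a'))  # None: nothing requested
print(cmp_str_with_none('', None))   # None: both effectively unset
print(cmp_str_with_none('b', 'a'))   # 'b': an update is needed
print(cmp_str_with_none('a', 'a'))   # None: already in the desired state
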
795e9734cc802caa8847a9a2b22f3f16297462bc
|
use combined provider in nsi2 provider setup
|
opennsa/protocols/nsi2/__init__.py
|
opennsa/protocols/nsi2/__init__.py
|
"""
Various protocol initialization.
Author: Henrik Thostrup Jensen <htj@nordu.net>
Copyright: NORDUnet (2011-2012)
"""
from twisted.web import resource, server
from opennsa.protocols.shared import resource as soapresource
from opennsa.protocols.nsi2 import providerservice, providerclient, provider, \
requesterservice, requesterclient, requester
def setupProvider(nsi_service, top_resource, service_provider, host, port, tls=False, ctx_factory=None):
soap_resource = soapresource.setupSOAPResource(top_resource, 'CS2')
provider_client = providerclient.ProviderClient(ctx_factory)
nsi2_provider = provider.Provider(service_provider)
providerservice.ProviderService(soap_resource, nsi2_provider)
return provider_client
def setupRequester(top_resource, host, port, tls=False, ctx_factory=None, callback_timeout=None):
resource_name = 'RequesterService2'
# copied from nsi1.__init__
def _createServiceURL(host, port, tls=False):
proto_scheme = 'https://' if tls else 'http://'
service_url = proto_scheme + '%s:%i/NSI/services/%s' % (host,port, resource_name)
return service_url
service_url = _createServiceURL(host, port, tls)
soap_resource = soapresource.setupSOAPResource(top_resource, resource_name)
requester_client = requesterclient.RequesterClient(service_url)
nsi_requester = requester.Requester(requester_client, callback_timeout=callback_timeout)
requester_service = requesterservice.RequesterService(soap_resource, nsi_requester)
return nsi_requester
# copied from nsi1.__init__
def createRequesterClient(host, port, tls=False, ctx_factory=None, callback_timeout=None):
top_resource = resource.Resource()
nsi_requester = setupRequester(top_resource, host, port, tls, ctx_factory, callback_timeout)
site = server.Site(top_resource, logPath='/dev/null')
return nsi_requester, site
|
Python
| 0
|
@@ -678,24 +678,41 @@
ice_provider
+, provider_client
)%0A%0A provi
@@ -780,24 +780,29 @@
return
+nsi2_
provider
_client%0A
@@ -793,23 +793,16 @@
provider
-_client
%0A%0A%0Adef s
|
5633c08f942b72bdd8ca576e408cf19041c4f94d
|
Load from the pre processed data
|
MRJobNetworkXSimulations-Generator.py
|
MRJobNetworkXSimulations-Generator.py
|
from __future__ import division
import hdfs
import networkx as nx
import pandas as pd
import random
from mrjob.job import MRJob
from mrjob.protocol import JSONValueProtocol
from mrjob.step import MRStep
import cascade
class MRJobNetworkXSimulations(MRJob):
OUTPUT_PROTOCOL = JSONValueProtocol
def configure_options(self):
super(MRJobNetworkXSimulations, self).configure_options()
self.add_file_option('--network')
self.add_passthrough_option('--modle', type='int', default=0, help='...')
self.add_passthrough_option('--sampelFraction', type='int', default=10, help='...')
self.add_passthrough_option('--resampeling', type='int', default=10, help='...')
self.add_passthrough_option('--numberofloops', type='int', default=100, help='...')
def runCascade(self, C):
cas = C
idx = []
values = []
while True:
try:
cas.next()
values.append(cas.getInfectedNode())
idx.append(cas.getStep())
except StopIteration as err:
break
except Exception as e:
print e
return idx, values
def csize_init(self):
self.G = nx.read_gpickle(self.options.network)
def csize(self, _, line):
client = hdfs.client.Client("http://" + urlparse(line).netloc)
if line[-1] != "#":
with client.read(urlparse(line).path) as r:
# with open(urlparse(line).path) as r:
buf = BytesIO(r.read())
# If the data is in a GZipped file.
if ".gz" in line:
gzip_f = gzip.GzipFile(fileobj=buf)
content = gzip_f.read()
buf = StringIO.StringIO(content)
dtf = pd.read_csv(buf, index_col=False, header=None, sep="\t", engine="python",
compression=None).drop_duplicates(subset=[2], keep='last')
dftt = dtf[dtf[1].isin(self.G.nodes())]
if len(dftt.index) > 0:
yield (None, len(dftt.index))
def mapper_init(self):
self.G = nx.read_gpickle(self.options.network)
nx.set_node_attributes(self.G, 'activated', {node: 0 for node in self.G.nodes()})
seed = random.choice([n for n, attrdict in self.G.node.items() if attrdict['activated'] == 0])
nx.set_node_attributes(self.G, 'activated', {seed: 1})
self.r_u_l = None
self.r_a_l = None
def mapper(self, _, line):
iteration = int(line) * 10
for x in range(0, self.options.numberofloops):
if self.options.modle == 0:
idx, values = self.runCascade(cascade.randomActive(self.G, itterations=iteration))
elif self.options.modle == 1:
idx, values = self.runCascade(cascade.CascadeNabours(self.G, itterations=iteration))
elif self.options.modle == 2:
idx, values = self.runCascade(cascade.NodeWithHighestActiveNabours(self.G, itterations=iteration))
elif self.options.modle == 3:
idx, values = self.runCascade(cascade.NodeInSameCommunity(self.G, itterations=iteration))
elif self.options.modle == 4:
idx, values = self.runCascade(cascade.CascadeNaboursWeight(self.G, itterations=iteration))
df = pd.DataFrame({"ids": values}, index=idx)
for i in range(1, self.options.resampeling):
yield (None, df.sample(frac=(float(self.options.sampelFraction) / float(100))).to_json())
def steps(self):
return [
MRStep(
mapper_init=self.csize_init,
mapper=self.csize
),
MRStep(
mapper_init=self.mapper_init,
mapper=self.mapper
)
]
if __name__ == '__main__':
MRJobNetworkXSimulations.run()
|
Python
| 0
|
@@ -37,34 +37,208 @@
ort
-hdfs%0Aimport networkx as nx
+gzip%0A%0Atry:%0A from BytesIO import BytesIO%0Aexcept ImportError:%0A from io import BytesIO%0A%0Atry:%0A from urlparse import urlparse%0Aexcept ImportError:%0A from urllib.parse import urlparse%0A%0Aimport hdfs
%0Aimp
@@ -258,22 +258,8 @@
pd%0A
-import random%0A
from
@@ -327,16 +327,16 @@
rotocol%0A
+
from mrj
@@ -362,25 +362,8 @@
ep%0A%0A
-import cascade%0A%0A%0A
clas
|
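Decoded, the new header pulls in gzip plus version-agnostic BytesIO and urlparse imports, using the same try/except shim seen in the nbconvert record above:

try:
    from urlparse import urlparse        # Py 2
except ImportError:
    from urllib.parse import urlparse    # Py 3

print(urlparse('http://namenode:50070/data').netloc)  # namenode:50070
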
6d5f2b27fec4c4224f8143845cd62ead1ddb6d73
|
Modify email address verification flag and create user event in the same transaction
|
byceps/services/user/email_address_verification_service.py
|
byceps/services/user/email_address_verification_service.py
|
"""
byceps.services.user.email_address_verification_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from typing import Optional
from ...database import db
from ...events.user import (
UserEmailAddressConfirmed,
UserEmailAddressInvalidated,
)
from ...typing import UserID
from ..email import service as email_service
from ..site import service as site_service
from ..site.transfer.models import SiteID
from ..user import service as user_service
from ..verification_token import service as verification_token_service
from ..verification_token.transfer.models import Token
from . import event_service as user_event_service
def send_email_address_confirmation_email(
recipient_email_address: str,
recipient_screen_name: str,
user_id: UserID,
site_id: SiteID,
) -> None:
site = site_service.get_site(site_id)
email_config = email_service.get_config(site.brand_id)
sender = email_config.sender
verification_token = (
verification_token_service.create_for_email_address_confirmation(
user_id, recipient_email_address
)
)
confirmation_url = (
f'https://{site.server_name}/users/email_address/'
f'confirmation/{verification_token.token}'
)
subject = f'{recipient_screen_name}, bitte bestätige deine E-Mail-Adresse'
body = (
f'Hallo {recipient_screen_name},\n\n'
f'bitte bestätige deine E-Mail-Adresse, indem du diese URL abrufst: {confirmation_url}'
)
recipients = [recipient_email_address]
email_service.enqueue_email(sender, recipients, subject, body)
class EmailAddressConfirmationFailed(Exception):
pass
def confirm_email_address(
verification_token: Token,
) -> UserEmailAddressConfirmed:
"""Confirm the email address of the user assigned with that
verification token.
"""
user = user_service.get_db_user(verification_token.user_id)
if user.email_address is None:
raise EmailAddressConfirmationFailed(
'Account has no email address assigned.'
)
token_email_address = verification_token.data.get('email_address')
if not token_email_address:
raise EmailAddressConfirmationFailed('Token contains no email address.')
if user.email_address != token_email_address:
raise EmailAddressConfirmationFailed('Email addresses do not match.')
user.email_address_verified = True
db.session.commit()
event_data = {'email_address': token_email_address}
event = user_event_service.create_event(
'user-email-address-confirmed', user.id, event_data
)
verification_token_service.delete_token(verification_token.token)
return UserEmailAddressConfirmed(
occurred_at=event.occurred_at,
initiator_id=user.id,
initiator_screen_name=user.screen_name,
user_id=user.id,
user_screen_name=user.screen_name,
)
def invalidate_email_address(
user_id: UserID, reason: str, *, initiator_id: Optional[UserID] = None
) -> UserEmailAddressInvalidated:
"""Invalidate the user's email address by marking it as unverified.
This might be appropriate if an email to the user's address bounced
because of a permanent issue (unknown mailbox, unknown domain, etc.)
but not a temporary one (for example: mailbox full).
"""
user = user_service.get_db_user(user_id)
if initiator_id is not None:
initiator = user_service.get_user(initiator_id)
else:
initiator = None
user.email_address_verified = False
db.session.commit()
event_data = {
'email_address': user.email_address,
'reason': reason,
}
if initiator:
event_data['initiator_id'] = str(initiator.id)
event = user_event_service.create_event(
'user-email-address-invalidated', user.id, event_data
)
return UserEmailAddressInvalidated(
occurred_at=event.occurred_at,
initiator_id=initiator.id if initiator else None,
initiator_screen_name=initiator.screen_name if initiator else None,
user_id=user.id,
user_screen_name=user.screen_name,
)
|
Python
| 0
|
@@ -2513,32 +2513,8 @@
True
-%0A db.session.commit()
%0A%0A
@@ -2590,38 +2590,37 @@
r_event_service.
-create
+build
_event(%0A
@@ -2667,29 +2667,80 @@
event_data%0A
-
)
+%0A db.session.add(event)%0A%0A db.session.commit()
%0A%0A verifi
@@ -3667,32 +3667,8 @@
alse
-%0A db.session.commit()
%0A%0A
@@ -3865,22 +3865,21 @@
service.
-create
+build
_event(%0A
@@ -3941,24 +3941,75 @@
t_data%0A )
+%0A db.session.add(event)%0A%0A db.session.commit()
%0A%0A return
|
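The diff above is the whole point of the commit: create_event committed on its own, so the verified flag and the event could land in different transactions. Swapping to build_event plus session.add lets a single commit cover both. A generic SQLAlchemy sketch of the same pattern (the models, table names, and in-memory engine are illustrative, not byceps code):

from sqlalchemy import create_engine, Column, Integer, Boolean, String
from sqlalchemy.orm import declarative_base, Session

Base = declarative_base()

class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    email_address_verified = Column(Boolean, default=False)

class UserEvent(Base):
    __tablename__ = "user_events"
    id = Column(Integer, primary_key=True)
    event_type = Column(String)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    user = User()
    session.add(user)
    session.commit()

    # Stage the flag change and the event, then commit once:
    # either both persist or neither does.
    user.email_address_verified = True
    session.add(UserEvent(event_type="user-email-address-confirmed"))
    session.commit()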
fa9bc00d09cfd173b99eaba3eb17bdfc49100a5b
|
Add explicit name export
|
dasem/__init__.py
|
dasem/__init__.py
|
"""dasem."""
from __future__ import absolute_import
from .fullmonty import Word2Vec
|
Python
| 0.000024
|
@@ -79,8 +79,33 @@
ord2Vec%0A
+%0A%0A__all__ = %5B'Word2Vec'%5D%0A
|
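For context, __all__ controls which names `from dasem import *` re-exports. A tiny sketch of the effect (the package name mypkg is hypothetical):

# mypkg/__init__.py -- mirrors the change above
from .fullmonty import Word2Vec

__all__ = ['Word2Vec']

# In client code, `from mypkg import *` now binds only Word2Vec;
# other helpers defined in fullmonty stay hidden.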
b1097e8b6050d7404d68643f1a8c7923d803c134
|
Update the admin with the latest atribute name changes
|
filer/admin/fileadmin.py
|
filer/admin/fileadmin.py
|
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from django.contrib.admin.util import unquote, flatten_fieldsets, get_deleted_objects, model_ngettext, model_format_dict
from django.utils.translation import ugettext as _
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.contrib import admin
from django import forms
from django.db.models import Q
from filer.admin.permissions import PrimitivePermissionAwareModelAdmin
from filer.models import Folder, FolderRoot, UnfiledImages, ImagesWithMissingData, File
from filer.admin.tools import *
from filer.models import tools
from django.conf import settings
# forms
class FileAdminChangeForm(forms.ModelForm):
class Meta:
model = File
#ModelAdmins
class FileAdmin(PrimitivePermissionAwareModelAdmin):
list_display = ('label',)
list_per_page = 10
search_fields = ['name', 'original_filename',]
raw_id_fields = ('owner',) #'contact',
# save_as hack, because without save_as it is impossible to hide the
# save_and_add_another if save_as is False.
# To show only save_and_continue and save in the submit row we need save_as=True
# and in render_change_form() override add and change to False.
save_as = True
form = FileAdminChangeForm
fieldsets = (
(None, {
'fields': ('name', 'owner','description')
}),
(None, {
'fields': ('is_public',)
}),
(_('Advanced'), {
'fields': ('_file',),
'classes': ('collapse',),
}),
)
def response_change(self, request, obj):
'''
Overrides the default to be able to forward to the directory listing
instead of the default change_list_view
'''
r = super(FileAdmin, self).response_change(request, obj)
#print r['Location']
if r['Location']:
# it was a successful save
if r['Location'] in ['../']:
# this means it was a save: redirect to the directory view
if obj.folder:
url = reverse('admin:filer-directory_listing',
kwargs={'folder_id': obj.folder.id})
else:
url = reverse('admin:filer-directory_listing-unfiled_images')
return HttpResponseRedirect(url)
else:
# this means it probably was a save_and_continue_editing
pass
return r
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
extra_context = {'show_delete': True}
context.update(extra_context)
return super(FileAdmin, self).render_change_form(request=request, context=context, add=False, change=False, form_url=form_url, obj=obj)
def delete_view(self, request, object_id, extra_context=None):
'''
Overrides the default to enable redirecting to the directory view after
deletion of a image.
we need to fetch the object and find out who the parent is
before super, because super will delete the object and make it impossible
to find out the parent folder to redirect to.
'''
parent_folder = None
try:
obj = self.queryset(request).get(pk=unquote(object_id))
parent_folder = obj.folder
except self.model.DoesNotExist:
obj = None
r = super(FileAdmin, self).delete_view(request=request, object_id=object_id, extra_context=extra_context)
url = r.get("Location", None)
if url in ["../../../../","../../"]:
if parent_folder:
url = reverse('admin:filer-directory_listing',
kwargs={'folder_id': parent_folder.id})
else:
url = reverse('admin:filer-directory_listing-unfiled_images')
return HttpResponseRedirect(url)
return r
def get_urls(self):
from django.conf.urls.defaults import patterns, url
urls = super(FileAdmin, self).get_urls()
#from filer import views
url_patterns = patterns('',
#url(r'^(?P<image_id>\d+)/export/$', self.admin_site.admin_view(views.export_image), name='image_filer-export_image'),
)
url_patterns.extend(urls)
return url_patterns
def get_model_perms(self, request):
"""
It seems this is only used for the list view. NICE :-)
"""
return {
'add': False,
'change': False,
'delete': False,
}
#def has_change_permission(self, request, obj=None):
# return False
#def add_view(self, request):
# return HttpResponseRedirect(reverse('admin:filer-directory_listing-root'))
#def changelist_view(self, request, extra_context=None):
# return HttpResponseRedirect(reverse('admin:filer-directory_listing-root'))
|
Python
| 0
|
@@ -1592,17 +1592,16 @@
lds': ('
-_
file',),
|
c1a3d40295a7c4b5f178ae78b49a90c317844371
|
Replace double quotes with single quotes for flake8 check
|
timed/reports/tests/test_notify_reviewers_unverified.py
|
timed/reports/tests/test_notify_reviewers_unverified.py
|
from datetime import date
import pytest
from django.core.management import call_command
from timed.employment.factories import UserFactory
from timed.projects.factories import ProjectFactory, TaskFactory
from timed.tracking.factories import ReportFactory
@pytest.mark.freeze_time('2017-8-4')
def test_notify_reviewers(db, mailoutbox):
"""Test time range 2017-7-1 till 2017-7-31."""
# a reviewer which will be notified
reviewer_work = UserFactory.create()
project_work = ProjectFactory.create()
project_work.reviewers.add(reviewer_work)
task_work = TaskFactory.create(project=project_work)
ReportFactory.create(date=date(2017, 7, 1), task=task_work,
verified_by=None)
# a reviewer who doesn't have any unverified reports
reviewer_no_work = UserFactory.create()
project_no_work = ProjectFactory.create()
project_no_work.reviewers.add(reviewer_no_work)
task_no_work = TaskFactory.create(project=project_no_work)
ReportFactory.create(date=date(2017, 7, 1), task=task_no_work,
verified_by=reviewer_no_work)
call_command(
'notify_reviewers_unverified',
'--cc=example@example.com',
'--message=This is a test'
)
# checks
mail = mailoutbox[0]
cc = mail.to[-1]
mail.to.pop()
for item in mail.body.split("\n"):
if "test" in item:
msg = item.strip()
assert len(mailoutbox) == 1
assert mail.to == [reviewer_work.email]
url = (
'http://localhost:4200/analysis?fromDate=2017-07-01&'
'toDate=2017-07-31&reviewer=%d&editable=1'
) % reviewer_work.id
assert url in mail.body
assert msg == 'This is a test'
assert cc == 'example@example.com'
|
Python
| 0.000001
|
@@ -1353,12 +1353,12 @@
lit(
-%22%5Cn%22
+'%5Cn'
):%0A
@@ -1371,14 +1371,14 @@
if
-%22
+'
test
-%22
+'
in
|
32e00001ec29b0fe13f8c9b2c4dbd61232ba348a
|
Update starting offset
|
tools/db/copy_nonpartitioned_sentences_to_partitions.py
|
tools/db/copy_nonpartitioned_sentences_to_partitions.py
|
#!/usr/bin/env python3
import time
from mediawords.db import connect_to_db
from mediawords.util.log import create_logger
from mediawords.util.process import run_alone
log = create_logger(__name__)
def copy_nonpartitioned_sentences_to_partitions():
"""Gradually copy sentences from "story_sentences_nonpartitioned" to "story_sentences_partitioned"."""
# How many stories the sentences of which to copy at the same time
stories_chunk_size = 50 * 1000
db = connect_to_db()
# With 512 MB, database can deduplicate (sort) sentences in memory instead of disk
db.query("SET work_mem TO '512MB'")
max_stories_id = db.query("SELECT MAX(stories_id) FROM stories").flat()[0]
if max_stories_id is None:
raise Exception("Max. stories ID is None.")
log.info("Max. stories ID: {}".format(max_stories_id))
for start_stories_id in range(44000000, max_stories_id + 1, stories_chunk_size):
end_stories_id = start_stories_id + stories_chunk_size - 1
log.info("Copying sentences of stories_id BETWEEN {} AND {} to the partitioned table...".format(
start_stories_id,
end_stories_id,
))
copied_sentences = db.query(
'SELECT copy_chunk_of_nonpartitioned_sentences_to_partitions(%(start_stories_id)s, %(end_stories_id)s)',
{'start_stories_id': start_stories_id, 'end_stories_id': end_stories_id}
).flat()[0]
log.info("Copied {} sentences of stories_id BETWEEN {} AND {} to the partitioned table.".format(
copied_sentences,
start_stories_id,
end_stories_id,
))
log.info("All done!")
# Weird, but otherwise Ansible deployments to mctest don't work due to this script exit(0)ing right away
while True:
time.sleep(1)
if __name__ == '__main__':
run_alone(copy_nonpartitioned_sentences_to_partitions)
|
Python
| 0.000001
|
@@ -876,11 +876,11 @@
nge(
-440
+999
0000
|
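The script above walks the stories_id space in fixed-size chunks so each copy query stays bounded. A standalone sketch of that chunking pattern (the work callback is a stand-in for the SQL call):

def process_in_chunks(max_id, chunk_size, work):
    """Invoke work(start, end) over inclusive ID ranges covering 1..max_id."""
    for start in range(1, max_id + 1, chunk_size):
        end = min(start + chunk_size - 1, max_id)
        work(start, end)

process_in_chunks(25, 10, lambda s, e: print("rows %d..%d" % (s, e)))
# rows 1..10 / rows 11..20 / rows 21..25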
9337a4debdbe69b522fa74ab8f621655fa2b6783
|
add live camera script
|
car/controller.py
|
car/controller.py
|
import time
import car
# valide cmd:
# Car directions : CAR_F, CAR_B, CAR_L, CAR_R
# Car speeds : CAR_U, CAR_D
# Camera control : CM_L, CM_R, CM_U, CM_D
# Car control : STOP
class Controller(object):
_car_direction = ['CAR_F', 'CAR_B', 'CAR_L', 'CAR_R']
_car_speed = ['CAR_U', 'CAR_D']
_camera_control = ['CM_L', 'CM_R', 'CM_U', 'CM_D']
_car_control = ['STOP']
def __init__(self, generator, car, camera):
self._generator = generator
self._run = True
self._car = car
self._camera = camera
self._car_f = 0
self._car_b = 0
self._car_l = 0
self._car_r = 0
def run(self):
self._generator.start()
self._car.start()
while self._run:
for code, value in self._generator.GetNext():  # use the injected generator, not the module-level gen
print code, value
if code in self._car_control:
self.ChangeCar(code, value)
elif code in self._car_direction:
self.ChangeCarDirection(code, value)
elif code in self._camera_control:
self.ChangeCamera(code, value)
elif code in self._car_speed:
self.ChangeCarSpeed(code, value)
def ChangeCar(self, code, value):
if code == 'STOP':
self._car.Stop()
def ChangeCarDirection(self, code, value):
if value in [0, 1]:
if code == 'CAR_F':
self._car_f = value
elif code == 'CAR_B':
self._car_b = value
elif code == 'CAR_R':
self._car_r = value
elif code == 'CAR_L':
self._car_l = value
l = 0
r = 0
if self._car_f:
l = 1
r = 1
elif self._car_b:
l = -1
r = -1
if self._car_r:
r = 0
elif self._car_l:
l = 0
print code, value, l, r
self._car._left_direction = l
self._car._right_direction = r
def ChangeCamera(self, code, value):
pass
def ChangeCarSpeed(self, code, value):
if value == 1:
if code == 'CAR_U':
self._car.SpeedUp()
elif code == 'CAR_D':
self._car.SlowDown()
def Terminate(self):
self._run = False
self._generator.Terminate()
self._car.Terminate()
self._generator.join()
self._car.join()
if __name__ == '__main__':
import generator_socket
gen = generator_socket.GeneratorSocket()
car = car.Car((11, 12, 10), (13, 14, 10), True)
c = Controller(gen, car, None)
try:
c.run()
except:
c.Terminate()
|
Python
| 0.000001
|
@@ -16,16 +16,42 @@
ort car%0A
+import os%0Aimport threading
%0A# valid
@@ -2206,16 +2206,320 @@
join()%0A%0A
+def EnableCamera():%0A while True:%0A print 'Open camera live'%0A os.system(%22%22%22gst-launch-1.0 -v v4l2src ! 'video/x-raw, width=640, height=480, framerate=30/1' ! queue ! videoconvert ! omxh264enc ! h264parse ! flvmux ! rtmpsink location='rtmp://139.196.106.212/rtmp/live live=1'%22%22%22)%0A time.sleep(1)%0A%0A
if __nam
@@ -2663,84 +2663,110 @@
%0A%0A
-c = Controller(gen, car, None)%0A%0A try:%0A c.run()%0A except:%0A c.Terminate
+live = threading.Thread(target = EnableCamera)%0A live.start()%0A c = Controller(gen, car, None)%0A c.run
()%0A%0A
|
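The diff launches the camera loop on its own thread so the blocking gst-launch call doesn't stall the controller. A minimal Python sketch of that pattern, with an Event for clean shutdown (the streaming command itself is replaced by a placeholder):

import threading
import time

def camera_loop(stop):
    while not stop.is_set():
        # os.system('gst-launch ...') would block here; sleep stands in for it.
        print("streaming...")
        time.sleep(1)

stop = threading.Event()
worker = threading.Thread(target=camera_loop, args=(stop,), daemon=True)
worker.start()     # camera runs in the background
time.sleep(3)      # the main thread runs the controller meanwhile
stop.set()         # ask the loop to exit
worker.join()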
608a0c75cba735e7d4a59fb941cd6e6135f3e7cf
|
Update reverse URL.
|
src/epiweb/apps/survey/views.py
|
src/epiweb/apps/survey/views.py
|
# -*- coding: utf-8 -*-
from django import forms
from django.template import Context, loader
from django.http import HttpResponse, HttpResponseRedirect
from django.db import transaction
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
from epiweb.apps.survey import utils
from epiweb.apps.survey import models
from epiweb.apps.survey import example
from epiweb.apps.survey import profile_data
from epidb_client import EpiDBClient
from django.conf import settings
sfh = None
@login_required
def thanks(request):
return render_to_response('survey/thanks.html')
@login_required
def index(request):
global sfh
if sfh is None:
survey = example.survey()
sfh = utils.SurveyFormHelper(survey, request.user)
if request.method == 'POST':
form = sfh.create_form(request.POST)
if form.is_valid():
id = utils.send_survey_response(request.user, form._survey, form.cleaned_data)
utils.save_survey_response(request.user, form._survey, id)
return HttpResponseRedirect(reverse('epiweb.apps.survey.survey_views.thanks'))
else:
form = sfh.create_form()
#js = utils.generate_js_helper(example.survey)
jsh = utils.JavascriptHelper(example.survey(), request.user)
js = jsh.get_javascript()
return render_to_response('survey/index.html', {
'form': form,
'js': js
})
@login_required
def profile_index(request):
global sfh
if sfh is None:
survey = profile_data.UserProfile()
sfh = utils.SurveyFormHelper(survey, request.user)
if request.method == 'POST':
form = sfh.create_form(request.POST)
if form.is_valid():
utils.send_profile(request.user, form._survey, form.cleaned_data)
utils.save_profile(request.user, form.cleaned_data)
return HttpResponseRedirect(reverse('epiweb.apps.survey.profile_views.index'))
else:
form = sfh.create_form(utils.get_profile(request.user))
jsh = utils.JavascriptHelper(profile_data.UserProfile(), request.user)
js = jsh.get_javascript()
return render_to_response('profile/index.html', {
'form': form,
'js': js
})
|
Python
| 0
|
@@ -1167,23 +1167,16 @@
.survey.
-survey_
views.th
@@ -1972,24 +1972,30 @@
.survey.
+views.
profile_
views.in
@@ -1986,22 +1986,16 @@
profile_
-views.
index'))
|
99e910f58fa54e9bce2518c6f9752ba1e8dbd6af
|
Stop tracking call size in bm diff
|
tools/profiling/microbenchmarks/bm_diff/bm_constants.py
|
tools/profiling/microbenchmarks/bm_diff/bm_constants.py
|
#!/usr/bin/env python2.7
#
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Configurable constants for the bm_*.py family """
_AVAILABLE_BENCHMARK_TESTS = [
'bm_fullstack_unary_ping_pong', 'bm_fullstack_streaming_ping_pong',
'bm_fullstack_streaming_pump', 'bm_closure', 'bm_cq', 'bm_call_create',
'bm_error', 'bm_chttp2_hpack', 'bm_chttp2_transport', 'bm_pollset',
'bm_metadata', 'bm_fullstack_trickle'
]
_INTERESTING = ('cpu_time', 'real_time', 'call_initial_size-median',
'locks_per_iteration', 'allocs_per_iteration',
'writes_per_iteration', 'atm_cas_per_iteration',
'atm_add_per_iteration', 'nows_per_iteration',
'cli_transport_stalls_per_iteration',
'cli_stream_stalls_per_iteration',
'svr_transport_stalls_per_iteration',
'svr_stream_stalls_per_iteration',
'http2_pings_sent_per_iteration')
|
Python
| 0
|
@@ -994,31 +994,26 @@
', '
-call_initial_size-media
+locks_per_iteratio
n',%0A
@@ -1029,20 +1029,21 @@
'
+al
loc
-k
s_per_it
@@ -1053,21 +1053,21 @@
tion', '
-alloc
+write
s_per_it
@@ -1093,21 +1093,22 @@
'
-write
+atm_ca
s_per_it
@@ -1118,27 +1118,27 @@
tion', 'atm_
-cas
+add
_per_iterati
@@ -1161,33 +1161,8 @@
- 'atm_add_per_iteration',
'no
@@ -1179,32 +1179,16 @@
ration',
-%0A
'cli_tr
|
dcc2821cac0619fc2ca5f486ad30416f3c3cfda9
|
Replace parsing with Python's ast
|
ce/expr/parser.py
|
ce/expr/parser.py
|
#!/usr/bin/env python
# vim: set fileencoding=UTF-8 :
from ..semantics import mpq
from .common import OPERATORS, ADD_OP, MULTIPLY_OP
from .biop import Expr  # Expr is constructed below but was never imported
def try_to_number(s):
try:
return mpq(s)
except (ValueError, TypeError):
return s
def _parse_r(s):
s = s.strip()
bracket_level = 0
operator_pos = -1
for i, v in enumerate(s):
if v == '(':
bracket_level += 1
if v == ')':
bracket_level -= 1
if bracket_level == 1 and v in OPERATORS:
operator_pos = i
break
if operator_pos == -1:
return s
a1 = _parse_r(s[1:operator_pos].strip())
a2 = _parse_r(s[operator_pos + 1:-1].strip())
return Expr(s[operator_pos], a1, a2)
|
Python
| 0.000004
|
@@ -49,16 +49,28 @@
F-8 :%0A%0A%0A
+import ast%0A%0A
from ..s
@@ -254,494 +254,490 @@
s%0A%0A%0A
-def _parse_r(s):%0A s = s.strip()%0A bracket_level = 0%0A operator_pos = -1%0A for i, v in enumerate(s):%0A if v == '(':%0A bracket_level += 1%0A if v == ')':%0A bracket_level -= 1%0A if bracket_level == 1 and v in OPERATORS:%0A operator_pos = i%0A break%0A if operator_pos == -1:%0A return s%0A a1 = _parse_r(s%5B1:operator_pos%5D.strip())%0A a2 = _parse_r(s%5Boperator_pos + 1:-1%5D.strip())%0A return Expr(s%5Boperator_pos%5D, a1, a2
+OPERATOR_MAP = %7B%0A ast.Add: ADD_OP,%0A ast.Mult: MULTIPLY_OP,%0A%7D%0A%0A%0Adef parse(s):%0A from .biop import Expr%0A def _parse_r(t):%0A try:%0A return t.n%0A except AttributeError:%0A pass%0A try:%0A return t.id%0A except AttributeError:%0A op = OPERATOR_MAP%5Bt.op.__class__%5D%0A a1 = _parse_r(t.left)%0A a2 = _parse_r(t.right)%0A return Expr(op, a1, a2)%0A return _parse_r(ast.parse(s, mode='eval').body
)%0A
|
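The rewrite delegates tokenizing and operator precedence to Python's own ast module instead of counting brackets by hand. A self-contained sketch of the same idea, reducing an arithmetic expression to nested tuples (the tuple output is illustrative; the real code builds Expr nodes):

import ast

OPERATOR_MAP = {ast.Add: '+', ast.Mult: '*'}

def parse(source):
    def walk(node):
        if isinstance(node, ast.Constant):   # numeric literal
            return node.value
        if isinstance(node, ast.Name):       # bare variable
            return node.id
        op = OPERATOR_MAP[type(node.op)]     # BinOp: recurse into both operands
        return (op, walk(node.left), walk(node.right))
    return walk(ast.parse(source, mode='eval').body)

print(parse('(a + 2) * b'))   # ('*', ('+', 'a', 2), 'b')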
38fe4ef9df7e09709611bb3aca1aea4f2d42316a
|
add encode_url_path method to util
|
pifx/util.py
|
pifx/util.py
|
# -*- coding: utf-8 -*-
#
# Copyright © 2015 Chaoyi Zha <me@cydrobolt.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from .constants import A_OK_HTTP_CODES, A_ERROR_HTTP_CODES
def generate_auth_header(api_key):
headers = {
"Authorization": "Bearer {}".format(api_key),
}
return headers
def arg_tup_to_dict(argument_tuples):
"""Given a set of argument tuples, set their value in a data dictionary if not blank"""
data = dict()
for arg_name, arg_val in argument_tuples:
if arg_val is not None:
data[arg_name] = arg_val
return data
def parse_data(parsed_data):
"""Given parsed response, return correct return values"""
return parsed_data['results']
def parse_response(response):
"""Parse JSON API response, return object."""
parsed_response = json.loads(response.text)
return parsed_response
def handle_error(response):
"""Raise appropriate exceptions if necessary."""
status_code = response.status_code
if status_code not in A_OK_HTTP_CODES:
error_explanation = A_ERROR_HTTP_CODES.get(status_code)
raise_error = "{}: {}".format(status_code, error_explanation)
raise Exception(raise_error)
else:
return True
|
Python
| 0.000001
|
@@ -629,16 +629,30 @@
rt json%0A
+import urllib%0A
from .co
@@ -1758,8 +1758,221 @@
rn True%0A
+def encode_url_path(url):%0A %22%22%22Encodes the path url string replacing special characters with properly escaped sequences. %0A Not intended for use with query string parameters. %22%22%22%0A return urllib.quote(url)%0A
|
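One caveat on the diff: urllib.quote exists only on Python 2; Python 3 moved it to urllib.parse. A version-agnostic sketch of the same helper:

try:
    from urllib import quote  # Python 2
except ImportError:
    from urllib.parse import quote  # Python 3

def encode_url_path(url):
    """Percent-encode a path segment; not intended for query strings."""
    return quote(url)

print(encode_url_path('lights/label:Kitchen Lamp'))
# -> lights/label%3AKitchen%20Lamp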
b8e26d2e5b48a77cfd95513ef7b0235747f33003
|
raise MailConnectionError if connection gets interrupted (instead of MailError)
|
netsecus/mail_handler.py
|
netsecus/mail_handler.py
|
from __future__ import unicode_literals
import logging
import time
import traceback
from . import database
from . import helper
from . import commands
from . import submission
def mail_main(config):
try:
mail_run(config)
except BaseException as e:
on_error(config, e)
raise
def mail_run(config):
db = database.Database(config)
helper.patch_imaplib()
ignored_uids = set()
while True:
try:
mainloop(config, db, ignored_uids)
except (OSError, helper.MailConnectionError) as e:
on_error(config, e)
time.sleep(config("mail.delay"))
def on_error(config, e):
logging.exception(e)
if config('loglevel') == 'debug':
traceback.print_exc()
def mainloop(config, db, ignored_uids):
try:
username = config('mail.username')
except KeyError:
username = config('mail.address')
imapmail = loginIMAP(
config("mail.imap_server"),
username,
config("mail.password"),
config("mail.ssl"),
config("loglevel") == "debug")
imapmail._command("CAPABILITY")
capabilities = imapmail.readline().decode("utf-8")
helper.checkResult(imapmail, b"OK")
if "UTF8" in capabilities:
imapmail._command("ENABLE", "UTF8")
helper.checkResult(imapmail, b"ENABLED")
helper.checkResult(imapmail, b"OK")
imapmail._command("ENABLE", "UTF8=ACCEPT")
helper.checkResult(imapmail, b"ENABLED")
helper.checkResult(imapmail, b"OK")
logging.debug("Server supports UTF8")
imapmail._command("IDLE") # Check whether server supports IDLE
if "idling" in imapmail.readline().decode("utf-8"):
def idle_loop():
imapmail.send(b"DONE\r\n")
imapmail.readline()
mailProcessing(config, db, imapmail, ignored_uids)
imapmail._command("IDLE")
logging.debug("Entering IDLE state.")
logging.debug("Server supports IDLE.")
idle_loop()
while True:
line = imapmail.readline().decode("utf-8")
if not line:
raise helper.MailError('Connection interrupted')
if "EXISTS" in line:
# We got mail!
idle_loop()
else:
logging.debug("Server lacks support for IDLE... Falling back to delay.")
while True:
try:
mailProcessing(config, db, imapmail, ignored_uids)
time.sleep(config("mail.delay"))
except KeyboardInterrupt:
logoutIMAP(imapmail)
raise
def mailProcessing(config, db, imapmail, ignored_uids):
filterCriteria = "SUBJECT \"Abgabe\" UNDELETED"
mails = commands.filter(config, imapmail, [], filterCriteria)
for uid, message in mails:
if uid in ignored_uids:
continue
try:
submission.handle_mail(config, db, imapmail, uid, message)
except helper.MailError as me:
ignored_uids.add(me.uid)
on_error(config, me)
def loginIMAP(server, address, password, ssl=True, debug=False):
if not address or not password:
err = "IMAP login information incomplete. (Missing address or password)"
logging.error(err)
raise ValueError(err)
imapmail = helper.create_imap_conn(server, ssl, debug)
imapmail.login(address, password)
logging.debug("IMAP login (%s on %s)" % (address, server))
return imapmail
def logoutIMAP(imapmail):
imapmail.close()
imapmail.logout()
logging.debug("IMAP logout")
|
Python
| 0
|
@@ -2139,24 +2139,34 @@
helper.Mail
+Connection
Error('Conne
|
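The one-word fix matters because the retry loop catches (OSError, helper.MailConnectionError), so an interrupted connection is now retried instead of crashing the loop. A generic sketch of this kind of exception hierarchy; the subclass relationship shown here is an assumption, since only the class names appear in the source:

class MailError(Exception):
    """Base class for mail failures."""

class MailConnectionError(MailError):
    """Transient transport failure: worth retrying."""

def fetch():
    raise MailConnectionError("Connection interrupted")

try:
    fetch()
except MailConnectionError:
    print("transient failure, retrying after delay")
except MailError:
    print("permanent failure, skipping this mail")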
d3a5e596f32d1139af29fc394f185fe120fa20dd
|
clean up FIXME
|
Lib/sandbox/models/cox.py
|
Lib/sandbox/models/cox.py
|
import shutil
import tempfile
import numpy as N
from scipy.sandbox.models import survival, model
class discrete:
"""
A simple little class for working with discrete random vectors.
"""
def __init__(self, x, w=None):
self.x = N.squeeze(x)
if self.x.shape == ():
self.x = N.array([self.x])
self.n = self.x.shape[0]
if w is None:
w = N.ones(self.n, N.float64)
else:
if w.shape[0] != self.n:
raise ValueError, 'incompatible shape for weights w'
if N.any(N.less(w, 0)):
raise ValueError, 'weights should be non-negative'
self.w = w / w.sum()
def mean(self, f=None):
if f is None:
fx = self.x
else:
fx = f(self.x)
return (fx * self.w).sum()
def cov(self):
mu = self.mean()
dx = self.x - N.multiply.outer(mu, self.x.shape[1])
return N.dot(dx, N.transpose(dx))
class observation(survival.right_censored):
def __getitem__(self, item):
if self.namespace is not None:
return self.namespace[item]
else:
return getattr(self, item)
def __init__(self, time, delta, namespace=None):
self.namespace = namespace
survival.right_censored.__init__(self, time, delta)
def __call__(self, formula, time=None, **extra):
return formula(namespace=self, time=time, **extra)
class coxph(model.likelihood_model):
def __init__(self, subjects, formula, time_dependent=False):
self.subjects, self.formula = subjects, formula
self.time_dependent = time_dependent
self.initialize(self.subjects)
def initialize(self, subjects):
self.failures = {}
for i in range(len(subjects)):
s = subjects[i]
if s.delta:
if not self.failures.has_key(s.time):
self.failures[s.time] = [i]
else:
self.failures[s.time].append(i)
self.failure_times = self.failures.keys()
self.failure_times.sort()
def cache(self):
if self.time_dependent:
self.cachedir = tempfile.mkdtemp()
self.design = {}
self.risk = {}
first = True
for t in self.failures.keys():
if self.time_dependent:
d = N.array([s(self.formula, time=t)
for s in self.subjects]).astype('<f8')
dshape = d.shape
dfile = file(tempfile.mkstemp(dir=self.cachedir)[1], 'w')
d.tofile(dfile)
dfile.close()
del(d)
self.design[t] = N.memmap(dfile.name,
dtype=N.dtype('<f8'),
shape=dshape)
elif first:
d = N.array([s(self.formula, time=t)
for s in self.subjects]).astype(N.float64)
self.design[t] = d
else:
self.design[t] = d
self.risk[t] = N.compress([s.atrisk(t) for s in self.subjects],
N.arange(self.design[t].shape[0]),axis=-1)
def __del__(self):
shutil.rmtree(self.cachedir, ignore_errors=True)
def logL(self, b, ties='breslow'):
logL = 0
for t in self.failures.keys():
fail = self.failures[t]
d = len(fail)
risk = self.risk[t]
Zb = N.dot(self.design[t], b)
logL += Zb[fail].sum()
if ties == 'breslow':
s = N.exp(Zb[risk]).sum()
logL -= N.log(N.exp(Zb[risk]).sum()) * d
elif ties == 'efron':
s = N.exp(Zb[risk]).sum()
r = N.exp(Zb[fail]).sum()
for j in range(d):
logL -= N.log(s - j * r / d)
elif ties == 'cox':
raise NotImplementedError, 'Cox tie breaking method not implemented'
else:
raise NotImplementedError, 'tie breaking method not recognized'
return logL
def score(self, b, ties='breslow'):
score = 0
for t in self.failures.keys():
fail = self.failures[t]
d = len(fail)
risk = self.risk[t]
Z = self.design[t]
score += Z[fail].sum()
if ties == 'breslow':
w = N.exp(N.dot(Z, b))
rv = discrete(Z[risk], w=w[risk])
score -= rv.mean() * d
elif ties == 'efron':
w = N.exp(N.dot(Z, b))
score += Z[fail].sum()
for j in range(d):
efron_w = w
efron_w[fail] -= j * w[fail] / d  # loop variable is j; i is undefined here
rv = discrete(Z[risk], w=efron_w[risk])
score -= rv.mean()
elif ties == 'cox':
raise NotImplementedError, 'Cox tie breaking method not implemented'
else:
raise NotImplementedError, 'tie breaking method not recognized'
# FIXME: score is an int. it has no shape
# is it that we shouldn't be using an int above
# or that we shouldn't be looking at shape here
if score.shape == ():
score = N.array([score])
return score
def information(self, b, ties='breslow'):
info = 0
score = 0
for t in self.failures.keys():
fail = self.failures[t]
d = len(fail)
risk = self.risk[t]
Z = self.design[t]
if ties == 'breslow':
w = N.exp(N.dot(Z, b))
rv = discrete(Z[risk], w=w[risk])
info += rv.cov()
elif ties == 'efron':
w = N.exp(N.dot(Z, b))
score += Z[fail].sum()
for j in range(d):
efron_w = w
efron_w[fail] -= j * w[fail] / d  # loop variable is j; i is undefined here
rv = discrete(Z[risk], w=efron_w[risk])
info += rv.cov()
elif ties == 'cox':
raise NotImplementedError, 'Cox tie breaking method not implemented'
else:
raise NotImplementedError, 'tie breaking method not recognized'
return score
if __name__ == '__main__':
import numpy.random as R
n = 100
X = N.array([0]*n + [1]*n)
b = 0.4
lin = 1 + b*X
Y = R.standard_exponential((2*n,)) / lin
delta = R.binomial(1, 0.9, size=(2*n,))
subjects = [observation(Y[i], delta[i]) for i in range(2*n)]
for i in range(2*n):
subjects[i].X = X[i]
import formula as F
x = F.quantitative('X')
f = F.formula(x)
c = coxph(subjects, f)
c.cache()
# c.newton([0.4])
|
Python
| 0.00007
|
@@ -5155,249 +5155,33 @@
-# FIXME: score is an int. it has no shape%0A # is it that we shouldn't be using an int above%0A # or that we shouldn't be looking at shape here%0A if score.shape == ():%0A score = N.array(%5Bscore%5D)%0A return score
+return = N.array(%5Bscore%5D)
%0A%0A
|
cc4b1130958c7db5b68c52f018c3a6c41a8a8513
|
use new readPlist() and writePlist() functions
|
Lib/test/test_plistlib.py
|
Lib/test/test_plistlib.py
|
# Copyright (C) 2003 Python Software Foundation
import unittest
import plistlib
import os
from test import test_support
class TestPlistlib(unittest.TestCase):
def tearDown(self):
try:
os.unlink(test_support.TESTFN)
except:
pass
def _create(self):
pl = plistlib.Plist(
aString="Doodah",
aList=["A", "B", 12, 32.1, [1, 2, 3]],
aFloat = 0.1,
anInt = 728,
aDict=plistlib.Dict(
anotherString="<hello & hi there!>",
aUnicodeValue=u'M\xe4ssig, Ma\xdf',
aTrueValue=True,
aFalseValue=False,
),
someData = plistlib.Data("<binary gunk>"),
someMoreData = plistlib.Data("<lots of binary gunk>" * 10),
)
pl['anotherInt'] = 42
try:
from xml.utils.iso8601 import parse
import time
except ImportError:
pass
else:
pl['aDate'] = plistlib.Date(time.mktime(time.gmtime()))
return pl
def test_create(self):
pl = self._create()
self.assertEqual(pl["aString"], "Doodah")
self.assertEqual(pl["aDict"]["aFalseValue"], False)
def test_io(self):
pl = self._create()
pl.write(test_support.TESTFN)
pl2 = plistlib.Plist.fromFile(test_support.TESTFN)
self.assertEqual(dict(pl), dict(pl2))
def test_stringio(self):
from StringIO import StringIO
f = StringIO()
pl = self._create()
pl.write(f)
pl2 = plistlib.Plist.fromFile(StringIO(f.getvalue()))
self.assertEqual(dict(pl), dict(pl2))
def test_cstringio(self):
from cStringIO import StringIO
f = StringIO()
pl = self._create()
pl.write(f)
pl2 = plistlib.Plist.fromFile(StringIO(f.getvalue()))
self.assertEqual(dict(pl), dict(pl2))
def test_main():
test_support.run_unittest(TestPlistlib)
if __name__ == '__main__':
test_main()
|
Python
| 0
|
@@ -314,20 +314,19 @@
listlib.
-Plis
+Dic
t(%0A
@@ -1298,23 +1298,38 @@
pl
-.write(
+istlib.writePlist(pl,
test_sup
@@ -1364,30 +1364,25 @@
listlib.
+read
Plist
-.fromFile
(test_su
@@ -1562,39 +1562,54 @@
ate()%0A pl
-.write(
+istlib.writePlist(pl,
f)%0A pl2 =
@@ -1614,38 +1614,33 @@
= plistlib.
+read
Plist
-.fromFile
(StringIO(f.
@@ -1833,15 +1833,30 @@
pl
-.write(
+istlib.writePlist(pl,
f)%0A
@@ -1881,22 +1881,17 @@
lib.
+read
Plist
-.fromFile
(Str
|
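The test now uses the module-level readPlist()/writePlist() helpers rather than Plist methods. Those helpers were deprecated in turn; on Python 3 the equivalents are plistlib.load()/dump(). A small round-trip sketch with the modern names:

import plistlib
from io import BytesIO

data = {"aString": "Doodah", "anInt": 728}

buf = BytesIO()
plistlib.dump(data, buf)            # successor of writePlist()
buf.seek(0)
assert plistlib.load(buf) == data   # successor of readPlist()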
9a73dcfbc77236d11387909c3f97d3712b56fb2a
|
clean out commented code from old version
|
juice.py
|
juice.py
|
# replace all functionality of the disgusting mess below
'''
# read and process raw data, store processed data
# using config(?) and io_db, calculate the following and store its end-result data to db:
# - raw profitability ("hypothetical")
# - competitiveness of orders per typeID per hub
# - END-RESULT [DB]: competition-factored profitability ("actual")
# - END-RESULT [DB]: per-orderID update behaviours
import config
import io_http
import arrow
# remove specific dependency:
import requests
def group_dictvalue(raw_list, dictvalue):
new_dict = {}
for i in raw_list:
new_key = i.pop(dictvalue)
if new_key in new_dict:
new_dict[new_key].append(i)
else:
new_dict[new_key] = [i]
return new_dict
def market_import(hub_spec):
data_configs = config.market_configs(hub_spec)
hub_regionid = str(data_configs[0])
url_base = io_http.url_format(hub_regionid, 'orders')
url_market = url_base + '?page='
data_pages = requests.get(url_base).json()['pageCount']
url_set = [url_market + str(x) for x in range(1, data_pages+1)]
data_res = io_http.url_async(url_set, data_pages)
data_items = [x for i in data_res for x in i['items']]
return data_items, data_configs
def market_distill(raw_list, configs):
data_total = raw_list
hub_stationid = configs[1]
data_hubonly = [x for x in data_total if hub_stationid == x['stationID']]
data_timestamp = data_hubonly
for i in range(0, len(data_hubonly)):
order_time = arrow.get(data_hubonly[i]['issued'])
data_timestamp[i]['issued'] = order_time.timestamp
sort_choice = 'type'
data_grouped_type = group_dictvalue(data_timestamp, sort_choice)
data_grouped_buysell = {}
sort_choice = 'buy'
for k,v in data_grouped_type.items():
buysell_grouped = group_dictvalue(v, sort_choice)
data_grouped_buysell[k] = [buysell_grouped]
sort_choice = 'stationID'
data_grouped_station = {}
for k,v in data_grouped_buysell.items():
for order_pair in v:
for order_type, order in order_pair.items():
for attribute in order:
id_subgroup = attribute.pop(sort_choice)
data_grouped_station[k] = {id_subgroup: v}
return data_grouped_station
def market_context(raw_list, configs):
data_distilled = raw_list
hub_regionid = str(configs[0])
type_ids = data_distilled.keys()
url_context = io_http.url_format(hub_regionid, 'context')
url_set = [url_context + str(x) + '/history/' for x in type_ids]
data_res = io_http.url_async(url_set, len(type_ids))
# need to restructure this so the typeID is preserved
# data_context = [x for i in data_res for x in i['items']]
# make a new list that contains:
# - pricing/volume info
# add to dict such that {'typeid': [[orders],[context]]
data_contextualised = {}
return data_contextualised'''
|
Python
| 0
|
@@ -1,2937 +1,40 @@
# re
-place all functionality of the disgusting mess below%0A%0A'''%0A# read and process raw data, store processed data%0A%0A# using config(?) and io_db, calculate the following and store its end-result data to db:%0A# - raw profitability (%22hypothetical%22)%0A# - competitiveness of orders per typeID per hub%0A# - END-RESULT %5BDB%5D: competition-factored profitability (%22actual%22)%0A# - END-RESULT %5BDB%5D: per-orderID update behaviours%0A%0Aimport config%0Aimport io_http%0Aimport arrow%0A# remove specific dependency:%0Aimport requests%0A%0A%0Adef group_dictvalue(raw_list, dictvalue):%0A new_dict = %7B%7D%0A for i in raw_list:%0A new_key = i.pop(dictvalue)%0A if new_key in new_dict:%0A new_dict%5Bnew_key%5D.append(i)%0A else:%0A new_dict%5Bnew_key%5D = %5Bi%5D%0A return new_dict%0A%0A%0Adef market_import(hub_spec):%0A data_configs = config.market_configs(hub_spec)%0A hub_regionid = str(data_configs%5B0%5D)%0A url_base = io_http.url_format(hub_regionid, 'orders')%0A url_market = url_base + '?page='%0A data_pages = requests.get(url_base).json()%5B'pageCount'%5D%0A url_set = %5Burl_market + str(x) for x in range(1, data_pages+1)%5D%0A data_res = io_http.url_async(url_set, data_pages)%0A data_items = %5Bx for i in data_res for x in i%5B'items'%5D%5D%0A return data_items, data_configs%0A%0A%0Adef market_distill(raw_list, configs):%0A data_total = raw_list%0A hub_stationid = configs%5B1%5D%0A data_hubonly = %5Bx for x in data_total if hub_stationid == x%5B'stationID'%5D%5D%0A data_timestamp = data_hubonly%0A for i in range(0, len(data_hubonly)):%0A order_time = arrow.get(data_hubonly%5Bi%5D%5B'issued'%5D)%0A data_timestamp%5Bi%5D%5B'issued'%5D = order_time.timestamp%0A sort_choice = 'type'%0A data_grouped_type = group_dictvalue(data_timestamp, sort_choice)%0A data_grouped_buysell = %7B%7D%0A sort_choice = 'buy'%0A for k,v in data_grouped_type.items():%0A buysell_grouped = group_dictvalue(v, sort_choice)%0A data_grouped_buysell%5Bk%5D = %5Bbuysell_grouped%5D%0A sort_choice = 'stationID'%0A data_grouped_station = %7B%7D%0A for k,v in data_grouped_buysell.items():%0A for order_pair in v:%0A for order_type, order in order_pair.items():%0A for attribute in order:%0A id_subgroup = attribute.pop(sort_choice)%0A data_grouped_station%5Bk%5D = %7Bid_subgroup: v%7D%0A return data_grouped_station%0A%0A%0Adef market_context(raw_list, configs):%0A data_distilled = raw_list%0A hub_regionid = str(configs%5B0%5D)%0A type_ids = data_distilled.keys()%0A url_context = io_http.url_format(hub_regionid, 'context')%0A url_set = %5Burl_context + str(x) + '/history/' for x in type_ids%5D%0A data_res = io_http.url_async(url_set, len(type_ids))%0A # need to restructure this so the typeID is preserved%0A # data_context = %5Bx for i in data_res for x in i%5B'items'%5D%5D%0A # make a new list that contains:%0A # - pricing/volume info%0A # add to dict such that %7B'typeid': %5B%5Borders%5D,%5Bcontext%5D%5D%0A data_contextualised = %7B%7D%0A return data_contextualised'''
+ad from db data and provide analysis
|
e112a2651357f586bd1b5a6a4378ac46e3407d58
|
add v1.1.2 (#22081)
|
var/spack/repos/builtin/packages/py-awkward1/package.py
|
var/spack/repos/builtin/packages/py-awkward1/package.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyAwkward1(PythonPackage):
"""ROOT I/O in pure Python and NumPy."""
git = "https://github.com/scikit-hep/awkward-1.0.git"
url = "https://github.com/scikit-hep/awkward-1.0/archive/0.3.1.tar.gz"
homepage = "https://awkward-array.org"
maintainers = ['vvolkl']
version('0.3.1', sha256='7126d9feab8828b0b4f4c6dbc9e28c269a91e28eef4a6033d7ebb5db21f1dab3')
patch('pybind11.patch')
depends_on('py-setuptools', type='build')
depends_on('python@3.6:', type=('build', 'run'))
depends_on('py-numpy@1.13.1:')
depends_on('py-pybind11')
depends_on('rapidjson')
depends_on('cmake', type='build')
|
Python
| 0
|
@@ -499,16 +499,112 @@
olkl'%5D%0A%0A
+ version('1.1.2', sha256='626e3a6a2a92dd67abc8692b1ebfa1b447b9594352d6ce8c86c37d7299dc4602')%0A
vers
|
50411f83942ff033b4b55ef72d595e6d3ab9949f
|
add version 2.9.1 (#25646)
|
var/spack/repos/builtin/packages/py-psycopg2/package.py
|
var/spack/repos/builtin/packages/py-psycopg2/package.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPsycopg2(PythonPackage):
"""Python interface to PostgreSQL databases"""
homepage = "https://psycopg.org/"
pypi = "psycopg2/psycopg2-2.8.6.tar.gz"
version('2.8.6', sha256='fb23f6c71107c37fd667cb4ea363ddeb936b348bbd6449278eb92c189699f543')
version('2.7.5', sha256='eccf962d41ca46e6326b97c8fe0a6687b58dfc1a5f6540ed071ff1474cea749e')
# https://www.psycopg.org/docs/install.html#prerequisites
depends_on('python@2.7:2.8,3.4:3.8', type=('build', 'link', 'run'), when='@2.8:')
depends_on('python@2.6:2.8,3.2:3.7', type=('build', 'link', 'run'), when='@2.7')
depends_on('py-setuptools', type='build')
depends_on('postgresql@9.1:12', type=('build', 'link', 'run'))
|
Python
| 0
|
@@ -381,16 +381,112 @@
ar.gz%22%0A%0A
+ version('2.9.1', sha256='de5303a6f1d0a7a34b9d40e4d3bef684ccc44a49bbe3eb85e3c0bffb4a131b7c')%0A
vers
@@ -732,16 +732,94 @@
uisites%0A
+ depends_on('python@3.6:3.9', type=('build', 'link', 'run'), when='@2.9:')%0A
depe
@@ -892,17 +892,16 @@
en='@2.8
-:
')%0A d
@@ -974,16 +974,17 @@
when='@
+:
2.7')%0A
@@ -1027,16 +1027,97 @@
build')%0A
+ depends_on('postgresql@9.1:13', type=('build', 'link', 'run'), when='@2.9:')%0A
depe
@@ -1173,10 +1173,24 @@
, 'run')
+, when='@:2.8'
)%0A
|
5d5e6a505bf7282fdef4ca3b3555ecf6f3efa137
|
Update __copyright__
|
typepy/__version__.py
|
typepy/__version__.py
|
# encoding: utf-8
from datetime import datetime
__author__ = "Tsuyoshi Hombashi"
__copyright__ = "Copyright 2017-{}, {}".format(datetime.now().year, __author__)
__license__ = "MIT License"
__version__ = "0.6.5"
__maintainer__ = __author__
__email__ = "tsuyoshi.hombashi@gmail.com"
|
Python
| 0.000003
|
@@ -16,40 +16,8 @@
-8%0A%0A
-from datetime import datetime%0A%0A%0A
__au
@@ -80,11 +80,8 @@
2017
--%7B%7D
, %7B%7D
@@ -93,29 +93,8 @@
mat(
-datetime.now().year,
__au
|
39fbd9fabaa9945fe33475afe23c109711679192
|
Make output more compact
|
Lib/cu2qu/benchmark.py
|
Lib/cu2qu/benchmark.py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import random
import timeit
MAX_ERR = 5
SETUP_CODE = '''
from %(module)s import %(function)s
from %(benchmark_module)s import %(setup_function)s
args = %(setup_function)s()
'''
def generate_curve():
return [
tuple(float(random.randint(0, 2048)) for coord in range(2))
for point in range(4)]
def setup_curve_to_quadratic():
return generate_curve(), MAX_ERR
def setup_curves_to_quadratic():
num_curves = 3
return (
[generate_curve() for curve in range(num_curves)],
[MAX_ERR] * num_curves)
def run_benchmark(
benchmark_module, module, function, setup_suffix='', repeat=1000):
setup_func = 'setup_' + function
if setup_suffix:
print('%s with %s:' % (function, setup_suffix))
setup_func += '_' + setup_suffix
else:
print('%s:' % function)
results = timeit.repeat(
'%s(*args)' % function,
setup=(SETUP_CODE % {
'benchmark_module': benchmark_module, 'setup_function': setup_func,
'module': module, 'function': function}),
repeat=repeat, number=1)
print('min: %dus' % (min(results) * 1000000.))
print('avg: %dus' % (sum(results) / len(results) * 1000000.))
print()
def main():
run_benchmark('cu2qu.benchmark', 'cu2qu', 'curve_to_quadratic')
run_benchmark('cu2qu.benchmark', 'cu2qu', 'curves_to_quadratic')
if __name__ == '__main__':
random.seed(1)
main()
|
Python
| 0
|
@@ -1492,16 +1492,24 @@
function
+, end=''
)%0A re
@@ -1775,13 +1775,14 @@
nt('
-min:
+%5Ctavg=
%25dus
@@ -1786,18 +1786,33 @@
dus' %25 (
-mi
+sum(results) / le
n(result
@@ -1829,52 +1829,38 @@
00.)
-)
+,
%0A
-print('avg: %25dus' %25 (sum(results) / le
+ '%5Ctmin=%25dus' %25 (mi
n(re
@@ -1879,24 +1879,25 @@
0000.))%0A
+#
print()%0A%0A%0Ade
|
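The benchmark driver's core trick is timeit.repeat(..., number=1), which yields one wall-clock sample per run so min and average can be reported separately. A stand-alone sketch of that pattern with a trivial statement under test:

import timeit

results = timeit.repeat(
    'sorted(data)',                         # statement under test
    setup='data = list(range(1000))[::-1]',
    repeat=1000, number=1)                  # 1000 independent single runs

print('\tavg=%dus\tmin=%dus' % (
    sum(results) / len(results) * 1e6, min(results) * 1e6))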
d3e673069977c392eb292ce8b313f6dba4da4d9f
|
Fix these to use non-deprecated APIs, i.e. get_content_maintype() and get_content_subtype().
|
Lib/email/_compat21.py
|
Lib/email/_compat21.py
|
# Copyright (C) 2002 Python Software Foundation
# Author: barry@zope.com
"""Module containing compatibility functions for Python 2.1.
"""
from cStringIO import StringIO
from types import StringType, UnicodeType
False = 0
True = 1
# This function will become a method of the Message class
def walk(self):
"""Walk over the message tree, yielding each subpart.
The walk is performed in depth-first order. This method is a
generator.
"""
parts = []
parts.append(self)
if self.is_multipart():
for subpart in self.get_payload():
parts.extend(subpart.walk())
return parts
# Python 2.2 spells floor division //
def _floordiv(i, j):
"""Do a floor division, i/j."""
return i / j
def _isstring(obj):
return isinstance(obj, StringType) or isinstance(obj, UnicodeType)
# These two functions are imported into the Iterators.py interface module.
# The Python 2.2 version uses generators for efficiency.
def body_line_iterator(msg, decode=False):
"""Iterate over the parts, returning string payloads line-by-line.
Optional decode (default False) is passed through to .get_payload().
"""
lines = []
for subpart in msg.walk():
payload = subpart.get_payload(decode=decode)
if _isstring(payload):
for line in StringIO(payload).readlines():
lines.append(line)
return lines
def typed_subpart_iterator(msg, maintype='text', subtype=None):
"""Iterate over the subparts with a given MIME type.
Use `maintype' as the main MIME type to match against; this defaults to
"text". Optional `subtype' is the MIME subtype to match against; if
omitted, only the main type is matched.
"""
parts = []
for subpart in msg.walk():
if subpart.get_main_type('text') == maintype:
if subtype is None or subpart.get_subtype('plain') == subtype:
parts.append(subpart)
return parts
|
Python
| 0.000001
|
@@ -1793,24 +1793,25 @@
get_
+content_
main
-_
type(
-'text'
) ==
@@ -1871,16 +1871,24 @@
get_
+content_
subtype(
'pla
@@ -1887,15 +1887,8 @@
ype(
-'plain'
) ==
|
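The replacement accessors are part of the standard email API and parse the Content-Type header on demand. A quick, runnable demonstration:

from email.message import Message

msg = Message()
msg['Content-Type'] = 'text/plain; charset="utf-8"'

print(msg.get_content_maintype())  # text
print(msg.get_content_subtype())   # plain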
364d0a78725539a58862f672f718e4bf966da2f5
|
add stats-per-day route
|
plogx/app.py
|
plogx/app.py
|
from flask import Flask
from flask import render_template
from flask.ext.pymongo import PyMongo
import database
from bson.json_util import dumps
from datetime import datetime
app = Flask("log_db")
mongo = PyMongo(app)
app.debug = True
@app.route('/')
def overview():
return render_template('index.html')
@app.route('/all_items')
def all_items():
return dumps(database.all_log_items(mongo.db))
if __name__ == "__main__":
app.run()
|
Python
| 0.000005
|
@@ -245,11 +245,11 @@
ute(
-'/'
+%22/%22
)%0Ade
@@ -289,17 +289,17 @@
emplate(
-'
+%22
index.ht
@@ -300,17 +300,17 @@
dex.html
-'
+%22
)%0A%0A@app.
@@ -315,17 +315,17 @@
p.route(
-'
+%22
/all_ite
@@ -330,9 +330,9 @@
tems
-'
+%22
)%0Ade
@@ -398,16 +398,242 @@
o.db))%0A%0A
+@app.route(%22/stats_per_day/%3Cint:date%3E%22)%0Adef stats_per_day(date):%0A d = str(date)%0A day = datetime(int(d%5B:4%5D), int(d%5B4:6%5D), int(d%5B6:%5D))%0A log_items = database.get_stats_per_day(mongo.db, day)%0A return dumps(log_items)%0A%0A
if __nam
|
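The new route slices a YYYYMMDD integer into year, month, and day by string positions. An equivalent, slightly more defensive sketch using strptime, which also rejects malformed dates:

from datetime import datetime

def day_from_int(date):
    # 20170704 -> datetime(2017, 7, 4); strptime validates the digits.
    return datetime.strptime(str(date), "%Y%m%d")

print(day_from_int(20170704))  # 2017-07-04 00:00:00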
9bb2b19dc9c99513dad05ac39d22b7981968cb2b
|
Revert "Added indication when tester form doesn't validate. Also, removed redirects in tester as they are not needed."
|
routes.py
|
routes.py
|
import random
from BinTree import BinTree
import flask
import flask_login
import forms
import db
app = flask.Flask(__name__)
app.config.from_object('config')
login_manager = flask_login.LoginManager()
login_manager.init_app(app)
class User(flask_login.UserMixin):
def __init__(self, username, primary_id, active=True):
self.username = username
self.primary_id = primary_id
self.active = active
def get_id(self):
return str(self.primary_id).encode().decode()
def is_active(self):
return self.active
@login_manager.user_loader
def load_user(user_id):
try:
with db.create_connection() as connection:
with connection.cursor() as cursor:
sql = "SELECT * FROM users WHERE id=%s"
cursor.execute(sql, (user_id,))
result = cursor.fetchone()
if result:
return User(result['username'], result['id'])
return None
except:
return None
def generate_statement_string(sections, rand_min=-100, rand_max=100):
statement_str = ''
while sections > 0:
statement_str += ' '.join([random.choice(['x', 'y', str(random.randint(rand_min, rand_max))]),
random.choice(['>', '<', '==', '!=', '>=', '<=']),
random.choice(['x', 'y', str(random.randint(rand_min, rand_max))])])
if sections == 1:
return statement_str
statement_str += ' ' + random.choice(['||', '&&']) + ' '
sections -= 1
@app.route('/')
def home():
x = random.randint(-100, 100)
y = random.randint(-100, 100)
statement_str = generate_statement_string(2)
tree = BinTree.build_tree(statement_str)
statement_result = BinTree.solve_tree(tree, x, y)
return flask.render_template('home.html', x_value=str(x), y_value=str(y), statement=statement_str,
result=str(statement_result))
@app.route('/tester', methods=['GET', 'POST'])
def tester():
form = forms.StatementForm()
if form.validate_on_submit():
try:
solution = BinTree.solve_tree(BinTree.build_tree(form.statement.data), 0, 0)
if type(solution) is not bool:
raise ValueError
flask.flash('Given statement is {0}, solution is {1}'.format(form.statement.data, str(solution)))
except (ValueError, IndexError):
flask.flash('Invalid statement!')
else:
flask.flash('Invalid or empty data.')
return flask.render_template('tester.html', form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
if not flask_login.current_user.is_anonymous:
return flask.redirect('/')
login_form = forms.LoginForm(prefix='login_form')
signup_form = forms.SignupForm(prefix='signup_form')
if signup_form.register.data and signup_form.validate_on_submit():
with db.create_connection() as connection, connection.cursor() as cursor:
sql = "INSERT INTO users (username, email, password, score) VALUES (%s, %s, SHA1(%s), %s);"
cursor.execute(sql, (signup_form.username.data, signup_form.email.data, signup_form.password.data, 0))
connection.commit()
flask.flash('Signed up! Please log in.')
if login_form.login.data and login_form.validate_on_submit():
with db.create_connection() as connection, connection.cursor() as cursor:
sql = "SELECT * FROM users WHERE username=%s AND password=SHA1(%s)"
cursor.execute(sql, (login_form.username.data, login_form.password.data))
result = cursor.fetchone()
if result:
if flask_login.login_user(load_user(result['id']), remember=login_form.remember_me.data):
flask.flash('Logged in!')
return flask.redirect('/')
else:
flask.flash('Sorry, something went wrong.')
else:
flask.flash('Invalid username or password.')
return flask.render_template('login.html', login_form=login_form, signup_form=signup_form)
@app.route('/about')
def about():
return flask.render_template('about.html')
@app.route('/logout')
@flask_login.login_required
def logout():
flask_login.logout_user()
return flask.redirect('/')
if __name__ == '__main__':
app.run(debug=True)
|
Python
| 0
|
@@ -2372,16 +2372,54 @@
tion)))%0A
+ flask.redirect('/tester')%0A
@@ -2497,30 +2497,24 @@
ment!')%0A
-else:%0A
flas
@@ -2519,37 +2519,25 @@
ask.
-flash('Invalid or empty data.
+redirect('/tester
')%0A
|
602876c2b132664cc1802d467eaf8109a745d613
|
Add option for margin and strand selection
|
kufpybiotools/generate_igr_gff.py
|
kufpybiotools/generate_igr_gff.py
|
#!/usr/bin/env python
__description__ = ""
__author__ = "Konrad Foerstner <konrad@foerstner.org>"
__copyright__ = "2013 by Konrad Foerstner <konrad@foerstner.org>"
__license__ = "ISC license"
__email__ = "konrad@foerstner.org"
__version__ = ""
import argparse
import csv
import sys
sys.path.append(".")
from kufpybio.gff3 import Gff3Parser, Gff3Entry
from kufpybio.gene import Gene
from kufpybio.igrfinder import IGRFinder
parser = argparse.ArgumentParser(description=__description__)
parser.add_argument("gff_file", type=argparse.FileType("r"))
parser.add_argument("output_file", type=argparse.FileType("w"))
args = parser.parse_args()
# Build gene list
gene_list = []
gff_parser = Gff3Parser()
region_entry = None
for entry in gff_parser.entries(args.gff_file):
if entry.feature == "region":
region_entry = entry
continue
gene_list.append(Gene(
entry.seq_id, "", "", entry.start, entry.end,
entry.strand))
# Find IGRs and generate GFF file
igr_finder = IGRFinder()
args.output_file.write("##gff-version 3\n")
for start, end in igr_finder.find_igrs(gene_list, region_entry.end):
for strand in ["+", "-"]:
gff3_entry = Gff3Entry({
"seq_id" : region_entry.seq_id,
"source" : "IGR",
"feature" : "IGR",
"start" : start,
"end" : end,
"score" : ".",
"strand" : strand,
"phase" : ".",
"attributes" : "ID=IGR_%s_%s_to_%s" % (
region_entry.seq_id, start, end)})
args.output_file.write(str(gff3_entry) + "\n")
|
Python
| 0
|
@@ -606,16 +606,140 @@
e(%22w%22))%0A
+parser.add_argument(%22--margin%22, type=int, default=0)%0Aparser.add_argument(%22--plus_only%22, default=False, action=%22store_true%22)%0A
args = p
@@ -1171,16 +1171,86 @@
n 3%5Cn%22)%0A
+%0Astrands = %5B%22+%22, %22-%22%5D%0Aif args.plus_only is True:%0A strands = %5B%22+%22%5D%0A%0A
for star
@@ -1310,16 +1310,114 @@
y.end):%0A
+ start = start + args.margin%0A end = end - args.margin%0A if end %3C= start:%0A continue%0A
for
@@ -1426,26 +1426,23 @@
rand in
-%5B%22+%22, %22-%22%5D
+strands
:%0A
|
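The diff adds a --margin integer and a --plus_only flag, shrinks each intergenic interval by the margin on both sides, and drops intervals the margin swallows. A condensed, runnable sketch of that logic (the interval list is made up):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--margin", type=int, default=0)
parser.add_argument("--plus_only", default=False, action="store_true")
args = parser.parse_args(["--margin", "10", "--plus_only"])

strands = ["+"] if args.plus_only else ["+", "-"]

for start, end in [(100, 300), (400, 415)]:
    start, end = start + args.margin, end - args.margin
    if end <= start:
        continue  # interval swallowed by the margin
    for strand in strands:
        print(start, end, strand)  # prints 110 290 + only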
4bad79872547f90159e75b34b46e99e54f78b736
|
Fix error in big comment header.
|
l10n_ch_hr_payroll/__openerp__.py
|
l10n_ch_hr_payroll/__openerp__.py
|
# -*- coding: utf-8 -*-
#
# File: __init__.py
# Module: l10n_ch_hr_payroll
#
# Created by sge@open-net.ch
#
# Copyright (c) 2014-TODAY Open-Net Ltd. <http://www.open-net.ch>
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Switzerland - Payroll',
'summary': 'Switzerland Payroll Rules',
'category': 'Localization',
'author': 'Open-Net Sàrl',
'depends': ['decimal_precision', 'hr_payroll', 'hr_payroll_account'],
'version': '1.0.1',
'description': """
Switzerland Payroll Rules.
==========================
**Features list :**
* Add Swiss salary rule categories
* Add Swiss salary rules
* Add children in school to employee
* Add LPP range to contract
**For functional information:**
http://ur1.ca/ir5ou
**Author :**
Open Net Sàrl -Industrie 59 1030 Bussigny Suisse - http://www.open-net.ch
**Contact :**
info@open-net.ch
**History :**
V1.0.0: 2014-11-07/Sge
* Add Salary rule categories
* Add Salary rules
* Add Employee children in school
* Add Contract LPP rate
V1.0.1: 2014-11-11/Sge
* Set the 'LPP rate'' digits to 'Payroll Rate' decimal accuracy
V1.0.2:
* Add some minor changes, based on pull request #66 comments.
""",
'auto_install': False,
'demo': [],
'website': 'http://open-net.ch',
'data': [
'hr_contract_view.xml',
'hr_employee_view.xml',
'l10n_ch_hr_payroll_data.xml',
'data/hr.salary.rule-change.csv',
'data/hr.salary.rule-new.csv',
],
'installable': True
}
|
Python
| 0
|
@@ -34,12 +34,15 @@
: __
-init
+openerp
__.p
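Decoded, the single hunk corrects the file name in the big comment header, matching the commit subject:

```python
# File: __openerp__.py
# Module: l10n_ch_hr_payroll
```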
|
91e4ab4284490b0a232e2e0bc60df9059a264660
|
Revise exercise 1 checking
|
learntools/computer_vision/ex1.py
|
learntools/computer_vision/ex1.py
|
from learntools.core import *
class Q1(CodingProblem):
_var = 'pretrained_base'
_hint = "`True` or `False`?"
_correct_message = """When doing transfer learning, it's generally not a good idea to retrain the entire base -- at least not without some care. The reason is that the random weights in the head will initially create large gradient updates, which propagate back into the base layers and destroy much of the pretraining. Using techniques known as **fine tuning**, it's possible to further train the base on new data, but this requires some care to do well."""
_solution = CS('pretrained_base.trainable = False')
def check(self, pretrained_base):
assert (not pretrained_base.trainable), \
("""The base should not be trainable. Since it's already been pretrained on a large dataset, you can expect that it will be hard to improve.""")
class Q2(CodingProblem):
hidden_units = 6
_var = 'model'
_hint = "You need to add two new `Dense` layers. The first should have {} units and `'relu'` activation. The second should have 1 unit and `'sigmoid'` activation.".format(hidden_units)
_solution = CS("""
import tensorflow.keras as keras
import tensorflow.keras.layers as layers
model = Sequential([
pretrained_base,
layers.Flatten(),
layers.Dense({}, activation='relu'),
layers.Dense(1, activation='sigmoid'),
])
""".format(hidden_units))
def check(self, model):
hidden_units = 6
assert (len(model.layers) == 4), \
("""
You've added an incorrect number of layers. Try something like:
```python
model = Sequential([
pretrained_base,
layers.Flatten(),
layers.Dense({}, activation='relu'),
layers.Dense(1, activation='sigmoid'),
])
```
""".format(hidden_units))
layer_classes = [layer.__class__.__name__ for layer in model.layers]
assert (layer_classes[2] == 'Dense' and layer_classes[3] == 'Dense'), \
(("The two layers you add should both be `Dense` layers. " +
"You added a `{}` layer and a `{}` layer.")
.format(layer_classes[2], layer_classes[3]))
dense_1 = model.layers[-2]
assert (dense_1.units == hidden_units and
dense_1.activation.__name__ == 'relu'), \
(("The first dense layer should have {} units with `{}` activation. " +
"Yours had {} units and `{}` activation.")
.format(hidden_units, 'relu',
dense_1.units, dense_1.activation.__name__))
dense_2 = model.layers[-1]
assert (dense_2.units == 1 and
dense_2.activation.__name__ == 'sigmoid'), \
(("The second dense layer should have {} units with `{}` activation. " +
"Yours had {} unit and `{}` activation.")
.format(1, 'sigmoid',
dense_2.units, dense_2.activation.__name__))
class Q3(CodingProblem):
_hint = "This is a *binary* classification problem."
_solution = CS("""
model.compile(
optimizer='adam',
loss='binary_crossentropy',
metrics=['binary_accuracy'],
)
""")
_var = "model"
def check(self, model):
loss = model.compiled_loss._losses
assert (loss == 'binary_crossentropy'), \
(("The loss should be `'binary_crossentropy'`. " +
"You gave `{}`").format(loss))
metrics = model.compiled_metrics._metrics
assert (metrics == ['binary_accuracy']), \
(("The metrics should be `['binary_accuracy']`. " +
"You gave `{}`").format(metrics))
class Q4(ThoughtExperiment):
_solution = """That the training loss and validation loss stay fairly close is evidence that the model isn't just memorizing the training data, but rather learning general properties of the two classes. But, because this model converges at a loss greater than the VGG16 model, it's likely that it is underfitting some, and could benefit from some extra capacity.
"""
qvars = bind_exercises(
globals(),
[Q1, Q2, Q3, Q4],
var_format='q_{n}',
)
__all__ = list(qvars)
|
Python
| 0
|
@@ -1686,106 +1686,49 @@
nse(
-%7B%7D, activation='relu'),%0A layers.Dense(1, activation='sigmoid'),%0A%5D)%0A%60%60%60%0A%22%22%22.format(hidden_units)
+____),%0A layers.Dense(____),%0A%5D)%0A%60%60%60%0A%22%22%22
)%0A%0A
@@ -2756,24 +2756,25 @@
had %7B%7D unit
+s
and %60%7B%7D%60 ac
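Decoded, the first hunk masks the layer arguments in the `check` failure message, so failing the length assertion no longer hands the student the full answer, and the second hunk fixes a pluralization. Inside the assertion message, the suggested snippet now reads (the markdown fence lines inside the string are omitted here):

```python
model = Sequential([
    pretrained_base,
    layers.Flatten(),
    layers.Dense(____),   # units and activation blanked out
    layers.Dense(____),
])
```

The trailing `.format(hidden_units)` is dropped along with the blanks, and the `dense_2` message becomes "Yours had {} units and `{}` activation."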
|
13ff5be510c978715c45ab7387f00551299d5571
|
Fix lektor server issue with artifacts
|
lektor_creative_commons/plugin.py
|
lektor_creative_commons/plugin.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
# python 3
from urllib.error import URLError
except ImportError:
# legacy python
URLError = IOError
import os
from lektor.context import get_ctx
from lektor.pluginsystem import Plugin
from markupsafe import Markup
from .translation import translate_lazy as _
TEMPLATES = {
'full': (
'<a rel="license" target="_blank" href="https://creativecommons.org/'
'licenses/{type}/{version}/deed.{locale}">'
'<img alt="{license}" style="border-width:0" src="{icon_path}" />'
'</a><br />{message} '
'<a rel="license" target="_blank" href="https://creativecommons.org/'
'licenses/{type}/{version}/deed.{locale}">{license}</a>.'
),
'image-only': (
'<a rel="license" target="_blank" href="https://creativecommons.org/'
'licenses/{type}/{version}/deed.{locale}">'
'<img alt="{license}" style="border-width:0" src="{icon_path}" /></a>'
),
'text-only': (
'{message} '
'<a rel="license" target="_blank" href="https://creativecommons.org/'
'licenses/{type}/{version}/deed.{locale}">{license}</a>.'
),
}
LICENSES = {
'by': {
'type': 'by',
'version': '4.0',
'license_type': _('Attribution'),
},
'by-nc': {
'type': 'by-nc',
'version': '4.0',
'license_type': _('Attribution - NonCommercial'),
},
'by-sa': {
'type': 'by-sa',
'version': '4.0',
'license_type': _('Attribution - ShareAlike'),
},
'by-nc-sa': {
'type': 'by-nc-sa',
'version': '4.0',
'license_type': _('Attribution - NonCommercial - ShareAlike'),
},
'by-nd': {
'type': 'by-nd',
'version': '4.0',
'license_type': _('Attribution - NoDerivatives'),
},
'by-nc-nd': {
'type': 'by-nc-nd',
'version': '4.0',
'license_type': _('Attribution - NonCommercial - NoDerivatives'),
},
}
LICENSE_SIZES = {
'normal': '88x31',
'compact': '80x15',
}
class CreativeCommonsPlugin(Plugin):
name = 'Creative Commons'
description = 'Add Creative Commons license to your pages.'
def __init__(self, env, id):
self.locale = env.load_config().site_locale or 'en'
_.translator.configure(self.locale)
super(CreativeCommonsPlugin, self).__init__(env, id)
def render_cc_license(self, type, size='normal', template='full',
caller=None):
license = LICENSES[type].copy()
license['size'] = LICENSE_SIZES[size]
license['locale'] = self.locale
license['message'] = _('This work is licensed under a')
license['license'] = _(
'Creative Commons %(license_type)s 4.0 International License',
license
)
license['license_url'] = (
'https://creativecommons.org/'
'licenses/{type}/{version}/deed.{locale}'
).format(**license)
license['icon_path'] = self.icon_path(license)
if callable(caller):
if caller.catch_kwargs:
return Markup(caller(**license))
else:
license_subset = dict(
(argument_name, license[argument_name])
for argument_name in caller.arguments
)
return Markup(caller(**license_subset))
return Markup(TEMPLATES[template].format(**license))
def icon_path(self, license):
icon_target_path = (
'/static/lektor-creative-commons/{type}/{version}/{size}.png'
).format(**license)
icon_source_path = os.path.join(
os.path.dirname(__file__), 'assets', license['type'],
license['version'], license['size'] + '.png'
)
ctx = get_ctx()
@ctx.sub_artifact(
icon_target_path, sources=[], source_obj=icon_source_path
)
def copy_icon(artifact):
artifact.sources.append(artifact.dst_filename)
artifact.replace_with_file(icon_source_path, copy=True)
return icon_target_path
def on_setup_env(self, **extra):
self.env.jinja_env.globals.update(
render_cc_license=self.render_cc_license
)
|
Python
| 0
|
@@ -3912,20 +3912,70 @@
ath,
- sources=%5B%5D,
+%0A sources=%5Bctx.source.source_filename%5D,%0A
sou
@@ -4067,101 +4067,45 @@
act.
-sources.append(artifact.dst_filename)%0A artifact.replace_with_file(icon_
+replace_with_file(artifact.
source_
-path
+obj
, co
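Decoded, the patch registers the record's own source file as a dependency of the icon sub-artifact and copies from the artifact's `source_obj` instead of appending the destination filename, which is what confused `lektor server`'s artifact tracking. The updated section reads:

```python
ctx = get_ctx()

@ctx.sub_artifact(
    icon_target_path,
    sources=[ctx.source.source_filename],
    source_obj=icon_source_path
)
def copy_icon(artifact):
    # source_obj is the bundled PNG registered in the decorator above
    artifact.replace_with_file(artifact.source_obj, copy=True)

return icon_target_path
```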
|
bff08843a3084353f1a72e3585f6a85a3114d79e
|
Print list of replacements when an error occurs
|
lib/methods/scripts.py
|
lib/methods/scripts.py
|
from base import BaseMethod
from fabric.api import *
from fabric.colors import green, red
from lib import configuration
import re
class ScriptMethod(BaseMethod):
@staticmethod
def supports(methodName):
return methodName == 'script'
def runScriptImpl(self, rootFolder, commands, callbacks= {}, environment = {}):
pattern = re.compile(r'%(\S*)%')
state = { 'warnOnly': True }
# preflight
ok = True
for line in commands:
if pattern.search(line) != None:
print red('Found replacement-pattern in script-line "%s", aborting ...' % line)
ok = False
for key in environment:
if pattern.search(environment[key]) != None:
print red('Found replacement-pattern in environment "%s:%s", aborting ...' % (key, environment[key]))
ok = False
if not ok:
return
for line in commands:
with cd(rootFolder), shell_env(**environment):
handled = False
start_p = line.find('(')
end_p = line.rfind(')')
if start_p >= 0 and end_p > 0:
func_name = line[0:start_p]
if func_name in callbacks:
arguments = False
func_args = line[start_p+1: end_p]
if func_args.strip() != '':
arguments = func_args.split(',')
arguments = map(lambda x: x.strip(), arguments)
if arguments:
callbacks[func_name](state, *arguments)
else:
callbacks[func_name](state)
handled = True
if not handled:
if state['warnOnly']:
with warn_only():
run(line)
else:
run(line)
def expandVariablesImpl(self, prefix, variables, result):
for key in variables:
if isinstance(variables[key], dict):
self.expandVariablesImpl(prefix + "." + key, variables[key], result)
else:
result["%" + prefix + "." + key + "%"] = str(variables[key])
def expandVariables(self, variables):
results = {}
for key in variables:
self.expandVariablesImpl(key, variables[key], results)
return results
def expandCommands(self, commands, replacements):
parsed_commands = []
pattern = re.compile('|'.join(re.escape(key) for key in replacements.keys()))
for line in commands:
result = pattern.sub(lambda x: replacements[x.group()], line)
parsed_commands.append(result)
return parsed_commands
def expandEnvironment(self, environment, replacements):
parsed_environment = {}
pattern = re.compile('|'.join(re.escape(key) for key in replacements.keys()))
for key in environment:
parsed_environment[key] = pattern.sub(lambda x: replacements[x.group()], environment[key])
return parsed_environment
def executeCallback(self, context, command, *args, **kwargs):
execute(command, *args)
def runTaskCallback(self, context, *args, **kwargs):
print red('run_task is not supported anymore, use "execute(docker, <your_task>)"')
def failOnErrorCallback(self, context, flag):
if flag == '1':
context['warnOnly'] = False
else:
context['warnOnly'] = True
def runScript(self, config, **kwargs):
script = kwargs['script']
callbacks = kwargs['callbacks'] if 'callbacks' in kwargs else {}
variables = kwargs['variables'] if 'variables' in kwargs else {}
environment = kwargs['environment'] if 'environment' in kwargs else {}
if 'environment' in config:
environment = configuration.data_merge(config['environment'], environment)
variables['host'] = config
callbacks['execute'] = self.executeCallback
callbacks['run_task'] = self.runTaskCallback
callbacks['fail_on_error'] = self.failOnErrorCallback
replacements = self.expandVariables(variables)
commands = self.expandCommands(script, replacements)
environment = self.expandEnvironment(environment, replacements)
self.runScriptImpl(config['rootFolder'], commands, callbacks, environment)
def runTaskSpecificScript(self, taskName, config, **kwargs):
script = False
if taskName in config:
script = config[taskName]
else:
common_scripts = configuration.getSettings('common')
type = config['type']
if taskName in common_scripts and type in common_scripts[taskName]:
script = common_scripts[taskName][type]
if script:
self.runScript(config, script=script)
def preflight(self, taskName, configuration, **kwargs):
self.runTaskSpecificScript(taskName + "Prepare", configuration, **kwargs)
def postflight(self, taskName, configuration, **kwargs):
self.runTaskSpecificScript(taskName + "Finished", configuration, **kwargs)
|
Python
| 0.000002
|
@@ -235,16 +235,177 @@
cript'%0A%0A
+ def printReplacements(self, replacements):%0A for key, value in replacements.iteritems():%0A print %22%7Bkey:%3C40%7D %7C %7Bvalue%7D%22.format(key = key, value=value)%0A%0A
%0A def r
@@ -473,16 +473,35 @@
onment =
+ %7B%7D, replacements =
%7B%7D):%0A%0A
@@ -1004,22 +1004,71 @@
:%0A
-return
+self.printReplacements(replacements)%0A return False
%0A%0A%0A f
@@ -4212,16 +4212,30 @@
ironment
+, replacements
)%0A%0A%0A de
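Decoded, the patch threads the replacement table through to `runScriptImpl` and dumps it in two aligned columns whenever a leftover `%...%` pattern aborts the preflight, so the user can see which replacements were actually available. Reconstructed from the hunks (Python 2, like the rest of the file):

```python
def printReplacements(self, replacements):
    for key, value in replacements.iteritems():
        print "{key:<40} | {value}".format(key = key, value=value)

def runScriptImpl(self, rootFolder, commands, callbacks= {}, environment = {}, replacements = {}):
    # ... preflight checks as before ...
    if not ok:
        self.printReplacements(replacements)
        return False
```

The call in `runScript` gains the extra argument to match:

```python
self.runScriptImpl(config['rootFolder'], commands, callbacks, environment, replacements)
```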
|
05890290bf8fa62df61f0cb791abcf6e11a567fe
|
fix for updater issues after 83765a0dd9445d4f9e9acfc101aae406b003d5db
|
maraschino/__init__.py
|
maraschino/__init__.py
|
# -*- coding: utf-8 -*-
"""Maraschino module"""
import sys
import os
import subprocess
import threading
import wsgiserver
from Maraschino import app
from Logger import maraschinoLogger
from apscheduler.scheduler import Scheduler
FULL_PATH = None
RUNDIR = None
ARGS = None
DAEMON = False
PIDFILE = None
VERBOSE = True
LOG_FILE = None
PORT = None
DATABASE = None
INIT_LOCK = threading.Lock()
__INITIALIZED__ = False
DEVELOPMENT = False
SCHEDULE = Scheduler()
WEBROOT = ''
logger = None
SERVER = None
HOST = '0.0.0.0'
KIOSK = False
DATA_DIR = None
THREADS = 0
AUTH = {
'username': None,
'password': None,
}
CURRENT_COMMIT = None
LATEST_COMMIT = None
COMMITS_BEHIND = 0
COMMITS_COMPARE_URL = ''
def initialize():
"""Init function for this module"""
with INIT_LOCK:
global __INITIALIZED__, app, FULL_PATH, RUNDIR, ARGS, DAEMON, PIDFILE, VERBOSE, LOG_FILE, LOG_DIR, logger, PORT, SERVER, DATABASE, AUTH, \
CURRENT_COMMIT, LATEST_COMMIT, COMMITS_BEHIND, COMMITS_COMPARE_URL, USE_GIT, WEBROOT, HOST, KIOSK, DATA_DIR, THREADS
if __INITIALIZED__:
return False
# Set up logger
if not LOG_FILE:
LOG_FILE = os.path.join(DATA_DIR, 'logs', 'maraschino.log')
FILENAME = os.path.basename(LOG_FILE)
LOG_DIR = LOG_FILE[:-len(FILENAME)]
if not os.path.exists(LOG_DIR):
try:
os.makedirs(LOG_DIR)
except OSError:
if VERBOSE:
print 'Unable to create the log directory.'
logger = maraschinoLogger(LOG_FILE, VERBOSE)
# check if database exists or create it
from database import init_db
if KIOSK:
logger.log('Running in KIOSK Mode, settings disabled.', 'INFO')
try:
logger.log('Opening database at: %s' % (DATABASE), 'INFO')
open(DATABASE)
except IOError:
logger.log('Opening database failed', 'CRITICAL')
try:
logger.log('Checking if PATH exists: %s' % (DATABASE), 'WARNING')
dbpath = os.path.dirname(DATABASE)
if not os.path.exists(dbpath):
try:
logger.log('It does not exist, creating it...', 'WARNING')
os.makedirs(dbpath)
except:
logger.log('Could not create %s.' % (DATABASE), 'CRITICAL')
print 'Could not create %s.' % (DATABASE)
quit()
except:
logger.log('Could not create %s.' % (DATABASE), 'CRITICAL')
quit()
logger.log('Database successfully initialised', 'INFO')
init_db()
# Web server settings
from tools import get_setting_value
if get_setting_value('maraschino_port'):
port_arg = False
for arg in ARGS:
if arg == '--port' or arg == '-p':
port_arg = True
if not port_arg:
PORT = int(get_setting_value('maraschino_port'))
# Set up AUTH
username = get_setting_value('maraschino_username')
password = get_setting_value('maraschino_password')
if username and password is not None:
AUTH = {
'username': username,
'password': password
}
# Set up web server
if '--webroot' not in str(ARGS):
WEBROOT = get_setting_value('maraschino_webroot')
if WEBROOT is None:
WEBROOT = ''
if WEBROOT:
if WEBROOT[0] != '/':
WEBROOT = '/' + WEBROOT
d = wsgiserver.WSGIPathInfoDispatcher({WEBROOT: app})
else:
d = wsgiserver.WSGIPathInfoDispatcher({'/': app})
SERVER = wsgiserver.CherryPyWSGIServer((HOST, PORT), d)
__INITIALIZED__ = True
return True
def init_updater():
from maraschino.updater import checkGithub, gitCurrentVersion
if os.name == 'nt':
USE_GIT = False
else:
USE_GIT = os.path.isdir(os.path.join(RUNDIR, '.git'))
if USE_GIT:
gitCurrentVersion()
version_file = os.path.join(DATA_DIR, 'Version.txt')
if os.path.isfile(version_file):
f = open(version_file, 'r')
CURRENT_COMMIT = f.read()
f.close()
else:
COMMITS_BEHIND = -1
threading.Thread(target=checkGithub).start()
def start_schedules():
"""Add all periodic jobs to the scheduler"""
# check every 6 hours for a new version
from maraschino.updater import checkGithub
SCHEDULE.add_interval_job(checkGithub, hours=6)
SCHEDULE.start()
def start():
"""Start the actual server"""
if __INITIALIZED__:
start_schedules()
if not DEVELOPMENT:
try:
logger.log('Starting Maraschino on %s:%i%s' % (HOST, PORT, WEBROOT), 'INFO')
SERVER.start()
while not True:
pass
except KeyboardInterrupt:
stop()
else:
logger.log('Starting Maraschino development server on port: %i' % (PORT), 'INFO')
logger.log(' ##### IMPORTANT : WEBROOT DOES NOT WORK UNDER THE DEV SERVER #######', 'INFO')
app.run(debug=True, port=PORT, host=HOST)
def stop():
"""Shutdown Maraschino"""
logger.log('Shutting down Maraschino...', 'INFO')
if not DEVELOPMENT:
SERVER.stop()
else:
from flask import request
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
SCHEDULE.shutdown(wait=False)
if PIDFILE:
logger.log('Removing pidfile: %s' % str(PIDFILE), 'INFO')
os.remove(PIDFILE)
def restart():
"""Restart Maraschino"""
SERVER.stop()
popen_list = [sys.executable, FULL_PATH]
popen_list += ARGS
logger.log('Restarting Maraschino with: %s' % popen_list, 'INFO')
SCHEDULE.shutdown(wait=False)
subprocess.Popen(popen_list, cwd=RUNDIR)
def daemonize():
"""Start Maraschino as a daemon"""
if threading.activeCount() != 1:
logger.log('There are %s active threads. Daemonizing may cause strange behavior.' % threading.activeCount(), 'WARNING')
sys.stdout.flush()
sys.stderr.flush()
try:
pid = os.fork()
if pid == 0:
pass
else:
logger.log('Forking once...', 'DEBUG')
os._exit(0)
except OSError, e:
sys.exit('1st fork failed: %s [%d]' % (e.strerror, e.errno))
os.chdir('/')
os.umask(0)
os.setsid()
try:
pid = os.fork()
if pid > 0:
logger.log('Forking twice...', 'DEBUG')
os._exit(0)
except OSError, e:
sys.exit('2nd fork failed: %s [%d]' % (e.strerror, e.errno))
pid = os.getpid()
logger.log('Daemonized to PID: %s' % pid, 'INFO')
if PIDFILE:
logger.log('Writing PID %s to %s' % (pid, PIDFILE), 'INFO')
file(PIDFILE, 'w').write("%s\n" % pid)
|
Python
| 0.000001
|
@@ -4032,16 +4032,67 @@
tVersion
+%0A global USE_GIT, CURRENT_COMMIT, COMMITS_BEHIND
%0A%0A if
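Decoded, the single hunk adds a `global` declaration at the top of `init_updater`, so its assignments update the module-level state rather than creating function locals that are silently discarded; this is the updater breakage the commit subject refers to:

```python
def init_updater():
    from maraschino.updater import checkGithub, gitCurrentVersion
    global USE_GIT, CURRENT_COMMIT, COMMITS_BEHIND

    if os.name == 'nt':
        USE_GIT = False
    # ... rest unchanged ...
```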
|
f7266e7b9c2c719952a4e2c4e736e87a79c57a1b
|
Use schedule instead of schedule_interval
|
examples/application.py
|
examples/application.py
|
"""Demonstrates PyGLy's functionality.
Renders a simple scene graph and controls
a viewport.
The viewport is provided without any high-level
wrappers and is entirely managed through events.
"""
from time import time
import pyglet
from pyglet.gl import *
import pygly.window
import pygly.gl
from pygly.projection_view_matrix import ProjectionViewMatrix
from pygly.scene_node import SceneNode
from pygly.camera_node import CameraNode
from pyrr import matrix44
# over-ride the default pyglet idle loop
import pygly.monkey_patch
pygly.monkey_patch.patch_idle_loop()
class Application( object ):
def __init__( self, gl_config ):
"""Sets up the core functionality we need
to begin rendering.
This includes the OpenGL configuration, the
window, the viewport, the event handler
and update loop registration.
"""
super( Application, self ).__init__()
# create our window
self.window = pyglet.window.Window(
fullscreen = False,
width = 1024,
height = 768,
resizable = True,
vsync = False,
config = gl_config,
)
# listen for on_draw events
self.window.push_handlers(
on_draw = self.on_draw,
on_resize = self.on_resize,
)
# set up our update loop for the app
# we don't need to do this to get the window
# up, but it's nice to show the basic application
# structure in such a simple app
# render at max speed
# this also requires vsync to be False or
# the render loop will be stuck at the monitor
# frequency
self.update_delta = -1
# use a pyglet callback for our render loop
pyglet.clock.schedule_interval(
self.step,
self.update_delta
)
# print some debug info
pygly.gl.print_gl_info()
# setup our scene
self.setup()
def setup( self ):
self.setup_viewports()
self.setup_scene()
self.setup_camera()
def setup_viewports( self ):
# create a viewport that spans
# the entire screen
# this list is zip'ed with self.cameras
# to bind the camera to a viewport for rendering.
self.viewports = [
pygly.window.create_rectangle(
self.window
)
]
self.colours = [
(0.0, 0.0, 0.0, 1.0)
]
def setup_scene( self ):
"""Creates the scene to be rendered.
Creates our camera, scene graph,
"""
# enable scissoring for viewports
glEnable( GL_SCISSOR_TEST )
# create a scene
# we'll create the scene as a tree
# to demonstrate the depth-first iteration
# technique we will use to render it
self.scene_node = SceneNode( 'root' )
def setup_camera( self ):
# create a camera and a view matrix
view_matrix = ProjectionViewMatrix(
pygly.window.aspect_ratio( self.viewports[ 0 ] ),
fov = 45.0,
near_clip = 1.0,
far_clip = 200.0
)
camera = CameraNode('camera', view_matrix )
self.scene_node.add_child( camera )
# store our camera in a list
# this list is zip'ed with self.viewports
# to bind the camera to a viewport for rendering.
# a camera may appear in this array multiple times
self.cameras = [ camera ]
def run( self ):
"""Begins the Pyglet main loop.
"""
pyglet.app.run()
def on_resize( self, width, height ):
"""Called when the window is resized.
Pyglet fires an on_resize event and this
is where we handle it.
We need to update our view matrix with respect
to our viewport size, or the content will become
skewed.
"""
# update the viewport size
self.viewports[ 0 ] = pygly.window.create_rectangle(
self.window
)
# we would normally update the viewport ratio
# here, but because we're allowing a single
# camera to service multiple viewports, we
# update them each frame
def step( self, dt ):
"""Updates our scene and triggers the on_draw event.
This is scheduled in our __init__ method and
called periodically by pyglet's event callbacks.
We need to manually call 'on_draw' as we patched
it out of pyglet's event loop with
pygly.monkey_patch.
Because we called 'on_draw', we also need to
perform the buffer flip at the end.
"""
# manually dispatch the on_draw event
# as we patched it out of the idle loop
self.window.dispatch_event( 'on_draw' )
# display the frame buffer
self.window.flip()
def on_draw( self ):
"""Triggered by the pyglet 'on_draw' event.
Causes the scene to be rendered.
"""
self.render()
def render( self ):
# set our window
self.window.switch_to()
# render each viewport
for viewport, camera, colour in zip( self.viewports, self.cameras, self.colours ):
glClearColor( *colour )
# render the viewport
self.render_viewport(
viewport,
camera,
)
# undo our viewport and our scissor
pygly.gl.set_scissor(
pygly.window.create_rectangle( self.window )
)
pygly.gl.set_viewport(
pygly.window.create_rectangle( self.window )
)
def render_viewport( self, viewport, camera ):
# activate our viewport
pygly.gl.set_viewport( viewport )
# scissor to our viewport
pygly.gl.set_scissor( viewport )
# clear our frame buffer and depth buffer
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT )
# update the camera's aspect ratio before
# we render using it
# we would normally do this in on_resize
# but because 1 camera could be used to render
# to multiple viewports in our example, we
# will do it each frame
camera.view_matrix.aspect_ratio = pygly.window.aspect_ratio(
viewport
)
self.render_scene( camera )
def render_scene( self, camera ):
pass
def main():
"""Main function entry point.
Simply creates the Application and
calls 'run'.
Also ensures the window is closed at the end.
"""
# Application requires a GL config; assume a basic double-buffered one
app = Application( Config( double_buffer = True ) )
app.run()
app.window.close()
if __name__ == "__main__":
main()
|
Python
| 0.000014
|
@@ -1740,40 +1740,8 @@
ncy%0D
-%0A self.update_delta = -1%0D
%0A%0D%0A
@@ -1825,86 +1825,19 @@
dule
-_interval(%0D%0A self.step,%0D%0A self.update_delta%0D%0A
+( self.step
)%0D%0A
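Decoded, the patch drops the `update_delta = -1` bookkeeping and registers the callback with plain `schedule`, which pyglet invokes once per tick of the event loop. That achieves the "render at max speed" intent directly instead of abusing `schedule_interval` with a negative interval:

```python
# schedule() fires step() every tick, as fast as the loop runs
pyglet.clock.schedule( self.step )
```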
|