| commit (string, 40 chars) | subject (string, 1–3.25k chars) | old_file (string, 4–311 chars) | new_file (string, 4–311 chars) | old_contents (string, 0–26.3k chars) | lang (3 classes) | proba (float64, 0–1) | diff (string, 0–7.82k chars) |
|---|---|---|---|---|---|---|---|
2bf0c9e0d8bbce50f06ca08c79f97ecf5b76e21b
|
Fix logging
|
simplesqlite/_logger.py
|
simplesqlite/_logger.py
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import absolute_import, unicode_literals
import logbook
import tabledata
logger = logbook.Logger("SimpleSQLie")
logger.disable()
def set_logger(is_enable):
if is_enable != logger.disabled:
return
if is_enable:
logger.enable()
else:
logger.disable()
tabledata.set_logger(is_enable)
try:
import pytablereader
pytablereader.set_logger(is_enable)
except ImportError:
pass
def set_log_level(log_level):
"""
Set logging level of this module. Using
`logbook <http://logbook.readthedocs.io/en/stable/>`__ module for logging.
:param int log_level:
One of the log level of
`logbook <http://logbook.readthedocs.io/en/stable/api/base.html>`__.
Disabled logging if ``log_level`` is ``logbook.NOTSET``.
:raises LookupError: If ``log_level`` is an invalid value.
"""
# validate log level
logbook.get_level_name(log_level)
if log_level == logger.level:
return
if log_level == logbook.NOTSET:
set_logger(is_enable=False)
else:
set_logger(is_enable=True)
logger.level = log_level
tabledata.set_log_level(log_level)
try:
import pytablereader
pytablereader.set_log_level(log_level)
except ImportError:
pass
|
Python
| 0.000007
|
@@ -158,16 +158,36 @@
 logbook
+import sqliteschema
 import t
@@ -445,24 +445,63 @@
 (is_enable)
+    sqliteschema.set_logger(is_enable)
     try:
@@ -1340,24 +1340,66 @@
 (log_level)
+    sqliteschema.set_log_level(log_level)
     try:
|
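Decoded, this patch simply threads the enable flag through to sqliteschema alongside tabledata. A minimal sketch of the patched set_logger, assuming sqliteschema exposes the same set_logger() helper as tabledata (the named packages must be installed for this to run):

```python
import logbook
import sqliteschema
import tabledata

logger = logbook.Logger("SimpleSQLite")

def set_logger(is_enable):
    if is_enable != logger.disabled:
        return  # logger is already in the requested state
    if is_enable:
        logger.enable()
    else:
        logger.disable()
    tabledata.set_logger(is_enable)
    sqliteschema.set_logger(is_enable)  # the line this commit adds
    try:
        import pytablereader
        pytablereader.set_logger(is_enable)
    except ImportError:
        pass
```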
930508e5ec00d9f174409097ba54e70c7c6b2b3c
|
Fix #421: RPN_DEFNS needs to be passed to Pelegant via env
|
sirepo/pkcli/elegant.py
|
sirepo/pkcli/elegant.py
|
# -*- coding: utf-8 -*-
"""Wrapper to run elegant from the command line.
:copyright: Copyright (c) 2015 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkio
from pykern import pkresource
from pykern import pksubprocess
from pykern.pkdebug import pkdp, pkdc
from sirepo import mpi
from sirepo import simulation_db
from sirepo.template import template_common
from sirepo.template.elegant import extract_report_data, ELEGANT_LOG_FILE
import copy
import os
import re
import subprocess
_ELEGANT_STDERR_FILE = 'elegant.stderr'
def run(cfg_dir):
"""Run elegant in ``cfg_dir``
The files in ``cfg_dir`` must be configured properly.
Args:
cfg_dir (str): directory to run elegant in
"""
with pkio.save_chdir(cfg_dir):
_run_elegant(bunch_report=True)
_extract_bunch_report()
def run_background(cfg_dir):
"""Run elegant as a background task
Args:
cfg_dir (str): directory to run elegant in
"""
with pkio.save_chdir(cfg_dir):
_run_elegant(with_mpi=True);
simulation_db.write_result({})
def _run_elegant(bunch_report=False, with_mpi=False):
exec(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE), locals(), locals())
if bunch_report and re.search('\&sdds_beam\s', elegant_file):
return
pkio.write_text('elegant.lte', lattice_file)
ele = 'elegant.ele'
pkio.write_text(ele, elegant_file)
# TODO(robnagler) Need to handle this specially, b/c different binary
if with_mpi and mpi.cfg.cores > 1:
return mpi.run_program(['Pelegant', ele], output=ELEGANT_LOG_FILE)
env = copy.deepcopy(os.environ)
env['RPN_DEFNS'] = pkresource.filename('defns.rpn')
pksubprocess.check_call_with_signals(
['elegant', ele],
output=ELEGANT_LOG_FILE,
env=env,
msg=pkdp,
)
def _extract_bunch_report():
data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
if data['models']['bunchSource']['inputSource'] == 'sdds_beam':
file = 'bunchFile-sourceFile.{}'.format(data['models']['bunchFile']['sourceFile'])
else:
file = 'elegant.bun'
info = extract_report_data(file, data['models'][data['report']], data['models']['bunch']['p_central_mev'], 0)
simulation_db.write_result(info)
|
Python
| 0
|
@@ -1601,16 +1601,108 @@
 binary
+    env = copy.deepcopy(os.environ)
+    env['RPN_DEFNS'] = pkresource.filename('defns.rpn')
 if w
@@ -1809,100 +1809,17 @@
 FILE
-)
-    env = copy.deepcopy(os.environ)
-    env['RPN_DEFNS'] = pkresource.filename('defns.rpn'
+, env=env
 )
|
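The fix hoists the environment setup above the MPI branch so Pelegant inherits RPN_DEFNS too, passing it through a new env= argument to mpi.run_program. A runnable, generic sketch of the underlying technique (the file path here is hypothetical):

```python
import copy
import os
import subprocess

# Build the child environment once, before choosing which binary to
# launch, so every branch (serial or MPI) inherits the same variables.
env = copy.deepcopy(os.environ)
env['RPN_DEFNS'] = '/tmp/defns.rpn'  # hypothetical path for illustration

# Any launch path now receives RPN_DEFNS.
subprocess.check_call(['printenv', 'RPN_DEFNS'], env=env)
```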
5c8a6072309989ac97eefc2a6f63a6082a2c5ff0
|
Update matching_specific_string.py
|
hacker_rank/contests/regular_expresso/matching_specific_string.py
|
hacker_rank/contests/regular_expresso/matching_specific_string.py
|
Python
| 0.000002
|
@@ -0,0 +1,52 @@
+Regex_Pattern = r'hackerrank'	# Do not delete 'r'.
|
|
05b15f2db049e8b722f17867f5163c0b6e3a3108
|
Allow POST to /git-update url.
|
pthemes.py
|
pthemes.py
|
import os
import logging
from flask import Flask, render_template, redirect, url_for, flash
from pq import PQ
from api import APIGrabber
from db import PonyDB
logging.basicConfig()
# Config
# ---------------
# App config
app = Flask(__name__)
app.config.from_object(os.environ.get('APP_SETTINGS', None))
db = PonyDB(app)
pq = PQ(db.get_connection()) # Postgres work queue
if db.table_exists('queue') is False:
pq.create()
queue = pq['themes']
# Routes
# ---------------
@app.route('/')
def show_entries():
"""
List out all the themes.
"""
image_themes = db.get_image_themes()
no_image_themes = db.get_no_image_themes()
sha = db.get_sha()
counts = {}
counts['image_themes'] = len(image_themes)
counts['no_image_themes'] = len(no_image_themes)
counts['total'] = counts['image_themes'] + counts['no_image_themes']
for t in image_themes:
if t['image_urls'] is not None:
t['image_urls'] = t['image_urls'].split(',')
return render_template('list.html',
image_themes=image_themes,
no_image_themes=no_image_themes,
counts=counts,
sha=sha)
@app.route('/git-update', methods=['GET'])
def refresh_themes():
"""
Adds a job to the job queue. The job is to refresh the theme list. As
all jobs are identical, the job will only be added if there are no
existing jobs.
"""
if len(queue) < 1:
queue.put('Refresh themes')
flash('Added theme refresh job to queue.')
else:
flash('A theme refresh job has already been scheduled.')
return redirect(url_for('show_entries'))
# App decorators
# ---------------
# @app.cli.command('initdb')
# def initdb_command():
# """Creates the database tables."""
# db.init_db()
# @app.cli.command('populatedb')
# def populatedb_command():
# db.populate_db()
@app.cli.command('worker')
def queue_worker():
"""
Process queue tasks and then exit
"""
for task in queue:
if task is None:
break
a = APIGrabber(app.config['GITHUB_API_KEY'])
sha, data = a.process()
db.populate_db(sha, data)
if __name__ == "__main__":
app.run()
|
Python
| 0
|
@@ -1256,16 +1256,24 @@
 s=['GET'
+, 'POST'
 ])
 def r
|
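Decoded, the change just widens the accepted HTTP methods on the route, presumably so an external service (e.g. a git webhook) can POST to it. A minimal self-contained Flask sketch of the pattern:

```python
from flask import Flask

app = Flask(__name__)

# Accept POST in addition to GET, e.g. so a webhook can trigger the job.
@app.route('/git-update', methods=['GET', 'POST'])
def refresh_themes():
    return 'Refresh job queued.'

if __name__ == '__main__':
    app.run()
```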
865053d4a401bda2b6435d4449111f8997656863
|
active is an integer field
|
mint/django_rest/rbuilder/users/models.py
|
mint/django_rest/rbuilder/users/models.py
|
#
# Copyright (c) 2011 rPath, Inc.
#
# All Rights Reserved
#
from django.db import models
from mint.django_rest.rbuilder import modellib
from xobj import xobj
import sys
from mint.django_rest.rbuilder.users import manager_model
from django.db import connection
class UserGroups(modellib.Collection):
class Meta:
abstract = True
_xobj = xobj.XObjMetadata(tag='user_groups')
list_fields = ['user_group']
class UserGroup(modellib.XObjIdModel):
user_group_id = models.AutoField(primary_key=True, db_column='usergroupid')
name = models.CharField(unique=True, max_length=128, db_column='usergroup')
class Meta:
# managed = settings.MANAGE_RBUILDER_MODELS
db_table = u'usergroups'
_xobj = xobj.XObjMetadata(tag='user_group')
_xobj_hidden_accessors = set(['user_members_group_id'])
def __unicode__(self):
return self.name
class Users(modellib.Collection):
class Meta:
abstract = True
_xobj = xobj.XObjMetadata(tag='users')
list_fields = ['user']
view_name = 'Users'
class User(modellib.XObjIdModel):
objects = manager_model.UserManager()
user_id = models.AutoField(primary_key=True, db_column='userid')
user_name = models.CharField(unique=True, max_length=128, db_column='username')
full_name = models.CharField(max_length=128, db_column='fullname')
# salt and password should be hidden, users shouldn't see crypted
# passwords
salt = modellib.XObjHidden(models.TextField()) # This field type is a guess.
passwd = modellib.XObjHidden(models.CharField(max_length=254))
email = models.CharField(max_length=128)
display_email = models.TextField(db_column='displayemail')
created_date = models.DecimalField(max_digits=14, decimal_places=3, db_column='timecreated')
modified_date = models.DecimalField(max_digits=14, decimal_places=3, db_column='timeaccessed')
active = models.BooleanField()
blurb = models.TextField()
user_groups = modellib.DeferredManyToManyField(UserGroup, through="UserGroupMember", db_column='user_group_id', related_name='group')
is_admin = modellib.SyntheticField()
# Field used for the clear-text password when it is to be
# set/changed
password = modellib.XObjHidden(modellib.SyntheticField())
class Meta:
# managed = settings.MANAGE_RBUILDER_MODELS
db_table = u'users'
_xobj = xobj.XObjMetadata(tag='user', attributes = {'id':str})
_xobj_hidden_accessors = set(['creator', 'package_version_urls_last_modified',
'packages_last_modified', 'releaseCreator', 'imageCreator', 'package_source_jobs_created',
'releasePublisher', 'releaseUpdater', 'package_build_jobs_last_modified',
'package_build_jobs_created', 'package_builds_created', 'package_version_jobs_created',
'imageUpdater', 'package_version_urls_created', 'package_versions_last_modified',
'package_source_jobs_last_modified', 'package_builds_last_modified',
'targetusercredentials_set', 'package_version_jobs_last_modified', 'package_sources_created',
'system_set', 'package_builds_jobs_last_modified', 'package_sources_last_modified',
'usermember', 'package_versions_created', 'packages_created', 'user',
'created_images', 'updated_images', 'project_membership',
'created_releases', 'updated_releases', 'published_releases', 'user_tags'])
def __unicode__(self):
return self.user_name
def getIsAdmin(self):
# A bit of SQL here, so we only do one trip to the db
cu = connection.cursor()
cu.execute("""
SELECT 1
FROM UserGroupMembers
JOIN UserGroups USING (usergroupid)
WHERE UserGroups.usergroup = 'MintAdmin'
AND UserGroupMembers.userid = %s
""", [self.user_id])
row = cu.fetchone()
return bool(row)
def set_is_admin(self):
isAdmin = self.getIsAdmin()
# Unfortunately we don't have boolean synthetic fields yet, so
# let's save the string representation of it
self.is_admin = str(bool(isAdmin)).lower()
def save(self):
# Omit the salt field
localFields = self._meta.local_fields
neuteredFields = getattr(self._meta, 'neuteredLocalFields', None)
if neuteredFields is None:
neuteredFields = [ x for x in localFields if x.name != 'salt' ]
self._meta.neuteredLocalFields = neuteredFields
try:
self._meta.local_fields = neuteredFields
return super(User, self).save()
finally:
self._meta.local_fields = localFields
class UserGroupMembers(modellib.Collection):
class Meta:
abstract = True
list_fields = ['user_group_member']
_xobj = xobj.XObjMetadata(tag='user_group_members')
class UserGroupMember(modellib.XObjIdModel):
class Meta:
db_table = u'usergroupmembers'
user_group_id = modellib.XObjHidden(modellib.DeferredForeignKey(UserGroup, db_column='usergroupid', related_name='user_members_group_id'))
user_id = modellib.DeferredForeignKey(User, db_column='userid', related_name='usermember')
_xobj = xobj.XObjMetadata(tag='user_group_member')
for mod_obj in sys.modules[__name__].__dict__.values():
if hasattr(mod_obj, '_xobj'):
if mod_obj._xobj.tag:
modellib.type_map[mod_obj._xobj.tag] = mod_obj
|
Python
| 0.000014
|
@@ -4164,32 +4164,116 @@
 def save(self):
+        # Make active an integer field
+        self.active = int(bool(self.active))
 # Omit t
|
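Per the subject, the column is an integer in the database even though the model declares a BooleanField, so save() now coerces the value first. A runnable sketch of the int(bool(...)) idiom in isolation:

```python
def normalize_active(value):
    # int(bool(x)) collapses any truthy/falsy value (None, '', 0, True,
    # 'yes', ...) to the 0 or 1 an integer column expects.
    return int(bool(value))

assert normalize_active(True) == 1
assert normalize_active('yes') == 1
assert normalize_active(None) == 0
```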
8a45ba91e64bebb345d2550d0442d2993be4b15c
|
make sure we build
|
publish.py
|
publish.py
|
#!/usr/bin/env python3
import asyncio
import logging
import re
import requests
#import sh
import sys
import time
import yaml
from datetime import datetime
from os import makedirs
from pathlib import Path
from aiohttp import ClientSession
TEMPLATE = """\
---
{front_matter}
---
{content}
"""
ROOT_DIR = Path.cwd()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def timing(f):
"""
decorator to time a function and print it
:param f: function to wrap
:return: wrapped function
"""
def wrap():
start = time.time()
f()
end = time.time()
logger.info(f"{f.__name__} took {end - start!r}")
return wrap
def slugify(name):
"""
Takes a article name and returns a slug appropriate version using hyphens
:param name: string to be converted
:return: converted string
"""
out = re.sub(r'[^\w\d\s]', '', name)
return re.sub(r'\s', '-', out)
def create_item(item):
"""
Takes a hacker news json object and creates a hugo markdown page from it
:param item: dict of article data
"""
yml = {}
slug = slugify(item.get('title'))
makedirs('./content/post/', exist_ok=True)
file_name = './content/post/{}.md'.format(slug)
with open(file=file_name, mode='w', encoding='utf-8') as f:
yml['date'] = datetime.fromtimestamp(item.get('time')).isoformat()
yml['linkurl'] = item.get('url')
yml['slug'] = slug
yml['tags'] = []
yml['categories'] = ["{}".format(item.get('type'))] if item.get('type', None) else []
f.write(TEMPLATE.format(front_matter=yaml.dump(yml).strip(), content=""))
def hugo_build():
"""
Builds the hugo site
We overwrite the baseurl all other settings are fine
"""
hugo = sh.hugo.bake(_cwd=str(ROOT_DIR))
hugo('--baseURL=https://davidejones.github.io/hugo-hn/', [], _out=sys.stdout)
def get_content_sync(data):
"""
Synchronously hit each hacker news article item for download.
:param data: tuple of url and article type
:return: list of json responses
"""
responses = []
for url, article_type in data:
response = requests.get(url)
for article_id in response.json():
item_request = requests.get('https://hacker-news.firebaseio.com/v0/item/{}.json'.format(article_id))
item = item_request.json()
item['type'] = article_type
responses.append(item)
return responses
async def get_content_async(data):
"""
Asynchronously hit each hacker news article item for download.
:param data: tuple of url and article type
:return: list of json responses
"""
parent_tasks = []
child_tasks = []
async with ClientSession(loop=asyncio.get_event_loop()) as session:
for url, article_type in data:
parent_tasks.append(asyncio.create_task(fetch(url, session)))
results = await asyncio.gather(*parent_tasks)
for index, section in enumerate(results):
_, article_type = data[index]
for article_id in section:
child_tasks.append(asyncio.create_task(
fetch(f'https://hacker-news.firebaseio.com/v0/item/{article_id}.json', session, article_type)))
return await asyncio.gather(*child_tasks)
async def fetch(url, session, article_type=None):
"""
Async fetch returning
:param url: url to hit
:param session: open session to make calls with
:param article_type: the type name of the article for front matter
:return: json response
"""
async with session.get(url) as response:
data = await response.json()
if article_type:
data['type'] = article_type
return data
@timing
def main():
"""
Entry function that grabs hacker news content saves it and builds the html site
"""
id_data = [('https://hacker-news.firebaseio.com/v0/topstories.json', 'story'),
('https://hacker-news.firebaseio.com/v0/askstories.json', 'ask'),
('https://hacker-news.firebaseio.com/v0/showstories.json', 'show'),
('https://hacker-news.firebaseio.com/v0/jobstories.json', 'job')]
# responses = get_content_sync(id_data)
responses = asyncio.run(get_content_async(id_data))
for item in responses:
create_item(item)
#hugo_build()
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -4394,17 +4394,16 @@
 m)

-#
 hugo_bui
|
66370cecd7bdf9d0b5ecd358aa58b4f567d45c95
|
add a new keyword to the pypy lexer
|
pygments/lexers/pypylog.py
|
pygments/lexers/pypylog.py
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.pypylog
~~~~~~~~~~~~~~~~~~~~~~~
Lexer for pypy log files.
:copyright: Copyright 2006-2011 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, include
from pygments.token import Text, Keyword, Number, Comment, Punctuation, Name, \
String
__all__ = ["PyPyLogLexer"]
class PyPyLogLexer(RegexLexer):
"""
Lexer for PyPy log files.
*New in Pygments 1.5.*
"""
name = "PyPy Log"
aliases = ["pypylog", "pypy"]
filenames = ["*.pypylog"]
mimetypes = ['application/x-pypylog']
tokens = {
"root": [
(r"\[\w+\] {jit-log-.*?$", Keyword, "jit-log"),
(r"\[\w+\] {jit-backend-counts$", Keyword, "jit-backend-counts"),
include("extra-stuff"),
],
"jit-log": [
(r"\[\w+\] jit-log-.*?}$", Keyword, "#pop"),
(r"[ifp]\d+", Name),
(r"ptr\d+", Name),
(r"(\()([\w_]+(?:\.[\w_]+)?)(\))",
bygroups(Punctuation, Name.Builtin, Punctuation)),
(r"[\[\]=,()]", Punctuation),
(r"(\d+\.\d+|inf|-inf)", Number.Float),
(r"-?\d+", Number.Integer),
(r"'.*'", String),
(r"(None|descr|ConstClass|ConstPtr)", Name),
(r"<.*?>", Name.Builtin),
(r"(debug_merge_point|jump|finish)", Name.Class),
(r"(int_add_ovf|int_add|int_sub_ovf|int_sub|int_mul_ovf|int_mul|"
r"int_floordiv|int_mod|int_lshift|int_rshift|int_and|int_or|"
r"int_xor|int_eq|int_ne|int_ge|int_gt|int_le|int_lt|int_is_zero|"
r"int_is_true|"
r"uint_floordiv|uint_ge|uint_lt|"
r"float_add|float_sub|float_mul|float_truediv|"
r"float_eq|float_ne|float_ge|float_gt|float_le|float_lt|float_abs|"
r"ptr_eq|"
r"cast_int_to_float|cast_float_to_int|cast_opaque_ptr|"
r"force_token|quasiimmut_field|same_as|virtual_ref_finish|virtual_ref|"
r"call_may_force|call_assembler|call_loopinvariant|call_release_gil|call_pure|call|"
r"new_with_vtable|new_array|newstr|newunicode|new|"
r"arraylen_gc|"
r"getarrayitem_gc_pure|getarrayitem_gc|setarrayitem_gc|"
r"getarrayitem_raw|setarrayitem_raw|getfield_gc_pure|getfield_gc|"
r"getfield_raw|setfield_gc|setfield_raw|"
r"strgetitem|strsetitem|strlen|copystrcontent|"
r"unicodegetitem|unicodesetitem|unicodelen|"
r"guard_true|guard_false|guard_value|guard_isnull|"
r"guard_nonnull_class|guard_nonnull|guard_class|guard_no_overflow|"
r"guard_not_forced|guard_no_exception|guard_not_invalidated)",
Name.Builtin),
include("extra-stuff"),
],
"jit-backend-counts": [
(r"\[\w+\] jit-backend-counts}$", Keyword, "#pop"),
(r"[:]", Punctuation),
(r"\d+", Number),
include("extra-stuff"),
],
"extra-stuff": [
(r"[\n\s]+", Text),
(r"#.*?$", Comment),
],
}
|
Python
| 0
|
@@ -2401,24 +2401,44 @@
 getfield_gc|
+getinteriorfield_gc|
 "
|
ebd152ca9b4126776e0f035477791be587907a8b
|
Fix coding style and add a file header.
|
pygments/lexers/pypylog.py
|
pygments/lexers/pypylog.py
|
from pygments.lexer import RegexLexer, bygroups, include
from pygments.token import (Text, Keyword, Number, Comment, Punctuation, Name,
String, Literal)
__all__ = [
"PyPyLogLexer",
]
class PyPyLogLexer(RegexLexer):
"""
Lexer for PyPy log files.
*New in Pygments 1.5.*
"""
name = "PyPy Log"
aliases = ["pypylog", "pypy"]
filenames = ["*.pypylog"]
mimetypes = ['application/x-pypylog']
tokens = {
"root": [
(r"\[\w+\] {jit-log-.*?$", Keyword, "jit-log"),
(r"\[\w+\] {jit-backend-counts$", Keyword, "jit-backend-counts"),
include("extra-stuff"),
],
"jit-log": [
(r"\[\w+\] jit-log-.*?}$", Keyword, "#pop"),
(r"[ifp]\d+", Name),
(r"ptr\d+", Name),
(r"(\()([\w_]+(?:\.[\w_]+)?)(\))", bygroups(Punctuation, Name.Builtin, Punctuation)),
(r"[\[\]=,()]", Punctuation),
(r"(\d+\.\d+|inf|-inf)", Number.Float),
(r"-?\d+", Number.Integer),
(r"'.*'", String),
(r"(None|descr|ConstClass|ConstPtr)", Name),
(r"<.*?>", Name.Builtin),
(r"(debug_merge_point|jump|finish)", Name.Class),
(r"(int_add_ovf|int_add|int_sub_ovf|int_sub|int_mul_ovf|int_mul|int_rshift|int_and|int_or|int_xor|"
r"int_eq|int_ne|int_ge|int_gt|int_le|int_lt|int_is_zero|int_is_true|"
r"uint_floordiv|"
r"uint_ge|uint_lt|"
r"float_add|float_sub|float_mul|float_truediv|"
r"float_eq|float_ne|float_gt|"
r"ptr_eq|"
r"force_token|"
r"call_may_force|call_assembler|call|"
r"new_with_vtable|new_array|newstr|newunicode|new|"
r"arraylen_gc|"
r"getarrayitem_gc_pure|getarrayitem_gc|setarrayitem_gc|getarrayitem_raw|setarrayitem_raw|"
r"getfield_gc_pure|getfield_gc|getfield_raw|setfield_gc|setfield_raw|"
r"strgetitem|strsetitem|strlen|copystrcontent|"
r"unicodegetitem|unicodesetitem|unicodelen|"
r"guard_true|guard_false|guard_value|guard_isnull|guard_nonnull_class|guard_nonnull|guard_class|guard_no_overflow|guard_not_forced|guard_no_exception|guard_not_invalidated)", Name.Builtin),
include("extra-stuff"),
],
"jit-backend-counts": [
(r"\[\w+\] jit-backend-counts}$", Keyword, "#pop"),
(r"[:]", Punctuation),
(r"\d+", Number),
include("extra-stuff"),
],
"extra-stuff": [
(r"[\n\s]+", Text),
(r"#.*?$", Comment),
],
}
|
Python
| 0
|
@@ -1,20 +1,256 @@
+# -*- coding: utf-8 -*-
+"""
+    pygments.lexers.pypylog
+    ~~~~~~~~~~~~~~~~~~~~~~~
+
+    Lexer for pypy log files.
+
+    :copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
 from pygments.lexer
@@ -313,17 +313,16 @@
 import 
-(
 Text, Ke
@@ -363,16 +363,18 @@
 n, Name,
+ \
 
 Str
@@ -380,18 +380,8 @@
 ring
-, Literal)
 

 _
@@ -390,21 +390,16 @@
 ll__ = [
-
 "PyPyLog
@@ -408,11 +408,10 @@
 xer"
-,
 ]
+

 cl
@@ -1050,16 +1050,29 @@
 ?)(\))",
+
 bygroup
@@ -1513,16 +1513,33 @@
 int_mul|
+"
+             r"
 int_rshi
@@ -1564,33 +1564,16 @@
 int_xor|
-"
-             r"
 int_eq|i
@@ -1592,16 +1592,33 @@
 |int_gt|
+"
+             r"
 int_le|i
@@ -1678,33 +1678,16 @@
 loordiv|
-"
-             r"
 uint_ge|
@@ -2068,16 +2068,33 @@
 item_gc|
+"
+             r"
 getarray
@@ -2119,33 +2119,16 @@
 tem_raw|
-"
-             r"
 getfield
@@ -2148,16 +2148,33 @@
 ield_gc|
+"
+             r"
 getfield
@@ -2383,24 +2383,41 @@
 uard_isnull|
+"
+             r"
 guard_nonnul
@@ -2468,16 +2468,33 @@
 verflow|
+"
+             r"
 guard_no
@@ -2545,16 +2545,29 @@
 dated)",
+
 Name.Bu
|
757e95e297f27f46200b832e532f3cba95b68df8
|
move __all__ directly after imports
|
pylons/i18n/translation.py
|
pylons/i18n/translation.py
|
"""Translation/Localization functions.
Provides ``gettext`` translation functions via an app's ``pylons.translator``
and get/set_lang for changing the language translated to.
"""
import os
from gettext import NullTranslations, translation
import pylons
class LanguageError(Exception):
"""Exception raised when a problem occurs with changing languages"""
pass
class LazyString(object):
"""Has a number of lazily evaluated functions replicating a string. Just
override the eval() method to produce the actual value.
This method copied from TurboGears.
"""
def __init__(self, func, *args, **kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
def eval(self):
return self.func(*self.args, **self.kwargs)
def __unicode__(self):
return unicode(self.eval())
def __str__(self):
return str(self.eval())
def __mod__(self, other):
return self.eval() % other
def lazify(func):
"""Decorator to return a lazy-evaluated version of the original"""
def newfunc(*args, **kwargs):
return LazyString(func, *args, **kwargs)
try:
newfunc.__name__ = 'lazy_%s' % func.__name__
except TypeError: # Python < 2.4
pass
newfunc.__doc__ = 'Lazy-evaluated version of the %s function\n\n%s' % \
(func.__name__, func.__doc__)
return newfunc
def gettext_noop(value):
"""Mark a string for translation without translating it. Returns value.
Used for global strings, e.g.:
.. code-block:: Python
foo = N_('Hello')
class Bar:
def __init__(self):
self.local_foo = _(foo)
h.set_lang('fr')
assert Bar().local_foo == 'Bonjour'
h.set_lang('es')
assert Bar().local_foo == 'Hola'
assert foo == 'Hello'
"""
return value
N_ = gettext_noop
def gettext(value):
"""Mark a string for translation. Returns the localized string of value.
Mark a string to be localized as follows:
.. code-block:: Python
gettext('This should be in lots of languages')
"""
return pylons.translator.gettext(value)
lazy_gettext = lazify(gettext)
def ugettext(value):
"""Mark a string for translation. Returns the localized unicode string of
value.
Mark a string to be localized as follows:
.. code-block:: Python
_('This should be in lots of languages')
"""
return pylons.translator.ugettext(value)
_ = ugettext
lazy_ugettext = lazify(ugettext)
def ngettext(singular, plural, n):
"""Mark a string for translation. Returns the localized string of the
pluralized value.
This does a plural-forms lookup of a message id. ``singular`` is used as
the message id for purposes of lookup in the catalog, while ``n`` is used
to determine which plural form to use. The returned message is a string.
Mark a string to be localized as follows:
.. code-block:: Python
ngettext('There is %(num)d file here', 'There are %(num)d files here',
n) % {'num': n}
"""
return pylons.translator.ngettext(singular, plural, n)
lazy_ngettext = lazify(ngettext)
def ungettext(singular, plural, n):
"""Mark a string for translation. Returns the localized unicode string of
the pluralized value.
This does a plural-forms lookup of a message id. ``singular`` is used as
the message id for purposes of lookup in the catalog, while ``n`` is used
to determine which plural form to use. The returned message is a Unicode
string.
Mark a string to be localized as follows:
.. code-block:: Python
ungettext('There is %(num)d file here', 'There are %(num)d files here',
n) % {'num': n}
"""
return pylons.translator.ungettext(singular, plural, n)
lazy_ungettext = lazify(ungettext)
def _get_translator(lang, **kwargs):
"""Utility method to get a valid translator object from a language name"""
conf = pylons.config.current_conf()
# XXX: root_path is deprecated
rootdir = conf['pylons.paths'].get('root',
conf['pylons.paths'].get('root_path'))
localedir = os.path.join(rootdir, 'i18n')
if not isinstance(lang, list):
lang = [lang]
try:
translator = translation(conf['pylons.package'], localedir,
languages=lang, **kwargs)
except IOError, ioe:
raise LanguageError('IOError: %s' % ioe)
translator.pylons_lang = lang
return translator
def set_lang(lang, **kwargs):
"""Set the i18n language used"""
registry = pylons.request.environ['paste.registry']
if not lang:
registry.replace(pylons.translator, NullTranslations())
else:
translator = _get_translator(lang, **kwargs)
registry.replace(pylons.translator, translator)
def get_lang():
"""Return the current i18n language used"""
return getattr(pylons.translator, 'pylons_lang', None)
def add_fallback(lang):
"""Add a fallback language from which words not matched in other languages
will be translated to."""
return pylons.translator.add_fallback(_get_translator(lang))
__all__ = ['gettext_noop', 'N_', 'gettext', 'ugettext', '_', 'ngettext',
'ungettext', 'lazy_gettext', 'lazy_ugettext', 'lazy_ngettext',
'lazy_ungettext', 'set_lang', 'get_lang', 'LanguageError']
|
Python
| 0.000002
|
@@ -249,16 +249,234 @@
 pylons

+__all__ = ['gettext_noop', 'N_', 'gettext', 'ugettext', '_', 'ngettext',
+           'ungettext', 'lazy_gettext', 'lazy_ugettext', 'lazy_ngettext',
+           'lazy_ungettext', 'set_lang', 'get_lang', 'LanguageError']
+
 class La
@@ -5463,222 +5463,4 @@
 g))
-
-__all__ = ['gettext_noop', 'N_', 'gettext', 'ugettext', '_', 'ngettext',
-           'ungettext', 'lazy_gettext', 'lazy_ugettext', 'lazy_ngettext',
-           'lazy_ungettext', 'set_lang', 'get_lang', 'LanguageError']
|
38a4b41d942f40dd16e1a1c88ab68c0b9169ff0c
|
update tts
|
slackbot/plugins/tts.py
|
slackbot/plugins/tts.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# tts.py ---
#
# Filename: tts.py
# Description:
# Author: Werther Zhang
# Maintainer:
# Created: Sun Sep 10 16:24:08 2017 (+0800)
#
# Change Log:
#
#
import os
import sys
import baidutts
import hashlib
import commands
from slackbot.bot import plugin_init
from slackbot.bot import respond_to
class TTS(object):
def __init__(self, config, method):
self.__ttsdriver = None
if method == 'baidu':
self.__ttsdriver = baidutts.BaiduTTS(config.get('apikey', ""),
config.get('secretkey', ""),
config.get('speed', 5),
config.get('pitch', 9),
config.get('volume', 9),
config.get('person', 3))
def __text2tts(self, message):
return self.__ttsdriver.get_tts_audio(message, 'zh')
def __md5sum(contents):
hash = hashlib.md5()
hash.update(contents)
return hash.hexdigest()
def __mplayer(f):
st, output = commands.getstatusoutput('mplayer -really-quiet -noconsolecontrols -volume 82 {}'.format(f))
def text2play(self, message):
t, d = self.__text2tts(message)
basename = self.__md5sum(d)
basename = os.path.join('/tmp/' + basename + '.' + t)
with open(basename, 'w') as f:
f.write(d)
self.__mplayer(basename)
os.remove(basename)
tts_obj = None
@plugin_init
def init_tts(config):
global tts_obj
enable = config.get('enable', False)
driver = config.get('driver', 'baidu')
if enable:
tts_obj = TTS(config, driver)
@respond_to(r'tts (.*)')
def tts_command(message, rest):
global tts_obj
tts_obj.text2play(rest)
|
Python
| 0.000001
|
@@ -337,16 +337,133 @@
 ond_to

+try:
+    from pydub import AudioSegment
+except Exception as e:
+    print 'Missing module pydub, please install it'
+
 class TT
@@ -1024,16 +1024,357 @@
 , 3))

+    def __insert_silent(self, media_file, ftype):
+        try:
+            silent = AudioSegment.silent(duration=1000)
+            sound1 = AudioSegment.from_file(media_file, ftype)
+            combined = silent + sound1
+            combined.export(media_file, format=ftype)
+        except Exception as e:
+            print("{}".format(e))
+
 def
@@ -1701,17 +1701,28 @@
 volume 8
-2
+5 -speed 0.8
 {}'.for
@@ -1728,16 +1728,93 @@
 rmat(f))
+
+        if st != 0:
+            print('mplayer output:\n {}'.format(output))
 
 de
|
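The substantive addition is the pydub helper that prepends a second of silence so slow-to-wake speakers do not clip the start of playback. A self-contained Python 3 sketch of the same technique, assuming pydub and ffmpeg are available:

```python
from pydub import AudioSegment

def prepend_silence(media_file, ftype, duration_ms=1000):
    # Prepend a short silent segment so audio hardware that ramps up
    # slowly does not swallow the first spoken syllables.
    silence = AudioSegment.silent(duration=duration_ms)
    sound = AudioSegment.from_file(media_file, ftype)
    (silence + sound).export(media_file, format=ftype)

# prepend_silence('/tmp/tts.mp3', 'mp3')  # hypothetical file for illustration
```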
8f4f902042b848a6a212ae966aaf6435ae8d5c77
|
set background as a widget
|
sheldonchess/interface/web/sheldonchess.py
|
sheldonchess/interface/web/sheldonchess.py
|
from rajesh import Application, run, expr
from rajesh.element import Img, Div
from screens import MainMenu, NormalGameLobby
class Player(object):
def __init__(self, app):
self.app = app
self.name = ""
class SheldonChess(Application):
def begin(self):
self.player = Player(self)
self.title = "Sheldon Chess"
background = Img(id="background", src="images/sheldonchess_background.png", width="100%", height="100%")
self.put(background, (0, 0))
info_box = Div(id="info_box")
self.put(info_box, ("50%", 0))
main_menu = MainMenu(self)
self.put(main_menu, ("50%", "50%"))
def connectionLost(self, reason):
for player in NormalGameLobby.players:
if player == self.player:
NormalGameLobby.players.remove(player)
NormalGameLobby.update_players()
if __name__ == "__main__":
run()
|
Python
| 0.000001
|
@@ -354,16 +354,21 @@
 
+self.
 backgrou
@@ -376,33 +376,8 @@
 d = 
-Img(id="background", src=
 "ima
@@ -412,75 +412,8 @@
 png"
-, width="100%", height="100%")
-        self.put(background, (0, 0))
 
|
86d3a58f46599d9afe07e4d8c0a9d4171d5219bd
|
Fix for when the user is a PyPI admin
|
pypi_show_urls/__main__.py
|
pypi_show_urls/__main__.py
|
# Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import itertools
import sys
import urlparse
import xmlrpclib
import lxml.html
import requests
from pkg_resources import safe_name
from setuptools.package_index import distros_for_url
def installable(project, url):
normalized = safe_name(project).lower()
return bool([dist for dist in distros_for_url(url) if
safe_name(dist.project_name).lower() == normalized])
def version_for_url(project, url):
normalized = safe_name(project).lower()
return [dist for dist in distros_for_url(url) if
safe_name(dist.project_name).lower() == normalized][0].version
def process_page(html, package, url, verbose):
if verbose:
print("")
print(" Candidates from %s" % url)
print(" ----------------" + ("-" * len(url)))
installable_ = set()
for link in html.xpath("//a"):
try:
link.make_links_absolute(url)
except ValueError:
continue
if "href" in link.attrib and installable(package, link.attrib["href"]):
if verbose:
print(" " + link.attrib["href"])
installable_.add((url, link.attrib["href"]))
if not verbose:
print(" %s Candiates from %s" % (len(installable_), url))
return installable_
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true")
group = parser.add_argument_group('type')
group.add_argument("-p", "--packages",
dest="is_packages", action="store_true")
group.add_argument("-u", "--users", dest="is_users", action="store_true")
parser.add_argument("items", nargs="+")
args = parser.parse_args()
if args.is_packages and args.is_users:
return "Must specify only one of -u and -p"
if not args.is_packages and not args.is_users:
return "Must specify one of -u or -p"
if args.is_packages:
# A list of packages to look for
packages = args.items
if args.is_users:
# a list of users
users = args.items
xmlrpc = xmlrpclib.ServerProxy("https://pypi.python.org/pypi")
packages = []
for user in users:
packages.extend([x[1] for x in xmlrpc.user_packages(user)])
# Should we run in verbose mode
verbose = args.verbose
session = requests.session()
session.verify = False
for package in packages:
print("")
print("Download candidates for %s" % package)
print("========================" + ("=" * len(package)))
# Grab the page from PyPI
url = "https://pypi.python.org/simple/%s/" % package
resp = session.get(url)
if resp.status_code == 404:
continue
resp.raise_for_status()
html = lxml.html.document_fromstring(resp.content)
spider = set()
installable_ = set()
for link in itertools.chain(
html.find_rel_links("download"),
html.find_rel_links("homepage")):
try:
link.make_links_absolute(url)
except ValueError:
continue
if "href" in link.attrib and not \
installable(package, link.attrib["href"]):
parsed = urlparse.urlparse(link.attrib["href"])
if parsed.scheme.lower() in ["http", "https"]:
spider.add(link.attrib["href"])
# Find installable links from the PyPI page
installable_ |= process_page(html, package, url, verbose)
# Find installable links from pages we spider
for link in spider:
try:
resp = session.get(link)
resp.raise_for_status()
except Exception:
continue
html = lxml.html.document_fromstring(resp.content)
installable_ |= process_page(html, package, link, verbose)
# Find the ones only available externally
internal = set()
external = set()
for candidate in installable_:
version = version_for_url(package, candidate[1])
if candidate[0] == url:
internal.add(version)
else:
external.add(version)
# Display information
if verbose:
print("")
print(" Versions only available externally")
print(" ----------------------------------")
for version in (external - internal):
print(" " + version)
else:
print(" %s versions only available externally" %
len((external - internal)))
if __name__ == "__main__":
sys.exit(main())
|
Python
| 0
|
@@ -2880,16 +2880,92 @@
 es(user)
+
+                             if x[1] is not None
 ])

|
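Per the subject, PyPI admins can appear in the xmlrpc user_packages() results with a None package name, which the decoded guard now filters out. A runnable sketch of the fix in isolation (the sample data is hypothetical):

```python
# user_packages() yields (role, package_name) pairs; for a PyPI admin the
# package name can be None, which would break the later page fetches.
user_packages = [('Owner', 'requests'), ('Admin', None), ('Owner', 'lxml')]

packages = [x[1] for x in user_packages if x[1] is not None]
assert packages == ['requests', 'lxml']
```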
87b659ad0fe4932161939ffc437f9e09549b302a
|
Update shash test after adding BCRYPT_SALT
|
modules/deaddrop/files/deaddrop/crypto.py
|
modules/deaddrop/files/deaddrop/crypto.py
|
# -*- coding: utf-8 -*-
import bcrypt, subprocess, random, threading
myrandom = random.SystemRandom()
import gnupg
import config
import store
WORDS_IN_RANDOM_ID = 4
GPG_KEY_TYPE = "RSA"
GPG_KEY_LENGTH = "4096"
class CryptoException(Exception): pass
def clean(s, also=''):
"""
>>> clean("Hello, world!")
Traceback (most recent call last):
...
CryptoException: invalid input
>>> clean("Helloworld")
'Helloworld'
"""
ok = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
for c in s:
if c not in ok and c not in also: raise CryptoException("invalid input")
return s
words = file(config.WORD_LIST).read().split('\n')
def genrandomid():
return ' '.join(myrandom.choice(words) for x in range(WORDS_IN_RANDOM_ID))
def displayid(n):
badrandom = random.WichmannHill()
badrandom.seed(n)
return ' '.join(badrandom.choice(words) for x in range(WORDS_IN_RANDOM_ID))
def shash(s):
"""
>>> shash('Hello, world!')
'$2a$12$EW1aG/sVSDObG7QZu.xhHudPAJYajRpDaweePfwWK.iYn1C/tPnj6'
"""
return bcrypt.hashpw(s, config.BCRYPT_SALT)
GPG_BINARY = 'gpg2'
try:
p = subprocess.Popen([GPG_BINARY, '--version'], stdout=subprocess.PIPE)
except OSError:
GPG_BINARY = 'gpg'
p = subprocess.Popen([GPG_BINARY, '--version'], stdout=subprocess.PIPE)
assert p.stdout.readline().split()[-1].split('.')[0] == '2', "upgrade GPG to 2.0"
gpg = gnupg.GPG(gpgbinary=GPG_BINARY, gnupghome=config.GPG_KEY_DIR)
def genkeypair(name, secret):
"""
>>> if not gpg.list_keys(shash('randomid')):
... genkeypair(shash('randomid'), 'randomid').type
... else:
... u'P'
u'P'
"""
name, secret = clean(name), clean(secret, ' ')
return gpg.gen_key(gpg.gen_key_input(
key_type=GPG_KEY_TYPE, key_length=GPG_KEY_LENGTH,
passphrase=secret,
name_email="%s@deaddrop.example.com" % name
))
def getkey(name):
for key in gpg.list_keys():
for uid in key['uids']:
if ' <%s@' % name in uid: return key['fingerprint']
return None
def _shquote(s):
return "\\'".join("'" + p + "'" for p in s.split("'"))
_gpghacklock = threading.Lock()
def encrypt(fp, s, output=None, fn=None):
r"""
>>> encrypt(shash('randomid'), "Goodbye, cruel world!")[:75]
'-----BEGIN PGP MESSAGE-----\nVersion: GnuPG/MacGPG2 v2.0.17 (Darwin)\n\nhQIMA3'
"""
if output:
store.verify(output)
fp = fp.replace(' ', '')
if isinstance(s, unicode):
s = s.encode('utf8')
if isinstance(s, str):
out = gpg.encrypt(s, [fp], output=output, always_trust=True)
else:
if fn:
with _gpghacklock:
oldname = gpg.gpgbinary
gpg.gpgbinary += ' --set-filename ' + _shquote(fn)
out = gpg.encrypt_file(s, [fp], output=output, always_trust=True)
gpg.gpgbinary = oldname
else:
out = gpg.encrypt_file(s, [fp], output=output, always_trust=True)
if out.ok:
return out.data
else:
raise CryptoException(out.stderr)
def decrypt(name, secret, s):
"""
>>> decrypt(shash('randomid'), 'randomid',
... encrypt(shash('randomid'), 'Goodbye, cruel world!')
... )
'Goodbye, cruel world!'
"""
return gpg.decrypt(s, passphrase=secret).data
def secureunlink(fn):
store.verify(fn)
return subprocess.check_call(['srm', fn])
# crash if we don't have srm:
try:
subprocess.check_call(['srm'], stdout=subprocess.PIPE)
except subprocess.CalledProcessError:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
|
Python
| 0
|
@@ -1006,61 +1006,61 @@
$12$
-EW1aG/sVSDObG7QZu.xhHudPAJYajRpDaweePfwWK.iYn1C/tPnj6
+gLZnkcyhZBrWbCZKHKYgKee8g/Yb9O7.24/H.09Yu9Jt9hzW6n0Ky
'%0A
|
fac2c5752c23d2fd415caafd2654f696c4842806
|
Bump version.
|
pyramid_addons/__init__.py
|
pyramid_addons/__init__.py
|
__version__ = '0.20'
|
Python
| 0
|
@@ -11,11 +11,11 @@
 _ = '0.2
-0
+1
 '
|
1159cda1437085218b79345244897f2be8990ca9
|
fix tell delivery and possibly db lock
|
pyscp_bot/modules/notes.py
|
pyscp_bot/modules/notes.py
|
#!/usr/bin/env python3
###############################################################################
# Module Imports
###############################################################################
import arrow
import peewee
import sopel
import re
import pyscp_bot.jarvis as vocab
###############################################################################
db = peewee.SqliteDatabase('jarvis.db')
class BaseModel(peewee.Model):
class Meta:
database = db
class Tell(BaseModel):
sender = peewee.CharField()
recipient = peewee.CharField()
message = peewee.TextField()
time = peewee.DateTimeField()
class Seen(BaseModel):
pass
class Message(BaseModel):
user = peewee.CharField()
channel = peewee.CharField()
time = peewee.CharField()
text = peewee.TextField()
###############################################################################
def setup(bot):
db.connect()
Tell.create_table(True)
Seen.drop_table(True)
Message.create_table(True)
sopel.bot.Sopel._say = sopel.bot.Sopel.say
sopel.bot.Sopel.say = log_and_say
@sopel.module.commands('tell')
def tell(bot, trigger):
name, text = trigger.group(2).split(maxsplit=1)
name = name.strip().lower()
now = arrow.utcnow().timestamp
Tell.create(
sender=str(trigger.nick), recipient=name, message=text, time=now)
bot.say(vocab.tell_stored(trigger.nick))
@sopel.module.rule('.*')
def chat_activity(bot, trigger):
user = trigger.nick.strip()
channel = trigger.sender
time = arrow.utcnow().timestamp
message = trigger.group(0)
Message.create(user=user, channel=channel, time=time, text=message)
if re.match(r'[!\.](st|showt|showtells)$', trigger.group(0)):
deliver_tells(bot, trigger.nick)
def log_and_say(bot, text, recipient, max_messages=1):
if recipient != 'NickServ':
time = arrow.utcnow().timestamp
Message.create(
user=bot.config.core.nick, channel=recipient, time=time, text=text)
bot._say(text, recipient, max_messages)
@sopel.module.commands('showtells', 'showt', 'st')
def showtells(bot, trigger):
if Tell.select().where(Tell.recipient == trigger.nick.lower()).exists():
deliver_tells(bot, trigger.nick)
else:
bot.notice(vocab.no_tells(trigger.nick), trigger.nick)
@sopel.module.commands('seen')
def seen(bot, trigger):
name = trigger.group(2).strip().lower()
channel = trigger.sender
try:
message = (
Message.select()
.where(
peewee.fn.Lower(Message.user) == name,
Message.channel == channel)
.limit(1).order_by(Message.time.desc()).get())
time = arrow.get(message.time).humanize()
bot.say('{}: I saw {} {} saying "{}"'.format(
trigger.nick, message.user, time, message.text))
except Message.DoesNotExist:
bot.say(vocab.user_never_seen(trigger.nick))
def deliver_tells(bot, name):
query = Tell.select().where(Tell.recipient == name.lower())
if not query.exists():
return
bot.notice(
'{}: you have {} new messages.'.format(name, query.count()), name)
for tell in query:
time_passed = arrow.get(tell.time).humanize()
msg = '{} said {}: {}'.format(tell.sender, time_passed, tell.message)
bot.say(msg, name)
Tell.delete().where(Tell.recipient == name.lower()).execute()
|
Python
| 0
|
@@ -1435,16 +1435,74 @@
 ule.
-rule('.*
+thread(False)
+@sopel.module.rule('.*')
+@sopel.module.priority('low
 ')
 d
@@ -1740,16 +1740,20 @@
 
     if 
+not 
 re.match
|
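Two fixes fall out of the decoded diff: chat_activity is pinned to the bot's main thread at low priority (serializing SQLite writes, which addresses the possible db lock), and the delivery guard gains a `not` so tells are delivered on ordinary activity rather than on the !showtells command, which already has its own handler. A sketch of the resulting decorator stack, assuming sopel's standard module decorators:

```python
import re

import sopel

@sopel.module.thread(False)    # run on the main thread: serialize DB writes
@sopel.module.rule('.*')       # fire on every message
@sopel.module.priority('low')  # let real commands run first
def chat_activity(bot, trigger):
    # ... log the message to the database here ...
    if not re.match(r'[!\.](st|showt|showtells)$', trigger.group(0)):
        deliver_tells(bot, trigger.nick)  # defined elsewhere in the module
```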
e913bbffde84403018e741a62318df029a641950
|
Delete more not needed stuff
|
archive/archive_api/src/conftest.py
|
archive/archive_api/src/conftest.py
|
# -*- encoding: utf-8
import os
import random
import uuid
import betamax
import pytest
import requests
import json
@pytest.fixture(scope="session")
def recorded_sess(pytestconfig):
with betamax.Betamax.configure() as config:
config.cassette_library_dir = str(
pytestconfig.rootdir.join("src", "tests", "cassettes")
)
session = requests.Session()
with betamax.Betamax(session) as vcr:
vcr.use_cassette("test_archive_api")
yield session
@pytest.fixture
def client(
dynamodb_resource,
s3_client,
sns_client,
topic_arn,
table_name_bag,
bucket_bag,
recorded_sess,
):
# This only has to work when populating the betamax recording file;
# although we run on Linux in Travis CI, this will still fine because
# we use the cached recordings.
os.environ.update(
{"PROGRESS_MANAGER_ENDPOINT": "http://docker.for.mac.localhost:6000"}
)
os.environ.update(
{"BAGS_MANAGER_ENDPOINT": "http://host.docker.internal:6001"}
)
from archive_api import app
app.config["DYNAMODB_RESOURCE"] = dynamodb_resource
app.config["SNS_CLIENT"] = sns_client
app.config["SNS_TOPIC_ARN"] = topic_arn
app.config["S3_CLIENT"] = s3_client
app.config["BAG_VHS_TABLE_NAME"] = table_name_bag
app.config["BAG_VHS_BUCKET_NAME"] = bucket_bag
app.config["PROGRESS_MANAGER"].sess = recorded_sess
app.config["BAGS_MANAGER"].sess = recorded_sess
yield app.test_client()
@pytest.fixture
def guid():
return str(uuid.uuid4())
@pytest.fixture
def external_identifier():
return "b22454408"
@pytest.fixture
def space_name():
return "digitised"
@pytest.fixture
def bag_id(external_identifier, space_name):
return f"{space_name}/{external_identifier}"
@pytest.fixture()
def table_name_bag(dynamodb_client):
dynamodb_table_name = "bag--table-%d" % random.randint(0, 10000)
os.environ.update({"BAG_VHS_TABLE_NAME": dynamodb_table_name})
create_table(dynamodb_client, dynamodb_table_name)
yield dynamodb_table_name
dynamodb_client.delete_table(TableName=dynamodb_table_name)
try:
del os.environ["BAG_VHS_TABLE_NAME"]
except KeyError:
pass
def create_table(dynamodb_client, table_name):
try:
dynamodb_client.create_table(
TableName=table_name,
KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1},
)
dynamodb_client.get_waiter("table_exists").wait(TableName=table_name)
except dynamodb_client.exceptions.ResourceInUseException:
pass
@pytest.fixture
def bucket_bag(s3_client):
bucket_name = "test-python-bag-bucket-%d" % random.randint(0, 10000)
os.environ.update({"BAG_VHS_BUCKET_NAME": bucket_name})
s3_client.create_bucket(Bucket=bucket_name)
yield bucket_name
try:
del os.environ["BAG_VHS_BUCKET_NAME"]
except KeyError:
pass
@pytest.fixture
def s3_bag(bag_id):
file_location = "src/tests/resources/vhs_bag.json"
with open(file_location, "r") as io:
json_bag = json.load(io)
json_bag["id"]["value"] = bag_id
return json_bag
|
Python
| 0
|
@@ -30,22 +30,8 @@
 os
-import random
 impo
@@ -88,20 +88,8 @@
 sts
-import json
 

@p
@@ -501,107 +501,33 @@
-    dynamodb_resource,
-    s3_client,
-    sns_client,
-    topic_arn,
-    table_name_bag,
-    bucket_bag
+    sns_client,
+    topic_arn
 ,
@@ -972,64 +972,8 @@
 pp

-    app.config["DYNAMODB_RESOURCE"] = dynamodb_resource
@@ -1058,153 +1058,8 @@
 arn
-    app.config["S3_CLIENT"] = s3_client
-    app.config["BAG_VHS_TABLE_NAME"] = table_name_bag
-    app.config["BAG_VHS_BUCKET_NAME"] = bucket_bag
@@ -1494,1532 +1494,4 @@
 }"

-
-@pytest.fixture()
-def table_name_bag(dynamodb_client):
-    dynamodb_table_name = "bag--table-%d" % random.randint(0, 10000)
-    os.environ.update({"BAG_VHS_TABLE_NAME": dynamodb_table_name})
-    create_table(dynamodb_client, dynamodb_table_name)
-    yield dynamodb_table_name
-    dynamodb_client.delete_table(TableName=dynamodb_table_name)
-    try:
-        del os.environ["BAG_VHS_TABLE_NAME"]
-    except KeyError:
-        pass
-
-
-def create_table(dynamodb_client, table_name):
-    try:
-        dynamodb_client.create_table(
-            TableName=table_name,
-            KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
-            AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
-            ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1},
-        )
-        dynamodb_client.get_waiter("table_exists").wait(TableName=table_name)
-    except dynamodb_client.exceptions.ResourceInUseException:
-        pass
-
-
-@pytest.fixture
-def bucket_bag(s3_client):
-    bucket_name = "test-python-bag-bucket-%d" % random.randint(0, 10000)
-    os.environ.update({"BAG_VHS_BUCKET_NAME": bucket_name})
-    s3_client.create_bucket(Bucket=bucket_name)
-    yield bucket_name
-    try:
-        del os.environ["BAG_VHS_BUCKET_NAME"]
-    except KeyError:
-        pass
-
-
-@pytest.fixture
-def s3_bag(bag_id):
-    file_location = "src/tests/resources/vhs_bag.json"
-
-    with open(file_location, "r") as io:
-        json_bag = json.load(io)
-        json_bag["id"]["value"] = bag_id
-
-    return json_bag
|
3fa970e9fb94319df587cf45f90dd0fdb396df1d
|
update usage
|
rdhinet.py
|
rdhinet.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Dongdong Tian @ USTC
#
# Revision History:
# 2014-09-03 Dongdong Tian Initial Coding
#
"""Extract SAC data files from Hi-net WIN32 files
Usage:
rdhinet.py DIRNAME [-C <comps>] [-D <outdir>] [-S <suffix>] [-P <procs>]
rdhinet.py -h
Options:
-h Show this help.
-C <comps> Selection of components to extract.
Avaiable components are U, N, E, X, Y et. al.
Default to extract all components.
-D <outdir> Output directory for SAC files.
-S <suffix> Suffix of output SAC files.
-P <procs> Parallel using multiple processes. Set number of cpus to <procs>
if <procs> equals 0. [default: 0]
"""
import os
import glob
import shlex
import zipfile
import datetime
import subprocess
import multiprocessing
from docopt import docopt
# external tools from Hi-net
catwin32 = "catwin32"
win2sac = "win2sac_32"
def unzip(zips):
"""unzip zip filelist"""
for file in zips:
print("Unzip %s" % (file))
zipFile = zipfile.ZipFile(file, "r")
for name in zipFile.namelist():
zipFile.extract(name)
def win32_cat(cnts, cnt_total):
"""merge WIN32 files to one total WIN32 file"""
print("Total %d win32 files" % (len(cnts)))
cmd = "%s %s -o %s" % (catwin32, ' '.join(cnts), cnt_total)
args = shlex.split(cmd)
subprocess.call(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def win_prm(chfile, prmfile="win.prm"):
"""four line parameters file"""
with open(prmfile, "w") as f:
f.write(".\n")
f.write(chfile + "\n")
f.write(".\n")
f.write(".\n")
def get_chno(chfile, comps):
""" read channel no list from channel table"""
chno = []
with open(chfile, "r") as f:
for line in f:
if line[0] == '#':
continue
items = line.split()
no, comp = items[0], items[4]
if comps is None or comp in comps:
chno.append(no)
print("Total %d channels" % len(chno))
return chno
def _exctract_channel(tup):
"""extract only one channel for one time"""
winfile, chno, outdir, prmfile, pmax = tup
subprocess.call([win2sac, winfile, chno, "SAC", outdir, prmfile, '-m'+str(pmax)],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
def win32_sac(winfile, ch_no, outdir=".", prmfile="win.prm", pmax=2000000):
tuple_list = []
for ch in chno:
t = winfile, ch, outdir, prmfile, pmax
tuple_list.append(t)
procs = int(arguments['-P'])
if procs == 1:
for t in tuple_list:
_exctract_channel(t)
else:
if procs == 0:
procs = multiprocessing.cpu_count()
else:
procs = min(multiprocessing.cpu_count(), procs)
pool = multiprocessing.Pool(processes=procs)
pool.map(_exctract_channel, tuple_list)
def rename_sac(outdir, sacfile=None):
for file in glob.glob(outdir + '/*.SAC'):
dest = os.path.splitext(file)[0]
if sacfile:
dest += "." + sacfile
os.rename(file, dest)
def unlink_lists(files):
for f in files:
os.unlink(f)
if __name__ == "__main__":
arguments = docopt(__doc__)
# change directory
os.chdir(arguments['DIRNAME'])
print("Working in dir %s" % (arguments['DIRNAME']))
# unzip zip files
unzip(glob.glob("??_??_????????????_*_?????.zip"))
# merge win32 files
cnts = sorted(glob.glob("??????????????????.cnt"))
cnt_total = "%s_%d.cnt" % (cnts[0][0:11], len(cnts))
win32_cat(cnts, cnt_total)
unlink_lists(cnts)
chfile = glob.glob("??_??_????????.euc.ch")[0]
# generate win32 paramerter file
win_prm(chfile)
# get channel NO. lists for channel table
comps = None
if arguments['-C']:
comps = arguments['-C'].split(",")
chno = get_chno(chfile, comps)
# extract sac files
outdir = '.'
if arguments['-D']:
outdir = arguments['-D']
if not os.path.exists(outdir):
os.makedirs(outdir)
# maximum number of points
pmax = len(cnts) * 60 * 100
win32_sac(cnt_total, chno, outdir=outdir, pmax=pmax)
sacfile = arguments['-S']
rename_sac(outdir, sacfile)
|
Python
| 0.000001
|
@@ -361,22 +361,9 @@
 s> 
-Selection of c
+C
 ompo
@@ -378,16 +378,40 @@
 extract
+, delimited using commas
 .
|
8e4a5ef25c87879fb01aa79f88c6c6a833820f8b
|
bump version
|
python/baseline/version.py
|
python/baseline/version.py
|
__version__ = "1.1.2"
|
Python
| 0
|
@@ -16,8 +16,8 @@
1.1.
-2
+3
 "
|
76ffb1b8891e2ad349140044d78e766e02ebf87a
|
Remove import taichi from expr.py (#3871)
|
python/taichi/lang/expr.py
|
python/taichi/lang/expr.py
|
import numpy as np
from taichi._lib import core as _ti_core
from taichi.lang import impl
from taichi.lang.common_ops import TaichiOperations
from taichi.lang.util import is_taichi_class
import taichi as ti
# Scalar, basic data type
class Expr(TaichiOperations):
"""A Python-side Expr wrapper, whose member variable `ptr` is an instance of C++ Expr class. A C++ Expr object contains member variable `expr` which holds an instance of C++ Expression class."""
def __init__(self, *args, tb=None):
_taichi_skip_traceback = 1
self.tb = tb
if len(args) == 1:
if isinstance(args[0], _ti_core.Expr):
self.ptr = args[0]
elif isinstance(args[0], Expr):
self.ptr = args[0].ptr
self.tb = args[0].tb
elif is_taichi_class(args[0]):
raise ValueError('cannot initialize scalar expression from '
f'taichi class: {type(args[0])}')
else:
# assume to be constant
arg = args[0]
try:
if isinstance(arg, np.ndarray):
arg = arg.dtype(arg)
except:
pass
self.ptr = impl.make_constant_expr(arg).ptr
else:
assert False
if self.tb:
self.ptr.set_tb(self.tb)
self.ptr.type_check()
def __hash__(self):
return self.ptr.get_raw_address()
def __str__(self):
return '<ti.Expr>'
def __repr__(self):
return '<ti.Expr>'
def make_var_list(size):
exprs = []
for _ in range(size):
exprs.append(_ti_core.make_id_expr(''))
return exprs
def make_expr_group(*exprs):
if len(exprs) == 1:
if isinstance(exprs[0], (list, tuple)):
exprs = exprs[0]
elif isinstance(exprs[0], ti.Matrix):
mat = exprs[0]
assert mat.m == 1
exprs = mat.entries
expr_group = _ti_core.ExprGroup()
for i in exprs:
if isinstance(i, ti.Matrix):
assert i.local_tensor_proxy is not None
expr_group.push_back(i.local_tensor_proxy)
else:
expr_group.push_back(Expr(i).ptr)
return expr_group
|
Python
| 0
|
@@ -155,20 +155,22 @@
 ang.
-util
+matrix
 import 
 is_t
@@ -169,44 +169,59 @@
 ort 
-is_taichi_class
-
+Matrix
+from taichi.lang.util
 import 
+is_
 taichi
- as ti
+_class
 

 #
@@ -1900,19 +1900,16 @@
 prs[0], 
-ti.
 Matrix):
@@ -2081,19 +2081,16 @@
 ance(i, 
-ti.
 Matrix):
|
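Reconstructed from the diff, the module now imports Matrix and is_taichi_class directly instead of pulling in the whole package with `import taichi as ti` (plausibly to avoid a circular import in a low-level module), and the isinstance checks drop the `ti.` prefix:

```python
import numpy as np
from taichi._lib import core as _ti_core
from taichi.lang import impl
from taichi.lang.common_ops import TaichiOperations
from taichi.lang.matrix import Matrix        # replaces `import taichi as ti`
from taichi.lang.util import is_taichi_class

# ...and later in make_expr_group:
#     isinstance(exprs[0], Matrix)   # was: isinstance(exprs[0], ti.Matrix)
```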
1d4776adb0d9926efaf38d8db9691e7339c0174e
|
Improve email a bit
|
python/update_fsurf_job.py
|
python/update_fsurf_job.py
|
#!/usr/bin/env python
import argparse
import os
import subprocess
import sys
from email.mime.text import MIMEText
import psycopg2
import shutil
import fsurfer
VERSION = fsurfer.__version__
PARAM_FILE_LOCATION = "/etc/freesurfer/db_info"
FREESURFER_BASE = '/stash2/user/fsurf/'
def get_db_parameters():
"""
Read database parameters from a file and return it
:return: a tuple of (database_name, user, password, hostname)
"""
parameters = {}
with open(PARAM_FILE_LOCATION) as param_file:
for line in param_file:
key, val = line.strip().split('=')
parameters[key.strip()] = val.strip()
return (parameters['database'],
parameters['user'],
parameters['password'],
parameters['hostname'])
def get_db_client():
"""
Get a postgresql client instance and return it
:return: a redis client instance or None if failure occurs
"""
db, user, password, host = get_db_parameters()
return psycopg2.connect(database=db, user=user, host=host, password=password)
def process_results(jobid, success=True):
"""
Email user informing them that a workflow has completed
:param success: True if workflow completed successfully
:param jobid: id for workflow
:return: None
"""
info_query = "SELECT jobs.subject, " \
" jobs.job_date, " \
" jobs.pegasus_ts, " \
" users.email, " \
" users.username " \
"FROM freesurfer_interface.jobs AS jobs, " \
" freesurfer_interface.users AS users " \
"WHERE jobs.id = %s AND jobs.username = users.username"
conn = get_db_client()
cursor = conn.cursor()
try:
cursor.execute(info_query, [jobid])
row = cursor.fetchone()
if row:
subject_name = row[0]
submit_date = row[1]
pegasus_ts = row[2]
user_email = row[3]
username = row[4]
else:
return
except psycopg2.Error:
return
if success:
msg = MIMEText('Your freesurfer workflow {0} '.format(jobid) +
'submitted on {0} and processing '.format(submit_date) +
'subject {0} '.format(subject_name) +
'has completed succesfully')
else:
msg = MIMEText('Your freesurfer workflow {0} '.format(jobid) +
'submitted on {0} and processing '.format(submit_date) +
'subject {0} '.format(subject_name) +
'has completed with errors')
msg['Subject'] = 'Freesurfer workflow {0} completed'.format(jobid)
sender = 'fsurf@login.osgconnect.net'
msg['From'] = sender
msg['To'] = user_email
try:
sendmail = subprocess.Popen(['/usr/sbin/sendmail', '-t'], stdin=subprocess.PIPE)
sendmail.communicate(msg.as_string())
except subprocess.CalledProcessError:
pass
# copy output to the results directory
output_filename = os.path.join(FREESURFER_BASE,
username,
'results',
"{0}_{1}_output.tar.bz2".format(jobid,
subject_name))
result_filename = os.path.join(FREESURFER_BASE,
username,
'workflows',
'output',
'fsurf',
'pegasus',
'freesurfer',
pegasus_ts,
'{0}_output.tar.bz2'.format(subject_name))
shutil.copyfile(result_filename, output_filename)
result_logfile = os.path.join(FREESURFER_BASE,
username,
'workflows',
'output',
'fsurf',
'pegasus',
'freesurfer',
pegasus_ts,
'recon-all.log')
log_filename = os.path.join(FREESURFER_BASE,
username,
'results',
'recon_all-{0}.log'.format(jobid))
shutil.copyfile(result_logfile, log_filename)
try:
if success:
job_update = "UPDATE freesurfer_interface.jobs " \
"SET state = 'COMPLETED'" \
"WHERE id = %s;"
else:
job_update = "UPDATE freesurfer_interface.jobs " \
"SET state = 'FAILED'" \
"WHERE id = %s;"
cursor.execute(job_update, [jobid])
conn.commit()
conn.close()
except psycopg2.Error:
return
def main():
"""
Main function that parses arguments and generates the pegasus
workflow
:return: True if any errors occurred during DAX generaton
"""
parser = argparse.ArgumentParser(description="Process freesurfer information")
# version info
parser.add_argument('--version', action='version', version='%(prog)s ' + VERSION)
# Arguments for workflow outcome
parser.add_argument('--success', dest='success',
action='store_true',
help='Workflow completed successfully')
parser.add_argument('--failure', dest='success',
action='store_false',
help='Workflow completed with errors')
# Arguments identifying workflow
parser.add_argument('--id', dest='workflow_id',
action='store', help='Pegasus workflow id to use')
args = parser.parse_args(sys.argv[1:])
if args.success:
process_results(args.workflow_id, success=True)
else:
process_results(args.workflow_id, success=False)
sys.exit(0)
if __name__ == '__main__':
main()
|
Python
| 0.000038
|
@@ -274,16 +274,403 @@
surf/'%0A%0A
+EMAIL_TEMPLATE = '''%0AThis email is being sent to inform you that your freesurfer workflow %7B0%7D submitted on %7B1%7D%0Ahas completed %7B2%7D. You can download the output by running%0A%60fsurf --output %7B0%7D --user %7B3%7D --password %3Cpass%3E%60%0Aor download the Freesurfer log files by running %60fsurf --log %7B0%7D --user %7B3%7D --password %3Cpass%3E.%60%0A%0APlease contact support@osgconnect.net if you have any questions.%0A'''%0A%0A
%0Adef get
@@ -1791,24 +1791,45 @@
%22
+ date_trunc('second',
jobs.pegasu
@@ -1832,16 +1832,17 @@
gasus_ts
+)
, %22 %5C%0A
@@ -2511,24 +2511,25 @@
return%0A
+%0A
if succe
@@ -2544,536 +2544,273 @@
-msg = MIMEText('Your freesurfer workflow %7B0%7D '.format(jobid) +%0A 'submitted on %7B0%7D and processing '.format(submit_date) +%0A 'subject %7B0%7D '.format(subject_name) +%0A 'has completed succesfully')%0A else:%0A msg = MIMEText('Your freesurfer workflow %7B0%7D '.format(jobid) +%0A 'submitted on %7B0%7D and processing '.format(submit_date) +%0A 'subject %7B0%7D '.format(subject_name) +%0A 'has completed with errors'
+status = 'succesfully'%0A else:%0A status = 'with errors'%0A msg = MIMEText(EMAIL_TEMPLATE.format(jobid,%0A submit_date,%0A status,%0A username)
)%0A%0A
|
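The record above hands a MIMEText message to the local sendmail binary over a pipe; a minimal standalone sketch of that pattern (Python 3, addresses are placeholders):

import subprocess
from email.mime.text import MIMEText

def send_plain_email(body, subject, sender, recipient):
    """Build a text/plain message and pipe it to sendmail -t, which
    reads the recipients from the message headers."""
    msg = MIMEText(body)
    msg['Subject'] = subject
    msg['From'] = sender
    msg['To'] = recipient
    proc = subprocess.Popen(['/usr/sbin/sendmail', '-t'],
                            stdin=subprocess.PIPE)
    proc.communicate(msg.as_string().encode())

send_plain_email('workflow finished', 'Status', 'noreply@example.org',
                 'user@example.org')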
93e0e92ca1b49aa200895a316a5411c8afff96b5
|
Reset notification errors to return 400
|
monasca_api/v2/reference/notifications.py
|
monasca_api/v2/reference/notifications.py
|
# Copyright 2014 Hewlett-Packard
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import falcon
from oslo_config import cfg
from oslo_log import log
import simport
from monasca_api.api import notifications_api_v2
from monasca_api.common.repositories import exceptions
from monasca_api.v2.common.exceptions import HTTPUnprocessableEntityError
from monasca_api.v2.common.schemas import (
notifications_request_body_schema as schemas_notifications)
from monasca_api.v2.common.schemas import exceptions as schemas_exceptions
from monasca_api.v2.reference import helpers
from monasca_api.v2.reference import resource
LOG = log.getLogger(__name__)
class Notifications(notifications_api_v2.NotificationsV2API):
def __init__(self):
super(Notifications, self).__init__()
self._region = cfg.CONF.region
self._default_authorized_roles = (
cfg.CONF.security.default_authorized_roles)
self._notifications_repo = simport.load(
cfg.CONF.repositories.notifications_driver)()
def _validate_notification(self, notification):
"""Validates the notification
:param notification: An event object.
:raises falcon.HTTPBadRequest
"""
try:
schemas_notifications.validate(notification)
except schemas_exceptions.ValidationException as ex:
LOG.debug(ex)
raise HTTPUnprocessableEntityError('Unprocessable Entity', ex.message)
def _validate_name_not_conflicting(self, tenant_id, name, expected_id=None):
notification = self._notifications_repo.find_notification_by_name(tenant_id, name)
if notification:
if not expected_id:
LOG.warn("Found existing notification method for {} with tenant_id {}".format(name, tenant_id))
raise exceptions.AlreadyExistsException(
"A notification method with the name {} already exists".format(name))
found_notification_id = notification['id']
if found_notification_id != expected_id:
LOG.warn("Found existing notification method for {} with tenant_id {} with unexpected id {}"
.format(name, tenant_id, found_notification_id))
raise exceptions.AlreadyExistsException(
"A notification method with name {} already exists with id {}"
.format(name, found_notification_id))
@resource.resource_try_catch_block
def _create_notification(self, tenant_id, notification, uri):
name = notification['name']
notification_type = notification['type'].upper()
address = notification['address']
self._validate_name_not_conflicting(tenant_id, name)
notification_id = self._notifications_repo.create_notification(
tenant_id,
name,
notification_type,
address)
return self._create_notification_response(notification_id,
name,
notification_type,
address,
uri)
@resource.resource_try_catch_block
def _update_notification(self, notification_id, tenant_id, notification, uri):
name = notification['name']
notification_type = notification['type'].upper()
address = notification['address']
self._validate_name_not_conflicting(tenant_id, name, expected_id=notification_id)
self._notifications_repo.update_notification(notification_id, tenant_id, name,
notification_type,
address)
return self._create_notification_response(notification_id,
name,
notification_type,
address,
uri)
def _create_notification_response(self, id, name, type,
address, uri):
response = {
'id': id,
'name': name,
'type': type,
'address': address
}
return helpers.add_links_to_resource(response, uri)
@resource.resource_try_catch_block
def _list_notifications(self, tenant_id, uri, offset, limit):
rows = self._notifications_repo.list_notifications(tenant_id, offset,
limit)
result = [self._build_notification_result(row,
uri) for row in rows]
return helpers.paginate(result, uri, limit)
@resource.resource_try_catch_block
def _list_notification(self, tenant_id, notification_id, uri):
row = self._notifications_repo.list_notification(
tenant_id,
notification_id)
return self._build_notification_result(row, uri)
def _build_notification_result(self, notification_row, uri):
result = {
u'id': notification_row['id'],
u'name': notification_row['name'],
u'type': notification_row['type'],
u'address': notification_row['address']
}
helpers.add_links_to_resource(result, uri)
return result
@resource.resource_try_catch_block
def _delete_notification(self, tenant_id, notification_id):
self._notifications_repo.delete_notification(tenant_id,
notification_id)
def on_post(self, req, res):
helpers.validate_json_content_type(req)
helpers.validate_authorization(req, self._default_authorized_roles)
notification = helpers.read_http_resource(req)
self._validate_notification(notification)
tenant_id = helpers.get_tenant_id(req)
result = self._create_notification(tenant_id, notification, req.uri)
res.body = helpers.dumpit_utf8(result)
res.status = falcon.HTTP_201
def on_get(self, req, res, notification_method_id=None):
if notification_method_id is None:
helpers.validate_authorization(req, self._default_authorized_roles)
tenant_id = helpers.get_tenant_id(req)
offset = helpers.get_query_param(req, 'offset')
limit = helpers.get_limit(req)
result = self._list_notifications(tenant_id, req.uri, offset,
limit)
res.body = helpers.dumpit_utf8(result)
res.status = falcon.HTTP_200
else:
helpers.validate_authorization(req,
self._default_authorized_roles)
tenant_id = helpers.get_tenant_id(req)
result = self._list_notification(tenant_id,
notification_method_id,
req.uri)
res.body = helpers.dumpit_utf8(result)
res.status = falcon.HTTP_200
def on_delete(self, req, res, notification_method_id):
helpers.validate_authorization(req, self._default_authorized_roles)
tenant_id = helpers.get_tenant_id(req)
self._delete_notification(tenant_id, notification_method_id)
res.status = falcon.HTTP_204
def on_put(self, req, res, notification_method_id):
helpers.validate_json_content_type(req)
helpers.validate_authorization(req, self._default_authorized_roles)
notification = helpers.read_http_resource(req)
self._validate_notification(notification)
tenant_id = helpers.get_tenant_id(req)
result = self._update_notification(notification_method_id, tenant_id,
notification, req.uri)
res.body = helpers.dumpit_utf8(result)
res.status = falcon.HTTP_200
|
Python
| 0.000131
|
@@ -760,82 +760,8 @@
ons%0A
-from monasca_api.v2.common.exceptions import HTTPUnprocessableEntityError%0A
from
@@ -1809,58 +1809,42 @@
ise
-HTTPUnprocessableEntityError('Unprocessable Entity
+falcon.HTTPBadRequest('Bad Request
', e
|
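The diff above downgrades schema validation failures from a custom 422 error to falcon's built-in 400; the resulting pattern in isolation (the validate callable is a placeholder, and keyword arguments keep the call compatible across falcon versions):

import falcon

def validate_or_400(payload, validate):
    try:
        validate(payload)
    except ValueError as ex:
        # falcon renders this as a 400 response with the given title/description
        raise falcon.HTTPBadRequest(title='Bad Request', description=str(ex))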
49f365ecfc18e32e5664ffc53c163320ed0af6ac
|
Add the missing rows() function
|
pywind/bmreports/prices.py
|
pywind/bmreports/prices.py
|
# coding=utf-8
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
""" BMReports make the system electricity prices available. This module contains
classes to access those reports.
"""
import os
from datetime import date, datetime
from pywind.utils import get_or_post_a_url, parse_response_as_xml
class SystemPrices(object):
""" Class to get the electricity prices from BMreports. """
URL = 'http://www.bmreports.com/bsp/additional/soapfunctions.php'
def __init__(self, dtt=None):
self.dtt = dtt or date.today()
self.xml = None
self.prices = []
def get_data(self):
""" Get the data from the remote server. """
data = {'element': 'SYSPRICE',
'dT': self.dtt.strftime("%Y-%m-%d")}
resp = get_or_post_a_url(self.URL, params=data)
self.xml = parse_response_as_xml(resp)
if self.xml is None:
return False
for elm in self.xml.xpath('.//ELEMENT'):
data = {}
for elm2 in elm.getchildren():
if elm2.tag == 'SP':
data['period'] = int(elm2.text)
elif elm2.tag == 'SD':
data['date'] = datetime.strptime(elm2.text, "%Y-%m-%d")
else:
data[elm2.tag.lower()] = elm2.text
self.prices.append(data)
return len(self.prices) > 0
def save_original(self, filename):
""" Save the downloaded certificate data into the filename provided.
:param filename: Filename to save the file to.
:returns: True or False
:rtype: bool
"""
if self.xml is None:
return False
name, ext = os.path.splitext(filename)
if ext is '':
filename += '.xml'
self.xml.write(filename)
return True
def as_dict(self):
""" Return the data as a dict. """
return {'date': self.dtt, 'data': self.prices}
|
Python
| 0.999729
|
@@ -2579,16 +2579,303 @@
s) %3E 0%0A%0A
+ def rows(self):%0A %22%22%22Generator to return rows for export.%0A%0A :returns: Dict containing information for a single price period.%0A :rtype: dict%0A %22%22%22%0A for per in self.prices:%0A yield %7B'PricePeriod': %7B'@%7B%7D'.format(key):per%5Bkey%5D for key in per%7D%7D%0A%0A
def
|
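The added rows() method wraps each price record in an export dict whose keys gain an '@' prefix; the same transformation in isolation:

def rows(records):
    # one export dict per record, keys prefixed with '@' as in the diff above
    for per in records:
        yield {'PricePeriod': {'@{}'.format(key): per[key] for key in per}}

for row in rows([{'period': 1, 'price': 42.0}]):
    print(row)  # {'PricePeriod': {'@period': 1, '@price': 42.0}}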
86768d83067745def16761db24ce496e3125399e
|
test script works
|
mtgreatest/process_results/test_script.py
|
mtgreatest/process_results/test_script.py
|
#!/usr/bin/env python
import scrape_results
from mtgreatest.rdb import Cursor
cursor = Cursor()
event_info = cursor.execute('select event_id, event_link from event_table where results_loaded = 1')
event_info = [dict(zip(('event_id','event_link'), item)) for item in event_info] #this should be a method (or default return structure) in rdb
soups = [scrape_results.event_soup(row['event_link']) for row in event_info]
failed = []
for i in range(len(soups)):
try:
scrape_results.scrape_standings(soups[i], event_info[i]['event_id'])
except:
failed.append(event_info[i]['event_id'])
|
Python
| 0.000001
|
@@ -341,17 +341,55 @@
db%0A%0A
+failed = %5B%5D%0A%0Afor row in event_info:%0A
soup
-s
=
-%5B
scra
@@ -432,73 +432,76 @@
k'%5D)
- for row in event_info%5D%0A%0Afailed = %5B%5D%0A%0Afor i in range(len(soups)):
+%0A print 'scraping standings for event %7B%7D'.format(row%5B'event_id'%5D)
%0A
@@ -554,27 +554,13 @@
soup
-s%5Bi%5D, event_info%5Bi%5D
+, row
%5B'ev
@@ -607,21 +607,11 @@
end(
-event_info%5Bi%5D
+row
%5B'ev
|
722a3735f9b14f84a5e2e5dd04e38b3446504ed8
|
Fix example for random_bipartite
|
networkx/algorithms/bipartite/__init__.py
|
networkx/algorithms/bipartite/__init__.py
|
r""" This module provides functions and operations for bipartite
graphs. Bipartite graphs `B = (U, V, E)` have two node sets `U,V` and edges in
`E` that only connect nodes from opposite sets. It is common in the literature
to use an spatial analogy referring to the two node sets as top and bottom nodes.
The bipartite algorithms are not imported into the networkx namespace
at the top level so the easiest way to use them is with:
>>> import networkx as nx
>>> from networkx.algorithms import bipartite
NetworkX does not have a custom bipartite graph class but the Graph()
or DiGraph() classes can be used to represent bipartite graphs. However,
you have to keep track of which set each node belongs to, and make
sure that there is no edge between nodes of the same set. The convention used
in NetworkX is to use a node attribute named "bipartite" with values 0 or 1 to
identify the sets each node belongs to.
For example:
>>> B = nx.Graph()
>>> B.add_nodes_from([1,2,3,4], bipartite=0) # Add the node attribute "bipartite"
>>> B.add_nodes_from(['a','b','c'], bipartite=1)
>>> B.add_edges_from([(1,'a'), (1,'b'), (2,'b'), (2,'c'), (3,'c'), (4,'a')])
Many algorithms of the bipartite module of NetworkX require, as an argument, a
container with all the nodes that belong to one set, in addition to the bipartite
graph `B`. If `B` is connected, you can find the node sets using a two-coloring
algorithm:
>>> nx.is_connected(B)
True
>>> bottom_nodes, top_nodes = bipartite.sets(B)
list(top_nodes)
[1, 2, 3, 4]
list(bottom_nodes)
['a', 'c', 'b']
However, if the input graph is not connected, there are more than one possible
colorations. Thus, the following result is correct:
>>> B.remove_edge(2,'c')
>>> nx.is_connected(B)
False
>>> bottom_nodes, top_nodes = bipartite.sets(B)
list(top_nodes)
[1, 2, 4, 'c']
list(bottom_nodes)
['a', 3, 'b']
Using the "bipartite" node attribute, you can easily get the two node sets:
>>> top_nodes = set(n for n,d in B.nodes(data=True) if d['bipartite']==0)
>>> bottom_nodes = set(B) - top_nodes
list(top_nodes)
[1, 2, 3, 4]
list(bottom_nodes)
['a', 'c', 'b']
So you can easily use the bipartite algorithms that require, as an argument, a
container with all nodes that belong to one node set:
>>> print(round(bipartite.density(B, bottom_nodes),2))
0.42
>>> G = bipartite.projected_graph(B, top_nodes)
>>> G.edges()
[(1, 2), (1, 4)]
All bipartite graph generators in NetworkX build bipartite graphs with the
"bipartite" node attribute. Thus, you can use the same approach:
>>> RB = nx.bipartite_random_graph(5, 7, 0.2)
>>> nx.is_connected(RB)
False
>>> RB_top = set(n for n,d in RB.nodes(data=True) if d['bipartite']==0)
>>> RB_bottom = set(RB) - RB_top
>>> list(RB_top)
[0, 1, 2, 3, 4]
>>> list(RB_bottom)
[5, 6, 7, 8, 9, 10, 11]
For other bipartite graph generators see the bipartite section of
:doc:`generators`.
"""
from networkx.algorithms.bipartite.basic import *
from networkx.algorithms.bipartite.centrality import *
from networkx.algorithms.bipartite.cluster import *
from networkx.algorithms.bipartite.projection import *
from networkx.algorithms.bipartite.redundancy import *
from networkx.algorithms.bipartite.spectral import *
|
Python
| 0
|
@@ -2570,38 +2570,8 @@
.2)%0A
-%3E%3E%3E nx.is_connected(RB)%0AFalse%0A
%3E%3E%3E
|
e164a3ccb7625d4f36c83628aa6f7f030f38d6cf
|
remove normalized test
|
networkx/algorithms/tests/test_smetric.py
|
networkx/algorithms/tests/test_smetric.py
|
from nose.tools import assert_equal
import networkx as nx
def test_smetric():
g = nx.Graph()
g.add_edge(1,2)
g.add_edge(2,3)
g.add_edge(2,4)
g.add_edge(1,4)
sm = nx.s_metric(g,normalized=False)
assert_equal(sm, 19.0)
smNorm = nx.s_metric(g,normalized=True)
# assert_equal(smNorm, 0.95)
|
Python
| 0.000051
|
@@ -241,16 +241,17 @@
, 19.0)%0A
+#
smNo
|
9b19de89c7492e24a9ba71e8289ee186648667ad
|
Remove unneeded nesting
|
lib/ansible/modules/extras/cloud/amazon/ec2_search.py
|
lib/ansible/modules/extras/cloud/amazon/ec2_search.py
|
#!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_search
short_description: ask EC2 for information about other instances.
description:
    - Only supports search for hostname by tags currently. Looking to add more later.
version_added: "1.9"
options:
key:
description:
- instance tag key in EC2
required: false
default: Name
aliases: []
value:
description:
- instance tag value in EC2
required: false
default: null
aliases: []
lookup:
description:
- What type of lookup to use when searching EC2 instance info.
required: false
default: tags
aliases: []
region:
description:
- EC2 region that it should look for tags in
required: false
default: All Regions
aliases: []
ignore_state:
description:
- instance state that should be ignored such as terminated.
required: false
default: terminated
aliases: []
author:
- "Michael Schuett (@michaeljs1990)"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Basic provisioning example
- ec2_search:
key: mykey
value: myvalue
register: servers
'''
try:
import boto
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def todict(obj, classkey=None):
if isinstance(obj, dict):
data = {}
for (k, v) in obj.items():
data[k] = todict(v, classkey)
return data
elif hasattr(obj, "_ast"):
return todict(obj._ast())
elif hasattr(obj, "__iter__"):
return [todict(v, classkey) for v in obj]
elif hasattr(obj, "__dict__"):
# This Class causes a recursive loop and at this time is not worth
# debugging. If it's useful later I'll look into it.
if not isinstance(obj, boto.ec2.blockdevicemapping.BlockDeviceType):
data = dict([(key, todict(value, classkey))
for key, value in obj.__dict__.iteritems()
if not callable(value) and not key.startswith('_')])
if classkey is not None and hasattr(obj, "__class__"):
data[classkey] = obj.__class__.__name__
return data
else:
return obj
def get_all_ec2_regions(module):
try:
regions = boto.ec2.regions()
except Exception, e:
module.fail_json('Boto authentication issue: %s' % e)
return regions
# Connect to ec2 region
def connect_to_region(region, module):
try:
conn = boto.ec2.connect_to_region(region.name)
except Exception, e:
print module.jsonify('error connecting to region: ' + region.name)
conn = None
# connect_to_region will fail "silently" by returning
# None if the region name is wrong or not supported
return conn
def main():
module = AnsibleModule(
argument_spec = dict(
key = dict(default='Name'),
value = dict(),
lookup = dict(default='tags'),
ignore_state = dict(default='terminated'),
region = dict(),
)
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
server_info = list()
for region in get_all_ec2_regions(module):
conn = connect_to_region(region, module)
try:
# Run when looking up by tag names, only returning hostname currently
if module.params.get('lookup') == 'tags':
ec2_key = 'tag:' + module.params.get('key')
ec2_value = module.params.get('value')
reservations = conn.get_all_instances(filters={ec2_key : ec2_value})
for instance in [i for r in reservations for i in r.instances]:
if instance.private_ip_address != None:
instance.hostname = 'ip-' + instance.private_ip_address.replace('.', '-')
if instance._state.name not in module.params.get('ignore_state'):
server_info.append(todict(instance))
except:
print module.jsonify('error getting instances from: ' + region.name)
ansible_facts = {'info': server_info}
ec2_facts_result = dict(changed=True, ec2=ansible_facts)
module.exit_json(**ec2_facts_result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
|
Python
| 0.000001
|
@@ -4784,50 +4784,8 @@
e)%0A%0A
- ansible_facts = %7B'info': server_info%7D%0A
@@ -4830,21 +4830,19 @@
ec2=
-ansible_facts
+server_info
)%0A%0A
|
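The module's todict() helper recursively converts boto objects into plain dicts; a compact, dependency-free version of the same idea, without the BlockDeviceType special case:

def todict(obj):
    """Recursively convert an object graph to dicts and lists,
    dropping callables and private attributes."""
    if isinstance(obj, dict):
        return {k: todict(v) for k, v in obj.items()}
    if hasattr(obj, '__iter__') and not isinstance(obj, str):
        return [todict(v) for v in obj]
    if hasattr(obj, '__dict__'):
        return {k: todict(v) for k, v in vars(obj).items()
                if not callable(v) and not k.startswith('_')}
    return obj

class Node:
    def __init__(self):
        self.name = 'a'
        self._hidden = 1

print(todict(Node()))  # {'name': 'a'}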
d616d93a2a37ed7d5133376ffdc763eb2c52b6e1
|
Set save_best_so_far_agent=False
|
examples/ale/train_ppo_ale.py
|
examples/ale/train_ppo_ale.py
|
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases()
import argparse
import gym
gym.undo_logger_setup()
import chainer
import numpy as np
from chainerrl.agents.a3c import A3CModel
from chainerrl.agents import PPO
from chainerrl import experiments
from chainerrl import links
from chainerrl import misc
from chainerrl.optimizers.nonbias_weight_decay import NonbiasWeightDecay
from chainerrl import policy
from chainerrl import v_function
import atari_wrappers
class A3CFF(chainer.ChainList, A3CModel):
def __init__(self, n_actions):
self.head = links.NIPSDQNHead()
self.pi = policy.FCSoftmaxPolicy(
self.head.n_output_channels, n_actions)
self.v = v_function.FCVFunction(self.head.n_output_channels)
super().__init__(self.head, self.pi, self.v)
def pi_and_v(self, state):
out = self.head(state)
return self.pi(out), self.v(out)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='BreakoutNoFrameskip-v4')
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--seed', type=int, default=0,
help='Random seed [0, 2 ** 31)')
parser.add_argument('--outdir', type=str, default='results',
help='Directory path to save output files.'
' If it does not exist, it will be created.')
parser.add_argument('--use-sdl', action='store_true')
parser.add_argument('--max-episode-len', type=int, default=10000)
parser.add_argument('--profile', action='store_true')
parser.add_argument('--steps', type=int, default=8 * 10 ** 7)
parser.add_argument('--lr', type=float, default=2.5e-4)
parser.add_argument('--eval-interval', type=int, default=10 ** 6)
parser.add_argument('--eval-n-runs', type=int, default=10)
parser.add_argument('--standardize-advantages', action='store_true')
parser.add_argument('--weight-decay', type=float, default=0.0)
parser.add_argument('--demo', action='store_true', default=False)
parser.add_argument('--load', type=str, default='')
parser.add_argument('--logging-level', type=int, default=20,
help='Logging level. 10:DEBUG, 20:INFO etc.')
parser.add_argument('--render', action='store_true', default=False,
help='Render env states in a GUI window.')
parser.add_argument('--monitor', action='store_true', default=False,
help='Monitor env. Videos and additional information'
' are saved as output files.')
# In the original paper, agent runs in 8 environments parallely
# and samples 128 steps per environment.
# Sample 128 * 8 steps, instead.
parser.add_argument('--update-interval', type=int, default=128 * 8)
parser.add_argument('--batchsize', type=int, default=32)
parser.add_argument('--epochs', type=int, default=3)
args = parser.parse_args()
import logging
logging.basicConfig(level=args.logging_level)
# Set a random seed used in ChainerRL.
misc.set_random_seed(args.seed, gpus=(args.gpu,))
# Set different random seeds for train and test envs.
train_seed = args.seed
test_seed = 2 ** 31 - 1 - args.seed
args.outdir = experiments.prepare_output_dir(args, args.outdir)
print('Output files are saved in {}'.format(args.outdir))
def make_env(test):
# Use different random seeds for train and test envs
env_seed = test_seed if test else train_seed
env = atari_wrappers.wrap_deepmind(
atari_wrappers.make_atari(args.env),
episode_life=not test,
clip_rewards=not test)
env.seed(int(env_seed))
if args.monitor:
env = gym.wrappers.Monitor(
env, args.outdir,
mode='evaluation' if test else 'training')
if args.render:
misc.env_modifiers.make_rendered(env)
return env
env = make_env(test=False)
eval_env = make_env(test=True)
n_actions = env.action_space.n
model = A3CFF(n_actions)
opt = chainer.optimizers.Adam(alpha=args.lr)
opt.setup(model)
opt.add_hook(chainer.optimizer.GradientClipping(40))
if args.weight_decay > 0:
opt.add_hook(NonbiasWeightDecay(args.weight_decay))
def phi(x):
# Feature extractor
return np.asarray(x, dtype=np.float32) / 255
agent = PPO(model, opt,
gpu=args.gpu,
phi=phi,
update_interval=args.update_interval,
minibatch_size=args.batchsize, epochs=args.epochs,
clip_eps=0.1,
clip_eps_vf=None,
standardize_advantages=args.standardize_advantages,
)
if args.load:
agent.load(args.load)
if args.demo:
eval_stats = experiments.eval_performance(
env=eval_env,
agent=agent,
n_runs=args.eval_n_runs)
print('n_runs: {} mean: {} median: {} stdev: {}'.format(
args.eval_n_runs, eval_stats['mean'], eval_stats['median'],
eval_stats['stdev']))
else:
# Linearly decay the learning rate to zero
def lr_setter(env, agent, value):
agent.optimizer.alpha = value
lr_decay_hook = experiments.LinearInterpolationHook(
args.steps, args.lr, 0, lr_setter)
# Linearly decay the clipping parameter to zero
def clip_eps_setter(env, agent, value):
agent.clip_eps = value
clip_eps_decay_hook = experiments.LinearInterpolationHook(
args.steps, 0.1, 0, clip_eps_setter)
experiments.train_agent_with_evaluation(
agent=agent,
env=env,
eval_env=eval_env,
outdir=args.outdir,
steps=args.steps,
eval_n_runs=args.eval_n_runs,
eval_interval=args.eval_interval,
max_episode_len=args.max_episode_len,
step_hooks=[
lr_decay_hook,
clip_eps_decay_hook,
],
)
if __name__ == '__main__':
main()
|
Python
| 0.999484
|
@@ -6195,16 +6195,58 @@
de_len,%0A
+ save_best_so_far_agent=False,%0A
|
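The script anneals both the learning rate and clip_eps through chainerrl's LinearInterpolationHook; a dependency-free stand-in that shows what such a step hook does (the (env, agent, step) call signature is assumed from chainerrl's hook protocol):

class LinearDecay:
    """Interpolate from start to end over total_steps and pass the
    current value to a setter, mimicking LinearInterpolationHook."""
    def __init__(self, total_steps, start, end, setter):
        self.total_steps = total_steps
        self.start = start
        self.end = end
        self.setter = setter

    def __call__(self, env, agent, step):
        frac = min(float(step) / self.total_steps, 1.0)
        self.setter(env, agent, self.start + frac * (self.end - self.start))

hook = LinearDecay(100, 2.5e-4, 0.0, lambda env, agent, v: print(v))
hook(None, None, 50)  # 0.000125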
a15bda8b4a4d32d25f1e47f69b20f7df0ff45691
|
Remove debugging code
|
ether/ethcontract.py
|
ether/ethcontract.py
|
from ether import asm, util
import re
import persistent
from ethereum import utils
class ETHContract(persistent.Persistent):
def __init__(self, code = ""):
self.code = code
def get_xrefs(self):
disassembly = asm.disassemble(util.safe_decode(self.code))
xrefs = []
for instruction in disassembly:
if instruction['opcode'] == "PUSH20":
if instruction['argument']:
xref = instruction['argument'].decode("utf-8")
if xref not in xrefs:
xrefs.append(xref)
return xrefs
def get_disassembly(self):
return asm.disassemble(util.safe_decode(self.code))
def get_easm(self):
return asm.disassembly_to_easm(asm.disassemble(util.safe_decode(self.code)))
def matches_expression(self, expression):
easm_code = self.get_easm()
str_eval = ''
matches = re.findall(r'func:([a-zA-Z0-9\s,(\[\]]+?\))', expression)
for m in matches:
# Calculate function signature hashes
sign_hash = utils.sha3(m)[:4].hex()
expression = expression.replace(m, sign_hash)
tokens = re.split("( and | or )", expression, re.IGNORECASE)
for token in tokens:
if token == " and " or token == " or ":
str_eval += token
continue
m = re.match(r'^code:([a-zA-Z0-9\s,\[\]]+)', token)
if (m):
code = m.group(1).replace(",", "\\n")
str_eval += "\"" + code + "\" in easm_code"
continue
m = re.match(r'^func:([a-zA-Z0-9\s,()\[\]]+)$', token)
if (m):
str_eval += "\"" + m.group(1) + "\" in easm_code"
print(str_eval)
continue
return eval(str_eval)
class InstanceList(persistent.Persistent):
def __init__(self):
self.addresses = []
self.balances = []
pass
def add(self, address, balance = 0):
self.addresses.append(address)
self.balances.append(balance)
self._p_changed = True
|
Python
| 0.000739
|
@@ -1794,41 +1794,8 @@
%0A%0A
- print(str_eval)%0A%0A
|
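The matcher above rewrites each func:signature token into a 4-byte function selector via keccak256; that step on its own, using the same pyethereum helper the module already imports:

from ethereum import utils

def selector(signature):
    """First four bytes of keccak256(signature), hex-encoded."""
    return utils.sha3(signature)[:4].hex()

print(selector('transfer(address,uint256)'))  # a9059cbb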
531797bc4252e12a3a6b4cea1c0b4744d26a766a
|
missing comma
|
example_capacities.py
|
example_capacities.py
|
#!/usr/bin/python
import argparse
import pyvmax
#################################
### Define and Parse CLI arguments
PARSER = argparse.ArgumentParser(
description='Example implementation of a Python REST client for EMC Unisphere for VMAX.')
RFLAGS = PARSER.add_argument_group('Required arguments')
RFLAGS.add_argument('-url', required=True, help='Base Unisphere URL. e.g. https://10.0.0.1:8443')
RFLAGS.add_argument('-user', required=True, help='Unisphere username. e.g. smc')
RFLAGS.add_argument('-passwd', required=True, help='Unisphere password. e.g. smc')
ARGS = PARSER.parse_args()
URL = ARGS.url
USER = ARGS.user
PASSWORD = ARGS.passwd
vmax_api = pyvmax.connect(URL, USER, PASSWORD)
# discover the known symmetrix serial #'s
prov_array_ids = vmax_api.get_prov_arrays()['symmetrixId']
# going to build a list of dicts, each one a symmetrix
prov_array_list = list()
for symmId in prov_array_ids:
# get the array details
symmetrix = vmax_api.get_prov_array(symmId)['symmetrix'][0]
# for this symmetrix, go ahead and build a list of thin pools
tpList = list()
# make sure to check whether any list results returned..
tp_result = vmax_api.get_prov_array_thinpools(symmId)
if 'poolId' in tp_result:
# iterate through the thin pools, get their details and build a list
for tpId in vmax_api.get_prov_array_thinpools(symmId)['poolId']:
tp = vmax_api.get_prov_array_thinpool(symmId, tpId)['thinPool'][0]
tpList.append(tp)
# add a dict entry for the thin pool list data structure we just created
symmetrix['thinpools'] = tpList
prov_array_list.append(symmetrix)
# do something with this great list of thin provisioned arrays
# print it out!! (the json printer is good for lists and dicts too)
vmax_api.rest.print_json(prov_array_list)
# discover the known slo symmetrix serial #'s
slo_array_ids = vmax_api.get_slo_arrays()['symmetrixId']
# going to build a list of dicts, each one a symmetrix
slo_array_list = list()
for symm_id in slo_array_ids:
# get the array details
symm_result = vmax_api.get_slo_array(symmId)['symmetrix'][0]
symmetrix = {'symmetrix_id' : symm_result['symmetrixId'],
'array_raw_gb' : symm_result['physicalCapacity'],
'array_usable_gb' : symm_result['virtualCapacity'],
'srps_total_subscribed_gb' : 0,
'srps_total_usable_gb' : 0,
'srps_total_allocated_gb' : 0,
'srps_total_host_allocated_gb' : 0,
'srps_total_dse_gb' : 0,
'srps_total_snaps_gb' : 0
'srps_virtual_replica_gb' : 0}
# for this symmetrix, go ahead and build a list of SRP's
srp_list = list()
srps_result = vmax_api.get_slo_array_srps(symm_id)
if 'srpId' in srps_result:
for srp_id in vmax_api.get_slo_array_srps(symm_id)['srpId']:
srp_result = vmax_api.get_slo_array_srp(symm_id, srp_id)['srp'][0]
srp = {'srp_id' : srp_result['srpId'],
'srp_usable_cap_gb' : srp_result['total_usable_cap_gb'],
'srp_allocated_cap_gb' : srp_result['total_alloated_cap_gb'],
'srp_snapshot_allocated_cap_gb' : srp_result['total_snapshot_allocated_cap_gb'],
'srp_srdf_dse_allocated_cap_gb' : srp_result['total_srdf_dse_allocated_cap_gb'],
'srp_subscribed_cap_gb' : srp_result['total_subscribed_cap_gb'],
'srp_host_allocated_cap_gb' : srp_result['total_alloated_cap_gb'] - srp_result['total_snapshot_allocated_cap_gb'] - srp_result['total_srdf_dse_allocated_cap_gb'],
'sgs_cap_gb' : 0,
'sgs_replica_cap_gb' : 0}
# for this SRP, build a list of Storage Groups
sg_list = list()
# make sure to check whether any sg results returned.. not every SRP has storage groups!
result_filter = {'srp_name' : srp['srp_id']}
sgs_result = vmax_api.get_slo_array_storagegroups(symm_Id, result_filter)
if 'storageGroupId' in sgs_result:
# iterate through the sg's, get their details and build a list
for sg_id in sgs_result['storageGroupId']:
sg_result = vmax_api.get_slo_array_storagegroup(symmId, sgId)['storageGroup'][0]
sg = {'sg_id' : sg_result['storageGroupId'],
'sg_cap_gb' : sg_restult['cap_gb'],
'num_snapshots' : sg_result['num_of_snapshots'],
'sg_replica_cap_gb' : sg_restult['cap_gb'] * sg_result['num_of_snapshots']}
# before we loop, update parent SRP's calculated values for this sg
srp['sgs_cap_gb'] += sg['sg_cap_gb']
srp['sgs_replica_cap_gb'] += sg['sg_replicat_cap_gb']
# append this sg to the running list for this SRP
sg_list.append(sg)
# add a dict entry for the Storage Group list data structure we just created
srp['storage_groups'] = sg_list
# before we loop, update parent array's calculated values for this SRP
symmetrix['srps_total_subscribed_gb'] += srp['srp_subscribed_cap_gb']
symmetrix['srps_total_usable_gb'] += srp['srp_usable_cap_gb']
symmetrix['srps_total_allocated_gb'] += srp['srp_allocated_cap_gb']
symmetrix['srps_total_host_allocated_gb'] += srp['srp_host_allocated_cap_gb']
symmetrix['srps_total_dse_gb'] += srp['srp_srdf_dse_allocated_cap_gb']
symmetrix['srps_total_snaps_gb'] += srp['srp_snapshot_allocated_cap_gb']
symmetrix['srps_virtual_replica_gb'] += srp['sgs_replica_cap_gb']
# append this SRP to the running list for this array
srp_list.append(srp)
# add a dict entry for the SRP list data structure we just created
symmetrix['srp'] = srp_list
slo_array_list.append(symmetrix)
# do something with this great list of thin provisioned arrays
# print it out!! (the json printer is good for lists and dicts too)
vmax_api.rest.print_json(slo_array_list)
|
Python
| 0.999885
|
@@ -2674,16 +2674,17 @@
_gb' : 0
+,
%0D%0A
|
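The one-character diff above is a classic missing-comma bug; between two string literals Python makes things worse by silently concatenating instead of raising a SyntaxError:

names = ['alpha'
         'beta',   # missing comma: implicit concatenation -> 'alphabeta'
         'gamma']
print(len(names))  # 2, not 3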
4aa81b72e6476fae9717bbcf5791483b581cd0f6
|
add return type annotations to aggregate.py
|
examples/aggregate.py
|
examples/aggregate.py
|
import sqlite3
import string
import time
from operator import attrgetter
from typing import Callable, Dict, List, NamedTuple, Optional, Tuple
from numba import float64, int64, optional
from numba.experimental import jitclass
from numba.types import ClassType
from slumba import create_aggregate, sqlite_udaf
@sqlite_udaf(float64(float64))
@jitclass(
[
("mean", float64),
("sum_of_squares_of_differences", float64),
("count", int64),
]
)
class Var:
def __init__(self):
self.mean = 0.0
self.sum_of_squares_of_differences = 0.0
self.count = 0
def step(self, value: float) -> None:
self.count += 1
delta = value - self.mean
self.mean += delta
self.sum_of_squares_of_differences += delta * (value - self.mean)
def finalize(self) -> float:
return self.sum_of_squares_of_differences / (self.count - 1)
@sqlite_udaf(optional(float64)(optional(float64), optional(float64)))
@jitclass(
[("mean1", float64), ("mean2", float64), ("mean12", float64), ("count", int64)]
)
class Cov:
def __init__(self):
self.mean1 = 0.0
self.mean2 = 0.0
self.mean12 = 0.0
self.count = 0
def step(self, x: Optional[float], y: Optional[float]) -> None:
if x is not None and y is not None:
self.count += 1
n = self.count
delta1 = (x - self.mean1) / n
self.mean1 += delta1
delta2 = (y - self.mean2) / n
self.mean2 += delta2
self.mean12 += (n - 1) * delta1 * delta2 - self.mean12 / n
def finalize(self) -> Optional[float]:
n = self.count
if not n:
return None
return n / (n - 1) * self.mean12
@sqlite_udaf(optional(float64)(optional(float64)))
@jitclass([("total", float64), ("count", int64)])
class Avg:
def __init__(self):
self.total = 0.0
self.count = 0
def step(self, value: Optional[float]) -> None:
if value is not None:
self.total += value
self.count += 1
def finalize(self) -> Optional[float]:
if not self.count:
return None
return self.total / self.count
@sqlite_udaf(optional(float64)(float64))
@jitclass([("total", float64), ("count", int64)])
class Sum:
def __init__(self):
self.total = 0.0
self.count = 0
def step(self, value: Optional[float]) -> None:
if value is not None:
self.total += value
self.count += 1
def finalize(self) -> Optional[float]:
return self.total if self.count > 0 else None
def main() -> None:
import random
con = sqlite3.connect(":memory:")
con.execute(
"""
CREATE TABLE t (
id INTEGER PRIMARY KEY,
key VARCHAR(1),
value DOUBLE PRECISION
)
"""
)
con.execute("CREATE INDEX key_index ON t (key)")
random_numbers: List[Tuple[str, float]] = [
(random.choice(string.ascii_lowercase[:2]), random.random())
for _ in range(500000)
]
placeholders = ", ".join("?" * len(random_numbers[0]))
query = f"INSERT INTO t (key, value) VALUES ({placeholders})"
con.executemany(query, random_numbers)
cls: ClassType = Avg
builtin = cls.__name__.lower()
cfunc_defined = f"my{builtin}"
python_defined = f"my{builtin}2"
# new way of registering UDAFs using cfuncs
create_aggregate(con, cfunc_defined, 1, cls)
con.create_aggregate(python_defined, 1, cls.class_type.class_def)
query1 = f"select key, {builtin}(value) as builtin_{builtin} from t " f"group by 1"
query2 = (
f"select key, {cfunc_defined}(value) as cfunc_{cfunc_defined} "
f"from t group by 1"
)
query3 = (
f"select key, {python_defined}(value) as python_{python_defined}"
f" from t group by 1"
)
queries: Dict[str, str] = {
f"{builtin}_builtin": query1,
f"{cfunc_defined}_cfunc": query2,
f"{python_defined}_python": query3,
}
Result = NamedTuple(
"Result",
[("name", str), ("result", List[Tuple[str, float]]), ("duration", float)],
)
results: List[Result] = []
execute: Callable = con.execute
for name, query in queries.items():
start = time.time()
exe = execute(query)
stop = time.time()
duration = stop - start
result = list(exe)
results.append(Result(name=name, result=result, duration=duration))
builtin_result: List = results[0].result
results.sort(key=attrgetter("duration"))
strings: List[str] = [
(
f"{name} duration == {duration:.2f}s | "
f"{round(results[-1].duration / duration):d}x faster | "
f"values equal? {'yes' if builtin_result == result else 'no'}"
)
for name, result, duration in results
]
width = max(map(len, strings))
print("\n".join(string.rjust(width, " ")) for string in strings)
if __name__ == "__main__":
main()
|
Python
| 0.000001
|
@@ -491,34 +491,42 @@
f __init__(self)
+ -%3E None
:%0A
-
self.mea
@@ -1104,32 +1104,40 @@
f __init__(self)
+ -%3E None
:%0A self.m
@@ -1888,32 +1888,40 @@
f __init__(self)
+ -%3E None
:%0A self.t
@@ -2335,16 +2335,16 @@
ss Sum:%0A
-
def
@@ -2357,16 +2357,24 @@
__(self)
+ -%3E None
:%0A
|
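The diff adds '-> None' return annotations to each __init__. The same step/finalize shape also works as a plain-Python SQLite aggregate; a minimal, fully annotated sketch:

import sqlite3
from typing import Optional

class Mean:
    def __init__(self) -> None:
        self.total = 0.0
        self.count = 0

    def step(self, value: float) -> None:
        self.total += value
        self.count += 1

    def finalize(self) -> Optional[float]:
        return self.total / self.count if self.count else None

con = sqlite3.connect(':memory:')
con.create_aggregate('mean', 1, Mean)
con.execute('CREATE TABLE t (x REAL)')
con.executemany('INSERT INTO t VALUES (?)', [(1.0,), (3.0,)])
print(con.execute('SELECT mean(x) FROM t').fetchone())  # (2.0,)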
d1490510cd5f66a5da699ffea39b6a8b37c88f01
|
add test for composite potential
|
gary/potential/tests/test_io.py
|
gary/potential/tests/test_io.py
|
# coding: utf-8
""" test reading/writing potentials to files """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os
# Third-party
import numpy as np
# Project
from ..io import read, write
from ..builtin import IsochronePotential
from ...units import galactic
# TODO: config item to specify path to test data?
test_data_path = os.path.abspath(os.path.join(os.path.split(__file__)[0],
"../../../test-data/"))
def test_read():
f1 = os.path.join(test_data_path, 'potential', 'isochrone.yml')
potential = read(f1)
assert np.allclose(potential.parameters['m'], 1E11)
assert np.allclose(potential.parameters['b'], 0.76)
f2 = os.path.join(test_data_path, 'potential', 'pw14.yml')
potential = read(f2)
def test_write():
tmp_filename = "/tmp/potential.yml"
# try a simple potential
potential = IsochronePotential(m=1E11, b=0.76, units=galactic)
with open(tmp_filename,'w') as f:
write(potential, f)
write(potential, tmp_filename)
|
Python
| 0
|
@@ -300,16 +300,51 @@
tential%0A
+from ..custom import PW14Potential%0A
from ...
@@ -365,16 +365,16 @@
alactic%0A
-
%0A# TODO:
@@ -860,16 +860,107 @@
ad(f2)%0A%0A
+ f3 = os.path.join(test_data_path, 'potential', 'pw14_2.yml')%0A potential = read(f3)%0A%0A
def test
@@ -1175,16 +1175,16 @@
al, f)%0A%0A
-
writ
@@ -1210,8 +1210,164 @@
lename)%0A
+%0A # more complex%0A potential = PW14Potential()%0A%0A with open(tmp_filename,'w') as f:%0A write(potential, f)%0A%0A write(potential, tmp_filename)%0A%0A
|
11499596e27e8f4c792cf5e36ea5a1c8f6d6053f
|
Improve logging (learning)
|
evilminions/hydra.py
|
evilminions/hydra.py
|
'''Replicates the behavior of a minion many times'''
import logging
import tornado.gen
import zmq
import salt.config
import salt.loader
import salt.payload
from evilminions.hydrahead import HydraHead
from evilminions.utils import fun_call_id
# HACK: turn the trace function into a no-op
# this almost doubles evil-minion's performance
salt.log.mixins.LoggingTraceMixIn.trace = lambda self, msg, *args, **kwargs: None
class Hydra(object):
'''Spawns HydraHeads, listens for messages coming from the Vampire.'''
def __init__(self, hydra_number):
self.hydra_number = hydra_number
self.current_reactions = []
self.reactions = {}
self.last_time = None
self.serial = salt.payload.Serial({})
self.log = None
def start(self, hydra_count, chunk, prefix, offset,
ramp_up_delay, slowdown_factor, keysize, semaphore):
'''Per-process entry point (one per Hydra)'''
# set up logging
self.log = logging.getLogger(__name__)
self.log.debug("Starting Hydra on: %s", chunk)
# set up the IO loop
zmq.eventloop.ioloop.install()
io_loop = zmq.eventloop.ioloop.ZMQIOLoop.current()
# set up ZeroMQ connection to the Proxy
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect('ipc:///tmp/evil-minions-pub.ipc')
socket.setsockopt(zmq.SUBSCRIBE, "")
stream = zmq.eventloop.zmqstream.ZMQStream(socket, io_loop)
stream.on_recv(self.update_reactions)
# Load original settings and grains
opts = salt.config.minion_config('/etc/salt/minion')
grains = salt.loader.grains(opts)
# set up heads!
first_head_number = chunk[0] if chunk else 0
delays = [ramp_up_delay * ((head_number - first_head_number) * hydra_count + self.hydra_number)
for head_number in chunk]
offset_head_numbers = [head_number + offset for head_number in chunk]
heads = [HydraHead('{}-{}'.format(prefix, offset_head_numbers[i]), io_loop, keysize, opts, grains,
delays[i], slowdown_factor, self.reactions) for i in range(len(chunk))]
# start heads!
for head in heads:
io_loop.spawn_callback(head.start)
semaphore.release()
io_loop.start()
@tornado.gen.coroutine
def update_reactions(self, packed_events):
'''Called whenever a message from Vampire is received.
Updates the internal self.reactions hash to contain reactions that will be mimicked'''
for packed_event in packed_events:
event = self.serial.loads(packed_event)
load = event['load']
socket = event['header']['socket']
current_time = event['header']['time']
self.last_time = self.last_time or current_time
if socket == 'PUB' and self.reactions == {}:
self.reactions[fun_call_id(None, None)] = [self.current_reactions]
self.current_reactions = []
self.last_time = current_time
if socket == 'REQ':
if load['cmd'] == '_auth':
continue
self.current_reactions.append(event)
if load['cmd'] == '_return':
call_id = fun_call_id(load['fun'], load['fun_args'])
self.reactions[call_id] = (self.reactions.get(call_id) or []) + [self.current_reactions]
self.log.debug("Hydra #{} learned reaction #{} for call: {}".format(self.hydra_number,
len(self.reactions[call_id]),
call_id))
self.current_reactions = []
event['header']['duration'] = current_time - self.last_time
self.last_time = current_time
|
Python
| 0
|
@@ -3539,33 +3539,142 @@
ion
-#%7B%7D for call: %7B%7D%22.format(
+list #%7B%7D (%7B%7D reactions) for call: %7B%7D%22.format(%0A
self
@@ -3898,17 +3898,443 @@
-call_id))
+len(self.current_reactions),%0A call_id))%0A for reaction in self.current_reactions:%0A load = reaction%5B'load'%5D%0A cmd = load%5B'cmd'%5D%0A path = %22path=%7B%7D%22.format(load%5B'path'%5D) if 'path' in load else ''%0A self.log.debug(%22 - %7B%7D(%7B%7D)%22.format(cmd, path))%0A
%0A
|
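The improved debug output summarises each learned reaction by its command and optional path; the same formatting in isolation (the load dict shape follows the record above):

import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('hydra')

def log_reactions(call_id, reactions):
    log.debug("learned %d reactions for call: %s", len(reactions), call_id)
    for reaction in reactions:
        load = reaction['load']
        path = "path={}".format(load['path']) if 'path' in load else ''
        log.debug(" - %s(%s)", load['cmd'], path)

log_reactions('test.ping', [{'load': {'cmd': '_return'}}])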
42b0c14d1e34dd88a92f22eda9d87dd104ea61f0
|
tweak demo
|
examples/meanfield.py
|
examples/meanfield.py
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from pyhsmm.basic.distributions import Regression, Gaussian, PoissonDuration
from autoregressive.distributions import AutoRegression
from pyhsmm.util.text import progprint_xrange
from pyslds.models import HMMSLDS
np.random.seed(0)
###################
# generate data #
###################
import autoregressive
As = [np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
for alpha, theta in ((0.95,0.1), (0.95,-0.1), (1., 0.))]
truemodel = autoregressive.models.ARHSMM(
alpha=4.,init_state_concentration=4.,
obs_distns=[AutoRegression(A=A,sigma=0.05*np.eye(2)) for A in As],
dur_distns=[PoissonDuration(alpha_0=5*50,beta_0=5) for _ in As])
truemodel.prefix = np.array([[0.,3.]])
data, labels = truemodel.generate(1000)
data = data[truemodel.nlags:]
plt.figure()
plt.plot(data[:,0],data[:,1],'bx-')
#################
# build model #
#################
Nmax = 10
P = 2
D = data.shape[1]
dynamics_distns = [
AutoRegression(
A=np.eye(P),sigma=np.eye(P),
nu_0=3,S_0=3.*np.eye(P),M_0=np.eye(P),K_0=10.*np.eye(P))
for _ in xrange(Nmax)]
emission_distns = [
Regression(
A=np.eye(D),sigma=0.05*np.eye(D),
nu_0=5.,S_0=np.eye(P),M_0=np.eye(P),K_0=10.*np.eye(P))
for _ in xrange(Nmax)]
init_dynamics_distns = [
Gaussian(nu_0=4,sigma_0=4.*np.eye(P),mu_0=np.zeros(P),kappa_0=0.1)
for _ in xrange(Nmax)]
model = HMMSLDS(
dynamics_distns=dynamics_distns,
emission_distns=emission_distns,
init_dynamics_distns=init_dynamics_distns,
alpha=3.,init_state_distn='uniform')
model.add_data(data)
model.resample_states()
for _ in progprint_xrange(10):
model.resample_model()
model.states_list[0]._init_mf_from_gibbs()
####################
# run mean field #
####################
# plt.figure()
# vlbs = [model.meanfield_coordinate_descent_step() for _ in progprint_xrange(50)]
# plt.plot(vlbs)
for _ in progprint_xrange(50):
model.meanfield_coordinate_descent_step(compute_vlb=False)
import matplotlib.gridspec as gridspec
fig = plt.figure(figsize=(9,3))
gs = gridspec.GridSpec(7,1)
ax1 = fig.add_subplot(gs[:-2])
ax2 = fig.add_subplot(gs[-2], sharex=ax1)
ax3 = fig.add_subplot(gs[-1], sharex=ax1)
im = ax1.matshow(model.states_list[0].expected_states.T, aspect='auto')
ax1.set_xticks([])
ax1.set_yticks([])
ax2.matshow(model.states_list[0].expected_states.argmax(1)[None,:], aspect='auto')
ax2.set_xticks([])
ax2.set_yticks([])
ax3.matshow(labels[None,:], aspect='auto')
ax3.set_xticks([])
ax3.set_yticks([])
plt.show()
|
Python
| 0.000001
|
@@ -753,17 +753,17 @@
alpha_0=
-5
+3
*50,beta
@@ -765,17 +765,17 @@
,beta_0=
-5
+3
) for _
@@ -1911,15 +1911,17 @@
)%0A#
-vlbs =
+plt.plot(
%5Bmod
@@ -1993,24 +1993,8 @@
50)%5D
-%0A# plt.plot(vlbs
)%0A%0Af
|
5c2db731c85e358df6af67767ec483a375a9b661
|
return a list instead, to allow for needing multiple types
|
fedimg/util.py
|
fedimg/util.py
|
# This file is part of fedimg.
# Copyright (C) 2014 Red Hat, Inc.
#
# fedimg is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# fedimg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with fedimg; if not, see http://www.gnu.org/licenses,
# or write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: David Gay <dgay@redhat.com>
#
"""
Utility functions for fedimg.
"""
import socket
import subprocess
import paramiko
from libcloud.compute.types import Provider
import fedimg
def get_file_arch(file_name):
""" Takes a file name (probably of a .raw.xz image file) and returns
the suspected architecture of the contained image. If it doesn't look
like a 32-bit or 64-bit image, None is returned. """
if file_name.find('i386') != -1:
return 'i386'
elif file_name.find('x86_64') != -1:
return 'x86_64'
else:
return None
def get_rawxz_url(task_result):
""" Returns the URL of the raw.xz file produced by the Koji task whose
output files are passed as a list via the task_result argument. """
# I think there might only ever be one qcow2 file per task,
# but doing it this way plays it safe.
file_name = [f for f in task_result['files'] if f.endswith('.raw.xz')][0]
task_id = task_result['task_id']
# extension to base URL to exact file directory
koji_url_extension = "/{}/{}".format(str(task_id)[3:], str(task_id))
full_file_location = fedimg.BASE_KOJI_TASK_URL + koji_url_extension
return full_file_location + "/{}".format(file_name)
def get_virt_type(file_name):
""" Takes a file name (probably of a .raw.xz image file) and returns
the suspected virtualization type that the image file should be
registered as. """
file_name = file_name.lower()
if file_name.find('atomic') != -1:
# hvm is required for atomic images
return 'hvm'
else:
# otherwise, use the default (paravirtual)
# Note: We want all images to be available in HVM, since
# HVM is required for some EC2 instance types. Therefore,
# returning 'paravirtual' will likely result in the upload
# code registering the image as both virtualization
# types (paravirtual and HVM).
return 'paravirtual'
def region_to_provider(region):
""" Takes a region name (ex. 'eu-west-1') and returns
the appropriate libcloud provider value. """
providers = {'ap-northeast-1': Provider.EC2_AP_NORTHEAST,
'ap-southeast-1': Provider.EC2_AP_SOUTHEAST,
'ap-southeast-2': Provider.EC2_AP_SOUTHEAST2,
'eu-west-1': Provider.EC2_EU_WEST,
'sa-east-1': Provider.EC2_SA_EAST,
'us-east-1': Provider.EC2_US_EAST,
'us-west-1': Provider.EC2_US_WEST,
'us-west-2': Provider.EC2_US_WEST_OREGON}
return providers[region]
def ssh_connection_works(username, ip, keypath):
""" Returns True if an SSH connection can me made to `username`@`ip`. """
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
works = False
try:
ssh.connect(ip, username=username,
key_filename=keypath)
works = True
except (paramiko.BadHostKeyException,
paramiko.AuthenticationException,
paramiko.SSHException, socket.error) as e:
pass
ssh.close()
return works
|
Python
| 0.000056
|
@@ -2051,20 +2051,16 @@
)%0A%0A%0Adef
-get_
virt_typ
@@ -2060,16 +2060,31 @@
irt_type
+s_from_filename
(file_na
@@ -2388,13 +2388,15 @@
urn
+%5B
'hvm'
+%5D
%0A
@@ -2427,334 +2427,53 @@
se,
-use the default (paravirtual)%0A # Note: We want all images to be available in HVM, since%0A # HVM is required for some EC2 instance types. Therefore,%0A # returning 'paravirtual' will likely result in the upload%0A # code registering the image as both virtualization%0A # types (paravirtual and HVM).
+build the AMIs with both virtualization types
%0A
@@ -2483,16 +2483,24 @@
return
+ %5B'hvm',
'paravi
@@ -2505,16 +2505,17 @@
virtual'
+%5D
%0A%0A%0Adef r
|
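After this diff the renamed helper returns a list, so callers can register one AMI per virtualization type; its post-diff behaviour, condensed:

def virt_types_from_filename(file_name):
    # atomic images require HVM; everything else is registered as both types
    if 'atomic' in file_name.lower():
        return ['hvm']
    return ['hvm', 'paravirtual']

assert virt_types_from_filename('Fedora-Atomic-22.raw.xz') == ['hvm']
assert virt_types_from_filename('Fedora-Cloud-22.raw.xz') == ['hvm', 'paravirtual']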
db69d08b61f83703fc40ef7273cba1b2e0b825a3
|
stop checking if an entry already exists when polling
|
feeds/tasks.py
|
feeds/tasks.py
|
import time
import feedparser
from celery import task
from feeds.models import Feed, Entry
from django.core.exceptions import ObjectDoesNotExist
from profiles.models import UserProfile, UserEntryDetail
def poll_feed(feed):
parser = feedparser.parse(feed.link)
# Add entries from feed
entries = parser.entries
for entry in entries:
try:
Entry.objects.get(link=entry.link)
except ObjectDoesNotExist:
pass
else:
continue
published = time.strftime('%Y-%m-%d %H:%M', entry.published_parsed)
entry_obj, _ = Entry.objects.get_or_create(feed=feed,
title=entry.title,
link=entry.link,
published=published)
subscribers = UserProfile.objects.filter(feeds=feed)
for profile in subscribers:
if not UserEntryDetail.objects.filter(entry=entry_obj,
profile=profile)\
.exists():
UserEntryDetail(entry=entry_obj,
profile=profile,
read=False).save()
@task
def poll_all_feeds():
feeds = Feed.objects.all()
for feed in feeds:
poll_feed(feed)
|
Python
| 0
|
@@ -347,156 +347,8 @@
es:%0A
- try:%0A Entry.objects.get(link=entry.link)%0A except ObjectDoesNotExist:%0A pass%0A else:%0A continue%0A%0A
|
81806fe89b9c4a364d373020bd56ee1a396a5858
|
Add automatic escaping of the settings docstring for Windows' sake in py3k
|
evennia/game_template/server/conf/settings.py
|
evennia/game_template/server/conf/settings.py
|
"""
Evennia settings file.
The available options are found in the default settings file found
here:
{settings_default}
Remember:
Don't copy more from the default file than you actually intend to
change; this will make sure that you don't overload upstream updates
unnecessarily.
When changing a setting requiring a file system path (like
path/to/actual/file.py), use GAME_DIR and EVENNIA_DIR to reference
your game folder and the Evennia library folders respectively. Python
paths (path.to.module) should be given relative to the game's root
folder (typeclasses.foo) whereas paths within the Evennia library
needs to be given explicitly (evennia.foo).
If you want to share your game dir, including its settings, you can
put secret game- or server-specific settings in secret_settings.py.
"""
# Use the defaults from Evennia unless explicitly overridden
from evennia.settings_default import *
######################################################################
# Evennia base server config
######################################################################
# This is the name of your game. Make it catchy!
SERVERNAME = {servername}
# Server ports. If enabled and marked as "visible", the port
# should be visible to the outside world on a production server.
# Note that there are many more options available beyond these.
# Telnet ports. Visible.
TELNET_ENABLED = True
TELNET_PORTS = [4000]
# (proxy, internal). Only proxy should be visible.
WEBSERVER_ENABLED = True
WEBSERVER_PORTS = [(4001, 4002)]
# Telnet+SSL ports, for supporting clients. Visible.
SSL_ENABLED = False
SSL_PORTS = [4003]
# SSH client ports. Requires crypto lib. Visible.
SSH_ENABLED = False
SSH_PORTS = [4004]
# Websocket-client port. Visible.
WEBSOCKET_CLIENT_ENABLED = True
WEBSOCKET_CLIENT_PORT = 4005
# Internal Server-Portal port. Not visible.
AMP_PORT = 4006
######################################################################
# Settings given in secret_settings.py override those in this file.
######################################################################
try:
from server.conf.secret_settings import *
except ImportError:
print("secret_settings.py file not found or failed to import.")
|
Python
| 0
|
@@ -1,12 +1,13 @@
+r
%22%22%22%0AEvennia
|
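The entire fix above is a single r prefix on the module docstring. What it prevents on Windows, where documented paths contain backslash sequences that look like string escapes:

plain = "C:\temp\new"   # \t and \n are interpreted: tab and newline
raw = r"C:\temp\new"    # backslashes kept literally
print(len(plain), len(raw))  # 9 11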
a85f6f86522dbb984818defc0d5c3cee049f1341
|
add simple async post support
|
eventbus/eventbus.py
|
eventbus/eventbus.py
|
__author__ = 'Xsank'
import inspect
from listener import Listener
from exception import RegisterError
from exception import UnregisterError
class EventBus(object):
def __init__(self):
self.listeners=dict()
def register(self,listener):
if not isinstance(listener,Listener):
raise RegisterError
self.listeners[listener.__class__.__name__]=listener
def unregister(self,listener):
try:
self.listeners.pop(listener.__class__.__name__)
except Exception:
raise UnregisterError
def post(self,event):
for listener in self.listeners.values():
for name,func in inspect.getmembers(listener,predicate=inspect.ismethod):
func(event)
def destroy(self):
self.listeners.clear()
|
Python
| 0
|
@@ -28,16 +28,60 @@
inspect
+%0Afrom multiprocessing.pool import ThreadPool
%0A%0Afrom l
@@ -221,24 +221,36 @@
_init__(self
+,pool_size=4
):%0A s
@@ -269,16 +269,56 @@
s=dict()
+%0A self.pool=ThreadPool(pool_size)
%0A%0A de
@@ -843,16 +843,91 @@
event)%0A%0A
+ def async_post(self,event):%0A self.pool.map(self.post,(event,))%0A%0A
def
@@ -971,8 +971,34 @@
.clear()
+%0A self.pool.close()
|
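The diff above gives the bus a ThreadPool so posting can be handed off to a worker thread. A minimal self-contained sketch of the same pattern (class and handler names here are illustrative, not the eventbus API); note that ThreadPool.map still waits for the result, so a true fire-and-forget variant would use map_async instead:

from multiprocessing.pool import ThreadPool

class Bus(object):
    def __init__(self, pool_size=4):
        self.handlers = []
        self.pool = ThreadPool(pool_size)

    def post(self, event):
        # synchronous: every handler runs in the calling thread
        for handler in self.handlers:
            handler(event)

    def async_post(self, event):
        # hand the single-event tuple to a worker thread;
        # map() blocks until done, map_async() would return immediately
        self.pool.map(self.post, (event,))

    def destroy(self):
        self.handlers = []
        self.pool.close()

bus = Bus()
bus.handlers.append(lambda event: print('got %s' % event))
bus.async_post('ping')   # prints: got ping
bus.destroy()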
30e00089247b314e82bc7792ac6f9641cd632bbd
|
Bump to dev.
|
eventlet/__init__.py
|
eventlet/__init__.py
|
version_info = (0, 9, 15)
__version__ = ".".join(map(str, version_info))
try:
from eventlet import greenthread
from eventlet import greenpool
from eventlet import queue
from eventlet import timeout
from eventlet import patcher
from eventlet import convenience
import greenlet
sleep = greenthread.sleep
spawn = greenthread.spawn
spawn_n = greenthread.spawn_n
spawn_after = greenthread.spawn_after
kill = greenthread.kill
Timeout = timeout.Timeout
with_timeout = timeout.with_timeout
GreenPool = greenpool.GreenPool
GreenPile = greenpool.GreenPile
Queue = queue.Queue
import_patched = patcher.import_patched
monkey_patch = patcher.monkey_patch
connect = convenience.connect
listen = convenience.listen
serve = convenience.serve
StopServe = convenience.StopServe
wrap_ssl = convenience.wrap_ssl
getcurrent = greenlet.greenlet.getcurrent
# deprecated
TimeoutError = timeout.Timeout
exc_after = greenthread.exc_after
call_after_global = greenthread.call_after_global
except ImportError, e:
# This is to make Debian packaging easier, it ignores import
# errors of greenlet so that the packager can still at least
# access the version. Also this makes easy_install a little quieter
if 'greenlet' not in str(e):
# any other exception should be printed
import traceback
traceback.print_exc()
|
Python
| 0
|
@@ -20,9 +20,16 @@
9, 1
-5
+6, %22dev%22
)%0A__
|
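For reference, the join idiom in this file renders the new dev tag straight into the version string, which is all the bump above changes:

version_info = (0, 9, 16, 'dev')
__version__ = '.'.join(map(str, version_info))
print(__version__)   # 0.9.16.dev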
673b00f81baa9f124920ec471f91d50279e8b007
|
return the response
|
roushagent/plugins/lib/bashscriptrunner.py
|
roushagent/plugins/lib/bashscriptrunner.py
|
import os
import string
import subprocess
from threading import Thread
from Queue import Queue, Empty
def name_mangle(s, prefix=""):
# we only support upper case variables and as a convenience convert
# - to _, as - is not valid in bash variables.
prefix = prefix.upper()
r = s.upper().replace("-", "_")
# first character must be _ or alphabet
if not r[0] == '_' and not (r[0].isalpha() and len(prefix) == 0):
r = "".join(["_", r])
# rest of the characters must be alphanumeric or _
valid = string.digits + string.ascii_uppercase + "_"
r = "".join([l for l in r if l in valid])
if len(r) >= 1:
#valid r, prefix it unless it is already prefixed
return r if r.find(prefix) == 0 else prefix + r
raise ValueError("Failed to convert %s to valid bash identifier" % s)
def posix_escape(s):
#The only special character inside of a ' is ', which terminates
#the '. We will surround s with single quotes. If we encounter a
#single quote inside of s, we need to close our enclosure with ',
#escape the single quote in s with "'", then reopen our enclosure
#with '.
return "'%s'" % (s.replace("'", "'\"'\"'"))
def find_script(script, script_path):
path = None
found = False
for path in script_path:
filename = os.path.join(path, script)
if os.path.exists(filename) and \
os.path.dirname(os.path.realpath(filename)) == \
os.path.realpath(path):
found = True
break
if not found:
return None
return filename
class BashScriptRunner(object):
def __init__(self, script_path=["scripts"], environment=None, log=None):
self.script_path = script_path
self.environment = environment or {"PATH":
"/usr/sbin:/usr/bin:/sbin:/bin"}
self.log = log
def run(self, script, *args):
return self.run_env(script, {}, "RCB", *args)
def run_env(self, script, environment, prefix, *args):
# first pass: no input, return something like the following
# { "response": {
# "result_code": <result-code-ish>
# "result_str": <error-or-success-message>
# "result_data": <extended error info or arbitrary data>
# }
# }
env = {}
env.update(self.environment)
env.update(dict([(name_mangle(k, prefix), v)
for k, v in environment.iteritems()]))
response = {"response": {}}
path = find_script(script, self.script_path)
if path is None:
response['result_code'] = 127
response['result_str'] = "%s not found in %s" % (
script, ":".join(self.script_path))
response['result_data'] = {"script": script,
"output": "",
"error": ""}
return response
to_run = [path] + list(args)
#first pass, never use bash to run things
c = subprocess.Popen(to_run,
stdin=open("/dev/null", "r"),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
if self.log is None:
response['result_code'] = c.wait()
response['result_str'] = os.strerror(c.returncode)
response['result_data'] = {"script": path,
"output": c.stdout.read(),
"error": c.stderr.read()}
else:
response['result_data'] = {"script": path,
"output": "",
"error": ""}
stdout = Queue()
stderr = Queue()
t1 = Thread(target=enqueue_output, args=(c.stdout, stdout))
t2 = Thread(target=enqueue_output, args=(c.stderr, stderr))
t1.daemon = True
t2.daemon = True
t1.start()
t2.start()
while c.poll() is None:
for out, name, attr in ((stdout, "output", "info"),
(stderr, "error", "error")):
try:
line = out.get(timeout=0.5)
getattr(self.log, attr)(line.strip())
response['result_data'][name] += line
except Empty:
pass
response['result_code'] = c.returncode
response['result_str'] = os.strerror(c.returncode)
def enqueue_output(out, queue):
for line in out:
queue.put(line)
out.close()
|
Python
| 1
|
@@ -4636,16 +4636,39 @@
rncode)%0A
+ return response
%0A%0Adef en
|
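The quoting trick in posix_escape above is easiest to see in isolation: a single quote inside the string closes the enclosure, emits a double-quoted quote, and reopens the enclosure. A standalone check:

def posix_escape(s):
    return "'%s'" % (s.replace("'", "'\"'\"'"))

print(posix_escape("it's fine"))   # 'it'"'"'s fine'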
eac52d28584ef62daaad5a92f70fe93cb3ee1476
|
change Target
|
rf_model/chembl_rf.py
|
rf_model/chembl_rf.py
|
# Author: xiaotaw@qq.com (Any bug report is welcome)
# Time Created: Nov 2016
# Time Last Updated: Dec 2016
# Addr: Shenzhen, China
# Description:
import os
import sys
import math
import time
import getpass
import numpy as np
import pandas as pd
from scipy import sparse
from collections import defaultdict
from matplotlib import pyplot as plt
from sklearn.externals import joblib
from sklearn.metrics import roc_curve, auc
from sklearn.ensemble import RandomForestClassifier
sys.path.append("/home/%s/Documents/chembl/data_files/" % getpass.getuser())
import chembl_input as ci
# the newly picked out 15 targets, including 9 targets from 5 big groups and 6 targets from others.
target_list = ["CHEMBL279", "CHEMBL203", # Protein Kinases
"CHEMBL217", "CHEMBL253", # GPCRs (Family A)
"CHEMBL235", "CHEMBL206", # Nuclear Hormone Receptors
"CHEMBL240", "CHEMBL4296", # Voltage Gated Ion Channels
"CHEMBL4805", # Ligand Gated Ion Channels
"CHEMBL204", "CHEMBL244", "CHEMBL4822", "CHEMBL340", "CHEMBL205", "CHEMBL4005" # Others
]
# the target
target = "CHEMBL203"
#
model_dir = "model_files"
if not os.path.exists(model_dir):
os.mkdir(model_dir)
#
pred_dir = "pred_files"
if not os.path.exists(pred_dir):
os.mkdir(pred_dir)
def train_pred(target, train_pos_multiply=0):
#
d = ci.Dataset(target, train_pos_multiply=train_pos_multiply)
# random forest clf
clf = RandomForestClassifier(n_estimators=100, max_features=1.0/3, n_jobs=10, max_depth=None, min_samples_split=5, random_state=0)
# fit model
clf.fit(d.train_features, d.train_labels)
# save model
joblib.dump(clf, model_dir + "/rf_%s.m" % target)
# predict class probabilities
#train_pred_proba = clf.predict_proba(d.train_features)[:, 1]
test_pred_proba = clf.predict_proba(d.test_features)[:, 1]
# save pred
test_pred_file = open(pred_dir + "/test_%s.pred" % target, "w")
for id_, pred_v, l_v in zip(d.time_split_test["CMPD_CHEMBLID"], test_pred_proba, d.test_labels):
test_pred_file.write("%s\t%f\t%f\n" % (id_, pred_v, l_v))
test_pred_file.close()
# draw roc fig
fpr, tpr, _ = roc_curve(d.test_labels, test_pred_proba)
roc_auc = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color="r", lw=2, label="ROC curve (area = %.2f)" % roc_auc)
plt.plot([0, 1], [0, 1], color="navy", lw=1, linestyle="--")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("Receiver operating characteristic of RF model on %s" % target)
plt.legend(loc="lower right")
plt.savefig("%s.png" % target)
#plt.show()
target_list = ["CHEMBL4805", "CHEMBL204", "CHEMBL4822", "CHEMBL244"]
tpm_list = [2, 0, 0, 0]
for target, tpm in zip(target_list, tpm_list):
t0 = time.time()
train_pred(target, train_pos_multiply=tpm)
t1 = time.time()
print("%s duration: %.3f" % (target, t1-t0))
"""
pns_pred = clf.predict(d.target_pns_features)
cns_pred = clf.predict(d.target_cns_features_train)
train_pred = clf.predict(d.train_features)
test_pred = clf.predict(d.test_features)
pns_result = ci.compute_performance(d.target_pns_mask.values.astype(int), pns_pred)
cns_result = ci.compute_performance(d.target_cns_mask_train.values.astype(int), cns_pred)
train_result = ci.compute_performance(d.train_labels, train_pred)
test_result = ci.compute_performance(d.test_labels, test_pred)
print(train_result)
print(test_result)
"""
# load model
#clf = joblib.load(model_dir + "/rf_%s.m" % target)
|
Python
| 0.998505
|
@@ -2672,16 +2672,22 @@
how()%0A%0A%0A
+%0A%0A%22%22%22%0A
target_l
@@ -2950,16 +2950,158 @@
t1-t0))%0A
+%22%22%22%0A%0At0 = time.time()%0Atrain_pred(%22CHEMBL4805%22, train_pos_multiply=0)%0At1 = time.time()%0Aprint(%22%25s duration: %25.3f%22 %25 (target, t1-t0))%0A%0A%0A%0A%0A%0A%0A%0A%0A%0A%0A%0A
%0A%0A%22%22%22%0Apn
|
f6c7cda72aab179fa9cae0210c482c8753e1e46b
|
Fix number badge has not been rendered
|
src/blockdiag/noderenderer/__init__.py
|
src/blockdiag/noderenderer/__init__.py
|
# -*- coding: utf-8 -*-
# Copyright 2011 Takeshi KOMIYA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pkg_resources
from blockdiag.utils import images, Box, XY
renderers = {}
searchpath = []
def init_renderers():
for plugin in pkg_resources.iter_entry_points('blockdiag_noderenderer'):
module = plugin.load()
if hasattr(module, 'setup'):
module.setup(module)
def install_renderer(name, renderer):
renderers[name] = renderer
def set_default_namespace(path):
searchpath[:] = []
for path in path.split(','):
searchpath.append(path)
def get(shape):
if not renderers:
init_renderers()
for path in searchpath:
name = "%s.%s" % (path, shape)
if name in renderers:
return renderers[name]
return renderers.get(shape)
class NodeShape(object):
def __init__(self, node, metrics=None):
self.node = node
self.metrics = metrics
m = self.metrics.cell(self.node)
self.textalign = 'center'
self.connectors = [m.top, m.right, m.bottom, m.left]
if node.icon is None:
self.iconbox = None
self.textbox = m.box
else:
image_size = images.get_image_size(node.icon)
if image_size is None:
iconsize = (0, 0)
else:
boundedbox = [metrics.node_width / 2, metrics.node_height]
iconsize = images.calc_image_size(image_size, boundedbox)
vmargin = (metrics.node_height - iconsize[1]) / 2
self.iconbox = Box(m.topleft.x,
m.topleft.y + vmargin,
m.topleft.x + iconsize[0],
m.topleft.y + vmargin + iconsize[1])
self.textbox = Box(self.iconbox[2], m.top.y,
m.bottomright.x, m.bottomright.y)
def render(self, drawer, format, **kwargs):
if self.node.stacked and not kwargs.get('stacked'):
node = self.node.duplicate()
node.label = ""
node.background = ""
for i in range(2, 0, -1):
# use original_metrics FORCE
r = self.metrics.original_metrics.cellsize / 2 * i
metrics = self.metrics.shift(r, r)
self.__class__(node, metrics).render(drawer, format,
stacked=True, **kwargs)
if hasattr(self, 'render_vector_shape') and format == 'SVG':
self.render_vector_shape(drawer, format, **kwargs)
else:
self.render_shape(drawer, format, **kwargs)
self.render_icon(drawer, **kwargs)
self.render_label(drawer, **kwargs)
self.render_number_badge(drawer, **kwargs)
def render_icon(self, drawer, **kwargs):
if self.node.icon is not None and kwargs.get('shadow') is False:
drawer.loadImage(self.node.icon, self.iconbox)
def render_shape(self, drawer, format, **kwargs):
pass
def render_label(self, drawer, **kwargs):
if not kwargs.get('shadow'):
font = self.metrics.font_for(self.node)
drawer.textarea(self.textbox, self.node.label, font,
rotate=self.node.rotate,
fill=self.node.textcolor, halign=self.textalign,
line_spacing=self.metrics.line_spacing)
def render_number_badge(self, drawer, **kwargs):
if self.node.numbered is not None and kwargs.get('shadow') is False:
badgeFill = kwargs.get('badgeFill')
xy = self.metrics.cell(self.node).topleft
r = self.metrics.cellsize * 3 / 2
box = (xy.x - r, xy.y - r, xy.x + r, xy.y + r)
font = self.metrics.font_for(self.node)
drawer.ellipse(box, outline=self.node.linecolor, fill=badgeFill)
drawer.textarea(box, self.node.numbered, font,
rotate=self.node.rotate,
fill=self.node.textcolor)
@property
def top(self):
return self.connectors[0]
@property
def left(self):
return self.connectors[3]
@property
def right(self):
point = self.connectors[1]
if self.node.stacked:
point = XY(point.x + self.metrics.cellsize, point.y)
return point
@property
def bottom(self):
point = self.connectors[2]
if self.node.stacked:
point = XY(point.x, point.y + self.metrics.cellsize)
return point
def shift_shadow(self, value):
xdiff = self.metrics.shadow_offset.x
ydiff = self.metrics.shadow_offset.y
if isinstance(value, XY):
ret = XY(value.x + xdiff, value.y + ydiff)
elif isinstance(value, Box):
ret = Box(value.x1 + xdiff, value.y1 + ydiff,
value.x2 + xdiff, value.y2 + ydiff)
elif isinstance(value, (list, tuple)):
ret = [self.shift_shadow(x) for x in value]
return ret
|
Python
| 0
|
@@ -4070,36 +4070,35 @@
et('shadow') is
-Fals
+Non
e:%0A b
|
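The one-word fix above (is False to is None) hinges on how dict.get signals a missing key: it returns None, not False, so the original guard never fired when the caller simply omitted the flag. In miniature:

kwargs = {}                            # caller never passed shadow=...
print(kwargs.get('shadow') is False)   # False: get() returned None
print(kwargs.get('shadow') is None)    # True: this is the absent-key case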
d742fffd753ed85d164ceafcf2a0cceeb8576e22
|
Add jobstore alias for redis backend.
|
robo/handlers/cron.py
|
robo/handlers/cron.py
|
# -*- coding: utf-8 -*-
"""
robo.handlers.cron
~~~~~~~~~~~~~~~~~~
cron handler for robo.
:copyright: (c) 2015 Shinya Ohyanagi, All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import logging
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.base import JobLookupError
from robo.decorators import cmd
logger = logging.getLogger('robo')
class Scheduler(object):
#: Robo's signal.
signal = None
def __init__(self):
"""Construct a scheduler."""
self.scheduler = BackgroundScheduler()
if not self.scheduler.running:
self.scheduler.start()
@classmethod
def message(cls, **kwargs):
"""Send registered message to robot.
:param **kwargs: Data to be sent to receivers
"""
cls.signal.send(kwargs['message_body'], **kwargs)
def parse_cron_expression(self, cron):
"""Parse cron expression.
* * * * *
T T T T T
| | | | `- day_of_week - 0 .. 6
| | | `--- month ------- 1 .. 12
| | `----- day --------- 1 .. 31
| `------- hour -------- 0 .. 23
`--------- minute ------ 0 .. 59
:param cron: Cron expression
"""
expressions = cron.split(' ')
if len(expressions) != 5:
return None
ret = {
'minute': expressions[0],
'hour': expressions[1],
'day': expressions[2],
'month': expressions[3],
'day_of_week': expressions[4]
}
return ret
def add_job(self, cron, message, **kwargs):
"""Add job to scheduler.
:param cron: Cron style expression
:param message: Message to show
"""
kwargs.update({'message_body': message})
cron = self.parse_cron_expression(cron)
if cron is None:
return None
job = None
try:
job = self.scheduler.add_job(self.message, 'cron',
kwargs=kwargs, **cron)
except Exception as e:
logger.error(e)
logger.info('Cron expression is {0}.'.format(cron))
logger.info('Action is {0}.'.format(message))
return job
def list_jobs(self):
"""List jobs.
Returns job contains id, cron expression, message, next trigger.
"""
jobs = self.scheduler.get_jobs()
results = []
fmt = '{0}: "{1}" {2} {3}'
for job in jobs:
cron = '{0} {1} {2} {3} {4}'.format(
job.trigger.fields[6], # minute
job.trigger.fields[5], # hour
job.trigger.fields[2], # day
job.trigger.fields[1], # month
job.trigger.fields[4], # day of week
)
#: When job is paused, job.next_run_time is null.
if job.next_run_time is None:
time = 'paused'
else:
time = job.next_run_time.strftime('%Y/%m/%d %H:%M:%S')
message = job.kwargs['message_body']
results.append(fmt.format(job.id, cron, time, message))
return results
def remove_job(self, id):
"""Remove job.
:param id: Job id
"""
try:
self.scheduler.remove_job(id)
except JobLookupError as e:
logger.error(e)
return False
return True
def pause_job(self, id):
"""Pause job.
:param id: Job id
"""
self.scheduler.pause_job(id)
def resume_job(self, id):
"""Resume job.
:param id: Job id
"""
self.scheduler.resume_job(id)
def add_jobstore(self, jobstore, **kwargs):
"""Add a job store to scheduler.
:param jobstore: Job store
"""
try:
self.scheduler.add_jobstore(jobstore, **kwargs)
except ValueError as e:
logger.error(e)
return False
return True
class Cron(object):
def __init__(self):
"""Construct a cron handler.
> robo add job "0 * * * *" robo echo message
        Fires the `robo echo message` command at minute 0 of every hour.
"""
#: Disable apscheduler's log.
apslogger = logging.getLogger('apscheduler')
apslogger.setLevel(logging.ERROR)
self.scheduler = Scheduler()
self._signal = None
self._options = None
@property
def signal(self):
return None
@signal.setter
def signal(self, signal):
#: Signal should be classmethod because if signal is instance,
#: `apscheduler` would raise ValueError.
#: ValueError: This Job cannot be serialized since the reference to
#: its callable
Scheduler.signal = signal
@property
def options(self):
return None
@options.setter
def options(self, options):
if isinstance(options, dict) and 'cron' in options:
if 'jobstore' in options['cron']:
jobstore = options['cron']['jobstore']
else:
return
job_options = {}
if 'options' in options['cron']:
job_options = options['cron']['options']
self.scheduler.add_jobstore(jobstore, **job_options)
@cmd(regex=r'add job "(?P<schedule>.+)" (?P<body>.+)',
description='Add a cron job.')
def add(self, message, **kwargs):
job = self.scheduler.add_job(message.match.group(1),
message.match.group(2),
**kwargs)
return 'Job {0} created.'.format(job)
@cmd(regex=r'list jobs$', description='List all cron jobs.')
def list(self, message, **kwargs):
jobs = self.scheduler.list_jobs()
return '\n'.join(jobs)
    @cmd(regex=r'delete job (?P<id>.+)', description='Delete a cron job.')
def delete(self, message, **kwargs):
ret = self.scheduler.remove_job(message.match.group(1))
if ret is False:
            return 'Failed to delete job. Job not found.'
        return 'Job deleted.'
@cmd(regex=r'pause job (?P<id>.+)', description='Pause a cron job.')
def pause(self, message, **kwargs):
self.scheduler.pause_job(message.match.group(1))
return 'Job paused.'
@cmd(regex=r'resume job (?P<id>.+)', description='Resume a cron job.')
def resume(self, message, **kwargs):
self.scheduler.resume_job(message.match.group(1))
return 'Job resumed.'
@cmd(regex=r'job expression$', description='Show job expression.')
def usage(self, message, **kwargs):
usage = """
* * * * *
T T T T T
| | | | `- day_of_week - 0 .. 6
| | | `--- month ------- 1 .. 12
| | `----- day --------- 1 .. 31
| `------- hour -------- 0 .. 23
`--------- minute ------ 0 .. 59
"""
return usage
|
Python
| 0
|
@@ -662,24 +662,51 @@
er.start()%0A%0A
+ self.alias = None%0A%0A
@classme
@@ -2031,16 +2031,78 @@
'cron',%0A
+ jobstore=self.alias,%0A
@@ -5364,16 +5364,124 @@
ions'%5D%0A%0A
+ if 'alias' in job_options:%0A self.scheduler.alias = job_options%5B'alias'%5D%0A%0A
|
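For context, APScheduler resolves job stores by alias, which is what the patch threads through add_job. A rough sketch of the mechanism (the redis backend and its connection options are assumptions here, not taken from this repo):

from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler()
# register a second store under the alias 'redis'
scheduler.add_jobstore('redis', alias='redis', host='localhost', port=6379)
# jobs created with jobstore='redis' are persisted in that backend
scheduler.add_job(print, 'cron', minute='*', args=['tick'], jobstore='redis')
scheduler.start()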
22f373c0e6ef3a2c7bfb128ad014af245f113ea9
|
Add default serial baudrate.
|
robus/io/serial_io.py
|
robus/io/serial_io.py
|
import serial as _serial
from serial.tools.list_ports import comports
from . import IOHandler
class Serial(IOHandler):
@classmethod
def is_host_compatible(cls, host):
available_host = (p.device for p in comports())
return host in available_host
def __init__(self, host, baudrate):
self._serial = _serial.Serial(host, baudrate)
def recv(self):
return self._serial.readline()
def write(self, data):
self._serial.write(data + '\r'.encode())
|
Python
| 0
|
@@ -304,16 +304,22 @@
baudrate
+=57600
):%0A
|
e0024790be3a85e529c93397500e4f736c82a5ef
|
Fix the runner for Python 2.
|
src/calmjs/parse/tests/test_testing.py
|
src/calmjs/parse/tests/test_testing.py
|
# -*- coding: utf-8 -*-
import unittest
from calmjs.parse.testing.util import build_equality_testcase
from calmjs.parse.testing.util import build_exception_testcase
class BuilderEqualityTestCase(unittest.TestCase):
def test_build_equality_testcase(self):
DummyTestCase = build_equality_testcase('DummyTestCase', int, [
('str_to_int_pass', '1', 1),
('str_to_int_fail', '2', 1),
('str_to_int_exception', 'z', 1),
])
testcase = DummyTestCase()
testcase.test_str_to_int_pass()
with self.assertRaises(AssertionError):
testcase.test_str_to_int_fail()
with self.assertRaises(ValueError):
testcase.test_str_to_int_exception()
def test_build_equality_testcase_flag_dupe_labels(self):
with self.assertRaises(ValueError):
build_equality_testcase('DummyTestCase', int, [
('str_to_int_dupe', '1', 1),
('str_to_int_dupe', '2', 2),
])
class BuilderExceptionTestCase(unittest.TestCase):
def test_build_exception_testcase(self):
FailTestCase = build_exception_testcase(
'FailTestCase', int, [
('str_to_int_fail1', 'hello'),
('str_to_int_fail2', 'goodbye'),
('str_to_int_fail3', '1'),
],
ValueError,
)
testcase = FailTestCase()
# ValueError should have been caught.
testcase.test_str_to_int_fail1()
testcase.test_str_to_int_fail2()
# Naturally, the final test will not raise it.
with self.assertRaises(AssertionError):
testcase.test_str_to_int_fail3()
|
Python
| 0.001551
|
@@ -161,16 +161,73 @@
tcase%0A%0A%0A
+def run(self):%0A %22%22%22%0A A dummy run method.%0A %22%22%22%0A%0A%0A
class Bu
@@ -524,16 +524,52 @@
%5D)%0A
+ DummyTestCase.runTest = run%0A
@@ -1464,16 +1464,51 @@
)%0A
+ FailTestCase.runTest = run%0A
|
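The dummy run method added above exists because Python 2's unittest.TestCase.__init__ looks up a test method named runTest whenever a case is instantiated with no method name, and raises ValueError if it is missing; Python 3 special-cases that lookup. A standalone illustration:

import unittest

class NoRunTest(unittest.TestCase):
    def test_something(self):
        pass

# Python 2: ValueError: no such test method in <class '...NoRunTest'>: runTest
# Python 3: construction succeeds because the missing runTest is tolerated
case = NoRunTest()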
d926c984e895b68ad0cc0383926451c0d7249512
|
Fix use of deprecated find_module
|
astropy/tests/tests/test_imports.py
|
astropy/tests/tests/test_imports.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pkgutil
def test_imports():
"""
This just imports all modules in astropy, making sure they don't have any
dependencies that sneak through
"""
def onerror(name):
# We should raise any legitimate error that occurred, but not
# any warnings which happen to be caught because of our pytest
# settings (e.g., DeprecationWarning).
try:
raise
except Warning:
pass
for imper, nm, ispkg in pkgutil.walk_packages(['astropy'], 'astropy.',
onerror=onerror):
imper.find_module(nm)
def test_toplevel_namespace():
import astropy
d = dir(astropy)
assert 'os' not in d
assert 'log' in d
assert 'test' in d
assert 'sys' not in d
|
Python
| 0.000002
|
@@ -671,22 +671,20 @@
er.find_
-module
+spec
(nm)%0A%0A%0Ad
|
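find_module was deprecated in favor of the spec API used in the fix above; outside of pkgutil importers, the modern general-purpose spelling is importlib.util.find_spec. A quick illustration:

import importlib.util

spec = importlib.util.find_spec('json')
print(spec.name)     # json
print(spec.origin)   # filesystem path of the json package (varies per install)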
d7d733f12b346c43a8b2751be6b32c0359c728e5
|
Add comment marking tfx.orchestration.interactive.* as experimental.
|
tfx/orchestration/interactive/interactive_context.py
|
tfx/orchestration/interactive/interactive_context.py
|
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX interactive context for iterative development.
See `examples/chicago_taxi_pipeline/taxi_pipeline_interactive.ipynb` for an
example of how to run TFX in a Jupyter notebook for iterative development.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import logging
import os
import tempfile
from typing import Text
from ml_metadata.proto import metadata_store_pb2
from tfx.components.base import base_component
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.orchestration.component_launcher import ComponentLauncher
class InteractiveContext(object):
"""TFX interactive context for interactive TFX notebook development."""
_DEFAULT_SQLITE_FILENAME = 'metadata.sqlite'
def __init__(
self,
pipeline_name: Text = None,
pipeline_root: Text = None,
metadata_connection_config: metadata_store_pb2.ConnectionConfig = None):
"""Initialize an InteractiveContext.
Args:
pipeline_name: Optional name of the pipeline for ML Metadata tracking
purposes. If not specified, a name will be generated for you.
pipeline_root: Optional path to the root of the pipeline's outputs. If not
specified, an ephemeral temporary directory will be created and used.
metadata_connection_config: Optional metadata_store_pb2.ConnectionConfig
instance used to configure connection to a ML Metadata connection. If
not specified, an ephemeral SQLite MLMD connection contained in the
pipeline_root directory with file name "metadata.sqlite" will be used.
"""
if not pipeline_name:
pipeline_name = 'interactive-%s' % datetime.datetime.now().isoformat()
if not pipeline_root:
pipeline_root = tempfile.mkdtemp(prefix='tfx-%s-' % pipeline_name)
logging.info(
'InteractiveContext pipeline_root argument not provided: using '
'temporary directory %s as root for pipeline outputs.',
pipeline_root)
if not metadata_connection_config:
# TODO(ccy): consider reconciling similar logic here with other instances
# in tfx/orchestration/...
metadata_sqlite_path = os.path.join(
pipeline_root, self._DEFAULT_SQLITE_FILENAME)
metadata_connection_config = metadata.sqlite_metadata_connection_config(
metadata_sqlite_path)
logging.info(
'InteractiveContext metadata_connection_config not provided: using '
'SQLite ML Metadata database at %s.',
metadata_sqlite_path)
self.pipeline_name = pipeline_name
self.pipeline_root = pipeline_root
self.metadata_connection_config = metadata_connection_config
def run(self,
component: base_component.BaseComponent,
enable_cache: bool = True):
"""Run a given TFX component in the interactive context.
Args:
component: Component instance to be run.
enable_cache: whether caching logic should be enabled in the driver.
Returns:
ExecutionResult object.
"""
run_id = datetime.datetime.now().isoformat()
pipeline_info = data_types.PipelineInfo(
pipeline_name=self.pipeline_name,
pipeline_root=self.pipeline_root,
run_id=run_id)
driver_args = data_types.DriverArgs(
enable_cache=enable_cache,
interactive_resolution=True)
additional_pipeline_args = {}
for name, output in component.outputs.get_all().items():
for artifact in output.get():
artifact.pipeline_name = self.pipeline_name
artifact.producer_component = component.component_name
artifact.run_id = run_id
artifact.name = name
launcher = ComponentLauncher(component, pipeline_info, driver_args,
self.metadata_connection_config,
additional_pipeline_args)
execution_id = launcher.launch()
return ExecutionResult(component, execution_id)
class ExecutionResult(object):
"""Execution result from a component run in an InteractiveContext."""
def __init__(self,
component: base_component.BaseComponent,
execution_id: int):
self.component = component
self.execution_id = execution_id
def __repr__(self):
outputs_parts = []
for name, channel in self.component.outputs.get_all().items():
repr_string = '%s: %s' % (name, repr(channel))
for line in repr_string.split('\n'):
outputs_parts.append(line)
outputs_str = '\n'.join(' %s' % line for line in outputs_parts)
return ('ExecutionResult(\n component: %s'
'\n execution_id: %s'
'\n outputs:\n%s'
')') % (self.component.component_name,
self.execution_id,
outputs_str)
|
Python
| 0
|
@@ -794,16 +794,108 @@
lopment.
+%0A%0ANote: these APIs are experimental and changes to interface and functionality%0Aare expected.
%0A%22%22%22%0A%0Afr
|
c479c360d979d22182e787f74f5a74473fc41002
|
Save sales ranges only when the forms data is changed
|
shoop/campaigns/admin_module/form_parts.py
|
shoop/campaigns/admin_module/form_parts.py
|
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy as _
from shoop.admin.form_part import FormPart, TemplatedFormDef
from shoop.campaigns.models import ContactGroupSalesRange
from shoop.core.models import Shop, ShopStatus
from shoop.core.models._contacts import PROTECTED_CONTACT_GROUP_IDENTIFIERS
class SalesRangesForm(forms.ModelForm):
class Meta:
model = ContactGroupSalesRange
fields = ["min_value", "max_value"]
labels = {
"min_value": _("Minimum value"),
"max_value": _("Maximum value")
}
help_texts = {
"max_value": _("Leave empty for no maximum")
}
def __init__(self, **kwargs):
super(SalesRangesForm, self).__init__(**kwargs)
class SalesRangesFormPart(FormPart):
priority = 3
name = "contact_group_sales_ranges"
form = SalesRangesForm
def __init__(self, request, object=None):
super(SalesRangesFormPart, self).__init__(request, object)
self.shops = Shop.objects.filter(status=ShopStatus.ENABLED)
def _get_form_name(self, shop):
return "%d-%s" % (shop.pk, self.name)
def get_form_defs(self):
if not self.object.pk or self.object.identifier in PROTECTED_CONTACT_GROUP_IDENTIFIERS:
return
for shop in self.shops:
instance, _ = ContactGroupSalesRange.objects.get_or_create(group=self.object, shop=shop)
yield TemplatedFormDef(
name=self._get_form_name(shop),
form_class=self.form,
template_name="shoop/campaigns/admin/sales_ranges_form_part.jinja",
required=False,
kwargs={"instance": instance}
)
def form_valid(self, form):
for shop in self.shops:
name = self._get_form_name(shop)
if name in form.forms:
form.forms[name].save()
|
Python
| 0
|
@@ -2015,32 +2015,72 @@
, form):%0A
+ form_names = %5Bself._get_form_name(shop)
for shop in sel
@@ -2078,33 +2078,33 @@
op in self.shops
-:
+%5D
%0A nam
@@ -2100,78 +2100,139 @@
- name = self._get_form_name(shop)%0A if name in form.forms
+forms = %5Bform.forms%5Bname%5D for name in form_names if name in form.forms%5D%0A for form in forms:%0A if form.changed_data
:%0A
@@ -2254,20 +2254,8 @@
orm.
-forms%5Bname%5D.
save
|
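The patch above leans on Django's change tracking for bound forms: changed_data lists only the fields whose submitted values differ from the initial ones, so untouched sales-range forms are never saved. A minimal standalone illustration (field names are illustrative):

import django
from django.conf import settings

settings.configure()   # minimal config so the form machinery runs standalone
django.setup()

from django import forms

class RangeForm(forms.Form):
    min_value = forms.IntegerField(required=False)
    max_value = forms.IntegerField(required=False)

form = RangeForm(data={'min_value': '10', 'max_value': ''},
                 initial={'min_value': 5, 'max_value': None})
form.is_valid()
print(form.changed_data)   # ['min_value']: only this field differs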
8b17411d16c22c7a28017f67c827c81e28535f41
|
Fix rate_watcher locking error
|
sacad/rate_watcher.py
|
sacad/rate_watcher.py
|
""" This module provides a class with a context manager to help avoid overloading web servers. """
import asyncio
import logging
import os
import random
import sqlite3
import time
import urllib.parse
class AccessRateWatcher:
""" Access rate limiter, supporting concurrent access by threads and/or processes. """
def __init__(self, db_filepath, url, min_delay_between_accesses, *, jitter_range_ms=None, logger=logging.getLogger()):
self.domain = urllib.parse.urlsplit(url).netloc
self.min_delay_between_accesses = min_delay_between_accesses
self.jitter_range_ms = jitter_range_ms
self.logger = logger
os.makedirs(os.path.dirname(db_filepath), exist_ok=True)
self.connection = sqlite3.connect(db_filepath)
with self.connection:
self.connection.executescript("""CREATE TABLE IF NOT EXISTS access_timestamp (domain TEXT PRIMARY KEY,
timestamp FLOAT NOT NULL);""")
self.lock = asyncio.Lock()
async def waitAccessAsync(self):
""" Wait the needed time before sending a request to honor rate limit. """
async with self.lock:
while True:
last_access_ts = self.__getLastAccess()
if last_access_ts is not None:
now = time.time()
last_access_ts = last_access_ts[0]
time_since_last_access = now - last_access_ts
if time_since_last_access < self.min_delay_between_accesses:
time_to_wait = self.min_delay_between_accesses - time_since_last_access
if self.jitter_range_ms is not None:
time_to_wait += random.randint(*self.jitter_range_ms) / 1000
self.logger.debug("Sleeping for %.2fms because of rate limit for domain %s" % (time_to_wait * 1000,
self.domain))
await asyncio.sleep(time_to_wait)
access_time = time.time()
self.__access(access_time)
# now we should be good... except if another process did the same query at the same time
# the database serves as an atomic lock, query again to be sure the last row is the one
# we just inserted
last_access_ts = self.__getLastAccess()
if last_access_ts[0] == access_time:
break
def __getLastAccess(self):
with self.connection:
return self.connection.execute("""SELECT timestamp
FROM access_timestamp
WHERE domain = ?;""",
(self.domain,)).fetchone()
def __access(self, ts):
""" Record an API access. """
with self.connection:
self.connection.execute("INSERT OR REPLACE INTO access_timestamp (timestamp, domain) VALUES (?, ?)",
(ts, self.domain))
|
Python
| 0.000001
|
@@ -1000,22 +1000,12 @@
k =
-asyncio.Lock()
+None
%0A%0A
@@ -1112,24 +1112,84 @@
limit. %22%22%22%0A
+ if self.lock is None:%0A self.lock = asyncio.Lock()%0A%0A
async wi
|
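The locking fix above swaps eager construction for lazy construction. The rationale, roughly: on the Python versions this code targeted, an asyncio.Lock created in __init__ could bind to whatever event loop existed at construction time, and awaiting it later from a different running loop failed; creating it inside the coroutine guarantees it belongs to the loop that is actually running. A condensed sketch of the pattern:

import asyncio

class Limiter:
    def __init__(self):
        self.lock = None   # deliberately not created here

    async def hit(self):
        if self.lock is None:
            # created while a loop is running, so it binds to that loop
            self.lock = asyncio.Lock()
        async with self.lock:
            await asyncio.sleep(0.01)

asyncio.run(Limiter().hit())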
029e39edfb733a524d7ea4fc7f64fa93e81b9f53
|
Add SACREBLEU_DIR and smart_open to imports (#73)
|
sacrebleu/__init__.py
|
sacrebleu/__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
__version__ = '1.4.5'
__description__ = 'Hassle-free computation of shareable, comparable, and reproducible BLEU scores'
from .sacrebleu import corpus_bleu, corpus_chrf, sentence_bleu, sentence_chrf, compute_bleu,\
raw_corpus_bleu, BLEU, CHRF, DATASETS, TOKENIZERS
# more imports for backward compatibility
from .sacrebleu import ref_stats, bleu_signature, extract_ngrams, extract_char_ngrams, \
get_corpus_statistics, display_metric, get_sentence_statistics, download_test_set
|
Python
| 0
|
@@ -757,16 +757,28 @@
u import
+ smart_open,
corpus_
@@ -894,16 +894,31 @@
KENIZERS
+, SACREBLEU_DIR
%0A%0A# more
|
09faf99eb775a36a70d03958a58bf1df8bbb1b93
|
update for structure changes
|
salt/auth/__init__.py
|
salt/auth/__init__.py
|
'''
Salt's pluggable authentication system
This sysetm allows for authentication to be managed in a module pluggable way
so that any external authentication system can be used inside of Salt
'''
# 1. Create auth loader instance
# 2. Accept arguments as a dict
# 3. Verify with function introspection
# 4. Execute auth function
# 5. Cache auth token with relative data opts['token_dir']
# 6. Interface to verify tokens
# Import Python libs
import os
import time
import hashlib
import logging
import random
#
# Import Salt libs
import salt.loader
import salt.utils
import salt.payload
log = logging.getLogger(__name__)
class LoadAuth(object):
'''
    Wrap the authentication system to handle peripheral components
'''
def __init__(self, opts):
self.opts = opts
self.max_fail = 1.0
self.serial = salt.payload.Serial(opts)
self.auth = salt.loader.auth(opts)
def load_name(self, load):
'''
        Return the primary name associated with the load; if an empty string
is returned then the load does not match the function
'''
if not 'fun' in load:
return ''
fstr = '{0}.auth'.format(load['fun'])
if not fstr in self.auth:
return ''
fcall = salt.utils.format_call(self.auth[fstr], load)
try:
return fcall['args'][0]
except IndexError:
return ''
def auth_call(self, load):
'''
Return the token and set the cache data for use
'''
if not 'fun' in load:
return False
fstr = '{0}.auth'.format(load['fun'])
if not fstr in self.auth:
return False
fcall = salt.utils.format_call(self.auth[fstr], load)
try:
if 'kwargs' in fcall:
return self.auth[fstr](*fcall['args'], **fcall['kwargs'])
else:
return self.auth[fstr](*fcall['args'])
except Exception as exc:
err = 'Authentication module threw an exception: {0}'.format(exc)
log.critical(err)
return False
return False
def time_auth(self, load):
'''
Make sure that all failures happen in the same amount of time
'''
start = time.time()
ret = self.auth_call(load)
if ret:
return ret
f_time = time.time() - start
if f_time > self.max_fail:
self.max_fail = f_time
        deviation = self.max_fail / 4
        r_time = random.uniform(
                self.max_fail - deviation,
                self.max_fail + deviation
)
while start + r_time > time.time():
time.sleep(0.001)
return False
def mk_token(self, load):
'''
Run time_auth and create a token. Return False or the token
'''
        ret = self.time_auth(load)
if ret is False:
return ret
tok = hashlib.md5(os.urandom(512)).hexdigest()
        t_path = os.path.join(self.opts['token_dir'], tok)
        while os.path.isfile(t_path):
            tok = hashlib.md5(os.urandom(512)).hexdigest()
            t_path = os.path.join(self.opts['token_dir'], tok)
        fstr = '{0}.auth'.format(load['eauth'])
        fcall = salt.utils.format_call(self.auth[fstr], load)
tdata = {'start': time.time(),
'expire': time.time() + self.opts['token_expire'],
'name': fcall['args'][0],}
with open(t_path, 'w+') as fp_:
fp_.write(self.serial.dumps(tdata))
return tok
def get_tok(self, tok):
'''
        Return the name associated with the token, or False if the token is not valid
'''
        t_path = os.path.join(self.opts['token_dir'], tok)
        if not os.path.isfile(t_path):
return False
with open(t_path, 'r') as fp_:
return self.serial.loads(fp_.read())
return False
|
Python
| 0
|
@@ -1074,35 +1074,37 @@
if not '
-fun
+eauth
' in load:%0A
@@ -1151,35 +1151,37 @@
h'.format(load%5B'
-fun
+eauth
'%5D)%0A if n
|
dd47871f0a82610186aefa21ed7a736885ca44c0
|
Fix unused args in modules/cloud.py
|
salt/modules/cloud.py
|
salt/modules/cloud.py
|
# -*- coding: utf-8 -*-
'''
Salt-specific interface for calling Salt Cloud directly
'''
# Import python libs
import os
import logging
import copy
# Import salt libs
try:
import salt.cloud
HAS_SALTCLOUD = True
except ImportError:
HAS_SALTCLOUD = False
import salt.utils
log = logging.getLogger(__name__)
__func_alias__ = {
'profile_': 'profile'
}
def __virtual__():
'''
Only work on POSIX-like systems
'''
if HAS_SALTCLOUD:
return True
return False
def _get_client():
'''
Return a cloud client
'''
client = salt.cloud.CloudClient(
os.path.join(os.path.dirname(__opts__['conf_file']), 'cloud'),
pillars=copy.deepcopy(__pillar__.get('cloud', {}))
)
return client
def list_sizes(provider='all'):
'''
List cloud provider sizes for the given providers
CLI Example:
.. code-block:: bash
salt '*' cloud.list_sizes my-gce-config
'''
client = _get_client()
sizes = client.list_sizes(provider)
return sizes
def list_images(provider='all'):
'''
List cloud provider images for the given providers
CLI Example:
.. code-block:: bash
salt '*' cloud.list_images my-gce-config
'''
client = _get_client()
images = client.list_images(provider)
return images
def list_locations(provider='all'):
'''
List cloud provider locations for the given providers
CLI Example:
.. code-block:: bash
salt '*' cloud.list_locations my-gce-config
'''
client = _get_client()
locations = client.list_locations(provider)
return locations
def query(query_type='list_nodes'):
'''
List cloud provider data for all providers
CLI Examples:
.. code-block:: bash
salt '*' cloud.query
salt '*' cloud.query list_nodes_full
salt '*' cloud.query list_nodes_select
'''
client = _get_client()
info = client.query(query_type)
return info
def full_query(query_type='list_nodes_full'):
'''
List all available cloud provider data
CLI Example:
.. code-block:: bash
salt '*' cloud.full_query
'''
return query(query_type='list_nodes_full')
def select_query(query_type='list_nodes_select'):
'''
List selected nodes
CLI Example:
.. code-block:: bash
salt '*' cloud.select_query
'''
return query(query_type='list_nodes_select')
def profile_(profile, names, vm_overrides=None, **kwargs):
'''
Spin up an instance using Salt Cloud
CLI Example:
.. code-block:: bash
salt '*' cloud.profile my-gce-config myinstance
'''
client = _get_client()
info = client.profile(profile, names, vm_overrides=vm_overrides, **kwargs)
return info
def destroy(names):
'''
Destroy the named VM(s)
CLI Example:
.. code-block:: bash
salt '*' cloud.destroy myinstance
'''
client = _get_client()
info = client.destroy(names)
return info
def action(
fun=None,
cloudmap=None,
names=None,
provider=None,
instance=None,
**kwargs):
'''
Execute a single action on the given provider/instance
CLI Example:
.. code-block:: bash
salt '*' cloud.action start instance=myinstance
salt '*' cloud.action stop instance=myinstance
salt '*' cloud.action show_image provider=my-ec2-config \
image=ami-1624987f
'''
client = _get_client()
info = client.action(fun, cloudmap, names, provider, instance, kwargs)
return info
def create(provider, names, **kwargs):
'''
Create an instance using Salt Cloud
CLI Example:
.. code-block:: bash
salt minionname cloud.create my-ec2-config myinstance \
image=ami-1624987f size='Micro Instance' ssh_username=ec2-user \
securitygroup=default delvol_on_destroy=True
'''
client = _get_client()
info = client.create(provider, names, **kwargs)
return info
def volume_list(provider):
'''
List block storage volumes
CLI Example:
.. code-block:: bash
salt minionname cloud.volume_list my-nova
'''
client = _get_client()
info = client.extra_action(action='volume_list', provider=provider, names='name')
return info['name']
def volume_delete(provider, names, **kwargs):
'''
Delete volume
CLI Example:
.. code-block:: bash
salt minionname cloud.volume_delete my-nova myblock
'''
client = _get_client()
info = client.extra_action(provider=provider, names=names, action='volume_delete', **kwargs)
return info
def volume_create(provider, names, **kwargs):
'''
Create volume
CLI Example:
.. code-block:: bash
salt minionname cloud.volume_create my-nova myblock size=100 \
voltype=SSD
'''
client = _get_client()
info = client.extra_action(action='volume_create', names=names, provider=provider, **kwargs)
return info
def volume_attach(provider, names, **kwargs):
'''
Attach volume to a server
CLI Example:
.. code-block:: bash
salt minionname cloud.volume_attach my-nova myblock \
server_name=myserver \
device='/dev/xvdf'
'''
client = _get_client()
info = client.extra_action(provider=provider, names=names, action='volume_attach', **kwargs)
return info
def volume_detach(provider, names, **kwargs):
'''
Detach volume from a server
CLI Example:
.. code-block:: bash
salt minionname cloud.volume_detach my-nova myblock \
server_name=myserver
'''
client = _get_client()
info = client.extra_action(provider=provider, names=names, action='volume_detach', **kwargs)
return info
def network_list(provider):
'''
List private networks
CLI Example:
.. code-block:: bash
salt minionname cloud.network_list my-nova
'''
client = _get_client()
return client.extra_action(action='network_list', provider=provider, names='names')
def network_create(provider, names, **kwargs):
'''
Create private network
CLI Example:
.. code-block:: bash
salt minionname cloud.network_create my-nova names=['salt'] cidr='192.168.100.0/24'
'''
client = _get_client()
return client.extra_action(provider=provider, names=names, action='network_create', **kwargs)
def virtual_interface_list(provider, names, **kwargs):
'''
List virtual interfaces on a server
CLI Example:
.. code-block:: bash
salt minionname cloud.virtual_interface_list my-nova names=['salt-master']
'''
client = _get_client()
return client.extra_action(provider=provider, names=names, action='virtual_interface_list', **kwargs)
def virtual_interface_create(provider, names, **kwargs):
'''
Attach private interfaces to a server
CLI Example:
.. code-block:: bash
salt minionname cloud.virtual_interface_create my-nova names=['salt-master'] net_name='salt'
'''
client = _get_client()
return client.extra_action(provider=provider, names=names, action='virtual_interface_create', **kwargs)
|
Python
| 0
|
@@ -2180,33 +2180,26 @@
ry_type=
-'list_nodes_full'
+query_type
)%0A%0A%0Adef
@@ -2393,35 +2393,26 @@
ry_type=
-'list_nodes_select'
+query_type
)%0A%0A%0Adef
|
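The unused-argument bug fixed above is the classic wrapper-that-ignores-its-own-parameter shape: the default sits in the signature, but the body hardcodes the value again, so anything the caller passes is silently dropped. In miniature:

def query(query_type='list_nodes'):
    return 'querying ' + query_type

def full_query(query_type='list_nodes_full'):
    # buggy form: return query(query_type='list_nodes_full')
    return query(query_type=query_type)   # fixed: pass the argument through

print(full_query())                # querying list_nodes_full
print(full_query('list_nodes'))    # querying list_nodes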
c50189679dd61bd2cada7c1e10656ecdade43095
|
Fix `ThumbnailField` tests
|
user_management/api/avatar/tests/test_serializers.py
|
user_management/api/avatar/tests/test_serializers.py
|
from unittest import expectedFailure
from django.test import TestCase
from mock import MagicMock, patch
from user_management.models.tests.factories import UserFactory
from .. import serializers
class AvatarSerializerTest(TestCase):
@expectedFailure
def test_deserialize(self):
user = UserFactory.build()
data = {'avatar': ''}
serializer = serializers.AvatarSerializer(user, data=data)
self.assertTrue(serializer.is_valid())
class ThumbnailField(TestCase):
def test_get_generator_kwargs(self):
expected = {
'width': 50,
'height': 50,
'anchor': 'tr',
'crop': 1,
'upscale': 1,
}
field = serializers.ThumbnailField()
kwargs = field.get_generator_kwargs(expected)
self.assertEqual(kwargs, expected)
def test_get_generator_kwargs_defaults(self):
expected = {
'width': None,
'height': None,
'anchor': None,
'crop': None,
'upscale': None,
}
field = serializers.ThumbnailField()
kwargs = field.get_generator_kwargs({})
self.assertEqual(kwargs, expected)
def test_get_generator_kwargs_limited(self):
expected = {
'width': None,
'height': None,
'anchor': None,
'crop': None,
'upscale': None,
}
field = serializers.ThumbnailField()
kwargs = field.get_generator_kwargs({'ignored': 'value'})
self.assertEqual(kwargs, expected)
def test_generate_thumbnail(self):
field = serializers.ThumbnailField()
source = 'test'
kwargs = {'width': 10}
generator = 'generator'
get_path = 'user_management.api.avatar.serializers.generator_registry.get'
image_cache_path = 'user_management.api.avatar.serializers.ImageCacheFile'
with patch(get_path) as get:
get.return_value = generator
with patch(image_cache_path) as ImageCacheFile:
field.generate_thumbnail(source, **kwargs)
get.assert_called_once_with(field.generator_id, source=source, **kwargs)
ImageCacheFile.assert_called_once_with(generator)
def test_to_native_no_image(self):
"""Calling to_native with empty image should return None."""
field = serializers.ThumbnailField()
mocked_image = MagicMock()
mocked_image.name = None
image = field.to_native(mocked_image)
self.assertEqual(image, None)
@expectedFailure
def test_to_native_no_request(self):
"""Calling to_native with no request returns the image url."""
field = serializers.ThumbnailField()
field.context = {'request': None}
expected = '/url/'
mocked_image = MagicMock(
name='image.png',
url=expected
)
image = field.to_native(mocked_image)
self.assertEqual(image, expected)
@expectedFailure
def test_to_native_no_kwargs(self):
"""Calling to_native with no QUERY_PARAMS returns the absolute image url."""
field = serializers.ThumbnailField()
request = MagicMock()
expected = 'test.com/url/'
request.build_absolute_uri.return_value = expected
field.context = {'request': request}
field.get_generator_kwargs = MagicMock(return_value={})
mocked_image = MagicMock(
name='image.png',
url='/anything/'
)
image = field.to_native(mocked_image)
self.assertEqual(image, expected)
request.build_absolute_uri.assert_called_once_with(mocked_image.url)
@expectedFailure
def test_to_native_calls_generate_thumbnail(self):
"""Calling to_native with QUERY_PARAMS calls generate_thumbnail."""
field = serializers.ThumbnailField()
request = MagicMock()
field.context = {'request': request}
kwargs = {'width': 100}
field.get_generator_kwargs = MagicMock(return_value=kwargs)
thumbnailed_image = MagicMock(
url='/thumbnail/'
)
field.generate_thumbnail = MagicMock(return_value=thumbnailed_image)
expected = 'test.com/url/'
request.build_absolute_uri.return_value = expected
mocked_image = MagicMock(
name='image.png',
url='/anything/'
)
image = field.to_native(mocked_image)
self.assertEqual(image, expected)
field.generate_thumbnail.assert_called_once_with(mocked_image, **kwargs)
request.build_absolute_uri.assert_called_once_with(thumbnailed_image.url)
|
Python
| 0
|
@@ -2553,32 +2553,162 @@
e)%0A%0A
-@expectedFailure
+def mock_parent(self, context):%0A parent = MagicMock()%0A parent._context = context%0A parent.parent = None%0A return parent%0A
%0A def
@@ -2849,32 +2849,33 @@
humbnailField()%0A
+%0A
field.co
@@ -2868,34 +2868,50 @@
field.
-context =
+parent = self.mock_parent(
%7B'request':
@@ -2915,16 +2915,18 @@
': None%7D
+)%0A
%0A
@@ -3133,37 +3133,16 @@
ected)%0A%0A
- @expectedFailure%0A
def
@@ -3438,34 +3438,50 @@
field.
-context =
+parent = self.mock_parent(
%7B'request':
@@ -3488,16 +3488,18 @@
request%7D
+)%0A
%0A
@@ -3824,37 +3824,16 @@
e.url)%0A%0A
- @expectedFailure%0A
def
@@ -4045,26 +4045,42 @@
field.
-context =
+parent = self.mock_parent(
%7B'reques
@@ -4091,16 +4091,17 @@
request%7D
+)
%0A%0A
|
cad9160baa6d3841910ad68b48a212c4475a62e1
|
Fix contact list type filter
|
shuup/admin/modules/contacts/views/list.py
|
shuup/admin/modules/contacts/views/list.py
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.db.models import Count, Q
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from shuup.admin.toolbar import NewActionButton, SettingsActionButton, Toolbar
from shuup.admin.utils.picotable import (
ChoicesFilter, Column, RangeFilter, TextFilter
)
from shuup.admin.utils.views import PicotableListView
from shuup.core.models import (
CompanyContact, Contact, ContactGroup, PersonContact
)
class ContactTypeFilter(ChoicesFilter):
def __init__(self):
super(ContactTypeFilter, self).__init__(choices=[("person", _("Person")), ("company", _("Company"))])
def filter_queryset(self, queryset, column, value):
if value == "_all":
return queryset
model_class = PersonContact
if value == "company":
model_class = CompanyContact
return queryset.instance_of(model_class)
class ContactListView(PicotableListView):
model = Contact
default_columns = [
Column("name", _(u"Name"), linked=True, filter_config=TextFilter()),
Column("type", _(u"Type"), display="get_type_display", sortable=False, filter_config=ContactTypeFilter()),
Column("email", _(u"Email"), filter_config=TextFilter()),
Column("phone", _(u"Phone"), filter_config=TextFilter()),
Column(
"is_active",
_(u"Active"),
filter_config=ChoicesFilter([(False, _("no")), (True, _("yes"))], default=True)
),
Column("n_orders", _(u"# Orders"), class_name="text-right", filter_config=RangeFilter(step=1)),
Column("groups", _("Groups"), filter_config=ChoicesFilter(ContactGroup.objects.all(), "groups"))
]
mass_actions = [
"shuup.admin.modules.contacts.mass_actions:EditContactsAction",
"shuup.admin.modules.contacts.mass_actions:EditContactGroupsAction",
]
def get_toolbar(self):
return Toolbar([
NewActionButton.for_model(
PersonContact, url=reverse("shuup_admin:contact.new") + "?type=person"),
NewActionButton.for_model(
CompanyContact, extra_css_class="btn-info", url=reverse("shuup_admin:contact.new") + "?type=company"),
SettingsActionButton.for_model(Contact, return_url="contact")
])
def get_queryset(self):
groups = self.get_filter().get("groups")
query = Q(groups__in=groups) if groups else Q()
return (
super(ContactListView, self).get_queryset()
.filter(query)
.annotate(n_orders=Count("customer_orders"))
.order_by("-created_on"))
def get_type_display(self, instance):
if isinstance(instance, PersonContact):
return _(u"Person")
elif isinstance(instance, CompanyContact):
return _(u"Company")
else:
return _(u"Contact")
def get_object_abstract(self, instance, item):
"""
:type instance: shuup.core.models.Contact
"""
bits = filter(None, [
item.get("type"),
_("Active") if instance.is_active else _("Inactive"),
_("Email: %s") % (instance.email or "\u2014"),
_("Phone: %s") % (instance.phone or "\u2014"),
_("%d orders") % instance.n_orders,
])
return [
{"text": instance.name or _("Contact"), "class": "header"},
{"text": ", ".join([force_text(bit) for bit in bits])}
]
|
Python
| 0
|
@@ -1022,16 +1022,25 @@
n, value
+, context
):%0A
|
9d9737f765416305dd2adbd816b447de5c5eae7c
|
add version 8.0.6.0 (#14003)
|
var/spack/repos/builtin/packages/ibm-java/package.py
|
var/spack/repos/builtin/packages/ibm-java/package.py
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import platform
import os
class IbmJava(Package):
"""Binary distribution of the IBM Java Software Development Kit
for big and little-endian powerpc (power7, 8 and 9)."""
homepage = "https://developer.ibm.com/javasdk/"
# There are separate tar files for big and little-endian machine
# types. When we add more versions, then turn this into a mapping
# from version and machine type to sha256sum.
mach = platform.machine() if platform.machine() == 'ppc64' else 'ppc64le'
if mach == 'ppc64le':
sha = 'dec6434d926861366c135aac6234fc28b3e7685917015aa3a3089c06c3b3d8f0'
else:
sha = 'd39ce321bdadd2b2b829637cacf9c1c0d90235a83ff6e7dcfa7078faca2f212f'
version('8.0.5.30', sha256=sha, expand=False)
provides('java@8')
conflicts('target=x86_64:', msg='ibm-java is only available for ppc64 and ppc64le')
# This assumes version numbers are 4-tuples: 8.0.5.30
def url_for_version(self, version):
# Convert 8.0.5.30 to 8.0-5.30 for the file name.
dash = '{0}.{1}-{2}.{3}'.format(*(str(version).split('.')))
url = ('http://public.dhe.ibm.com/ibmdl/export/pub/systems/cloud'
'/runtimes/java/{0}/linux/{1}/ibm-java-sdk-{2}-{1}'
'-archive.bin').format(version, self.mach, dash)
return url
@property
def home(self):
return self.prefix
@property
def libs(self):
return find_libraries(['libjvm'], root=self.home, recursive=True)
def setup_run_environment(self, env):
env.set('JAVA_HOME', self.home)
def setup_dependent_build_environment(self, env, dependent_spec):
env.set('JAVA_HOME', self.home)
def setup_dependent_package(self, module, dependent_spec):
self.spec.home = self.home
def install(self, spec, prefix):
archive = os.path.basename(self.stage.archive_file)
# The archive.bin file is quite fussy and doesn't work as a
# symlink.
if os.path.islink(archive):
targ = os.readlink(archive)
os.unlink(archive)
copy(targ, archive)
# The properties file is how we avoid an interactive install.
prop = 'properties'
with open(prop, 'w') as file:
file.write('INSTALLER_UI=silent\n')
file.write('USER_INSTALL_DIR=%s\n' % prefix)
file.write('LICENSE_ACCEPTED=TRUE\n')
# Running the archive file installs everything.
set_executable(archive)
inst = Executable(join_path('.', archive))
inst('-f', prop)
return
|
Python
| 0
|
@@ -455,309 +455,532 @@
#
-There are separate tar files for big and little-endian machine%0A # types. When we add more versions, then turn this into a mapping%0A # from version and machine type to sha256sum.%0A mach = platform.machine() if platform.machine() == 'ppc64' else 'ppc64le'%0A if mach == 'ppc64le':%0A sha =
+Note: IBM is fairly aggressive about taking down old versions,%0A # so we may need to update this frequently. Also, old revs may%0A # not be available for download.%0A%0A version_list = %5B%0A ('8.0.6.0', 'ppc64', 'e142746a83e47ab91d71839d5776f112ed154ae180d0628e3f10886151dad710'),%0A ('8.0.6.0', 'ppc64le', '18c2eccf99225e6e7643141d8da4110cacc39f2fa00149fc26341d2272cc0102'),%0A ('8.0.5.30', 'ppc64', 'd39ce321bdadd2b2b829637cacf9c1c0d90235a83ff6e7dcfa7078faca2f212f'),%0A ('8.0.5.30', 'ppc64le',
'de
@@ -1046,101 +1046,231 @@
8f0'
+),
%0A
-else:%0A sha = 'd39ce321bdadd2b2b829637cacf9c1c0d90235a83ff6e7dcfa7078faca2f212f'%0A%0A
+%5D%0A%0A # There are separate tar files for big and little-endian machine%0A # types. And no, this won't work cross platform.%0A%0A for (ver, mach, sha) in version_list:%0A if mach == platform.machine():%0A
@@ -1277,26 +1277,19 @@
version(
-'8.0.5.30'
+ver
, sha256
@@ -1839,17 +1839,26 @@
on,
-self.mach
+platform.machine()
, da
|
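The restructuring above replaces an if/else over machine types with a data table filtered at class-definition time: only the rows matching the current machine register a version. The shape of that idiom, stripped of Spack specifics (checksums shortened here for illustration):

import platform

version_list = [
    ('8.0.6.0',  'ppc64',   'e14274...'),
    ('8.0.6.0',  'ppc64le', '18c2ec...'),
    ('8.0.5.30', 'ppc64',   'd39ce3...'),
]

for (ver, mach, sha) in version_list:
    if mach == platform.machine():
        print('registering', ver, 'sha256', sha)   # stands in for version(...)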
9fdcc147a754e3b4f85decb858066610652bd713
|
update version. (#5439)
|
var/spack/repos/builtin/packages/r-tibble/package.py
|
var/spack/repos/builtin/packages/r-tibble/package.py
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RTibble(RPackage):
"""Provides a 'tbl_df' class that offers better checking and printing
capabilities than traditional data frames."""
homepage = "https://github.com/hadley/tibble"
url = "https://cran.r-project.org/src/contrib/tibble_1.2.tar.gz"
version('1.2', 'bdbc3d67aa16860741add6d6ec20ea13')
version('1.1', '2fe9f806109d0b7fadafb1ffafea4cb8')
depends_on('r@3.1.2:')
depends_on('r-assertthat', type=('build', 'run'))
depends_on('r-lazyeval@0.1.10:', type=('build', 'run'))
depends_on('r-rcpp', type=('build', 'run'))
|
Python
| 0
|
@@ -1438,14 +1438,17 @@
com/
-hadley
+tidyverse
/tib
@@ -1486,20 +1486,18 @@
an.r
--project.org
+studio.com
/src
@@ -1514,17 +1514,19 @@
ibble_1.
-2
+3.4
.tar.gz%22
@@ -1526,16 +1526,96 @@
tar.gz%22%0A
+ list_url = homepage%0A version('1.3.4', '298e81546f999fb0968625698511b8d3')
%0A ver
|
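Besides the version bump, the diff repoints url at the RStudio CRAN mirror and adds list_url so newer releases can be discovered from the homepage. The version-to-URL pattern in miniature (a sketch only; Spack normally derives this from the url attribute):

def url_for_version(version):
    # e.g. url_for_version('1.3.4') ->
    # https://cran.rstudio.com/src/contrib/tibble_1.3.4.tar.gz
    return 'https://cran.rstudio.com/src/contrib/tibble_{0}.tar.gz'.format(version)

print(url_for_version('1.3.4'))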
802926f151d9a1337c2a07adbc485b6193e91733
|
Add template string calling to the state module
|
salt/modules/state.py
|
salt/modules/state.py
|
'''
Control the state system on the minion
'''
# Import Python modules
import os
# Import salt modules
import salt.state
def low(data):
'''
Execute a single low data call
'''
st_ = salt.state.State(__opts__)
err = st_.verify_data(data)
if err:
return err
return st_.call(data)
def high(data):
'''
Execute the compound calls stored in a single set of high data
'''
st_ = salt.state.State(__opts__)
return st_.call_high(data)
def template(tem):
'''
Execute the information stored in a template file on the minion
'''
st_ = salt.state.State(__opts__)
return st_.call_template(tem)
|
Python
| 0.000001
|
@@ -656,8 +656,195 @@
m)%0A %0A
+def template_str(tem):%0A '''%0A Execute the information stored in a template file on the minion%0A '''%0A st_ = salt.state.State(__opts__)%0A return st_.call_template_str(tem)%0A %0A
|
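The added template_str() mirrors template(): a module-level function that instantiates State and forwards to one method. The thin-wrapper pattern in isolation; the State class below is a stand-in for salt.state.State, which really takes the minion's __opts__:

class State:
    # Stand-in exposing just the two call targets used here.
    def call_template(self, tem):
        return ('file', tem)

    def call_template_str(self, tem):
        return ('string', tem)

def template(tem):
    '''Execute the information stored in a template file.'''
    return State().call_template(tem)

def template_str(tem):
    '''Execute the information stored in a template string.'''
    return State().call_template_str(tem)

print(template_str('base: {{ grains.id }}'))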
83fea631f1765d4641cde8af2c5c931b22e4ee33
|
extend trilinos
|
var/spack/repos/builtin/packages/trilinos/package.py
|
var/spack/repos/builtin/packages/trilinos/package.py
|
from spack import *
class Trilinos(Package):
"""
The Trilinos Project is an effort to develop algorithms and enabling technologies within an object-oriented
software framework for the solution of large-scale, complex multi-physics engineering and scientific problems.
A unique design feature of Trilinos is its focus on packages.
"""
homepage = "https://trilinos.org/"
url = "http://trilinos.csbsju.edu/download/files/trilinos-12.2.1-Source.tar.gz"
version('12.4.2', '7c830f7f0f68b8ad324690603baf404e')
version('12.2.1', '6161926ea247863c690e927687f83be9')
version('12.0.1', 'bd99741d047471e127b8296b2ec08017')
version('11.14.3', '2f4f83f8333e4233c57d0f01c4b57426')
version('11.14.2', 'a43590cf896c677890d75bfe75bc6254')
version('11.14.1', '40febc57f76668be8b6a77b7607bb67f')
variant('shared', default=True, description='Enables the build of shared libraries')
variant('debug', default=False, description='Builds a debug version of the libraries')
# Everything should be compiled with -fpic
depends_on('blas')
depends_on('lapack')
depends_on('boost')
depends_on('matio')
depends_on('glm')
depends_on('swig')
# MPI related dependencies
depends_on('mpi')
depends_on('netcdf+mpi')
depends_on('python') # Needs py-numpy activated
def install(self, spec, prefix):
options = []
options.extend(std_cmake_args)
options.extend(['-DTrilinos_ENABLE_ALL_PACKAGES:BOOL=ON',
'-DTrilinos_ENABLE_TESTS:BOOL=OFF',
'-DTrilinos_ENABLE_EXAMPLES:BOOL=OFF',
'-DCMAKE_BUILD_TYPE:STRING=%s' % ('Debug' if '+debug' in spec else 'Release'),
'-DBUILD_SHARED_LIBS:BOOL=%s' % ('ON' if '+shared' in spec else 'OFF'),
'-DTPL_ENABLE_MPI:STRING=ON',
'-DBLAS_LIBRARY_DIRS:PATH=%s' % spec['blas'].prefix,
'-DLAPACK_LIBRARY_DIRS:PATH=%s' % spec['lapack'].prefix
])
with working_dir('spack-build', create=True):
cmake('..', *options)
make()
make('install')
|
Python
| 0.000001
|
@@ -13,17 +13,26 @@
mport *%0A
+import os
%0A
-
%0Aclass T
@@ -482,16 +482,106 @@
ar.gz%22%0A%0A
+ version('12.6.1', 'adcf2d3aab74cdda98f88fee19cd1442604199b0515ee3da4d80cbe8f37d00e4')%0A
vers
@@ -1591,24 +1591,99 @@
S:BOOL=ON',%0A
+ '-DTrilinos_ENABLE_ALL_OPTIONAL_PACKAGES:BOOL=ON',%0A
@@ -2038,203 +2038,1254 @@
MPI:
-STRING=ON',%0A '-DBLAS_LIBRARY_DIRS:PATH=%25s' %25 spec%5B'blas'%5D.prefix,%0A '-DLAPACK_LIBRARY_DIRS:PATH=%25s' %25 spec%5B'lapack'%5D.prefix%0A %5D)
+BOOL=ON',%0A '-DMPI_BASE_DIR:PATH=%25s' %25 spec%5B'mpi'%5D.prefix,%0A '-DTPL_ENABLE_BLAS=ON',%0A '-DBLAS_LIBRARY_NAMES=blas',%0A '-DBLAS_LIBRARY_DIRS=/usr/lib', # %25 spec%5B'blas'%5D.prefix,%0A '-DTPL_ENABLE_LAPACK=ON',%0A '-DLAPACK_LIBRARY_NAMES=lapack',%0A '-DLAPACK_LIBRARY_DIRS=/usr/lib', # %25 spec%5B'lapack'%5D.prefix,%0A '-DTPL_ENABLE_Boost:BOOL=ON',%0A '-DBOOST_BASE_DIR:PATH=%25s' %25 spec%5B'boost'%5D.prefix,%0A '-DTrilinos_ENABLE_Fortran=OFF',%0A '-DTrilinos_ENABLE_EXPLICIT_INSTANTIATION:BOOL=ON',%0A '-DTrilinos_ENABLE_CXX11:BOOL=ON',%0A '-DTrilinos_CXX11_FLAGS=-std=c++11'%0A %5D)%0A%0A # disable due to compiler / config errors:%0A options.extend(%5B'-DTrilinos_ENABLE_SEACAS=OFF',%0A '-DTrilinos_ENABLE_Pike=OFF',%0A '-DTrilinos_ENABLE_STK=OFF'%0A %5D)%0A%0A if self.compiler.name == %22clang%22:%0A os.environ%5B'CPPFLAGS'%5D=%22-Qunused-arguments%22%0A%0A #os.environ%5B'LDFLAGS'%5D=%22lgfortran%22
%0A%0A
|
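Most of this diff is conditional assembly of CMake flags from the spec, plus a compiler-specific environment tweak. The same idiom reduced to a function; the variants list and compiler name are passed in here, whereas the real install() reads them from Spack objects:

def cmake_options(variants, compiler_name):
    options = ['-DTrilinos_ENABLE_ALL_PACKAGES:BOOL=ON',
               '-DTrilinos_ENABLE_TESTS:BOOL=OFF']
    # Variant-dependent flags, as in install() above.
    options.append('-DCMAKE_BUILD_TYPE:STRING=%s'
                   % ('Debug' if '+debug' in variants else 'Release'))
    options.append('-DBUILD_SHARED_LIBS:BOOL=%s'
                   % ('ON' if '+shared' in variants else 'OFF'))
    # Environment tweaks live outside the options list.
    env = {'CPPFLAGS': '-Qunused-arguments'} if compiler_name == 'clang' else {}
    return options, env

print(cmake_options(['+shared'], 'clang'))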
ac8d29c5855ea05bd42766cd142808704aded867
|
Add space to trigger travis
|
web/impact/impact/permissions/graphql_permissions.py
|
web/impact/impact/permissions/graphql_permissions.py
|
from accelerator.models import (
UserRole,
)
from accelerator_abstract.models.base_user_utils import is_employee
from accelerator.models import ACTIVE_PROGRAM_STATUS
BASIC_ALLOWED_USER_ROLES = [
UserRole.FINALIST,
UserRole.AIR,
UserRole.MENTOR,
UserRole.PARTNER,
UserRole.ALUM
]
BASIC_VISIBLE_USER_ROLES = [UserRole.FINALIST, UserRole.STAFF, UserRole.ALUM]
def check_for_no_user_role(logged_in_user_roles):
count = len(logged_in_user_roles) == 1
return not logged_in_user_roles or count and not logged_in_user_roles[0]
def check_for_basic_user_roles(logged_in_user_roles):
return any(
[role in BASIC_ALLOWED_USER_ROLES for role in logged_in_user_roles]
)
def visible_roles(current_user):
current_logged_in_user_roles = list(
current_user.programrolegrant_set.filter(
program_role__program__program_status=ACTIVE_PROGRAM_STATUS
).values_list('program_role__user_role__name', flat=True).distinct())
if check_for_no_user_role(current_logged_in_user_roles):
return [UserRole.STAFF]
if check_for_basic_user_roles(current_logged_in_user_roles):
return BASIC_VISIBLE_USER_ROLES + [UserRole.MENTOR]
if UserRole.JUDGE in current_logged_in_user_roles:
return BASIC_VISIBLE_USER_ROLES
def can_view_profile(profile_user, roles):
return profile_user.programrolegrant_set.filter(
program_role__user_role__name__in=roles
).exists()
def can_view_entrepreneur_profile(current_user, profile_user):
if not is_employee(current_user):
roles = visible_roles(current_user)
return can_view_profile(profile_user, roles)
return True
|
Python
| 0
|
@@ -1436,16 +1436,17 @@
in=roles
+
%0A ).e
|
7ad6da17a72010967ccd82d3393a86762cf2a786
|
Mark import-std-module/empty-module as libc++ test
|
packages/Python/lldbsuite/test/commands/expression/import-std-module/empty-module/TestEmptyStdModule.py
|
packages/Python/lldbsuite/test/commands/expression/import-std-module/empty-module/TestEmptyStdModule.py
|
"""
Test that LLDB doesn't crash if the std module we load is empty.
"""
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
import os
class ImportStdModule(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipIf(compiler=no_match("clang"))
def test(self):
self.build()
sysroot = os.path.join(os.getcwd(), "root")
# Set the sysroot.
self.runCmd("platform select --sysroot '" + sysroot + "' host", CURRENT_EXECUTABLE_SET)
lldbutil.run_to_source_breakpoint(self,
"// Set break point at this line.", lldb.SBFileSpec("main.cpp"))
self.runCmd("settings set target.import-std-module true")
self.runCmd("log enable lldb expr")
# Use the typedef that is only defined in our 'empty' module. If this fails, then LLDB
# somehow figured out the correct define for the header and compiled the right
# standard module that actually contains the std::vector template.
self.expect("expr MissingContent var = 3; var", substrs=['$0 = 3'])
# Try to access our mock std::vector. This should fail but not crash LLDB as the
# std::vector template should be missing from the std module.
self.expect("expr (size_t)v.size()", substrs=["Couldn't lookup symbols"], error=True)
|
Python
| 0.000037
|
@@ -272,16 +272,283 @@
ile__)%0A%0A
+ # We only emulate a fake libc++ in this test and don't use the real libc++,%0A # but we still add the libc++ category so that this test is only run in%0A # test configurations where libc++ is actually supposed to be tested.%0A @add_test_categories(%5B%22libc++%22%5D)%0A
@ski
|
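The fix attaches a "libc++" category so the test only runs in configurations where libc++ is actually tested. A hypothetical re-implementation of such a category decorator (lldbsuite's real add_test_categories does more validation than this):

def add_test_categories(categories):
    # Record the categories on the function; a test runner can then
    # filter by them when deciding what to execute.
    def decorator(func):
        func.categories = getattr(func, 'categories', []) + list(categories)
        return func
    return decorator

@add_test_categories(["libc++"])
def test_empty_std_module():
    pass

print(test_empty_std_module.categories)  # ['libc++']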
95829615b19bd2729606f7492b3f3b2b174ae7b8
|
Fix typos.
|
src/epiweb/apps/survey/data/example.py
|
src/epiweb/apps/survey/data/example.py
|
# -*- coding: utf-8 -*-
from epiweb.apps.survey.data.conditions import *
from epiweb.apps.survey.data import Survey, Section, Question
_ = lambda x: x
data = Survey()
section = Section()
q = Question('q10001', _('Did you have one or more of the following symptoms since your last visit?'))
q.type = 'option-multiple'
q.options = [
_('Runnynose'),
_('Stuffy nose'),
_('Hacking cough'),
_('Dry cough'),
_('Sneezing'),
_('Sorethroat'),
_('Musclepain'),
_('Headache'),
_('Chestpain'),
_('Feeling exhausted'),
_('Feeling tired'),
_('Lossofappetite'),
_('Nausea'),
_('Vomiting'),
_('Diarrhoea'),
_('Watery, bloodshot eyes'),
_('Chillsandfeverishfeeling'),
_('Colouredsputum'),
]
section.questions.append(q)
q = Question('q10002', _('When did these symptoms started?'))
q.type = 'date'
q.condition = NotEmpty(Q("q10001"))
section.questions.append(q)
q = Question('q10003', _('Did you have fever? If yes, what was the highest temperature measured? Please estimate if you had fever, but did not measure.'))
q.type = 'option-single'
q.options = [
_('No'),
_('Less than 37°C'),
_('37°C'),
_('37° - 37.5°C'),
_('37.5° - 38°C'),
_('38°'),
_('38.5°C'),
_('38.5° - 39°C'),
_('39° - 39.5°C'),
_('39.5° - 40°C'),
_('More than 40°C'),
]
q.condition = NotEmpty(Q("q10001"))
section.questions.append(q)
q = Question('q10004', _('When was your temperature for the first time above 38°C?'))
q.type = 'date'
q.condition = NotEmpty(Q("q10001"))
section.questions.append(q)
q = Question('q10005', _('Did these symptoms develop abruptly with sudden high fever or chills?'))
q.type = 'option-single'
q.options = [
_('No'),
_('Yes'),
_("Don't know"),
]
q.condition = NotEmpty(Q("q10001"))
section.questions.append(q)
q = Question('q10006', _('Did you consult a medical doctor for these symptoms?'))
q.type = 'yes-no'
q.condition = NotEmpty(Q("q10001"))
section.questions.append(q)
q = Question('q10007', _('Did you take medication for these symptoms?'))
q.type = 'option-single'
q.options = [
_('Tamiflu, Relenza, or another anti viral drug'),
_('Antibiotics'),
_('Antipyretics'),
_('Anti-inflammatory drugs'),
_('Vitamins'),
_('Other'),
]
q.condition = NotEmpty(Q("q10001"))
section.questions.append(q)
q = Question('q10008', _('Did you change your occupations due to these symptoms?'))
q.type = 'option-single'
q.options = [
_('No'),
_('Yes, I staid at home'),
_('Yes, but went to work/school as usual'),
_('I staid at home, but was able to work'),
]
q.condition = NotEmpty(Q("q10001"))
section.questions.append(q)
q = Question('q10009', _('How long did you staid at home?'))
q.type = 'option-single'
q.options = [
_('1 day'),
_('2 days'),
_('3 days'),
_('4 days'),
_('5 days'),
_('6 days'),
_('1 week'),
_('Less than 2 weeks'),
_('Less than 3 weeks'),
_('More than 3 weeks'),
]
q.condition = NotEmpty(Q("q10001")) & OneOf(Q("q10008"), [1, 2])
section.questions.append(q)
q = Question('q10010', _('Do other people from your family/home have/had comparable symptoms?'))
q.type = 'yes-no'
q.condition = NotEmpty(Q("q10001"))
section.questions.append(q)
q = Question('q10011', _('According to our data you did not receive a seasonal flu vaccination?'))
q.type = 'option-single'
q.options = [
_('Yes'),
_('No, meanwhile I have received a seasonal flu vaccination'),
]
q.condition = Intake("q20005") == False
section.questions.append(q)
q = Question('q10012', _('According to our data you did not receive a Mexican flu vaccination?'))
q.type = 'option-single'
q.options = [
_('Yes'),
_('No, meanwhile I have received a Mexican flu vaccination'),
]
q.condition = Intake("q20006") == False
section.questions.append(q)
data.sections.append(section)
|
Python
| 0.998899
|
@@ -339,16 +339,17 @@
_('Runny
+
nose'),%0A
@@ -444,16 +444,17 @@
_('Sore
+
throat')
@@ -468,16 +468,17 @@
('Muscle
+
pain'),%0A
@@ -508,16 +508,17 @@
_('Chest
+
pain'),%0A
@@ -580,18 +580,20 @@
_('Loss
+
of
+
appetite
@@ -702,19 +702,21 @@
ills
+
and
+
feverish
feel
@@ -711,16 +711,17 @@
feverish
+
feeling'
@@ -738,16 +738,17 @@
Coloured
+
sputum')
|
973a7754623c330f0352979bf9e0f2a6020acf62
|
reformat >80 char import line
|
tendrl/commons/tests/objects/cluster/atoms/check_cluster_available/test_check_cluster_available_init.py
|
tendrl/commons/tests/objects/cluster/atoms/check_cluster_available/test_check_cluster_available_init.py
|
import etcd
import maps
import pytest
from tendrl.commons.objects.cluster.atoms.check_cluster_available import CheckClusterAvailable # noqa
from tendrl.commons.objects import AtomExecutionFailedError
class MockCluster(object):
def __init__(self, integration_id = 0):
self.is_managed = True
def load(self):
return self
def exists(self):
return self
def test_check_cluster_available():
NS.publisher_id = 0
NS._int = maps.NamedDict()
NS.tendrl = maps.NamedDict()
NS.tendrl.objects = maps.NamedDict()
NS.tendrl.objects.Cluster = MockCluster
test = CheckClusterAvailable()
test.parameters = maps.NamedDict()
test.parameters['TendrlContext.integration_id'] = \
"7a3f2238-ef79-4943-9edf-762a80cf22a0"
test.parameters['job_id'] = 0
test.parameters['flow_id'] = 0
NS.tendrl_context = maps.NamedDict(integration_id="")
NS._int.client = etcd.Client()
with pytest.raises(AtomExecutionFailedError):
test.run()
|
Python
| 0
|
@@ -104,16 +104,22 @@
import
+%5C%0A
CheckClu
@@ -135,16 +135,8 @@
able
- # noqa
%0Afro
|
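The diff wraps the over-long import in a backslash continuation so the `# noqa` marker can go. Both continuation styles below are valid Python; os.path stands in for the long tendrl module path, and the parenthesized form usually survives later edits better:

from os.path import \
    basename

from os.path import (
    dirname,
    join,
)

print(join(dirname('/a/b/c.py'), basename('/a/b/c.py')))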
eea0c4fd610882ee748410063a62c30ce95da0ee
|
Fix the snapshot creation script for the new command line syntax. Review URL: http://codereview.chromium.org//8414015
|
runtime/tools/create_snapshot_file.py
|
runtime/tools/create_snapshot_file.py
|
#!/usr/bin/env python
#
# Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
# Script to create snapshot files.
import getopt
import optparse
import string
import subprocess
import sys
import utils
HOST_OS = utils.GuessOS()
HOST_CPUS = utils.GuessCpus()
def BuildOptions():
result = optparse.OptionParser()
result.add_option("--executable",
action="store", type="string",
help="path to executable")
result.add_option("--output_bin",
action="store", type="string",
help="binary snapshot output file name")
result.add_option("--input_cc",
action="store", type="string",
help="input template file name")
result.add_option("--output",
action="store", type="string",
help="generated snapshot output file name")
result.add_option("--scripts",
action="store", type="string",
help="list of scripts to include in snapshot")
result.add_option("-v", "--verbose",
help='Verbose output.',
default=False, action="store_true")
return result
def ProcessOptions(options):
if not options.executable:
sys.stderr.write('--executable not specified\n')
return False
if not options.output_bin:
sys.stderr.write('--output_bin not specified\n')
return False
if not options.input_cc:
sys.stderr.write('--input_cc not specified\n')
return False
if not options.output:
sys.stderr.write('--output not specified\n')
return False
return True
def makeString(input_file):
result = ' '
fileHandle = open(input_file, 'rb')
lineCounter = 0
for byte in fileHandle.read():
result += ' %d,' % ord(byte)
lineCounter += 1
if lineCounter == 10:
result += '\n '
lineCounter = 0
if lineCounter != 0:
result += '\n '
return result
def makeFile(output_file, input_cc_file, input_file):
snapshot_cc_text = open(input_cc_file).read()
snapshot_cc_text = snapshot_cc_text % makeString(input_file)
open(output_file, 'w').write(snapshot_cc_text)
return True
def Main():
# Parse the options.
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
# Construct the path to the dart binary.
snapshot_argument = ''.join([ "--snapshot=", options.output_bin ])
if not options.scripts:
command = [ options.executable, snapshot_argument ]
else:
scripts = string.split(options.scripts)
command = [ options.executable, snapshot_argument, "--" ] + scripts + [ "--" ]
if options.verbose:
print ' '.join(command)
subprocess.call(command)
if not makeFile(options.output, options.input_cc, options.output_bin):
return -1
return 0
if __name__ == '__main__':
sys.exit(Main())
|
Python
| 0.999999
|
@@ -2631,14 +2631,8 @@
ment
-, %22--%22
%5D +
@@ -2643,19 +2643,8 @@
ipts
- + %5B %22--%22 %5D
%0A i
|
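With the new syntax the script list is passed to the VM directly, without the surrounding "--" separators. The argv construction, reduced and ported to Python 3 for illustration (the original script is Python 2 and uses string.split):

def build_command(executable, output_bin, scripts_arg):
    snapshot_argument = '--snapshot=' + output_bin
    if not scripts_arg:
        return [executable, snapshot_argument]
    # New command line: scripts follow the snapshot flag directly.
    return [executable, snapshot_argument] + scripts_arg.split()

print(build_command('dart', 'out.bin', 'a.dart b.dart'))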
7136ebd77980f63944f3afc4d9577507df956dde
|
Update fake licensing generator to use correct bucket
|
generate_fake_licensing_data.py
|
generate_fake_licensing_data.py
|
import datetime
import json
from random import choice, randint
from pymongo import MongoClient
import sys
HOST = 'localhost'
PORT = 27017
DB_NAME = 'backdrop'
BUCKET = 'licensify'
# IMPORTANT:
#
# Dates are required to be stored as ISODate so you need to run these
# postprocessing scripts after loading data into db:
#
# /usr/bin/mongo your_database --eval "
# db.your_collection.find({ _timestamp: { \$type: 2}}).forEach(
# function(doc){
# doc._timestamp = new ISODate(doc._timestamp);
# db.your_collection.save(doc)
# });"
#
# /usr/bin/mongo your_database --eval "
# db.your_collection.find({ _week_start_at: { \$type: 2}}).forEach(
# function(doc){
# doc._week_start_at = new ISODate(doc._week_start_at);
# db.your_collection.save(doc)
# });"
USAGE = """
./generate_fake_licensing_data.py (save_to_db|print_json)
"""
def find_last_monday():
now = datetime.datetime.now().replace(hour=0, minute=0, second=0,
microsecond=0)
return now - datetime.timedelta(days=now.weekday())
def generate_last_n_mondays(n):
last_monday = find_last_monday()
n_mondays = []
for i in range(0, n):
n_mondays.append(last_monday - (i * datetime.timedelta(days=7)))
n_mondays.reverse()
return n_mondays
def select_random_Monday():
dates_to_use = generate_last_n_mondays(9)
return choice(dates_to_use)
def select_random_authority():
authorities = [
"Fakesville",
"NoSuchTown",
"TestPort",
"Nowhere-on-sea",
"Not-a-real-place",
]
return choice(authorities)
def licence_event():
some_monday = select_random_Monday()
return {
'dataType': 'licenceApplication',
'_week_start_at': some_monday,
'_timestamp': some_monday + datetime.timedelta(days=randint(0, 6)),
'_id': 'fake-%i' % i
}
def authority():
authority_name = select_random_authority()
return {
'authorityUrlSlug': authority_name.lower(),
'authorityName': 'City of %s Council' % authority_name
}
def select_random_licence():
licences = [
{'name': 'Fake Licence 1', 'code': '1111-1-1', 'payment': True},
{'name': 'Fake Licence 2', 'code': '1111-2-1', 'payment': True},
{'name': 'Fake Licence 3', 'code': '1111-3-1', 'payment': False},
{'name': 'Fake Licence 4', 'code': '1111-3-1', 'payment': True},
{'name': 'Fake Licence 5', 'code': '1111-3-1', 'payment': False},
{'name': 'Fake Licence 6', 'code': '1111-3-1', 'payment': False},
]
return choice(licences)
def generate_payment_status(payment):
if payment:
return choice(['Unknown', 'Success'])
else:
return ""
def licence():
selected_licence = select_random_licence()
return {
'licenceUrlSlug': selected_licence['name'].lower().replace(' ', '-'),
'licenceCode': selected_licence['code'],
'licenceName': selected_licence['name'],
'isPaymentRequired': selected_licence['payment'],
'paymentStatus': generate_payment_status(selected_licence['payment'])
}
def select_random_interaction():
interactions = [
{'action': 'apply', 'code': 1},
{'action': 'renew', 'code': 2},
{'action': 'change', 'code': 3},
]
return choice(interactions)
def interaction(licence_name):
selected_interaction = select_random_interaction()
return {
'licenceInteractionName': "%s for a %s" % (
selected_interaction['action'],
licence_name),
'licenceInteractionlgilId': selected_interaction['action'],
'licenceInteractionlgilSubId': selected_interaction['code']
}
def time_to_str(time):
return time.strftime('%Y-%m-%dT%H:%M:%S')
if len(sys.argv) < 2:
print USAGE
sys.exit(1)
if __name__ == "__main__":
argument = sys.argv[1]
licence_apps = []
for i in range(0, 10000):
licence_application = {}
licence_application.update(licence_event())
licence_application.update(authority())
licence_application.update(licence())
licence_application.update(
interaction(licence_application['licenceName']))
licence_apps.append(licence_application)
if argument == "save_to_db":
for application in licence_apps:
MongoClient(HOST, PORT)[DB_NAME][BUCKET].save(application)
sys.exit(0)
elif argument == "print_json":
for application in licence_apps:
application['_timestamp'] = time_to_str(application['_timestamp'])
application['_week_start_at'] = \
time_to_str(application['_week_start_at'])
for application in licence_apps:
print json.dumps(application)
sys.exit(0)
else:
print USAGE
sys.exit(1)
|
Python
| 0
|
@@ -174,10 +174,10 @@
ensi
-fy
+ng
'%0A%0A#
|
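The one-word rename points BUCKET at the 'licensing' collection. The long comment at the top of that script exists because string timestamps need post-processing into ISODate; inserting datetime objects through pymongo stores them as BSON dates in one step and sidesteps the mongo --eval fix-ups. A sketch assuming a local mongod on the default port:

import datetime
from pymongo import MongoClient

doc = {
    'dataType': 'licenceApplication',
    # datetime objects are stored as ISODate; no post-processing needed.
    '_timestamp': datetime.datetime(2013, 7, 1, 12, 0),
    '_week_start_at': datetime.datetime(2013, 7, 1),
}

client = MongoClient('localhost', 27017)
client['backdrop']['licensing'].insert_one(doc)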
377d35873aed0993e1dbaf147d7b24abccd960c2
|
add raise SystemExit
|
cliche/cli.py
|
cliche/cli.py
|
""":mod:`cliche.cli` --- Command-line interfaces
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import code
import functools
import logging.config
import os
import pathlib
import sys
from alembic.util import CommandError
from click import Path, argument, echo, group, option
from flask import _request_ctx_stack
from setuptools import find_packages
from werkzeug.utils import import_string
from .celery import app as celery_app
from .config import read_config
from .orm import downgrade_database, upgrade_database
from .web.app import app as flask_app
from .web.db import get_database_engine
__all__ = ('initialize_app', 'config', 'main')
ALEMBIC_LOGGING = {
'version': 1,
'handlers': {
'console': {
'level': 'NOTSET',
'class': 'logging.StreamHandler',
'formatter': 'generic'
}
},
'formatters': {
'generic': {
'format': '%(levelname)-5.5s [%(name)s] %(message)s',
'datefmt': '%H:%M:%S'
}
},
'root': {
'level': 'WARN',
'handlers': ['console']
},
'loggers': {
'alembic': {
'level': 'INFO',
'handlers': []
},
'sqlalchemy.engine': {
'level': 'WARN',
'handlers': []
}
}
}
def config(func):
"""Provide :option:`--config` or :option:`-c` option and
run :func:`initialize_app()` automatically.
:param func: a command function to decorate
:type func: :class:`collections.abc.Callable`
:returns: decorated ``func``
"""
@functools.wraps(func)
def internal(*args, **kwargs):
initialize_app(kwargs.pop('config'))
func(*args, **kwargs)
deco = option('--config', '-c', type=Path(exists=True),
help='Configuration file (YAML or Python)')
return deco(internal)
def initialize_app(config=None):
"""Initialize celery/flask app.
:param config: a config file path. accept :file:`.py`, :file:`.yml` file.
default value is :const:`None`
"""
if config is None:
try:
config = os.environ['CLICHE_CONFIG']
except KeyError:
print('The -c/--config option or CLICHE_CONFIG environment '
'variable is required', file=sys.stderr)
raise SystemExit(1)
if not os.path.isfile(config):
print('The configuration file', config, 'cannot be read.')
raise SystemExit(1)
config = read_config(filename=pathlib.Path(config))
flask_app.config.update(config)
celery_app.conf.update(config)
@group()
def cli():
"""cliche for integrated command for cliche.io service."""
@cli.command()
@argument('revision', default='head')
@config
def upgrade(revision):
"""Create the database tables, or upgrade it to the latest revision."""
logging_config = dict(ALEMBIC_LOGGING)
logging.config.dictConfig(logging_config)
with flask_app.app_context():
engine = get_database_engine()
try:
upgrade_database(engine, revision)
except CommandError as e:
if revision != 'head':
try:
downgrade_database(engine, revision)
except CommandError as e:
echo(e, file=sys.stderr)
else:
echo(e, file=sys.stderr)
@cli.command()
@argument('service')
@config
def sync(service): # FIXME available service listing
"""Sync to services."""
package = 'cliche.services.' + service
if package in find_packages():
import_string(package + ':sync').delay()
else:
echo('There is no such service \'{}\' suitable for synchronization.'
.format(service),
file=sys.stderr)
@cli.command()
@config
def shell():
"""Run a Python shell inside Flask application context."""
with flask_app.test_request_context():
context = dict(app=_request_ctx_stack.top.app)
# Use basic python shell
code.interact(local=context)
@cli.command()
@option('--host', '-h')
@option('--port', '-p', type=int)
@option('--threaded', is_flag=True)
@option('--processes', type=int, default=1)
@option('--passthrough-errors', is_flag=True)
@option('--debug/--no-debug', '-d/-D', default=False,
help='enable the Werkzeug debugger'
' (DO NOT use in production code)')
@option('--reload/--no-reload', '-r/-R', default=False,
help='monitor Python files for changes'
' (not 100% safe for production use)')
@config
def runserver(host, port, threaded, processes,
passthrough_errors, debug, reload):
"""Run the Flask development server i.e. app.run()"""
flask_app.run(host=host,
port=port,
debug=debug,
use_debugger=debug,
use_reloader=reload,
threaded=threaded,
processes=processes,
passthrough_errors=passthrough_errors)
#: (:class:`collections.abc.Callable`) The CLI entry point.
main = cli
|
Python
| 0.000001
|
@@ -3288,32 +3288,72 @@
ile=sys.stderr)%0A
+ raise SystemExit(1)%0A
else
@@ -3387,32 +3387,68 @@
ile=sys.stderr)%0A
+ raise SystemExit(1)%0A
%0A%0A@cli.command()
|
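Both error branches now report through echo() and then terminate with a non-zero status, so shell scripts and CI can detect the failed upgrade. The general CLI error-path shape, with an illustrative command body rather than cliche's:

import sys

def upgrade(revision, ok):
    if not ok:
        # Report on stderr, then exit non-zero -- as the diff adds.
        print('upgrade to %s failed' % revision, file=sys.stderr)
        raise SystemExit(1)
    print('upgraded to', revision)

upgrade('head', ok=True)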
788073cdf2a5e2ee142cbcf1263accad7baac153
|
Move chmod
|
genes/gnu_coreutils/commands.py
|
genes/gnu_coreutils/commands.py
|
#!/usr/bin/env python
from genes.posix.traits import only_posix
from genes.process.commands import run
@only_posix()
def chgrp(path, group):
run(['chgrp', group, path])
@only_posix()
def chown(path, user):
run(['chown', user, path])
@only_posix()
def groupadd(*args):
run(['groupadd'] + list(args))
@only_posix()
def ln(*args):
run(['ln'] + list(args))
@only_posix()
def mkdir(path, mode=None):
if mode:
run(['mkdir', '-m', mode, path])
else:
run(['mkdir', path])
@only_posix()
def useradd(*args):
# FIXME: this is a bad way to do things
# FIXME: sigh. this is going to be a pain to make it idempotent
run(['useradd'] + list(args))
@only_posix()
def usermod(*args):
# FIXME: this is a bad way to do things
run(['usermod'] + list(args))
|
Python
| 0.000001
|
@@ -233,32 +233,145 @@
user, path%5D)%0A%0A%0A
+@only_posix()%09%09%0Adef chmod(*args):%09%09%0A # FIXME: this is ugly, name the args%09%09%0A run(%5B'chmod'%5D + list(args))%0A%0A%0A
@only_posix()%0Ade
|
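The moved chmod wrapper follows the same shape as chgrp and chown: a thin shell over run(). A standalone equivalent using subprocess directly, since genes' run() helper is not shown in this record:

import subprocess

def chmod(*args):
    # Pass-through wrapper, e.g. chmod('u+x', '/tmp/script.sh').
    subprocess.run(['chmod'] + list(args), check=True)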
14ba4d581428ff38190153955315dc13483623e1
|
simplify (at least for me)
|
find_groups.py
|
find_groups.py
|
import random
from collections import namedtuple, defaultdict, deque
Point = namedtuple('Point', ['x', 'y'])
Size = namedtuple('Size', ['w', 'h'])
EMPTY = 0
BLACK = 1
WHITE = 2
BOARD_LETTERS = 'ABCDEFGHJKLMNOPQRST'
class Group:
def __init__(self, color):
self.color = color
self.points = set()
self.liberties = set()
def get_num_liberties(self):
return len(self.liberties)
def __len__(self):
return len(self.points)
def __repr__(self):
return '<group color={} {} points {} liberties>'.format(
self.color, len(self.points), len(self.liberties))
class Board:
def __init__(self, size):
self.size = size
self.stones = {}
def random_fill(self, seed=None):
rand = random.Random(seed)
for point in self.iter_points():
color = rand.choice([EMPTY, BLACK, WHITE])
if color != EMPTY:
self.stones[point] = color
def is_inside(self, point):
return 0 <= point.x < self.size.w and 0 <= point.y < self.size.h
def get_color(self, point):
return self.stones.get(point, 0)
def get_neighbours(self, point):
x, y = point
_points = [Point(x-1, y), Point(x+1, y), Point(x, y-1), Point(x, y+1)]
points = filter(lambda p: self.is_inside(p), _points)
return points
def iter_points(self):
for x in range(self.size.w):
for y in range(self.size.h):
yield Point(x, y)
def find_groups(self):
groups = []
grouped_points = set()
for point, color in self.stones.items():
assert color != EMPTY
if point in grouped_points:
continue
group = Group(color)
todo = [point]
while todo:
point = todo.pop()
color = self.stones.get(point, EMPTY)
if point in grouped_points:
continue
elif color == EMPTY:
group.liberties.add(point)
elif color == group.color:
group.points.add(point)
grouped_points.add(point)
todo.extend(self.get_neighbours(point))
groups.append(group)
return groups
def print_board(board):
color_chars = {
# Characters that are easy to tell apart at a glance.
EMPTY: '.',
BLACK: '#',
WHITE: 'o',
}
print()
print(' ', ' '.join(BOARD_LETTERS[:board.size.w]))
print()
for y in range(board.size.h):
line = []
for x in reversed(range(board.size.w)):
line.append(color_chars[board.get_color(Point(x, y))])
rownum = board.size.h - y
print(' {:2} '.format(rownum), ' '.join(line))
print()
def print_captured_groups(groups, board_size):
board = Board(board_size)
for group in groups:
if group.get_num_liberties() == 0:
for point in group.points:
board.stones[point] = group.color
print_board(board)
board = Board(Size(9, 9))
board.random_fill(seed=13)
print('Board:')
print_board(board)
groups = board.find_groups()
print('Captured groups:')
print_captured_groups(groups, board.size)
|
Python
| 0.000002
|
@@ -1875,70 +1875,19 @@
-color = self.stones.get(point, EMPTY)%0A%0A
if poin
+t no
t in
@@ -1925,22 +1925,51 @@
co
-ntinue
+lor = self.stones.get(point, EMPTY)
%0A
@@ -1969,34 +1969,36 @@
-el
+
if color == EMPT
@@ -1992,32 +1992,36 @@
color == EMPTY:%0A
+
@@ -2067,16 +2067,20 @@
+
+
elif col
@@ -2114,24 +2114,28 @@
+
group.points
@@ -2158,32 +2158,36 @@
+
+
grouped_points.a
@@ -2188,32 +2188,36 @@
ints.add(point)%0A
+
|
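The simplification moves the already-grouped check to the top of the flood-fill loop and only then looks up the point's color. The same loop shape as a standalone function, where grid maps point -> color and 0 means empty:

def flood_group(start, grid, neighbours):
    points, liberties = set(), set()
    color = grid[start]
    todo = [start]
    while todo:
        point = todo.pop()
        if point in points:            # skip grouped points first
            continue
        c = grid.get(point, 0)
        if c == 0:
            liberties.add(point)       # empty neighbour: a liberty
        elif c == color:
            points.add(point)
            todo.extend(neighbours(point))
    return points, liberties

grid = {(0, 0): 1, (0, 1): 1}
nbrs = lambda p: [(p[0] + 1, p[1]), (p[0] - 1, p[1]),
                  (p[0], p[1] + 1), (p[0], p[1] - 1)]
print(flood_group((0, 0), grid, nbrs))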
0ab67a50c711e3a15974f3bb4fe9df84fac6608a
|
use new template
|
gimmemotifs/commands/cluster.py
|
gimmemotifs/commands/cluster.py
|
#!/usr/bin/env python
# Copyright (c) 2009-2016 Simon van Heeringen <s.vanheeringen@science.ru.nl>
#
# This module is free software. You can redistribute it and/or modify it under
# the terms of the MIT License, see the file COPYING included with this
# distribution.
from distutils import sysconfig
from gimmemotifs.motif import pwmfile_to_motifs
from gimmemotifs.comparison import MotifComparer
from gimmemotifs.cluster import cluster_motifs
import sys
import os
import kid
def cluster(args):
revcomp = not args.single
outdir = os.path.abspath(args.outdir)
if not os.path.exists(outdir):
os.mkdir(outdir)
trim_ic = 0.2
clusters = []
motifs = pwmfile_to_motifs(args.inputfile)
if len(motifs) == 1:
clusters = [[motifs[0], motifs]]
else:
tree = cluster_motifs(args.inputfile, "total", "wic", "mean", True, threshold=args.threshold, include_bg=True)
clusters = tree.getResult()
ids = []
mc = MotifComparer()
sys.stderr.write("Creating images\n")
for cluster,members in clusters:
cluster.trim(trim_ic)
cluster.to_img(os.path.join(outdir,"%s.png" % cluster.id), format="PNG")
ids.append([cluster.id, {"src":"%s.png" % cluster.id},[]])
if len(members) > 1:
scores = {}
for motif in members:
scores[motif] = mc.compare_motifs(cluster, motif, "total", "wic", "mean", pval=True)
add_pos = sorted(scores.values(),cmp=lambda x,y: cmp(x[1], y[1]))[0][1]
for motif in members:
score, pos, strand = scores[motif]
add = pos - add_pos
if strand in [1,"+"]:
pass
else:
#print "RC %s" % motif.id
rc = motif.rc()
rc.id = motif.id
motif = rc
#print "%s\t%s" % (motif.id, add)
motif.to_img(os.path.join(outdir, "%s.png" % motif.id.replace(" ", "_")), format="PNG", add_left=add)
ids[-1][2] = [dict([("src", "%s.png" % motif.id.replace(" ", "_")), ("alt", motif.id.replace(" ", "_"))]) for motif in members]
kid.enable_import()
prefix = sysconfig.get_config_var("prefix")
template_file = os.path.join(prefix, "share/gimmemotifs/templates/cluster_template.kid")
template = kid.Template(file=template_file, motifs=ids)
f = open(os.path.join(outdir, "cluster_report.html"), "w")
f.write(template.serialize())
f.close()
f = open(os.path.join(outdir, "cluster_key.txt"), "w")
for id in ids:
f.write("%s\t%s\n" % (id[0], ",".join([x["alt"] for x in id[2]])))
f.close()
f = open(os.path.join(outdir, "clustered_motifs.pwm"), "w")
if len(clusters) == 1 and len(clusters[0][1]) == 1:
f.write("%s\n" % clusters[0][0].to_pwm())
else:
for motif in tree.get_clustered_motifs():
f.write("%s\n" % motif.to_pwm())
f.close()
|
Python
| 0
|
@@ -441,16 +441,59 @@
_motifs%0A
+from gimmemotifs.config import MotifConfig%0A
import s
@@ -513,19 +513,22 @@
%0Aimport
-kid
+jinja2
%0A%0Adef cl
@@ -2261,144 +2261,152 @@
-kid.enable_import()%0A prefix = sysconfig.get_config_var(%22prefix%22)%0A template_file = os.path.join(prefix, %22share/gimmemotifs/
+config = MotifConfig()%0A env = jinja2.Environment(loader=jinja2.FileSystemLoader(%5Bconfig.get_template_dir()%5D))%0A template = env.get_
template
s/cl
@@ -2405,10 +2405,10 @@
late
-s/
+(%22
clus
@@ -2424,62 +2424,50 @@
ate.
-kid%22)%0A template = kid.Template(file=template_file,
+jinja.html%22)%0A result = template.render(
moti
@@ -2474,23 +2474,25 @@
fs=ids)%0A
+%0A
-f =
+with
open(os
@@ -2542,21 +2542,31 @@
%22w%22)
-%0A
+ as f:%0A
+
f.write(
temp
@@ -2565,42 +2565,30 @@
ite(
-template.serialize())%0A f.close(
+result.encode('utf-8')
)%0A%0A
|
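The diff retires the kid templating engine in favour of jinja2. The three jinja2 calls it introduces, in isolation; this sketch assumes a templates/ directory containing the named file, whereas the real loader path comes from MotifConfig:

import jinja2

env = jinja2.Environment(loader=jinja2.FileSystemLoader(['templates']))
template = env.get_template('cluster_template.jinja.html')
result = template.render(motifs=[['m1', {'src': 'm1.png'}, []]])

# Python 3: write the rendered str directly; the diff's
# result.encode('utf-8') is the Python 2 spelling.
with open('cluster_report.html', 'w') as f:
    f.write(result)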
394954fc80230e01112166db4fe133c107febead
|
Allow more than one GitHub repo from the same user
|
gitautodeploy/parsers/common.py
|
gitautodeploy/parsers/common.py
|
class WebhookRequestParser(object):
"""Abstract parent class for git service parsers. Contains helper
methods."""
def __init__(self, config):
self._config = config
def get_matching_repo_configs(self, urls):
"""Iterates over the various repo URLs provided as argument (git://,
ssh:// and https:// for the repo) and compare them to any repo URL
specified in the config"""
configs = []
for url in urls:
for repo_config in self._config['repositories']:
if repo_config in configs:
continue
if repo_config['url'] == url:
configs.append(repo_config)
elif 'url_without_usernme' in repo_config and repo_config['url_without_usernme'] == url:
configs.append(repo_config)
return configs
|
Python
| 0.998892
|
@@ -628,15 +628,44 @@
nfig
-%5B
+.get('repo', repo_config.get(
'url'
-%5D
+))
==
@@ -885,16 +885,17 @@
return configs
+%0A
|
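The fix matches an incoming URL against an optional 'repo' key before falling back to 'url', via chained dict.get() calls, so two configs can point at the same GitHub user. The matching loop in isolation, with made-up repository entries:

def matching_configs(urls, repositories):
    configs = []
    for url in urls:
        for repo_config in repositories:
            if repo_config in configs:
                continue
            # Prefer an explicit 'repo' key, fall back to 'url'.
            if repo_config.get('repo', repo_config.get('url')) == url:
                configs.append(repo_config)
    return configs

repos = [{'url': 'git@github.com:user/a.git'},
         {'url': '/srv/clone-b', 'repo': 'git@github.com:user/b.git'}]
print(matching_configs(['git@github.com:user/b.git'], repos))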
eec0415702c728b83ddba6c7ccef5db7f5f332e5
|
fix hovering bug introduced by template rewrite
|
extstats/templates.py
|
extstats/templates.py
|
### template engine
import html, types
def render(node):
assert node is not None # TODO: None => '' ?
if type(node) in (tuple, list, types.GeneratorType):
return ''.join(render(child) for child in node)
if type(node) == str:
return html.escape(node)
children_rendered = ''
if node.children:
children_rendered = render(node.children)
attrs_rendered = ''
if node.attrs:
# TODO: really, should I ignore the None values ?
# TODO: class_, is_, ...
attrs_rendered = ' ' + ' '.join(
html.escape(key.replace('class_', 'class')) + '="' + html.escape(value) + '"'
for key, value in node.attrs.items()
if value is not None)
if node.tag in ('br', 'img', 'hr'):
assert not node.children
return '<{tag}{attrs} />'.format(tag=node.tag, attrs=attrs_rendered)
return '<{tag}{attrs}>{children}</{tag}>'.format(
tag=node.tag, children=children_rendered, attrs=attrs_rendered)
class Node:
def __init__(self, tag, attrs=None, children=None):
assert tag
self.tag = tag
self.attrs = attrs
self.children = children
def __truediv__(self, children):
if type(children) not in (tuple, list):
children = (children,)
self.children = children
return self
def __str__(self):
return render(self)
class _H:
def __getattr__(self, tag):
return lambda **attrs: Node(tag, attrs)
H = _H()
### templates
import datetime, json
VIEW_SOURCE_URL = "/source/crxviewer.html?crx="
def _base(content='', title_prefix=''):
return render(H.html() / (
H.head() / (
H.meta(charset="utf-8"),
H.meta(content="width=device-width, initial-scale=1", name="viewport"),
H.title() / (title_prefix + "Chrome Extensions Archive"),
H.link(href="/style.css", media="screen", rel="stylesheet", type="text/css"),
),
H.body() / (
H.a(href='/') / (H.h1() / "Chrome Extensions Archive"),
H.div(style='text-align: right') /
H.a(href="https://github.com/mdamien/chrome-extensions-archive") /
"github.com/mdamien/chrome-extensions-archive",
H.hr(),
content,
),
))
def _add_commas(n):
return "{:,}".format(int(n))
def _sizeof_fmt(num):
for unit in ['', 'Ko', 'Mo', 'Go', 'To']:
if abs(num) < 1024.0:
return "%3.1f%s" % (num, unit)
num /= 1024.0
return "%.1f%s" % (num, 'Yi')
def _nl2br(text):
return ((t, H.br()) for t in text.split('\n') if t)
def _ext(ext):
return (
H.small(class_='extlink') / (
H.a(href='/ext/%s.html' % ext['ext_id']) / ('#' + ext['ext_id'])),
H.h2(id=ext['ext_id']) / (H.a(href=ext['url']) / ext['name']),
H.small() / _add_commas(ext['user_count']),
H.ul() / ((
H.li() / (
H.a(href=file['storage_url']) / (
file['name'].replace('.zip', ''),
' - ',
H.small() / (' ', _sizeof_fmt(file['size'])),
),
H.small() /
H.a(target='_blank', rel='noreferrer',
href=VIEW_SOURCE_URL + file['storage_url']),
)
) for file in ext['files'])
)
def list(exts, page, pages, name, exts_count, files_count, total_size):
def _page(p):
link = H.a(href='/' + name) / (' %d ' % p)
if p == page:
return H.strong() / link
return link
def _ext(ext):
return (
H.small(class_='extlink') / (
H.a(href='/ext/%s.html' % ext['ext_id']) / ('#' + ext['ext_id'])),
H.h2(id=ext['ext_id']) / (H.a(href=ext['url']) / ext['name']),
H.small() / _add_commas(ext['user_count']),
H.ul() / ((
H.li() / (
H.a(href=file['storage_url']) / (
file['name'].replace('.zip', ''),
' - ',
H.small() / (' ', _sizeof_fmt(file['size'])),
),
H.small() /
H.a(target='_blank', rel='noreferrer',
href=VIEW_SOURCE_URL + file['storage_url']),
)
) for file in ext['files'])
)
return _base((
H.div(style="text-align: center") / (
H.strong() / _add_commas(exts_count),
' extensions, ',
H.strong() / _add_commas(files_count),
' versions, ',
H.strong() / _sizeof_fmt(total_size),
' stored',
H.br(),
'Last update: ' + datetime.datetime.now().strftime('%Y-%m-%d'),
),
H.div(style="text-align: center") / (
'Pages:',
*(_page(p) for p in range(1, pages)),
'(ordered by # of users)'
),
H.hr(),
*(_ext(ext) for ext in exts),
))
def ext(ext):
return _base((
*_ext(ext),
H.p(class_='description') / _nl2br(ext['full_description']),
H.hr(),
H.pre(class_='pprint') / json.dumps(ext, indent=2, sort_keys=True)
), title_prefix=ext['name'])
|
Python
| 0
|
@@ -2607,35 +2607,45 @@
ext):%0A return
+ H.div() /
(%0A
-
H.small(
@@ -3569,828 +3569,8 @@
nk%0A%0A
- def _ext(ext):%0A return (%0A H.small(class_='extlink') / (%0A H.a(href='/ext/%25s.html' %25 ext%5B'ext_id'%5D) / ('#' + ext%5B'ext_id'%5D)),%0A H.h2(id=ext%5B'ext_id'%5D) / (H.a(href=ext%5B'url'%5D) / ext%5B'name'%5D),%0A H.small() / _add_commas(ext%5B'user_count'%5D),%0A H.ul() / ((%0A H.li() / (%0A H.a(href=file%5B'storage_url'%5D) / (%0A file%5B'name'%5D.replace('.zip', ''),%0A ' - ',%0A H.small() / (' ', _sizeof_fmt(file%5B'size'%5D)),%0A ),%0A H.small() /%0A H.a(target='_blank', rel='noreferrer',%0A href=VIEW_SOURCE_URL + file%5B'storage_url'%5D),%0A )%0A ) for file in ext%5B'files'%5D)%0A )%0A%0A
@@ -4229,24 +4229,24 @@
_base((%0A
+
*_ext(ex
@@ -4237,17 +4237,16 @@
-*
_ext(ext
|
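The fix deletes the duplicated inner _ext helper and wraps the module-level one's output in a single H.div() node, so each extension renders as one element that hover styles can target. The node DSL reduced to its essentials, as a stand-in for the module's Node/H machinery:

import html

class Node:
    def __init__(self, tag):
        self.tag, self.children = tag, ()

    def __truediv__(self, children):
        # node / (child, ...) attaches children and returns the node.
        if not isinstance(children, (tuple, list)):
            children = (children,)
        self.children = children
        return self

    def render(self):
        inner = ''.join(c.render() if isinstance(c, Node) else html.escape(c)
                        for c in self.children)
        return '<{0}>{1}</{0}>'.format(self.tag, inner)

# One wrapping div per extension card.
card = Node('div') / (Node('h2') / 'My Extension', Node('small') / '1,234')
print(card.render())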
d4fed426153105a9f8cab595848d5303003449b8
|
revert last commit, import properly
|
cogs/games.py
|
cogs/games.py
|
import discord
from discord.ext import commands
from datetime import datetime
from utils import aiohttp_wrap as aw
class Game:
""" Cog which allows fetching of video game information """
IG_URL = 'https://api-2445582011268.apicast.io/{}/'
with open('data/apikeys.json') as f:
KEY = json.load(f)['pgdb']
def __init__(self, bot):
self.bot = bot
self.session = bot.aio_session
@commands.command(aliases=['games'])
async def game(self, ctx, *, query: str):
""" Search for some information about a game """
url = self.IG_URL.format('games')
headers = {'user-key': self.KEY}
params = {'search': query,
'fields': 'name,summary,first_release_date,aggregated_rating,cover'}
resp = await aw.aio_get_json(self.session, url, headers=headers, params=params)
await ctx.send(f'{resp}'[:500])
def setup(bot):
bot.add_cog(Game(bot))
|
Python
| 0
|
@@ -9,16 +9,28 @@
iscord %0A
+import json%0A
from dis
@@ -817,20 +817,19 @@
aw.
-a
+sess
io
+n
_get
-_json
(sel
@@ -875,16 +875,23 @@
=params)
+.json()
%0A%0A
|
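The revert swaps the one-shot JSON helper for a session-level GET whose result is decoded via .json(). With plain aiohttp, and no wrapper module, the equivalent known-good pattern is:

import asyncio
import aiohttp

async def fetch_json(url, headers=None, params=None):
    # One session per call here; a long-lived bot keeps one session.
    async with aiohttp.ClientSession() as session:
        async with session.get(url, headers=headers, params=params) as resp:
            return await resp.json()

data = asyncio.run(fetch_json('https://httpbin.org/get', params={'q': 'x'}))
print(str(data)[:200])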
ecddf1c61df3b803b4b00560600db9ac2f17b567
|
Add profile
|
forum/admin.py
|
forum/admin.py
|
import json
import os
import csv
from flask import Response, flash, stream_with_context, redirect
from flask_admin.babel import gettext
from flask_admin.base import expose
from flask_admin.contrib.pymongo import ModelView
from flask_admin.contrib.pymongo.filters import BasePyMongoFilter, FilterEqual
from flask_admin.helpers import get_redirect_target
from flask_admin.form import rules
from flask_login import current_user
from werkzeug import secure_filename
from wtforms import fields, form, validators
from export import log, generate_vals
SECTIONS = ['equipement', 'transport', 'restauration', 'badges', 'programme',
'name', 'acompte',
'emplacement', 'duration', 'equiped', 'banner', 'size']
def get_sections():
fn = os.path.join(os.path.dirname(__file__), 'sections.json')
with open(fn) as file:
return json.load(file)
class CompanyForm(form.Form):
# Basic
id = fields.StringField('Identifier', validators=[validators.Required(), validators.Length(min=3, max=30)], render_kw={"placeholder": "Ex. LOREAL"})
password = fields.StringField('Password', validators=[validators.Required(), validators.Length(min=3, max=30)], render_kw={"placeholder": "Ex. password"})
name = fields.StringField('Full name', render_kw={"placeholder": "Ex. L'Oreal"})
acompte = fields.SelectField('Deposit paid?', choices=[('non', 'no'), ('oui', 'yes')])
# Equipment
emplacement = fields.StringField('Location', render_kw={"placeholder": "Ex. F13"})
size = fields.SelectField('Surface area', choices=[(9, '9 m2'), (12, '12 m2'), (18, '18 m2'), (36, '36 m2')], coerce=int)
duration = fields.SelectField('Days present', choices=[(1, '1 day'), (2, '2 days')], coerce=int)
equiped = fields.SelectField('Equipped?', choices=[('non', 'no'), ('oui', 'yes')])
# Dashboard
equipement = fields.SelectField('Validate Equipment', choices=[('non', 'no'), ('oui', 'yes')])
restauration = fields.SelectField('Validate Catering', choices=[('non', 'no'), ('oui', 'yes')])
badges = fields.SelectField('Validate Badges', choices=[('non', 'no'), ('oui', 'yes')])
transport = fields.SelectField('Validate Transport', choices=[('non', 'no'), ('oui', 'yes')])
programme = fields.SelectField('Validate Program', choices=[('non', 'no'), ('oui', 'yes')])
class CompanyView(ModelView):
form = CompanyForm
column_list = ['id'] + SECTIONS[:5]
export_types = SECTIONS[:4]
form_rules = [
rules.FieldSet(('id', 'password', 'name'), 'Profil'),
rules.FieldSet(('equipement', 'restauration', 'badges',
'programme', 'transport'), 'Avancement'),
rules.FieldSet(('acompte',), 'Finances'),
rules.FieldSet(('size', 'duration', 'equiped', 'emplacement'), 'Finances'),
]
can_export = True
can_delete = True
create_modal = False
edit_modal = False
column_searchable_list = ['id']
def __init__(self, *args, **kwargs):
super(CompanyView, self).__init__(*args, **kwargs)
self.static_folder = 'static'
self.endpoint = 'admin'
self.name = 'Companies'
self.sections = get_sections()
def is_accessible(self):
return current_user.get_id() == 'admin' and current_user.is_authenticated
def _on_model_change(self, form, model, is_created):
if is_created:
model['sections'] = self.sections
@expose('/export/<export_type>/')
def export(self, export_type):
return_url = get_redirect_target() or self.get_url('.index_view')
if not self.can_export or (export_type not in self.export_types):
flash(gettext('Permission denied.'), 'error')
return redirect(return_url)
if export_type == 'csv':
return self._export_csv(return_url)
else:
return self._export_fields(export_type, return_url)
def _export_fields(self, export_type, return_url):
count, data = self._export_data()
# Dummy object for csv creation
class Echo(object):
def write(self, value):
return value
writer = csv.writer(Echo())
data = [r for r in data if r['id'] != 'admin']
gen_vals = generate_vals(writer, export_type, data)
filename = self.get_export_name(export_type='csv')
disposition = 'attachment;filename=%s' % (
secure_filename(filename.replace(self.name, export_type)),)
return Response(
stream_with_context(gen_vals),
headers={'Content-Disposition': disposition},
mimetype='text/csv'
)
class UserForm(form.Form):
id = fields.StringField(
'Email', render_kw={"placeholder": "Ex. yokoya@live.com"})
password = fields.PasswordField('Password', validators=[validators.Required(
), validators.Length(min=5, max=30)], render_kw={"placeholder": "Ex. 123456"})
class FilterRegister(FilterEqual, BasePyMongoFilter):
def apply(self, query, value):
query.append({'events.{}.registered'.format(value): True})
return query
def operation(self):
return "evenement"
class UserView(ModelView):
column_list = ['id', 'events', 'confirmed_on', 'registered_on']
column_labels = dict(id='Email')
export_types = ['csv']
can_export = True
can_delete = True
can_view_details = True
form = UserForm
column_export_list = ['id', 'registered_on', 'confirmed_on', 'events', 'profile']
column_filters = (FilterRegister(column='events', name='participants', options=(('styf', 'styf'), ('joi', 'joi'))),)
def __init__(self, *args, **kwargs):
super(UserView, self).__init__(*args, **kwargs)
self.name = 'Users'
class EventForm(form.Form):
name = fields.StringField('Name')
type = fields.StringField('Type')
quota = fields.IntegerField('Quota')
places_left = fields.IntegerField('Places remaining')
class EventView(ModelView):
column_list = ['name', 'type', 'quota', 'places_left']
column_labels = dict(id='Email')
export_types = ['csv']
can_export = True
can_delete = True
form = EventForm
def __init__(self, *args, **kwargs):
super(EventView, self).__init__(*args, **kwargs)
self.name = 'Journee Objectif Ingenieur'
|
Python
| 0.000001
|
@@ -5264,16 +5264,27 @@
ered_on'
+, 'profile'
%5D%0A co
@@ -5429,24 +5429,61 @@
= UserForm%0A
+ column_searchable_list = ('id',)%0A
column_e
|
086ea392e0c61a35f799cd99e8972021bdb7959c
|
Add verbosity level
|
forum/views.py
|
forum/views.py
|
# coding=utf-8
import json
import os
import requests
import datetime
from deepdiff import DeepDiff
from flask import abort, redirect, render_template, request, send_from_directory, url_for, make_response, send_file, session
from flask_login import current_user, login_required, login_user, logout_user
from login import validate_login
from storage import Company, get_company, set_company, get_db
from forum import app, GridFS, log
from gridfs.errors import NoFile
from mailing import send_mail
from bson.objectid import ObjectId
# Admin
@app.route('/dashboard')
@app.route('/dashboard/<page>')
@login_required
def dashboard(page=None):
if page == 'cvtheque':
abort(404)
company = None
if current_user.id == 'admin':
if request.args.get('id'):
session['company_id'] = request.args.get('id')
if not session.get('company_id'):
return redirect('/admin')
company = get_db().companies.find_one({'id': session['company_id']}, {'_id': 0})
if not page or page == 'accueil':
return render_template('dashboard/dashboard.html', company=company)
return render_template('dashboard/sections/{}.html'.format(page), company=company)
@app.route('/connexion', methods=["GET", "POST"])
def login():
if request.method == 'POST':
remember_me = 'remember_me' in request.form
company_id = request.form.get('id', None)
password = request.form.get('password', None)
company = get_company(company_id)
# checking stuff out
if not company_id or not password:
return render_template('login.html', error="blank_fields")
if not company:
return render_template('login.html', error="no_company_found")
if not validate_login(company['password'], password):
return render_template('login.html', error="wrong_password")
# all is good
company = Company(id=company_id, password=password)
print('connected_as: {}'.format(company_id))
login_user(company, remember=remember_me)
if company_id == "admin":
return redirect('/admin')
else:
return redirect(request.args.get('next') or url_for('dashboard'))
return render_template('login.html')
@app.route('/js_error', methods=["POST"])
def js_error():
print('js_error', request.form.to_dict())
return 'success'
@app.route('/deconnexion')
def logout():
logout_user()
return redirect(url_for('index'))
@app.route('/update_company', methods=["POST"])
@login_required
def update_company():
page = request.form.get('page')
if current_user.data.get(page) and current_user.id != 'admin':
return "error"
else:
company = request.form.get('company')
company = json.loads(company)
old_company = get_db().companies.find_one({'id': company['id']}, {'_id': 0})
diff = DeepDiff(old_company, company, ignore_order=True).json
set_company(company['id'], company)
get_db().stream.insert({'delivered': False, 'validated': False, 'created_on': datetime.datetime.now().isoformat(' '), 'company': company['id'], 'diff': diff})
return "success"
@app.route('/get_resume/<oid>')
def get_resume(oid):
try:
file = GridFS.get(ObjectId(oid))
response = make_response(file.read())
response.mimetype = file.content_type
return response
except NoFile:
abort(404)
@app.route('/validate_section', methods=["POST"])
@login_required
def validate_section():
page = request.form.get('page')
if not current_user.data.get(page):
get_db().companies.update_one({'id': current_user.id}, {'$set': {page: True}})
return "success"
else:
return "error"
@app.route('/update_banner', methods=["POST"])
@login_required
def update_banner():
if not current_user.data.get('equipement'):
company_id = request.form.get('pk')
banner = request.form.get('value')
company = get_company(company_id)
company['banner'] = banner
set_company(company['id'], company)
return "success"
else:
abort(500)
@app.route('/add_job', methods=["POST"])
@login_required
def add_job():
job = request.form.get('job')
job = json.loads(job)
get_db().jobs.insert_one(job)
return "success"
@app.route('/remove_job', methods=["POST"])
@login_required
def remove_job():
job_id = request.form.get('id')
get_db().jobs.delete_one({'_id': ObjectId(job_id)})
return "success"
@app.route('/identicon', methods=["GET"])
@login_required
def identicon():
from binascii import hexlify
from identicon import render_identicon
from io import BytesIO
text = request.args.get('text', 'EMPTY')
code = int(hexlify(text.encode('utf-8')), 16)
size = 25
img = render_identicon(code, size)
stream = BytesIO()
img.save(stream, format='png')
stream.seek(0)
return send_file(
stream,
mimetype='image/png'
)
# VITRINE
# start of app
@app.route('/', methods=["GET"])
@app.route('/<page>', methods=["GET"])
def index(page=None):
if page == 'presse':
return render_template('press.html'.format(page))
return render_template('index.html')
@app.route('/send_request', methods=["GET"])
def send_request():
# Params
email = request.args.get('email')
contact_name = request.args.get('nom_complet')
company_name = request.args.get('nom')
telephone = request.args.get('tel')
captcha = request.args.get('captcha')
# ReCaptcha
base_url = 'https://www.google.com/recaptcha/api/siteverify'
secret = os.environ.get('RECAPTCHA_SECRET_KEY')
res = requests.post(base_url, data={'response': captcha, 'secret': secret}).json()
# Sending mail...
if res.get('success'):
return send_mail(email, contact_name, company_name, telephone)
else:
abort(500)
# INDEXING
@app.route('/robots.txt')
@app.route('/sitemap.xml')
def static_from_root():
return send_from_directory(app.static_folder, request.path[1:])
|
Python
| 0.99989
|
@@ -2877,24 +2877,41 @@
%7B'_id': 0%7D)%0A
+ try:%0A
diff
@@ -2961,22 +2961,160 @@
der=True
-).json
+, verbose_level=2).json%0A except:%0A diff = %7B'error': True, 'message': 'an error has occured. see company directly for changes.'%7D
%0A
|
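verbose_level=2 makes DeepDiff report the values of added and removed items rather than just their paths, and the new try/except substitutes a marker dict when serialization fails. A sketch of the call; note that recent deepdiff releases expose to_json() in place of the .json attribute used above:

from deepdiff import DeepDiff

old = {'id': 'acme', 'banner': 'old.png'}
new = {'id': 'acme', 'banner': 'new.png', 'size': 9}

diff = DeepDiff(old, new, ignore_order=True, verbose_level=2)
try:
    payload = diff.to_json()
except Exception:
    payload = {'error': True, 'message': 'diff serialization failed'}
print(payload)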
688e0426b80f64152b22a3a859020b46d8abc8c2
|
Update radio.py
|
cogs/radio.py
|
cogs/radio.py
|
from .utils import config, checks, formats
import discord
from discord.ext import commands
import discord.utils
import random, json, asyncio
from urllib.parse import unquote
class Radio:
"""The radio-bot related commands."""
def __init__(self, bot):
self.bot = bot
if not discord.opus.is_loaded():
discord.opus.load_opus('/usr/local/lib/libopus.so') #FreeBSD path
self.player = None
self.stopped = True
self.break_loop = asyncio.Event()
#self.break_loop.clear()
self.q = asyncio.Queue()
self.play_next_song = asyncio.Event()
self.current_song = None
self.songs_dir = 'radio/'
self.songs = []
self.playlists = {}
self.update_song_list()
@property
def is_playing(self):
return self.player is not None and self.player.is_playing() and not self.stopped
def toggle_next_song(self):
if not self.stopped:
self.bot.loop.call_soon_threadsafe(self.play_next_song.set)
def update_song_list(self):
self.songs = self.bot.yadisk.list_files(self.songs_dir)
self.update_playlists()
def update_playlists(self):
pl_files = self.bot.yadisk.list_files(self.songs_dir + 'playlists/')
for pl in pl_files:
pl_url = self.bot.yadisk.direct_link(self.songs_dir + 'playlists' + pl)
pl_data = self.bot.yadisk._get(pl_url).json()['songs']
self.playlists[pl] = []
for song in pl_data:
if song in self.songs:
self.playlists[pl].append(song)
@commands.command(pass_context=True)
async def join(self, ctx, *, channel_name : str):
"""Зайти на указанный голосовой канал."""
if self.bot.is_voice_connected():
await ctx.invoke(self.leave)
check = lambda c: c.name == channel_name and c.type == discord.ChannelType.voice
channel = discord.utils.find(check, ctx.message.server.channels)
if channel is None:
await self.bot.say('No such voice channel.')
await self.bot.join_voice_channel(channel)
@commands.command(pass_context=True)
async def leave(self, ctx):
"""Покинуть текущий голосовой канал."""
await ctx.invoke(self.stop)
await self.bot.voice.disconnect()
@commands.command()
async def pause(self):
"""Приостановить воспроизведение."""
if self.player is not None:
self.player.pause()
@commands.command()
async def resume(self):
"""Продолжить воспроизведение."""
if self.player is not None and not self.is_playing:
self.player.resume()
@commands.command()
async def skip(self):
"""Перейти к следующей песне в очереди."""
if self.player is not None and self.is_playing:
self.player.stop()
self.toggle_next_song()
@commands.command()
async def stop(self):
"""Остановить воспроизведение."""
if self.is_playing:
self.break_loop.set()
self.play_next_song.set()
self.stopped = True
self.player.stop()
@commands.command(pass_context=True)
async def play(self, ctx):
"""Начать воспроизведение песен из очереди."""
if self.player is not None and not self.stopped:
if not self.is_playing:
await ctx.invoke(self.resume)
return
else:
await self.bot.say('Already playing a song.')
return
while True:
if not self.bot.is_voice_connected():
author_channel = ctx.message.author.voice_channel
if author_channel is not None:
print('Joining {}.'.format(author_channel.name))
await ctx.invoke(self.join, channel_name=author_channel.name)
else:
await self.bot.say('No voice channel selected.')
return
if self.q.empty():
await self.q.put(random.choice(self.songs))
self.play_next_song.clear()
self.current = await self.q.get()
self.player = self.bot.voice.create_ffmpeg_player(
self.bot.yadisk.direct_link(self.songs_dir + self.current),
after=self.toggle_next_song)
#options="-loglevel debug -report",
#headers = dict(self.bot.pycopy.session.headers)
self.stopped = False
self.player.start()
song_name = unquote(self.current.split('/')[-1])
await self.bot.change_status(discord.Game(name=song_name))
await self.play_next_song.wait()
if self.break_loop.is_set():
self.break_loop.clear()
return
await self.bot.say('Leaving play loop')
@commands.command(aliases=['c'])
async def current(self):
"""Что там играет?"""
if self.is_playing:
song_name = unquote(self.current.split('/')[-1])
await self.bot.say(song_name)
@commands.command()
async def update(self):
"""Обновить список песен."""
self.update_song_list()
await self.bot.say("Найдено {} песенок".format(len(self.songs)))
@commands.command()
async def list(self):
"""Вывести список всех доступных песен."""
song_list = ""
id = 1
for song in self.songs:
song_list += "{}. {}\n".format(id, song)
id += 1
if len(song_list) > 1800:
await self.bot.say(song_list)
song_list = ''
await self.bot.say(song_list)
@commands.command(aliases=['ss'])
async def searchsong(self, search_word : str):
"""Искать песню по названию."""
search_result = ""
id = 1
found = False
for song in self.songs:
if search_word.lower() in song.lower():
found = True
search_result += "{}. {}\n".format(id, song)
if len(search_result) > 1800:
await self.bot.say(search_result)
search_result = ''
id += 1
if not found:
search_result = "Нет такой песни."
await self.bot.say(search_result)
@commands.command()
async def add(self, song_num : int):
"""Добавить в конец очереди песню с данным номером."""
await self.q.put(self.songs[song_num-1])
await self.bot.say("{} будет следующей песенкой".format(self.songs[song_num-1]))
@commands.group(pass_context=True, aliases=['pl'])
async def playlist(self):
return
@playlist.command()
async def add(self, playlist : str, song : str):
if not playlist in self.playlists:
await ctx.invoke(self.new, playlist=playlist)
self.playlists[playlist].append(song)
pl_json = {'name': playlist, 'songs': self.playlists[playlist]}
self.bot.yadisk.upload(self.songs_dir + 'playlists' + playlist, json.dumps(pl_json))
def setup(bot):
bot.add_cog(Radio(bot))
|
Python
| 0.000001
|
@@ -1441,24 +1441,25 @@
+ 'playlists
+/
' + pl)%0D%0A
|
d37b4677f12514521a9aed9755007758a8b77c1f
|
Made it so that case functions can take *args, **kwargs
|
fp/__init__.py
|
fp/__init__.py
|
"""
functools2
Provides a number of operator and higher-order functions for FP fun
"""
####
# atoms
####
undefined = object()
####
## Operators
####
import sys
from operator import (
    # arithmetic
add, sub, mul, truediv as div, pow, mod, neg,
# bitwise ops
and_, xor, or_, invert, not_,
concat, contains,
# comparison
lt, le, eq, ne, gt, ge
)
import operator
def case(*args):
    # If args has length one, args[0] is an iterable
if len(args) == 1:
args = args[0]
    # otherwise, args must contain an even number of elements
else:
assert even(len(args)), "uneven case expression"
def inner(value):
for pred, f in ichunk(2, args):
if pred is True:
return f(value)
elif pred(value):
return f(value)
return None
return inner
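# A minimal usage sketch (illustrative, not from the original source):
# alternate predicates with handlers; a literal True acts as a catch-all.
#
#   classify = case(
#       lambda x: x < 0, lambda x: 'negative',
#       even,            lambda x: 'non-negative even',
#       True,            lambda x: 'odd')
#   classify(4)   # -> 'non-negative even'
#   classify(-3)  # -> 'negative'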
def quot(x, y):
"""returns the quotient after dividing the its first integral
argument by its second integral argument"""
return operator.floordiv(x, y)
def in_(item, container):
"""This mimics the signature of Haskell's `elem` function and the
Python "in" operator."""
return item in container
def not_in(item, container):
return item not in container
###
# Partials
###
from functools import partial as p
def pp(f, *args0, **kwargs0):
"""Creates a partial where arguments are prepended to the partial
args"""
def inner(*args1, **kwargs1):
args = args1 + args0
kwargs = dict(kwargs0, **kwargs1)
return f(*args, **kwargs)
return inner
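# Illustrative sketch (not in the original source): unlike functools.partial,
# pp puts the call-time arguments before the stored ones.
#
#   sub_from = pp(sub, 10)  # stores args0 = (10,)
#   sub_from(25)            # -> sub(25, 10) == 15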
def c(f, g):
return lambda x: f(g(x))
def t(fs):
"""Creates a threaded function call
Where compose is right associative
>>> c(p(mul, 2), p(add, 3))(2)
10
The thread partial is left associative
>>> t([p(add, 3), p(mul, 2)])(2)
10
"""
head = ifirst(fs)
tail = irest(fs)
return reduce(
lambda composed, f: c(f, composed),
tail, head)
def const(x):
return lambda *args, **kwargs: x
def flip(f):
return lambda x, y: f(y, x)
def identity(x):
return x
def binfunc(f):
"""turns a binary function into an unary function which takes a pair
>>> points = [(1,2), (110, 320)]
>>> getx = binfunc(lambda x,y: x)
>>> gety = binfunc(lambda x,y: y)
>>> list(imap(getx, points))
[1, 110]
>>> list(imap(gety, points))
[2, 320]
"""
def inner(pair):
x,y = pair
return f(x,y)
return inner
def get(key, obj):
if obj is None:
return None
# treat set membership differently
if hasattr(obj, "issubset") and key in obj:
return key
# handle lists and dicts
try:
return obj[key]
except (KeyError, IndexError, TypeError):
return None
def getter(*args):
"""Creates a function that digs into a nested data structure or returns
default if any of the keys are missing
>>> addresses = [{"address": {"city": "Reston"}},
... {"address": {"city": "Herndon"}},
... {"address": {}}]
>>>
>>> get_city = getter("address", "city")
>>> get_cities = p(imap, get_city)
>>> list(get_cities(addresses))
["Reston", "Herndon", None]
"""
# thread a list of get(key, obj) calls
get_functions = [p(get, key) for key in args]
return t(get_functions)
def kwunary(func, *keys):
if keys:
def inner(dct):
kwargs = {k:dct[k] for k in keys}
return func(**kwargs)
else:
def inner(dct):
return func(**dct)
return inner
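# Illustrative sketch (not in the original source): kwunary adapts a
# keyword-argument function to accept a single dict.
#
#   full_name = kwunary(lambda first, last: first + ' ' + last,
#                       'first', 'last')
#   full_name({'first': 'Ada', 'last': 'Lovelace', 'age': 36})  # -> 'Ada Lovelace'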
###
## generators
###
from itertools import (
izip,
izip_longest,
imap,
ifilter,
islice,
cycle as icycle,
repeat as irepeat,
dropwhile as idropwhile,
takewhile as itakewhile,
compress as icompress,
)
import itertools
def iand(iterable):
"""takes the logical conjunction of a iterable of boolean values"""
# The built-in all function does the same thing as iand
return all(iterable)
def iall(f, iterable):
"""returns True if all elements of the list satisfy the predicate,
and False otherwise."""
return iand(imap(f, iterable))
def ior(iterable):
"""takes the logical disjunction of a iterable of boolean values"""
# The built-in any function does the same thing as ior
return any(iterable)
def iany(f, iterable):
"""returns True if any of the elements of the list satisfy the
predicate, and False otherwise"""
return ior(imap(f, iterable))
def iadd(iterable):
return reduce(add, iterable)
def itake(n, iterable):
return islice(iterable, n)
def ifirst(iterable):
return iter(iterable).next()
def irest(iterable):
return idrop(1, iterable)
def idrop(n, iterable):
return itertools.islice(iterable, n, None)
def isplit_at(i, iterable):
yield itake(i, iterable)
yield iterable
def izip_with(f, iterable1, iterable2):
return imap(f, iterable1, iterable2)
def ichain(iterables):
return itertools.chain.from_iterable(iterables)
def igroupby(keyfunc, iterable):
return itertools.groupby(iterable, keyfunc)
def ichunk(size, iterable, fillvalue=undefined):
args = [iter(iterable)] * size
if fillvalue is undefined:
return izip(*args)
else:
return izip_longest(fillvalue=fillvalue, *args)
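# Illustrative sketch (not in the original source): ichunk groups an
# iterable into fixed-size tuples.
#
#   list(ichunk(2, [1, 2, 3, 4]))            # -> [(1, 2), (3, 4)]
#   list(ichunk(2, [1, 2, 3], fillvalue=0))  # -> [(1, 2), (3, 0)]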
####
## Reducers
####
def rsorted(keyfunc, iterable, **kwargs):
return sorted(iterable, key=keyfunc, **kwargs)
####
## Partials
####
####
## Predicates
####
def even(x):
return mod(x, 2) == 0
odd = c(not_, even)
def is_none(x):
return x is None
not_none = c(not_, is_none)
|
Python
| 0.99962
|
@@ -398,19 +398,20 @@
f case(*
-arg
+rule
s):%0A
@@ -479,19 +479,20 @@
if len(
-arg
+rule
s) == 1:
@@ -500,26 +500,28 @@
-args = arg
+rules = rule
s%5B0%5D%0A
@@ -612,19 +612,20 @@
ven(len(
-arg
+rule
s)), %22un
@@ -661,21 +661,31 @@
f inner(
-value
+*args, **kwargs
):%0A
@@ -712,19 +712,20 @@
hunk(2,
-arg
+rule
s):%0A
@@ -766,37 +766,47 @@
return f(
-value
+*args, **kwargs
)%0A el
@@ -813,21 +813,31 @@
if pred(
-value
+*args, **kwargs
):%0A
@@ -856,21 +856,31 @@
eturn f(
-value
+*args, **kwargs
)%0A
|
7adeb5e668a132ab540fa45c8e6c62cb8481930d
|
fix infinite recursion
|
fluff/sync_couchdb.py
|
fluff/sync_couchdb.py
|
from django.db.models import signals
import os
from couchdbkit.ext.django.loading import get_db
from pillowtop.utils import import_pillows
from dimagi.utils.couch.sync_docs import sync_design_docs as sync_docs
FLUFF = 'fluff'
def sync_design_docs(temp=None):
dir = os.path.abspath(os.path.dirname(__file__))
for pillow in import_pillows(instantiate=False):
if hasattr(pillow, 'indicator_class'):
app_label = pillow.indicator_class._meta.app_label
print 'fluff sync: %s' % app_label
db = get_db(app_label)
sync_docs(db, os.path.join(dir, "_design"), FLUFF, temp=temp)
def catch_signal(app, **kwargs):
"""Function used by syncdb signal"""
app_name = app.__name__.rsplit('.', 1)[0]
app_label = app_name.split('.')[-1]
if app_label == FLUFF:
sync_design_docs()
def copy_designs(temp='tmp', delete=True):
for pillow in import_pillows(instantiate=False):
if hasattr(pillow, 'indicator_class'):
app_label = pillow.indicator_class._meta.app_label
db = get_db(app_label)
copy_designs(db, FLUFF)
signals.post_syncdb.connect(catch_signal)
|
Python
| 0.00056
|
@@ -159,45 +159,15 @@
ouch
-.sync_docs import sync_design_docs as
+ import
syn
@@ -446,55 +446,8 @@
bel%0A
- print 'fluff sync: %25s' %25 app_label%0A
@@ -498,16 +498,33 @@
ync_docs
+.sync_design_docs
(db, os.
@@ -1039,16 +1039,26 @@
+sync_docs.
copy_des
|
9cc7218f2eef7135e5402a47c2783def31add9f3
|
save screenshot in 800x480 too
|
screenshots.py
|
screenshots.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
from PIL import Image, ImageFile
from shovel import task
from meta.utils import path_meta, path_generated, depends
ImageFile.MAXBLOCK = 2**20
def save(image, filename):
image.save(filename, "JPEG", quality=98, optimize=True, progressive=True)
@task
def retina_resize():
for filename in path_meta().files("screen-*.png"):
image = Image.open(filename)
if image.size != (2048, 1580):
continue
resized = image.resize((1024, 790), Image.ANTIALIAS)
resized.save(filename, filename.ext[1:].upper())
@task
def export():
depends("meta.pxm.export")
depends("meta.screenshots.retina_resize")
for filename in path_meta().files("screen-*.png"):
image = Image.open(filename)
# crop
width, height = image.size
box = (0, height - 768, width, height)
cropped = image.crop(box)
# overlay
name = "".join(filename.namebase.split("-")[1:])
overlayfile = path_meta() / "overlay-" + name + ".png"
if overlayfile.exists():
overlay = Image.open(overlayfile)
cropped.paste(overlay, None, overlay)
# save
for x, y in ((1024, 768), (960, 640), (1136, 640), (1280, 720)):
resized = cropped.resize((x, y), Image.ANTIALIAS)
savename = "screen-" + name + "-" + str(x) + "x" + str(y) + ".jpg"
save(resized, path_generated() / savename)
|
Python
| 0
|
@@ -1275,16 +1275,28 @@
4, 768),
+ (800, 480),
(960, 6
|
eba4f4afe1eac7284518b073f43078b4cb7d7f6c
|
Update search.py (#5409)
|
frappe/desk/search.py
|
frappe/desk/search.py
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# Search
from __future__ import unicode_literals
import frappe, json
from frappe.utils import cstr, unique
from frappe import _
from six import string_types
# this is called by the Link Field
@frappe.whitelist()
def search_link(doctype, txt, query=None, filters=None, page_length=20, searchfield=None):
search_widget(doctype, txt, query, searchfield=searchfield, page_length=page_length, filters=filters)
frappe.response['results'] = build_for_autosuggest(frappe.response["values"])
del frappe.response["values"]
# this is called by the search box
@frappe.whitelist()
def search_widget(doctype, txt, query=None, searchfield=None, start=0,
page_length=10, filters=None, filter_fields=None, as_dict=False):
if isinstance(filters, string_types):
filters = json.loads(filters)
meta = frappe.get_meta(doctype)
if not searchfield:
searchfield = "name"
standard_queries = frappe.get_hooks().standard_queries or {}
if query and query.split()[0].lower()!="select":
# by method
frappe.response["values"] = frappe.call(query, doctype, txt,
searchfield, start, page_length, filters, as_dict=as_dict)
elif not query and doctype in standard_queries:
# from standard queries
search_widget(doctype, txt, standard_queries[doctype][0],
searchfield, start, page_length, filters)
else:
if query:
frappe.throw(_("This query style is discontinued"))
# custom query
# frappe.response["values"] = frappe.db.sql(scrub_custom_query(query, searchfield, txt))
else:
if isinstance(filters, dict):
filters_items = filters.items()
filters = []
for f in filters_items:
if isinstance(f[1], (list, tuple)):
filters.append([doctype, f[0], f[1][0], f[1][1]])
else:
filters.append([doctype, f[0], "=", f[1]])
if filters==None:
filters = []
or_filters = []
# build from doctype
if txt:
search_fields = ["name"]
if meta.title_field:
search_fields.append(meta.title_field)
if meta.search_fields:
search_fields.extend(meta.get_search_fields())
for f in search_fields:
fmeta = meta.get_field(f.strip())
if f == "name" or (fmeta and fmeta.fieldtype in ["Data", "Text", "Small Text", "Long Text",
"Link", "Select", "Read Only", "Text Editor"]):
or_filters.append([doctype, f.strip(), "like", "%{0}%".format(txt)])
if meta.get("fields", {"fieldname":"enabled", "fieldtype":"Check"}):
filters.append([doctype, "enabled", "=", 1])
if meta.get("fields", {"fieldname":"disabled", "fieldtype":"Check"}):
filters.append([doctype, "disabled", "!=", 1])
# format a list of fields combining search fields and filter fields
fields = get_std_fields_list(meta, searchfield or "name")
if filter_fields:
fields = list(set(fields + json.loads(filter_fields)))
formatted_fields = ['`tab%s`.`%s`' % (meta.name, f.strip()) for f in fields]
# find relevance as location of search term from the beginning of string `name`. used for sorting results.
formatted_fields.append("""locate("{_txt}", `tab{doctype}`.`name`) as `_relevance`""".format(
_txt=frappe.db.escape((txt or "").replace("%", "")), doctype=frappe.db.escape(doctype)))
# In order_by, `idx` gets second priority, because it stores link count
from frappe.model.db_query import get_order_by
order_by_based_on_meta = get_order_by(doctype, meta)
order_by = "if(_relevance, _relevance, 99999), `tab{0}`.idx desc, {1}".format(doctype, order_by_based_on_meta)
values = frappe.get_list(doctype,
filters=filters, fields=formatted_fields,
or_filters = or_filters, limit_start = start,
limit_page_length=page_length,
order_by=order_by,
ignore_permissions = True if doctype == "DocType" else False, # for dynamic links
as_list=not as_dict)
# remove _relevance from results
if as_dict:
for r in values:
r.pop("_relevance")
frappe.response["values"] = values
else:
frappe.response["values"] = [r[:-1] for r in values]
def get_std_fields_list(meta, key):
# get additional search fields
sflist = meta.search_fields and meta.search_fields.split(",") or []
title_field = [meta.title_field] if (meta.title_field and meta.title_field not in sflist) else []
sflist = ['name'] + sflist + title_field
if not key in sflist:
sflist = sflist + [key]
return sflist
def build_for_autosuggest(res):
results = []
for r in res:
out = {"value": r[0], "description": ", ".join(unique(cstr(d) for d in r if d)[1:])}
results.append(out)
return results
def scrub_custom_query(query, key, txt):
if '%(key)s' in query:
query = query.replace('%(key)s', key)
if '%s' in query:
query = query.replace('%s', ((txt or '') + '%'))
return query
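# Illustrative sketch (hypothetical query string): how the placeholders
# above are filled in before execution.
#
#   scrub_custom_query("select name from tabItem where %(key)s like %s",
#       "item_name", "Wid")
#   # -> "select name from tabItem where item_name like Wid%"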
|
Python
| 0.000001
|
@@ -3495,15 +3495,20 @@
99),
+ %7B0%7D,
%60tab%7B
-0
+1
%7D%60.i
@@ -3518,13 +3518,8 @@
desc
-, %7B1%7D
%22.fo
@@ -3523,25 +3523,16 @@
.format(
-doctype,
order_by
@@ -3545,16 +3545,25 @@
_on_meta
+, doctype
)%0A%0A%09%09%09va
|
bcb1c8d48532159f76708bdfd0e6868dbda92343
|
make sure command processes run in test database when needed
|
freppledb/__init__.py
|
freppledb/__init__.py
|
r'''
A Django project implementing a web-based user interface for frePPLe.
'''
VERSION = '4.5.0'
def runCommand(taskname, *args, **kwargs):
'''
  Auxiliary method to run a Django command. It is intended to be used
as a target for the multiprocessing module.
The code is put here, such that a child process loads only
a minimum of other python modules.
'''
# Initialize django
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "freppledb.settings")
import django
django.setup()
# Be sure to use the correct default database
from django.db import DEFAULT_DB_ALIAS
from freppledb.common.middleware import _thread_locals
database = kwargs.get("database", DEFAULT_DB_ALIAS)
setattr(_thread_locals, 'database', database)
# Run the command
try:
from django.core import management
management.call_command(taskname, *args, **kwargs)
except Exception as e:
taskid = kwargs.get("task", None)
if taskid:
from datetime import datetime
from freppledb.execute.models import Task
task = Task.objects.all().using(database).get(pk=taskid)
task.status = 'Failed'
now = datetime.now()
if not task.started:
task.started = now
task.finished = now
task.message = str(e)
task.processid = None
task.save(using=database)
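# Hypothetical usage sketch (not part of the original module): run a Django
# management command in a child process so the parent keeps a small import
# footprint. The task name 'loaddata' is illustrative only.
#
#   import multiprocessing
#   p = multiprocessing.Process(target=runCommand, args=('loaddata',),
#                               kwargs={'database': 'default'})
#   p.start()
#   p.join()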
|
Python
| 0
|
@@ -538,16 +538,8 @@
ect
-default
data
@@ -583,16 +583,29 @@
DB_ALIAS
+, connections
%0A from
@@ -708,16 +708,16 @@
_ALIAS)%0A
-
setatt
@@ -755,16 +755,210 @@
atabase)
+%0A if 'FREPPLE_TEST' in os.environ:%0A from django.conf import settings%0A connections%5Bdatabase%5D.close()%0A settings.DATABASES%5Bdatabase%5D%5B'NAME'%5D = settings.DATABASES%5Bdatabase%5D%5B'TEST'%5D%5B'NAME'%5D
%0A%0A # Ru
|
0ec9feb9daf45af6c8ed8aa48afe68f698892748
|
version bump for 0.7.3.
|
ftr/version.py
|
ftr/version.py
|
version = '0.7.2'
|
Python
| 0
|
@@ -12,8 +12,8 @@
0.7.
-2
+3
'%0A%0A
|
b56145ad1aebd931ca0e741ea7d4315520e6ed40
|
Tweak version incrementer
|
release.py
|
release.py
|
from os import fdopen, remove
from shutil import move
import subprocess
import sys
from tempfile import mkstemp
from geomeppy import __version__
def replace(file_path, pattern, subst):
# Create temp file
fh, abs_path = mkstemp()
with fdopen(fh, 'w') as new_file:
with open(file_path) as old_file:
for line in old_file:
new_file.write(line.replace(pattern, subst))
# Remove original file
remove(file_path)
# Move new file
move(abs_path, file_path)
def main(increment):
    # check we're on the develop branch
assert b'* develop' in subprocess.check_output(['git', 'branch']), 'Not on develop branch'
# check we're up-to-date
status = subprocess.check_output(['git', 'status'])
assert b'modified' not in status, 'Repository contains modified files'
assert b'Untracked' not in status, 'Repository contains untracked files'
# increment version
version = __version__
major, minor, patch = version.split('.')
version = [int(i) for i in version.split('.')]
version[increment] += 1
for i in range(len(version)):
if i == increment:
version[i] += 1
if i > increment:
version[i] = 1
new_version = '%d.%d.%d' % (major, minor, int(patch) + 1)
replace('geomeppy/__init__.py', version, new_version)
replace('setup.py', "version='%s'" % version, "version='%s'" % new_version)
replace('setup.py', "tarball/v%s" % version, "tarball/v%s" % new_version)
try:
# add and commit changes
print(subprocess.check_output(['git', 'add', 'geomeppy/__init__.py']))
print(subprocess.check_output(['git', 'add', 'setup.py']))
print(subprocess.check_output(['git', 'add', 'README.rst']))
print(subprocess.check_output(['git', 'commit', '-m', 'release/%s' % new_version]))
except Exception as e:
# rollback
print('rolling back')
print(e)
replace('geomeppy/__init__.py', new_version, version)
replace('setup.py', new_version, version)
exit()
try:
# push the changes
print(subprocess.check_output(['git', 'push', 'origin', 'develop', '-f']))
# create a tagged release
print(subprocess.check_output(['git', 'tag', 'release/%s' % new_version, '-m', 'v%s' % new_version]))
# push to github
print(subprocess.check_output(['git', 'push', 'origin', 'release/%s' % new_version, '-f']))
except Exception as e:
# rollback
print('rolling back tag')
print(e)
# delete the tagged release
print(subprocess.check_output(['git', 'tag', '-d', 'release/%s' % new_version, 'v%s' % new_version]))
# push to github
print(subprocess.check_output(
['git', 'push', 'origin', ':refs/tags/release/%s' % new_version, 'v%s' % new_version])
)
# from here, the Travis CI magic begins
if __name__ == '__main__':
args = sys.argv[1:]
VERSION = ['major', 'minor', 'patch']
try:
increment = VERSION.index(sys.argv[1])
except ValueError:
print('%s is not a valid semantic version level (use major, minor, or patch)' % sys.argv[1])
except IndexError:
# default
increment = VERSION.index('patch')
main(increment)
|
Python
| 0
|
@@ -949,53 +949,12 @@
-major, minor, patch = version.split('.')%0A
+new_
vers
@@ -1000,36 +1000,8 @@
')%5D%0A
- version%5Bincrement%5D += 1%0A
@@ -1019,16 +1019,20 @@
nge(len(
+new_
version)
@@ -1065,32 +1065,36 @@
nt:%0A
+new_
version%5Bi%5D += 1%0A
@@ -1127,24 +1127,28 @@
+new_
version%5Bi%5D =
@@ -1174,49 +1174,43 @@
= '
-%25d.%25d.%25d' %25 (major, minor, int(patch) + 1
+.'.join(str(v) for v in new_version
)%0A
|
720c841d0930f73d1efe90518b0a2d9dcbd6425d
|
Document context
|
funktional/context.py
|
funktional/context.py
|
import sys
from contextlib import contextmanager
training = False
@contextmanager
def context(**kwargs):
current = dict((k, getattr(sys.modules[__name__], k)) for k in kwargs)
for k,v in kwargs.items():
setattr(sys.modules[__name__], k, v)
yield
for k,v in current.items():
setattr(sys.modules[__name__], k, v)
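# Illustrative sketch (not in the original source): flags revert to their
# previous values when the block exits.
#
#   with context(training=True):
#       pass  # code here observes training == True
#   # afterwards, training is False again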
|
Python
| 0.000004
|
@@ -43,16 +43,47 @@
anager%0A%0A
+# Are we training (or testing)%0A
training
@@ -132,16 +132,170 @@
wargs):%0A
+ %22%22%22Temporarily change the values of context variables passed.%0A%0A Enables the %60with%60 syntax:%0A%0A %3E%3E%3E with context(training=True):%0A ... %0A %22%22%22%0A
curr
|
55987e48997f7f5a94adc3c53fcb8ae58e672c3c
|
increase version number
|
gdc_client/version.py
|
gdc_client/version.py
|
__version__ = 'v1.3.0'
|
Python
| 0.00036
|
@@ -15,9 +15,9 @@
'v1.
-3
+4
.0'%0A
|
ed7f0e555b438b611f4a9b0fdf6de1fca6ec2914
|
fix incorrect use of str replace
|
genSongbook.py
|
genSongbook.py
|
#!/usr/bin/python
import sys, os
def query(question, default):
sys.stdout.write(question + " [" + default + "] ? ")
choice = raw_input()
if choice == '':
return default
return choice
if __name__ == '__main__':
print("----------------------")
print("Welcome to genSongbook")
print("----------------------")
# Query song directory path string
songDirectory = query("Please specify the path of the input song directory","/opt/Dropbox/lyrics/english")
print("Will use song directory: " + songDirectory)
# Query template file path string
templateFile = query("Please specify the path of the template file","template/english.tex")
print("Will use template file: " + templateFile)
print("----------------------")
templateFileFd = open(templateFile, 'r')
s = templateFileFd.read()
#sys.stdout.write(s) #-- Screen output for debugging.
rep = ""
for dirname, dirnames, filenames in os.walk( songDirectory ):
for filename in sorted(filenames):
rep += "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"
name, extension = os.path.splitext(filename)
rep += "\\chapter{" + name + "}\n" #-- Note that we use \\ instead of \.
rep += "\\begin{verbatim}\n"
song = open( os.path.join(dirname, filename) )
rep += song.read()
rep += "\\end{verbatim}\n"
rep += "\n"
s.replace("genSongbook",rep)
outFd = open("out.tex", 'w')
outFd.write(s)
|
Python
| 0.000014
|
@@ -1456,20 +1456,85 @@
+= %22%5Cn%22%0A
-%0A
+ #sys.stdout.write(rep) #-- Screen output for debugging.%0A%0A s =
s.repla
|
c7067fce8723f810ed48de6513c6f756d499d807
|
add whitelist tags.
|
dp_tornado/helper/html.py
|
dp_tornado/helper/html.py
|
# -*- coding: utf-8 -*-
from dp_tornado.engine.helper import Helper as dpHelper
try:
# py 2.x
import HTMLParser
html_parser = HTMLParser.HTMLParser()
except:
# py 3.4-
try:
import html.parser
html_parser = html.parser.HTMLParser()
except:
# py 3.4+
import html as html_parser
try:
import htmltag
except:
htmltag = None
import re
class HtmlHelper(dpHelper):
def strip_xss(self, html, whitelist=None, replacement='entities'):
if not htmltag:
raise Exception('htmltag library required.')
if whitelist is None:
whitelist = (
'a', 'abbr', 'aside', 'audio', 'bdi', 'bdo', 'blockquote', 'canvas',
'caption', 'code', 'col', 'colgroup', 'data', 'dd', 'del',
'details', 'div', 'dl', 'dt', 'em', 'figcaption', 'figure', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'ins', 'kbd', 'li',
'mark', 'ol', 'p', 'pre', 'q', 'rp', 'rt', 'ruby', 's', 'samp',
'small', 'source', 'span', 'strong', 'sub', 'summary', 'sup',
'table', 'td', 'th', 'time', 'tr', 'track', 'u', 'ul', 'var',
'video', 'wbr', 'b')
return htmltag.strip_xss(html, whitelist, replacement)
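    # Illustrative sketch (exact output depends on the htmltag library):
    # tags outside the whitelist are neutralized rather than kept.
    #
    #   helper.strip_xss('<b>hi</b><script>alert(1)</script>')
    #   # <b> survives; <script> is rewritten per the 'entities' replacement mode.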
def strip_tags(self, text):
return re.sub('<[^<]+?>', '', text)
def entity_decode(self, text):
return html_parser.unescape(text)
|
Python
| 0
|
@@ -1228,16 +1228,38 @@
br', 'b'
+, 'br', 'site', 'font'
)%0A%0A
|
ab6d09c93a9d43ffbf442880633170f5fc678edd
|
add verbose mode to print the module being processed
|
get_modules.py
|
get_modules.py
|
#!/usr/bin/env python3
import os
import sys
import requests
import yaml
import git
import svn.remote
import zipfile
import argparse
def get_modules(yml_file, dest):
f = open(yml_file)
for data in yaml.load(f):
if (not dest.endswith('/')):
dest = dest + '/'
if not 'version' in data:
version = None
else:
version = data['version']
download_module(data['url'], dest, data['name'], data['type'], version)
f.close()
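# Illustrative modules file sketch (field names inferred from get_modules
# above; all values are hypothetical):
#
#   - name: mymodule
#     url: https://github.com/example/mymodule.git
#     type: git
#     version: v1.0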
def download_module(src, dest, name, type, version):
if os.path.exists(dest + name):
return
if type == 'git':
download_git(src, dest + name, version)
elif type == 'svn':
download_svn(src, dest + name, version)
elif type == 'zip':
download_zip(src, dest, name)
def download_git(src, dest, version):
if version is None:
git.Repo.clone_from(src, dest)
else:
git.Repo.clone_from(src, dest, branch=version)
def download_svn(src, dest, version):
r = svn.remote.RemoteClient(src)
r.checkout(dest)
def download_zip(src, dest, name):
filename = download_file(src, dest)
zfile = zipfile.ZipFile(filename, "r")
zfile.extractall(dest)
os.rename(dest+zfile.namelist()[0].split("/")[0], dest+name)
os.remove(filename)
def download_file(url, destdir):
filename = destdir + url.split('/')[-1]
r = requests.get(url, stream=True)
with open(filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
return filename
def create_argparser():
parser = argparse.ArgumentParser()
parser.add_argument('modules',
help='list of modules to download')
parser.add_argument('dest_dir',
help='dest directory to save modules')
return parser
if __name__ == '__main__':
args = create_argparser().parse_args()
get_modules(args.modules, args.dest_dir)
|
Python
| 0.000001
|
@@ -157,16 +157,25 @@
le, dest
+, verbose
):%0A f
@@ -476,24 +476,33 @@
e'%5D, version
+, verbose
)%0A f.clos
@@ -548,32 +548,41 @@
e, type, version
+, verbose
):%0A if os.pat
@@ -612,22 +612,257 @@
-return
+if verbose: print(name + ' already exist')%0A return%0A%0A if verbose and version is not None:%0A print('download ' + name + ':' + version + ' (' + type + ')')%0A elif verbose:%0A print('download ' + name + ' (' + type + ')')%0A
%0A if
@@ -2124,16 +2124,157 @@
odules')
+%0A parser.add_argument('-V', '--verbose',%0A action='store_true',%0A help='show verbose message')
%0A%0A re
@@ -2399,10 +2399,24 @@
dest_dir
+, args.verbose
)%0A
|
ad70f37b4a02ba117a91dcbdf5387ade2cbdfcf5
|
Change petset to stateful set
|
e2e/tests/test_volumes.py
|
e2e/tests/test_volumes.py
|
from clickclick import fatal_error
from .helpers import PETSET_PATH, SECRET_PATH, create_resource, wait_for_pod
def test_volumes(run_id, url, token):
secret_manifest = '''
apiVersion: v1
kind: Secret
metadata:
name: &cluster_name spilodemo
labels:
application: spilo
spilo-cluster: *cluster_name
type: Opaque
data:
superuser-password: emFsYW5kbw==
replication-password: cmVwLXBhc3M=
admin-password: YWRtaW4=
'''
create_resource(secret_manifest, url + SECRET_PATH, token)
manifest = '''
apiVersion: apps/v1alpha1
kind: PetSet
metadata:
name: &cluster_name spilodemo
labels:
application: spilo
spilo-cluster: *cluster_name
spec:
replicas: 3
serviceName: *cluster_name
template:
metadata:
labels:
application: spilo
spilo-cluster: *cluster_name
annotations:
pod.alpha.kubernetes.io/initialized: "true"
spec:
containers:
- name: *cluster_name
image: registry.opensource.zalan.do/acid/spilotest-9.6:1.1-p10 # put the spilo image here
imagePullPolicy: Always
ports:
- containerPort: 8008
protocol: TCP
- containerPort: 5432
protocol: TCP
volumeMounts:
- mountPath: /home/postgres/pgdata
name: pgdata
env:
- name: ETCD_HOST
value: 'etcd.default.svc.cluster.local:2379' # where is your etcd?
- name: POD_IP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.podIP
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: PGPASSWORD_SUPERUSER
valueFrom:
secretKeyRef:
name: *cluster_name
key: superuser-password
- name: PGPASSWORD_ADMIN
valueFrom:
secretKeyRef:
name: *cluster_name
key: admin-password
- name: PGPASSWORD_STANDBY
valueFrom:
secretKeyRef:
name: *cluster_name
key: replication-password
- name: SCOPE
value: *cluster_name
- name: PGROOT
value: /home/postgres/pgdata/pgroot
terminationGracePeriodSeconds: 0
volumes:
- name: pgdata
emptyDir: {}
volumeClaimTemplates:
- metadata:
labels:
application: spilo
spilo-cluster: *cluster_name
annotations:
volume.beta.kubernetes.io/storage-class: standard
name: pgdata
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
'''
create_resource(manifest, url + PETSET_PATH, token)
for i in range(3):
available = wait_for_pod('spilodemo-{}'.format(i), url, token)
if not available:
fatal_error('e2e test for volumes failed')
|
Python
| 0.000002
|
@@ -534,20 +534,19 @@
apps/v1
-alph
+bet
a1%0Akind:
@@ -550,11 +550,16 @@
nd:
-Pet
+Stateful
Set%0A
@@ -823,79 +823,8 @@
ame%0A
- annotations:%0A pod.alpha.kubernetes.io/initialized: %22true%22%0A
|
3c008b66e71a287b29dca6e753094fbba6228660
|
fix silly name thing
|
app/mim/routes.py
|
app/mim/routes.py
|
import os
from flask import Flask, redirect, render_template, request, session, flash, url_for
from flask_bcrypt import Bcrypt
# from flask.ext.csrf import csrf
from pymongo.errors import DuplicateKeyError
from models import *
import core
import mendeley_api
import util
from . import RegistrationForm
# from mim import flask_app
flask_app = Flask(__name__)
if 'SECRET_KEY' in os.environ:
flask_app.secret_key = os.environ['SECRET_KEY']
else:
flask_app.secret_key = "LOCAL"
bcrypt = Bcrypt(flask_app)
# csrf(flask_app)
# def login_required(f):
# @wraps(f)
# def wrap(*args, **kwargs):
# if 'logged_in' in session:
# return f(*args, **kwargs)
# else:
# flash("You need to login first")
# return redirect(url_for('login'))
#
# return wrap
@flask_app.route('/')
def index():
if 'token' not in session:
return redirect(url_for('login'))
rec = core.get_random()
name = session.get("name", "friend")
classes = {
"interesting": "button-primary",
"uninteresting": "",
"next": ""
}
return render_template('index.html',
rec=rec,
name=name,
classes=classes)
@flask_app.route('/record', methods=['GET', 'POST'])
def record():
try:
if request.method == "POST":
doc = {
"id": request.form["id"],
"url": request.form["url"],
"content_type": request.form["type"],
"title": request.form["title"],
}
opinion = {
"user": session["email"],
"opinion": util.get_opinion_value(request.form["opinion"]),
"rec_id": request.form["id"]
}
try:
user_history.insert({"content": doc,
"opinion": opinion})
except Exception, e:
flash(e.message)
flash("Thanks for your feedback!", "opinion")
except:
flash("Something went wrong and we couldn't record your response.", "error")
return redirect(url_for('index'))
@flask_app.route('/history')
def history():
if "token" not in session:
return redirect(url_for('login'))
name = session.get("name", "friend")
docs = user_history.find({"user": session["email"]})
return render_template('history.html', name=name, docs=docs)
@flask_app.route("/document")
def get_document():
if "token" not in session:
return redirect(url_for("login"))
mendeley_session = mendeley_api.get_session_from_cookies()
document_id = request.args.get("document_id")
doc = mendeley_session.documents.get(document_id)
return render_template("metadata.html", doc=doc)
@flask_app.route('/search')
def metadata_lookup():
if 'token' not in session:
return redirect(url_for('login'))
mendeley_session = mendeley_api.get_session_from_cookies()
doi = request.args.get('doi')
doc = mendeley_session.catalog.by_identifier(doi=doi)
return render_template("history.html", doc=doc)
@flask_app.route("/download")
def download():
if "token" not in session:
return redirect(url_for("login"))
mendeley_session = mendeley_api.get_session_from_cookies()
document_id = request.args.get("document_id")
doc = mendeley_session.documents.get(document_id)
doc_file = doc.files.list().items[0]
return redirect(doc_file.download_url)
@flask_app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
try:
# this_user = User.objects.get(email=request.form['username'])
this_user = users.find_one({"email": request.form["email"]})
if request.form["email"] != this_user["email"]:
error = "Invalid username."
elif bcrypt.check_password_hash(this_user["password"], request.form['password']) is False:
error = 'Invalid password'
else:
session["logged_in"] = True
session["token"] = util.generate_key()
session["email"] = this_user["email"]
session["name"] = this_user["name"]
flash("Logged in!")
return redirect(url_for('index'))
except Exception as e:
flash("That's not quite right. Try that username and password one more time?", "error")
return render_template('login.html', error=error)
# @login_required
@flask_app.route('/logout')
def logout():
session.pop('token', None)
session["logged_in"] = False
session.clear()
flash("You have been logged out!")
return redirect(url_for('login'))
@flask_app.route('/register', methods=["GET", "POST"])
def register():
try:
form = RegistrationForm(request.form)
if request.method == "POST" and form.validate():
email = form.email.data
password = bcrypt.generate_password_hash(str(form.password.data), 10)
name = form.name.data.capitalize()
gender = form.gender.data
year_of_birth = util.get_year(form.age.data)
tos_check_date = util.get_today()
# check username for duplicate
try:
result = users.insert_one(
{
"email": email,
"password": password,
"name": name,
"gender": gender,
"yob": year_of_birth,
"tos": tos_check_date
}
)
except DuplicateKeyError, e:
flash("That username is already taken, please choose another.", "error")
return render_template('register.html', form=form)
# No exception is good...
flash("Thanks for registering!")
session['logged_in'] = True
session["token"] = util.generate_key()
session['username'] = email
session['this_user'] = name
return redirect(url_for('index'))
return render_template("register.html", form=form)
except Exception as e:
flash(e.message, "error")
return render_template('register.html', form=form)
# can leave this in probably...
if __name__ == '__main__':
flask_app.run(debug=True)
|
Python
| 0.998391
|
@@ -6146,25 +6146,20 @@
ession%5B'
-this_user
+name
'%5D = nam
|
740cf4e1a25533b4d3279a17e23b1ff9f6c13006
|
Update Watchers.py
|
examples/Watchers.py
|
examples/Watchers.py
|
Import openpyxl
from seleniumbase import BaseCase
los = []
url = 'https://stocktwits.com/symbol/'
workbook = openpyxl.load_workbook('Test.xlsx')
worksheet = workbook.get_sheet_by_name(name = 'Sheet1')
for col in worksheet['A']:
los.append(col.value)
los2 = []
print(los)
class MyTestClass(BaseCase):
#for i in los:
# stocksite = url +i + '?q=' +i
#driver.get(stocksite)
#driver.find_element_by_id('sentiment-tab').click()
#Bullish = driver.find_elements_by_css_selector('span.bullish:nth-child(1)')
#Sentiment = [x.text for x in Bullish]
#los2.append(Sentiment[0])
|
Python
| 0.000001
|
@@ -1,20 +1,4 @@
-Import openpyxl%0A
from
@@ -32,329 +32,114 @@
se%0A%0A
-los = %5B%5D%0Aurl = 'https://stocktwits.com/symbol/'%0Aworkbook = openpyxl.load_workbook('Test.xlsx')%0Aworksheet = workbook.get_sheet_by_name(name = 'Sheet1') %0Afor col in worksheet%5B'A'%5D
+%0Aclass MyTestClass(BaseCase)
:%0A
+%0A
-los.append(col.value)%0Alos2 = %5B%5D%0Aprint(los)%0A%0Aclass MyTestClass(BaseCase):%0A%0A%0A%0A#for i in los:%0A
+def test_basic(self):%0A
-#
s
-tocksite = url +i + '?q=' +i %0A
+elf.open('stockstwits.com')
-%0A
#
driv
@@ -138,58 +138,61 @@
#
-driver.get(stocksite) %0A #driver.find
+ Navigate to the web page%0A self.assert
_element
_by_
@@ -191,14 +191,8 @@
ment
-_by_id
('se
@@ -208,182 +208,118 @@
ab')
-.click()%0A #Bullish = driver.find_elements_by_css_selector('span.bullish:nth-child(1)')%0A #Sentiment = %5Bx.text for x in Bullish%5D%0A #los2.append(Sentiment%5B0%5D)%0A
+ # Assert element on page%0A self.click('sentiment-tab') # Click element on page%0A
|
3462a4755eac0ea74b9c90f867e769c47504c5bd
|
add license to top of __init__ in examples
|
examples/__init__.py
|
examples/__init__.py
|
Python
| 0.000007
|
@@ -0,0 +1,768 @@
+# Licensed to the Cloudkick, Inc under one or more%0A# contributor license agreements. See the NOTICE file distributed with%0A# this work for additional information regarding copyright ownership.%0A# libcloud.org licenses this file to You under the Apache License, Version 2.0%0A# (the %22License%22); you may not use this file except in compliance with%0A# the License. You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A
|
|
5f9e04c77c2bf1a77dd078438869882432c411a4
|
Add parameter +1
|
app/remote_rpc.py
|
app/remote_rpc.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by follow on 2016/10/28
from flask import Flask
from flaskext.xmlrpc import XMLRPCHandler, Fault
import jenkins
import time
import base64
import commands
# from jinja2.nodes import Output
import os
# import MySQLdb
import re
app = Flask(__name__)
handler = XMLRPCHandler('api')
handler.connect(app, '/api')
j = jenkins.Jenkins("http://127.1:8080", 'rpcuser', '2266bcc74441b07e9c50ba468a620199')
manager_host = '10.1.2.49'
package_root = "/opt/scm-manager/wars"
@handler.register
def Hello(name='follow'):
if not name:
raise Fault('WTF')
return "hello %s" % name
@handler.register
def GetInfo():
data = j.get_info()
print data
return data
@handler.register
def GetQueueInfo():
data = j.get_queue_info()
print data
return data
@handler.register
def GetJobs():
data = j.get_jobs()
print data
return data
@handler.register
def GetBuildConsoleOutput(name, number):
data = j.get_build_console_output(name, number)
d1 = base64.b64encode(data)
print d1
return d1
@handler.register
def GetJobInfo(n):
data = j.get_job_info(n)
return str(data)
@handler.register
def BuildJob(n):
data = j.build_job(n)
return data
# @handler.register
# def CleanTheMess():
# db = MySQLdb.connect("10.168.2.125", "svn", "svnpassword", "svntool")
# cursor = db.cursor()
# sql = "update scm_projectstatus set status=0,approve_status=0 where status > 0 and approve_status = 3"
# sql2 = "delete from scm_proj_with_user where projectid=(select projectid from scm_projectstatus where status > 0 and approve_status = 3)"
# cursor.execute(sql)
# cursor.execute(sql2)
# db.commit()
# db.close()
# return
@handler.register
def DoCmd(operate, system):
print "cmd is %s" % operate
tomcat_port = "28080" if system == 'cnshipping' else "8080"
tomcat_root = "/home/cscm/apache-tomcat-7.0.39" if system == 'cnshipping' else "/home/scm/apache-tomcat-7.0.39"
if operate == "start":
command = "su - scm -c {}/bin/startup.sh".format(tomcat_root)
status = [os.system(command), "nothing"]
con = False
while not con:
pidinfo = commands.getstatusoutput(
'netstat -nlp | grep {} | awk \'{{print $7}}\' | cut -d / -f 1'.format(tomcat_port))
print "pid is:%s" % str(pidinfo[1])
if not re.match("\d", pidinfo[1]):
print "sleep"
time.sleep(1)
else:
con = True
elif operate == "update":
command = "rm -fr {}/work/*; rm -fr {}/webapps/*; cp -a {}/*.war {}/webapps/scm.war".format(tomcat_root,
tomcat_root,
package_root,
tomcat_root)
time.sleep(2)
status = commands.getstatusoutput(command)
else:
pid = \
commands.getstatusoutput(
'netstat -nlp | grep {} | awk \'{{print $7}}\' | cut -d / -f 1'.format(tomcat_port))[1]
if re.match("\d", pid):
command = "kill -9 %s" % pid
status = commands.getstatusoutput(command)
print "start to kill %s" % pid
print status
time.sleep(2)
return status
@handler.register
def GetProcessInfo(system, ver):
status = {}
tomcat_port = "28080" if system == 'cnshipping' else "8080"
tomcat_root = "/home/cscm/apache-tomcat-7.0.39" if system == 'cnshipping' else "/home/scm/apache-tomcat-7.0.39"
pidinfo = commands.getstatusoutput(
'netstat -nlp | grep :{} | awk \'{{print $7}}\' | cut -d / -f 1'.format(tomcat_port))
status['qa_mtime'] = commands.getoutput(
'stat {}/webapps/scm.war | grep \'^Modify\' | cut -d " " -f 2-3 | cut -d . -f1'.format(tomcat_root))
status['newest_filename'] = commands.getoutput('ls /opt/scm-manager/wars/{}-{}/'.format(system, ver)).lstrip()
status['newest_mtime'] = commands.getoutput(
'stat /opt/scm-manager/wars/*.war | grep \'^Modify\' | cut -d " " -f 2-3 | cut -d . -f1')
status['load_info'] = commands.getoutput(' w |grep \'load\' | cut -d , -f 4,5,6')
if pidinfo[1] == "":
return status
else:
status['pid'] = pidinfo[1]
status['uptime'] = commands.getoutput('ps -p %s -o lstart | sed -n \'2p\'' % pidinfo[1])
status['mem_info'] = commands.getoutput("cat /proc/{}/status | grep RSS".format(pidinfo[1]))
return status
@handler.register
def GetBuildInfo(name, number):
data = j.get_build_info(name, number)
t1 = str(data['timestamp'])
duration = data['duration'] / 1000
t2 = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(t1[:10])))
data['timestamp'] = t2
data['duration'] = duration
data['changeSet'] = 'changeSet'
    return data  # xml-rpc can't handle long integers WTF
@handler.register
def DownloadPackage(path, filename):
file_url = "http://{}:{}/uploaded_file/{}?folder=SCM-{}".format(manager_host, "80", filename, path)
download_dir = "/opt/scm-manager/wars/{}".format(path)
download_command = "aria2c -s 2 -x 2 {} -d {} -D".format(file_url, download_dir)
print "Start to download file {}".format(download_command)
commands.getoutput("rm -f {}/*.war".format(download_dir))
output = commands.getoutput(download_command)
print output
return output
app.run('0.0.0.0', port=8085, debug=True)
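# Hypothetical client sketch (not part of this module): calling the RPC API
# registered above at /api with the stdlib xmlrpclib client.
#
#   import xmlrpclib
#   proxy = xmlrpclib.ServerProxy('http://127.0.0.1:8085/api')
#   print proxy.Hello('world')  # -> "hello world"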
|
Python
| 0.000116
|
@@ -4254,16 +4254,22 @@
er/wars/
+%7B%7D-%7B%7D/
*.war %7C
@@ -4321,16 +4321,36 @@
d . -f1'
+.format(system, ver)
)%0A st
|
87da5bcf5b11762605c60f57b3cb2019d458fcd3
|
Set version to v2.1.0a3
|
spacy/about.py
|
spacy/about.py
|
# inspired from:
# https://python-packaging-user-guide.readthedocs.org/en/latest/single_source_version/
# https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py
__title__ = 'spacy-nightly'
__version__ = '2.1.0a3.dev0'
__summary__ = 'Industrial-strength Natural Language Processing (NLP) with Python and Cython'
__uri__ = 'https://spacy.io'
__author__ = 'Explosion AI'
__email__ = 'contact@explosion.ai'
__license__ = 'MIT'
__release__ = False
__download_url__ = 'https://github.com/explosion/spacy-models/releases/download'
__compatibility__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json'
__shortcuts__ = 'https://raw.githubusercontent.com/explosion/spacy-models/master/shortcuts-v2.json'
|
Python
| 0.000041
|
@@ -223,13 +223,8 @@
.0a3
-.dev0
'%0A__
|
6ee6fbb0106d0688ce2f292b59569faa430e4904
|
change comment for N test
|
qutip/tests/test_random.py
|
qutip/tests/test_random.py
|
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
import scipy.sparse as sp
import scipy.linalg as la
import numpy as np
from numpy.testing import assert_equal, assert_, run_module_suite, assert_raises
from qutip.random_objects import (rand_ket, rand_dm, rand_herm, rand_unitary,
rand_ket_haar, rand_dm_hs,
rand_super, rand_unitary_haar, rand_dm_ginibre,
rand_super_bcsz)
from qutip.operators import qeye
from qutip.dimensions import dims_to_tensor_shape
def test_rand_unitary_haar_unitarity():
"""
Random Qobjs: Tests that unitaries are actually unitary.
"""
U = rand_unitary_haar(5)
I = qeye(5)
assert_(U * U.dag() == I)
def test_rand_dm_ginibre_rank():
"""
Random Qobjs: Ginibre-random density ops have correct rank.
"""
rho = rand_dm_ginibre(5, rank=3)
rank = sum([abs(E) >= 1e-10 for E in rho.eigenenergies()])
assert_(rank == 3)
def test_rand_super_bcsz_cptp():
"""
Random Qobjs: Tests that BCSZ-random superoperators are CPTP.
"""
S = rand_super_bcsz(5)
assert_(S.iscptp)
def check_func_dims(func, args, kwargs, dims):
# TODO: promote this out of test_random, as it's generically useful
# in writing tests.
resdims = func(*args, **kwargs).dims
assert_(resdims == dims, "Checking {}; expected dimensions of {}, got {}.".format(func.__name__, dims, resdims))
def check_func_shape(func, args, kwargs, shape):
resdims = func(*args, **kwargs).dims
assert_(dims_to_tensor_shape(resdims)==shape,"Checking {}; expected shape of {}, got {}.".format(func.__name__, shape, dims_to_tensor_shape(resdims)))
def check_func_N(func, args, kwargs, N):
new_state_shape=func(*args, **kwargs).shape
assert_(new_state_shape[0]==N,"Checking {}; expected dimensions of {}, got {}.".format(func.__name__, N, new_state_shape[0]))
def test_rand_vector_dims():
FUNCS = [rand_ket, rand_ket_haar]
for func in FUNCS:
# N or dims are not given
assert_raises(ValueError,check_func_dims,func,(),{},[])
# both N and dims (named argument) are specified
check_func_dims( func, (6, ), {'dims': [[2,3], [1,1]]}, [[2,3], [1,1]])
check_func_shape( func, (6, ), {'dims': [[2,3], [1,1]]}, (2,3,1,1))
check_func_N(func,(6, ),{'dims': [[2,3], [1,1]]},6)
# only N is specified and dims is defined via default
check_func_dims( func, (7, ), {}, [[7], [1]])
check_func_shape( func, (7, ), {}, (7,1))
check_func_N( func, (7, ), {}, 7)
# only dims is specified and N has to be determined
check_func_dims( func, (), {'dims': [[2,3], [1,1]]},[[2,3], [1,1]])
check_func_shape( func, (), {'dims': [[2,3], [1,1]]},(2,3,1,1))
check_func_N( func, (), {'dims': [[2,3], [1,1]]},6)
def test_rand_oper_dims():
FUNCS = [rand_unitary, rand_herm, rand_dm, rand_unitary_haar, rand_dm_ginibre, rand_dm_hs]
for func in FUNCS:
check_func_dims( func, (7, ), {}, [[7], [7]])
check_func_dims( func, (6, ), {'dims': [[2, 3], [2, 3]]}, [[2, 3], [2, 3]])
def test_rand_super_dims():
FUNCS = [rand_super, rand_super_bcsz]
for func in FUNCS:
check_func_dims(func, (7, ), {}, [[[7], [7]]] * 2)
check_func_dims(func, (6, ), {'dims': [[[2, 3], [2, 3]], [[2, 3], [2, 3]]]}, [[[2, 3], [2, 3]], [[2, 3], [2, 3]]])
if __name__ == "__main__":
run_module_suite()
|
Python
| 0
|
@@ -3611,34 +3611,25 @@
%7D; expected
-dimensions
+N
of %7B%7D, got
|
09f55d300bcf4aba1bf4b2855df3b35172d29cd4
|
Fix issue #32138 - fix examples typo (#32140)
|
lib/ansible/modules/storage/purestorage/purefa_snap.py
|
lib/ansible/modules/storage/purestorage/purefa_snap.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefa_snap
version_added: '2.4'
short_description: Manage volume snapshots on Pure Storage FlashArrays
description:
- Create or delete volumes and volume snapshots on Pure Storage FlashArray.
author:
- Simon Dodsley (@sdodsley)
options:
name:
description:
- The name of the source volume.
required: true
suffix:
description:
- Suffix of snapshot name.
target:
description:
- Name of target volume if creating from snapshot.
overwrite:
description:
- Define whether to overwrite existing volume when creating from snapshot.
type: bool
default: 'no'
state:
description:
- Define whether the volume snapshot should exist or not.
choices: [ absent, copy, present ]
default: present
eradicate:
description:
- Define whether to eradicate the snapshot on delete or leave in trash.
type: bool
default: 'no'
extends_documentation_fragment:
- purestorage
'''
EXAMPLES = r'''
- name: Create snapshot foo.ansible
purefa_snap:
name: foo
suffix: ansible
fa_url: 10.10.10.2
fa_api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: present
- name: Create R/W clone foo_clone from snapshot foo.snap
purefa_snap:
name: foo
suffix: snap
target: foo_clone
fa_url: 10.10.10.2
fa_api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: copy
- name: Overwrite existing volume foo_clone with snapshot foo.snap
purefa_snap:
name: foo
suffix: snap
target: foo_clone
overwrite: true
fa_url: 10.10.10.2
fa_api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: copy
- name: Delete and eradicate snapshot named foo.snap
purefa_snap:
name: foo
suffix: snap
eradicate: true
fa_url: 10.10.10.2
fa_api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: absent
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_system, purefa_argument_spec
from datetime import datetime
try:
from purestorage import purestorage
HAS_PURESTORAGE = True
except ImportError:
HAS_PURESTORAGE = False
def get_volume(module, array):
"""Return Volume or None"""
try:
return array.get_volume(module.params['name'])
except:
return None
def get_target(module, array):
"""Return Volume or None"""
try:
return array.get_volume(module.params['target'])
except:
return None
def get_snapshot(module, array):
"""Return Snapshot or None"""
try:
snapname = module.params['name'] + "." + module.params['suffix']
for s in array.get_volume(module.params['name'], snap='true'):
if s['name'] == snapname:
return snapname
except:
return None
def create_snapshot(module, array):
"""Create Snapshot"""
if not module.check_mode:
array.create_snapshot(module.params['name'], suffix=module.params['suffix'])
module.exit_json(changed=True)
def create_from_snapshot(module, array):
"""Create Volume from Snapshot"""
source = module.params['name'] + "." + module.params['suffix']
tgt = get_target(module, array)
if tgt is None:
changed = True
if not module.check_mode:
array.copy_volume(source,
module.params['target'])
elif tgt is not None and module.params['overwrite']:
changed = True
if not module.check_mode:
array.copy_volume(source,
module.params['target'],
overwrite=module.params['overwrite'])
elif tgt is not None and not module.params['overwrite']:
changed = False
module.exit_json(changed=changed)
def update_snapshot(module, array):
"""Update Snapshot"""
changed = False
module.exit_json(changed=changed)
def delete_snapshot(module, array):
""" Delete Snapshot"""
if not module.check_mode:
snapname = module.params['name'] + "." + module.params['suffix']
array.destroy_volume(snapname)
if module.params['eradicate']:
array.eradicate_volume(snapname)
module.exit_json(changed=True)
def main():
argument_spec = purefa_argument_spec()
argument_spec.update(dict(
name=dict(type='str', required=True),
suffix=dict(type='str'),
target=dict(type='str'),
overwrite=dict(type='bool', default=False),
eradicate=dict(type='bool', default=False),
state=dict(type='str', default='present', choices=['absent', 'copy', 'present']),
))
required_if = [('state', 'copy', ['target', 'suffix'])]
module = AnsibleModule(argument_spec,
required_if=required_if,
supports_check_mode=True)
if not HAS_PURESTORAGE:
module.fail_json(msg='purestorage sdk is required for this module in volume')
if module.params['suffix'] is None:
suffix = "snap-" + str((datetime.utcnow() - datetime(1970, 1, 1, 0, 0, 0, 0)).total_seconds())
module.params['suffix'] = suffix.replace(".", "")
state = module.params['state']
array = get_system(module)
volume = get_volume(module, array)
target = get_target(module, array)
snap = get_snapshot(module, array)
if state == 'present' and volume and not snap:
create_snapshot(module, array)
elif state == 'present' and volume and snap:
update_snapshot(module, array)
elif state == 'present' and not volume:
update_snapshot(module, array)
elif state == 'copy' and snap:
create_from_snapshot(module, array)
elif state == 'copy' and not snap:
update_snapshot(module, array)
elif state == 'absent' and snap:
delete_snapshot(module, array)
elif state == 'absent' and not snap:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -1499,35 +1499,32 @@
10.10.10.2%0A
-fa_
api_token: e3106
@@ -1547,32 +1547,32 @@
40-25983c6c4592%0A
+
state: prese
@@ -1720,35 +1720,32 @@
10.10.10.2%0A
-fa_
api_token: e3106
@@ -1967,35 +1967,32 @@
10.10.10.2%0A
-fa_
api_token: e3106
@@ -2182,23 +2182,20 @@
10.10.2%0A
+
-fa_
api_toke
|
939f7a9e91022c8dab5da13e9e3f738f6c25c524
|
Update perception_obstacle_sender.py
|
modules/tools/record_analyzer/tools/perception_obstacle_sender.py
|
modules/tools/record_analyzer/tools/perception_obstacle_sender.py
|
#!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import time
import argparse
import google.protobuf.text_format as text_format
from cyber_py import cyber
from modules.perception.proto import perception_obstacle_pb2
def update(perception_obstacles):
"""update perception obstacles timestamp"""
now = time.time()
perception_obstacles.header.timestamp_sec = now
perception_obstacles.header.lidar_timestamp = \
(long(now) - long(0.5)) * long(1e9)
for perception_obstacle in perception_obstacles.perception_obstacle:
perception_obstacle.timestamp = now - 0.5
for measure in perception_obstacle.measurements:
measure.timestamp = now - 0.5
return perception_obstacles
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Recode Analyzer is a tool to analyze record files.",
prog="main.py")
parser.add_argument(
"-f", "--file", action="store", type=str, required=True,
help="Specify the message file for sending.")
args = parser.parse_args()
record_file = args.file
cyber.init()
node = cyber.Node("perception_obstacle_sender")
perception_pub = node.create_writer(
"/apollo/perception/obstacles",
perception_obstacle_pb2.PerceptionObstacles)
perception_obstacles = perception_obstacle_pb2.PerceptionObstacles()
with open(args.file, 'r') as f:
text_format.Merge(f.read(), perception_obstacles)
while not cyber.is_shutdown():
now = time.time()
perception_obstacles = update(perception_obstacles)
perception_pub.write(perception_obstacles)
sleep_time = 0.1 - (time.time() - now)
if sleep_time > 0:
time.sleep(sleep_time)
cyber.shutdown()
|
Python
| 0.000001
|
@@ -1799,37 +1799,8 @@
()%0A%0A
- record_file = args.file%0A%0A
@@ -1812,17 +1812,16 @@
.init()%0A
-%0A
node
|
8fa528696393c18f74ceb5d6bbcf87231e072b21
|
update gentle/transcriber.py __main__
|
gentle/transcriber.py
|
gentle/transcriber.py
|
import math
import logging
import wave
from gentle import transcription
from multiprocessing.pool import ThreadPool as Pool
class MultiThreadedTranscriber:
def __init__(self, kaldi_queue, chunk_len=20, overlap_t=2, nthreads=4):
self.chunk_len = chunk_len
self.overlap_t = overlap_t
self.nthreads = nthreads
self.kaldi_queue = kaldi_queue
def transcribe(self, wavfile, progress_cb=None):
wav_obj = wave.open(wavfile, 'r')
duration = wav_obj.getnframes() / float(wav_obj.getframerate())
n_chunks = int(math.ceil(duration / float(self.chunk_len - self.overlap_t)))
chunks = []
def transcribe_chunk(idx):
wav_obj = wave.open(wavfile, 'r')
start_t = idx * (self.chunk_len - self.overlap_t)
# Seek
wav_obj.setpos(int(start_t * wav_obj.getframerate()))
# Read frames
buf = wav_obj.readframes(int(self.chunk_len * wav_obj.getframerate()))
k = self.kaldi_queue.get()
k.push_chunk(buf)
ret = k.get_final()
k.reset()
self.kaldi_queue.put(k)
chunks.append({"start": start_t, "words": ret})
logging.info('%d/%d' % (len(chunks), n_chunks))
if progress_cb is not None:
progress_cb({"message": ' '.join([X['word'] for X in ret]),
"percent": len(chunks) / float(n_chunks)})
pool = Pool(min(n_chunks, self.nthreads))
pool.map(transcribe_chunk, range(n_chunks))
pool.close()
chunks.sort(key=lambda x: x['start'])
# Combine chunks
words = []
for c in chunks:
chunk_start = c['start']
chunk_end = chunk_start + self.chunk_len
chunk_words = [transcription.Word(**wd).shift(time=chunk_start) for wd in c['words']]
# At chunk boundary cut points the audio often contains part of a
# word, which can get erroneously identified as one or more different
# in-vocabulary words. So discard one or more words near the cut points
            # (they'll be covered by the overlap anyway).
#
trim = min(0.25 * self.overlap_t, 0.5)
if c is not chunks[0]:
while len(chunk_words) > 1:
chunk_words.pop(0)
if chunk_words[0].end > chunk_start + trim:
break
if c is not chunks[-1]:
while len(chunk_words) > 1:
chunk_words.pop()
if chunk_words[-1].start < chunk_end - trim:
break
words.extend(chunk_words)
# Remove overlap: Sort by time, then filter out any Word entries in
# the list that are adjacent to another entry corresponding to the same
# word in the audio.
words.sort(key=lambda word: word.start)
words.append(transcription.Word(word="__dummy__"))
words = [words[i] for i in range(len(words)-1) if not words[i].corresponds(words[i+1])]
return words
if __name__ == '__main__':
# full transcription
from Queue import Queue
from util import ffmpeg
from gentle import standard_kaldi
import sys
import logging
logging.getLogger().setLevel('INFO')
k_queue = Queue()
for i in range(3):
k_queue.put(standard_kaldi.Kaldi())
trans = MultiThreadedTranscriber(k_queue)
with gentle.resampled(sys.argv[1]) as filename:
out = trans.transcribe(filename)
open(sys.argv[2], 'w').write(out.to_json())
|
Python
| 0
|
@@ -3236,70 +3236,19 @@
-from util import ffmpeg%0A from gentle import standard_kaldi%0A
+import json
%0A
@@ -3320,16 +3320,110 @@
'INFO')%0A
+%0A import gentle%0A from gentle import standard_kaldi%0A%0A resources = gentle.Resources()%0A%0A
%0A
@@ -3505,16 +3505,90 @@
i.Kaldi(
+resources.nnet_gpu_path, resources.full_hclg_path, resources.proto_langdir
))%0A%0A
@@ -3757,19 +3757,54 @@
).write(
+transcription.Transcription(words=
out
+)
.to_json
|
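The final step of transcribe() drops the earlier of any two adjacent words that correspond to the same audio. A self-contained sketch of that de-duplication; corresponds() here is an illustrative stand-in for transcription.Word.corresponds:

def corresponds(a, b, tol=0.1):
    # Stand-in: same word text and near-identical start times.
    return a['word'] == b['word'] and abs(a['start'] - b['start']) <= tol

def dedup(words):
    words = sorted(words, key=lambda w: w['start'])
    # Sentinel plays the role of the '__dummy__' Word above.
    words.append({'word': '__dummy__', 'start': float('inf')})
    return [words[i] for i in range(len(words) - 1)
            if not corresponds(words[i], words[i + 1])]

ws = [{'word': 'hello', 'start': 1.00},
      {'word': 'hello', 'start': 1.05},   # overlap duplicate
      {'word': 'world', 'start': 1.60}]
assert [w['word'] for w in dedup(ws)] == ['hello', 'world']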
ee2c6e18d9a59ba6781957a6ad153a657f1e7816
|
Fix datetime conversion when editing issues in the TUI.
|
fuzzinator/ui/tui/dialogs.py
|
fuzzinator/ui/tui/dialogs.py
|
# Copyright (c) 2016-2018 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
from urwid import *
from ...config import config_get_callable
from ...formatter import JsonFormatter
from ...pkgdata import __pkg_name__, __version__, __author__, __author_email__, __url__
from .decor_widgets import PatternBox
from .graphics import fz_box_pattern
from .button import FormattedButton
class Dialog(PopUpTarget):
signals = ['close']
exit_keys = ['esc']
def __init__(self, title, body, footer_btns, warning=False):
if not warning:
style = dict(body='dialog', title='dialog_title', border='dialog_border')
else:
style = dict(body='warning', title='warning_title', border='warning_border')
self.walker = SimpleListWalker(body)
self.listbox = ListBox(self.walker)
self.frame = Frame(body=AttrMap(self.listbox, style['body']),
footer=Columns([('pack', btn) for btn in footer_btns], dividechars=1),
focus_part='body')
super(Dialog, self).__init__(AttrMap(PatternBox(self.frame, title=(style['title'], title), **fz_box_pattern()),
attr_map=style['border']))
def keypress(self, size, key):
if key in self.exit_keys:
self._emit('close')
elif key in ['tab']:
if self.frame.focus_part == 'body':
try:
next_pos = self.walker.next_position(self.listbox.focus_position)
self.listbox.focus_position = next_pos
except IndexError:
self.frame.focus_part = 'footer'
self.frame.footer.focus_col = 0
elif self.frame.footer and self.frame.footer.contents:
if self.frame.footer.focus_col < len(self.frame.footer.contents) - 1:
self.frame.footer.focus_col += 1
else:
self.frame.focus_part = 'body'
self.listbox.focus_position = 0
else:
super(Dialog, self).keypress(size, key)
class AboutDialog(Dialog):
exit_keys = ['esc', 'f1']
def __init__(self, ):
self.content = self.compile_about_data()
super(AboutDialog, self).__init__(title='About',
body=[Text(self.content)],
footer_btns=[FormattedButton('Close', lambda button: self._emit('close'))])
def compile_about_data(self, prop_width=15):
return '{name_prop}: {name}\n' \
'{version_prop}: {version}\n' \
'{authors_prop}: {authors}\n' \
'{mail_prop}: {email}\n' \
'{homepage_prop}: {homepage}\n'.format(name_prop='Name'.ljust(prop_width),
name=__pkg_name__,
version_prop='Version'.ljust(prop_width),
version=__version__,
authors_prop='Authors'.ljust(prop_width),
authors=__author__,
mail_prop='E-mail'.ljust(prop_width),
email=__author_email__,
homepage_prop='Homepage'.ljust(prop_width),
homepage=__url__)
class WarningDialog(Dialog):
exit_keys = ['esc', 'enter']
def __init__(self, msg):
super(WarningDialog, self).__init__(title='WARNING',
body=[Text(msg)],
footer_btns=[FormattedButton('Close', lambda button: self._emit('close'))],
warning=True)
class YesNoDialog(Dialog):
signals = ['yes', 'no']
def keypress(self, size, key):
if key == 'enter':
self._emit('yes')
elif key == 'esc':
self._emit('no')
def __init__(self, msg):
super(YesNoDialog, self).__init__(title='Question',
body=[Text(msg)],
footer_btns=[
FormattedButton('Yes', lambda button: self._emit('yes')),
FormattedButton('No', lambda button: self._emit('no'))],
warning=True)
class FormattedIssueDialog(Dialog):
exit_keys = ['esc', 'f3']
def __init__(self, config, issue, db):
formatter = config_get_callable(config, 'sut.' + issue['sut'], ['tui_formatter', 'formatter'])[0] or JsonFormatter
super(FormattedIssueDialog, self).__init__(title=formatter(issue=issue, format='short'),
body=[Padding(Text(line, wrap='clip'), left=2, right=2) for line in formatter(issue=issue).splitlines()],
footer_btns=[FormattedButton('Close', lambda button: self._emit('close'))])
class BugEditor(Edit):
def keypress(self, size, key):
if key == 'ctrl k':
lines = self._edit_text.splitlines(keepends=True)
line_cnt = self._edit_text[:self.edit_pos].count('\n')
before = ''.join(lines[:line_cnt])
after = ''.join(lines[line_cnt + 1:]) if line_cnt + 1 < len(lines) else ''
self.set_edit_text(before + after)
self.set_edit_pos(len(before))
else:
super(BugEditor, self).keypress(size, key)
class EditIssueDialog(Dialog):
exit_keys = ['esc', 'f4']
def __init__(self, issue, db):
self.issue = issue
self.db = db
self.edit_boxes = dict()
self.type_dict = dict()
rows = []
for prop in issue:
if prop == '_id':
continue
self.edit_boxes[prop] = BugEditor('', self._to_str(prop, issue[prop]), multiline=True)
rows.append(Columns([('weight', 1, Text(('dialog_secondary', prop + ': '))),
('weight', 10, self.edit_boxes[prop])], dividechars=1))
super(EditIssueDialog, self).__init__(title=issue['id'],
body=rows,
footer_btns=[FormattedButton('Save', self.save_modifications),
FormattedButton('Close', lambda button: self._emit('close'))])
def _to_str(self, prop, value):
t = type(value)
self.type_dict[prop] = t
if t == str:
return value
if t == bytes:
return value.decode('utf-8', errors='ignore')
if value is None:
self.type_dict[prop] = None
return ''
return str(value)
def _from_str(self, prop, value):
t = self.type_dict[prop]
if t == str:
return value
if t == int:
return int(value)
if t == bytes:
return value.encode('utf-8', errors='ignore')
if t is None:
return value or None
return eval(value)
def save_modifications(self, btn):
updated = dict()
for prop, box in self.edit_boxes.items():
updated[prop] = self._from_str(prop, box.edit_text)
self.db.update_issue(self.issue, updated)
self._emit('close')
|
Python
| 0
|
@@ -247,16 +247,46 @@
terms.%0A%0A
+from datetime import datetime%0A
from urw
@@ -7119,32 +7119,113 @@
rrors='ignore')%0A
+ if t == datetime:%0A return value.strftime('%25Y-%25m-%25d %25H:%25M:%25S')%0A
if value
@@ -7565,32 +7565,123 @@
rrors='ignore')%0A
+ if t == datetime:%0A return datetime.strptime(value, '%25Y-%25m-%25d %25H:%25M:%25S')%0A
if t is
|
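The diff above adds a datetime case to both _to_str and _from_str using one shared format string. A quick round-trip check of that conversion:

from datetime import datetime

FMT = '%Y-%m-%d %H:%M:%S'

def to_str(value):
    return value.strftime(FMT) if isinstance(value, datetime) else str(value)

def from_str(text):
    return datetime.strptime(text, FMT)

d = datetime(2018, 5, 4, 12, 30, 0)
# Sub-second precision is lost by FMT, so use second-resolution values.
assert from_str(to_str(d)) == d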
203cba83527ed39cc478c4f0530e513c71f2a6ad
|
format date in title
|
examples/daynight.py
|
examples/daynight.py
|
import numpy as np
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from datetime import datetime
# example showing how to compute the day/night terminator and shade nightime
# areas on a map.
# miller projection
map = Basemap(projection='mill',lon_0=180)
# plot coastlines, draw label meridians and parallels.
map.drawcoastlines()
map.drawparallels(np.arange(-90,90,30),labels=[1,0,0,0])
map.drawmeridians(np.arange(map.lonmin,map.lonmax+30,60),labels=[0,0,0,1])
# fill continents 'coral' (with zorder=0), color wet areas 'aqua'
map.drawmapboundary(fill_color='aqua')
map.fillcontinents(color='coral',lake_color='aqua')
# shade the night areas, with alpha transparency so the
# map shows through. Use current time in UTC.
date = datetime.utcnow()
CS=map.nightshade(date)
plt.title('Day/Night Map for %s (UTC)' % date)
plt.show()
|
Python
| 0.000172
|
@@ -834,24 +834,54 @@
UTC)' %25 date
+.strftime(%22%25d %25b %25Y %25H:%25M:%25S%22)
)%0Aplt.show()
|
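The diff above formats the date with strftime before it reaches the title. A standalone check of the same format string (note that %b is locale-dependent):

from datetime import datetime

date = datetime(2015, 6, 21, 14, 5, 9)
# Under the default C locale this prints
# "Day/Night Map for 21 Jun 2015 14:05:09 (UTC)"
print('Day/Night Map for %s (UTC)' % date.strftime('%d %b %Y %H:%M:%S'))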
9c5c8792c3f350614095b9203967fce83b09ee4c
|
Remove unused constant
|
lib/python2.6/aquilon/aqdb/model/address_assignment.py
|
lib/python2.6/aquilon/aqdb/model/address_assignment.py
|
# ex: set expandtab softtabstop=4 shiftwidth=4: -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
#
# Copyright (C) 2010,2011,2012 Contributor
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the EU DataGrid Software License. You should
# have received a copy of the license with this program, and the
# license is published at
# http://eu-datagrid.web.cern.ch/eu-datagrid/license.html.
#
# THE FOLLOWING DISCLAIMER APPLIES TO ALL SOFTWARE CODE AND OTHER
# MATERIALS CONTRIBUTED IN CONNECTION WITH THIS PROGRAM.
#
# THIS SOFTWARE IS LICENSED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE AND ANY WARRANTY OF NON-INFRINGEMENT, ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THIS
# SOFTWARE MAY BE REDISTRIBUTED TO OTHERS ONLY BY EFFECTIVELY USING
# THIS OR ANOTHER EQUIVALENT DISCLAIMER AS WELL AS ANY OTHER LICENSE
# TERMS THAT MAY APPLY.
""" Assign Addresses to interfaces """
from datetime import datetime
import re
from sqlalchemy import (Column, Integer, String, DateTime, ForeignKey, Sequence,
UniqueConstraint)
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import relation, backref, deferred
from sqlalchemy.sql import and_
from aquilon.exceptions_ import InternalError
from aquilon.aqdb.column_types import IPV4, AqStr
from aquilon.aqdb.model import (Base, Interface, ARecord, DnsEnvironment, Fqdn,
Network)
from aquilon.aqdb.model.a_record import dns_fqdn_mapper
_TN = 'address_assignment'
_ABV = 'addr_assign'
# Valid values:
# - system: used/configured by the operating system
# - zebra: used/configured by Zebra
ADDR_USAGES = ['system', 'zebra']
class AddressAssignment(Base):
"""
Assignment of IP addresses to network interfaces.
It's kept as an association map to model the linkage, since we need to
have maximum ability to provide potentially complex configuration
scenarios, such as advertising certain VIP addresses from some, but not
all of the network interfaces on a machine (to be used for backup
servers, cluster filesystem servers, NetApp filers, etc.). While in
    most cases we can assume VIPs are broadcast out of all interfaces on the
    box, we still need the underlying model to be the more complex
    many-to-many relationship implemented here.
"""
__tablename__ = _TN
_label_check = re.compile('^[a-z0-9]{0,16}$')
id = Column(Integer, Sequence('%s_seq' % _TN), primary_key=True)
interface_id = Column(Integer, ForeignKey('interface.id',
name='%s_interface_id_fk' % _ABV,
ondelete='CASCADE'),
nullable=False)
_label = Column("label", AqStr(16), nullable=False)
ip = Column(IPV4, nullable=False)
network_id = Column(Integer, ForeignKey('network.id',
name='%s_network_fk' % _TN),
nullable=False)
service_address_id = Column(Integer, ForeignKey('service_address.resource_id',
name='%s_srv_addr_id' % _ABV,
ondelete="CASCADE"),
nullable=True)
dns_environment_id = Column(Integer, ForeignKey('dns_environment.id',
name='%s_dns_env_fk' %
_ABV),
nullable=False)
creation_date = deferred(Column(DateTime, default=datetime.now,
nullable=False))
comments = deferred(Column(String(255), nullable=True))
interface = relation(Interface, lazy=False, innerjoin=True,
backref=backref('assignments', order_by=[_label],
cascade='all, delete-orphan'))
dns_environment = relation(DnsEnvironment, innerjoin=True)
# Setting viewonly is very important here as we do not want the removal of
# an AddressAssignment record to change the linked DNS record(s)
# Can't use backref or back_populates due to the different mappers
dns_records = relation(dns_fqdn_mapper, uselist=True,
primaryjoin=and_(ip == ARecord.ip,
dns_environment_id == Fqdn.dns_environment_id),
foreign_keys=[ARecord.ip, Fqdn.dns_environment_id],
viewonly=True)
fqdns = association_proxy('dns_records', 'fqdn')
network = relation(Network, backref=backref('assignments',
passive_deletes=True,
order_by=[ip]))
@property
def logical_name(self):
"""
Compute an OS-agnostic name for this interface/address combo.
        BIG FAT WARNING: do _NOT_ assume that this name really exists on the
        host!
        There are external systems like DSDB that cannot handle having multiple
        addresses on the same interface. Because of that, this function generates
        a unique name for every interface/address tuple.
"""
# Use the Linux naming convention because people are familiar with that
# and it is easy to parse if needed
name = self.interface.name
if self.label:
name += ":%s" % self.label
return name
@property
def label(self):
if self._label == '-':
return ""
else:
return self._label
def __init__(self, label=None, network=None, **kwargs):
# This is dirty. We want to allow empty labels, but Oracle converts
# empty strings to NULL, violating the NOT NULL constraint. We could
# allow label to be NULL and relying on the unique indexes to forbid
# adding multiple empty labels, but that is again Oracle-specific
# behavior which actually violates the SQL standard, so it would not
# work with other databases.
if not label:
label = '-'
elif not self._label_check.match(label):
raise ValueError("Illegal address label '%s'." % label)
# Right now network_id is nullable due to how refresh_network works, so
# verify the network here
if not network: # pragma: no cover
raise InternalError("AddressAssignment needs a network")
super(AddressAssignment, self).__init__(_label=label, network=network,
**kwargs)
def __repr__(self):
return "<Address %s on %s/%s>" % (self.ip,
self.interface.hardware_entity.label,
self.logical_name)
address = AddressAssignment.__table__ # pylint: disable=C0103
address.primary_key.name = '%s_pk' % _TN
address.append_constraint(
UniqueConstraint("interface_id", "ip", name="%s_iface_ip_uk" % _ABV))
address.append_constraint(
UniqueConstraint("interface_id", "label", name="%s_iface_label_uk" % _ABV))
# Assigned to external classes here to avoid circular dependencies.
Interface.addresses = association_proxy('assignments', 'ip')
# Can't use backref or back_populates due to the different mappers
# This relation gives us the two other sides of the triangle mentioned above
ARecord.assignments = relation(
AddressAssignment,
primaryjoin=and_(AddressAssignment.ip == ARecord.ip,
ARecord.fqdn_id == Fqdn.id,
AddressAssignment.dns_environment_id == Fqdn.dns_environment_id),
foreign_keys=[AddressAssignment.ip, Fqdn.id],
viewonly=True)
|
Python
| 0
|
@@ -2223,147 +2223,8 @@
n'%0A%0A
-# Valid values:%0A# - system: used/configured by the operating system%0A# - zebra: used/configured by Zebra%0AADDR_USAGES = %5B'system', 'zebra'%5D%0A%0A
%0Acla
|
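The class docstring above describes the association-object pattern: the link between an interface and an address is a row with attributes of its own (label, ip). A plain-Python sketch of that shape, kept free of SQLAlchemy so it runs standalone:

class Interface:
    def __init__(self, name):
        self.name = name
        self.assignments = []          # association objects live here

    @property
    def addresses(self):
        return [a.ip for a in self.assignments]

class Assignment:
    """Link object carrying its own state (label), as in the model above."""
    def __init__(self, interface, ip, label=''):
        self.interface, self.ip, self.label = interface, ip, label
        interface.assignments.append(self)

eth0, eth1 = Interface('eth0'), Interface('eth1')
Assignment(eth0, '192.0.2.10')         # VIP advertised on eth0 only
Assignment(eth1, '192.0.2.11')
assert '192.0.2.10' in eth0.addresses
assert '192.0.2.10' not in eth1.addresses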
ddb0a5d3b684c96b8fe8c4678cdb5e018f1b3d7b
|
Revert last change. Just in case...
|
rbm2m/action/downloader.py
|
rbm2m/action/downloader.py
|
# -*- coding: utf-8 -*-
import urllib
import sys
import requests
from .debug import dump_exception
HOST = 'http://www.recordsbymail.com/'
GENRE_LIST_URL = '{host}browse.php'.format(host=HOST)
SEARCH_URL = '{host}search.php?genre={genre_slug}&instock=1'
IMAGE_LIST_URL = '{host}php/getImageArray.php?item={rec_id}'
TIMEOUTS = (3.05, 30) # Connect, read
def fetch(url):
"""
Download content from url and return response object.
Raises `DownloadError` if operation fails
"""
resp = None
try:
resp = requests.get(url, timeout=TIMEOUTS)
resp.raise_for_status()
except requests.RequestException as e:
exc_type, exc_val, tb = sys.exc_info()
notes = resp.text if resp else ''
dump_exception('download', exc_type, exc_val, tb, notes)
raise DownloadError(e)
else:
assert resp is not None
return resp
def fetch_text(url):
"""
Download text content from url and return it.
Raises `DownloadError` if operation fails
"""
return fetch(url).text
def genre_list():
"""
Download page with the list of genres
"""
return fetch_text(GENRE_LIST_URL)
def get_results_page(genre_title, page):
"""
Download search result page
"""
url = SEARCH_URL.format(host=HOST,
genre_slug=urllib.quote_plus(genre_title))
if page:
url = url + '&page={}'.format(page)
return fetch_text(url)
def get_image_list(rec_id):
"""
Download list of images for a record
"""
url = IMAGE_LIST_URL.format(host=HOST, rec_id=rec_id)
return fetch_text(url)
def get_content(url):
"""
Downloads content from url
"""
return fetch(url).content
class DownloadError(requests.RequestException):
"""
Raised for all download errors (timeouts, http errors etc)
"""
pass
|
Python
| 0
|
@@ -239,16 +239,26 @@
e_slug%7D&
+format=LP&
instock=
|
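The reverted diff restores the fixed format=LP flag in SEARCH_URL. A standalone check of the URL construction as get_results_page() performs it, with the genre passed through quote_plus:

try:                                    # Python 3
    from urllib.parse import quote_plus
except ImportError:                     # Python 2
    from urllib import quote_plus

HOST = 'http://www.recordsbymail.com/'
SEARCH_URL = '{host}search.php?genre={genre_slug}&format=LP&instock=1'

url = SEARCH_URL.format(host=HOST, genre_slug=quote_plus('Jazz & Blues'))
assert url == ('http://www.recordsbymail.com/search.php'
               '?genre=Jazz+%26+Blues&format=LP&instock=1')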
6ce67ab2e0c47459bc51fb94b0bbfaa6a8e57689
|
Split Milestone out into _Entry and Milestone
|
github-push-issues.py
|
github-push-issues.py
|
#!/usr/bin/env python
#
# Copyright (C) 2015 W. Trevor King <wking@tremily.us>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Create GitHub issues based on local templates.
This is useful for pushing, for example, a set of checklists with
per-item issues grouped by milestone. The template directory
structure is::
.
|-- milestone-1
| |-- README.md
| |-- issue-1.1.md
| |-- issue-1.2.md
| ...
|-- milestone-2
| |-- README.md
| |-- issue-2.1.md
| |-- issue-2.2.md
| ...
...
Both the ``README.md`` and per-issue files are in Markdown, with a
summary line (which may optionally include `Atx-style headers`__)
followed by a blank line and an optional Markdown body. For example,
if you want each of your products to have a ``joel`` milestone tracking
the `Joel Test`__, you might have a ``joel/README.md`` with::
# joel
Keep track of how well the project handles the [Joel
Test][joel-test].
[joel-test]: http://www.joelonsoftware.com/articles/fog0000000043.html
And per-feature issue files like ``joel/source-control.md``::
# Do you use source control?
I've used commercial source control packages, and I've used CVS,
which is free, and let me tell you, CVS is fine...
Of course, you probably can't copy Joel's text wholesale into your
issue files, so you'd want to make your own summaries. Then run::
# github-push-issues.py [options] /path/to/your/template/directory
Or::
# github-push-issues.py [options] https://example.com/url/for/template.zip
The latter is useful if you have your template directory structure
hosted online in a version control system that supports tar or zip
archive snapshots.
__ http://daringfireball.net/projects/markdown/syntax#header
__ http://www.joelonsoftware.com/articles/fog0000000043.html
"""
import base64
import getpass
import os
try:
import readline
except ImportError:
pass # carry on without readline support
import sys
try: # Python 2
from urllib2 import urlopen, Request
except ImportError: # Python 3
from urllib.request import urlopen, Request
if sys.version_info < (3,): # Python 2
input = raw_input
__version__ = '0.1'
class Milestone(object):
def __init__(self, title=None, state='open', description=None,
due_on=None):
self.title = title
self.state = state
self.description = description
self.due_on = due_on
def load(self, stream):
self.title = stream.readline().strip().strip('#').strip()
blank = stream.readline().strip()
if blank:
raise ValueError(
'non-blank line after the milestone title: {!r}'.format(blank))
self.description = ''.join(stream.readlines())
def get_authorization_headers(username=None):
if username is None:
username = input('GitHub username: ')
password = getpass.getpass(prompt='GitHub password: ')
basic_auth_payload = '{0}:{1}'.format(username, password)
auth = 'Basic {}'.format(
base64.b64encode(basic_auth_payload.encode('US-ASCII')))
return {'Authorization': auth}
def add_issues(root_endpoint='https://api.github.com', username=None,
repository=None, template_root='.'):
authorization_headers = get_authorization_headers(username=username)
for dirpath, dirnames, filenames in os.walk(top=template_root):
if 'README.md' in filenames:
milestone = Milestone()
with open(os.path.join(dirpath, 'README.md'), 'r') as f:
milestone.load(stream=f)
print(authorization_headers)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description=__doc__.splitlines()[0],
epilog='\n'.join(__doc__.splitlines()[2:]),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'--version', action='version',
version='%(prog)s {}'.format(__version__))
parser.add_argument(
'--root-endpoint', default='https://api.github.com',
help='GitHub API root endpoint')
parser.add_argument('-u', '--username', help='GitHub username')
parser.add_argument(
'-r', '--repository',
help='GitHub repository (user/repo) to push issues to')
parser.add_argument(
'template_root', metavar='TEMPLATE-ROOT', nargs='?', default='.',
help='Path or URL for the template directory')
args = parser.parse_args()
add_issues(
root_endpoint=args.root_endpoint,
username=args.username,
repository=args.repository,
template_root=args.template_root)
|
Python
| 0
|
@@ -3429,25 +3429,22 @@
%0A%0Aclass
-Milestone
+_Entry
(object)
@@ -3484,192 +3484,71 @@
ne,
-state='open', description=None,%0A due_on=None):%0A self.title = title%0A self.state = state%0A self.description = description%0A self.due_on = due_on
+body=None):%0A self.title = title%0A self.body = body
%0A%0A
@@ -3771,26 +3771,16 @@
ter the
-milestone
title: %7B
@@ -3812,27 +3812,20 @@
self.
-description
+body
= ''.jo
@@ -3849,16 +3849,210 @@
es())%0A%0A%0A
+class Milestone(_Entry):%0A def __init__(self, state='open', due_on=None, **kwargs):%0A super(Milestone, self).__init__(**kwargs)%0A self.state = state%0A self.due_on = due_on%0A%0A%0A
def get_
|
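The summary-line-plus-blank-line format described in the docstring is exactly what _Entry.load() enforces. A self-contained sketch of that parsing rule (Python 3; io.StringIO stands in for a real file):

import io

def load_entry(stream):
    # First line: title, with optional Atx-style '#' markers stripped.
    title = stream.readline().strip().strip('#').strip()
    # A mandatory blank line must follow the title.
    if stream.readline().strip():
        raise ValueError('non-blank line after the title')
    # Everything remaining is the body.
    return title, ''.join(stream.readlines())

title, body = load_entry(io.StringIO('# joel\n\nTrack the Joel Test.\n'))
assert title == 'joel'
assert body == 'Track the Joel Test.\n'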