commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
366371957a01a564c1860313ca04d484caf89fc1 | Create app.py | rajeshrao04/news-api | app.py | app.py | #!/usr/bin/env python
from __future__ import print_function
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
import json
import os
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = processRequest(req)
res = json.dumps(res, indent=4)
# print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def processRequest(req):
if req.get("result").get("action") != "news.search":
return {}
baseurl = "https://newsapi.org/v1/articles?source=the-times-of-india&sortBy=latest&apiKey=dda1592b3267447193fb1756b5746b0e"
if yql_query is None:
return {}
yql_url = baseurl + urlencode({'': yql_query}) + "&format=json"
result = urlopen(baseurl).read()
data = json.loads(result)
res = makeWebhookResult(data)
return res
def makeYqlQuery(req):
result = req.get("result")
parameters = result.get("parameters")
city = parameters.get("news.search")
if city is None:
return None
def makeWebhookResult(data):
articles = data.get('articles')
if articles is None:
return {}
author = articles.get('author')
if author is None:
return {}
title = articles.get('title')
if title is None:
return {}
description= articles.get('description')
url = articles.get('url')
#units = channel.get('units')
#condition = item.get('condition')
#if condition is None:
# return {}
# print(json.dumps(item, indent=4))
speech = "latest news" +author.get()+""+title.get()+""+description.get()+""+url.get()
#print("Response:")
print(speech)
return {
"speech": speech,
"displayText": speech,
# "data": data,
# "contextOut": [],
"source": "apiai-weather-webhook-sample"
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print("Starting app on port %d" % port)
app.run(debug=False, port=port, host='0.0.0.0')
| #!/usr/bin/env python
from __future__ import print_function
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
import json
import os
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = processRequest(req)
res = json.dumps(res, indent=4)
# print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def processRequest(req):
if req.get("result").get("action") != "news.search":
return {}
baseurl = "https://newsapi.org/v1/articles?source=the-times-of-india&sortBy=latest&apiKey=dda1592b3267447193fb1756b5746b0e"
if yql_query is None:
return {}
yql_url = baseurl + urlencode({'': yql_query}) + "&format=json"
result = urlopen(baseurl).read()
data = json.loads(result)
res = makeWebhookResult(data)
return res
def makeYqlQuery(req):
result = req.get("result")
parameters = result.get("parameters")
city = parameters.get("news.search")
if city is None:
return None
def makeWebhookResult(data):
articles1 = data.get('articles')
if articles is None:
return {}
author = articles.get('author')
if author is None:
return {}
title = articles.get('title')
if title is None:
return {}
description= articles.get('description')
url = articles.get('url')
#units = channel.get('units')
#condition = item.get('condition')
#if condition is None:
# return {}
# print(json.dumps(item, indent=4))
speech = "latest news" +author.get()+""+title.get()+""+description.get()+""+url.get()
#print("Response:")
print(speech)
return {
"speech": speech,
"displayText": speech,
# "data": data,
# "contextOut": [],
"source": "apiai-weather-webhook-sample"
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print("Starting app on port %d" % port)
app.run(debug=False, port=port, host='0.0.0.0')
| apache-2.0 | Python |
df3d7897d71cd7349934c215c39468630c703ebd | Add help. | drakeet/DrakeetLoveBot | app.py | app.py | # coding: utf-8
from datetime import datetime
from flask import Flask
from flask import render_template, request
import logging
import telegram
app = Flask(__name__)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
bot_name = '@DrakeetLoveBot'
global bot
# 由于 美国节点,只能 git 部署,我不得不开源 token,请不要随便拿去用,如需生成你自己的 token
# 请联系 http://telegram.me/BotFather 谢谢!
bot = telegram.Bot(token='192666820:AAHLcmxXJ68UvkB-nWgbPVGzb_bkDoTNlcU')
@app.route('/')
def index():
return r'{"drakeet":"hehe"}'
@app.route('/<token>', methods=['POST'])
def launcher(token):
if request.method == "POST":
update = telegram.Update.de_json(request.get_json(force=True))
logging.info('I am still alive.')
handle_message(update.message)
return 'ok'
def handle_message(message):
text = message.text
if '/echo' in text:
echo(message)
if '/milestone' in text:
milestone(message)
if '/help' in text:
help(message)
def parse_cmd_text(text):
# Telegram understands UTF-8, so encode text for unicode compatibility
text = text.encode('utf-8')
cmd = None
if '/' in text:
try:
index = text.index(' ')
except ValueError as e:
return (text, None)
cmd = text[:index]
text = text[index + 1:]
if not cmd == None and '@' in cmd:
cmd = cmd.replace(bot_name, '')
return (cmd, text)
def echo(message):
'''
repeat the same message back (echo)
'''
cmd, text = parse_cmd_text(message.text)
if text == None or len(text) == 0:
pass
else:
chat_id = message.chat.id
bot.sendMessage(chat_id=chat_id, text=text)
def milestone(message):
from_day = datetime(2013, 7, 16)
now = datetime.now()
text = 'drakeet 和他家老婆大人已经认识并相爱 %d 天啦(此处应该有恭喜' % (now - from_day).days
chat_id = message.chat.id
bot.sendMessage(chat_id=chat_id, text=text)
def help(message):
text = """
/echo - repeat the same message back
/milestone - Get drakeet's milestone
"""
chat_id = message.chat.id
bot.sendMessage(chat_id=chat_id, text=text)
| # coding: utf-8
from datetime import datetime
from flask import Flask
from flask import render_template, request
import logging
import telegram
app = Flask(__name__)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
bot_name = '@DrakeetLoveBot'
global bot
# 由于 美国节点,只能 git 部署,我不得不开源 token,请不要随便拿去用,如需生成你自己的 token
# 请联系 http://telegram.me/BotFather 谢谢!
bot = telegram.Bot(token='192666820:AAHLcmxXJ68UvkB-nWgbPVGzb_bkDoTNlcU')
@app.route('/')
def index():
return r'{"drakeet":"hehe"}'
@app.route('/<token>', methods=['POST'])
def launcher(token):
if request.method == "POST":
update = telegram.Update.de_json(request.get_json(force=True))
logging.info('I am still alive.')
handle_message(update.message)
return 'ok'
def handle_message(message):
text = message.text
if '/echo' in text:
echo(message)
if '/milestone' in text:
milestone(message)
def parse_cmd_text(text):
# Telegram understands UTF-8, so encode text for unicode compatibility
text = text.encode('utf-8')
cmd = None
if '/' in text:
try:
index = text.index(' ')
except ValueError as e:
return (text, None)
cmd = text[:index]
text = text[index + 1:]
if not cmd == None and '@' in cmd:
cmd = cmd.replace(bot_name, '')
return (cmd, text)
def echo(message):
'''
repeat the same message back (echo)
'''
cmd, text = parse_cmd_text(message.text)
if text == None or len(text) == 0:
pass
else:
chat_id = message.chat.id
bot.sendMessage(chat_id=chat_id, text=text)
def milestone(message):
from_day = datetime(2013, 7, 16)
now = datetime.now()
text = 'drakeet 和他家老婆大人已经认识并相爱 %d 天啦(此处应该有恭喜' % (now - from_day).days
chat_id = message.chat.id
bot.sendMessage(chat_id=chat_id, text=text)
| mit | Python |
60820804840e7c4bbb9049aedfbb67a5beecfe53 | use patch for updates as in other apps | praekelt/seed-stage-based-messaging,praekelt/seed-stage-based-messaging,praekelt/seed-staged-based-messaging | scheduler/client.py | scheduler/client.py |
"""
Client for Messaging Content Store HTTP services APIs.
"""
import requests
import json
class SchedulerApiClient(object):
"""
Client for Scheduler API.
:param str api_token:
An API Token.
:param str api_url:
The full URL of the API. Defaults to
``http://seed-scheduler/api/v1``.
"""
def __init__(self, api_token, api_url=None, session=None):
if api_url is None:
api_url = "http://seed-scheduler/api/v1"
self.api_url = api_url
self.headers = {
'Content-Type': 'application/json',
'Authorization': 'Token %s' % api_token
}
if session is None:
session = requests.Session()
session.headers.update(self.headers)
self.session = session
def call(self, endpoint, method, obj=None, params=None, data=None):
if obj is None:
url = '%s/%s/' % (self.api_url.rstrip('/'), endpoint)
else:
url = '%s/%s/%s/' % (self.api_url.rstrip('/'), endpoint, obj)
result = {
'get': self.session.get,
'post': self.session.post,
'patch': self.session.patch,
'delete': self.session.delete,
}.get(method, None)(url, params=params, data=json.dumps(data))
result.raise_for_status()
if method is "delete": # DELETE returns blank body
return {"success": True}
else:
return result.json()
def get_schedules(self, params=None):
return self.call('schedule', 'get', params=params)
def get_schedule(self, schedule_id):
return self.call('schedule', 'get', obj=schedule_id)
def create_schedule(self, schedule):
return self.call('schedule', 'post', data=schedule)
def update_schedule(self, schedule_id, schedule):
return self.call('schedule', 'patch', obj=schedule_id,
data=schedule)
def delete_schedule(self, schedule_id):
# Schedule messages must all be deleted first for FK reasons
return self.call('schedule', 'delete', obj=schedule_id)
|
"""
Client for Messaging Content Store HTTP services APIs.
"""
import requests
import json
class SchedulerApiClient(object):
"""
Client for Scheduler API.
:param str api_token:
An API Token.
:param str api_url:
The full URL of the API. Defaults to
``http://seed-scheduler/api/v1``.
"""
def __init__(self, api_token, api_url=None, session=None):
if api_url is None:
api_url = "http://seed-scheduler/api/v1"
self.api_url = api_url
self.headers = {
'Content-Type': 'application/json',
'Authorization': 'Token %s' % api_token
}
if session is None:
session = requests.Session()
session.headers.update(self.headers)
self.session = session
def call(self, endpoint, method, obj=None, params=None, data=None):
if obj is None:
url = '%s/%s/' % (self.api_url.rstrip('/'), endpoint)
else:
url = '%s/%s/%s/' % (self.api_url.rstrip('/'), endpoint, obj)
result = {
'get': self.session.get,
'post': self.session.post,
'put': self.session.post,
'delete': self.session.delete,
}.get(method, None)(url, params=params, data=json.dumps(data))
result.raise_for_status()
if method is "delete": # DELETE returns blank body
return {"success": True}
else:
return result.json()
def get_schedules(self, params=None):
return self.call('schedule', 'get', params=params)
def get_schedule(self, schedule_id):
return self.call('schedule', 'get', obj=schedule_id)
def create_schedule(self, schedule):
return self.call('schedule', 'post', data=schedule)
def update_schedule(self, schedule_id, schedule):
return self.call('schedule', 'put', obj=schedule_id,
data=schedule)
def delete_schedule(self, schedule_id):
# Schedule messages must all be deleted first for FK reasons
return self.call('schedule', 'delete', obj=schedule_id)
| bsd-3-clause | Python |
c9606a1d62df63b3b1b83b598efeb628d3428e20 | Bump to v0.10.1 | johanneswilm/eha-nutsurv-django,eHealthAfrica/nutsurv,johanneswilm/eha-nutsurv-django,johanneswilm/eha-nutsurv-django,eHealthAfrica/nutsurv,eHealthAfrica/nutsurv | nutsurv/__init__.py | nutsurv/__init__.py | __version__ = '0.10.1'
__url__ = 'https://github.com/eHealthAfrica/eha-nutsurv-django'
| __version__ = '0.9.0'
__url__ = 'https://github.com/eHealthAfrica/eha-nutsurv-django'
| agpl-3.0 | Python |
8b8754296a71e43315b3bb300770f2f621694bbb | add headers & load_profile support for converter | yandex/yandex-tank,yandex/yandex-tank | yandextank/version.py | yandextank/version.py | VERSION = '1.16.3'
| VERSION = '1.16.2'
| lgpl-2.1 | Python |
4a43cde46a0f12a9ad7681c63d9dea3918ac2013 | Update scripts version accordingly | wikimedia/pywikibot-core,wikimedia/pywikibot-core | scripts/__init__.py | scripts/__init__.py | """**Scripts** folder contains predefined scripts easy to use.
Scripts are only available im Pywikibot ist instaled in directory mode
and not as side package. They can be run in command line using the pwb
wrapper script::
python pwb.py <global options> <name_of_script> <options>
Every script provides a ``-help`` option which shows all available
options, their explanation and usage examples. Global options will be
shown by ``-help:global`` or using::
python pwb.py -help
The advantages of pwb.py wrapper script are:
- check for framework and script depedencies and show a warning if a
package is missing or outdated or if the Python release does not fit
- check whether user-config.py config file is available and ask to
create it by starting the generate_user_files.py script
- enable global options even if a script does not support them
- start private scripts located in userscripts sub-folder
- find a script even if given script name does not match a filename e.g.
due to spelling mistake
"""
#
# (C) Pywikibot team, 2021
#
# Distributed under the terms of the MIT license.
#
__version__ = '6.4.0'
| """**Scripts** folder contains predefined scripts easy to use.
Scripts are only available im Pywikibot ist instaled in directory mode
and not as side package. They can be run in command line using the pwb
wrapper script::
python pwb.py <global options> <name_of_script> <options>
Every script provides a ``-help`` option which shows all available
options, their explanation and usage examples. Global options will be
shown by ``-help:global`` or using::
python pwb.py -help
The advantages of pwb.py wrapper script are:
- check for framework and script depedencies and show a warning if a
package is missing or outdated or if the Python release does not fit
- check whether user-config.py config file is available and ask to
create it by starting the generate_user_files.py script
- enable global options even if a script does not support them
- start private scripts located in userscripts sub-folder
- find a script even if given script name does not match a filename e.g.
due to spelling mistake
"""
#
# (C) Pywikibot team, 2021
#
# Distributed under the terms of the MIT license.
#
__version__ = '6.2.0'
| mit | Python |
a68e484487289ab9aa208b88884c7b6e5537bab2 | Add basic argparse support | gforsyth/doctr_testing,doctrtesting/doctr,drdoctr/doctr | doctr/__main__.py | doctr/__main__.py | """
doctr
A tool to automatically deploy docs to GitHub pages from Travis CI.
"""
import sys
import os
import argparse
from .local import generate_GitHub_token, encrypt_variable
from .travis import setup_GitHub_push, commit_docs, push_docs
from . import __version__
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-V', '--version', action='version', version='doctr ' + __version__)
location = parser.add_mutually_exclusive_group()
location.add_argument('--travis', action='store_true', default=None, help="""Run
the Travis script. The default is to detect automatically.""")
location.add_argument('--local', action='store_true', default=None, help="""Run
the local script. The default is to detect automatically (only run if not
on Travis).""")
args = parser.parse_args()
if args.local == args.travis == None:
on_travis = os.environ.get("TRAVIS_JOB_NUMBER", '')
else:
on_travis = args.travis
if on_travis:
# TODO: Get this automatically
repo = sys.argv[1]
if setup_GitHub_push(repo):
commit_docs()
push_docs()
else:
username = input("What is your GitHub username? ")
token = generate_GitHub_token(username)
repo = input("What repo to you want to build the docs for? ")
encrypted_variable = encrypt_variable("GH_TOKEN={token}".format(token=token).encode('utf-8'), repo=repo)
travis_content = """
env:
global:
secure: "{encrypted_variable}"
""".format(encrypted_variable=encrypted_variable.decode('utf-8'))
print("Put\n", travis_content, "in your .travis.yml.\n",
"Also make sure to create an empty gh-pages branch on GitHub, and "
"enable it at https://github.com/{repo}/settings".format(repo=repo), sep='')
if __name__ == '__main__':
sys.exit(main())
| import sys
import os
from .local import generate_GitHub_token, encrypt_variable
from .travis import setup_GitHub_push, commit_docs, push_docs
def main():
on_travis = os.environ.get("TRAVIS_JOB_NUMBER", '')
if on_travis:
# TODO: Get this automatically
repo = sys.argv[1]
if setup_GitHub_push(repo):
commit_docs()
push_docs()
else:
username = input("What is your GitHub username? ")
token = generate_GitHub_token(username)
repo = input("What repo to you want to build the docs for? ")
encrypted_variable = encrypt_variable("GH_TOKEN={token}".format(token=token).encode('utf-8'), repo=repo)
travis_content = """
env:
global:
secure: "{encrypted_variable}"
""".format(encrypted_variable=encrypted_variable.decode('utf-8'))
print("Put\n", travis_content, "in your .travis.yml.\n",
"Also make sure to create an empty gh-pages branch on GitHub, and "
"enable it at https://github.com/{repo}/settings".format(repo=repo), sep='')
if __name__ == '__main__':
sys.exit(main())
| mit | Python |
75e03a0ea08fe88d3baf054ade578dde60c181c1 | add shebang to db build script | mcogswell/cnn_treevis,mcogswell/cnn_treevis,mcogswell/cnn_treevis | scripts/build_db.py | scripts/build_db.py | #!/usr/bin/env python
import caffe
from recon import Reconstructor
def main():
'''
Usage:
build_db.py <net_id> <blob_names>... [--gpu <id>]
Options:
--gpu <id> The id of the GPU to use [default: -1]
'''
import docopt, textwrap
main_args = docopt.docopt(textwrap.dedent(main.__doc__))
net_id = main_args['<net_id>']
blob_names = main_args['<blob_names>']
gpu_id = int(main_args['--gpu'])
if gpu_id < 0:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(gpu_id)
recon = Reconstructor(net_id)
recon.build_max_act_db(blob_names)
if __name__ == '__main__':
main()
| import caffe
from recon import Reconstructor
def main():
'''
Usage:
build_db.py <net_id> <blob_names>... [--gpu <id>]
Options:
--gpu <id> The id of the GPU to use [default: -1]
'''
import docopt, textwrap
main_args = docopt.docopt(textwrap.dedent(main.__doc__))
net_id = main_args['<net_id>']
blob_names = main_args['<blob_names>']
gpu_id = int(main_args['--gpu'])
if gpu_id < 0:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(gpu_id)
recon = Reconstructor(net_id)
recon.build_max_act_db(blob_names)
if __name__ == '__main__':
main()
| mit | Python |
05b2848849553172873600ffd6344fc2b1f12d8e | Substitute a more realistic jurisdiction_id | datamade/pupa,mileswwatkins/pupa,rshorey/pupa,opencivicdata/pupa,mileswwatkins/pupa,influence-usa/pupa,datamade/pupa,influence-usa/pupa,rshorey/pupa,opencivicdata/pupa | example/__init__.py | example/__init__.py | from pupa.scrape import Jurisdiction
from .people import PersonScraper
class Example(Jurisdiction):
jurisdiction_id = 'ocd-jurisdiction/country:us/state:ex/place:example'
def get_metadata(self):
return {
'name': 'Example',
'legislature_name': 'Example Legislature',
'legislature_url': 'http://example.com',
'terms': [{
'name': '2013-2014',
'sessions': ['2013'],
'start_year': 2013,
'end_year': 2014
}],
'provides': ['people'],
'parties': [
{'name': 'Independent' },
{'name': 'Green' },
{'name': 'Bull-Moose'}
],
'session_details': {
'2013': {'_scraped_name': '2013'}
},
'feature_flags': [],
}
def get_scraper(self, term, session, scraper_type):
if scraper_type == 'people':
return PersonScraper
def scrape_session_list(self):
return ['2013']
| from pupa.scrape import Jurisdiction
from .people import PersonScraper
class Example(Jurisdiction):
jurisdiction_id = 'ex'
def get_metadata(self):
return {
'name': 'Example',
'legislature_name': 'Example Legislature',
'legislature_url': 'http://example.com',
'terms': [{
'name': '2013-2014',
'sessions': ['2013'],
'start_year': 2013,
'end_year': 2014
}],
'provides': ['people'],
'parties': [
{'name': 'Independent' },
{'name': 'Green' },
{'name': 'Bull-Moose'}
],
'session_details': {
'2013': {'_scraped_name': '2013'}
},
'feature_flags': [],
}
def get_scraper(self, term, session, scraper_type):
if scraper_type == 'people':
return PersonScraper
def scrape_session_list(self):
return ['2013']
| bsd-3-clause | Python |
f136e140a57078c4f0f665051df74dffb1351f33 | fix loading in run_goal_conditioned_policy.py | vitchyr/rlkit | scripts/run_goal_conditioned_policy.py | scripts/run_goal_conditioned_policy.py | import argparse
import torch
from rlkit.core import logger
from rlkit.samplers.rollout_functions import multitask_rollout
from rlkit.torch import pytorch_util as ptu
from rlkit.envs.vae_wrapper import VAEWrappedEnv
def simulate_policy(args):
data = torch.load(args.file)
policy = data['evaluation/policy']
env = data['evaluation/env']
print("Policy and environment loaded")
if args.gpu:
ptu.set_gpu_mode(True)
policy.to(ptu.device)
if isinstance(env, VAEWrappedEnv) and hasattr(env, 'mode'):
env.mode(args.mode)
if args.enable_render or hasattr(env, 'enable_render'):
# some environments need to be reconfigured for visualization
env.enable_render()
paths = []
while True:
paths.append(multitask_rollout(
env,
policy,
max_path_length=args.H,
render=not args.hide,
observation_key='observation',
desired_goal_key='desired_goal',
))
if hasattr(env, "log_diagnostics"):
env.log_diagnostics(paths)
if hasattr(env, "get_diagnostics"):
for k, v in env.get_diagnostics(paths).items():
logger.record_tabular(k, v)
logger.dump_tabular()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str,
help='path to the snapshot file')
parser.add_argument('--H', type=int, default=300,
help='Max length of rollout')
parser.add_argument('--speedup', type=float, default=10,
help='Speedup')
parser.add_argument('--mode', default='video_env', type=str,
help='env mode')
parser.add_argument('--gpu', action='store_true')
parser.add_argument('--enable_render', action='store_true')
parser.add_argument('--hide', action='store_true')
args = parser.parse_args()
simulate_policy(args)
| import argparse
import pickle
from rlkit.core import logger
from rlkit.samplers.rollout_functions import multitask_rollout
from rlkit.torch import pytorch_util as ptu
from rlkit.envs.vae_wrapper import VAEWrappedEnv
def simulate_policy(args):
data = pickle.load(open(args.file, "rb"))
policy = data['evaluation/policy']
env = data['evaluation/env']
print("Policy and environment loaded")
if args.gpu:
ptu.set_gpu_mode(True)
policy.to(ptu.device)
if isinstance(env, VAEWrappedEnv) and hasattr(env, 'mode'):
env.mode(args.mode)
if args.enable_render or hasattr(env, 'enable_render'):
# some environments need to be reconfigured for visualization
env.enable_render()
paths = []
while True:
paths.append(multitask_rollout(
env,
policy,
max_path_length=args.H,
render=not args.hide,
observation_key='observation',
desired_goal_key='desired_goal',
))
if hasattr(env, "log_diagnostics"):
env.log_diagnostics(paths)
if hasattr(env, "get_diagnostics"):
for k, v in env.get_diagnostics(paths).items():
logger.record_tabular(k, v)
logger.dump_tabular()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str,
help='path to the snapshot file')
parser.add_argument('--H', type=int, default=300,
help='Max length of rollout')
parser.add_argument('--speedup', type=float, default=10,
help='Speedup')
parser.add_argument('--mode', default='video_env', type=str,
help='env mode')
parser.add_argument('--gpu', action='store_true')
parser.add_argument('--enable_render', action='store_true')
parser.add_argument('--hide', action='store_true')
args = parser.parse_args()
simulate_policy(args)
| mit | Python |
3298fff0ded49c21897a7387a7f3093c351ae04f | Use os.exelp to launch psql | lalinsky/acoustid-server,lalinsky/acoustid-server,lalinsky/acoustid-server,lalinsky/acoustid-server | scripts/run_psql.py | scripts/run_psql.py | #!/usr/bin/env python
# Copyright (C) 2011 Lukas Lalinsky
# Distributed under the MIT license, see the LICENSE file for details.
from acoustid.script import run_script
import os
def main(script, opts, args):
os.execlp('psql', 'psql', *script.config.database.create_psql_args())
run_script(main)
| #!/usr/bin/env python
# Copyright (C) 2011 Lukas Lalinsky
# Distributed under the MIT license, see the LICENSE file for details.
from acoustid.script import run_script
import subprocess
def main(script, opts, args):
subprocess.call(['psql'] + script.config.database.create_psql_args())
run_script(main)
| mit | Python |
05e9b7a3cf9dd46a599b9cc0d7b7944c01b5104f | remove FIXME now selenium test is working | simphony/simphony-remote,simphony/simphony-remote,simphony/simphony-remote,simphony/simphony-remote | selenium_tests/test_user_accounting.py | selenium_tests/test_user_accounting.py | from selenium_tests.AdminDriverTest import AdminDriverTest
from selenium.webdriver.common.by import By
import os
class TestUserAccounting(AdminDriverTest):
def test_create_new_entry_button(self):
self.click_first_element_located(By.LINK_TEXT, "Users")
self.click_first_button("Create New Entry")
self.click_modal_footer_button("Cancel")
def test_create_and_remove_user(self):
self.click_first_element_located(By.LINK_TEXT, "Users")
self.click_first_button("Create New Entry")
self.type_text_in_element_located(By.CSS_SELECTOR, ".modal-body > form > div > input", "mrenou")
self.click_modal_footer_button("Submit")
self.wait_until_invisibility_of_element_located(
By.CSS_SELECTOR, ".modal-fade-leave-to")
# Click remove button
self.click_row_action_button("mrenou", "Remove")
self.click_modal_footer_button("Ok")
self.wait_until_invisibility_of_row("mrenou")
def test_admin_name_header_bug(self):
self.click_first_element_located(By.LINK_TEXT, "Users")
self.click_row_action_button("test", "Policies")
self.wait_until_text_inside_element_located(By.CSS_SELECTOR, "span.hidden-xs", "admin")
def test_user_id(self):
self.click_first_element_located(By.LINK_TEXT, "Users")
self.click_row_action_button("test", "Policies")
self.wait_until_visibility_of_row("simphonyproject/simphonic-mayavi")
self.driver.get(os.path.join(
self.base_url,
"user/admin/#/users/36/accounting"
))
self.wait_until_presence_of_element_located(By.CSS_SELECTOR, "div.alert-danger")
self.wait_until_invisibility_of_row("simphonyproject/simphonic-mayavi")
| from selenium_tests.AdminDriverTest import AdminDriverTest
from selenium.webdriver.common.by import By
import os
class TestUserAccounting(AdminDriverTest):
def test_create_new_entry_button(self):
self.click_first_element_located(By.LINK_TEXT, "Users")
self.click_first_button("Create New Entry")
self.click_modal_footer_button("Cancel")
def test_create_and_remove_user(self):
self.click_first_element_located(By.LINK_TEXT, "Users")
self.click_first_button("Create New Entry")
self.type_text_in_element_located(By.CSS_SELECTOR, ".modal-body > form > div > input", "mrenou")
self.click_modal_footer_button("Submit")
self.wait_until_invisibility_of_element_located(
By.CSS_SELECTOR, ".modal-fade-leave-to")
# FIXME: This test is currently broken, since it will report that the
# "Remove" button is obstructed in the browser (although it is not in
# the deployment). It seems to be a selenium issue, which may have been
# previously suppressed by disallowing popups in the Firefox profile
# permissions.sqlite file. However there is no obvious way to access
# this file any more with later versions of Firefox (> 75.0)
# Click remove button
# self.click_row_action_button("mrenou", "Remove")
#
# self.click_modal_footer_button("Ok")
#
# self.wait_until_invisibility_of_row("mrenou")
def test_admin_name_header_bug(self):
self.click_first_element_located(By.LINK_TEXT, "Users")
self.click_row_action_button("test", "Policies")
self.wait_until_text_inside_element_located(By.CSS_SELECTOR, "span.hidden-xs", "admin")
def test_user_id(self):
self.click_first_element_located(By.LINK_TEXT, "Users")
self.click_row_action_button("test", "Policies")
self.wait_until_visibility_of_row("simphonyproject/simphonic-mayavi")
self.driver.get(os.path.join(
self.base_url,
"user/admin/#/users/36/accounting"
))
self.wait_until_presence_of_element_located(By.CSS_SELECTOR, "div.alert-danger")
self.wait_until_invisibility_of_row("simphonyproject/simphonic-mayavi")
| bsd-3-clause | Python |
a3a35f3a967a8a875d64bad34193ab4198abd875 | Update python script to use date time objects directly to schedule things. Partially completed. | ilovepi/NCD,ilovepi/NCD,ilovepi/NCD,ilovepi/NCD | scripts/schedule.py | scripts/schedule.py | import random, fileinput
from datetime import datetime, date, time
from dateutil.rrule import rrule, DAILY
class ScheduleMaker(object):
"""docstring for ScheduleMaker"""
def __init__(self, ip_file, start_date:date, end_date: date, period:datetime, command):
super(ScheduleMaker, self).__init__()
self.ip_file = ip_file
self.start_date = start_date
self.end_date = end_date
self.period = period
self.command = command
# TODO read ip list from file
self.target_list = []
self.output_file_name = "schedule.txt"
def create_schedule(self):
for day in range(self.start_date, self.end_date):
self.write_day(self.schedule_day())
def schedule_day(self, my_date: date):
day = {}
periods = 24 * 60 / self.period
for i in range(periods):
# sample get me a random sample of the minutes in an hour ... up to 60
# instead of putting that in the dictionary, we should put an actual date time
# we should also pass in the day's date, and remove logic to make times consitent for the whole day
# datetime removes a great deal of that calculation
# TODO: fix this function to use actual date objects correctly, will simplify much of the logic
# and relieve a great deal of testing
schedule = random.sample(xrange(0 * i, self.period * i, self.interval), len(self.target_list))
scheduled_times = [ datetime.combine(my_date, time(minute = minutes)) for minutes in schedule ]
day.update(dict(zip(schedule, self.target_list)))
return day
def write_day(self,day):
with open(self.output_file_name) as outFile:
outFile.write(day)
| from datetime import date
from dateutil.rrule import rrule, DAILY
class ClassName(object):
"""docstring for """
def __init__(self, arg):
super(, self).__init__()
self.arg = arg
ScheduleMaker(object):
"""docstring for ScheduleMaker"""
def __init__(self, ip_file, start_date, end_date, period, command):
super(ScheduleMaker, self).__init__()
self.ip_file = ip_file
self.start_date = start_date
self.end_date = end_date
self.period = period
self.command = command
#TODO read ip list from file
self.target_list = []
def create_schedule(self):
for day in range(self.start_date, self.end_date):
write_day(schedule_day())
def schedule_day(self):
day = {}
periods = 24*60/self.period
for i in range(periods):
schedule = random.sample(xrange(0*i,self.period*i, self.interval), self.target_list.len)
day.update(dict(zip(schedule, self.target_list)))
return day
def write_file(self, day, date):
pass
| mit | Python |
5de304b53e5494fc47b157e52603d8bd36cda93b | Add boolean value alongside the boolean field name in icon label | flask-admin/flask-admin,flask-admin/flask-admin,flask-admin/flask-admin,flask-admin/flask-admin | flask_admin/model/typefmt.py | flask_admin/model/typefmt.py | import json
from markupsafe import Markup
from flask_admin._compat import text_type
try:
from enum import Enum
except ImportError:
Enum = None
def null_formatter(view, value, name):
"""
Return `NULL` as the string for `None` value
:param value:
Value to check
"""
return Markup('<i>NULL</i>')
def empty_formatter(view, value, name):
"""
Return empty string for `None` value
:param value:
Value to check
"""
return ''
def bool_formatter(view, value, name):
"""
Return check icon if value is `True` or empty string otherwise.
:param value:
Value to check
"""
glyph = 'ok-circle' if value else 'minus-sign'
fa = 'fa-check-circle' if value else 'fa-minus-circle'
label = f'{name}: {"true" if value else "false"}'
return Markup('<span class="fa %s glyphicon glyphicon-%s icon-%s" title="%s"></span>' % (fa, glyph, glyph, label))
def list_formatter(view, values, name):
"""
Return string with comma separated values
:param values:
Value to check
"""
return u', '.join(text_type(v) for v in values)
def enum_formatter(view, value, name):
"""
Return the name of the enumerated member.
:param value:
Value to check
"""
return value.name
def dict_formatter(view, value, name):
"""
Removes unicode entities when displaying dict as string. Also unescapes
non-ASCII characters stored in the JSON.
:param value:
Dict to convert to string
"""
return json.dumps(value, ensure_ascii=False)
BASE_FORMATTERS = {
type(None): empty_formatter,
bool: bool_formatter,
list: list_formatter,
dict: dict_formatter,
}
EXPORT_FORMATTERS = {
type(None): empty_formatter,
list: list_formatter,
dict: dict_formatter,
}
DETAIL_FORMATTERS = {
type(None): empty_formatter,
list: list_formatter,
dict: dict_formatter,
}
if Enum is not None:
BASE_FORMATTERS[Enum] = enum_formatter
EXPORT_FORMATTERS[Enum] = enum_formatter
DETAIL_FORMATTERS[Enum] = enum_formatter
| import json
from markupsafe import Markup
from flask_admin._compat import text_type
try:
from enum import Enum
except ImportError:
Enum = None
def null_formatter(view, value, name):
"""
Return `NULL` as the string for `None` value
:param value:
Value to check
"""
return Markup('<i>NULL</i>')
def empty_formatter(view, value, name):
"""
Return empty string for `None` value
:param value:
Value to check
"""
return ''
def bool_formatter(view, value, name):
"""
Return check icon if value is `True` or empty string otherwise.
:param value:
Value to check
"""
glyph = 'ok-circle' if value else 'minus-sign'
fa = 'fa-check-circle' if value else 'fa-minus-circle'
return Markup('<span class="fa %s glyphicon glyphicon-%s icon-%s" title="%s"></span>' % (fa, glyph, glyph, name))
def list_formatter(view, values, name):
"""
Return string with comma separated values
:param values:
Value to check
"""
return u', '.join(text_type(v) for v in values)
def enum_formatter(view, value, name):
"""
Return the name of the enumerated member.
:param value:
Value to check
"""
return value.name
def dict_formatter(view, value, name):
"""
Removes unicode entities when displaying dict as string. Also unescapes
non-ASCII characters stored in the JSON.
:param value:
Dict to convert to string
"""
return json.dumps(value, ensure_ascii=False)
BASE_FORMATTERS = {
type(None): empty_formatter,
bool: bool_formatter,
list: list_formatter,
dict: dict_formatter,
}
EXPORT_FORMATTERS = {
type(None): empty_formatter,
list: list_formatter,
dict: dict_formatter,
}
DETAIL_FORMATTERS = {
type(None): empty_formatter,
list: list_formatter,
dict: dict_formatter,
}
if Enum is not None:
BASE_FORMATTERS[Enum] = enum_formatter
EXPORT_FORMATTERS[Enum] = enum_formatter
DETAIL_FORMATTERS[Enum] = enum_formatter
| bsd-3-clause | Python |
63bcd91df2b36bede393bbf9668114354c494f99 | Update urltools.py | dpgaspar/Flask-AppBuilder,zhounanshu/Flask-AppBuilder,qpxu007/Flask-AppBuilder,dpgaspar/Flask-AppBuilder,zhounanshu/Flask-AppBuilder,rpiotti/Flask-AppBuilder,zhounanshu/Flask-AppBuilder,qpxu007/Flask-AppBuilder,rpiotti/Flask-AppBuilder,rpiotti/Flask-AppBuilder,qpxu007/Flask-AppBuilder,rpiotti/Flask-AppBuilder,dpgaspar/Flask-AppBuilder,qpxu007/Flask-AppBuilder,dpgaspar/Flask-AppBuilder,zhounanshu/Flask-AppBuilder | flask_appbuilder/urltools.py | flask_appbuilder/urltools.py | from flask import url_for, request
def get_group_by_args():
"""
Get page arguments for group by
"""
group_by = request.args.get('group_by')
if not group_by: group_by = ''
return group_by
def get_page_args():
"""
Get page arguments, returns a dictionary
{ <VIEW_NAME>: PAGE_NUMBER }
Arguments are passed: page_<VIEW_NAME>=<PAGE_NUMBER>
"""
pages = {}
for arg in request.args:
re_match = re.findall('page_(.*)', arg)
if re_match:
pages[re_match[0]] = int(request.args.get(arg))
return pages
def get_page_size_args():
"""
Get page size arguments, returns an int
{ <VIEW_NAME>: PAGE_NUMBER }
Arguments are passed: psize_<VIEW_NAME>=<PAGE_SIZE>
"""
page_sizes = {}
for arg in request.args:
re_match = re.findall('psize_(.*)', arg)
if re_match:
page_sizes[re_match[0]] = int(request.args.get(arg))
return page_sizes
def get_order_args():
"""
Get order arguments, return a dictionary
{ <VIEW_NAME>: (ORDER_COL, ORDER_DIRECTION) }
Arguments are passed like: _oc_<VIEW_NAME>=<COL_NAME>&_od_<VIEW_NAME>='asc'|'desc'
"""
orders = {}
for arg in request.args:
re_match = re.findall('_oc_(.*)', arg)
if re_match:
orders[re_match[0]] = (request.args.get(arg),request.args.get('_od_' + re_match[0]))
return orders
def get_filter_args(filters):
filters.clear_filters()
for arg in request.args:
re_match = re.findall('_flt_(\d)_(.*)', arg)
if re_match:
filters.add_filter_index(re_match[0][1], int(re_match[0][0]), request.args.get(arg))
|
def get_group_by_args():
"""
Get page arguments for group by
"""
group_by = request.args.get('group_by')
if not group_by: group_by = ''
return group_by
def get_page_args():
"""
Get page arguments, returns a dictionary
{ <VIEW_NAME>: PAGE_NUMBER }
Arguments are passed: page_<VIEW_NAME>=<PAGE_NUMBER>
"""
pages = {}
for arg in request.args:
re_match = re.findall('page_(.*)', arg)
if re_match:
pages[re_match[0]] = int(request.args.get(arg))
return pages
def get_page_size_args():
"""
Get page size arguments, returns an int
{ <VIEW_NAME>: PAGE_NUMBER }
Arguments are passed: psize_<VIEW_NAME>=<PAGE_SIZE>
"""
page_sizes = {}
for arg in request.args:
re_match = re.findall('psize_(.*)', arg)
if re_match:
page_sizes[re_match[0]] = int(request.args.get(arg))
return page_sizes
def get_order_args():
"""
Get order arguments, return a dictionary
{ <VIEW_NAME>: (ORDER_COL, ORDER_DIRECTION) }
Arguments are passed like: _oc_<VIEW_NAME>=<COL_NAME>&_od_<VIEW_NAME>='asc'|'desc'
"""
orders = {}
for arg in request.args:
re_match = re.findall('_oc_(.*)', arg)
if re_match:
orders[re_match[0]] = (request.args.get(arg),request.args.get('_od_' + re_match[0]))
return orders
def get_filter_args(filters):
filters.clear_filters()
for arg in request.args:
re_match = re.findall('_flt_(\d)_(.*)', arg)
if re_match:
filters.add_filter_index(re_match[0][1], int(re_match[0][0]), request.args.get(arg))
| bsd-3-clause | Python |
0355d4233f74ca585c92e34b2547d185a2a3d5fc | Update __init__.py | aspuru-guzik-group/selfies | selfies/__init__.py | selfies/__init__.py | #!/usr/bin/env python
"""
SELFIES: a robust representation of semantically constrained graphs with an
example application in chemistry.
SELFIES (SELF-referencIng Embedded Strings) is a general-purpose,
sequence-based, robust representation of semantically constrained graphs.
It is based on a Chomsky type-2 grammar, augmented with two self-referencing
functions. A main objective is to use SELFIES as direct input into machine
learning models, in particular in generative models, for the generation of
outputs with high validity.
The code presented here is a concrete application of SELFIES in chemistry, for
the robust representation of molecules.
Typical usage example:
import selfies
benzene = "C1=CC=CC=C1"
selfies_benzene = selfies.encoder(benzene)
smiles_benzene = selfies.decoder(selfies_benzene)
For comments, bug reports or feature ideas, please send an email to
mario.krenn@utoronto.ca and alan@aspuru.com.
"""
__version__ = "1.0.3"
__all__ = [
"encoder",
"decoder",
"get_semantic_robust_alphabet",
"get_semantic_constraints",
"set_semantic_constraints",
"len_selfies",
"split_selfies",
"get_alphabet_from_selfies",
"selfies_to_encoding",
"batch_selfies_to_flat_hot",
"encoding_to_selfies",
"batch_flat_hot_to_selfies",
]
from .decoder import decoder
from .encoder import encoder
from .grammar_rules import (
get_semantic_robust_alphabet,
get_semantic_constraints,
set_semantic_constraints,
)
from .utils import (
get_alphabet_from_selfies,
len_selfies,
split_selfies,
selfies_to_encoding,
batch_selfies_to_flat_hot,
encoding_to_selfies,
batch_flat_hot_to_selfies,
)
| #!/usr/bin/env python
"""
SELFIES: a robust representation of semantically constrained graphs with an
example application in chemistry.
SELFIES (SELF-referencIng Embedded Strings) is a general-purpose,
sequence-based, robust representation of semantically constrained graphs.
It is based on a Chomsky type-2 grammar, augmented with two self-referencing
functions. A main objective is to use SELFIES as direct input into machine
learning models, in particular in generative models, for the generation of
outputs with high validity.
The code presented here is a concrete application of SELFIES in chemistry, for
the robust representation of molecules.
Typical usage example:
import selfies
benzene = "C1=CC=CC=C1"
selfies_benzene = selfies.encoder(benzene)
smiles_benzene = selfies.decoder(selfies_benzene)
For comments, bug reports or feature ideas, please send an email to
mario.krenn@utoronto.ca and alan@aspuru.com.
"""
__version__ = "1.0.1"
__all__ = [
"encoder",
"decoder",
"get_semantic_robust_alphabet",
"get_semantic_constraints",
"set_semantic_constraints",
"len_selfies",
"split_selfies",
"get_alphabet_from_selfies",
"selfies_to_encoding",
"batch_selfies_to_flat_hot",
"encoding_to_selfies",
"batch_flat_hot_to_selfies",
]
from .decoder import decoder
from .encoder import encoder
from .grammar_rules import (
get_semantic_robust_alphabet,
get_semantic_constraints,
set_semantic_constraints,
)
from .utils import (
get_alphabet_from_selfies,
len_selfies,
split_selfies,
selfies_to_encoding,
batch_selfies_to_flat_hot,
encoding_to_selfies,
batch_flat_hot_to_selfies,
)
| apache-2.0 | Python |
28c8f6452383e8327918c215dab6bcbcafda5029 | Update ml_restore.py | pgyogesh/MarkLogic,pgyogesh/MarkLogic,pgyogesh/MarkLogic | forest-restore/ml_restore.py | forest-restore/ml_restore.py | from multiprocessing import Pool
from requests.auth import HTTPDigestAuth
import optparse
import requests
import ConfigParser
import logging
# Command line argument parsing
parser = optparse.OptionParser()
parser.add_option("-c","--config-file", dest="configfile", action="store", help="Specify the config file")
parser.add_option("-u","--user", dest="username", action="store", help="Specify the username")
parser.add_option("-w","--password", dest="password", action="store", help="Specify the user password")
parser.add_option("-p","--max-threads",dest="maxthreads", action="store", help="Specify maximum parallel forest restore")
parser.add_option("-v","--verbose", action="store_true", dest="verbose", help="Enable verbose logging")
options, args = parser.parse_args()
# logging
if options.verbose:
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s',level=logging.DEBUG)
else:
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s',level=logging.INFO)
link = "http://localhost:8000/v1/eval"
backup_configs=[]
logging.info("Reading configuration file")
f = open(options.configfile)
for line in f:
backup_configs.append(line)
f.close()
def run_restore(backup_config):
forest_name = backup_config.split(':')[0]
backup_path = backup_config.split(':')[1].rstrip()
logging.info(forest_name + " restore started from " + backup_path)
script = """xquery=
xquery version "1.0-ml";
import module namespace admin = "http://marklogic.com/xdmp/admin"
at "/MarkLogic/admin.xqy";
xdmp:forest-restore(admin:forest-get-id(admin:get-configuration(), \"""" + forest_name +"""\"), \"""" + backup_path + """\")"""
r=requests.post(link, data=script, auth=HTTPDigestAuth(options.username, options.password))
if r.status_code == 200:
logging.info(forest_name + " forest restore COMPLETED")
else:
logging.error(forest_name + " forest restore FAILED")
logging.error(r.text)
logging.info("Restore will continue for other forests")
pool = Pool(processes=int(options.maxthreads))
pool.map(run_restore, backup_configs)
pool.close() # worker processes will terminate when all work already assigned has completed.
pool.join() # to wait for the worker processes to terminate.
logging.info("DONE")
| from multiprocessing import Pool
from requests.auth import HTTPDigestAuth
import optparse
import requests
import ConfigParser
import logging
# Command line argument parsing
parser = optparse.OptionParser()
parser.add_option("-c","--config-file", dest="configfile", action="store", help="Specify the config file")
parser.add_option("-u","--user", dest="username", action="store", help="Specify the username")
parser.add_option("-w","--password", dest="password", action="store", help="Spec")
parser.add_option("-p","--max-threads",dest="maxthreads", action="store", help="Specify maximum parallel forest restore")
parser.add_option("-v","--verbose", action="store_true", dest="verbose", help="Enable verbose logging")
options, args = parser.parse_args()
# logging
if options.verbose:
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s',level=logging.DEBUG)
else:
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s',level=logging.INFO)
link = "http://localhost:8000/v1/eval"
backup_configs=[]
logging.info("Reading configuration file")
f = open(options.configfile)
for line in f:
backup_configs.append(line)
f.close()
def run_restore(backup_config):
forest_name = backup_config.split(':')[0]
backup_path = backup_config.split(':')[1].rstrip()
logging.info(forest_name + " restore started from " + backup_path)
script = """xquery=
xquery version "1.0-ml";
import module namespace admin = "http://marklogic.com/xdmp/admin"
at "/MarkLogic/admin.xqy";
xdmp:forest-restore(admin:forest-get-id(admin:get-configuration(), \"""" + forest_name +"""\"), \"""" + backup_path + """\")"""
r=requests.post(link, data=script, auth=HTTPDigestAuth(options.username, options.password))
if r.status_code == 200:
logging.info(forest_name + " forest restore COMPLETED")
else:
logging.error(forest_name + " forest restore FAILED")
logging.error(r.text)
logging.info("Restore will continue for other forests")
pool = Pool(processes=int(options.maxthreads))
pool.map(run_restore, backup_configs)
pool.close() # worker processes will terminate when all work already assigned has completed.
pool.join() # to wait for the worker processes to terminate.
logging.info("DONE")
| mit | Python |
1f6af4efe674a1891f067af94246cc7e3800889b | 版本号:0.0.6 增加 @cache | vex1023/vxUtils | vxUtils/__init__.py | vxUtils/__init__.py | # endcoding = utf-8
'''
author :
email :
'''
__author__ = 'vex1023'
__email__ = 'vex1023@qq.com'
__version__ = '0.0.6'
__homepages__ = 'https://github.com/vex1023/vxUtils'
__logger__ = 'vxQuant.vxUtils'
from PrettyLogger import *
| # endcoding = utf-8
'''
author :
email :
'''
__author__ = 'vex1023'
__email__ = 'vex1023@qq.com'
__version__ = '0.0.5'
__homepages__ = 'https://github.com/vex1023/vxUtils'
__logger__ = 'vxQuant.vxUtils'
from PrettyLogger import *
| mit | Python |
d5563c097f0a738ccc7df4e096318ddef9a979f4 | Increase queue job timeout | zooniverse/aggregation,zooniverse/aggregation,zooniverse/aggregation | engine/web_api.py | engine/web_api.py | #! /usr/bin/env python
from flask import Flask, make_response, request
from rq import Queue
from load_redis import configure_redis
from jobs import aggregate
import os
import json
import logging
app = Flask(__name__)
env = os.getenv('FLASK_ENV', 'production')
#30 mins - http://python-rq.org/docs/results/
q = Queue('default', connection=configure_redis(env), default_timeout=14400)
apis = {
'development': "http://"+str(os.getenv('HOST_IP', '172.17.42.1'))+":3000",
'staging': "https://panoptes-staging.zooniverse.org",
'production': "https://panoptes.zooniverse.org"
}
api_root = apis[env]
@app.route('/',methods=['POST'])
def start_aggregation():
try:
body = request.get_json()
project = body['project_id']
href = body['medium_href']
metadata = body['metadata']
token = body['token']
q.enqueue(aggregate, project, token, api_root+"/api"+href, metadata, env)
resp = make_response(json.dumps({'queued': True}), 200)
resp.headers['Content-Type'] = 'application/json'
return resp
except KeyError:
resp = make_response(json.dumps({'error': [{'messages': "Missing Required Key"}]}), 422)
resp.headers['Content-Type'] = 'application/json'
return resp
@app.before_first_request
def setup_logging():
if not app.debug:
import logging
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
app.logger.addHandler(handler)
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=False)
| #! /usr/bin/env python
from flask import Flask, make_response, request
from rq import Queue
from load_redis import configure_redis
from jobs import aggregate
import os
import json
import logging
app = Flask(__name__)
env = os.getenv('FLASK_ENV', 'production')
#30 mins - http://python-rq.org/docs/results/
q = Queue('default', connection=configure_redis(env), default_timeout=7200)
apis = {
'development': "http://"+str(os.getenv('HOST_IP', '172.17.42.1'))+":3000",
'staging': "https://panoptes-staging.zooniverse.org",
'production': "https://panoptes.zooniverse.org"
}
api_root = apis[env]
@app.route('/',methods=['POST'])
def start_aggregation():
try:
body = request.get_json()
project = body['project_id']
href = body['medium_href']
metadata = body['metadata']
token = body['token']
q.enqueue(aggregate, project, token, api_root+"/api"+href, metadata, env)
resp = make_response(json.dumps({'queued': True}), 200)
resp.headers['Content-Type'] = 'application/json'
return resp
except KeyError:
resp = make_response(json.dumps({'error': [{'messages': "Missing Required Key"}]}), 422)
resp.headers['Content-Type'] = 'application/json'
return resp
@app.before_first_request
def setup_logging():
if not app.debug:
import logging
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
app.logger.addHandler(handler)
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=False)
| apache-2.0 | Python |
7c3c333bd7fada179b7fcd9cba08eefd7b3b42cf | check in parser sketch | theonion/django-bulbs,theonion/django-bulbs,theonion/django-bulbs,theonion/django-bulbs,theonion/django-bulbs | bulbs/instant_articles/parser.py | bulbs/instant_articles/parser.py | class InstantArticleParser():
def __init__(self, intermediate):
pass
def generate_body(self, intermediate):
body = ""
for key, body in intermediate.iteritems():
body.append(parse_item(key, body))
def parse_item(self, key, body):
if key == "text":
return body
elif key == "betty":
return self.parse_betty(body)
elif key == "facebook":
return self.parse_facebook(body)
elif key == "twitter":
return self.parse_twitter(body)
elif key == "instagram":
return self.parse_instagram(body)
elif key == "onion_video":
return self.parse_onion_video(body)
elif key == "vimeo":
return self.parse_vimeo(body)
elif key == "youtube":
return self.parse_youtube(body)
elif key == "soundcloud":
return self.parse_soundcloud(body)
else:
raise Exception("Key not implemented")
def parse_betty(self, body):
pass
def parse_facebook(self, body):
pass
def parse_twitter(self, body):
pass
def parse_instagram(self, body):
pass
def parse_onion_video(self, body):
pass
def parse_vimeo(self, body):
pass
def parse_youtube(self, body):
pass
def parse_soundcloud(self, body):
pass
| class ContentTransformer(HTMLParser):
pass
class FacebookTransformer(ContentTransformer):
pass
| mit | Python |
10ce81ed374097009d710bb3cc8dcd70c1c3e2e4 | Update core.frameworks __init__.py | OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft | syft/core/frameworks/__init__.py | syft/core/frameworks/__init__.py | from syft.core import torch, tensorflow, numpy, encode, pandas
__all__ = ["torch", "tensorflow", "numpy", "encode", "pandas"]
| from . import torch
from . import tensorflow
from . import numpy
from . import encode
from . import pandas
__all__ = ["torch", "tensorflow", "numpy", "encode", "pandas"]
| apache-2.0 | Python |
64185e6890f4310a01755671a4d17238ec7ae6d2 | Add method for attach stars count to projects queryset. | jeffdwyatt/taiga-back,astronaut1712/taiga-back,seanchen/taiga-back,Zaneh-/bearded-tribble-back,gauravjns/taiga-back,bdang2012/taiga-back-casting,forging2012/taiga-back,joshisa/taiga-back,taigaio/taiga-back,CMLL/taiga-back,Tigerwhit4/taiga-back,EvgeneOskin/taiga-back,EvgeneOskin/taiga-back,astronaut1712/taiga-back,WALR/taiga-back,bdang2012/taiga-back-casting,19kestier/taiga-back,obimod/taiga-back,frt-arch/taiga-back,xdevelsistemas/taiga-back-community,Tigerwhit4/taiga-back,EvgeneOskin/taiga-back,gam-phon/taiga-back,obimod/taiga-back,dayatz/taiga-back,dycodedev/taiga-back,joshisa/taiga-back,CoolCloud/taiga-back,CMLL/taiga-back,rajiteh/taiga-back,Zaneh-/bearded-tribble-back,joshisa/taiga-back,Zaneh-/bearded-tribble-back,crr0004/taiga-back,coopsource/taiga-back,xdevelsistemas/taiga-back-community,taigaio/taiga-back,obimod/taiga-back,gauravjns/taiga-back,dayatz/taiga-back,joshisa/taiga-back,gauravjns/taiga-back,forging2012/taiga-back,WALR/taiga-back,astronaut1712/taiga-back,19kestier/taiga-back,bdang2012/taiga-back-casting,astagi/taiga-back,Rademade/taiga-back,Rademade/taiga-back,gam-phon/taiga-back,crr0004/taiga-back,EvgeneOskin/taiga-back,crr0004/taiga-back,astagi/taiga-back,forging2012/taiga-back,coopsource/taiga-back,CMLL/taiga-back,WALR/taiga-back,gam-phon/taiga-back,CoolCloud/taiga-back,Rademade/taiga-back,astronaut1712/taiga-back,seanchen/taiga-back,CoolCloud/taiga-back,forging2012/taiga-back,gauravjns/taiga-back,gam-phon/taiga-back,seanchen/taiga-back,jeffdwyatt/taiga-back,CoolCloud/taiga-back,jeffdwyatt/taiga-back,dycodedev/taiga-back,Rademade/taiga-back,dycodedev/taiga-back,dayatz/taiga-back,rajiteh/taiga-back,coopsource/taiga-back,xdevelsistemas/taiga-back-community,WALR/taiga-back,Tigerwhit4/taiga-back,coopsource/taiga-back,taigaio/taiga-back,Rademade/taiga-back,obimod/taiga-back,astagi/taiga-back,frt-arch/taiga-back,seanchen/taiga-back,Tigerwhit4/taiga-back,frt-arch/taiga-back,CMLL/taiga-back,crr0004/taiga-back,jeffdwyatt/taiga-back,rajiteh/taiga-back,dycodedev/taiga-back,rajiteh/taiga-back,19kestier/taiga-back,bdang2012/taiga-back-casting,astagi/taiga-back | taiga/projects/stars/services.py | taiga/projects/stars/services.py | from django.db.models import F
from django.db.transaction import atomic
from django.db.models.loading import get_model
from django.contrib.auth import get_user_model
from .models import Fan, Stars
def star(project, user):
"""Star a project for an user.
If the user has already starred the project nothing happends so this function can be considered
idempotent.
:param project: :class:`~taiga.projects.models.Project` instance.
:param user: :class:`~taiga.users.models.User` instance.
"""
with atomic():
fan, created = Fan.objects.get_or_create(project=project,
user=user)
if not created:
return
stars, _ = Stars.objects.get_or_create(project=project)
stars.count = F('count') + 1
stars.save()
def unstar(project, user):
"""
Unstar a project for an user.
If the user has not starred the project nothing happens so this function can be considered
idempotent.
:param project: :class:`~taiga.projects.models.Project` instance.
:param user: :class:`~taiga.users.models.User` instance.
"""
with atomic():
qs = Fan.objects.filter(project=project, user=user)
if not qs.exists():
return
qs.delete()
stars, _ = Stars.objects.get_or_create(project=project)
stars.count = F('count') - 1
stars.save()
def get_stars(project):
"""
Get the count of stars a project have.
"""
instance, _ = Stars.objects.get_or_create(project=project)
return instance.count
def get_fans(project_or_id):
"""Get the fans a project have."""
qs = get_user_model().objects.get_queryset()
if isinstance(project_or_id, int):
qs = qs.filter(fans__project_id=project_or_id)
else:
qs = qs.filter(fans__project=project_or_id)
return qs
def get_starred(user_or_id):
"""Get the projects an user has starred."""
project_model = get_model("projects", "Project")
qs = project_model.objects.get_queryset()
if isinstance(user_or_id, int):
qs = qs.filter(fans__user_id=user_or_id)
else:
qs = qs.filter(fans__user=user_or_id)
return qs
def attach_startscount_to_queryset(queryset):
"""
Attach stars count to each object of projects queryset.
Because of lazynes of starts objects creation, this makes
much simple and more efficient way to access to project
starts number.
(The other way was be do it on serializer with some try/except
blocks and additional queryes)
"""
sql = ("SELECT coalesce(stars_stars.count, 0) FROM stars_stars "
"WHERE stars_stars.project_id = projects_project.id ")
qs = queryset.extra(select={"starts_count": sql})
return qs
| from django.db.models import F
from django.db.transaction import atomic
from django.db.models.loading import get_model
from django.contrib.auth import get_user_model
from .models import Fan, Stars
def star(project, user):
"""Star a project for an user.
If the user has already starred the project nothing happends so this function can be considered
idempotent.
:param project: :class:`~taiga.projects.models.Project` instance.
:param user: :class:`~taiga.users.models.User` instance.
"""
with atomic():
fan, created = Fan.objects.get_or_create(project=project,
user=user)
if not created:
return
stars, _ = Stars.objects.get_or_create(project=project)
stars.count = F('count') + 1
stars.save()
def unstar(project, user):
"""
Unstar a project for an user.
If the user has not starred the project nothing happens so this function can be considered
idempotent.
:param project: :class:`~taiga.projects.models.Project` instance.
:param user: :class:`~taiga.users.models.User` instance.
"""
with atomic():
qs = Fan.objects.filter(project=project, user=user)
if not qs.exists():
return
qs.delete()
stars, _ = Stars.objects.get_or_create(project=project)
stars.count = F('count') - 1
stars.save()
def get_stars(project):
"""
Get the count of stars a project have.
"""
instance, _ = Stars.objects.get_or_create(project=project)
return instance.count
def get_fans(project_or_id):
"""Get the fans a project have."""
qs = get_user_model().objects.get_queryset()
if isinstance(project_or_id, int):
qs = qs.filter(fans__project_id=project_or_id)
else:
qs = qs.filter(fans__project=project_or_id)
return qs
def get_starred(user_or_id):
"""Get the projects an user has starred."""
project_model = get_model("projects", "Project")
qs = project_model.objects.get_queryset()
if isinstance(user_or_id, int):
qs = qs.filter(fans__user_id=user_or_id)
else:
qs = qs.filter(fans__user=user_or_id)
return qs
| agpl-3.0 | Python |
24151ca5d19c3743854cacf8bccb1709a7d46a87 | Format using formatting tool | evanepio/dotmanca,evanepio/dotmanca,evanepio/dotmanca | gallery/models.py | gallery/models.py | from django.db import models
from django.urls import reverse
from dotmanca.storage import OverwriteStorage
class Gallery(models.Model):
name = models.CharField(max_length=50)
slug = models.SlugField(unique=True)
added_timestamp = models.DateTimeField(auto_now_add=True)
description = models.TextField(blank=True)
sort_order = models.IntegerField()
def get_absolute_url(self):
kwargs = {"slug": self.slug}
return reverse("gallery:gallery", kwargs=kwargs)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = "galleries"
def gallery_image_upload_to(instance, file_name):
file_extension = file_name.split(".")[-1]
return f"galleries/{instance.gallery.slug}/{instance.slug}.{file_extension}"
class GalleryImage(models.Model):
gallery = models.ForeignKey(Gallery, on_delete=models.CASCADE)
name = models.CharField(max_length=50)
slug = models.SlugField()
sort_order = models.IntegerField()
the_image = models.ImageField(
null=False,
blank=False,
upload_to=gallery_image_upload_to,
storage=OverwriteStorage(),
)
added_timestamp = models.DateTimeField(auto_now_add=True)
description = models.TextField(blank=True)
def get_absolute_url(self):
kwargs = {"gallery_slug": self.gallery.slug, "slug": self.slug}
return reverse("gallery:gallery_image", kwargs=kwargs)
def __str__(self):
return self.name
class Meta:
unique_together = ("gallery", "slug")
| from django.db import models
from django.urls import reverse
from dotmanca.storage import OverwriteStorage
class Gallery(models.Model):
name = models.CharField(max_length=50)
slug = models.SlugField(unique=True)
added_timestamp = models.DateTimeField(auto_now_add=True)
description = models.TextField(blank=True)
sort_order = models.IntegerField()
def get_absolute_url(self):
kwargs = {'slug': self.slug}
return reverse('gallery:gallery', kwargs=kwargs)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'galleries'
def gallery_image_upload_to(instance, file_name):
file_extension = file_name.split(".")[-1]
return F'galleries/{instance.gallery.slug}/{instance.slug}.{file_extension}'
class GalleryImage(models.Model):
gallery = models.ForeignKey(Gallery, on_delete=models.CASCADE)
name = models.CharField(max_length=50)
slug = models.SlugField()
sort_order = models.IntegerField()
the_image = models.ImageField(null=False, blank=False, upload_to=gallery_image_upload_to,
storage=OverwriteStorage())
added_timestamp = models.DateTimeField(auto_now_add=True)
description = models.TextField(blank=True)
def get_absolute_url(self):
kwargs = {'gallery_slug': self.gallery.slug, 'slug': self.slug}
return reverse('gallery:gallery_image', kwargs=kwargs)
def __str__(self):
return self.name
class Meta:
unique_together = ('gallery', 'slug')
| mit | Python |
08eca76203eee2ddf2887c11d067e05b2c2d70f7 | Add allow-legacy-extension-manifests flag so that chrome can load v1 chromoting webapp | dednal/chromium.src,hgl888/chromium-crosswalk,Just-D/chromium-1,M4sse/chromium.src,axinging/chromium-crosswalk,nacl-webkit/chrome_deps,pozdnyakov/chromium-crosswalk,krieger-od/nwjs_chromium.src,ltilve/chromium,fujunwei/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,crosswalk-project/chromium-crosswalk-efl,Jonekee/chromium.src,pozdnyakov/chromium-crosswalk,pozdnyakov/chromium-crosswalk,markYoungH/chromium.src,Pluto-tv/chromium-crosswalk,ChromiumWebApps/chromium,mohamed--abdel-maksoud/chromium.src,timopulkkinen/BubbleFish,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,mogoweb/chromium-crosswalk,fujunwei/chromium-crosswalk,patrickm/chromium.src,zcbenz/cefode-chromium,axinging/chromium-crosswalk,ltilve/chromium,mohamed--abdel-maksoud/chromium.src,hujiajie/pa-chromium,mohamed--abdel-maksoud/chromium.src,fujunwei/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,junmin-zhu/chromium-rivertrail,hujiajie/pa-chromium,anirudhSK/chromium,mohamed--abdel-maksoud/chromium.src,M4sse/chromium.src,TheTypoMaster/chromium-crosswalk,anirudhSK/chromium,Pluto-tv/chromium-crosswalk,timopulkkinen/BubbleFish,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk,mogoweb/chromium-crosswalk,dednal/chromium.src,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,Chilledheart/chromium,krieger-od/nwjs_chromium.src,markYoungH/chromium.src,Chilledheart/chromium,Jonekee/chromium.src,ltilve/chromium,Fireblend/chromium-crosswalk,jaruba/chromium.src,Jonekee/chromium.src,ltilve/chromium,Jonekee/chromium.src,chuan9/chromium-crosswalk,timopulkkinen/BubbleFish,ondra-novak/chromium.src,M4sse/chromium.src,crosswalk-project/chromium-crosswalk-efl,jaruba/chromium.src,jaruba/chromium.src,junmin-zhu/chromium-rivertrail,littlstar/chromium.src,Just-D/chromium-1,nacl-webkit/chrome_deps,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,patrickm/chromium.src,dushu1203/chromium.src,dushu1203/chromium.src,dednal/chromium.src,anirudhSK/chromium,mogoweb/chromium-crosswalk,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,Chilledheart/chromium,markYoungH/chromium.src,bright-sparks/chromium-spacewalk,krieger-od/nwjs_chromium.src,nacl-webkit/chrome_deps,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,pozdnyakov/chromium-crosswalk,dushu1203/chromium.src,pozdnyakov/chromium-crosswalk,patrickm/chromium.src,hgl888/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,timopulkkinen/BubbleFish,dednal/chromium.src,junmin-zhu/chromium-rivertrail,PeterWangIntel/chromium-crosswalk,Fireblend/chromium-crosswalk,zcbenz/cefode-chromium,jaruba/chromium.src,ChromiumWebApps/chromium,dednal/chromium.src,dushu1203/chromium.src,anirudhSK/chromium,ltilve/chromium,mohamed--abdel-maksoud/chromium.src,krieger-od/nwjs_chromium.src,PeterWangIntel/chromium-crosswalk,chuan9/chromium-crosswalk,nacl-webkit/chrome_deps,axinging/chromium-crosswalk,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,timopulkkinen/BubbleFish,hgl888/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,patrickm/chromium.src,Just-D/chromium-1,hgl888/chromium-crosswalk,Fireblend/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,dednal/chromium.src,M4sse/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk,fujunwei/chromium-crosswalk,timopulkkinen/BubbleFish,ChromiumWebApps/chromium,mogoweb/chromium-crosswalk,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,hujiajie/pa-chromium,dushu1203/chromium.src,jaruba/chromium.src,anirudhSK/chromium,ChromiumWebApps/chromium,jaruba/chromium.src,pozdnyakov/chromium-crosswalk,markYoungH/chromium.src,ondra-novak/chromium.src,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk,ChromiumWebApps/chromium,patrickm/chromium.src,PeterWangIntel/chromium-crosswalk,anirudhSK/chromium,markYoungH/chromium.src,zcbenz/cefode-chromium,Pluto-tv/chromium-crosswalk,zcbenz/cefode-chromium,fujunwei/chromium-crosswalk,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,Just-D/chromium-1,ChromiumWebApps/chromium,Chilledheart/chromium,nacl-webkit/chrome_deps,jaruba/chromium.src,Jonekee/chromium.src,ondra-novak/chromium.src,Just-D/chromium-1,Fireblend/chromium-crosswalk,timopulkkinen/BubbleFish,hgl888/chromium-crosswalk,Chilledheart/chromium,ondra-novak/chromium.src,junmin-zhu/chromium-rivertrail,jaruba/chromium.src,mogoweb/chromium-crosswalk,axinging/chromium-crosswalk,jaruba/chromium.src,anirudhSK/chromium,crosswalk-project/chromium-crosswalk-efl,junmin-zhu/chromium-rivertrail,crosswalk-project/chromium-crosswalk-efl,chuan9/chromium-crosswalk,bright-sparks/chromium-spacewalk,Pluto-tv/chromium-crosswalk,mogoweb/chromium-crosswalk,zcbenz/cefode-chromium,hujiajie/pa-chromium,TheTypoMaster/chromium-crosswalk,chuan9/chromium-crosswalk,ondra-novak/chromium.src,markYoungH/chromium.src,nacl-webkit/chrome_deps,junmin-zhu/chromium-rivertrail,anirudhSK/chromium,littlstar/chromium.src,Just-D/chromium-1,junmin-zhu/chromium-rivertrail,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,ChromiumWebApps/chromium,dednal/chromium.src,Pluto-tv/chromium-crosswalk,bright-sparks/chromium-spacewalk,ondra-novak/chromium.src,Chilledheart/chromium,patrickm/chromium.src,junmin-zhu/chromium-rivertrail,patrickm/chromium.src,timopulkkinen/BubbleFish,pozdnyakov/chromium-crosswalk,junmin-zhu/chromium-rivertrail,mogoweb/chromium-crosswalk,chuan9/chromium-crosswalk,ondra-novak/chromium.src,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk,bright-sparks/chromium-spacewalk,timopulkkinen/BubbleFish,pozdnyakov/chromium-crosswalk,Fireblend/chromium-crosswalk,dednal/chromium.src,patrickm/chromium.src,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,TheTypoMaster/chromium-crosswalk,hujiajie/pa-chromium,ChromiumWebApps/chromium,mogoweb/chromium-crosswalk,markYoungH/chromium.src,Jonekee/chromium.src,hujiajie/pa-chromium,ChromiumWebApps/chromium,TheTypoMaster/chromium-crosswalk,ltilve/chromium,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk-efl,hujiajie/pa-chromium,ChromiumWebApps/chromium,littlstar/chromium.src,M4sse/chromium.src,pozdnyakov/chromium-crosswalk,markYoungH/chromium.src,littlstar/chromium.src,hgl888/chromium-crosswalk-efl,dushu1203/chromium.src,fujunwei/chromium-crosswalk,krieger-od/nwjs_chromium.src,junmin-zhu/chromium-rivertrail,TheTypoMaster/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,zcbenz/cefode-chromium,TheTypoMaster/chromium-crosswalk,Jonekee/chromium.src,timopulkkinen/BubbleFish,M4sse/chromium.src,littlstar/chromium.src,Chilledheart/chromium,ondra-novak/chromium.src,nacl-webkit/chrome_deps,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,Fireblend/chromium-crosswalk,dushu1203/chromium.src,zcbenz/cefode-chromium,bright-sparks/chromium-spacewalk,ondra-novak/chromium.src,hujiajie/pa-chromium,dushu1203/chromium.src,PeterWangIntel/chromium-crosswalk,hujiajie/pa-chromium,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,nacl-webkit/chrome_deps,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,junmin-zhu/chromium-rivertrail,pozdnyakov/chromium-crosswalk,hujiajie/pa-chromium,zcbenz/cefode-chromium,Chilledheart/chromium,ltilve/chromium,Jonekee/chromium.src,dednal/chromium.src,PeterWangIntel/chromium-crosswalk,hujiajie/pa-chromium,zcbenz/cefode-chromium,hgl888/chromium-crosswalk-efl,mogoweb/chromium-crosswalk,anirudhSK/chromium,Just-D/chromium-1,nacl-webkit/chrome_deps,crosswalk-project/chromium-crosswalk-efl,dednal/chromium.src,Just-D/chromium-1,Jonekee/chromium.src,chuan9/chromium-crosswalk,ChromiumWebApps/chromium,markYoungH/chromium.src,anirudhSK/chromium,littlstar/chromium.src,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk-efl,M4sse/chromium.src,M4sse/chromium.src,ltilve/chromium,M4sse/chromium.src,krieger-od/nwjs_chromium.src,M4sse/chromium.src,zcbenz/cefode-chromium,pozdnyakov/chromium-crosswalk,markYoungH/chromium.src,Just-D/chromium-1,TheTypoMaster/chromium-crosswalk,ChromiumWebApps/chromium,dushu1203/chromium.src,patrickm/chromium.src,markYoungH/chromium.src,Fireblend/chromium-crosswalk,M4sse/chromium.src,anirudhSK/chromium,dednal/chromium.src,mogoweb/chromium-crosswalk,zcbenz/cefode-chromium,timopulkkinen/BubbleFish,anirudhSK/chromium,nacl-webkit/chrome_deps,hgl888/chromium-crosswalk,nacl-webkit/chrome_deps,chuan9/chromium-crosswalk,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,bright-sparks/chromium-spacewalk,ltilve/chromium,dushu1203/chromium.src,jaruba/chromium.src | chrome/test/functional/chromoting/chromoting_base.py | chrome/test/functional/chromoting/chromoting_base.py | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Common imports, setup, etc for chromoting tests."""
import os
def _SetupPaths():
"""Add chrome/test/functional to sys.path for importing pyauto_functional"""
functional_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.append(functional_dir)
_SetupPaths()
import pyauto_functional # Must come before chromoting and pyauto.
from pyauto_functional import Main
import pyauto
import chromotinglib
class ChromotingBase(chromotinglib.ChromotingMixIn, pyauto.PyUITest):
"""Chromoting pyauto test base class.
The following member variables can be used in the child classes:
client_local: True if the client is on the same machines as host
host: The chromoting host side, instance of ChromotingBase
client: The chromoting client side, intance of ChromotingBase
client_tab_index: The tab index to the chromoting client tab
"""
def __init__(self, methodName):
pyauto.PyUITest.__init__(self, methodName)
self.client_local = (self.remote == None)
self.host = self
self.client = self if self.client_local else self.remote
self.client_tab_index = 2 if self.client_local else 1
def ExtraChromeFlags(self):
"""Add extra flags for chromoting testing
Add --allow-nacl-socket-api to connect chromoting successfully.
Add --allow-legacy-extension-manifests so that chrome can load
chromoting webapp in v1 format.
"""
extra_chrome_flags = [
'--allow-nacl-socket-api=*',
'--allow-legacy-extension-manifests'
]
return pyauto.PyUITest.ExtraChromeFlags(self) + extra_chrome_flags | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Common imports, setup, etc for chromoting tests."""
import os
def _SetupPaths():
"""Add chrome/test/functional to sys.path for importing pyauto_functional"""
functional_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.append(functional_dir)
_SetupPaths()
import pyauto_functional # Must come before chromoting and pyauto.
from pyauto_functional import Main
import pyauto
import chromotinglib
class ChromotingBase(chromotinglib.ChromotingMixIn, pyauto.PyUITest):
"""Chromoting pyauto test base class.
The following member variables can be used in the child classes:
client_local: True if the client is on the same machines as host
host: The chromoting host side, instance of ChromotingBase
client: The chromoting client side, intance of ChromotingBase
client_tab_index: The tab index to the chromoting client tab
"""
def __init__(self, methodName):
pyauto.PyUITest.__init__(self, methodName)
self.client_local = (self.remote == None)
self.host = self
self.client = self if self.client_local else self.remote
self.client_tab_index = 2 if self.client_local else 1
def ExtraChromeFlags(self):
"""Add --allow-nacl-socket-api to connect chromoting successfully."""
extra_chrome_flags = ['--allow-nacl-socket-api=*',]
return pyauto.PyUITest.ExtraChromeFlags(self) + extra_chrome_flags | bsd-3-clause | Python |
b067b251c4d37ef9cf876a33b470733fce046807 | add settings dir to system path for wsgi | whitews/BAMA_Analytics,whitews/BAMA_Analytics,whitews/BAMA_Analytics | BAMA_Analytics/wsgi.py | BAMA_Analytics/wsgi.py | """
WSGI config for BAMA_Analytics project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "BAMA_Analytics.settings")
paths = [
'/srv/django-projects/BAMA_Analytics',
'/srv/django-projects/BAMA_Analytics/BAMA_Analytics'
]
for path in paths:
if path not in sys.path:
sys.path.append(path)
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| """
WSGI config for BAMA_Analytics project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "BAMA_Analytics.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| bsd-2-clause | Python |
aacb587ba875ba2a158e349b3191ead8adb9b515 | refactor to make single case type interface public | qedsoftware/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/userreports/app_manager.py | corehq/apps/userreports/app_manager.py | from corehq.apps.app_manager.util import ParentCasePropertyBuilder
from corehq.apps.userreports.models import DataSourceConfiguration
def get_case_data_sources(app):
"""
Returns a dict mapping case types to DataSourceConfiguration objects that have
the default set of case properties built in.
"""
return {case_type: get_case_data_source(app, case_type) for case_type in app.get_case_types() if case_type}
def get_case_data_source(app, case_type):
def _make_indicator(property_name):
return {
"type": "raw",
"column_id": property_name,
"datatype": "string",
'property_name': property_name,
"display_name": property_name,
}
property_builder = ParentCasePropertyBuilder(app)
return DataSourceConfiguration(
domain=app.domain,
referenced_doc_type='CommCareCase',
table_id=case_type,
display_name=case_type,
configured_filter={
'type': 'property_match',
'property_name': 'type',
'property_value': case_type,
},
configured_indicators=[
_make_indicator(property) for property in property_builder.get_properties(case_type)
]
)
| from corehq.apps.app_manager.util import ParentCasePropertyBuilder
from corehq.apps.userreports.models import DataSourceConfiguration
def get_case_data_sources(app):
"""
Returns a dict mapping case types to DataSourceConfiguration objects that have
the default set of case properties built in.
"""
def _get_config_for_type(app, case_type):
def _make_indicator(property_name):
return {
"type": "raw",
"column_id": property_name,
"datatype": "string",
'property_name': property_name,
"display_name": property_name,
}
property_builder = ParentCasePropertyBuilder(app)
return DataSourceConfiguration(
domain=app.domain,
referenced_doc_type='CommCareCase',
table_id=case_type,
display_name=case_type,
configured_filter={
'type': 'property_match',
'property_name': 'type',
'property_value': case_type,
},
configured_indicators=[
_make_indicator(property) for property in property_builder.get_properties(case_type)
]
)
return {case_type: _get_config_for_type(app, case_type) for case_type in app.get_case_types() if case_type}
| bsd-3-clause | Python |
e10f2b85775412dd60f11deea6e7a7f8b84edfbe | Add name for entry view URL | uranusjr/django-buysafe | buysafe/urls.py | buysafe/urls.py | from django.conf.urls import patterns, url
urlpatterns = patterns(
'buysafe.views',
url(r'^entry/(?P<order_id>\d+)/$', 'entry', name='buysafe_pay'),
(r'^start/$', 'start'),
(r'^success/(?P<payment_type>[01])/$', 'success'),
(r'^fail/(?P<payment_type>[01])/$', 'fail'),
(r'^check/(?P<payment_type>[01])/$', 'check')
)
| from django.conf.urls import patterns
urlpatterns = patterns(
'buysafe.views',
(r'^entry/(?P<order_id>\d+)/$', 'entry'),
(r'^start/$', 'start'),
(r'^success/(?P<payment_type>[01])/$', 'success'),
(r'^fail/(?P<payment_type>[01])/$', 'fail'),
(r'^check/(?P<payment_type>[01])/$', 'check')
)
| bsd-3-clause | Python |
94ee0506364be2ed58e793c4d237bb6c0da7f2d2 | fix error on 0D label array | rufrozen/cudarray,rufrozen/cudarray,andersbll/cudarray,bssrdf/cudarray,rufrozen/cudarray,bssrdf/cudarray,andersbll/cudarray | cudarray/numpy_backend/nnet/special.py | cudarray/numpy_backend/nnet/special.py | import numpy as np
def softmax(X):
e = np.exp(X - np.amax(X, axis=1, keepdims=True))
return e/np.sum(e, axis=1, keepdims=True)
def categorical_cross_entropy(y_pred, y_true, eps=1e-15):
# Assumes one-hot encoding.
y_pred = np.clip(y_pred, eps, 1 - eps)
# XXX: do we need to normalize?
y_pred /= y_pred.sum(axis=1, keepdims=True)
loss = -np.sum(y_true * np.log(y_pred), axis=1)
return loss
def one_hot_encode(labels, n_classes, out=None):
out_shape = (labels.size, n_classes)
if labels.dtype != np.dtype(int):
raise ValueError('labels.dtype must be int')
if out is None:
out = np.empty(out_shape)
else:
if out.shape != out_shape:
raise ValueError('shape mismatch')
out.fill(0)
if labels.size == 1:
out[0, labels] = 1
else:
for c in range(n_classes):
out[labels == c, c] = 1
return out
def one_hot_decode(one_hot, out=None):
out_shape = (one_hot.shape[0],)
if out is None:
out = np.empty(out_shape, dtype=np.dtype(int))
else:
if out.dtype != np.dtype(int):
raise ValueError('out.dtype must be int')
if out.shape != out_shape:
raise ValueError('shape mismatch')
result = np.argmax(one_hot, axis=1)
np.copyto(out, result)
return out
| import numpy as np
def softmax(X):
e = np.exp(X - np.amax(X, axis=1, keepdims=True))
return e/np.sum(e, axis=1, keepdims=True)
def categorical_cross_entropy(y_pred, y_true, eps=1e-15):
# Assumes one-hot encoding.
y_pred = np.clip(y_pred, eps, 1 - eps)
# XXX: do we need to normalize?
y_pred /= y_pred.sum(axis=1, keepdims=True)
loss = -np.sum(y_true * np.log(y_pred), axis=1)
return loss
def one_hot_encode(labels, n_classes, out=None):
out_shape = (labels.size, n_classes)
if labels.dtype != np.dtype(int):
raise ValueError('labels.dtype must be int')
if out is None:
out = np.empty(out_shape)
else:
if out.shape != out_shape:
raise ValueError('shape mismatch')
out.fill(0)
for c in range(n_classes):
out[labels == c, c] = 1
return out
def one_hot_decode(one_hot, out=None):
out_shape = (one_hot.shape[0],)
if out is None:
out = np.empty(out_shape, dtype=np.dtype(int))
else:
if out.dtype != np.dtype(int):
raise ValueError('out.dtype must be int')
if out.shape != out_shape:
raise ValueError('shape mismatch')
result = np.argmax(one_hot, axis=1)
np.copyto(out, result)
return out
| mit | Python |
d827c8efd57c10750c33072920fd59ccb5dda9fe | Fix related lookup error in admin. | cdriehuys/chmvh-website,cdriehuys/chmvh-website,cdriehuys/chmvh-website | chmvh_website/resources/admin.py | chmvh_website/resources/admin.py | from django.contrib import admin
from resources import models
class CategoryAdmin(admin.ModelAdmin):
fieldsets = (
(None, {
'fields': ('title', 'important')
}),
)
list_display = ('title', 'important')
search_fields = ('title',)
class ResourceAdmin(admin.ModelAdmin):
fieldsets = (
(None, {
'fields': ('category', 'title', 'description')
}),
('Contact Information', {
'fields': ('address', 'email', 'phone', 'url')
}),
)
list_display = ('title', 'category')
search_fields = ('title', 'category__title', 'description', 'url')
admin.site.register(models.Category, CategoryAdmin)
admin.site.register(models.Resource, ResourceAdmin)
| from django.contrib import admin
from resources import models
class CategoryAdmin(admin.ModelAdmin):
fieldsets = (
(None, {
'fields': ('title', 'important')
}),
)
list_display = ('title', 'important')
search_fields = ('title',)
class ResourceAdmin(admin.ModelAdmin):
fieldsets = (
(None, {
'fields': ('category', 'title', 'description')
}),
('Contact Information', {
'fields': ('address', 'email', 'phone', 'url')
}),
)
list_display = ('title', 'category')
search_fields = ('title', 'category', 'description', 'url')
admin.site.register(models.Category, CategoryAdmin)
admin.site.register(models.Resource, ResourceAdmin)
| mit | Python |
e302014cd1d914cd2227c9898dae991640c8d748 | Update wsgi.py settings | seciadev/django_weddingsite,seciadev/django_weddingsite,seciadev/django_weddingsite | weddingsite/wsgi.py | weddingsite/wsgi.py | """
WSGI config for weddingsite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'weddingsite.settings'
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
#os.environ.setdefault("DJANGO_SETTINGS_MODULE", "weddingsite.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application) | """
WSGI config for weddingsite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "weddingsite.settings")
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
#os.environ['DJANGO_SETTINGS_MODULE'] = 'weddingsite.settings'
application = get_wsgi_application()
application = DjangoWhiteNoise(application) | mpl-2.0 | Python |
b745b5e1a843195f2f067602381be038744a9390 | Bump version to 1.0.0 | weberwang/WeRoBot,FlyRabbit/WeRoBot,whtsky/WeRoBot,FlyRabbit/WeRoBot,whtsky/WeRoBot,weberwang/WeRoBot,whtsky/WeRoBot,adam139/WeRobot,adam139/WeRobot | werobot/__init__.py | werobot/__init__.py | __version__ = '1.0.0'
__author__ = 'whtsky'
__license__ = 'MIT'
__all__ = ["WeRoBot"]
try:
from werobot.robot import WeRoBot
except ImportError:
pass
| __version__ = '0.7.0'
__author__ = 'whtsky'
__license__ = 'MIT'
__all__ = ["WeRoBot"]
try:
from werobot.robot import WeRoBot
except ImportError:
pass
| mit | Python |
c4c8ca08f162686da0396fd6d9427eb5efcc6168 | Improve v7 conversion cli command | wbolster/whip-neustar | whip_neustar/cli.py | whip_neustar/cli.py | """
Command line interface module.
"""
import gzip
import logging
import sys
import aaargh
logger = logging.getLogger(__name__)
JSON_LIBS = ('ujson', 'simplejson', 'json')
for lib in JSON_LIBS:
try:
json = __import__(lib)
except ImportError:
pass
else:
break
from . import reader
from . import v7conversion
def gzip_wrap(fp):
if fp.name.endswith('.gz'):
return gzip.GzipFile(mode='r', fileobj=fp)
else:
return fp
app = aaargh.App(
description="Neustar (formerly Quova) data set utilities.")
@app.cmd(description="Convert a Neustar V7 dataset to Whip format")
@app.cmd_arg('filename')
def convert(filename):
out_fp = sys.stdout
write = out_fp.write
dumps = json.dumps
for doc in reader.iter_records(filename):
write(dumps(doc))
write('\n')
@app.cmd(
name='convert-to-v7',
description="Convert an older Quova data set into V7 format")
@app.cmd_arg('data_fp', type=file, nargs='?', default=sys.stdin)
@app.cmd_arg('ref_fp', type=file, nargs='?')
@app.cmd_arg('--output', '-o', default=sys.stdout)
def convert_v7(data_fp, ref_fp, output):
if ref_fp is None:
logger.info("No reference file specified; trying to find it "
"based on data file name")
if not '.dat' in data_fp.name:
raise RuntimeError("Cannot deduce reference file name")
ref_fp = open(data_fp.name.replace('.dat', '.ref'))
ref_fp = gzip_wrap(ref_fp)
logger.info("Loading reference file %r into memory", ref_fp.name)
references = v7conversion.load_references(ref_fp)
logger.info("Converting input file %r", data_fp.name)
data_fp = gzip_wrap(data_fp)
n = v7conversion.convert_to_v7(data_fp, references, output)
logger.info("Converted %d records", n)
def main():
logging.basicConfig(
format='%(asctime)s (%(name)s) %(levelname)s: %(message)s',
level=logging.INFO,
)
app.run()
if __name__ == '__main__':
main()
| """
Command line interface module.
"""
import gzip
import logging
import sys
import aaargh
logger = logging.getLogger(__name__)
JSON_LIBS = ('ujson', 'simplejson', 'json')
for lib in JSON_LIBS:
try:
json = __import__(lib)
except ImportError:
pass
else:
break
from . import reader
from . import v7conversion
def gzip_wrap(fp):
if fp.name.endswith('.gz'):
return gzip.GzipFile(mode='r', fileobj=fp)
else:
return fp
app = aaargh.App(
description="Neustar (formerly Quova) data set utilities.")
@app.cmd(description="Convert a Neustar V7 dataset to Whip format")
@app.cmd_arg('filename')
def convert(filename):
out_fp = sys.stdout
write = out_fp.write
dumps = json.dumps
for doc in reader.iter_records(filename):
write(dumps(doc))
write('\n')
@app.cmd(
name='convert-to-v7',
description="Convert an older Quova data set into V7 format")
@app.cmd_arg('data_fp', type=file)
@app.cmd_arg('ref_fp', type=file, nargs='?')
@app.cmd_arg('--output', '-o', default=sys.stdout)
def convert_v7(data_fp, ref_fp, output):
if ref_fp is None:
logger.info("No reference file specified; trying to find it "
"based on data file name")
ref_fp = open(data_fp.name.replace('.dat', '.ref'))
ref_fp = gzip_wrap(ref_fp)
logger.info("Loading reference file %r into memory", ref_fp.name)
references = v7conversion.load_references(ref_fp)
logger.info("Converting input file %r", data_fp.name)
data_fp = gzip_wrap(data_fp)
n = v7conversion.convert_to_v7(data_fp, references, output)
logger.info("Converted %d records", n)
def main():
logging.basicConfig(
format='%(asctime)s (%(name)s) %(levelname)s: %(message)s',
level=logging.INFO,
)
app.run()
if __name__ == '__main__':
main()
| bsd-3-clause | Python |
96a49c5239b051a5ffd4371e360a74398b6532b5 | update openstack.py helper sample | karmab/kcli,karmab/kcli,karmab/kcli,karmab/kcli | extras/openstack.py | extras/openstack.py | from kvirt.config import Kconfig
cluster = 'testk'
network = "default"
api_ip = "12.0.0.253"
cidr = "12.0.0.0/24"
config = Kconfig()
config.k.delete_network_port(f"{cluster}-vip" % cluster)
config.k.create_network(name=network, cidr=cidr, overrides={'port_security_enabled': True})
config.k.create_network_port(f"{cluster}-vip" % cluster, network, ip=api_ip, floating=True)
| from kvirt.config import Kconfig
cluster = 'testk'
network = "default"
api_ip = "11.0.0.253"
cidr = "11.0.0.0/24"
config = Kconfig()
config.k.delete_network_port("%s-vip" % cluster)
config.k.create_network(name=network, cidr=cidr, overrides={'port_security_enabled': True})
config.k.create_network_port("%s-vip" % cluster, network, ip=api_ip, floating=True)
| apache-2.0 | Python |
cdc2eeb17bcd553e79ee6985fc3bad7889ef5647 | add kwargs and make exceptions noisy by default | scrapinghub/extruct | extruct/__init__.py | extruct/__init__.py | import logging
import argparse
from lxml.html import fromstring
from extruct.jsonld import JsonLdExtractor
from extruct.rdfa import RDFaExtractor
from extruct.w3cmicrodata import MicrodataExtractor
from extruct.opengraph import OpenGraphExtractor
from extruct.microformat import MicroformatExtractor
from extruct.xmldom import XmlDomHTMLParser
logger = logging.getLogger(__name__)
def extract(args=None):
parser = argparse.ArgumentParser()
arg = parser.add_argument
arg('htmlstring', help='string with valid html document')
arg('--url', default ='http://www.example.com/',
help='url to the html document')
arg('--encoding', default='UTF-8', help='encoding of the html document')
arg('--syntaxes', default='all', help='Either list of microdata syntaxes to\
use or "all" (syntaxes available [microdata, microformat, rdfa, \
opengraph, jsonld])')
arg('--errors', default='strict', choices=['log', 'ignore', 'strict'],
help='possible values: log, save exceptions to extruct.log, ignore, \
ignore exceptions or strict (default), raise exceptions')
args = parser.parse_args(args)
domparser = XmlDomHTMLParser(encoding=args.encoding)
tree = fromstring(args.htmlstring, parser=domparser)
if args.syntaxes == 'all':
syntaxes = ['microdata', 'jsonld', 'opengraph', 'microformat', 'rdfa']
processors = []
if 'microdata' in syntaxes:
processors.append(('microdata', MicrodataExtractor().extract_items))
if 'jsonld' in syntaxes:
processors.append(('jsonld', JsonLdExtractor().extract_items))
if 'opengraph' in syntaxes:
processors.append(('opengraph', OpenGraphExtractor().extract_items))
if 'microformat' in syntaxes:
processors.append(('microformat', MicroformatExtractor().extract_items))
if 'rdfa' in syntaxes:
processors.append(('rdfa', RDFaExtractor().extract_items))
output = {}
for label, extract in processors:
try:
output[label] = [obj for obj in extract(document=tree,
url=args.url,
html=args.htmlstring)]
except Exception:
if args.errors == 'log':
logger.exception("Failed to parse %s", args.url)
if args.errors == 'ignore':
pass
return output
| import logging
from lxml.html import fromstring
from extruct.jsonld import JsonLdExtractor
from extruct.rdfa import RDFaExtractor
from extruct.w3cmicrodata import MicrodataExtractor
from extruct.opengraph import OpenGraphExtractor
from extruct.microformat import MicroformatExtractor
from extruct.xmldom import XmlDomHTMLParser
logger = logging.getLogger(__name__)
def extract(htmlstring, url='http://www.example.com/', encoding="UTF-8",
syntaxes="all", schema_context='http://schema.org'):
domparser = XmlDomHTMLParser(encoding=encoding)
tree = fromstring(htmlstring, parser=domparser)
if syntaxes == 'all':
syntaxes = ['microdata', 'jsonld', 'opengraph', 'microformat', 'rdfa']
processors = []
if 'microdata' in syntaxes:
processors.append(('microdata', MicrodataExtractor().extract_items))
if 'jsonld' in syntaxes:
processors.append(('jsonld', JsonLdExtractor().extract_items))
if 'opengraph' in syntaxes:
processors.append(('opengraph', OpenGraphExtractor().extract_items))
if 'microformat' in syntaxes:
processors.append(('microformat', MicroformatExtractor().extract_items))
if 'rdfa' in syntaxes:
processors.append(('rdfa', RDFaExtractor().extract_items))
output = {}
for label, extract in processors:
try:
output[label] = [obj for obj in extract(document=tree, url=url, html=htmlstring)]
except Exception:
logger.exception("Failed to parse %s", url)
return output
| bsd-3-clause | Python |
86ea5819fed43086f347f0601f9372fa40742493 | remove unnecessary imports | mikevb1/discordbot,mikevb1/lagbot | bot.py | bot.py | """Discord bot for Discord."""
import datetime
import asyncio
import logging
import sys
import os
from discord.ext import commands
import discord
# Files and Paths
app_path = os.path.split(os.path.abspath(sys.argv[0]))[0]
data_path = os.path.join(app_path, 'data')
token_file = os.path.join(app_path, 'token.txt')
log_file = os.path.join(app_path, 'bot.log')
if not os.path.isdir(data_path):
os.mkdir(data_path)
# Logging Setup
log = logging.getLogger('discord')
log.setLevel(logging.INFO)
fhandler = logging.FileHandler(
filename=log_file,
encoding='utf-8',
mode='w')
fhandler.setFormatter(logging.Formatter(
'%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
log.addHandler(fhandler)
# Discord Client/Bot
bot = commands.Bot(command_prefix='!')
bot.remove_command('help')
cogs = [
'cogs.admin',
'cogs.misc',
'cogs.meta',
'cogs.stream']
@bot.event
async def on_ready():
"""Called when bot is ready."""
log.info('Bot ready!')
bot.uptime = datetime.datetime.utcnow()
for cog in cogs:
try:
bot.load_extension(cog)
except Exception as e:
print("Couldn't load cog {}\n{}: {}".format(
cog, type(e).__name__, e))
await bot.change_status(game=discord.Game(name='Destroy All Humans!'))
@bot.event
async def on_message(msg):
"""Called when message is recieved."""
if msg.author == bot.user:
return
await bot.process_commands(msg)
if __name__ == '__main__':
if any('debug' in arg.lower() for arg in sys.argv):
bot.command_prefix = '%!'
with open(token_file, 'r') as fp:
token = fp.read()[:-1]
try:
bot.run(token)
except Exception as e:
print(e)
log.error(e)
| """Discord bot for Discord."""
from collections import OrderedDict
import datetime
import requests
import asyncio
import logging
import json
import sys
import os
from discord.ext import commands
import discord
# Files and Paths
app_path = os.path.split(os.path.abspath(sys.argv[0]))[0]
data_path = os.path.join(app_path, 'data')
token_file = os.path.join(app_path, 'token.txt')
log_file = os.path.join(app_path, 'bot.log')
if not os.path.isdir(data_path):
os.mkdir(data_path)
# Logging Setup
log = logging.getLogger('discord')
log.setLevel(logging.INFO)
fhandler = logging.FileHandler(
filename=log_file,
encoding='utf-8',
mode='w')
fhandler.setFormatter(logging.Formatter(
'%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
log.addHandler(fhandler)
# Discord Client/Bot
bot = commands.Bot(command_prefix='!')
bot.remove_command('help')
cogs = [
'cogs.admin',
'cogs.misc',
'cogs.meta',
'cogs.stream']
@bot.event
async def on_ready():
"""Called when bot is ready."""
log.info('Bot ready!')
bot.uptime = datetime.datetime.utcnow()
for cog in cogs:
try:
bot.load_extension(cog)
except Exception as e:
print("Couldn't load cog {}\n{}: {}".format(
cog, type(e).__name__, e))
await bot.change_status(game=discord.Game(name='Destroy All Humans!'))
@bot.event
async def on_message(msg):
"""Called when message is recieved."""
if msg.author == bot.user:
return
await bot.process_commands(msg)
if __name__ == '__main__':
if any('debug' in arg.lower() for arg in sys.argv):
bot.command_prefix = '%!'
with open(token_file, 'r') as fp:
token = fp.read()[:-1]
try:
bot.run(token)
except Exception as e:
print(e)
log.error(e)
| mit | Python |
a5e87e7cf2a607c33f1fa669c42d9f13a4cffd97 | use a meaningful uri for prediction service | neoseele/prediction-aef | service/__init__.py | service/__init__.py | # -*- coding: utf-8 -*-
from flask import Flask, current_app, request, jsonify
import io
import base64
import logging
import numpy as np
import cv2
from service import model
def create_app(config, debug=False, testing=False, config_overrides=None):
app = Flask(__name__)
app.config.from_object(config)
if config_overrides:
app.config.update(config_overrides)
# Configure logging
if not app.testing:
logging.basicConfig(level=logging.INFO)
# Create a health check handler. Health checks are used when running on
# Google Compute Engine by the load balancer to determine which instances
# can serve traffic. Google App Engine also uses health checking, but
# accepts any non-500 response as healthy.
@app.route('/_ah/health')
def health_check():
return 'ok', 200
@app.route('/', methods=['GET'])
def root():
return 'Hello!', 200
@app.route('/predict', methods=['POST'])
def predict():
data = {}
try:
data = request.get_json()['data']
except Exception:
return jsonify(status_code='400', msg='Bad Request'), 400
image = io.BytesIO(base64.b64decode(data))
img = cv2.imdecode(np.fromstring(image.getvalue(), dtype=np.uint8), 1)
response = {}
prediction = model.predict(img)
if prediction == 0:
response['prediction'] = 'boss'
else:
response['prediction'] = 'other'
current_app.logger.info('Prediction: %s', prediction)
return jsonify(response)
# Add an error handler that reports exceptions to Stackdriver Error
# Reporting. Note that this error handler is only used when Debug
# is False
@app.errorhandler(500)
def server_error(e):
return """
An internal error occurred: <pre>{}</pre>
See logs for full stacktrace.
""".format(e), 500
return app
# if __name__ == '__main__':
# app.run(host='0.0.0.0', port=8080, debug=True)
| # -*- coding: utf-8 -*-
from flask import Flask, current_app, request, jsonify
import io
import base64
import logging
import numpy as np
import cv2
from service import model
def create_app(config, debug=False, testing=False, config_overrides=None):
app = Flask(__name__)
app.config.from_object(config)
if config_overrides:
app.config.update(config_overrides)
# Configure logging
if not app.testing:
logging.basicConfig(level=logging.INFO)
# Create a health check handler. Health checks are used when running on
# Google Compute Engine by the load balancer to determine which instances
# can serve traffic. Google App Engine also uses health checking, but
# accepts any non-500 response as healthy.
@app.route('/_ah/health')
def health_check():
return 'ok', 200
@app.route('/', methods=['POST'])
def predict():
data = {}
try:
data = request.get_json()['data']
except Exception:
return jsonify(status_code='400', msg='Bad Request'), 400
image = io.BytesIO(base64.b64decode(data))
img = cv2.imdecode(np.fromstring(image.getvalue(), dtype=np.uint8), 1)
response = {}
prediction = model.predict(img)
if prediction == 0:
response['prediction'] = 'boss'
else:
response['prediction'] = 'other'
current_app.logger.info('Prediction: %s', prediction)
return jsonify(response)
# Add an error handler that reports exceptions to Stackdriver Error
# Reporting. Note that this error handler is only used when Debug
# is False
@app.errorhandler(500)
def server_error(e):
return """
An internal error occurred: <pre>{}</pre>
See logs for full stacktrace.
""".format(e), 500
return app
# if __name__ == '__main__':
# app.run(host='0.0.0.0', port=8080, debug=True)
| apache-2.0 | Python |
1587d6935ea472d512bb956a8aa8218d4cfd5242 | remove country lookup, using db trigger instead | justinwp/croplands,justinwp/croplands | gfsad/views/api/locations.py | gfsad/views/api/locations.py | from gfsad import api
from gfsad.models import Location
from processors import api_roles, add_user_to_posted_data, remove_relations, debug_post
from records import save_record_state_to_history
from gfsad.tasks.records import get_ndvi, build_static_records
def process_records(result=None, **kwargs):
"""
This processes all records that may have been posted as a relation of the location.
:param result:
:param kwargs:
:return: None
"""
for record in result['records']:
save_record_state_to_history(record)
def merge_same_location_lat_long(data=None, **kwargs):
"""
This preprocessor checks if the location already exists.
:param data:
:param kwargs:
:return:
"""
# TODO
pass
def get_time_series(result=None, **kwargs):
# get_ndvi.delay(id=result['id'], lat=result['lat'], lon=result['lon'])
pass
def build_static_locations(result=None, **kwargs):
"""
Calls the celery task to rebuild the static locations for the web application.
"""
# build_static_records.delay()
pass
def change_field_names(data=None, **kwargs):
if 'photos' in data:
data['images'] = data['photos']
del data['photos']
def create(app):
api.create_api(Location,
app=app,
collection_name='locations',
methods=['GET', 'POST', 'PATCH', 'DELETE'],
preprocessors={
'POST': [change_field_names, add_user_to_posted_data, debug_post],
'PATCH_SINGLE': [api_roles(['mapping', 'validation', 'admin']), remove_relations],
'PATCH_MANY': [api_roles('admin'), remove_relations],
'DELETE': [api_roles('admin')]
},
postprocessors={
'POST': [process_records, get_time_series, build_static_locations],
'PATCH_SINGLE': [build_static_locations],
'PATCH_MANY': [build_static_locations],
'DELETE': [build_static_locations]
},
results_per_page=10) | from gfsad import api
from gfsad.models import Location
from processors import api_roles, add_user_to_posted_data, remove_relations, debug_post
from records import save_record_state_to_history
from gfsad.tasks.records import get_ndvi, build_static_records
from gfsad.utils.countries import find_country
def process_records(result=None, **kwargs):
"""
This processes all records that may have been posted as a relation of the location.
:param result:
:param kwargs:
:return: None
"""
for record in result['records']:
save_record_state_to_history(record)
def merge_same_location_lat_long(data=None, **kwargs):
"""
This preprocessor checks if the location already exists.
:param data:
:param kwargs:
:return:
"""
# TODO
pass
def get_time_series(result=None, **kwargs):
# get_ndvi.delay(id=result['id'], lat=result['lat'], lon=result['lon'])
pass
def build_static_locations(result=None, **kwargs):
"""
Calls the celery task to rebuild the static locations for the web application.
"""
# build_static_records.delay()
pass
def change_field_names(data=None, **kwargs):
if 'photos' in data:
data['images'] = data['photos']
del data['photos']
def create(app):
api.create_api(Location,
app=app,
collection_name='locations',
methods=['GET', 'POST', 'PATCH', 'DELETE'],
preprocessors={
'POST': [change_field_names, add_user_to_posted_data, debug_post],
'PATCH_SINGLE': [api_roles(['mapping', 'validation', 'admin']), remove_relations],
'PATCH_MANY': [api_roles('admin'), remove_relations],
'DELETE': [api_roles('admin')]
},
postprocessors={
'POST': [process_records, get_time_series, build_static_locations],
'PATCH_SINGLE': [build_static_locations],
'PATCH_MANY': [build_static_locations],
'DELETE': [build_static_locations]
},
results_per_page=10) | mit | Python |
cb9b6fcb5b6cf6ad286758a7ceef789d6265ceae | Update service.py | SimonWang2014/DockerConsoleApp,SimonWang2014/DockerConsoleApp,liuhong1happy/DockerConsoleApp,liuhong1happy/DockerConsoleApp,SimonWang2014/DockerConsoleApp,SimonWang2014/DockerConsoleApp,liuhong1happy/DockerConsoleApp | services/service.py | services/service.py | from models.service import ServiceModel
import tornado.gen
class ServiceService():
m_service = ServiceModel()
def __init__(self):
pass
@tornado.gen.engine
def insert_service(self,service,callback=None):
model = yield tornado.gen.Task(self.m_service.insert,service)
callback(model)
def exist_user(self,name,user,callback=None):
result = yield tornado.gen.Task(m_service.find_one,{"name":name,"user":user})
if result==None or not isinstance(result,dict):
callback(False)
else:
callback(True)
def get_user(self,name,user,callback=None):
result = yield tornado.gen.Task(m_service.find_one,{"name":name,"user":user})
if result==None or not isinstance(result,dict):
callback(None)
else:
callback(result)
@tornado.gen.engine
def get_list(self,spec,fields=None,sorts=None,page_index=0,page_size=20,callback=None):
result = yield tornado.gen.Task(self.m_service.get_list,spec,fields=fields,sorts=sorts,skip=page_size*page_index,limit=page_size)
if result==None or not isinstance(result,list):
callback(None)
else:
callback(result)
| from models.service import ServiceModel
import tornado.gen
class ServiceService():
m_service = ServiceModel()
def __init__(self):
pass
def insert_service(self,service,callback=None):
model = yield tornado.gen.Task(m_service.insert,service)
callback(model)
def exist_user(self,name,user,callback=None):
result = yield tornado.gen.Task(m_service.find_one,{"name":name,"user":user})
if result==None or not isinstance(result,dict):
callback(False)
else:
callback(True)
def get_user(self,name,user,callback=None):
result = yield tornado.gen.Task(m_service.find_one,{"name":name,"user":user})
if result==None or not isinstance(result,dict):
callback(None)
else:
callback(result)
@tornado.gen.engine
def get_list(self,spec,fields=None,sorts=None,page_index=0,page_size=20,callback=None):
result = yield tornado.gen.Task(self.m_service.get_list,spec,fields=fields,sorts=sorts,skip=page_size*page_index,limit=page_size)
if result==None or not isinstance(result,list):
callback(None)
else:
callback(result) | apache-2.0 | Python |
26ccb7620af2d883e9c584608cf3270e1561627c | Bump version for release | tiangolo/fastapi,tiangolo/fastapi,tiangolo/fastapi | fastapi/__init__.py | fastapi/__init__.py | """FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.1.9"
from .applications import FastAPI
from .routing import APIRouter
from .params import Body, Path, Query, Header, Cookie, Form, File, Security, Depends
| """FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.1.8"
from .applications import FastAPI
from .routing import APIRouter
from .params import Body, Path, Query, Header, Cookie, Form, File, Security, Depends
| mit | Python |
6f4dbc4aef65da839a9fd97345ed923251de93ce | reduce padding in logging | fedora-infra/fedimg,fedora-infra/fedimg | fedmsg.d/logging.py | fedmsg.d/logging.py | # Setup fedmsg logging.
# See the following for constraints on this format https://bit.ly/Xn1WDn
bare_format = "[%(asctime)s][%(name)s][%(levelname)s](%(threadName)s) %(message)s"
config = dict(
logging=dict(
version=1,
formatters=dict(
bare={
"datefmt": "%Y-%m-%d %H:%M:%S",
"format": bare_format
},
),
handlers=dict(
console={
"class": "logging.StreamHandler",
"formatter": "bare",
"level": "INFO",
"stream": "ext://sys.stdout",
}
),
loggers=dict(
fedmsg={
"level": "INFO",
"propagate": False,
"handlers": ["console"],
},
moksha={
"level": "INFO",
"propagate": False,
"handlers": ["console"],
},
),
),
)
| # Setup fedmsg logging.
# See the following for constraints on this format https://bit.ly/Xn1WDn
bare_format = "[%(asctime)s][%(name)10s %(levelname)7s](%(threadName)s) %(message)s"
config = dict(
logging=dict(
version=1,
formatters=dict(
bare={
"datefmt": "%Y-%m-%d %H:%M:%S",
"format": bare_format
},
),
handlers=dict(
console={
"class": "logging.StreamHandler",
"formatter": "bare",
"level": "INFO",
"stream": "ext://sys.stdout",
}
),
loggers=dict(
fedmsg={
"level": "INFO",
"propagate": False,
"handlers": ["console"],
},
moksha={
"level": "INFO",
"propagate": False,
"handlers": ["console"],
},
),
),
)
| agpl-3.0 | Python |
c84daa82aa3846445390c1bdf37db150671a4256 | Improve build_database step | globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service | dbaas/workflow/steps/build_database.py | dbaas/workflow/steps/build_database.py | # -*- coding: utf-8 -*-
import logging
from base import BaseStep
from logical.models import Database
import datetime
LOG = logging.getLogger(__name__)
class BuildDatabase(BaseStep):
def __unicode__(self):
return "Creating logical database..."
def do(self, workflow_dict):
try:
if not workflow_dict['team'] or not workflow_dict['description'] or not workflow_dict['databaseinfra']:
return False
LOG.info("Creating Database...")
database = Database.provision(name= workflow_dict['name'], databaseinfra= workflow_dict['databaseinfra'])
LOG.info("Database %s created!" % database)
workflow_dict['database'] = database
LOG.info("Updating database team")
database.team = workflow_dict['team']
if 'project' in workflow_dict:
LOG.info("Updating database project")
database.project = workflow_dict['project']
LOG.info("Updating database description")
database.description = workflow_dict['description']
database.save()
return True
except Exception, e:
print e
return False
def undo(self, workflow_dict):
try:
if not 'database' in workflow_dict:
if 'databaseinfra' in workflow_dict:
LOG.info("Loading database into workflow_dict...")
workflow_dict['database'] = Database.objects.filter(databaseinfra=workflow_dict['databaseinfra'])[0]
else:
return False
if not workflow_dict['database'].is_in_quarantine:
LOG.info("Putting Database in quarentine...")
database = workflow_dict['database']
database.is_in_quarantine= True
database.quarantine_dt = datetime.datetime.now().date()
database.save()
LOG.info("Destroying the database....")
database.delete()
return True
except Exception, e:
print e
return False
| # -*- coding: utf-8 -*-
import logging
from base import BaseStep
from logical.models import Database
import datetime
LOG = logging.getLogger(__name__)
class BuildDatabase(BaseStep):
def __unicode__(self):
return "Creating logical database..."
def do(self, workflow_dict):
try:
if not workflow_dict['team'] or not workflow_dict['description'] or not workflow_dict['databaseinfra']:
return False
LOG.info("Creating Database...")
database = Database.provision(name= workflow_dict['name'], databaseinfra= workflow_dict['databaseinfra'])
LOG.info("Database %s created!" % database)
workflow_dict['database'] = database
LOG.info("Updating database team")
database.team = workflow_dict['team']
if 'project' in workflow_dict:
LOG.info("Updating database project")
database.project = workflow_dict['project']
LOG.info("Updating database description")
database.description = workflow_dict['description']
database.save()
return True
except Exception, e:
print e
return False
def undo(self, workflow_dict):
try:
if not 'database' in workflow_dict:
return False
LOG.info("Destroying the database....")
if not workflow_dict['database'].is_in_quarantine:
LOG.info("Putting Database in quarentine...")
database = workflow_dict['database']
database.is_in_quarantine= True
database.quarantine_dt = datetime.datetime.now().date()
database.save()
database.delete()
return True
except Exception, e:
print e
return False
| bsd-3-clause | Python |
3bde1cac6163d90287d2bef2d0ca551c8d544488 | add more multi qbit test programs | BBN-Q/pyqgl2,BBN-Q/pyqgl2 | src/python/pyqgl2/test/multi.py | src/python/pyqgl2/test/multi.py | from qgl2.qgl1 import QubitFactory, Id, X, MEAS, Y
from qgl2.qgl2 import qgl2decl, sequence, concur, seq
from qgl2.basic_sequences.qgl2_plumbing import init
@qgl2decl
def multiQbitTest() -> sequence:
q1 = QubitFactory('q1')
q2 = QubitFactory('q2')
with concur:
with seq:
init(q1)
Id(q1)
X(q1)
with seq:
init(q2)
X(q2)
Id(q2)
@qgl2decl
def multiQbitTest2() -> sequence:
q1 = QubitFactory('q1')
q2 = QubitFactory('q2')
with concur:
for q in [q1, q2]:
init(q)
Id(q)
X(q)
MEAS(q)
@qgl2decl
def simpleSingle() -> sequence:
q2 = QubitFactory('q2')
init(q2)
X(q2)
MEAS(q2)
@qgl2decl
def anotherMulti() -> sequence:
q1 = QubitFactory('q1')
q2 = QubitFactory('q2')
with concur:
for q in [q1, q2]:
init(q)
Id(q)
X(q)
MEAS(q)
with concur:
for q in [q1, q2]:
Y(q)
@qgl2decl
def anotherMulti2() -> sequence:
q1 = QubitFactory('q1')
q2 = QubitFactory('q2')
q3 = QubitFactory('q3')
with concur:
for q in [q1, q2]:
init(q)
Id(q)
X(q)
MEAS(q)
with concur:
for q in [q1, q3]:
Y(q)
| from qgl2.qgl1 import QubitFactory, Id, X, MEAS
from qgl2.qgl2 import qgl2decl, sequence, concur, seq
from qgl2.basic_sequences.qgl2_plumbing import init
@qgl2decl
def multiQbitTest() -> sequence:
q1 = QubitFactory('q1')
q2 = QubitFactory('q2')
with concur:
with seq:
init(q1)
Id(q1)
X(q1)
with seq:
init(q2)
X(q2)
Id(q2)
@qgl2decl
def multiQbitTest2() -> sequence:
q1 = QubitFactory('q1')
q2 = QubitFactory('q2')
with concur:
for q in [q1, q2]:
init(q)
Id(q)
X(q)
MEAS(q)
@qgl2decl
def simpleSingle() -> sequence:
q2 = QubitFactory('q2')
init(q2)
X(q2)
MEAS(q2)
| apache-2.0 | Python |
8a5db519f7aa810d4009ada20de46fb982da0ec8 | Fix description in __init__.py | Khan/wtforms | wtforms/__init__.py | wtforms/__init__.py | """
wtforms
~~~~~~~
WTForms is a HTTP/HTML forms handling library, written in Python.
It handles definition, validation and rendering in a flexible and i18n
friendly way. It heavily reduces boilerplate and is completely unicode
aware.
Check out the hg repository at http://www.bitbucket.org/prencher/wtforms/.
:copyright: 2007-2008 by James Crasta, Thomas Johansson.
:license: MIT, see LICENSE.txt for details.
"""
from wtforms.form import Form
from wtforms.fields import *
from wtforms.validators import ValidationError
from wtforms import validators
__version__ = "0.2dev"
| """
wtforms
~~~~~~~
What The Forms is a framework-agnostic way of generating HTML forms, handling
form submissions, and validating it.
Check out our trac wiki at http://dev.simplecodes.com/projects/wtforms
:copyright: 2007-2008 by James Crasta, Thomas Johansson.
:license: MIT, see LICENSE.txt for details.
"""
from wtforms.form import Form
from wtforms.fields import *
from wtforms.validators import ValidationError
from wtforms import validators
__version__ = "0.2dev"
| bsd-3-clause | Python |
294f17c5c58b864ede65f2f842b792da85bb5c85 | Bump version | spectralDNS/shenfun,spectralDNS/shenfun,spectralDNS/shenfun | shenfun/__init__.py | shenfun/__init__.py | """
This is the **shenfun** package
What is **shenfun**?
================================
``Shenfun`` is a high performance computing platform for solving partial
differential equations (PDEs) by the spectral Galerkin method. The user
interface to shenfun is very similar to
`FEniCS <https://fenicsproject.org>`_, but applications are limited to
multidimensional tensor product grids. The code is parallelized with MPI
through the `mpi4py-fft <https://bitbucket.org/mpi4py/mpi4py-fft>`_
package.
``Shenfun`` enables fast development of efficient and accurate PDE solvers
(spectral order and accuracy), in the comfortable high-level Python language.
The spectral accuracy is ensured from using high-order *global* orthogonal
basis functions (Fourier, Legendre and Chebyshev), as opposed to finite element
codes like `FEniCS <https://fenicsproject.org>`_ that are using low-order
*local* basis functions. Efficiency is ensured through vectorization
(`Numpy <https://www.numpy.org/>`_), parallelization
(`mpi4py <https://bitbucket.org/mpi4py/mpi4py>`_) and by moving critical
routines to `Cython <https://cython.org/>`_.
"""
#pylint: disable=wildcard-import,no-name-in-module
__version__ = '2.0.4'
__author__ = 'Mikael Mortensen'
import numpy as np
from mpi4py import MPI
from . import chebyshev
from . import legendre
from . import laguerre
from . import hermite
from . import fourier
from . import matrixbase
from . import la
from .fourier import energy_fourier
from .io import *
from .matrixbase import *
from .forms import *
from .tensorproductspace import *
from .utilities import *
from .utilities.lagrangian_particles import *
from .utilities.integrators import *
comm = MPI.COMM_WORLD
| """
This is the **shenfun** package
What is **shenfun**?
================================
``Shenfun`` is a high performance computing platform for solving partial
differential equations (PDEs) by the spectral Galerkin method. The user
interface to shenfun is very similar to
`FEniCS <https://fenicsproject.org>`_, but applications are limited to
multidimensional tensor product grids. The code is parallelized with MPI
through the `mpi4py-fft <https://bitbucket.org/mpi4py/mpi4py-fft>`_
package.
``Shenfun`` enables fast development of efficient and accurate PDE solvers
(spectral order and accuracy), in the comfortable high-level Python language.
The spectral accuracy is ensured from using high-order *global* orthogonal
basis functions (Fourier, Legendre and Chebyshev), as opposed to finite element
codes like `FEniCS <https://fenicsproject.org>`_ that are using low-order
*local* basis functions. Efficiency is ensured through vectorization
(`Numpy <https://www.numpy.org/>`_), parallelization
(`mpi4py <https://bitbucket.org/mpi4py/mpi4py>`_) and by moving critical
routines to `Cython <https://cython.org/>`_.
"""
#pylint: disable=wildcard-import,no-name-in-module
__version__ = '2.0.3'
__author__ = 'Mikael Mortensen'
import numpy as np
from mpi4py import MPI
from . import chebyshev
from . import legendre
from . import laguerre
from . import hermite
from . import fourier
from . import matrixbase
from . import la
from .fourier import energy_fourier
from .io import *
from .matrixbase import *
from .forms import *
from .tensorproductspace import *
from .utilities import *
from .utilities.lagrangian_particles import *
from .utilities.integrators import *
comm = MPI.COMM_WORLD
| bsd-2-clause | Python |
dd5751c353391df0f8b112b946e4a928e85cae5e | test git commit | xgds/xgds_video,xgds/xgds_video,xgds/xgds_video | xgds_video/tests.py | xgds_video/tests.py | # __BEGIN_LICENSE__
# Copyright (C) 2008-2010 United States Government as represented by
# the Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# __END_LICENSE__
from django.test import TestCase
class xgds_videoTest(TestCase):
"""
Tests for xgds_video
"""
def test_xgds_video(self):
print "testing git hook 5 in xgds_video"
assert(False)
| # __BEGIN_LICENSE__
# Copyright (C) 2008-2010 United States Government as represented by
# the Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# __END_LICENSE__
from django.test import TestCase
class xgds_videoTest(TestCase):
"""
Tests for xgds_video
"""
def test_xgds_video(self):
print "testing git hook 4"
assert(False)
| apache-2.0 | Python |
fb3c48ffc6769f00fda70cf9fdba0da479ff9a4f | make readthedocs happy | alekz112/xlwings,ston380/xlwings,gdementen/xlwings,gdementen/xlwings,Juanlu001/xlwings,Juanlu001/xlwings | xlwings/__init__.py | xlwings/__init__.py | from __future__ import absolute_import
import sys
__version__ = '0.2.0'
# Python 2 vs 3
PY3 = sys.version_info[0] == 3
# Platform specific imports
if sys.platform.startswith('win'):
import xlwings._xlwindows as xlplatform
else:
import xlwings._xlmac as xlplatform
# API
from .main import Workbook, Range, Chart
from .constants import * | from __future__ import absolute_import
import sys
__version__ = '0.2.0'
# Python 2 vs 3
PY3 = sys.version_info[0] == 3
# Platform specific imports
if sys.platform.startswith('win'):
import xlwings._xlwindows as xlplatform
if sys.platform.startswith('darwin'):
import xlwings._xlmac as xlplatform
# API
from .main import Workbook, Range, Chart
from .constants import * | apache-2.0 | Python |
c842b0f93117f4fca8cc195ec73f844b281a0f19 | Update server.py | cfpb/django-nudge | src/nudge/server.py | src/nudge/server.py | """
Handles commands received from a Nudge client
"""
import binascii
import pickle
from Crypto.Cipher import AES
from django.contrib.contenttypes.models import ContentType
from django.core import serializers
from reversion.models import Version, VERSION_DELETE
try:
import simplejson as json
except ImportError:
import json
def valid_batch(batch_info):
"""Returns whether a batch format is valid"""
return 'items' in batch_info
def decrypt(key, ciphertext, iv):
"""Decrypts message sent from client using shared symmetric key"""
ciphertext = binascii.unhexlify(ciphertext)
decobj = AES.new(key, AES.MODE_CBC, iv)
plaintext = decobj.decrypt(ciphertext)
return plaintext
def versions(keys):
results = {}
for key in keys:
app, model, pk = key.split('~')
content_type = ContentType.objects.get_by_natural_key(app, model)
versions = Version.objects.all().filter(
content_type=content_type
).filter(object_id=pk).order_by('-revision__date_created')
if versions:
latest = versions[0]
results[key] = (latest.pk,
latest.type,
latest.revision
.date_created.strftime('%b %d, %Y, %I:%M %p'))
else:
results[key] = None
return json.dumps(results)
def process_batch(key, batch_info, iv):
"""Loops through items in a batch and processes them."""
batch_info = pickle.loads(decrypt(key, batch_info, iv.decode('hex')))
if valid_batch(batch_info):
items = serializers.deserialize('json', batch_info['items'])
success = True
for item in items:
item.save()
if isinstance(Version, item.object):
version = item.object
if version.type == VERSION_DELETE:
if version.object:
version.object.delete()
else:
item.object.revert()
return success
| """
Handles commands received from a Nudge client
"""
import binascii
import pickle
from Crypto.Cipher import AES
from django.contrib.contenttypes.models import ContentType
from reversion.models import Version, VERSION_DELETE
try:
import simplejson as json
except ImportError:
import json
def valid_batch(batch_info):
"""Returns whether a batch format is valid"""
return 'items' in batch_info
def decrypt(key, ciphertext, iv):
"""Decrypts message sent from client using shared symmetric key"""
ciphertext = binascii.unhexlify(ciphertext)
decobj = AES.new(key, AES.MODE_CBC, iv)
plaintext = decobj.decrypt(ciphertext)
return plaintext
def versions(keys):
results = {}
for key in keys:
app, model, pk = key.split('~')
content_type = ContentType.objects.get_by_natural_key(app, model)
versions = Version.objects.all().filter(
content_type=content_type
).filter(object_id=pk).order_by('-revision__date_created')
if versions:
latest = versions[0]
results[key] = (latest.pk,
latest.type,
latest.revision
.date_created.strftime('%b %d, %Y, %I:%M %p'))
else:
results[key] = None
return json.dumps(results)
def process_batch(key, batch_info, iv):
"""Loops through items in a batch and processes them."""
batch_info = pickle.loads(decrypt(key, batch_info, iv.decode('hex')))
if valid_batch(batch_info):
items = serializers.deserialize('json', batch_info['items'])
success = True
for item in items:
item.save()
if isinstance(Version, item.object):
version = item.object
if version.type == VERSION_DELETE:
if version.object:
version.object.delete()
else:
item.object.revert()
return success
| cc0-1.0 | Python |
53661db984233f38ab18f7df039c7a93a7382cdb | Revert "[common]: Add data.cache.audio settings entry" | pkulev/xoinvader,pankshok/xoinvader | xoinvader/common.py | xoinvader/common.py | """
Module for common shared objects.
"""
import json
from os.path import dirname
import xoinvader
from xoinvader.settings import Settings as Entry
from xoinvader.utils import Point
def get_json_config(path):
"""Return Settings object made from json."""
with open(path) as fd:
config = Entry(json.load(fd))
return config
__all__ = ["Settings"]
WIDTH = 90
HEIGHT = 34
_ROOT = dirname(xoinvader.__file__)
_CONFIG = _ROOT + "/config"
_RES = _ROOT + "/res"
_SND = _RES + "/snd"
DEFAUT_XOI_SETTINGS = dict(
layout=dict(
field=dict(
border=Point(x=WIDTH, y=HEIGHT),
player=Point(x=WIDTH // 2, y=HEIGHT - 1),
edge=Point(x=WIDTH, y=HEIGHT - 1)),
gui=dict(
bar=dict(
health=Point(x=2, y=HEIGHT - 1),
shield=Point(x=22, y=HEIGHT - 1),
weapon=Point(x=WIDTH - 18, y=HEIGHT - 1)),
info=dict(
weapon=Point(x=44, y=HEIGHT - 1)))),
path=dict(
config=dict(
ships=_CONFIG + "/ships.json",
weapons=_CONFIG + "/weapons.json"),
sound=dict(
weapon=dict(
Blaster=_SND + "/basic_blaster.ogg",
Laser=_SND + "/basic_laser.ogg",
EBlaster=_SND + "/basic_eblaster.ogg",
UM=_SND + "/basic_um.ogg"))),
color=dict(
general=dict(
normal=None),
gui=None,
weapon=None)
)
Settings = Entry(DEFAUT_XOI_SETTINGS)
| """
Module for common shared objects.
"""
import json
from os.path import dirname
import xoinvader
from xoinvader.settings import Settings as Entry
from xoinvader.utils import Point
def get_json_config(path):
"""Return Settings object made from json."""
with open(path) as fd:
config = Entry(json.load(fd))
return config
__all__ = ["Settings"]
WIDTH = 90
HEIGHT = 34
_ROOT = dirname(xoinvader.__file__)
_CONFIG = _ROOT + "/config"
_RES = _ROOT + "/res"
_SND = _RES + "/snd"
DEFAUT_XOI_SETTINGS = dict(
layout=dict(
field=dict(
border=Point(x=WIDTH, y=HEIGHT),
player=Point(x=WIDTH // 2, y=HEIGHT - 1),
edge=Point(x=WIDTH, y=HEIGHT - 1)),
gui=dict(
bar=dict(
health=Point(x=2, y=HEIGHT - 1),
shield=Point(x=22, y=HEIGHT - 1),
weapon=Point(x=WIDTH - 18, y=HEIGHT - 1)),
info=dict(
weapon=Point(x=44, y=HEIGHT - 1)))),
path=dict(
config=dict(
ships=_CONFIG + "/ships.json",
weapons=_CONFIG + "/weapons.json"),
sound=dict(
weapon=dict(
Blaster=_SND + "/basic_blaster.ogg",
Laser=_SND + "/basic_laser.ogg",
EBlaster=_SND + "/basic_eblaster.ogg",
UM=_SND + "/basic_um.ogg"))),
color=dict(
general=dict(
normal=None),
gui=None,
weapon=None),
data=dict(
cache=dict(
audio=dict()))
)
Settings = Entry(DEFAUT_XOI_SETTINGS)
| mit | Python |
324da23b97da9b513605610989ed8adb1ae80c4a | fix typo in config module | scott-maddox/obpds | src/obpds/config.py | src/obpds/config.py | cfg = {}
cfg['plot/semilogy/yrange'] = 1e7
cfg['plot/tight_layout'] = False | cfg = {}
cfg['plot.semilogy/yrange'] = 1e7
cfg['plot/tight_layout'] = False | agpl-3.0 | Python |
f360215dcd8cbb782af445131ab3a876b9580908 | quit requests dep | urkh/ddg | ddg.py | ddg.py | import sys
import urllib2
import json
import ipdb as pdb
def get_results(ddg):
results = []
for ret in ddg.get("RelatedTopics"):
if ret.has_key("Topics"):
for rett in ret.get("Topics"):
result_inf = {"description":rett["Text"], "url":rett["FirstURL"]}
else:
result_inf = {"description":ret["Text"], "url":ret["FirstURL"]}
results.append(result_inf)
for result in results:
print HEADER+" ["+str(results.index(result))+"]: "+BLUE+result.get("description")
print HEADER+" ["+str(results.index(result))+"]: "+GREEN+result.get("url")
print "\n"
#pdb.set_trace()
HEADER = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
param = sys.argv[1]
if param == "-s":
#params = 'hola'
for arg in sys.argv:
params+=arg+"+"
#pdb.set_trace()
ddg = urllib2.urlopen("http://api.duckduckgo.com/?q=hola%2Bmundo&format=json&no_html=1")
#pdb.set_trace()
get_results(json.load(ddg))
| import sys
import requests
import ipdb as pdb
def get_results(ddg):
results = []
for ret in ddg.get("RelatedTopics"):
if ret.has_key("Topics"):
for rett in ret.get("Topics"):
result_inf = {"description":rett["Text"], "url":rett["FirstURL"]}
else:
result_inf = {"description":ret["Text"], "url":ret["FirstURL"]}
results.append(result_inf)
for result in results:
print HEADER+" ["+str(results.index(result))+"]: "+BLUE+result.get("description")
print HEADER+" ["+str(results.index(result))+"]: "+GREEN+result.get("url")
print "\n"
#pdb.set_trace()
HEADER = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
param = sys.argv[1]
if param == "-s":
params = ''
for arg in sys.argv:
params+="+"+arg
pdb.set_trace()
ddg = requests.get("http://api.duckduckgo.com/?q="+params+"&format=json&no_html=1")
get_results(ddg.json())
| bsd-2-clause | Python |
202a9b2ad72ffe4ab5981f9a4a886e0a36808eb6 | Add test for handling Infinity in json | mitsuhiko/sentry,ifduyue/sentry,looker/sentry,mvaled/sentry,gencer/sentry,mvaled/sentry,looker/sentry,zenefits/sentry,korealerts1/sentry,zenefits/sentry,Kryz/sentry,kevinlondon/sentry,alexm92/sentry,JamesMura/sentry,mvaled/sentry,kevinlondon/sentry,looker/sentry,beeftornado/sentry,BuildingLink/sentry,ifduyue/sentry,felixbuenemann/sentry,looker/sentry,JamesMura/sentry,ngonzalvez/sentry,zenefits/sentry,gencer/sentry,jean/sentry,fotinakis/sentry,gencer/sentry,daevaorn/sentry,Kryz/sentry,jean/sentry,korealerts1/sentry,BuildingLink/sentry,fotinakis/sentry,imankulov/sentry,daevaorn/sentry,felixbuenemann/sentry,BayanGroup/sentry,beeftornado/sentry,alexm92/sentry,nicholasserra/sentry,kevinlondon/sentry,mitsuhiko/sentry,JackDanger/sentry,felixbuenemann/sentry,mvaled/sentry,ngonzalvez/sentry,Natim/sentry,BuildingLink/sentry,nicholasserra/sentry,JackDanger/sentry,JamesMura/sentry,daevaorn/sentry,JamesMura/sentry,mvaled/sentry,imankulov/sentry,fotinakis/sentry,jean/sentry,BuildingLink/sentry,Natim/sentry,BayanGroup/sentry,BuildingLink/sentry,mvaled/sentry,ifduyue/sentry,imankulov/sentry,JackDanger/sentry,gencer/sentry,daevaorn/sentry,ifduyue/sentry,gencer/sentry,fotinakis/sentry,jean/sentry,BayanGroup/sentry,zenefits/sentry,Kryz/sentry,ngonzalvez/sentry,korealerts1/sentry,zenefits/sentry,Natim/sentry,ifduyue/sentry,jean/sentry,looker/sentry,alexm92/sentry,beeftornado/sentry,nicholasserra/sentry,JamesMura/sentry | tests/sentry/utils/json/tests.py | tests/sentry/utils/json/tests.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import datetime
import uuid
from sentry.utils import json
from sentry.testutils import TestCase
class JSONTest(TestCase):
def test_uuid(self):
res = uuid.uuid4()
self.assertEquals(json.dumps(res), '"%s"' % res.hex)
def test_datetime(self):
res = datetime.datetime(day=1, month=1, year=2011, hour=1, minute=1, second=1)
self.assertEquals(json.dumps(res), '"2011-01-01T01:01:01.000000Z"')
def test_set(self):
res = set(['foo', 'bar'])
self.assertEquals(json.dumps(res), '["foo","bar"]')
def test_frozenset(self):
res = frozenset(['foo', 'bar'])
self.assertEquals(json.dumps(res), '["foo","bar"]')
def test_escape(self):
res = '<script>alert(1);</script>'
assert json.dumps(res) == '"<script>alert(1);</script>"'
assert json.dumps(res, escape=True) == '"<script>alert(1);<\/script>"'
def test_inf(self):
res = float('inf')
self.assertEquals(json.dumps(res), 'null')
| # -*- coding: utf-8 -*-
from __future__ import absolute_import
import datetime
import uuid
from sentry.utils import json
from sentry.testutils import TestCase
class JSONTest(TestCase):
def test_uuid(self):
res = uuid.uuid4()
self.assertEquals(json.dumps(res), '"%s"' % res.hex)
def test_datetime(self):
res = datetime.datetime(day=1, month=1, year=2011, hour=1, minute=1, second=1)
self.assertEquals(json.dumps(res), '"2011-01-01T01:01:01.000000Z"')
def test_set(self):
res = set(['foo', 'bar'])
self.assertEquals(json.dumps(res), '["foo","bar"]')
def test_frozenset(self):
res = frozenset(['foo', 'bar'])
self.assertEquals(json.dumps(res), '["foo","bar"]')
def test_escape(self):
res = '<script>alert(1);</script>'
assert json.dumps(res) == '"<script>alert(1);</script>"'
assert json.dumps(res, escape=True) == '"<script>alert(1);<\/script>"'
| bsd-3-clause | Python |
0d5ad2213240984d7375289778e77b1453ff7875 | make test for free_ram support a range since it's often flaky | guidow/pyfarm-agent,pyfarm/pyfarm-agent,guidow/pyfarm-agent,pyfarm/pyfarm-agent,pyfarm/pyfarm-agent,guidow/pyfarm-agent | tests/test_agent/test_service.py | tests/test_agent/test_service.py | # No shebang line, this module is meant to be imported
#
# Copyright 2014 Oliver Palmer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
try:
from httplib import OK, CREATED
except ImportError: # pragma: no cover
from http.client import OK, CREATED
from pyfarm.agent.sysinfo.system import system_identifier
from pyfarm.agent.testutil import TestCase
from pyfarm.agent.config import config
from pyfarm.agent.service import Agent
# TODO: need better tests, these are a little rudimentary at the moment
class TestAgentBasicMethods(TestCase):
def test_agent_api_url(self):
config["agent-id"] = 1
agent = Agent()
self.assertEqual(
agent.agent_api(),
"%s/agents/1" % config["master_api"])
def test_agent_api_url_keyerror(self):
agent = Agent()
config.pop("agent-id")
self.assertIsNone(agent.agent_api())
def test_system_data(self):
config["remote_ip"] = os.urandom(16).encode("hex")
expected = {
"current_assignments": {},
"systemid": system_identifier(),
"hostname": config["agent_hostname"],
"version": config.version,
"ram": config["agent_ram"],
"cpus": config["agent_cpus"],
"remote_ip": config["remote_ip"],
"port": config["agent_api_port"],
"time_offset": config["agent_time_offset"],
"state": config["state"]}
agent = Agent()
system_data = agent.system_data()
self.assertApproximates(
system_data.pop("free_ram"), config["free_ram"], 64)
self.assertEqual(system_data, expected)
class TestRunAgent(TestCase):
def test_created(self):
self.skipTest("NOT IMPLEMENTED")
def test_updated(self):
self.skipTest("NOT IMPLEMENTED")
| # No shebang line, this module is meant to be imported
#
# Copyright 2014 Oliver Palmer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
try:
from httplib import OK, CREATED
except ImportError: # pragma: no cover
from http.client import OK, CREATED
from pyfarm.agent.sysinfo.system import system_identifier
from pyfarm.agent.testutil import TestCase
from pyfarm.agent.config import config
from pyfarm.agent.service import Agent
# TODO: need better tests, these are a little rudimentary at the moment
class TestAgentBasicMethods(TestCase):
def test_agent_api_url(self):
config["agent-id"] = 1
agent = Agent()
self.assertEqual(
agent.agent_api(),
"%s/agents/1" % config["master_api"])
def test_agent_api_url_keyerror(self):
agent = Agent()
config.pop("agent-id")
self.assertIsNone(agent.agent_api())
def test_system_data(self):
config["remote_ip"] = os.urandom(16).encode("hex")
expected = {
"current_assignments": {},
"systemid": system_identifier(),
"hostname": config["agent_hostname"],
"version": config.version,
"ram": config["agent_ram"],
"cpus": config["agent_cpus"],
"remote_ip": config["remote_ip"],
"port": config["agent_api_port"],
"free_ram": config["free_ram"],
"time_offset": config["agent_time_offset"],
"state": config["state"]}
agent = Agent()
self.assertEqual(agent.system_data(), expected)
class TestRunAgent(TestCase):
def test_created(self):
self.skipTest("NOT IMPLEMENTED")
def test_updated(self):
self.skipTest("NOT IMPLEMENTED")
| apache-2.0 | Python |
968c4e30ec71d2c51996868788a1b92f9f175bff | resolve not defined bug | faneshion/MatchZoo,faneshion/MatchZoo | tests/unit_test/test_datapack.py | tests/unit_test/test_datapack.py | from matchzoo.datapack import DataPack, load_datapack
import pytest
import shutil
import pandas as pd
@pytest.fixture
def data_pack():
relation = [['qid0', 'did0', 1], ['qid1', 'did1', 0]]
left = [['qid0', [1, 2]], ['qid1', [2, 3]]]
right = [['did0', [2, 3, 4]], ['did1', [3, 4, 5]]]
ctx = {'vocab_size': 2000}
relation = pd.DataFrame(relation, columns=['id_left', 'id_right', 'label'])
left = pd.DataFrame(left, columns=['id_left', 'text_left'])
left.set_index('id_left', inplace=True)
right = pd.DataFrame(right, columns=['id_right', 'text_right'])
right.set_index('id_right', inplace=True)
return DataPack(relation=relation,
left=left,
right=right,
context=ctx
)
def test_length(data_pack):
num_examples = 2
assert len(data_pack) == num_examples
def test_getter(data_pack):
assert data_pack.relation.iloc[0].values.tolist() == ['qid0', 'did0', 1]
assert data_pack.relation.iloc[1].values.tolist() == ['qid1', 'did1', 0]
assert data_pack.left.loc['qid0', 'text_left'] == [1, 2]
assert data_pack.right.loc['did1', 'text_right'] == [3, 4, 5]
def test_setter(data_pack):
data = [['id0', [1]], ['id1', [2]]]
left = pd.DataFrame(data, columns=['id_left', 'text_left'])
left.set_index('id_left', inplace=True)
data_pack.left = left
assert data_pack.left.loc['id0', 'text_left'] == [1]
right = pd.DataFrame(data, columns=['id_right', 'text_right'])
right.set_index('id_right', inplace=True)
data_pack.right = right
assert data_pack.right.loc['id0', 'text_right'] == [1]
data_pack.context = {'a': 1}
assert data_pack.context
def test_save_load(data_pack):
dirpath = '.tmpdir'
data_pack.save(dirpath)
dp = load_datapack(dirpath)
with pytest.raises(FileExistsError):
data_pack.save(dirpath)
assert len(data_pack) == 2
assert len(dp) == 2
shutil.rmtree(dirpath)
| from matchzoo.datapack import DataPack, load_datapack
import pytest
import shutil
import pandas as pd
@pytest.fixture
def data_pack():
relation = [['qid0', 'did0', 1], ['qid1', 'did1', 0]]
left = [['qid0', [1, 2]], ['qid1', [2, 3]]]
right = [['did0', [2, 3, 4]], ['did1', [3, 4, 5]]]
ctx = {'vocab_size': 2000}
relation = pd.DataFrame(relation, columns=['id_left', 'id_right', 'label'])
left = pd.DataFrame(left, columns=['id_left', 'text_left'])
left.set_index('id_left', inplace=True)
right = pd.DataFrame(right, columns=['id_right', 'text_right'])
right.set_index('id_right', inplace=True)
return DataPack(relation=relation,
left=left,
right=right,
context=ctx
)
def test_length(data_pack):
num_examples = 2
assert len(data_pack) == num_examples
def test_getter(data_pack):
assert data_pack.relation.iloc[0].values.tolist() == ['qid0', 'did0', 1]
assert data_pack.relation.iloc[1].values.tolist() == ['qid1', 'did1', 0]
assert data_pack.left.loc['qid0', 'text_left'] == [1, 2]
assert data_pack.right.loc['did1', 'text_right'] == [3, 4, 5]
def test_setter(data_pack):
data = [['id0', [1]], ['id1', [2]]]
left = pd.DataFrame(data, columns=['id_left', 'text_left'])
left.set_index('id_left', inplace=True)
data_pack.left = left
assert data_pack.left.loc['id0', 'text_left'] == [1]
right = pd.DataFrame(data, columns=['id_right', 'text_right'])
right.set_index('id_right', inplace=True)
data_pack.right = right
assert data_pack.right.loc['id0', 'text_right'] == [1]
data_pack.context = {'a': 1}
assert datapack.context
def test_save_load(data_pack):
dirpath = '.tmpdir'
data_pack.save(dirpath)
dp = load_datapack(dirpath)
with pytest.raises(FileExistsError):
data_pack.save(dirpath)
assert len(data_pack) == 2
assert len(dp) == 2
shutil.rmtree(dirpath)
| apache-2.0 | Python |
6d9a9942254da2a2280106fb7b08fefb0f5457b9 | Add SNMP unit tests | brocade/pynos,SivagnanamCiena/pynos,BRCDcomm/pynos | tests/versions/base/test_snmp.py | tests/versions/base/test_snmp.py | #!/usr/bin/env python
"""
Copyright 2015 Brocade Communications Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import xml.etree.ElementTree as ET
import pynos.versions.base.snmp
import pynos.utilities
class TestSNMP(unittest.TestCase):
"""
System unit tests. Compare expected XML to generated XML.
"""
def setUp(self):
self.snmp = pynos.versions.base.snmp.SNMP(pynos.utilities.return_xml)
self.namespace = 'urn:brocade.com:mgmt:brocade-snmp'
self.community = 'public'
def test_add_snmp_community(self):
expected = '<config><snmp-server xmlns="{0}"><community><community>'\
'{1}</community></community></snmp-server>'\
'</config>'.format(self.namespace, self.community)
result = self.snmp.add_snmp_community(community='public')
result = ET.tostring(result)
self.assertEquals(expected, result)
def test_add_snmp_community_exception(self):
with self.assertRaises(KeyError):
self.snmp.add_snmp_community()
def test_del_snmp_community(self):
expected = '<config><snmp-server xmlns="{0}">'\
'<community operation="delete"><community>{1}</community>'\
'</community></snmp-server></config>'.format(self.namespace,
self.community)
result = self.snmp.del_snmp_community(community='public')
result = ET.tostring(result)
self.assertEquals(expected, result)
def test_del_snmp_community_exception(self):
with self.assertRaises(KeyError):
self.snmp.del_snmp_community()
def test_add_snmp_host(self):
expected = '<config><snmp-server xmlns="{0}"><host><ip>10.10.10.10'\
'</ip><community>{1}</community><udp-port>162</udp-port>'\
'</host></snmp-server></config>'.format(self.namespace,
self.community)
result = self.snmp.add_snmp_host(community='public',
host_info=('10.10.10.10', '162'))
result = ET.tostring(result)
self.assertEquals(expected, result)
def test_add_snmp_host_exception(self):
with self.assertRaises(KeyError):
self.snmp.add_snmp_host()
def test_del_snmp_host(self):
expected = '<config><snmp-server xmlns="{0}">'\
'<host operation="delete"><ip>10.10.10.10</ip><community>'\
'{1}</community><udp-port>162</udp-port></host>'\
'</snmp-server></config>'.format(self.namespace,
self.community)
result = self.snmp.del_snmp_host(community='public',
host_info=('10.10.10.10', '162'))
result = ET.tostring(result)
self.assertEquals(expected, result)
def test_del_snmp_host_exception(self):
with self.assertRaises(KeyError):
self.snmp.del_snmp_host()
| apache-2.0 | Python | |
a6caa061b4f25b22e9cbbcf35d214f7fb1bb6e51 | Print files that are checked | phase/o,phase/o,phase/o,phase/o | ide.py | ide.py | # NOTE: pass -d to this to print debugging info when the server crashes.
from flask import Flask, render_template, url_for, request
from subprocess import Popen, PIPE, check_call
import sys, os, string, glob
app = Flask(__name__)
def compileO():
r = check_call(['gcc', 'o.c', '-DIDE', '-o', 'o-ide', '-lm'])
if r != 0:
print("O code could not be compile. Error: " + r)
@app.route('/', methods=['GET', 'POST'])
def index():
url_for('static', filename='logo.ico')
if request.method == 'POST':
#Check files that start with 'o-ide*'
files = glob.glob("o-ide*")
print(files)
#Check if C was compiled
if len(files) < 1:
print("Compiling O...")
compileO()
#Run code
code = request.form['code']
input = request.form['input'].replace('\r\n', '\n')
print('Got code:', code, 'input:', input)
print('Running O code...')
p = Popen(['o-ide', '-e', code], stdout=PIPE, stderr=PIPE, stdin=PIPE, universal_newlines=True)
output, error = p.communicate(input)
#Output to IDE
print('Output:', output, 'error:', error)
if p.returncode:
return render_template('error.html', code=code, input=input, error=error)
else:
return render_template('code.html', code=code, input=input, output=output, stack=error or '[]')
else:
return render_template('primary.html')
@app.route('/link/')
@app.route('/link/<link>')
def link(link='code="Error in linking code"o&input='):
url_for('static', filename='logo.ico')
print('Link:', link)
return render_template('link.html', link=link)
if __name__ == '__main__':
print('Compiling O...')
compileO()
print('Starting server...')
app.run(debug='-d' in sys.argv[1:]) | # NOTE: pass -d to this to print debugging info when the server crashes.
from flask import Flask, render_template, url_for, request
from subprocess import Popen, PIPE, check_call
import sys, os, string, glob
app = Flask(__name__)
def compileO():
r = check_call(['gcc', 'o.c', '-DIDE', '-o', 'o-ide', '-lm'])
if r != 0:
print("O code could not be compile. Error: " + r)
@app.route('/', methods=['GET', 'POST'])
def index():
url_for('static', filename='logo.ico')
if request.method == 'POST':
#Check if C was compiled
if len(glob.glob("o-ide*")) < 1:
print("Compiling O...")
compileO()
#Run code
code = request.form['code']
input = request.form['input'].replace('\r\n', '\n')
print('Got code:', code, 'input:', input)
print('Running O code...')
p = Popen(['o-ide', '-e', code], stdout=PIPE, stderr=PIPE, stdin=PIPE, universal_newlines=True)
output, error = p.communicate(input)
#Output to IDE
print('Output:', output, 'error:', error)
if p.returncode:
return render_template('error.html', code=code, input=input, error=error)
else:
return render_template('code.html', code=code, input=input, output=output, stack=error or '[]')
else:
return render_template('primary.html')
@app.route('/link/')
@app.route('/link/<link>')
def link(link='code="Error in linking code"o&input='):
url_for('static', filename='logo.ico')
print('Link:', link)
return render_template('link.html', link=link)
if __name__ == '__main__':
print('Compiling O...')
compileO()
print('Starting server...')
app.run(debug='-d' in sys.argv[1:]) | mit | Python |
dee38b7f7537913faea69d286682001a3b4b879a | Change reddit limit | kshvmdn/nba-scores,kshvmdn/NBAScores,kshvmdn/nba.js | streams.py | streams.py | import bs4
import html
import requests
STREAMS_URL = 'http://www.reddit.com/r/nbastreams.json?limit=30'
# STREAMS_URL = 'https://api.myjson.com/bins/40flf'
def main(teams):
submission = get_submission(request(STREAMS_URL), teams)
if submission is not None:
comments_url = 'http://reddit.com' + submission['permalink'] + '.json'
# comments_url = 'https://api.myjson.com/bins/3f7pf'
streams = get_streams(request(comments_url))
return streams if len(streams) > 0 else None
def request(url):
return requests.get(url).json()
def get_submission(response, teams):
for submission in response['data']['children']:
if all(team in submission['data']['title'] for team in teams):
return submission['data']
def get_streams(response):
streams = []
for comment in response[1]['data']['children']:
comment_body = html.unescape(comment['data']['body_html'])
soup = bs4.BeautifulSoup(comment_body, 'html.parser')
if soup.find('a'):
streams.append(soup.find('a')['href'])
return streams
if __name__ == '__main__':
print(main(['Toronto Raptors', 'Memphis Grizzlies']))
| import bs4
import html
import requests
STREAMS_URL = 'http://www.reddit.com/r/nbastreams.json?limit=20'
# STREAMS_URL = 'https://api.myjson.com/bins/40flf'
def main(teams):
submission = get_submission(request(STREAMS_URL), teams)
if submission is not None:
comments_url = 'http://reddit.com' + submission['permalink'] + '.json'
# comments_url = 'https://api.myjson.com/bins/3f7pf'
streams = get_streams(request(comments_url))
return streams if len(streams) > 0 else None
def request(url):
return requests.get(url).json()
def get_submission(response, teams):
for submission in response['data']['children']:
if all(team in submission['data']['title'] for team in teams):
return submission['data']
def get_streams(response):
streams = []
for comment in response[1]['data']['children']:
comment_body = html.unescape(comment['data']['body_html'])
soup = bs4.BeautifulSoup(comment_body, 'html.parser')
if soup.find('a'):
streams.append(soup.find('a')['href'])
return streams
if __name__ == '__main__':
print(main(['Toronto Raptors', 'Memphis Grizzlies']))
| mit | Python |
100ed05fe390588a9da646de86af90e6491b623b | bump version | vpuzzella/sixpack,blackskad/sixpack,seatgeek/sixpack,seatgeek/sixpack,llonchj/sixpack,spjwebster/sixpack,nickveenhof/sixpack,blackskad/sixpack,spjwebster/sixpack,vpuzzella/sixpack,smokymountains/sixpack,smokymountains/sixpack,smokymountains/sixpack,llonchj/sixpack,nickveenhof/sixpack,vpuzzella/sixpack,spjwebster/sixpack,llonchj/sixpack,vpuzzella/sixpack,nickveenhof/sixpack,blackskad/sixpack,llonchj/sixpack,seatgeek/sixpack,seatgeek/sixpack,blackskad/sixpack | sixpack/__init__.py | sixpack/__init__.py | __version__ = '0.2.0'
| __version__ = '0.1.4'
| bsd-2-clause | Python |
ad118f0f1b011b1d15f598ae03b84c1b76e5c9d4 | Update scale_image.py | BMCV/galaxy-image-analysis,BMCV/galaxy-image-analysis | tools/scale_image/scale_image.py | tools/scale_image/scale_image.py | import argparse
import sys
import skimage.io
import skimage.transform
import scipy.misc
import warnings
import os
from PIL import Image
def scale_image(input_file, output_file, scale, order=1):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
Image.MAX_IMAGE_PIXELS = 50000*50000
img_in = skimage.io.imread(input_file)
if order == 0:
interp = 'nearest'
elif order == 1:
interp = 'bilinear'
elif order == 2:
interp = 'bicubic'
if ',' in scale:
scale = scale[1:-1].split(',')
scale = [int(i) for i in scale]
elif '.' in scale:
scale = float(scale)
else:
scale = int(scale)
res = scipy.misc.imresize(img_in, scale, interp=interp)
skimage.io.imsave(output_file, res)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('input_file', type=argparse.FileType('r'), default=sys.stdin, help='input file')
parser.add_argument('out_file', type=argparse.FileType('w'), default=sys.stdin, help='out file (PNG)')
parser.add_argument('scale', type=str, help='fraction scaling factor(float), percentage scaling factor(int), output size(tuple(height,width))') # integer option not implemented in galaxy wrapper
parser.add_argument('order', type=int, default=1, help='interpolation method')
args = parser.parse_args()
scale_image(args.input_file.name, args.out_file.name, args.scale, args.order)
| import argparse
import sys
import skimage.io
import skimage.transform
import scipy.misc
import warnings
import os
from PIL import Image
def scale_image(input_file, output_file, scale, order=1):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
Image.MAX_IMAGE_PIXELS = 50000*50000
img_in = skimage.io.imread(input_file)
if order == 0:
interp = 'nearest'
elif order == 1:
interp = 'bilinear'
elif order == 2:
interp = 'bicubic'
if ',' in scale:
scale = scale[1:-1].split(',')
scale.reverse()
scale = [int(i) for i in scale]
elif '.' in scale:
scale = float(scale)
else:
scale = int(scale)
res = scipy.misc.imresize(img_in, scale, interp=interp)
skimage.io.imsave(output_file, res)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('input_file', type=argparse.FileType('r'), default=sys.stdin, help='input file')
parser.add_argument('out_file', type=argparse.FileType('w'), default=sys.stdin, help='out file (PNG)')
parser.add_argument('scale', type=str, help='fraction scaling factor(float), percentage scaling factor(int), output size(tuple(height,width))') # integer option not implemented in galaxy wrapper
parser.add_argument('order', type=int, default=1, help='interpolation method')
args = parser.parse_args()
scale_image(args.input_file.name, args.out_file.name, args.scale, args.order) | mit | Python |
69f85de77612ac8ed620831e1d043981395e9301 | Add pip install | techbureau/zaifbot,techbureau/zaifbot | zaifbot/__init__.py | zaifbot/__init__.py | import os, sys, subprocess
from zaifbot.errors import ZaifBotError
__version__ = '0.0.5'
class ZaifBot:
_process = []
def add_running_process(self, auto_trade_process):
self._process.append(auto_trade_process)
def start(self):
running_processes = []
for process in self._process:
process.start()
running_processes.append(process)
[x.join() for x in running_processes]
def install_ta_lib():
if sys.platform.startswith('linux'):
# fixme
cwd = os.path.join(os.path.dirname(__file__), 'setup')
subprocess.call(['tar', '-xzf', 'ta-lib-0.4.0-src.tar.gz'], cwd=cwd)
talib_path = os.path.join(cwd, 'ta-lib')
subprocess.call(['./configure', '--prefix=/usr'], cwd=talib_path, shell=True)
subprocess.call(['make'], cwd=talib_path, shell=True)
subprocess.call(['sudo', 'make', 'install'], cwd=talib_path)
subprocess.call(['pip', 'install', 'TA-Lib'])
return
if sys.platform.startswith('darwin'):
subprocess.call(["brew", "install", "ta-lib"])
subprocess.call(['pip', 'install', 'TA-Lib'])
return
if sys.platform.startswith('win'):
bits = '32' if sys.maxsize < 2 ** 31 else '64'
py_version = str(sys.version_info.major) + str(sys.version_info.minor)
__install_talib_for_windows(bits, py_version)
return
raise ZaifBotError('zaifbot does not support your platform')
def __install_talib_for_windows(bits, py_version):
if bits == '32':
file = os.path.join(os.path.dirname(__file__),
"setup/TA_Lib-0.4.10-cp{v}-cp{v}m-win32.whl".format(v=py_version))
else:
file = os.path.join(os.path.dirname(__file__),
"setup/TA_Lib-0.4.10-cp{v}-cp{v}m-win_amd64.whl".format(v=py_version))
if os.path.isfile(file):
subprocess.call(["pip", "install", file])
return
raise ZaifBotError('zaifbot does not support your platform')
| import os, sys, subprocess
from zaifbot.errors import ZaifBotError
__version__ = '0.0.5'
class ZaifBot:
_process = []
def add_running_process(self, auto_trade_process):
self._process.append(auto_trade_process)
def start(self):
running_processes = []
for process in self._process:
process.start()
running_processes.append(process)
[x.join() for x in running_processes]
def install_ta_lib():
if sys.platform.startswith('linux'):
# fixme
cwd = os.path.join(os.path.dirname(__file__), 'setup')
subprocess.call(['tar', '-xzf', 'ta-lib-0.4.0-src.tar.gz'], cwd=cwd)
talib_path = os.path.join(cwd, 'ta-lib')
subprocess.call(['./configure', '--prefix=/usr'], cwd=talib_path, shell=True)
subprocess.call(['make'], cwd=talib_path, shell=True)
subprocess.call(['sudo', 'make', 'install'], cwd=talib_path)
return
if sys.platform.startswith('darwin'):
subprocess.call(["brew", "install", "ta-lib"])
return
if sys.platform.startswith('win'):
bits = '32' if sys.maxsize < 2 ** 31 else '64'
py_version = str(sys.version_info.major) + str(sys.version_info.minor)
__install_talib_for_windows(bits, py_version)
return
raise ZaifBotError('zaifbot does not support your platform')
def __install_talib_for_windows(bits, py_version):
if bits == '32':
file = os.path.join(os.path.dirname(__file__),
"setup/TA_Lib-0.4.10-cp{v}-cp{v}m-win32.whl".format(v=py_version))
else:
file = os.path.join(os.path.dirname(__file__),
"setup/TA_Lib-0.4.10-cp{v}-cp{v}m-win_amd64.whl".format(v=py_version))
if os.path.isfile(file):
subprocess.call(["pip", "install", file])
return
raise ZaifBotError('zaifbot does not support your platform')
| mit | Python |
df707de0bb8b08011cd7e875c4f7d096f6fe2bb6 | simplify cursutils initialization | timlegrand/giterm,timlegrand/giterm | src/giterm/cursutils.py | src/giterm/cursutils.py | # -*- coding: utf-8 -*-
import curses
import pdb
import sys
screen = None
def init(stdscr):
global screen
screen = stdscr
def finalize(stdscr=None):
if not stdscr and not screen:
raise Exception('either call init() first or provide a valid window object')
stdscr = screen if screen and not stdscr else stdscr
curses.nocbreak()
stdscr.keypad(0)
curses.echo()
curses.endwin()
def debug(stdscr=None):
if not stdscr and not screen:
raise Exception('either call init() first or provide a valid window object')
stdscr = screen if screen and not stdscr else stdscr
finalize(stdscr)
debugger = pdb.Pdb()
debugger.reset()
debugger.do_where(None)
users_frame = sys._getframe().f_back # One frame up, outside this function
debugger.interaction(users_frame, None)
# Use with:
# import cursutils
# cursutils.init(stdscr) # where stdscr is a `curses` Window object
# cursutils.debug()
| # -*- coding: utf-8 -*-
import curses
import pdb
import sys
screen = None
initialized = False
def init(stdscr):
global screen
screen = stdscr
global initialized
initialized = True
def finalize(stdscr):
curses.nocbreak()
stdscr.keypad(0)
curses.echo()
curses.endwin()
def debug(stdscr=screen):
if not initialized:
raise Exception('cursutils must be initialized first')
if not stdscr:
raise Exception('stdscr must be a valid window object')
finalize(stdscr)
debugger = pdb.Pdb()
debugger.reset()
debugger.do_where(None)
users_frame = sys._getframe().f_back # One frame up, outside this function
debugger.interaction(users_frame, None)
# Use with:
# import cursutils
# cursutils.init(stdscr) # where stdscr is a curses window object
# cursutils.debug(cursutils.screen)
| bsd-2-clause | Python |
27fec389928c93ba0efe4ca1e4c5b34c3b73fa2c | Add a version base on djangocms version from where the code was cloned | emencia/emencia-cms-snippet,emencia/emencia-cms-snippet,emencia/emencia-cms-snippet | snippet/__init__.py | snippet/__init__.py | """
"cms.plugins.snippet" (from djangocms) clone to extend it with some facilities
"""
__version__ = '2.3.6.1'
| """
"cms.plugins.snippet" (from djangocms) clone to extend it with some facilities
""" | bsd-3-clause | Python |
7c2e67da2b26b6eebd6d11346215dbd962b3efe9 | Bump version number | mailgun/flanker | flanker/__init__.py | flanker/__init__.py | __version__ = '0.9.10'
| __version__ = '0.9.7'
| apache-2.0 | Python |
5dc128b84d2c76fc9039c412b91c1a31dd4bfa9b | Prepare v3.0.5.dev | Flexget/Flexget,Flexget/Flexget,crawln45/Flexget,ianstalk/Flexget,Flexget/Flexget,ianstalk/Flexget,malkavi/Flexget,crawln45/Flexget,crawln45/Flexget,Flexget/Flexget,malkavi/Flexget,malkavi/Flexget,ianstalk/Flexget,malkavi/Flexget,crawln45/Flexget | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '3.0.5.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '3.0.4'
| mit | Python |
1256c71a7c4908c32920b081c62075d3c9efbadf | Prepare v3.0.12.dev | Flexget/Flexget,Flexget/Flexget,Flexget/Flexget,malkavi/Flexget,ianstalk/Flexget,ianstalk/Flexget,crawln45/Flexget,crawln45/Flexget,malkavi/Flexget,crawln45/Flexget,malkavi/Flexget,crawln45/Flexget,malkavi/Flexget,ianstalk/Flexget,Flexget/Flexget | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '3.0.12.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '3.0.11'
| mit | Python |
4b9264c1652bcf98a58da0a0c3f56e129eb25cfe | Prepare v1.2.398.dev | qvazzler/Flexget,OmgOhnoes/Flexget,jacobmetrick/Flexget,Flexget/Flexget,poulpito/Flexget,dsemi/Flexget,LynxyssCZ/Flexget,ianstalk/Flexget,tarzasai/Flexget,tobinjt/Flexget,malkavi/Flexget,antivirtel/Flexget,JorisDeRieck/Flexget,sean797/Flexget,qk4l/Flexget,cvium/Flexget,OmgOhnoes/Flexget,oxc/Flexget,crawln45/Flexget,Flexget/Flexget,JorisDeRieck/Flexget,drwyrm/Flexget,drwyrm/Flexget,jawilson/Flexget,cvium/Flexget,LynxyssCZ/Flexget,oxc/Flexget,poulpito/Flexget,Danfocus/Flexget,crawln45/Flexget,qk4l/Flexget,Flexget/Flexget,antivirtel/Flexget,tobinjt/Flexget,Danfocus/Flexget,antivirtel/Flexget,lildadou/Flexget,poulpito/Flexget,Pretagonist/Flexget,ianstalk/Flexget,jawilson/Flexget,malkavi/Flexget,JorisDeRieck/Flexget,dsemi/Flexget,OmgOhnoes/Flexget,sean797/Flexget,lildadou/Flexget,lildadou/Flexget,gazpachoking/Flexget,jacobmetrick/Flexget,jawilson/Flexget,malkavi/Flexget,sean797/Flexget,crawln45/Flexget,Danfocus/Flexget,qk4l/Flexget,qvazzler/Flexget,cvium/Flexget,tobinjt/Flexget,Pretagonist/Flexget,drwyrm/Flexget,Pretagonist/Flexget,tsnoam/Flexget,tsnoam/Flexget,tarzasai/Flexget,jawilson/Flexget,JorisDeRieck/Flexget,oxc/Flexget,Flexget/Flexget,ianstalk/Flexget,malkavi/Flexget,jacobmetrick/Flexget,qvazzler/Flexget,Danfocus/Flexget,LynxyssCZ/Flexget,tsnoam/Flexget,tobinjt/Flexget,tarzasai/Flexget,crawln45/Flexget,LynxyssCZ/Flexget,dsemi/Flexget,gazpachoking/Flexget | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '1.2.398.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '1.2.397'
| mit | Python |
de22e547c57be633cb32d51e93a6efe9c9e90293 | Remove buggy log message if prctl is missing | hb9kns/PyBitmessage,hb9kns/PyBitmessage,hb9kns/PyBitmessage,hb9kns/PyBitmessage | src/helper_threading.py | src/helper_threading.py | import threading
try:
import prctl
def set_thread_name(name): prctl.set_name(name)
def _thread_name_hack(self):
set_thread_name(self.name)
threading.Thread.__bootstrap_original__(self)
threading.Thread.__bootstrap_original__ = threading.Thread._Thread__bootstrap
threading.Thread._Thread__bootstrap = _thread_name_hack
except ImportError:
def set_thread_name(name): pass
class StoppableThread(object):
def initStop(self):
self.stop = threading.Event()
self._stopped = False
def stopThread(self):
self._stopped = True
self.stop.set()
| import threading
try:
import prctl
def set_thread_name(name): prctl.set_name(name)
def _thread_name_hack(self):
set_thread_name(self.name)
threading.Thread.__bootstrap_original__(self)
threading.Thread.__bootstrap_original__ = threading.Thread._Thread__bootstrap
threading.Thread._Thread__bootstrap = _thread_name_hack
except ImportError:
log('WARN: prctl module is not installed. You will not be able to see thread names')
def set_thread_name(name): pass
class StoppableThread(object):
def initStop(self):
self.stop = threading.Event()
self._stopped = False
def stopThread(self):
self._stopped = True
self.stop.set()
| mit | Python |
1b6eecc10e45aa0728ea48fd8b00419396765f1e | Complete main | landportal/landbook-importers,weso/landportal-importers,landportal/landbook-importers | IpfriExtractor/Main.py | IpfriExtractor/Main.py | """
Created on 14/01/2014
@author: Dani
"""
import os
import sys
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir), 'CountryReconciler'))
sys.path.append(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir), "LandPortalEntities"))
sys.path.append(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir), "ModelToXml"))
import logging
from ConfigParser import ConfigParser
from es.weso.extractor.IpfriExtractor import IpfriExtractor
from es.weso.translator.ipfri_trasnlator import IpfriTranslator
def configure_log():
FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(filename='wbextractor.log', level=logging.INFO,
format=FORMAT)
def run():
configure_log()
log = logging.getLogger('ipfriextractor')
config = ConfigParser()
config.read("./files/configuration.ini")
try:
xml_extractor = IpfriExtractor(log, config)
xml_extractor.run()
except BaseException as e:
log.error("While extracting data from the source: " + e.message)
raise RuntimeError()
try:
xml_translator = IpfriTranslator(log, config, True)
xml_translator.run()
except BaseException as e:
log.error("While trying to introduce raw info into our model: " + e.message)
raise RuntimeError()
if __name__ == '__main__':
try:
run()
print 'Done!'
except:
print 'Execution finalized with erros. Check logs' | '''
Created on 14/01/2014
@author: Dani
'''
import logging
from ConfigParser import ConfigParser
from es.weso.extractor.IpfriExtractor import IpfriExtractor
from es.weso.translator.ipfri_trasnlator import IpfriTranslator
def configure_log():
FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(filename='wbextractor.log', level=logging.INFO,
format=FORMAT)
def run():
configure_log()
log = logging.getLogger('ipfriextractor')
config = ConfigParser()
config.read("./files/configuration.ini")
try:
xml_extractor = IpfriExtractor(log, config)
xml_extractor.run()
except BaseException as e:
log.error("While extracting data from the source: " + e.message)
raise RuntimeError()
try:
xml_translator = IpfriTranslator(log, config, True)
xml_translator.run()
except BaseException as e:
log.error("While trying to introduce raw info into our model: " + e.message)
raise RuntimeError()
if __name__ == '__main__':
try:
run()
print 'Done!'
except:
print 'Execution finalized with erros. Check logs' | mit | Python |
f1937ba3381468644c3b1b01d01a73eff0b91031 | Create v0.10 branch | plepe/pgmapcss,plepe/pgmapcss | pgmapcss/version.py | pgmapcss/version.py | __all__ = 'VERSION', 'VERSION_INFO'
#: (:class:`tuple`) The version tuple e.g. ``(0, 9, 2)``.
VERSION_INFO = (0, 10, 'dev')
#: (:class:`basestring`) The version string e.g. ``'0.9.2'``.
if len(VERSION_INFO) == 4:
VERSION = '%d.%d.%d-%s' % VERSION_INFO
elif type(VERSION_INFO[2]) == str:
VERSION = '%d.%d-%s' % VERSION_INFO
else:
VERSION = '%d.%d.%d' % VERSION_INFO
| __all__ = 'VERSION', 'VERSION_INFO'
#: (:class:`tuple`) The version tuple e.g. ``(0, 9, 2)``.
VERSION_INFO = (0, 9, 0)
#: (:class:`basestring`) The version string e.g. ``'0.9.2'``.
if len(VERSION_INFO) == 4:
VERSION = '%d.%d.%d-%s' % VERSION_INFO
elif type(VERSION_INFO[2]) == str:
VERSION = '%d.%d-%s' % VERSION_INFO
else:
VERSION = '%d.%d.%d' % VERSION_INFO
| agpl-3.0 | Python |
140ea03a727e6f4301820efec07e0deda16ca0b4 | Set version 1.12.9 | atztogo/phono3py,atztogo/phono3py,atztogo/phono3py,atztogo/phono3py | phono3py/version.py | phono3py/version.py | # Copyright (C) 2013 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__version__ = "1.12.9"
| # Copyright (C) 2013 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__version__ = "1.12.8"
| bsd-3-clause | Python |
394ba974ebb7bf2f6400cf5bdd6550d667b7b7ef | add underscore | joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue | hoomd/update/remove_drift.py | hoomd/update/remove_drift.py | # Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""Implement RemoveDrift."""
import hoomd
from hoomd.operation import Updater
from hoomd.data.typeconverter import NDArrayValidator
from hoomd import _hoomd
import numpy as np
class RemoveDrift(Updater):
r"""Remove the average drift from a system restrained on a lattice.
Args:
reference_positions ((*N_particles*, 3) `numpy.ndarray` of
``numpy.float64``): the reference positions of the
lattice :math:`[\mathrm{length}]`.
trigger (`hoomd.trigger.Trigger`): Select the timesteps to remove drift.
During the time steps specified by *trigger*, the mean drift
:math:`\Delta\vec{r}` from the *reference_positions*
(:math:`\vec{r}_{ref, i}`) is substracted from the current particle
positions (:math:`\vec{r}_i`). The drift is then given is given by:
.. math::
\Delta\vec{r} = \frac{1}{\mathrm{N_{particles}}}
\sum_{i=1}^\mathrm{N_{particles}} \mathrm{min\_image}(\vec{r}_i -
\vec{r}_{ref,i})
"""
def __init__(self, reference_positions, trigger=1):
super().__init__(trigger)
self._param_dict.update(
{"reference_positions": NDArrayValidator(np.float64, (None, 3))})
self.reference_positions = reference_positions
def _add(self, simulation):
"""Add the operation to a simulation."""
super()._add(simulation)
def _attach(self):
if isinstance(self._simulation.device, hoomd.device.GPU):
self._simulation.device._cpp_msg.warning(
"Falling back on CPU. No GPU implementation available.\n")
self._cpp_obj = _hoomd.UpdaterRemoveDrift(
self._simulation.state._cpp_sys_def, self.reference_positions)
super()._attach()
| # Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""Implement RemoveDrift."""
import hoomd
from hoomd.operation import Updater
from hoomd.data.typeconverter import NDArrayValidator
from hoomd import _hoomd
import numpy as np
class RemoveDrift(Updater):
r"""Remove the average drift from a system restrained on a lattice.
Args:
reference_positions ((*N_particles*, 3) `numpy.ndarray` of
``numpy.float64``): the reference positions of the
lattice :math:`[\mathrm{length}]`.
trigger (`hoomd.trigger.Trigger`): Select the timesteps to remove drift.
During the time steps specified by *trigger*, the mean drift
:math:`\Delta\vec{r}` from the *reference_positions*
(:math:`\vec{r}_{ref, i}`) is substracted from the current particle
positions (:math:`\vec{r}_i`). The drift is then given is given by:
.. math::
\Delta\vec{r} = \frac{1}{\mathrm{N_{particles}}}
\sum_{i=1}^\mathrm{N_{particles}} \mathrm{minimage}(\vec{r}_i -
\vec{r}_{ref,i})
"""
def __init__(self, reference_positions, trigger=1):
super().__init__(trigger)
self._param_dict.update(
{"reference_positions": NDArrayValidator(np.float64, (None, 3))})
self.reference_positions = reference_positions
def _add(self, simulation):
"""Add the operation to a simulation."""
super()._add(simulation)
def _attach(self):
if isinstance(self._simulation.device, hoomd.device.GPU):
self._simulation.device._cpp_msg.warning(
"Falling back on CPU. No GPU implementation available.\n")
self._cpp_obj = _hoomd.UpdaterRemoveDrift(
self._simulation.state._cpp_sys_def, self.reference_positions)
super()._attach()
| bsd-3-clause | Python |
61ef80e2c5aab9ed58687ddcf43d53e3e669c827 | Fix issue in __init__.py | TaurusOlson/fntools | fntools/__init__.py | fntools/__init__.py | """
fntools: functional programming tools for data processing
=========================================================
"""
from .fntools import use_with, zip_with, unzip, concat, mapcat, dmap, rmap, replace,\
compose, groupby, reductions, split, assoc, dispatch, multimap,\
multistarmap, pipe, pipe_each, shift, repeatedly, update, duplicates,
pluck, pluck_each, use, get_in,\
valueof, take, drop, find, remove, isiterable, are_in, any_in,\
all_in, monotony, attributes, find_each, dfilter, occurrences,\
indexof, indexesof, count, isdistinct, nrow, ncol, names
__version__ = '1.1.1'
__title__ = 'fntools'
__author__ = 'Taurus Olson'
__license__ = 'MIT'
| """
fntools: functional programming tools for data processing
=========================================================
"""
from .fntools import use_with, zip_with, unzip, concat, mapcat, dmap, rmap, replace,\
compose, groupby, reductions, split, assoc, dispatch, multimap,\
multistarmap, pipe, pipe_each, shift, repeatedly, update, duplicates, pluck, use, get_in,\
valueof, take, drop, find, remove, isiterable, are_in, any_in,\
all_in, monotony, attributes, find_each, dfilter, occurrences,\
indexof, indexesof, count, isdistinct, nrow, ncol, names
__version__ = '1.1.1'
__title__ = 'fntools'
__author__ = 'Taurus Olson'
__license__ = 'MIT'
| mit | Python |
4232c762888fe70a035693a8313cc0adb02e257b | update comparisons in BlackBody1D tests for new version of constants | funbaker/astropy,astropy/astropy,astropy/astropy,stargaser/astropy,AustereCuriosity/astropy,StuartLittlefair/astropy,bsipocz/astropy,DougBurke/astropy,astropy/astropy,larrybradley/astropy,saimn/astropy,kelle/astropy,MSeifert04/astropy,MSeifert04/astropy,stargaser/astropy,stargaser/astropy,StuartLittlefair/astropy,stargaser/astropy,lpsinger/astropy,StuartLittlefair/astropy,saimn/astropy,MSeifert04/astropy,lpsinger/astropy,larrybradley/astropy,kelle/astropy,MSeifert04/astropy,dhomeier/astropy,pllim/astropy,funbaker/astropy,dhomeier/astropy,aleksandr-bakanov/astropy,dhomeier/astropy,astropy/astropy,larrybradley/astropy,bsipocz/astropy,funbaker/astropy,mhvk/astropy,larrybradley/astropy,kelle/astropy,funbaker/astropy,astropy/astropy,StuartLittlefair/astropy,pllim/astropy,saimn/astropy,StuartLittlefair/astropy,lpsinger/astropy,pllim/astropy,kelle/astropy,DougBurke/astropy,saimn/astropy,AustereCuriosity/astropy,mhvk/astropy,AustereCuriosity/astropy,lpsinger/astropy,DougBurke/astropy,lpsinger/astropy,larrybradley/astropy,bsipocz/astropy,mhvk/astropy,pllim/astropy,aleksandr-bakanov/astropy,saimn/astropy,DougBurke/astropy,pllim/astropy,aleksandr-bakanov/astropy,AustereCuriosity/astropy,aleksandr-bakanov/astropy,kelle/astropy,dhomeier/astropy,AustereCuriosity/astropy,bsipocz/astropy,dhomeier/astropy,mhvk/astropy,mhvk/astropy | astropy/modeling/tests/test_blackbody.py | astropy/modeling/tests/test_blackbody.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, unicode_literals, division,
print_function)
import pytest
import numpy as np
from ..blackbody import BlackBody1D
from ..fitting import LevMarLSQFitter
from ...tests.helper import assert_quantity_allclose
from ... import units as u
try:
from scipy import optimize
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
class TestBlackbody1D():
# Make sure the temperature equivalency automatically applies by trying
# to pass temperatures in celsius
@pytest.mark.parametrize('temperature', (3000 * u.K, 2726.85 * u.deg_C))
def test_evaluate(self, temperature):
bolometric_flux = 1000 * u.L_sun / (4 * np.pi * (1.5 * u.pc)**2)
b = BlackBody1D(temperature=temperature,
bolometric_flux=bolometric_flux)
assert_quantity_allclose(b(1.4 * u.micron), 4734464.498937388 * u.Jy)
assert_quantity_allclose(b(214.13747 * u.THz), 4734464.498937388 * u.Jy)
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit(self):
fitter = LevMarLSQFitter()
b = BlackBody1D(3000 * u.K)
wav = np.array([0.5, 5, 10]) * u.micron
fnu = np.array([1, 10, 5]) * u.Jy
b_fit = fitter(b, wav, fnu)
assert_quantity_allclose(b_fit.temperature, 2840.744774408546 * u.K)
assert_quantity_allclose(b_fit.bolometric_flux, 6.821837296857152e-08 * u.erg / u.cm**2 / u.s)
| # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, unicode_literals, division,
print_function)
import pytest
import numpy as np
from ..blackbody import BlackBody1D
from ..fitting import LevMarLSQFitter
from ...tests.helper import assert_quantity_allclose
from ... import units as u
try:
from scipy import optimize
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
class TestBlackbody1D():
# Make sure the temperature equivalency automatically applies by trying
# to pass temperatures in celsius
@pytest.mark.parametrize('temperature', (3000 * u.K, 2726.85 * u.deg_C))
def test_evaluate(self, temperature):
bolometric_flux = 1000 * u.L_sun / (4 * np.pi * (1.5 * u.pc)**2)
b = BlackBody1D(temperature=temperature,
bolometric_flux=bolometric_flux)
assert_quantity_allclose(b(1.4 * u.micron), 4756726.111003904 * u.Jy)
assert_quantity_allclose(b(214.13747 * u.THz), 4756726.111003904 * u.Jy)
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit(self):
fitter = LevMarLSQFitter()
b = BlackBody1D(3000 * u.K)
wav = np.array([0.5, 5, 10]) * u.micron
fnu = np.array([1, 10, 5]) * u.Jy
b_fit = fitter(b, wav, fnu)
assert_quantity_allclose(b_fit.temperature, 2840.743996797581 * u.K)
assert_quantity_allclose(b_fit.bolometric_flux, 6.821837529644375e-08 * u.erg / u.cm**2 / u.s)
| bsd-3-clause | Python |
f00ff91ec9521cec2ef75fc165180307b8bf8321 | test estructura temporal | JavierGarciaD/banking | banking/tests/test_credit_constructor.py | banking/tests/test_credit_constructor.py | '''
Created on 8/06/2017
@author: spectre
'''
from credit.constructor import *
import pytest
class TestEstructuraTemporal(object):
def test_one(self):
pass
def test_two(self):
pass | '''
Created on 8/06/2017
@author: spectre
'''
from credit.constructor import *
import pytest
def test_my_function():
assert Prueba(2, 1).my_function() == 3
| mit | Python |
92ec14fd32ba0c2609a8a64898036197fbb5fa70 | allow storage to overwrite files | ei-grad/django-pipeline,Tekco/django-pipeline,lexqt/django-pipeline,edx/django-pipeline,edwinlunando/django-pipeline,fahhem/django-pipeline,Kobold/django-pipeline,sjhewitt/django-pipeline,d9pouces/django-pipeline,skirsdeda/django-pipeline,airtonix/django-pipeline,ei-grad/django-pipeline,leonardoo/django-pipeline,mgorny/django-pipeline,Kami/django-pipeline,jwatson/django-pipeline,letolab/django-pipeline,TwigWorld/django-pipeline,edx/django-pipeline,Kami/django-pipeline,Nivl/django-pipeline,jazzband/django-pipeline,jazzband/django-pipeline,letolab/django-pipeline,skirsdeda/django-pipeline,sideffect0/django-pipeline,jwatson/django-pipeline,almost/django-pipeline,lexqt/django-pipeline,apendleton/django-pipeline,chipx86/django-pipeline,caioariede/django-pipeline,zapier/django-pipeline,novapost/django-pipeline,d9pouces/django-pipeline,perdona/django-pipeline,camilonova/django-pipeline,theatlantic/django-pipeline,floppym/django-pipeline,caioariede/django-pipeline,beedesk/django-pipeline,mweibel/django-pipeline,apendleton/django-pipeline,edx/django-pipeline,mweibel/django-pipeline,demux/django-pipeline,simudream/django-pipeline,cyberdelia/django-pipeline,vbabiy/django-pipeline,vbabiy/django-pipeline,wienczny/django-pipeline,hyperoslo/django-pipeline,tayfun/django-pipeline,jazzband/django-pipeline,almost/django-pipeline,wienczny/django-pipeline,lexqt/django-pipeline,yuvadm/django-pipeline,adamcharnock/django-pipeline,necaris/django-pipeline,Kami/django-pipeline,theatlantic/django-pipeline,pombredanne/django-pipeline-1,Kobold/django-pipeline,demux/django-pipeline,vstoykov/django-pipeline,edwinlunando/django-pipeline,pdr/django-pipeline,hyperoslo/django-pipeline,edwinlunando/django-pipeline,lydell/django-pipeline,d9pouces/django-pipeline,apendleton/django-pipeline,hyperoslo/django-pipeline,simudream/django-pipeline,beedesk/django-pipeline,perdona/django-pipeline,zapier/django-pipeline,cyberdelia/django-pipeline,perdona/django-pipeline,floppym/django-pipeline,pdr/django-pipeline,TwigWorld/django-pipeline,vstoykov/django-pipeline,almost/django-pipeline,Kobold/django-pipeline,adamcharnock/django-pipeline,simudream/django-pipeline,ei-grad/django-pipeline,airtonix/django-pipeline,caioariede/django-pipeline,teozkr/django-pipeline,necaris/django-pipeline,lydell/django-pipeline,botify-labs/django-pipeline,jensenbox/django-pipeline,mgorny/django-pipeline,jensenbox/django-pipeline,skolsuper/django-pipeline,novapost/django-pipeline,leonardoo/django-pipeline,skolsuper/django-pipeline,wienczny/django-pipeline,Tekco/django-pipeline,camilonova/django-pipeline,chipx86/django-pipeline,yuvadm/django-pipeline,beedesk/django-pipeline,leonardoo/django-pipeline,joshkehn/django-pipeline,theatlantic/django-pipeline,mgorny/django-pipeline,sideffect0/django-pipeline,floppym/django-pipeline,fabiosantoscode/django-pipeline,kronion/django-pipeline,vbabiy/django-pipeline,fahhem/django-pipeline,jwatson/django-pipeline,cyberdelia/django-pipeline,teozkr/django-pipeline,yuvadm/django-pipeline,zapier/django-pipeline,adamcharnock/django-pipeline,Nivl/django-pipeline,chipx86/django-pipeline,joshkehn/django-pipeline,kronion/django-pipeline,jensenbox/django-pipeline,botify-labs/django-pipeline,sjhewitt/django-pipeline,joshkehn/django-pipeline,pombredanne/django-pipeline-1,skirsdeda/django-pipeline,fabiosantoscode/django-pipeline,sideffect0/django-pipeline,botify-labs/django-pipeline,Tekco/django-pipeline,TwigWorld/django-pipeline,lydell/django-pipeline,novapost/django-pipeline,kronion/django-pipeline,tayfun/django-pipeline,camilonova/django-pipeline,sjhewitt/django-pipeline,tayfun/django-pipeline,skolsuper/django-pipeline,demux/django-pipeline | pipeline/storage.py | pipeline/storage.py | import os
from datetime import datetime
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.utils.functional import LazyObject
from pipeline.conf import settings
class PipelineStorage(FileSystemStorage):
def __init__(self, location=None, base_url=None, *args, **kwargs):
if location is None:
location = settings.PIPELINE_ROOT
if base_url is None:
base_url = settings.PIPELINE_URL
super(PipelineStorage, self).__init__(location, base_url, *args, **kwargs)
def get_available_name(self, name):
if self.exists(name):
self.delete(name)
return name
def accessed_time(self, name):
return datetime.fromtimestamp(os.path.getatime(self.path(name)))
def created_time(self, name):
return datetime.fromtimestamp(os.path.getctime(self.path(name)))
def modified_time(self, name):
return datetime.fromtimestamp(os.path.getmtime(self.path(name)))
class DefaultStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class(settings.PIPELINE_STORAGE)()
storage = DefaultStorage()
| import os
from datetime import datetime
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.utils.functional import LazyObject
from pipeline.conf import settings
class PipelineStorage(FileSystemStorage):
def __init__(self, location=None, base_url=None, *args, **kwargs):
if location is None:
location = settings.PIPELINE_ROOT
if base_url is None:
base_url = settings.PIPELINE_URL
super(PipelineStorage, self).__init__(location, base_url, *args, **kwargs)
def accessed_time(self, name):
return datetime.fromtimestamp(os.path.getatime(self.path(name)))
def created_time(self, name):
return datetime.fromtimestamp(os.path.getctime(self.path(name)))
def modified_time(self, name):
return datetime.fromtimestamp(os.path.getmtime(self.path(name)))
class DefaultStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class(settings.PIPELINE_STORAGE)()
storage = DefaultStorage()
| mit | Python |
a25920e3549933e4c6c158cc9490f3eaa883ec66 | Use the same child->parent "formula" used by heapq.py. | sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator | Lib/test/test_heapq.py | Lib/test/test_heapq.py | """Unittests for heapq."""
from test.test_support import verify, vereq, verbose, TestFailed
from heapq import heappush, heappop
import random
def check_invariant(heap):
# Check the heap invariant.
for pos, item in enumerate(heap):
if pos: # pos 0 has no parent
parentpos = (pos-1) >> 1
verify(heap[parentpos] <= item)
def test_main():
# 1) Push 100 random numbers and pop them off, verifying all's OK.
heap = []
data = []
check_invariant(heap)
for i in range(256):
item = random.random()
data.append(item)
heappush(heap, item)
check_invariant(heap)
results = []
while heap:
item = heappop(heap)
check_invariant(heap)
results.append(item)
data_sorted = data[:]
data_sorted.sort()
vereq(data_sorted, results)
# 2) Check that the invariant holds for a sorted array
check_invariant(results)
# 3) Naive "N-best" algorithm
heap = []
for item in data:
heappush(heap, item)
if len(heap) > 10:
heappop(heap)
heap.sort()
vereq(heap, data_sorted[-10:])
# Make user happy
if verbose:
print "All OK"
if __name__ == "__main__":
test_main()
| """Unittests for heapq."""
from test.test_support import verify, vereq, verbose, TestFailed
from heapq import heappush, heappop
import random
def check_invariant(heap):
# Check the heap invariant.
for pos, item in enumerate(heap):
parentpos = ((pos+1) >> 1) - 1
if parentpos >= 0:
verify(heap[parentpos] <= item)
def test_main():
# 1) Push 100 random numbers and pop them off, verifying all's OK.
heap = []
data = []
check_invariant(heap)
for i in range(256):
item = random.random()
data.append(item)
heappush(heap, item)
check_invariant(heap)
results = []
while heap:
item = heappop(heap)
check_invariant(heap)
results.append(item)
data_sorted = data[:]
data_sorted.sort()
vereq(data_sorted, results)
# 2) Check that the invariant holds for a sorted array
check_invariant(results)
# 3) Naive "N-best" algorithm
heap = []
for item in data:
heappush(heap, item)
if len(heap) > 10:
heappop(heap)
heap.sort()
vereq(heap, data_sorted[-10:])
# Make user happy
if verbose:
print "All OK"
if __name__ == "__main__":
test_main()
| mit | Python |
22937ae3ac01df89d0bf1018f62c7b9cbae76627 | change event registry to a set to make sure the same function is not called twice | panoplyio/panoply-python-sdk | panoply/events.py | panoply/events.py | class Emitter(object):
_events = None
def __init__(self, events={}):
self._events = events
def on(self, name, fn):
self._events.setdefault(name, set([])).add(fn)
return self
def fire(self, name, data):
for fn in self._events.get("*", set([])):
fn(name, data)
for fn in self._events.get(name, set([])):
fn(data)
return self
| class Emitter(object):
_events = None
def __init__(self, events={}):
self._events = events
def on(self, name, fn):
self._events.setdefault(name, []).append(fn)
return self
def fire(self, name, data):
for fn in self._events.get("*", []):
fn(name, data)
for fn in self._events.get(name, []):
fn(data)
return self
| mit | Python |
b12b4f47d3cd701506380c914e45b9958490392c | Add FT specs and use unittest. | ejpreciado/superlists,ejpreciado/superlists,ejpreciado/superlists | functional_tests.py | functional_tests.py | from selenium import webdriver
import unittest
class NewVisitorTest(unittest.TestCase):
def setUp(self):
self.browser = webdriver.Firefox()
def tearDown(self):
self.browser.quit()
def test_can_start_a_list_and_retrieve_it_later(self):
# Edith has heard about a cool new onlien to-do app. She goes
# to check out its homepage
self.browser.get('http://localhost:8000')
# She notices the page title and header mention to-do lists
self.assertIn('To-Do', self.browser.title)
self.fail('Finish the test!')
# She is invited to enter a to-do item straight away
# Edith has heard about a cool new online to-do app. She goes
# to check out its homepage
# She notices the page title and header mention to-do lists
# She is invited to enter a to-do item straight away
# She types "Buy peacock feathers" into a text box (Edith's hobby
# is tying fly-fishing lures)
# When she hits enter, the page updates, and now the page lists
# "1: Buy peacock feathers" as an itme in a to-do list
# There is still a text box inviting her to add another item. She
# enters "Use peacock feathers to make a fly" (Edith is very methodical)
# The page updates again, and now shows both updates on her list
# Edith wonders whether the site will remember her list. Then she sees
# that the site has generated a unique URL for her -- there is some
# explanatory text to that effect.
# She visits that URL - her to-do list is still there.
# Satisfied, she goes back to sleep
if __name__ == '__main__':
unittest.main(warnings='ignore')
| from selenium import webdriver
browser = webdriver.Firefox()
browser.get('http://localhost:8000')
assert 'Django' in browser.title
| mit | Python |
0f19fb03a9976beb410f52b4e53a6da31665ab15 | Fix identation errors. | microserv/frontend,microserv/frontend,microserv/frontend | editor_backend/editor_backend/views.py | editor_backend/editor_backend/views.py | from django.http import HttpResponse
from django.template.loader import get_template
from django.template import Context
from django.shortcuts import render
import json
import requests
NODE_ADDR = "http://127.0.0.1:9001"
publish_base_url = "http://despina.128.no/publish"
def get_publisher_url():
r = requests.get(NODE_ADDR + "/" + "publish")
response_as_json = json.loads(r.text)
if response_as_json:
return response_as_json
else:
return None
def homepage(request):
return render(request, "homepage.html", {});
def editor(request):
return render(request, "editor_page.html", {});
def upload_article(request):
dict = request.POST.dict()
article = {"tags": dict["tags"], "description": dict["description"], "title": dict["title"]}
article_start = "<!DOCTYPE html><html lang=\"en\"><head><meta charset=\"utf-8\"><title>"
article_mid = "</title></head><body>"
article_end = "</body></html>"
article["article"] = article_start + dict["title"] + article_mid + dict["article"].replace("src=\"//www.", "src=\"http://www.") + article_end
if "index" in dict:
article["index"] = "on"
else:
article["index"] = "off"
publisher_url = get_publisher_url()
if publisher_url:
r = requests.post("http://"+publisher_url+"/save_article", data = article)
else:
# Do some error handling here.
pass
#js = json.dumps(article)
#jf = open('js.json', 'w')
#jf.write(js)
#jf.close()
return render(request, "editor_page.html", {});
def articles(request):
r = requests.get(publish_base_url + "/list")
d = r.json()
d["publisher_url"] = publish_base_url
return render(request, "articles.html", d);
def search(request):
return render(request, "search.html", {});
def about(request):
return render(request, "about.html", {});
| from django.http import HttpResponse
from django.template.loader import get_template
from django.template import Context
from django.shortcuts import render
import json
import requests
NODE_ADDR = "http://127.0.0.1:9001"
publish_base_url = "http://despina.128.no/publish"
def get_publisher_url():
r = requests.get(NODE_ADDR + "/" + "publish")
response_as_json = json.loads(r.text)
if response_as_json:
return response_as_json
else:
return None
def homepage(request):
return render(request, "homepage.html", {});
def editor(request):
return render(request, "editor_page.html", {});
def upload_article(request):
dict = request.POST.dict()
article = {"tags": dict["tags"], "description": dict["description"], "title": dict["title"]}
article_start = "<!DOCTYPE html><html lang=\"en\"><head><meta charset=\"utf-8\"><title>"
article_mid = "</title></head><body>"
article_end = "</body></html>"
article["article"] = article_start + dict["title"] + article_mid + dict["article"].replace("src=\"//www.", "src=\"http://www.") + article_end
if "index" in dict:
article["index"] = "on"
else:
article["index"] = "off"
publisher_url = get_publisher_url()
if publisher_url:
r = requests.post("http://"+publisher_url+"/save_article", data = article)
else:
# Do some error handling here.
pass
#js = json.dumps(article)
#jf = open('js.json', 'w')
#jf.write(js)
#jf.close()
return render(request, "editor_page.html", {});
def articles(request):
r = requests.get(publish_base_url + "/list")
d = r.json()
d["publisher_url"] = publish_base_url
return render(request, "articles.html", d);
def search(request):
return render(request, "search.html", {});
def about(request):
return render(request, "about.html", {});
| mit | Python |
345fb3f60b5df1d45b44a5b2b7f162900669f1e9 | Fix typo in command | AgalmicVentures/Probe,AgalmicVentures/Probe,AgalmicVentures/Probe | Probe/Application/RawApplication.py | Probe/Application/RawApplication.py |
# Copyright (c) 2015-2018 Agalmic Ventures LLC (www.agalmicventures.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import cherrypy
import os
import subprocess
def getOutput(command):
return subprocess.Popen(command.split(' '), stdout=subprocess.PIPE).communicate()[0]
def htmlEncode(s):
return s.decode('utf-8').replace('&', '&').replace('<', '<').replace('>', '>').replace('"', '"').replace("'", '&squot;')
def commandResponse(command):
cherrypy.response.headers['Content-Type'] = 'text/plain'
return htmlEncode(getOutput(command))
class ProbeRawApplication(object):
def __init__(self):
self._isMac = os.uname().sysname == 'Darwin'
@cherrypy.expose
def date(self):
return commandResponse('date')
@cherrypy.expose
def df(self):
return commandResponse('df -h')
@cherrypy.expose
def entropyAvail(self):
return commandResponse('cat /proc/sys/kernel/random/entropy_avail')
@cherrypy.expose
def gitLog(self):
return commandResponse('git log --graph -10')
@cherrypy.expose
def gitRevision(self):
return commandResponse('git rev-parse HEAD')
@cherrypy.expose
def ifconfig(self):
return commandResponse('/sbin/ifconfig')
@cherrypy.expose
def iostat(self):
return commandResponse('iostat')
@cherrypy.expose
def mount(self):
return commandResponse('mount')
@cherrypy.expose
def netstatTcp(self):
return commandResponse('netstat -s -p tcp' if self._isMac else 'netstat -s -t')
@cherrypy.expose
def netstatUdp(self):
return commandResponse('netstat -s -p udp' if self._isMac else 'netstat -s -u')
@cherrypy.expose
def numactlHardware(self):
return commandResponse('numactl -H') if not self._isMac else 'numactl not available on Mac'
@cherrypy.expose
def uname(self):
return commandResponse('uname -a')
@cherrypy.expose
def update(self):
return commandResponse('git pull')
@cherrypy.expose
def uptime(self):
return commandResponse('uptime')
|
# Copyright (c) 2015-2018 Agalmic Ventures LLC (www.agalmicventures.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import cherrypy
import os
import subprocess
def getOutput(command):
return subprocess.Popen(command.split(' '), stdout=subprocess.PIPE).communicate()[0]
def htmlEncode(s):
return s.decode('utf-8').replace('&', '&').replace('<', '<').replace('>', '>').replace('"', '"').replace("'", '&squot;')
def commandResponse(command):
cherrypy.response.headers['Content-Type'] = 'text/plain'
return htmlEncode(getOutput(command))
class ProbeRawApplication(object):
def __init__(self):
self._isMac = os.uname().sysname == 'Darwin'
@cherrypy.expose
def date(self):
return commandResponse('date')
@cherrypy.expose
def df(self):
return commandResponse('df -h')
@cherrypy.expose
def entropyAvail(self):
return commandResponse('cat /proc/sys/kernel/random/entropy_avail')
@cherrypy.expose
def gitLog(self):
return commandResponse('git log --graph -10')
@cherrypy.expose
def gitRevision(self):
return commandResponse('git rev-parse HEAD')
@cherrypy.expose
def ifconfig(self):
return commandResponse('/sbin/ifconfig')
@cherrypy.expose
def iostat(self):
return commandResponse('iostat')
@cherrypy.expose
def mount(self):
return commandResponse('mount')
@cherrypy.expose
def netstatTcp(self):
return commandResponse('netstat -s -p tcp' if self._isMac else 'netstat -s -t')
@cherrypy.expose
def netstatUdp(self):
return commandResponse('netstat -s -p udp' if self._isMac else 'netstat -s -u')
@cherrypy.expose
def numactlHardware(self):
return commandResponse('numactl -h') if not self._isMac else 'numactl not available on Mac'
@cherrypy.expose
def uname(self):
return commandResponse('uname -a')
@cherrypy.expose
def update(self):
return commandResponse('git pull')
@cherrypy.expose
def uptime(self):
return commandResponse('uptime')
| mit | Python |
698d6bd59f810f3527c663a9f51362c018f6f394 | add french formatting for numbers | ZTH1970/alcide,ZTH1970/alcide,ZTH1970/alcide,ZTH1970/alcide,ZTH1970/alcide | calebasse/settings/formats/fr/formats.py | calebasse/settings/formats/fr/formats.py | DATE_FORMAT = 'l d F Y'
SHORT_DATE_FORMAT = 'j/n/Y'
DATE_INPUT_FORMATS = ('%d/%m/%Y', '%d/%m/%Y', '%Y-%m-d')
TIME_INPUT_FORMATS = ( '%Hh%M', '%H:%M', '%H%M', '%Hh' )
DECIMAL_SEPARATOR = ','
NUMBER_GROUPING = 3
THOUSAND_SEPARATOR = ' '
| DATE_FORMAT = 'l d F Y'
SHORT_DATE_FORMAT = 'j/n/Y'
DATE_INPUT_FORMATS = ('%d/%m/%Y', '%d/%m/%Y', '%Y-%m-d')
TIME_INPUT_FORMATS = ( '%Hh%M', '%H:%M', '%H%M', '%Hh' )
| agpl-3.0 | Python |
b2f6ec1db1c6831b923cb89907bbfedea79012f0 | handle yaml in post - WIP | empirical-org/WikipediaSentences,empirical-org/WikipediaSentences | genmodel/manager.py | genmodel/manager.py | from flask import Flask, request, render_template, jsonify
import yaml
from tabulate import tabulate
import os
import psycopg2
# Connect to Database
try:
DB_NAME=os.environ['DB_NAME']
DB_USER=os.environ['DB_USER']
DB_PASS=os.environ['DB_PASS']
except KeyError as e:
raise Exception('environment variables for database connection must be set')
conn = psycopg2.connect(dbname=DB_NAME,
user=DB_USER,
password=DB_PASS,
host='localhost',
port=5432
)
app = Flask(__name__)
@app.route('/')
def man():
return 'Not implemented'
@app.route('/jobs', methods=["GET", "POST"])
def jobs():
if request.method == "GET":
cur = conn.cursor()
cur.execute("SELECT id,name,state,created FROM jobs WHERE state='running'")
resp_list = cur.fetchall()
cur.close()
return tabulate(resp_list, headers=['id','name','state','created'])
elif request.method == "POST":
resp = yaml.loads(request.files['job'])
# Take a JSON with attributes of job, start job, then redirect to that
# job's monitoring page (jobs/job_id)
return jsonify(resp), 201
else:
return 'Not implemented'
@app.route('/jobs/<job_id>', methods=["GET", "PATCH", "DELETE"])
def job_for_id(job_id):
if request.method == "GET":
# Job monitoring for a specific job
return 'GET job #' + job_id
elif request.method == "PATCH":
# TODO: Should this be an endpoint?
# Modify job, scale resolvers
return 'PATCH job #' + job_id
elif request.method == "DELETE":
# Remove all dedicated Digital Ocean containers, stop all publishers,
# writers and workers. Purge the queue.
return 'DELETE job #' + job_id
return job_id
if __name__ == '__main__':
app.run(port=5000, host= '0.0.0.0', debug=True)
| from flask import Flask, request, render_template, jsonify
from tabulate import tabulate
import os
import psycopg2
# Connect to Database
try:
DB_NAME=os.environ['DB_NAME']
DB_USER=os.environ['DB_USER']
DB_PASS=os.environ['DB_PASS']
except KeyError as e:
raise Exception('environment variables for database connection must be set')
conn = psycopg2.connect(dbname=DB_NAME,
user=DB_USER,
password=DB_PASS,
host='localhost',
port=5432
)
app = Flask(__name__)
@app.route('/')
def man():
return 'Not implemented'
@app.route('/jobs', methods=["GET", "POST"])
def jobs():
if request.method == "GET":
cur = conn.cursor()
cur.execute("SELECT id,name,state,created FROM jobs WHERE state='running'")
resp_list = cur.fetchall()
cur.close()
return tabulate(resp_list, headers=['id','name','state','created'])
elif request.method == "POST":
# Take a JSON with attributes of job, start job, then redirect to that
# job's monitoring page (jobs/job_id)
return 'Not implemented'
else:
return 'Not implemented'
@app.route('/jobs/<job_id>', methods=["GET", "PATCH", "DELETE"])
def job_for_id(job_id):
if request.method == "GET":
# Job monitoring for a specific job
return 'GET job #' + job_id
elif request.method == "PATCH":
# TODO: Should this be an endpoint?
# Modify job, scale resolvers
return 'PATCH job #' + job_id
elif request.method == "DELETE":
# Remove all dedicated Digital Ocean containers, stop all publishers,
# writers and workers. Purge the queue.
return 'DELETE job #' + job_id
return job_id
if __name__ == '__main__':
app.run(port=5000, host= '0.0.0.0', debug=True)
| agpl-3.0 | Python |
1239d7a881f7ad88c736aac526a821658cea0f96 | check platform with correct python | stweil/letsencrypt,letsencrypt/letsencrypt,lmcro/letsencrypt,letsencrypt/letsencrypt,lmcro/letsencrypt,stweil/letsencrypt | certbot-nginx/certbot_nginx/constants.py | certbot-nginx/certbot_nginx/constants.py | """nginx plugin constants."""
import pkg_resources
import platform
if platform.system() in ('FreeBSD', 'Darwin'):
server_root_tmp = "/usr/local/etc/nginx"
else:
server_root_tmp = "/etc/nginx"
CLI_DEFAULTS = dict(
server_root=server_root_tmp,
ctl="nginx",
)
"""CLI defaults."""
MOD_SSL_CONF_DEST = "options-ssl-nginx.conf"
"""Name of the mod_ssl config file as saved in `IConfig.config_dir`."""
MOD_SSL_CONF_SRC = pkg_resources.resource_filename(
"certbot_nginx", "options-ssl-nginx.conf")
"""Path to the nginx mod_ssl config file found in the Certbot
distribution."""
UPDATED_MOD_SSL_CONF_DIGEST = ".updated-options-ssl-nginx-conf-digest.txt"
"""Name of the hash of the updated or informed mod_ssl_conf as saved in `IConfig.config_dir`."""
ALL_SSL_OPTIONS_HASHES = [
'0f81093a1465e3d4eaa8b0c14e77b2a2e93568b0fc1351c2b87893a95f0de87c',
'9a7b32c49001fed4cff8ad24353329472a50e86ade1ef9b2b9e43566a619612e',
'a6d9f1c7d6b36749b52ba061fff1421f9a0a3d2cfdafbd63c05d06f65b990937',
'7f95624dd95cf5afc708b9f967ee83a24b8025dc7c8d9df2b556bbc64256b3ff',
'394732f2bbe3e5e637c3fb5c6e980a1f1b90b01e2e8d6b7cff41dde16e2a756d',
'4b16fec2bcbcd8a2f3296d886f17f9953ffdcc0af54582452ca1e52f5f776f16',
]
"""SHA256 hashes of the contents of all versions of MOD_SSL_CONF_SRC"""
def os_constant(key):
# XXX TODO: In the future, this could return different constants
# based on what OS we are running under. To see an
# approach to how to handle different OSes, see the
# apache version of this file. Currently, we do not
# actually have any OS-specific constants on Nginx.
"""
Get a constant value for operating system
:param key: name of cli constant
:return: value of constant for active os
"""
return CLI_DEFAULTS[key]
HSTS_ARGS = ['\"max-age=31536000\"', ' ', 'always']
HEADER_ARGS = {'Strict-Transport-Security': HSTS_ARGS}
| """nginx plugin constants."""
import pkg_resources
import platform
if(platform.system() == ('FreeBSD' or 'Darwin')):
server_root_tmp = "/usr/local/etc/nginx"
else:
server_root_tmp = "/etc/nginx"
CLI_DEFAULTS = dict(
server_root=server_root_tmp,
ctl="nginx",
)
"""CLI defaults."""
MOD_SSL_CONF_DEST = "options-ssl-nginx.conf"
"""Name of the mod_ssl config file as saved in `IConfig.config_dir`."""
MOD_SSL_CONF_SRC = pkg_resources.resource_filename(
"certbot_nginx", "options-ssl-nginx.conf")
"""Path to the nginx mod_ssl config file found in the Certbot
distribution."""
UPDATED_MOD_SSL_CONF_DIGEST = ".updated-options-ssl-nginx-conf-digest.txt"
"""Name of the hash of the updated or informed mod_ssl_conf as saved in `IConfig.config_dir`."""
ALL_SSL_OPTIONS_HASHES = [
'0f81093a1465e3d4eaa8b0c14e77b2a2e93568b0fc1351c2b87893a95f0de87c',
'9a7b32c49001fed4cff8ad24353329472a50e86ade1ef9b2b9e43566a619612e',
'a6d9f1c7d6b36749b52ba061fff1421f9a0a3d2cfdafbd63c05d06f65b990937',
'7f95624dd95cf5afc708b9f967ee83a24b8025dc7c8d9df2b556bbc64256b3ff',
'394732f2bbe3e5e637c3fb5c6e980a1f1b90b01e2e8d6b7cff41dde16e2a756d',
'4b16fec2bcbcd8a2f3296d886f17f9953ffdcc0af54582452ca1e52f5f776f16',
]
"""SHA256 hashes of the contents of all versions of MOD_SSL_CONF_SRC"""
def os_constant(key):
# XXX TODO: In the future, this could return different constants
# based on what OS we are running under. To see an
# approach to how to handle different OSes, see the
# apache version of this file. Currently, we do not
# actually have any OS-specific constants on Nginx.
"""
Get a constant value for operating system
:param key: name of cli constant
:return: value of constant for active os
"""
return CLI_DEFAULTS[key]
HSTS_ARGS = ['\"max-age=31536000\"', ' ', 'always']
HEADER_ARGS = {'Strict-Transport-Security': HSTS_ARGS}
| apache-2.0 | Python |
e889cd87e1260d209775a98fb7779146f92c9333 | handle non integer issue numbers better | xchewtoyx/comicmgt,xchewtoyx/comicmgt | ooo.py | ooo.py | #!/usr/bin/python
import os
import sys
import re
from collections import defaultdict
COMIC_RE = re.compile(r'^\d+ +([^#]+)#(\d+)')
def lines(todofile):
with open(todofile) as todolines:
for line in todolines:
title_match = COMIC_RE.match(line)
if title_match:
# (title, issue)
yield line.strip(), title_match.group(1), int(title_match.group(2))
def issues(todofile):
seen = defaultdict(int)
for line, title, issue in lines(todofile):
if issue and seen[title] and abs(issue - seen[title]) > 1:
yield line, seen[title]
seen[title] = issue
def main(files):
for todofile in files:
for issue, lastissue in issues(todofile):
print "%s (last seen %d)" % (issue, lastissue)
if __name__ == '__main__':
main(sys.argv[1:])
| #!/usr/bin/python
import os
import sys
import re
from collections import defaultdict
COMIC_RE = re.compile(r'^\d+ +([^#]+)#(\d+)')
def lines(todofile):
with open(todofile) as todolines:
for line in todolines:
title_match = COMIC_RE.match(line)
if title_match:
# (title, issue)
yield line.strip(), title_match.group(1), int(title_match.group(2))
def issues(todofile):
seen = defaultdict(int)
for line, title, issue in lines(todofile):
if issue and seen[title] and issue != seen[title]+1:
yield line, seen[title]
seen[title] = issue
def main(files):
for todofile in files:
for issue, lastissue in issues(todofile):
print "%s (last seen %d)" % (issue, lastissue)
if __name__ == '__main__':
main(sys.argv[1:])
| mit | Python |
04d1494cfcbc4508519f559d9a2093f4491c3451 | Update get_access_token.py | jquacinella/python-twitter,shichao-an/python-twitter,jakeshi/python-twitter,jeremylow/python-twitter | get_access_token.py | get_access_token.py | #!/usr/bin/env python
#
# Copyright 2007-2013 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import webbrowser
from requests_oauthlib import OAuth1Session
REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'
def get_access_token(consumer_key, consumer_secret):
oauth_client = OAuth1Session(consumer_key, client_secret=consumer_secret, callback_uri='oob')
print 'Requesting temp token from Twitter'
try:
resp = oauth_client.fetch_request_token(REQUEST_TOKEN_URL)
except ValueError, e:
print 'Invalid respond from Twitter requesting temp token: %s' % e
return
url = oauth_client.authorization_url(AUTHORIZATION_URL)
print ''
print 'I will try to start a browser to visit the following Twitter page'
print 'if a browser will not start, copy the URL to your browser'
print 'and retrieve the pincode to be used'
print 'in the next step to obtaining an Authentication Token:'
print ''
print url
print ''
webbrowser.open(url)
pincode = raw_input('Pincode? ')
print ''
print 'Generating and signing request for an access token'
print ''
oauth_client = OAuth1Session(consumer_key, client_secret=consumer_secret,
resource_owner_key=resp.get('oauth_token'),
resource_owner_secret=resp.get('oauth_token_secret'),
verifier=pincode
)
try:
resp = oauth_client.fetch_access_token(ACCESS_TOKEN_URL)
except ValueError, e:
print 'Invalid respond from Twitter requesting access token: %s' % e
return
print 'Your Twitter Access Token key: %s' % resp.get('oauth_token')
print ' Access Token secret: %s' % resp.get('oauth_token_secret')
print ''
def main():
consumer_key = raw_input('Enter your consumer key: ')
consumer_secret = raw_input("Enter your consumer secret: ")
get_access_token(consumer_key, consumer_secret)
if __name__ == "__main__":
main()
| #!/usr/bin/env python
#
# Copyright 2007-2013 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import webbrowser
from requests_oauthlib import OAuth1Session
REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'
def get_access_token(consumer_key, consumer_secret):
oauth_client = OAuth1Session(consumer_key, client_secret=consumer_secret)
print 'Requesting temp token from Twitter'
try:
resp = oauth_client.fetch_request_token(REQUEST_TOKEN_URL)
except ValueError, e:
print 'Invalid respond from Twitter requesting temp token: %s' % e
return
url = oauth_client.authorization_url(AUTHORIZATION_URL)
print ''
print 'I will try to start a browser to visit the following Twitter page'
print 'if a browser will not start, copy the URL to your browser'
print 'and retrieve the pincode to be used'
print 'in the next step to obtaining an Authentication Token:'
print ''
print url
print ''
webbrowser.open(url)
pincode = raw_input('Pincode? ')
print ''
print 'Generating and signing request for an access token'
print ''
oauth_client = OAuth1Session(consumer_key, client_secret=consumer_secret,
resource_owner_key=resp.get('oauth_token'),
resource_owner_secret=resp.get('oauth_token_secret'),
verifier=pincode
)
try:
resp = oauth_client.fetch_access_token(ACCESS_TOKEN_URL)
except ValueError, e:
print 'Invalid respond from Twitter requesting access token: %s' % e
return
print 'Your Twitter Access Token key: %s' % resp.get('oauth_token')
print ' Access Token secret: %s' % resp.get('oauth_token_secret')
print ''
def main():
consumer_key = raw_input('Enter your consumer key: ')
consumer_secret = raw_input("Enter your consumer secret: ")
get_access_token(consumer_key, consumer_secret)
if __name__ == "__main__":
main()
| apache-2.0 | Python |
ebb2b2381662dcaa9a16083cae3b7e0ae9bc1065 | Reword 51 | daicang/Euler | p51.py | p51.py | # 51
# Find the smallest prime which, by replacing part of the number with the same digit,
# is part of an eight prime value family
from prime import Prime
class Solve(object):
def __init__(self):
self.p = Prime()
self.ps = self.p.prime_stream()
def get_replaced(self, p_str, r_str):
return [int(p_str.replace(r_str, str(i))) for i in range(int(r_str), 10)]
def solve(self):
for p in self.ps:
p_str = str(p)
for num in ['0', '1', '2']:
if p_str.count(num) > 1:
primes = filter(self.p.is_prime, self.get_replaced(p_str, num))
if len(primes) > 6:
print p_str, len(primes), primes
if len(primes) == 8:
return primes[0]
solver = Solve()
print solver.solve() | # 51
# Find the smallest prime which, by replacing part of the number with the same digit,
from prime import Prime
class Solve(object):
def __init__(self):
self.p = Prime()
self.ps = self.p.prime_stream()
def get_replaced(self, p_str, r_str):
return [int(p_str.replace(r_str, str(i))) for i in range(int(r_str), 10)]
def solve(self):
for p in self.ps:
p_str = str(p)
for num in ['0', '1', '2']:
if p_str.count(num) > 1:
primes = filter(self.p.is_prime, self.get_replaced(p_str, num))
if len(primes) > 6:
print p_str, len(primes), primes
if len(primes) == 8:
return primes[0]
solver = Solve()
print solver.solve() | mit | Python |
80e6c4d499b6c126dc0032a8175d9a6ba3bab1a8 | Add os import | j-friedrich/thunder,broxtronix/thunder,oliverhuangchao/thunder,oliverhuangchao/thunder,kcompher/thunder,poolio/thunder,kunallillaney/thunder,kunallillaney/thunder,broxtronix/thunder,pearsonlab/thunder,mikarubi/thunder,kcompher/thunder,zhwa/thunder,jwittenbach/thunder,mikarubi/thunder,zhwa/thunder,pearsonlab/thunder,j-friedrich/thunder,thunder-project/thunder,poolio/thunder | pca.py | pca.py | # pca <master> <inputFile> <outputFile> <slices> <k> <dim>
#
# performs pca on a data matrix
# input is a local text file or a file in hdfs
# format should be rows of ' ' separated values
# - example: space (rows) x time (cols)
# - rows should be whichever dim is larger
# 'dim' is dimension to subtract mean along
# 'k' is number of pcs to return
# writes pcs (in both dims) and eigenvalues to text
import sys
import os
from numpy import *
from scipy.linalg import *
from pyspark import SparkContext
if len(sys.argv) < 7:
print >> sys.stderr, \
"(pca) usage: pca <master> <inputFile> <outputFile> <slices> <k> <dim>"
exit(-1)
def parseVector(line):
return array([float(x) for x in line.split(' ')])
# parse inputs
sc = SparkContext(sys.argv[1], "pca")
inputFile = str(sys.argv[2])
outputFile = str(sys.argv[3])
slices = int(sys.argv[4])
k = int(sys.argv[5])
dim = int(sys.argv[6])
if not os.path.exists(outputFile):
os.makedirs(outputFile)
# load data
lines = sc.textFile(inputFile,slices)
data = lines.map(parseVector).cache()
n = data.count()
# do mean subtraction
if dim==1:
meanVec = data.reduce(lambda x,y : x+y) / n
sub = data.map(lambda x : x - meanVec)
elif dim==2:
meanVec = data.reduce(lambda x,y : x+y) / n
sub = data.map(lambda x : x - np.mean(x))
else:
print >> sys.stderr, \
"(pca) dim must be 1 or 2"
exit(-1)
# do eigendecomposition
cov = sub.map(lambda x : outer(x,x)).reduce(lambda x,y : (x + y)) / (n - 1)
w, v = eig(cov)
inds = argsort(w)[::-1]
sortedDim2 = transpose(v[:,inds[0:k]])
latent = w[inds[0:k]]
print("(pca) writing output...")
np.savetxt(outputFile+"/"+"out-dim2-"+outputFile+".txt",sortedDim2,fmt='%.8f')
np.savetxt(outputFile+"/"+"out-latent-"+outputFile+".txt",latent,fmt='%.8f')
for ik in range(0,k):
sortedDim1 = sub.map(lambda x : inner(x,sortedDim2[ik,:]))
np.savetxt(outputFile+"/"+"out-dim1-"+str(ik)+"-"-outputFile+".txt",sortedDim1.collect(),fmt='%.8f')
| # pca <master> <inputFile> <outputFile> <slices> <k> <dim>
#
# performs pca on a data matrix
# input is a local text file or a file in hdfs
# format should be rows of ' ' separated values
# - example: space (rows) x time (cols)
# - rows should be whichever dim is larger
# 'dim' is dimension to subtract mean along
# 'k' is number of pcs to return
# writes pcs (in both dims) and eigenvalues to text
import sys
from numpy import *
from scipy.linalg import *
from pyspark import SparkContext
if len(sys.argv) < 7:
print >> sys.stderr, \
"(pca) usage: pca <master> <inputFile> <outputFile> <slices> <k> <dim>"
exit(-1)
def parseVector(line):
return array([float(x) for x in line.split(' ')])
# parse inputs
sc = SparkContext(sys.argv[1], "pca")
inputFile = str(sys.argv[2])
outputFile = str(sys.argv[3])
slices = int(sys.argv[4])
k = int(sys.argv[5])
dim = int(sys.argv[6])
if not os.path.exists(outputFile):
os.makedirs(outputFile)
# load data
lines = sc.textFile(inputFile,slices)
data = lines.map(parseVector).cache()
n = data.count()
# do mean subtraction
if dim==1:
meanVec = data.reduce(lambda x,y : x+y) / n
sub = data.map(lambda x : x - meanVec)
elif dim==2:
meanVec = data.reduce(lambda x,y : x+y) / n
sub = data.map(lambda x : x - np.mean(x))
else:
print >> sys.stderr, \
"(pca) dim must be 1 or 2"
exit(-1)
# do eigendecomposition
cov = sub.map(lambda x : outer(x,x)).reduce(lambda x,y : (x + y)) / (n - 1)
w, v = eig(cov)
inds = argsort(w)[::-1]
sortedDim2 = transpose(v[:,inds[0:k]])
latent = w[inds[0:k]]
print("(pca) writing output...")
np.savetxt(outputFile+"/"+"out-dim2-"+outputFile+".txt",sortedDim2,fmt='%.8f')
np.savetxt(outputFile+"/"+"out-latent-"+outputFile+".txt",latent,fmt='%.8f')
for ik in range(0,k):
sortedDim1 = sub.map(lambda x : inner(x,sortedDim2[ik,:]))
np.savetxt(outputFile+"/"+"out-dim1-"+str(ik)+"-"-outputFile+".txt",sortedDim1.collect(),fmt='%.8f')
| apache-2.0 | Python |
2cf25d10bb81f53fa415523650558b9ea10032f0 | Add rounding to nearest for the number of SQL queries per call. | catcombo/django-speedinfo,catcombo/django-speedinfo,catcombo/django-speedinfo | speedinfo/models.py | speedinfo/models.py | # coding: utf-8
from django.db import models
class ViewProfiler(models.Model):
"""
Holds profiler stats grouped by view.
"""
view_name = models.CharField('View name', max_length=255)
method = models.CharField('HTTP method', max_length=8)
anon_calls = models.PositiveIntegerField('Anonymous calls', default=0)
cache_hits = models.PositiveIntegerField('Cache hits', default=0)
sql_total_time = models.FloatField('SQL total time', default=0)
sql_total_count = models.PositiveIntegerField('SQL total queries count', default=0)
total_calls = models.PositiveIntegerField('Total calls', default=0)
total_time = models.FloatField('Total time', default=0)
class Meta:
verbose_name_plural = 'Views profiler'
unique_together = ('view_name', 'method')
@property
def anon_calls_ratio(self):
"""Anonymous calls ratio.
:return: anonymous calls ratio percent
:rtype: float
"""
if self.total_calls > 0:
return 100 * self.anon_calls / float(self.total_calls)
else:
return 0
@property
def cache_hits_ratio(self):
"""Cache hits ratio.
:return: cache hits ratio percent
:rtype: float
"""
if self.total_calls > 0:
return 100 * self.cache_hits / float(self.total_calls)
else:
return 0
@property
def sql_time_ratio(self):
"""SQL time per call ratio.
:return: SQL time per call ratio percent
:rtype: float
"""
if self.total_time > 0:
return 100 * self.sql_total_time / float(self.total_time)
else:
return 0
@property
def sql_count_per_call(self):
"""SQL queries count per call.
:return: SQL queries count per call
:rtype: int
"""
if self.total_calls > 0:
return int(round(self.sql_total_count / float(self.total_calls)))
else:
return 0
@property
def time_per_call(self):
"""Time per call.
:return: time per call
:rtype: float
"""
if self.total_calls > 0:
return self.total_time / float(self.total_calls)
else:
return 0
| # coding: utf-8
from django.db import models
class ViewProfiler(models.Model):
"""
Holds profiler stats grouped by view.
"""
view_name = models.CharField('View name', max_length=255)
method = models.CharField('HTTP method', max_length=8)
anon_calls = models.PositiveIntegerField('Anonymous calls', default=0)
cache_hits = models.PositiveIntegerField('Cache hits', default=0)
sql_total_time = models.FloatField('SQL total time', default=0)
sql_total_count = models.PositiveIntegerField('SQL total queries count', default=0)
total_calls = models.PositiveIntegerField('Total calls', default=0)
total_time = models.FloatField('Total time', default=0)
class Meta:
verbose_name_plural = 'Views profiler'
unique_together = ('view_name', 'method')
@property
def anon_calls_ratio(self):
"""Anonymous calls ratio.
:return: anonymous calls ratio percent
:rtype: float
"""
if self.total_calls > 0:
return 100 * self.anon_calls / float(self.total_calls)
else:
return 0
@property
def cache_hits_ratio(self):
"""Cache hits ratio.
:return: cache hits ratio percent
:rtype: float
"""
if self.total_calls > 0:
return 100 * self.cache_hits / float(self.total_calls)
else:
return 0
@property
def sql_time_ratio(self):
"""SQL time per call ratio.
:return: SQL time per call ratio percent
:rtype: float
"""
if self.total_time > 0:
return 100 * self.sql_total_time / float(self.total_time)
else:
return 0
@property
def sql_count_per_call(self):
"""SQL queries count per call.
:return: SQL queries count per call
:rtype: int
"""
if self.total_calls > 0:
return int(self.sql_total_count / self.total_calls)
else:
return 0
@property
def time_per_call(self):
"""Time per call.
:return: time per call
:rtype: float
"""
if self.total_calls > 0:
return self.total_time / float(self.total_calls)
else:
return 0
| mit | Python |
1c738439e8d3d30d5c2f7586e6ca796e64e28873 | add service debug log. | cj1324/WebHooks2IRC | src/webhooks2irc/core/ircbot.py | src/webhooks2irc/core/ircbot.py | #!/usr/bin/env python
# coding: UTF-8
import time
import threading
import logging
from irc import client as irc_client
from webhooks2irc import settings
logger = logging.getLogger(__name__)
class IrcBotService(threading.Thread):
def __init__(self):
self.client = irc_client.Reactor()
server = self.client.server()
logger.debug('connect to {0}.'.format(settings.IRC_HOST))
server.connect(settings.IRC_HOST,
settings.IRC_PORT,
settings.IRC_NICK)
server_name = server.get_server_name()
self.server = server
self.hi = threading.Event()
super(IrcBotService, self).__init__()
def run(self):
while True:
if not self.server.is_connected():
logger.debug('server reconnect.')
self.server.reconnect()
self.client.process_once(settings.IRC_DATA_TIMEOUT)
if self.hi.is_set():
logger.debug('ready send hi message.')
self.server.join("#{0}".format(settings.IRC_CHANNEL))
self.server.privmsg("#{0}".format(settings.IRC_CHANNEL),
settings.IRC_HIMSG)
self.hi.clear()
| #!/usr/bin/env python
# coding: UTF-8
import time
import threading
from irc import client as irc_client
from webhooks2irc import settings
class IrcBotService(threading.Thread):
def __init__(self):
self.client = irc_client.Reactor()
server = self.client.server()
server.connect(settings.IRC_HOST,
settings.IRC_PORT,
settings.IRC_NICK)
server_name = server.get_server_name()
self.server = server
self.hi = threading.Event()
super(IrcBotService, self).__init__()
def run(self):
while True:
if not self.server.is_connected():
self.server.reconnect()
self.client.process_once(settings.IRC_DATA_TIMEOUT)
if self.hi.is_set():
self.server.join("#{0}".format(settings.IRC_CHANNEL))
self.server.privmsg("#{0}".format(settings.IRC_CHANNEL),
settings.IRC_HIMSG)
self.hi.clear()
| bsd-2-clause | Python |
f34773f4ae5e8ab3ad067e5e9fd57bb4996a2186 | Bump version to 0.1.2 | bendtherules/pontoon,bendtherules/pontoon,duggan/pontoon,duggan/pontoon | pontoon/__init__.py | pontoon/__init__.py | # -*- coding: utf-8 -*-
__name__ = 'pontoon'
__description__ = 'A Python CLI for Digital Ocean'
__version__ = '0.1.2'
__author__ = 'Ross Duggan'
__author_email__ = 'ross.duggan@acm.org'
__url__ = 'https://github.com/duggan/pontoon'
__copyright__ = 'Copyright Ross Duggan 2013'
from .cache import cache
from .log import debug
from .pontoon import Pontoon
from .command import Command
from .pontoon import ClientException
from .exceptions import *
| # -*- coding: utf-8 -*-
__name__ = 'pontoon'
__description__ = 'A Python CLI for Digital Ocean'
__version__ = '0.1.1'
__author__ = 'Ross Duggan'
__author_email__ = 'ross.duggan@acm.org'
__url__ = 'https://github.com/duggan/pontoon'
__copyright__ = 'Copyright Ross Duggan 2013'
from .cache import cache
from .log import debug
from .pontoon import Pontoon
from .command import Command
from .pontoon import ClientException
from .exceptions import *
| mit | Python |
8a520b1824e4f7d24c3aa05c22f374baafb3f629 | Add a copyright and license comment to re2.py | facebook/pyre2,pombredanne/pyre2,andreasvc/pyre2,axiak/pyre2,simudream/pyre2,sunu/pyre2,simudream/pyre2,andreasvc/pyre2,axiak/pyre2,pombredanne/pyre2,pombredanne/pyre2,sunu/pyre2,facebook/pyre2 | re2.py | re2.py | #!/usr/bin/env python
# Copyright (c) 2010, David Reiss and Facebook, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Facebook nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Define this first, since it is referenced by _re2.
class error(Exception):
pass
import _re2
compile = _re2._compile
| #!/usr/bin/env python
# Define this first, since it is referenced by _re2.
class error(Exception):
pass
import _re2
compile = _re2._compile
| bsd-3-clause | Python |
aa57ee789731f129646dc6641087a17486353251 | Fix filtering bug in run.py | shellphish/driller | run.py | run.py | #!/usr/bin/env python
import logging
import logconfig
# silence these loggers
logging.getLogger().setLevel("CRITICAL")
logging.getLogger("driller.fuzz").setLevel("INFO")
l = logging.getLogger("driller")
l.setLevel("INFO")
import os
import sys
import redis
import fuzzer.tasks
import driller.config as config
'''
Large scale test script. Should just require pointing it at a directory full of binaries.
'''
def start(binary_dir):
jobs = [ ]
binaries = os.listdir(binary_dir)
for binary in binaries:
if binary.startswith("."):
continue
pathed_binary = os.path.join(binary_dir, binary)
if os.path.isdir(pathed_binary):
continue
if not os.access(pathed_binary, os.X_OK):
continue
identifier = binary[:binary.rindex("_")]
# remove IPC binaries from largescale testing
if (identifier + "_02") not in binaries:
jobs.append(binary)
l.info("%d binaries found", len(jobs))
l.debug("binaries: %r", jobs)
# send all the binaries to the celery queue
l.info("%d binaries found", len(jobs))
filter_t = set()
try:
pwned = open("pwned").read()
for pwn in pwned.split("\n")[:-1]:
filter_t.add(pwn)
l.info("already pwned %d", len(filter_t))
except IOError:
pass
jobs = filter(lambda j: j not in filter_t, jobs)
l.info("going to work on %d", len(jobs))
for binary in jobs:
fuzzer.tasks.fuzz.delay(binary)
l.info("listening for crashes..")
redis_inst = redis.Redis(host=config.REDIS_HOST, port=config.REDIS_PORT, db=config.REDIS_DB)
p = redis_inst.pubsub()
p.subscribe("crashes")
cnt = 1
for msg in p.listen():
if msg['type'] == 'message':
l.info("[%03d/%03d] crash found for '%s'", cnt, len(jobs), msg['data'])
cnt += 1
def main(argv):
if len(argv) < 2:
print "usage: %s <binary_dir>" % argv[0]
return 1
binary_dir = sys.argv[1]
start(binary_dir)
if __name__ == "__main__":
sys.exit(main(sys.argv))
| #!/usr/bin/env python
import logging
import logconfig
# silence these loggers
logging.getLogger().setLevel("CRITICAL")
logging.getLogger("driller.fuzz").setLevel("INFO")
l = logging.getLogger("driller")
l.setLevel("INFO")
import os
import sys
import redis
import fuzzer.tasks
import driller.config as config
'''
Large scale test script. Should just require pointing it at a directory full of binaries.
'''
def start(binary_dir):
jobs = [ ]
binaries = os.listdir(binary_dir)
for binary in binaries:
if binary.startswith("."):
continue
pathed_binary = os.path.join(binary_dir, binary)
if os.path.isdir(pathed_binary):
continue
if not os.access(pathed_binary, os.X_OK):
continue
identifier = binary[:binary.rindex("_")]
# remove IPC binaries from largescale testing
if (identifier + "_02") not in binaries:
jobs.append(binary)
l.info("%d binaries found", len(jobs))
l.debug("binaries: %r", jobs)
# send all the binaries to the celery queue
l.info("%d binaries found", len(jobs))
filter_t = set()
try:
pwned = open("pwned").read()
for pwn in pwned.split("\n")[:-1]:
filter_t.add(pwn)
l.info("already pwned %d", len(filter_t))
except IOError:
pass
jobs = filter(lambda j: j not in pwned, jobs)
l.info("going to work on %d", len(jobs))
for binary in jobs:
fuzzer.tasks.fuzz.delay(binary)
l.info("listening for crashes..")
redis_inst = redis.Redis(host=config.REDIS_HOST, port=config.REDIS_PORT, db=config.REDIS_DB)
p = redis_inst.pubsub()
p.subscribe("crashes")
cnt = 1
for msg in p.listen():
if msg['type'] == 'message':
l.info("[%03d/%03d] crash found for '%s'", cnt, len(jobs), msg['data'])
cnt += 1
def main(argv):
if len(argv) < 2:
print "usage: %s <binary_dir>" % argv[0]
return 1
binary_dir = sys.argv[1]
start(binary_dir)
if __name__ == "__main__":
sys.exit(main(sys.argv))
| bsd-2-clause | Python |
aa2d675354566fa4cef801b2a553e076ad8d8c94 | remove attempt to grab terminal size (breaks when there is no terminal) | nysbc/Anisotropy,nysbc/Anisotropy | ThreeDFSC/programs/utility_functions.py | ThreeDFSC/programs/utility_functions.py | import os
import sys
def print_progress(iteration, total, prefix='', suffix='', decimals=1):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
"""
#rows, columns = os.popen('stty size', 'r').read().split()
columns = 40
bar_length = int(float(columns)/2)
str_format = "{0:." + str(decimals) + "f}"
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total))) ## adjusted base on window size
bar = '*' * filled_length + '-' * (bar_length - filled_length)
sys.stdout.write('\x1b[2K\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
## Added to mute the print statements
def blockPrint():
sys.stdout = open(os.devnull, 'w')
def enablePrint():
# sys.stdout = open('threedfscstdout.log', 'a')
sys.stdout = sys.__stdout__
| import os
import sys
def print_progress(iteration, total, prefix='', suffix='', decimals=1):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
"""
rows, columns = os.popen('stty size', 'r').read().split()
bar_length = int(float(columns)/2)
str_format = "{0:." + str(decimals) + "f}"
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total))) ## adjusted base on window size
bar = '*' * filled_length + '-' * (bar_length - filled_length)
sys.stdout.write('\x1b[2K\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix)),
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
## Added to mute the print statements
def blockPrint():
sys.stdout = open(os.devnull, 'w')
def enablePrint():
# sys.stdout = open('threedfscstdout.log', 'a')
sys.stdout = sys.__stdout__
| mit | Python |
d5b13e06c3ed31691c23de765fcb60d5972ef7d9 | Fix embedding images from tweets in threads | BeatButton/beattie,BeatButton/beattie-bot | cogs/twitter.py | cogs/twitter.py | import re
from lxml import etree
from discord.ext import commands
class Twitter:
url_expr = re.compile(r'https?:\/\/twitter\.com\/\S+\/status\/\d+')
tweet_selector = ".//div[contains(@class, 'tweet permalink-tweet')]"
img_selector = './/img[@data-aria-label-part]'
def __init__(self, bot):
self.bot = bot
self.bot.loop.create_task(self.__init())
self.headers = {'User-Agent':
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/41.0.2228.0 Safari/537.36'}
async def __init(self):
await self.bot.wait_until_ready()
if not self.bot.user.bot:
self.bot.unload_extension(__name__)
async def on_message(self, message):
if message.guild is None:
return
if not (await self.bot.config.get(message.guild.id)).get('twitter'):
return
for link in self.url_expr.findall(message.content):
await self.display_images(link, message.channel)
async def display_images(self, link, destination):
async with self.bot.get(link, headers=self.headers) as resp:
root = etree.fromstring(await resp.read(), etree.HTMLParser())
try:
tweet = root.xpath(self.tweet_selector)[0]
except IndexError:
return
for img_link in tweet.findall(self.img_selector)[1:]:
url = dict(img_link.items())['src']
await destination.send(f'{url}:large')
@commands.command()
async def twitter(self, ctx, enabled: bool=True):
"""Enable or disable sending non-previewed Twitter images."""
await self.bot.config.set(ctx.guild.id, twitter=enabled)
fmt = 'en' if enabled else 'dis'
await ctx.send(f'Sending Twitter images {fmt}abled.')
def setup(bot):
bot.add_cog(Twitter(bot))
| import re
from lxml import etree
from discord.ext import commands
class Twitter:
url_expr = re.compile(r'https?:\/\/twitter\.com\/\S+\/status\/\d+')
tweet_selector = ".//div[@class='AdaptiveMediaOuterContainer']"
img_selector = './/img[@data-aria-label-part]'
def __init__(self, bot):
self.bot = bot
self.bot.loop.create_task(self.__init())
self.headers = {'User-Agent':
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/41.0.2228.0 Safari/537.36'}
async def __init(self):
await self.bot.wait_until_ready()
if not self.bot.user.bot:
self.bot.unload_extension(__name__)
async def on_message(self, message):
if message.guild is None:
return
if not (await self.bot.config.get(message.guild.id)).get('twitter'):
return
for link in self.url_expr.findall(message.content):
await self.display_images(link, message.channel)
async def display_images(self, link, destination):
async with self.bot.get(link, headers=self.headers) as resp:
root = etree.fromstring(await resp.read(), etree.HTMLParser())
tweet = root.find(self.tweet_selector)
if tweet is None:
return
for img_link in tweet.findall(self.img_selector)[1:]:
url = dict(img_link.items())['src']
await destination.send(f'{url}:large')
@commands.command()
async def twitter(self, ctx, enabled: bool=True):
"""Enable or disable sending non-previewed Twitter images."""
await self.bot.config.set(ctx.guild.id, twitter=enabled)
fmt = 'en' if enabled else 'dis'
await ctx.send(f'Sending Twitter images {fmt}abled.')
def setup(bot):
bot.add_cog(Twitter(bot))
| mit | Python |
3299e9a73e484df73a0b2ce3e947a980ac64d862 | Update PedidoEditar.py | AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb | backend/Models/Campus/PedidoEditar.py | backend/Models/Campus/PedidoEditar.py | from Framework.Pedido import Pedido
from Framework.ErroNoHTTP import ErroNoHTTP
class PedidoEditar(Pedido):
def __init__(self,variaveis_do_ambiente):
super(PedidoEditar, self).__init__(variaveis_do_ambiente)
try:
self.nome = self.corpo['nome']
except:
raise ErroNoHTTP(400)
def getNome(self):
return self.nome
| from Framework.Pedido import Pedido
from Framework.ErroNoHTTP import ErroNoHTTP
class PedidoEditar(Pedido):
def __init__(self,variaveis_do_ambiente):
super(PedidoEditar, self).__init__(variaveis_do_ambiente)
try:
self.id = self.corpo['id']
self.nome = self.corpo['nome']
except:
raise ErroNoHTTP(400)
def getId(self):
return self.id
def getNome(self):
return self.nome | mit | Python |
82f9abbb7689bea6b8961274a169568e0ff2bbeb | Add conversion for out of date processes | materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org | backend/scripts/conversion/convert.py | backend/scripts/conversion/convert.py | #!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
import sys
def msg(s):
print s
sys.stdout.flush()
def fix_mcpub_missing_process_types(conn):
print "Fixing missing process_type entries..."
processes = list(r.db('mcpub').table('processes').filter(~r.row.has_fields('process_type')).run(conn))
for process in processes:
template = r.table('templates').get(process['template_id']).run(conn)
r.db('mcpub').table('processes').get(process['id']).update({'process_type': template['process_type']}).run(conn)
print "Done."
def fix_bad_mcpub_process_types(conn):
print "Fixing bad process_type entries..."
processes = list(r.db('mcpub').table('processes').run(conn))
for process in processes:
if is_bad_process_type(process):
if 'template_id' in process:
template = r.table('templates').get(process['template_id']).run(conn)
r.db('mcpub').table('processes').get(process['id']).update({
'process_type': template['process_type']
}).run(conn)
else:
template_id = 'global_' + process['process_name']
if process['process_name'] == 'As Received':
template_id = 'global_Create Samples'
template = r.table('templates').get(template_id).run(conn)
r.db('mcpub').table('processes').get(process['id']).update({
'process_type': template['process_type'],
'template_id': template['id']
}).run(conn)
print "Done."
def is_bad_process_type(p):
pt = p['process_type']
if pt == 'analysis':
return False
elif pt == 'create':
return False
elif pt == 'measurement':
return False
elif pt == 'transform':
return False
else:
return True
def main():
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int", help="rethinkdb port", default=30815)
(options, args) = parser.parse_args()
conn = r.connect('localhost', options.port, db="materialscommons")
fix_mcpub_missing_process_types(conn)
fix_bad_mcpub_process_types(conn)
if __name__ == "__main__":
main()
| #!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
import sys
def msg(s):
print s
sys.stdout.flush()
def fix_mcpub_missing_property_types(conn):
processes = list(r.db('mcpub').table('processes').filter(~r.row.has_fields('process_type')).run(conn))
for process in processes:
template = r.table('templates').get(process['template_id']).run(conn)
r.db('mcpub').table('processes').get(process['id']).update({'process_type': template['process_type']}).run(conn)
def main():
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int", help="rethinkdb port", default=30815)
(options, args) = parser.parse_args()
conn = r.connect('localhost', options.port, db="materialscommons")
fix_mcpub_missing_property_types(conn)
if __name__ == "__main__":
main()
| mit | Python |
0eff0eb9e544e9071a49226545d227371bc4843f | Add person admin | rafal-jaworski/bazaNGObackend,rafal-jaworski/bazaNGObackend,rafal-jaworski/bazaNGObackend | bazango/contrib/organization/admin.py | bazango/contrib/organization/admin.py | from django.contrib import admin
from .models import Organization, OrganizationProfile, Category, Person
from django.utils.translation import ugettext_lazy as _
@admin.register(Organization)
class OrganizationAdmin(admin.ModelAdmin):
list_display = ['short_name', 'krs', 'register_at', 'tag_list', 'category_list', 'is_active']
actions = ['make_active', 'make_inactive']
search_fields = ['name', 'address', 'krs']
list_filter = ['is_active', 'tags', 'categories', 'register_at']
preserve_filters = True
def get_queryset(self, request):
return super(OrganizationAdmin, self).get_queryset(request).prefetch_related('tags')
def tag_list(self, obj):
return u", ".join(o.name for o in obj.tags.all())
def category_list(self, obj):
return u", ".join(o.name for o in obj.categories.all())
def make_active(self, request, queryset):
queryset.update(is_active=True)
make_active.short_description = _('Mark selected organizations as active')
def make_inactive(self, request, queryset):
queryset.update(is_active=False)
make_inactive.short_description = _('Mark selected organizations as inactive')
@admin.register(OrganizationProfile)
class OrganizationProfileAdmin(admin.ModelAdmin):
# list_display = ['organization__short_name', 'organization__krs', 'organization__register_at',
# 'organization__tag_list', 'organization__is_active']
# search_fields = ['organization__name', 'organization__address']
pass
class CategoriesInline(admin.TabularInline):
model = Organization.categories.through
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
inlines = [
CategoriesInline
]
@admin.register(Person)
class PersonAdmin(admin.ModelAdmin):
pass
| from django.contrib import admin
from .models import Organization, OrganizationProfile, Category
from django.utils.translation import ugettext_lazy as _
@admin.register(Organization)
class OrganizationAdmin(admin.ModelAdmin):
list_display = ['short_name', 'krs', 'register_at', 'tag_list', 'category_list', 'is_active']
actions = ['make_active', 'make_inactive']
search_fields = ['name', 'address', 'krs']
list_filter = ['is_active', 'tags', 'categories', 'register_at']
preserve_filters = True
def get_queryset(self, request):
return super(OrganizationAdmin, self).get_queryset(request).prefetch_related('tags')
def tag_list(self, obj):
return u", ".join(o.name for o in obj.tags.all())
def category_list(self, obj):
return u", ".join(o.name for o in obj.categories.all())
def make_active(self, request, queryset):
queryset.update(is_active=True)
make_active.short_description = _('Mark selected organizations as active')
def make_inactive(self, request, queryset):
queryset.update(is_active=False)
make_inactive.short_description = _('Mark selected organizations as inactive')
@admin.register(OrganizationProfile)
class OrganizationProfileAdmin(admin.ModelAdmin):
# list_display = ['organization__short_name', 'organization__krs', 'organization__register_at',
# 'organization__tag_list', 'organization__is_active']
# search_fields = ['organization__name', 'organization__address']
pass
class CategoriesInline(admin.TabularInline):
model = Organization.categories.through
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
inlines = [
CategoriesInline
]
| bsd-3-clause | Python |
282ce26a3200dc5c5c94781dc2fc4af27a75df7f | Update version [skip ci] | julianghionoiu/tdl-client-python,julianghionoiu/tdl-client-python | previous_version.py | previous_version.py | from __future__ import print_function
PREVIOUS_VERSION = '0.21.0'
def main():
print(PREVIOUS_VERSION)
if __name__ == "__main__":
main()
| PREVIOUS_VERSION = '0.20.4'
def main():
print PREVIOUS_VERSION
if __name__ == "__main__":
main()
| apache-2.0 | Python |
9e00287611f254ec3599eb69c2b6ef8a2220ecef | Store previous version [skip ci] | julianghionoiu/tdl-client-python,julianghionoiu/tdl-client-python | previous_version.py | previous_version.py | PREVIOUS_VERSION = '0.20.4'
def main():
print PREVIOUS_VERSION
if __name__ == "__main__":
main()
| PREVIOUS_VERSION = '0.20.3'
def main():
print PREVIOUS_VERSION
if __name__ == "__main__":
main()
| apache-2.0 | Python |
86b87ba2975a0eef39887568c7af57801594fc9e | Set correct number of nodes in JSON. | fasaxc/wireless-sensor-node-server,fasaxc/wireless-sensor-node-server,fasaxc/wireless-sensor-node-server | src/api/readings.py | src/api/readings.py | # Copyright (c)Shaun Crampton 2012-2012. All rights reserved.
import time
import tornado
import cjson
from data import Session, Reading
import datetime
class ReadingsHandler(tornado.web.RequestHandler):
def get(self):
sess = Session()
self.set_header("Content-Type", "application/json")
self.write('{"num_nodes":3,"readings":[')
i = 0
last_timestamp = {}
now = datetime.datetime.utcnow()
one_month_ago = now - datetime.timedelta(days=30)
one_week_ago = now - datetime.timedelta(days=7)
one_day_ago = now - datetime.timedelta(days=1)
for r in (sess.query(Reading).
filter(Reading.checksum_calc == Reading.checksum_sent).
order_by(Reading.created_at)):
if r.created_at < one_month_ago:
min_delta = datetime.timedelta(hours=2)
elif r.created_at < one_week_ago:
min_delta = datetime.timedelta(hours=1)
elif r.created_at < one_day_ago:
min_delta = datetime.timedelta(minutes=40)
else:
min_delta = datetime.timedelta()
if (last_timestamp.get(r.node_id, None) is None or
r.created_at > last_timestamp[r.node_id] + min_delta):
self.write(("" if i == 0 else ",") +
cjson.encode([time.mktime(r.created_at.timetuple()),
r.reading if r.node_id == 1 else None,
r.reading if r.node_id == 2 else None,
r.reading if r.node_id == 3 else None]))
last_timestamp[r.node_id] = r.created_at
i += 1
if (i % 20) == 0:
self.flush()
self.finish("]}")
| # Copyright (c)Shaun Crampton 2012-2012. All rights reserved.
import time
import tornado
import cjson
from data import Session, Reading
import datetime
class ReadingsHandler(tornado.web.RequestHandler):
def get(self):
sess = Session()
self.set_header("Content-Type", "application/json")
self.write('{"num_nodes":2,"readings":[')
i = 0
last_timestamp = {}
now = datetime.datetime.utcnow()
one_month_ago = now - datetime.timedelta(days=30)
one_week_ago = now - datetime.timedelta(days=7)
one_day_ago = now - datetime.timedelta(days=1)
for r in (sess.query(Reading).
filter(Reading.checksum_calc == Reading.checksum_sent).
order_by(Reading.created_at)):
if r.created_at < one_month_ago:
min_delta = datetime.timedelta(hours=2)
elif r.created_at < one_week_ago:
min_delta = datetime.timedelta(hours=1)
elif r.created_at < one_day_ago:
min_delta = datetime.timedelta(minutes=40)
else:
min_delta = datetime.timedelta()
if (last_timestamp.get(r.node_id, None) is None or
r.created_at > last_timestamp[r.node_id] + min_delta):
self.write(("" if i == 0 else ",") +
cjson.encode([time.mktime(r.created_at.timetuple()),
r.reading if r.node_id == 1 else None,
r.reading if r.node_id == 2 else None,
r.reading if r.node_id == 3 else None]))
last_timestamp[r.node_id] = r.created_at
i += 1
if (i % 20) == 0:
self.flush()
self.finish("]}")
| bsd-2-clause | Python |
1d63a28492163a63d710a950004bf67e6e46ae41 | remove spaces | CodeCatz/TrackCat,CodeCatz/TrackCat,anuschka/TrackCat,livike/TrackCat,livike/TrackCat,livike/TrackCat,CodeCatz/TrackCat,anuschka/TrackCat,anuschka/TrackCat | TrackCat/templatetags/navigation.py | TrackCat/templatetags/navigation.py | from django import template
from django.core import urlresolvers
register = template.Library()
@register.simple_tag(takes_context=True)
def current(context, url_name, return_value=' active', **kwargs):
matches = current_url_equals(context, url_name, **kwargs)
return return_value if matches else ''
def current_url_equals(context, url_name, **kwargs):
resolved = False
try:
resolved = urlresolvers.resolve(context.get('request').path)
except:
pass
matches = resolved and resolved.url_name == url_name
if matches and kwargs:
for key in kwargs:
kwarg = kwargs.get(key)
resolved_kwarg = resolved.kwargs.get(key)
if not resolved_kwarg or kwarg != resolved_kwarg:
return False
return matches | from django import template
from django.core import urlresolvers
register = template.Library()
@register.simple_tag(takes_context=True)
def current(context, url_name, return_value=' active', **kwargs):
matches = current_url_equals(context, url_name, **kwargs)
return return_value if matches else ''
def current_url_equals(context, url_name, **kwargs):
resolved = False
try:
resolved = urlresolvers.resolve(context.get('request').path)
except:
pass
matches = resolved and resolved.url_name == url_name
if matches and kwargs:
for key in kwargs:
kwarg = kwargs.get(key)
resolved_kwarg = resolved.kwargs.get(key)
if not resolved_kwarg or kwarg != resolved_kwarg:
return False
return matches | mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.