commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
d3c2eb97d82b74abf3ac44f452a30c54d72a99b2 | Bumped version number to 2.0.2alpha1 | morfessor/__init__.py | morfessor/__init__.py | #!/usr/bin/env python
"""
Morfessor 2.0 - Python implementation of the Morfessor method
"""
import logging
__all__ = ['MorfessorException', 'ArgumentException', 'MorfessorIO',
'BaselineModel', 'main', 'get_default_argparser', 'main_evaluation',
'get_evaluation_argparser']
__version__ = '2.0.2alpha1'
__author__ = 'Sami Virpioja, Peter Smit'
__author_email__ = "morfessor@cis.hut.fi"
show_progress_bar = True
_logger = logging.getLogger(__name__)
def get_version():
return __version__
# The public api imports need to be at the end of the file,
# so that the package global names are available to the modules
# when they are imported.
from .baseline import BaselineModel, FixedCorpusWeight, AnnotationCorpusWeight, NumMorphCorpusWeight, MorphLengthCorpusWeight
from .cmd import main, get_default_argparser, main_evaluation, \
get_evaluation_argparser
from .exception import MorfessorException, ArgumentException
from .io import MorfessorIO
from .utils import _progress
from .evaluation import MorfessorEvaluation, MorfessorEvaluationResult
| #!/usr/bin/env python
"""
Morfessor 2.0 - Python implementation of the Morfessor method
"""
import logging
__all__ = ['MorfessorException', 'ArgumentException', 'MorfessorIO',
'BaselineModel', 'main', 'get_default_argparser', 'main_evaluation',
'get_evaluation_argparser']
__version__ = '2.0.1'
__author__ = 'Sami Virpioja, Peter Smit'
__author_email__ = "morfessor@cis.hut.fi"
show_progress_bar = True
_logger = logging.getLogger(__name__)
def get_version():
return __version__
# The public api imports need to be at the end of the file,
# so that the package global names are available to the modules
# when they are imported.
from .baseline import BaselineModel, FixedCorpusWeight, AnnotationCorpusWeight, NumMorphCorpusWeight, MorphLengthCorpusWeight
from .cmd import main, get_default_argparser, main_evaluation, \
get_evaluation_argparser
from .exception import MorfessorException, ArgumentException
from .io import MorfessorIO
from .utils import _progress
from .evaluation import MorfessorEvaluation, MorfessorEvaluationResult
| Python | 0.998853 |
d9044815f4034a51d27e4949ffcd153e253cc882 | use double quotes | frappe/integrations/doctype/google_settings/test_google_settings.py | frappe/integrations/doctype/google_settings/test_google_settings.py | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from .google_settings import get_file_picker_settings
class TestGoogleSettings(unittest.TestCase):
def setUp(self):
settings = frappe.get_single("Google Settings")
settings.client_id = "test_client_id"
settings.app_id = "test_app_id"
settings.api_key = "test_api_key"
settings.save()
def test_picker_disabled(self):
"""Google Drive Picker should be disabled if it is not enabled in Google Settings."""
frappe.db.set_value("Google Settings", None, "enable", 1)
frappe.db.set_value("Google Settings", None, "google_drive_picker_enabled", 0)
settings = get_file_picker_settings()
self.assertEqual(settings, {})
def test_google_disabled(self):
"""Google Drive Picker should be disabled if Google integration is not enabled."""
frappe.db.set_value("Google Settings", None, "enable", 0)
frappe.db.set_value("Google Settings", None, "google_drive_picker_enabled", 1)
settings = get_file_picker_settings()
self.assertEqual(settings, {})
def test_picker_enabled(self):
"""If picker is enabled, get_file_picker_settings should return the credentials."""
frappe.db.set_value("Google Settings", None, "enable", 1)
frappe.db.set_value("Google Settings", None, "google_drive_picker_enabled", 1)
settings = get_file_picker_settings()
self.assertEqual(True, settings.get("enabled", False))
self.assertEqual("test_client_id", settings.get("clientId", ""))
self.assertEqual("test_app_id", settings.get("appId", ""))
self.assertEqual("test_api_key", settings.get("developerKey", ""))
| # -*- coding: utf-8 -*-
# Copyright (c) 2021, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from .google_settings import get_file_picker_settings
class TestGoogleSettings(unittest.TestCase):
def setUp(self):
settings = frappe.get_single('Google Settings')
settings.client_id = 'test_client_id'
settings.app_id = 'test_app_id'
settings.api_key = 'test_api_key'
settings.save()
def test_picker_disabled(self):
"""Google Drive Picker should be disabled if it is not enabled in Google Settings."""
frappe.db.set_value('Google Settings', None, 'enable', 1)
frappe.db.set_value('Google Settings', None, 'google_drive_picker_enabled', 0)
settings = get_file_picker_settings()
self.assertEqual(settings, {})
def test_google_disabled(self):
"""Google Drive Picker should be disabled if Google integration is not enabled."""
frappe.db.set_value('Google Settings', None, 'enable', 0)
frappe.db.set_value('Google Settings', None, 'google_drive_picker_enabled', 1)
settings = get_file_picker_settings()
self.assertEqual(settings, {})
def test_picker_enabled(self):
"""If picker is enabled, get_file_picker_settings should return the credentials."""
frappe.db.set_value('Google Settings', None, 'enable', 1)
frappe.db.set_value('Google Settings', None, 'google_drive_picker_enabled', 1)
settings = get_file_picker_settings()
self.assertEqual(True, settings.get('enabled', False))
self.assertEqual('test_client_id', settings.get('clientId', ''))
self.assertEqual('test_app_id', settings.get('appId', ''))
self.assertEqual('test_api_key', settings.get('developerKey', ''))
| Python | 0.000001 |
f83076f722d66ebc27d66bc13798d4e5bc9cc27a | Fix `TypeError: must use keyword argument for key function` on Python 3 | scikits/image/transform/tests/test_hough_transform.py | scikits/image/transform/tests/test_hough_transform.py | import numpy as np
from numpy.testing import *
import scikits.image.transform as tf
import scikits.image.transform.hough_transform as ht
from scikits.image.transform import probabilistic_hough
def append_desc(func, description):
"""Append the test function ``func`` and append
``description`` to its name.
"""
func.description = func.__module__ + '.' + func.func_name + description
return func
from scikits.image.transform import *
def test_hough():
# Generate a test image
img = np.zeros((100, 100), dtype=int)
for i in range(25, 75):
img[100 - i, i] = 1
out, angles, d = tf.hough(img)
y, x = np.where(out == out.max())
dist = d[y[0]]
theta = angles[x[0]]
assert_equal(dist > 70, dist < 72)
assert_equal(theta > 0.78, theta < 0.79)
def test_hough_angles():
img = np.zeros((10, 10))
img[0, 0] = 1
out, angles, d = tf.hough(img, np.linspace(0, 360, 10))
assert_equal(len(angles), 10)
def test_py_hough():
ht._hough, fast_hough = ht._py_hough, ht._hough
yield append_desc(test_hough, '_python')
yield append_desc(test_hough_angles, '_python')
tf._hough = fast_hough
def test_probabilistic_hough():
# Generate a test image
img = np.zeros((100, 100), dtype=int)
for i in range(25, 75):
img[100 - i, i] = 100
img[i, i] = 100
# decrease default theta sampling because similar orientations may confuse
# as mentioned in article of Galambos et al
theta=np.linspace(0, np.pi, 45)
lines = probabilistic_hough(img, theta=theta, threshold=10, line_length=10, line_gap=1)
# sort the lines according to the x-axis
sorted_lines = []
for line in lines:
line = list(line)
line.sort(key=lambda x: x[0])
sorted_lines.append(line)
assert([(25, 75), (74, 26)] in sorted_lines)
assert([(25, 25), (74, 74)] in sorted_lines)
if __name__ == "__main__":
run_module_suite()
| import numpy as np
from numpy.testing import *
import scikits.image.transform as tf
import scikits.image.transform.hough_transform as ht
from scikits.image.transform import probabilistic_hough
def append_desc(func, description):
"""Append the test function ``func`` and append
``description`` to its name.
"""
func.description = func.__module__ + '.' + func.func_name + description
return func
from scikits.image.transform import *
def test_hough():
# Generate a test image
img = np.zeros((100, 100), dtype=int)
for i in range(25, 75):
img[100 - i, i] = 1
out, angles, d = tf.hough(img)
y, x = np.where(out == out.max())
dist = d[y[0]]
theta = angles[x[0]]
assert_equal(dist > 70, dist < 72)
assert_equal(theta > 0.78, theta < 0.79)
def test_hough_angles():
img = np.zeros((10, 10))
img[0, 0] = 1
out, angles, d = tf.hough(img, np.linspace(0, 360, 10))
assert_equal(len(angles), 10)
def test_py_hough():
ht._hough, fast_hough = ht._py_hough, ht._hough
yield append_desc(test_hough, '_python')
yield append_desc(test_hough_angles, '_python')
tf._hough = fast_hough
def test_probabilistic_hough():
# Generate a test image
img = np.zeros((100, 100), dtype=int)
for i in range(25, 75):
img[100 - i, i] = 100
img[i, i] = 100
# decrease default theta sampling because similar orientations may confuse
# as mentioned in article of Galambos et al
theta=np.linspace(0, np.pi, 45)
lines = probabilistic_hough(img, theta=theta, threshold=10, line_length=10, line_gap=1)
# sort the lines according to the x-axis
sorted_lines = []
for line in lines:
line = list(line)
line.sort(lambda x,y: cmp(x[0], y[0]))
sorted_lines.append(line)
assert([(25, 75), (74, 26)] in sorted_lines)
assert([(25, 25), (74, 74)] in sorted_lines)
if __name__ == "__main__":
run_module_suite()
| Python | 0.000409 |
c6f1ab2d33c31201c00435f336c7793b4ff8dd2a | Fix for docutils.error_reporting -> docutils.utils.error_reporting | notebook_sphinxext.py | notebook_sphinxext.py | import sys
import os.path
import re
import time
from docutils import io, nodes, statemachine, utils
from docutils.utils.error_reporting import ErrorString
from docutils.parsers.rst import Directive, convert_directive_function
from docutils.parsers.rst import directives, roles, states
from docutils.parsers.rst.roles import set_classes
from docutils.transforms import misc
from nbconvert import ConverterHTML
class Notebook(Directive):
"""
Use nbconvert to insert a notebook into the environment.
This is based on the Raw directive in docutils
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {}
has_content = False
def run(self):
# check if raw html is supported
if not self.state.document.settings.raw_enabled:
raise self.warning('"%s" directive disabled.' % self.name)
# set up encoding
attributes = {'format': 'html'}
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
e_handler = self.state.document.settings.input_encoding_error_handler
# get path to notebook
source_dir = os.path.dirname(
os.path.abspath(self.state.document.current_source))
nb_path = os.path.normpath(os.path.join(source_dir,
self.arguments[0]))
nb_path = utils.relative_path(None, nb_path)
# convert notebook to html
converter = ConverterHTML(nb_path)
converter.read()
# add HTML5 scoped attribute to header style tags
header = map(lambda s: s.replace('<style', '<style scoped="scoped"'),
converter.header_body())
# concatenate raw html lines
lines = ['<div class="ipynotebook">']
lines.extend(header)
lines.extend(converter.main_body())
lines.append('</div>')
text = '\n'.join(lines)
# add dependency
self.state.document.settings.record_dependencies.add(nb_path)
attributes['source'] = nb_path
# create notebook node
nb_node = notebook('', text, **attributes)
(nb_node.source, nb_node.line) = \
self.state_machine.get_source_and_line(self.lineno)
return [nb_node]
class notebook(nodes.raw):
pass
def visit_notebook_node(self, node):
self.visit_raw(node)
def depart_notebook_node(self, node):
self.depart_raw(node)
def setup(app):
app.add_node(notebook,
html=(visit_notebook_node, depart_notebook_node))
app.add_directive('notebook', Notebook)
| import sys
import os.path
import re
import time
from docutils import io, nodes, statemachine, utils
from docutils.error_reporting import ErrorString
from docutils.parsers.rst import Directive, convert_directive_function
from docutils.parsers.rst import directives, roles, states
from docutils.parsers.rst.roles import set_classes
from docutils.transforms import misc
from nbconvert import ConverterHTML
class Notebook(Directive):
"""
Use nbconvert to insert a notebook into the environment.
This is based on the Raw directive in docutils
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {}
has_content = False
def run(self):
# check if raw html is supported
if not self.state.document.settings.raw_enabled:
raise self.warning('"%s" directive disabled.' % self.name)
# set up encoding
attributes = {'format': 'html'}
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
e_handler = self.state.document.settings.input_encoding_error_handler
# get path to notebook
source_dir = os.path.dirname(
os.path.abspath(self.state.document.current_source))
nb_path = os.path.normpath(os.path.join(source_dir,
self.arguments[0]))
nb_path = utils.relative_path(None, nb_path)
# convert notebook to html
converter = ConverterHTML(nb_path)
converter.read()
# add HTML5 scoped attribute to header style tags
header = map(lambda s: s.replace('<style', '<style scoped="scoped"'),
converter.header_body())
# concatenate raw html lines
lines = ['<div class="ipynotebook">']
lines.extend(header)
lines.extend(converter.main_body())
lines.append('</div>')
text = '\n'.join(lines)
# add dependency
self.state.document.settings.record_dependencies.add(nb_path)
attributes['source'] = nb_path
# create notebook node
nb_node = notebook('', text, **attributes)
(nb_node.source, nb_node.line) = \
self.state_machine.get_source_and_line(self.lineno)
return [nb_node]
class notebook(nodes.raw):
pass
def visit_notebook_node(self, node):
self.visit_raw(node)
def depart_notebook_node(self, node):
self.depart_raw(node)
def setup(app):
app.add_node(notebook,
html=(visit_notebook_node, depart_notebook_node))
app.add_directive('notebook', Notebook)
| Python | 0 |
15016615c5406a56468171ab55c0cc60797dc580 | Check author is not NoneType, not author.name | reddit2kindle.py | reddit2kindle.py | import os
from flask import Flask, request, jsonify
from flask.templating import render_template
import util
import forms
app = Flask(__name__)
app.secret_key = os.urandom(24)
forms.csrf.init_app(app)
@app.route('/')
def index():
post = forms.Submission()
subreddit = forms.Subreddit()
return render_template('index.html', post=post, subreddit=subreddit)
@app.route('/thread', methods=['POST'])
def thread():
if util.validate_request_post(request.form) is not None:
return jsonify(type='danger', text=util.validate_request_post(request.form))
try:
submission = util.r.get_submission(url=request.form['submission'])
except:
return jsonify(type='danger', text='That wasn\'t a reddit link, was it?')
comments = None
if request.form['comments'] == 'true':
submission.replace_more_comments(limit=0)
comments = util.get_comments(submission)
if submission.selftext == '':
body = util.get_readability(submission.url)
else:
body = util.markdown(submission.selftext, output_format='html5')
title = submission.title
author = "[deleted]"
if submission.author is not None:
author = submission.author.name
address = request.form['email']
kindle_address = request.form['kindle_address']
attachment = render_template('comments.html', title=title, body=body, author=author, comments=comments)
status = util.send_email(address, kindle_address, attachment, title)
if status is None:
return jsonify(type='success', text='Success!')
else:
return jsonify(type='warning', text='Uh oh! Something went wrong on our end')
@app.route('/subreddit', methods=['POST'])
def convert():
if util.validate_request_subreddit(request.form) is not None:
return jsonify(type='danger', text=util.validate_request_subreddit(request.form))
subreddit = request.form['subreddit']
time = request.form['time']
limit = int(request.form['limit'])
address = request.form['email']
kindle_address = request.form['kindle_address']
try:
posts = util.get_posts(subreddit, time, limit)
if time == 'all':
title = 'Top ' + str(limit) + ' posts from /r/' + subreddit + ' ever'
else:
title = 'Top ' + str(limit) + ' posts from /r/' + subreddit + ' over the past ' + time
top = []
for post in posts:
try:
top.append({'title': post.title,
'body': util.get_readability(post.url) if post.selftext == '' else util.markdown(post.selftext),
'author': '[deleted]' if post.author is None else post.author.name })
except:
pass
except:
return jsonify(type='danger', text='That ain\'t no subreddit I\'ve ever heard of!')
attachment = render_template('posts.html', posts=top)
status = util.send_email(address, kindle_address, attachment, title)
if status is None:
return jsonify(type='success', text='Success!')
else:
return jsonify(type='warning', text='Uh oh! Something went wrong on our end')
if __name__ == '__main__':
app.run(debug=True)
| import os
from flask import Flask, request, jsonify
from flask.templating import render_template
import util
import forms
app = Flask(__name__)
app.secret_key = os.urandom(24)
forms.csrf.init_app(app)
@app.route('/')
def index():
post = forms.Submission()
subreddit = forms.Subreddit()
return render_template('index.html', post=post, subreddit=subreddit)
@app.route('/thread', methods=['POST'])
def thread():
if util.validate_request_post(request.form) is not None:
return jsonify(type='danger', text=util.validate_request_post(request.form))
try:
submission = util.r.get_submission(url=request.form['submission'])
except:
return jsonify(type='danger', text='That wasn\'t a reddit link, was it?')
comments = None
if request.form['comments'] == 'true':
submission.replace_more_comments(limit=0)
comments = util.get_comments(submission)
if submission.selftext == '':
body = util.get_readability(submission.url)
else:
body = util.markdown(submission.selftext, output_format='html5')
title = submission.title
author = "[deleted]"
if submission.author.name is not None:
author = submission.author.name
address = request.form['email']
kindle_address = request.form['kindle_address']
attachment = render_template('comments.html', title=title, body=body, author=author, comments=comments)
status = util.send_email(address, kindle_address, attachment, title)
if status is None:
return jsonify(type='success', text='Success!')
else:
return jsonify(type='warning', text='Uh oh! Something went wrong on our end')
@app.route('/subreddit', methods=['POST'])
def convert():
if util.validate_request_subreddit(request.form) is not None:
return jsonify(type='danger', text=util.validate_request_subreddit(request.form))
subreddit = request.form['subreddit']
time = request.form['time']
limit = int(request.form['limit'])
address = request.form['email']
kindle_address = request.form['kindle_address']
try:
posts = util.get_posts(subreddit, time, limit)
if time == 'all':
title = 'Top ' + str(limit) + ' posts from /r/' + subreddit + ' ever'
else:
title = 'Top ' + str(limit) + ' posts from /r/' + subreddit + ' over the past ' + time
top = []
for post in posts:
try:
top.append({'title': post.title,
'body': util.get_readability(post.url) if post.selftext == '' else util.markdown(post.selftext),
'author': '[deleted]' if post.author is None else post.author.name })
except:
pass
except:
return jsonify(type='danger', text='That ain\'t no subreddit I\'ve ever heard of!')
attachment = render_template('posts.html', posts=top)
status = util.send_email(address, kindle_address, attachment, title)
if status is None:
return jsonify(type='success', text='Success!')
else:
return jsonify(type='warning', text='Uh oh! Something went wrong on our end')
if __name__ == '__main__':
app.run(debug=True)
| Python | 0.021749 |
f3cada11b253ceea129342040f7e3d75f4f0cf15 | use assertions with the form elements in test_new instead of the regex on the html content | test_notes.py | test_notes.py | from webtest import TestApp
import os
import re
import notes
import dbaccessor
DB = 'notes.db'
class TestWebserver():
def test_index(self):
dba = dbaccessor.DbAccessor(DB)
dba.addNote('eins', 'lorem ipsum')
dba.addNote('zwei', 'blabla')
bottle = TestApp(notes.app)
result = bottle.get('/')
assert result.status == '200 OK'
match = re.search(r'<td>blabla</td>\s*</tr>', result.body)
assert match
def test_new(self):
bottle = TestApp(notes.app)
result = bottle.get('/new')
assert result.status == '200 OK'
form = result.form
assert form.action == '/new'
assert form.method == 'GET'
assert form['title'].value == ''
assert form['content'].value == ''
def test_adding_new_note(self):
bottle = TestApp(notes.app)
result = bottle.get('/new')
form = result.form
form['title'] = "testtitle"
form['content'] = "testcontent"
result = form.submit('save')
assert result.status == '200 OK'
def tearDown(self):
if os.path.isfile(DB):
os.remove(DB)
| from webtest import TestApp
import os
import re
import notes
import dbaccessor
DB = 'notes.db'
class TestWebserver():
def test_index(self):
dba = dbaccessor.DbAccessor(DB)
dba.addNote('eins', 'lorem ipsum')
dba.addNote('zwei', 'blabla')
bottle = TestApp(notes.app)
result = bottle.get('/')
assert result.status == '200 OK'
match = re.search(r'<td>blabla</td>\s*</tr>', result.body)
assert match
def test_new(self):
bottle = TestApp(notes.app)
result = bottle.get('/new')
assert result.status == '200 OK'
match = re.search(r'<input type="text" size="100" maxlength="100" name="content">', result.body)
assert match
def test_adding_new_note(self):
bottle = TestApp(notes.app)
result = bottle.get('/new')
form = result.form
form['title'] = "testtitle"
form['content'] = "testcontent"
result = form.submit('save')
assert result.status == '200 OK'
def tearDown(self):
if os.path.isfile(DB):
os.remove(DB)
| Python | 0 |
77e3f0da9bec64c2bf0f34faec735a29f1a74284 | remove test for Google+ | tests/test.py | tests/test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from you_get import *
from you_get.__main__ import url_to_module
def test_urls(urls):
for url in urls:
url_to_module(url).download(url, info_only = True)
class YouGetTests(unittest.TestCase):
def test_freesound(self):
test_urls([
"http://www.freesound.org/people/Corsica_S/sounds/184419/",
])
def test_jpopsuki(self):
test_urls([
"http://jpopsuki.tv/video/Dragon-Ash---Run-to-the-Sun/8ad7aec604badd0b0798cd999b63ae17",
])
def test_mixcloud(self):
test_urls([
"http://www.mixcloud.com/beatbopz/beat-bopz-disco-mix/",
"http://www.mixcloud.com/beatbopz/tokyo-taste-vol4/",
"http://www.mixcloud.com/DJVadim/north-america-are-you-ready/",
])
def test_vimeo(self):
test_urls([
"http://vimeo.com/56810854",
])
def test_xiami(self):
test_urls([
"http://www.xiami.com/song/1769835121",
])
def test_youtube(self):
test_urls([
"http://www.youtube.com/watch?v=pzKerr0JIPA",
"http://youtu.be/pzKerr0JIPA",
])
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from you_get import *
from you_get.__main__ import url_to_module
def test_urls(urls):
for url in urls:
url_to_module(url).download(url, info_only = True)
class YouGetTests(unittest.TestCase):
def test_freesound(self):
test_urls([
"http://www.freesound.org/people/Corsica_S/sounds/184419/",
])
def test_googleplus(self):
test_urls([
"http://plus.google.com/102663035987142737445/posts/jJRu43KQFT5",
"http://plus.google.com/+%E5%B9%B3%E7%94%B0%E6%A2%A8%E5%A5%88/posts/jJRu43KQFT5",
"http://plus.google.com/+平田梨奈/posts/jJRu43KQFT5",
"http://plus.google.com/photos/102663035987142737445/albums/5844078581209509505/5844078587839097874",
"http://plus.google.com/photos/+%E5%B9%B3%E7%94%B0%E6%A2%A8%E5%A5%88/albums/5844078581209509505/5844078587839097874",
"http://plus.google.com/photos/+平田梨奈/albums/5844078581209509505/5844078587839097874",
])
def test_jpopsuki(self):
test_urls([
"http://jpopsuki.tv/video/Dragon-Ash---Run-to-the-Sun/8ad7aec604badd0b0798cd999b63ae17",
])
def test_mixcloud(self):
test_urls([
"http://www.mixcloud.com/beatbopz/beat-bopz-disco-mix/",
"http://www.mixcloud.com/beatbopz/tokyo-taste-vol4/",
"http://www.mixcloud.com/DJVadim/north-america-are-you-ready/",
])
def test_vimeo(self):
test_urls([
"http://vimeo.com/56810854",
])
def test_xiami(self):
test_urls([
"http://www.xiami.com/song/1769835121",
])
def test_youtube(self):
test_urls([
"http://www.youtube.com/watch?v=pzKerr0JIPA",
"http://youtu.be/pzKerr0JIPA",
])
| Python | 0.00001 |
bbbd535ecabc6017aec6a3549c917d26036aff3b | Remove checks for testGotTrace unit test until trace event importer is implemented. | tools/telemetry/telemetry/core/chrome/tracing_backend_unittest.py | tools/telemetry/telemetry/core/chrome/tracing_backend_unittest.py | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import json
import logging
import os
import unittest
from telemetry.core import util
from telemetry.core.chrome import tracing_backend
from telemetry.test import tab_test_case
class TracingBackendTest(tab_test_case.TabTestCase):
def _StartServer(self):
base_dir = os.path.dirname(__file__)
self._browser.SetHTTPServerDirectories(
os.path.join(base_dir, '..', '..', '..', 'unittest_data'))
def _WaitForAnimationFrame(self):
def _IsDone():
js_is_done = """done"""
return bool(self._tab.EvaluateJavaScript(js_is_done))
util.WaitFor(_IsDone, 5)
def testGotTrace(self):
if not self._browser.supports_tracing:
logging.warning('Browser does not support tracing, skipping test.')
return
self._StartServer()
self._browser.StartTracing()
self._browser.StopTracing()
# TODO(tengs): check model for correctness after trace_event_importer
# is implemented (crbug.com/173327).
class TracingResultImplTest(unittest.TestCase):
def testWrite1(self):
ri = tracing_backend.TraceResultImpl([])
f = cStringIO.StringIO()
ri.Serialize(f)
v = f.getvalue()
j = json.loads(v)
assert 'traceEvents' in j
self.assertEquals(j['traceEvents'], [])
def testWrite2(self):
ri = tracing_backend.TraceResultImpl([
'"foo"',
'"bar"'])
f = cStringIO.StringIO()
ri.Serialize(f)
v = f.getvalue()
j = json.loads(v)
assert 'traceEvents' in j
self.assertEquals(j['traceEvents'], ['foo', 'bar'])
def testWrite3(self):
ri = tracing_backend.TraceResultImpl([
'"foo"',
'"bar"',
'"baz"'])
f = cStringIO.StringIO()
ri.Serialize(f)
v = f.getvalue()
j = json.loads(v)
assert 'traceEvents' in j
self.assertEquals(j['traceEvents'],
['foo', 'bar', 'baz'])
| # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import json
import logging
import os
import unittest
from telemetry.core import util
from telemetry.core.chrome import tracing_backend
from telemetry.test import tab_test_case
class TracingBackendTest(tab_test_case.TabTestCase):
def _StartServer(self):
base_dir = os.path.dirname(__file__)
self._browser.SetHTTPServerDirectories(
os.path.join(base_dir, '..', '..', '..', 'unittest_data'))
def _WaitForAnimationFrame(self):
def _IsDone():
js_is_done = """done"""
return bool(self._tab.EvaluateJavaScript(js_is_done))
util.WaitFor(_IsDone, 5)
def testGotTrace(self):
if not self._browser.supports_tracing:
logging.warning('Browser does not support tracing, skipping test.')
return
self._StartServer()
self._browser.StartTracing()
self._browser.StopTracing()
model = self._browser.GetTraceResultAndReset().AsTimelineModel()
events = model.GetAllEvents()
assert len(events) > 0
class TracingResultImplTest(unittest.TestCase):
def testWrite1(self):
ri = tracing_backend.TraceResultImpl([])
f = cStringIO.StringIO()
ri.Serialize(f)
v = f.getvalue()
j = json.loads(v)
assert 'traceEvents' in j
self.assertEquals(j['traceEvents'], [])
def testWrite2(self):
ri = tracing_backend.TraceResultImpl([
'"foo"',
'"bar"'])
f = cStringIO.StringIO()
ri.Serialize(f)
v = f.getvalue()
j = json.loads(v)
assert 'traceEvents' in j
self.assertEquals(j['traceEvents'], ['foo', 'bar'])
def testWrite3(self):
ri = tracing_backend.TraceResultImpl([
'"foo"',
'"bar"',
'"baz"'])
f = cStringIO.StringIO()
ri.Serialize(f)
v = f.getvalue()
j = json.loads(v)
assert 'traceEvents' in j
self.assertEquals(j['traceEvents'],
['foo', 'bar', 'baz'])
| Python | 0.000002 |
a440cef4140a4225fc093c9143d7cfd1a0c4e917 | Update metric through API | biggraphite/cli/web/namespaces/biggraphite.py | biggraphite/cli/web/namespaces/biggraphite.py | #!/usr/bin/env python
# Copyright 2018 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BigGraphite API."""
from __future__ import absolute_import
from flask import request
import flask_restplus as rp
from biggraphite import metric as bg_metric
from biggraphite.cli.web import context
# TODO:
# - Add the equivalent of what the accessor provides
# - Add the ability to get/set points.
api = rp.Namespace("biggraphite", description="BigGraphite API")
metric_metadata = api.model(
"MetricMetadata",
{
"aggregator": rp.fields.String(description="The metric aggregator"),
"retention": rp.fields.String(description="The metric retention"),
"carbon_xfilesfactor": rp.fields.Float(description="The metric carbon xfiles factor"),
}
)
metric = api.model(
"Metric",
{
"id": rp.fields.String(readOnly=True, description="The metric identifier"),
"name": rp.fields.String(description="The metric name"),
"metadata": rp.fields.Nested(metric_metadata, description="The metric metadata"),
"created_on": rp.fields.DateTime(),
"updated_on": rp.fields.DateTime(),
"read_on": rp.fields.DateTime(),
},
)
@api.route("/metric/<string:name>")
@api.doc("Operations on metrics.")
@api.param("name", "The metric name")
class MetricResource(rp.Resource):
"""A Metric."""
@api.doc("Get a metric by name.")
@api.marshal_with(metric)
def get(self, name):
"""Get a metric."""
m = context.accessor.get_metric(name)
if not m:
rp.abort(404)
return m.as_string_dict()
@api.doc("Update a metric.")
@api.expect(metric_metadata)
def post(self, name):
"""Update a metric."""
if not context.accessor.has_metric(name):
return "Unknown metric: '%s'" % name, 404
payload = request.json
metadata = bg_metric.MetricMetadata(
aggregator=bg_metric.Aggregator.from_config_name(payload["aggregator"]),
retention=bg_metric.Retention.from_string(payload["retention"]),
carbon_xfilesfactor=payload["carbon_xfilesfactor"]
)
context.accessor.update_metric(name, metadata)
return '', 204
| #!/usr/bin/env python
# Copyright 2018 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BigGraphite API."""
from __future__ import absolute_import
import flask_restplus as rp
from biggraphite.cli.web import context
# TODO:
# - Add the equivalent of what the accessor provides
# - Add the ability to get/set points.
api = rp.Namespace("biggraphite", description="BigGraphite API")
metric = api.model(
"Metric",
{
"id": rp.fields.String(readOnly=True, description="The metric identifier"),
"name": rp.fields.String(description="The metric name"),
"metadata": rp.fields.Raw(description="The metric metadata"),
"created_on": rp.fields.DateTime(),
"updated_on": rp.fields.DateTime(),
"read_on": rp.fields.DateTime(),
},
)
@api.route("/metric/<string:name>")
@api.doc("Operations on metrics.")
@api.param("name", "The metric name")
class MetricResource(rp.Resource):
"""A Metric."""
@api.doc("Get a metric by name.")
@api.marshal_with(metric)
def get(self, name):
"""Get a metric."""
m = context.accessor.get_metric(name)
if not m:
rp.abort(404)
return m.as_string_dict()
| Python | 0 |
1dbb1e0f8751f37271178665a727c4eefc49a88c | Remove subclassing of exception, since there is only one. | partner_firstname/exceptions.py | partner_firstname/exceptions.py | # -*- encoding: utf-8 -*-
# Odoo, Open Source Management Solution
# Copyright (C) 2014-2015 Grupo ESOC <www.grupoesoc.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openerp import _, exceptions
class EmptyNames(exceptions.ValidationError):
def __init__(self, record, value=_("No name is set.")):
self.record = record
self._value = value
self.name = _("Error(s) with partner %d's name.") % record.id
| # -*- encoding: utf-8 -*-
# Odoo, Open Source Management Solution
# Copyright (C) 2014-2015 Grupo ESOC <www.grupoesoc.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openerp import _, exceptions
class PartnerNameError(exceptions.ValidationError):
def __init__(self, record, value=None):
self.record = record
self._value = value
self.name = _("Error(s) with partner %d's name.") % record.id
@property
def value(self):
raise NotImplementedError()
class EmptyNames(PartnerNameError):
@property
def value(self):
return _("No name is set.")
| Python | 0 |
017889913a1dba443022ee032535bdc4cb40ddb6 | Make nodepool git repo caching more robust | modules/openstack_project/files/nodepool/scripts/cache_git_repos.py | modules/openstack_project/files/nodepool/scripts/cache_git_repos.py | #!/usr/bin/env python
# Copyright (C) 2011-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import re
import shutil
import urllib2
from common import run_local
URL = ('http://git.openstack.org/cgit/openstack-infra/config/plain/'
'modules/openstack_project/files/review.projects.yaml')
PROJECT_RE = re.compile('^-?\s+project:\s+(.*)$')
def clone_repo(project):
remote = 'git://git.openstack.org/%s.git' % project
# Clear out any existing target directory first, in case of a retry.
try:
shutil.rmtree(os.path.join('/opt/git', project))
except OSError:
pass
# Try to clone the requested git repository.
(status, out) = run_local(['git', 'clone', remote, project],
status=True, cwd='/opt/git')
# If it claims to have worked, make sure we can list branches.
if status == 0:
(status, moreout) = run_local(['git', 'branch', '-a'], status=True,
cwd=os.path.join('/opt/git', project))
out = '\n'.join((out, moreout))
# If that worked, try resetting to HEAD to make sure it's there.
if status == 0:
(status, moreout) = run_local(['git', 'reset', '--hard', 'HEAD'],
status=True,
cwd=os.path.join('/opt/git', project))
out = '\n'.join((out, moreout))
# Status of 0 imples all the above worked, 1 means something failed.
return (status, out)
def main():
# TODO(jeblair): use gerrit rest api when available
data = urllib2.urlopen(URL).read()
for line in data.split('\n'):
# We're regex-parsing YAML so that we don't have to depend on the
# YAML module which is not in the stdlib.
m = PROJECT_RE.match(line)
if m:
(status, out) = clone_repo(m.group(1))
print out
if status != 0:
print 'Retrying to clone %s' % m.group(1)
(status, out) = clone_repo(m.group(1))
print out
if status != 0:
raise Exception('Failed to clone %s' % m.group(1))
if __name__ == '__main__':
main()
| #!/usr/bin/env python
# Copyright (C) 2011-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import urllib2
from common import run_local
URL = ('http://git.openstack.org/cgit/openstack-infra/config/plain/'
'modules/openstack_project/files/review.projects.yaml')
PROJECT_RE = re.compile('^-?\s+project:\s+(.*)$')
def main():
# TODO(jeblair): use gerrit rest api when available
data = urllib2.urlopen(URL).read()
for line in data.split('\n'):
# We're regex-parsing YAML so that we don't have to depend on the
# YAML module which is not in the stdlib.
m = PROJECT_RE.match(line)
if m:
project = 'git://git.openstack.org/%s' % m.group(1)
print run_local(['git', 'clone', project, m.group(1)],
cwd='/opt/git')
if __name__ == '__main__':
main()
| Python | 0.000122 |
fc3bd4b8f6ce1f688afa86975619b11f64e3cd02 | Initialize BaseStaticSiteRenderer.client to None | django_medusa/renderers/base.py | django_medusa/renderers/base.py | from __future__ import print_function
from django.conf import settings
from django.test.client import Client
__all__ = ['COMMON_MIME_MAPS', 'BaseStaticSiteRenderer']
# Since mimetypes.get_extension() gets the "first known" (alphabetically),
# we get supid behavior like "text/plain" mapping to ".bat". This list
# overrides some file types we will surely use, to eliminate a call to
# mimetypes.get_extension() except in unusual cases.
COMMON_MIME_MAPS = {
"text/plain": ".txt",
"text/html": ".html",
"text/javascript": ".js",
"application/javascript": ".js",
"text/json": ".json",
"application/json": ".json",
"text/css": ".css",
}
class RenderError(Exception):
"""
Exception thrown during a rendering error.
"""
pass
class BaseStaticSiteRenderer(object):
"""
This default renderer writes the given URLs (defined in get_paths())
into static files on the filesystem by getting the view's response
through the Django testclient.
"""
def __init__(self):
self.client = None
@classmethod
def initialize_output(cls):
"""
Things that should be done only once to the output directory BEFORE
rendering occurs (i.e. setting up a config file, creating dirs,
creating an external resource, starting an atomic deploy, etc.)
Management command calls this once before iterating over all
renderer instances.
"""
pass
@classmethod
def finalize_output(cls):
"""
Things that should be done only once to the output directory AFTER
rendering occurs (i.e. writing end of config file, setting up
permissions, calling an external "deploy" method, finalizing an
atomic deploy, etc.)
Management command calls this once after iterating over all
renderer instances.
"""
pass
def get_paths(self):
""" Override this in a subclass to define the URLs to process """
raise NotImplementedError
@property
def paths(self):
""" Property that memoizes get_paths. """
p = getattr(self, "_paths", None)
if not p:
p = self.get_paths()
self._paths = p
return p
def _render(self, path=None, view=None):
client = self.client
if not client:
client = Client()
response = client.get(path)
if response.status_code != 200:
raise RenderError(
"Path {0} did not return status 200".format(path))
return response
@classmethod
def get_outpath(cls, path, content_type):
# Get non-absolute path
path = path[1:] if path.startswith('/') else path
# Resolves to a file, not a directory
if not path.endswith('/'):
return path
mime = content_type.split(';', 1)[0]
return os.path.join(path, cls.get_dirsuffix(content_type))
@classmethod
def get_dirsuffix(cls, content_type):
return ('index' +
(COMMON_MIME_MAPS.get(mime, mimetypes.guess_extension(mime)) or
'.html'))
def render_path(self, path=None, view=None):
raise NotImplementedError
def generate(self):
if getattr(settings, "MEDUSA_MULTITHREAD", False):
from multiprocessing import Pool, cpu_count
print("Generating with up to %d processes..." % cpu_count())
pool = Pool(cpu_count())
generator = PageGenerator(self)
retval = pool.map(
generator,
((path, None) for path in self.paths),
chunksize=1
)
pool.close()
else:
self.client = Client()
retval = map(self.render_path, self.paths)
return retval
class PageGenerator(object):
"""
Helper class to bounce things back into the renderer instance, since
multiprocessing is unable to transfer a bound method object into a pickle.
"""
def __init__(self, renderer):
self.renderer = renderer
def __call__(self, args):
self.renderer.render_path(*args)
| from __future__ import print_function
from django.conf import settings
from django.test.client import Client
__all__ = ['COMMON_MIME_MAPS', 'BaseStaticSiteRenderer']
# Since mimetypes.get_extension() gets the "first known" (alphabetically),
# we get supid behavior like "text/plain" mapping to ".bat". This list
# overrides some file types we will surely use, to eliminate a call to
# mimetypes.get_extension() except in unusual cases.
COMMON_MIME_MAPS = {
"text/plain": ".txt",
"text/html": ".html",
"text/javascript": ".js",
"application/javascript": ".js",
"text/json": ".json",
"application/json": ".json",
"text/css": ".css",
}
class RenderError(Exception):
"""
Exception thrown during a rendering error.
"""
pass
class BaseStaticSiteRenderer(object):
"""
This default renderer writes the given URLs (defined in get_paths())
into static files on the filesystem by getting the view's response
through the Django testclient.
"""
@classmethod
def initialize_output(cls):
"""
Things that should be done only once to the output directory BEFORE
rendering occurs (i.e. setting up a config file, creating dirs,
creating an external resource, starting an atomic deploy, etc.)
Management command calls this once before iterating over all
renderer instances.
"""
pass
@classmethod
def finalize_output(cls):
"""
Things that should be done only once to the output directory AFTER
rendering occurs (i.e. writing end of config file, setting up
permissions, calling an external "deploy" method, finalizing an
atomic deploy, etc.)
Management command calls this once after iterating over all
renderer instances.
"""
pass
def get_paths(self):
""" Override this in a subclass to define the URLs to process """
raise NotImplementedError
@property
def paths(self):
""" Property that memoizes get_paths. """
p = getattr(self, "_paths", None)
if not p:
p = self.get_paths()
self._paths = p
return p
def _render(self, path=None, view=None):
client = self.client
if not client:
client = Client()
response = client.get(path)
if response.status_code != 200:
raise RenderError(
"Path {0} did not return status 200".format(path))
return response
@classmethod
def get_outpath(cls, path, content_type):
# Get non-absolute path
path = path[1:] if path.startswith('/') else path
# Resolves to a file, not a directory
if not path.endswith('/'):
return path
mime = content_type.split(';', 1)[0]
return os.path.join(path, cls.get_dirsuffix(content_type))
@classmethod
def get_dirsuffix(cls, content_type):
return ('index' +
(COMMON_MIME_MAPS.get(mime, mimetypes.guess_extension(mime)) or
'.html'))
def render_path(self, path=None, view=None):
raise NotImplementedError
def generate(self):
if getattr(settings, "MEDUSA_MULTITHREAD", False):
from multiprocessing import Pool, cpu_count
print("Generating with up to %d processes..." % cpu_count())
pool = Pool(cpu_count())
generator = PageGenerator(self)
retval = pool.map(
generator,
((path, None) for path in self.paths),
chunksize=1
)
pool.close()
else:
self.client = Client()
retval = map(self.render_path, self.paths)
return retval
class PageGenerator(object):
"""
Helper class to bounce things back into the renderer instance, since
multiprocessing is unable to transfer a bound method object into a pickle.
"""
def __init__(self, renderer):
self.renderer = renderer
def __call__(self, args):
self.renderer.render_path(*args)
| Python | 0.000022 |
ebfd3d465c376f0eb9eb664f93d6656b232c7867 | Add 'styp' major brand to compatible brands, and make compatible_brands a set. | isobmff.py | isobmff.py | from base64 import b64encode
import bitstring
from bitstring import BitStream
import json
import struct
def _to_json(o):
if isinstance(o, bytes):
try:
return o.decode("ASCII")
except:
return b64encode(o)
if isinstance(o, set):
return list(o)
return o.__dict__
class Box(object):
def __init__(self, type):
if isinstance(type, str):
type = type.encode("ASCII")
self.type = type
@property
def size(self):
return 8
@property
def bytes(self):
return struct.pack("!I4s", self.size, self.type)
def __repr__(self):
return json.dumps(self, default=_to_json)
class StypBox(Box):
def __init__(self, major_brand, minor_version=0, compatible_brands=None):
super().__init__("styp")
if isinstance(major_brand, str):
major_brand = major_brand.encode("ASCII")
self.major_brand = major_brand
self.minor_version = minor_version
self.compatible_brands = set()
self.compatible_brands.add(major_brand)
for brand in compatible_brands or []:
if isinstance(brand, str):
brand = brand.encode("ASCII")
self.compatible_brands.add(brand)
@property
def size(self):
return super().size + 8 + len(self.compatible_brands) * 4
@property
def bytes(self):
binary = super().bytes + struct.pack("!4sII", self.major_brand,
self.minor_version, len(self.compatible_brands))
for brand in self.compatible_brands:
binary += struct.pack("!4s", brand)
return binary
class FullBox(Box):
def __init__(self, type, version, flags):
super().__init__(type)
self.version = version
self.flags = flags
@property
def size(self):
return Box.size.fget(self) + 4
@property
def bytes(self):
return Box.bytes.fget(self) + struct.pack("!BBH",
self.version, self.flags >> 16, self.flags & 0xFF)
return binary
class SidxReference(object):
class ReferenceType:
MEDIA = 0
INDEX = 1
def __init__(self, reference_type):
self.reference_type = reference_type
self.referenced_size = 0
self.subsegment_duration = 0
self.starts_with_sap = 0
self.sap_type = 0
self.sap_delta_time = 0
@property
def size(self):
return 12
@property
def bytes(self):
return bitstring.pack("bool, uint:31, uint:32, bool, uint:3, uint:28",
self.reference_type, self.referenced_size, self.subsegment_duration,
self.starts_with_sap, self.sap_type, self.sap_delta_time).bytes
class SidxBox(FullBox):
def __init__(self, version=0):
super().__init__("sidx", version, 0)
self.reference_id = 0
self.timescale = 90000
self.earliest_presentation_time = 0
self.first_offset = 0
self.references = []
@property
def size(self):
total = super().size + 12
if self.version == 0:
total += 8
else:
total += 16
for reference in self.references:
total += reference.size
return total
@property
def bytes(self):
binary = super().bytes + struct.pack("!II", self.reference_id,
self.timescale)
if self.version == 0:
binary += struct.pack("!QQ", self.earliest_presentation_time,
self.first_offset)
else:
binary += struct.pack("!II", self.earliest_presentation_time,
self.first_offset)
binary += struct.pack("!HH", 0, len(self.references))
for reference in self.references:
binary += reference.bytes
return binary
| from base64 import b64encode
import bitstring
from bitstring import BitStream
import json
import struct
def _to_json(o):
if isinstance(o, bytes):
try:
return o.decode("ASCII")
except:
return b64encode(o)
return o.__dict__
class Box(object):
def __init__(self, type):
if isinstance(type, str):
type = type.encode("ASCII")
self.type = type
@property
def size(self):
return 8
@property
def bytes(self):
return struct.pack("!I4s", self.size, self.type)
def __repr__(self):
return json.dumps(self, default=_to_json)
class StypBox(Box):
def __init__(self, major_brand, minor_version=0, compatible_brands = None):
super().__init__("styp")
if isinstance(major_brand, str):
major_brand = major_brand.encode("ASCII")
self.major_brand = major_brand
self.minor_version = minor_version
self.compatible_brands = compatible_brands if compatible_brands else []
@property
def size(self):
return super().size + 8 + len(self.compatible_brands) * 4
@property
def bytes(self):
binary = super().bytes + struct.pack("!4sII", self.major_brand,
self.minor_version, len(self.compatible_brands))
for brand in self.compatible_brands:
binary += struct.pack("!4s", brand)
return binary
class FullBox(Box):
def __init__(self, type, version, flags):
super().__init__(type)
self.version = version
self.flags = flags
@property
def size(self):
return Box.size.fget(self) + 4
@property
def bytes(self):
return Box.bytes.fget(self) + struct.pack("!BBH",
self.version, self.flags >> 16, self.flags & 0xFF)
return binary
class SidxReference(object):
class ReferenceType:
MEDIA = 0
INDEX = 1
def __init__(self, reference_type):
self.reference_type = reference_type
self.referenced_size = 0
self.subsegment_duration = 0
self.starts_with_sap = 0
self.sap_type = 0
self.sap_delta_time = 0
@property
def size(self):
return 12
@property
def bytes(self):
return bitstring.pack("bool, uint:31, uint:32, bool, uint:3, uint:28",
self.reference_type, self.referenced_size, self.subsegment_duration,
self.starts_with_sap, self.sap_type, self.sap_delta_time).bytes
class SidxBox(FullBox):
def __init__(self, version=0):
super().__init__("sidx", version, 0)
self.reference_id = 0
self.timescale = 90000
self.earliest_presentation_time = 0
self.first_offset = 0
self.references = []
@property
def size(self):
total = super().size + 12
if self.version == 0:
total += 8
else:
total += 16
for reference in self.references:
total += reference.size
return total
@property
def bytes(self):
binary = super().bytes + struct.pack("!II", self.reference_id,
self.timescale)
if self.version == 0:
binary += struct.pack("!QQ", self.earliest_presentation_time,
self.first_offset)
else:
binary += struct.pack("!II", self.earliest_presentation_time,
self.first_offset)
binary += struct.pack("!HH", 0, len(self.references))
for reference in self.references:
binary += reference.bytes
return binary
| Python | 0 |
b0133c948555c821a9dcae1df4119a2bfcc19304 | fix building | packages/dependencies/librubberband.py | packages/dependencies/librubberband.py | {
'repo_type' : 'git',
'url' : 'https://github.com/breakfastquay/rubberband.git',
'download_header' : [
'https://raw.githubusercontent.com/DeadSix27/python_cross_compile_script/master/additional_headers/ladspa.h',
],
'env_exports' : {
'AR': '{cross_prefix_bare}ar',
'CC': '{cross_prefix_bare}gcc',
'PREFIX': '{target_prefix}',
'RANLIB': '{cross_prefix_bare}ranlib',
'LD': '{cross_prefix_bare}ld',
'STRIP': '{cross_prefix_bare}strip',
'CXX': '{cross_prefix_bare}g++',
# 'PKG_CONFIG': 'pkg-config --static',
'SNDFILE_LIBS': '-lsndfile -lopus -lFLAC -lvorbis -lvorbisenc -logg -lspeex',
},
'configure_options' : '--host={target_host} --prefix={target_prefix}',
'build_options' : '{make_prefix_options}',
'needs_make_install' : False,
'run_post_build' : [
'cp -fv lib/* "{target_prefix}/lib"',
'cp -frv rubberband "{target_prefix}/include"',
'cp -fv rubberband.pc.in "{pkg_config_path}/rubberband.pc"',
'sed -i.bak "s|%PREFIX%|{target_prefix_sed_escaped}|" "{pkg_config_path}/rubberband.pc"',
'sed -i.bak \'s/-lrubberband *$/-lrubberband -lfftw3 -lsamplerate -lstdc++/\' "{pkg_config_path}/rubberband.pc"',
],
'depends_on' : [
'libsndfile',
],
'_info' : { 'version' : '1.8.1', 'fancy_name' : 'librubberband' },
} | {
'repo_type' : 'git',
'url' : 'https://github.com/breakfastquay/rubberband.git',
'download_header' : [
'https://raw.githubusercontent.com/DeadSix27/python_cross_compile_script/master/additional_headers/ladspa.h',
],
'env_exports' : {
'AR' : '{cross_prefix_bare}ar',
'CC' : '{cross_prefix_bare}gcc',
'PREFIX' : '{target_prefix}',
'RANLIB' : '{cross_prefix_bare}ranlib',
'LD' : '{cross_prefix_bare}ld',
'STRIP' : '{cross_prefix_bare}strip',
'CXX' : '{cross_prefix_bare}g++',
},
'configure_options' : '--host={target_host} --prefix={target_prefix} --disable-shared --enable-static',
'build_options' : '{make_prefix_options}',
'needs_make_install' : False,
'run_post_build' : [
'cp -fv lib/* "{target_prefix}/lib"',
'cp -frv rubberband "{target_prefix}/include"',
'cp -fv rubberband.pc.in "{pkg_config_path}/rubberband.pc"',
'sed -i.bak "s|%PREFIX%|{target_prefix_sed_escaped}|" "{pkg_config_path}/rubberband.pc"',
'sed -i.bak \'s/-lrubberband *$/-lrubberband -lfftw3 -lsamplerate -lstdc++/\' "{pkg_config_path}/rubberband.pc"',
],
'depends_on' : [
'libsndfile',
],
'_info' : { 'version' : '1.8.1', 'fancy_name' : 'librubberband' },
} | Python | 0.000003 |
a96cb89524f2fa17a015011d972d396e509a1079 | Add code for getting and releasing a database connection | journal.py | journal.py | # -*- coding: utf-8 -*-
from flask import Flask
import os
import psycopg2
from contextlib import closing
from flask import g
DB_SCHEMA = """
DROP TABLE IF EXISTS entries;
CREATE TABLE entries (
id serial PRIMARY KEY,
title VARCHAR (127) NOT NULL,
text TEXT NOT NULL,
created TIMESTAMP NOT NULL
)
"""
app = Flask(__name__)
@app.route('/')
def hello():
return u'Hello world!'
app.config['DATABASE'] = os.environ.get(
'DATABASE_URL', 'dbname=learning_journal user=elizabethrives'
)
def connect_db():
"""Return a connection to the configured database"""
return psycopg2.connect(app.config['DATABASE'])
def init_db():
"""Initialize the database using DB_SCHEMA
WARNING: executing this function will drop existing tables.
"""
with closing(connect_db()) as db:
db.cursor().execute(DB_SCHEMA)
db.commit()
def get_database_connection():
db = getattr(g, 'db', None)
if db is None:
g.db = db = connect_db()
return db
@app.teardown_request
def teardown_request(exception):
db = getattr(g, 'db', None)
if db is not None:
if exception and isinstance(exception, psycopg2.Error):
db.rollback()
else:
db.commit()
db.close()
if __name__ == '__main__':
app.run(debug=True)
| # -*- coding: utf-8 -*-
from flask import Flask
import os
import psycopg2
from contextlib import closing
DB_SCHEMA = """
DROP TABLE IF EXISTS entries;
CREATE TABLE entries (
id serial PRIMARY KEY,
title VARCHAR (127) NOT NULL,
text TEXT NOT NULL,
created TIMESTAMP NOT NULL
)
"""
app = Flask(__name__)
@app.route('/')
def hello():
return u'Hello world!'
app.config['DATABASE'] = os.environ.get(
'DATABASE_URL', 'dbname=learning_journal user=elizabethrives'
)
def connect_db():
"""Return a connection to the configured database"""
return psycopg2.connect(app.config['DATABASE'])
def init_db():
"""Initialize the database using DB_SCHEMA
WARNING: executing this function will drop existing tables.
"""
with closing(connect_db()) as db:
db.cursor().execute(DB_SCHEMA)
db.commit()
if __name__ == '__main__':
app.run(debug=True)
| Python | 0 |
69705079398391cdc392b18dcd440fbc3b7404fd | Set celery to ignore results | celery_cgi.py | celery_cgi.py | import os
import logging
from celery import Celery
from temp_config.set_environment import DeployEnv
runtime_env = DeployEnv()
runtime_env.load_deployment_environment()
redis_server = os.environ.get('REDIS_HOSTNAME')
redis_port = os.environ.get('REDIS_PORT')
celery_tasks = [
'hms_flask.modules.hms_controller',
'pram_flask.tasks'
]
redis = 'redis://' + redis_server + ':' + redis_port + '/0'
logging.info("Celery connecting to redis server: " + redis)
celery = Celery('flask_qed', broker=redis, backend=redis, include=celery_tasks)
celery.conf.update(
CELERY_ACCEPT_CONTENT=['json'],
CELERY_TASK_SERIALIZER='json',
CELERY_RESULT_SERIALIZER='json',
CELERY_IGNORE_RESULT=True,
CELERY_TRACK_STARTED=True,
)
| import os
import logging
from celery import Celery
from temp_config.set_environment import DeployEnv
runtime_env = DeployEnv()
runtime_env.load_deployment_environment()
redis_server = os.environ.get('REDIS_HOSTNAME')
redis_port = os.environ.get('REDIS_PORT')
celery_tasks = [
'hms_flask.modules.hms_controller',
'pram_flask.tasks'
]
redis = 'redis://' + redis_server + ':' + redis_port + '/0'
logging.info("Celery connecting to redis server: " + redis)
celery = Celery('flask_qed', broker=redis, backend=redis, include=celery_tasks)
celery.conf.update(
CELERY_ACCEPT_CONTENT=['json'],
CELERY_TASK_SERIALIZER='json',
CELERY_RESULT_SERIALIZER='json',
CELERY_IGNORE_RESULT=False,
CELERY_TRACK_STARTED=True,
)
| Python | 0 |
9bdc1dbc37a67d726f808e724b862e7de84fa06a | Change function name | changedate.py | changedate.py | """ Calcular Data a partir de uma quantidade de minutos """
def change_date(dataEnt, op, minutosEnt):
""" Calcular nova data """
dataEnt, horaEnt = dataEnt.split(" ", 2)
diaIni, mesIni, anoIni = dataEnt.split("/", 3)
horaIni, minuIni = horaEnt.split(":", 2)
# transformar tudo em minutos
# converter horas em minutos totais
minutosTotais = (int(horaIni) * 60) + int(minuIni) + minutosEnt
print("Total de Minutos: ", minutosTotais)
# 5415 / 60 minutos = 90.25 => separar inteiro de casas decimais 0.25 * 60 = 15
horas_minutos_conv = minutosTotais / 60
print(int(horas_minutos_conv))
# 90h e 15 min
i, d = divmod(horas_minutos_conv, 1)
resto_minutos = d * 60
print(int(resto_minutos))
# 90h / 24h = 3.75 => separar inteiro de casas decimais = 0.75 / 24
total_dias = horas_minutos_conv / 24
print(total_dias)
i, d = divmod(total_dias, 1)
xtotal_dias = i
xtotal_minutos = d
print("Total Dias", int(xtotal_dias))
# 3d 3.75 (0.75 * 24) = 18 h
minutosHora = xtotal_minutos * 24
print(int(xtotal_dias), " Dias", int(minutosHora), " horas", int(resto_minutos), " minutos")
# data_alterada = '01/01/2012 12:00' essa data sera calculada
# print(data_alterada)
if __name__ == ("__main__"):
change_date("31/12/2016 23:35", "+", 200)
| """ Calcular Data a partir de uma quantidade de minutos """
def alterar_data(dataEnt, op, minutosEnt):
""" Calcular nova data """
dataEnt, horaEnt = dataEnt.split(" ", 2)
diaIni, mesIni, anoIni = dataEnt.split("/", 3)
horaIni, minuIni = horaEnt.split(":", 2)
# transformar tudo em minutos
# converter horas em minutos totais
minutosTotais = (int(horaIni) * 60) + int(minuIni) + minutosEnt
print("Total de Minutos: ", minutosTotais)
# 5415 / 60 minutos = 90.25 => separar inteiro de casas decimais 0.25 * 60 = 15
horas_minutos_conv = minutosTotais / 60
print(int(horas_minutos_conv))
# 90h e 15 min
i, d = divmod(horas_minutos_conv, 1)
resto_minutos = d * 60
print(int(resto_minutos))
# 90h / 24h = 3.75 => separar inteiro de casas decimais = 0.75 / 24
total_dias = horas_minutos_conv / 24
print(total_dias)
i, d = divmod(total_dias, 1)
xtotal_dias = i
xtotal_minutos = d
print("Total Dias", int(xtotal_dias))
# 3d 3.75 (0.75 * 24) = 18 h
minutosHora = xtotal_minutos * 24
print(int(xtotal_dias), " Dias", int(minutosHora), " horas", int(resto_minutos), " minutos")
# data_alterada = '01/01/2012 12:00' essa data sera calculada
# print(data_alterada)
if __name__ == ("__main__"):
alterar_data("31/12/2016 23:35", "+", 25)
| Python | 0.000092 |
a10729414971ee454276960fcc1a736c08b3aef7 | Fix syntax error | corehq/tests/noseplugins/uniformresult.py | corehq/tests/noseplugins/uniformresult.py | r"""A plugin to format test names uniformly for easy comparison
Usage:
# collect django tests
COLLECT_ONLY=1 ./manage.py test -v2 --settings=settings 2> tests-django.txt
# collect nose tests
./manage.py test -v2 --collect-only 2> tests-nose.txt
# clean up django test output: s/skipped\ \'.*\'$/ok/
# sort each output file
# diff tests-django.txt tests-nose.txt
"""
from inspect import isfunction
from types import ModuleType
from nose.case import FunctionTestCase
from nose.plugins import Plugin
def uniform_description(test):
if type(test).__name__ == "DocTestCase":
return test._dt_test.name
if isinstance(test, ModuleType):
return test.__name__
if isinstance(test, type) or isfunction(test):
return "%s:%s" % (test.__module__, test.__name__)
if isinstance(test, FunctionTestCase):
descriptor = test.descriptor or test.test
return "%s:%s %s" % (
descriptor.__module__,
descriptor.__name__,
test.arg,
)
name = "%s:%s.%s" % (
test.__module__,
type(test).__name__,
test._testMethodName
)
return name
#return sys.modules[test.__module__].__file__
class UniformTestResultPlugin(Plugin):
"""Format test descriptions for easy comparison
"""
name = "uniform-results"
enabled = True
def configure(self, options, conf):
"""Do not call super (always enabled)"""
def describeTest(self, test):
return uniform_description(test.test)
| """A plugin to format test names uniformly for easy comparison
Usage:
# collect django tests
COLLECT_ONLY=1 ./manage.py test -v2 --settings=settings 2> tests-django.txt
# collect nose tests
./manage.py test -v2 --collect-only 2> tests-nose.txt
# clean up django test output: s/skipped\ \'.*\'$/ok/
# sort each output file
# diff tests-django.txt tests-nose.txt
"""
from inspect import isfunction
from types import ModuleType
from nose.case import FunctionTestCase
from nose.plugins import Plugin
def uniform_description(test):
if type(test).__name__ == "DocTestCase":
return test._dt_test.name
if isinstance(test, ModuleType):
return test.__name__
if isinstance(test, type) or isfunction(test):
return "%s:%s" % (test.__module__, test.__name__)
if isinstance(test, FunctionTestCase):
descriptor = test.descriptor or test.test
return "%s:%s %s" % (
descriptor.__module__,
descriptor.__name__,
test.arg,
)
name = "%s:%s.%s" % (
test.__module__,
type(test).__name__,
test._testMethodName
)
return name
#return sys.modules[test.__module__].__file__
class UniformTestResultPlugin(Plugin):
"""Format test descriptions for easy comparison
"""
name = "uniform-results"
enabled = True
def configure(self, options, conf):
"""Do not call super (always enabled)"""
def describeTest(self, test):
return uniform_description(test.test)
| Python | 0.999991 |
50805c2da2889c13485096f53de27af27a06391a | Implement tree preorder traversal | all-domains/data-structures/trees/tree-order-traversal/solution.py | all-domains/data-structures/trees/tree-order-traversal/solution.py | # https://www.hackerrank.com/challenges/tree-preorder-traversal
# Python 2
"""
Node is defined as
self.left (the left child of the node)
self.right (the right child of the node)
self.data (the value of the node)
"""
def preOrder(tree):
if tree is None: return
print(tree.data),
return preOrder(tree.left) or preOrder(tree.right)
"""
class Node:
def __init__(self, left=None, right=None, data=None):
self.left = left
self.right = right
self.data = data
one = Node(data=1)
four = Node(data=4)
six = Node(data=6)
five = Node(left=one, right=four, data=5)
two = Node(left=six, data=2)
three = Node(left=five, right=two, data=3)
preOrder(three)
"""
| # https://www.hackerrank.com/challenges/tree-preorder-traversal
# Python 2
"""
Node is defined as
self.left (the left child of the node)
self.right (the right child of the node)
self.data (the value of the node)
"""
def preOrder(tree):
if tree is None: return
print(tree.data)
return preOrder(tree.left) or preOrder(tree.right)
class Node:
def __init__(self, left=None, right=None, data=None):
self.left = left
self.right = right
self.data = data
one = Node(data=1)
four = Node(data=4)
six = Node(data=6)
five = Node(left=one, right=four, data=5)
two = Node(left=six, data=2)
three = Node(left=five, right=two, data=3)
preOrder(three)
| Python | 0.000002 |
1097889524cf7deb4b87722d3aedd27c071117c1 | Simplify exception logging in template render method. | app/soc/views/template.py | app/soc/views/template.py | #!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the boiler plate required to construct templates
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
import logging
from django.template import loader
from soc.views.helper import context as context_helper
class Template(object):
"""Template class that facilitates the rendering of templates.
"""
def __init__(self, data):
self.data = data
def render(self):
"""Renders the template to a string.
Uses the context method to retrieve the appropriate context, uses the
self.templatePath() method to retrieve the template that should be used.
"""
try:
context = context_helper.default(self.data)
context.update(self.context())
rendered = loader.render_to_string(self.templatePath(), dictionary=context)
except Exception, e:
logging.exception(e)
raise e
return rendered
def context(self):
"""Returns the context for the current template.
"""
return {}
def templatePath(self):
"""Returns the path to the template that should be used in render().
Subclasses should override this method.
"""
raise NotImplementedError()
| #!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the boiler plate required to construct templates
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
import logging
import traceback
from django.template import loader
from soc.views.helper import context as context_helper
class Template(object):
"""Template class that facilitates the rendering of templates.
"""
def __init__(self, data):
self.data = data
def render(self):
"""Renders the template to a string.
Uses the context method to retrieve the appropriate context, uses the
self.templatePath() method to retrieve the template that should be used.
"""
try:
context = context_helper.default(self.data)
context.update(self.context())
rendered = loader.render_to_string(self.templatePath(), dictionary=context)
except Exception, e:
logging.error(traceback.format_exc(e))
raise e
return rendered
def context(self):
"""Returns the context for the current template.
"""
return {}
def templatePath(self):
"""Returns the path to the template that should be used in render().
Subclasses should override this method.
"""
raise NotImplementedError()
| Python | 0 |
a6ed56b37bba3f5abff73c297a8a20271d73cab2 | Add configure call to random_agent | example/random-agent/random-agent.py | example/random-agent/random-agent.py | #!/usr/bin/env python
import argparse
import logging
import sys
import gym
import universe # register the universe environments
from universe import wrappers
logger = logging.getLogger()
def main():
parser = argparse.ArgumentParser(description=None)
parser.add_argument('-v', '--verbose', action='count', dest='verbosity', default=0, help='Set verbosity.')
args = parser.parse_args()
if args.verbosity == 0:
logger.setLevel(logging.INFO)
elif args.verbosity >= 1:
logger.setLevel(logging.DEBUG)
env = gym.make('flashgames.NeonRace-v0')
env.configure(remotes=1) # automatically creates a local docker container
# Restrict the valid random actions. (Try removing this and see
# what happens when the agent is given full control of the
# keyboard/mouse.)
env = wrappers.SafeActionSpace(env)
observation_n = env.reset()
while True:
# your agent here
#
# Try sending this instead of a random action: ('KeyEvent', 'ArrowUp', True)
action_n = [env.action_space.sample() for ob in observation_n]
observation_n, reward_n, done_n, info = env.step(action_n)
env.render()
return 0
if __name__ == '__main__':
sys.exit(main())
| #!/usr/bin/env python
import argparse
import logging
import sys
import gym
import universe # register the universe environments
from universe import wrappers
logger = logging.getLogger()
def main():
parser = argparse.ArgumentParser(description=None)
parser.add_argument('-v', '--verbose', action='count', dest='verbosity', default=0, help='Set verbosity.')
args = parser.parse_args()
if args.verbosity == 0:
logger.setLevel(logging.INFO)
elif args.verbosity >= 1:
logger.setLevel(logging.DEBUG)
env = gym.make('flashgames.NeonRace-v0')
# Restrict the valid random actions. (Try removing this and see
# what happens when the agent is given full control of the
# keyboard/mouse.)
env = wrappers.SafeActionSpace(env)
observation_n = env.reset()
while True:
# your agent here
#
# Try sending this instead of a random action: ('KeyEvent', 'ArrowUp', True)
action_n = [env.action_space.sample() for ob in observation_n]
observation_n, reward_n, done_n, info = env.step(action_n)
env.render()
return 0
if __name__ == '__main__':
sys.exit(main())
| Python | 0.000001 |
cd23780fdc39003f2affe7352bc3253f958faaa5 | Change assertion so it works with pytest (don't know what its problem is...) | src/zeit/content/cp/browser/tests/test_centerpage.py | src/zeit/content/cp/browser/tests/test_centerpage.py | import mock
import zeit.cms.testing
import zeit.content.cp
import zope.testbrowser.testing
class PermissionsTest(zeit.cms.testing.BrowserTestCase):
layer = zeit.content.cp.testing.layer
def setUp(self):
super(PermissionsTest, self).setUp()
zeit.content.cp.browser.testing.create_cp(self.browser)
self.browser.getLink('Checkin').click()
self.producing = zope.testbrowser.testing.Browser()
self.producing.addHeader('Authorization', 'Basic producer:producerpw')
def test_normal_user_may_not_delete(self):
b = self.browser
b.open(
'http://localhost/++skin++vivi/repository/online/2007/01/island')
self.assertNotIn('island/@@delete.html', b.contents)
def test_producing_may_delete(self):
b = self.producing
b.open(
'http://localhost/++skin++vivi/repository/online/2007/01/island')
self.assertEllipsis('...<a...island/@@delete.html...', b.contents)
def test_normal_user_may_not_retract(self):
b = self.browser
with mock.patch('zeit.cms.workflow.interfaces.IPublishInfo') as pi:
pi().published = True
b.open(
'http://localhost/++skin++vivi/repository/online/2007/01/'
'island')
self.assertNotIn('island/@@retract', b.contents)
def test_producing_may_retract(self):
b = self.producing
with mock.patch('zeit.cms.workflow.interfaces.IPublishInfo') as pi:
pi().published = True
b.open(
'http://localhost/++skin++vivi/repository/online/2007/01/'
'island')
self.assertEllipsis('...<a...island/@@retract...', b.contents)
| import mock
import zeit.cms.testing
import zeit.content.cp
import zope.testbrowser.testing
class PermissionsTest(zeit.cms.testing.BrowserTestCase):
layer = zeit.content.cp.testing.layer
def setUp(self):
super(PermissionsTest, self).setUp()
zeit.content.cp.browser.testing.create_cp(self.browser)
self.browser.getLink('Checkin').click()
self.producing = zope.testbrowser.testing.Browser()
self.producing.addHeader('Authorization', 'Basic producer:producerpw')
def test_normal_user_may_not_delete(self):
b = self.browser
b.open(
'http://localhost/++skin++vivi/repository/online/2007/01/island')
self.assertNotEllipsis('...<a...island/@@delete.html...', b.contents)
def test_producing_may_delete(self):
b = self.producing
b.open(
'http://localhost/++skin++vivi/repository/online/2007/01/island')
self.assertEllipsis('...<a...island/@@delete.html...', b.contents)
def test_normal_user_may_not_retract(self):
b = self.browser
with mock.patch('zeit.cms.workflow.interfaces.IPublishInfo') as pi:
pi().published = True
b.open(
'http://localhost/++skin++vivi/repository/online/2007/01/'
'island')
self.assertNotEllipsis('...<a...island/@@retract...', b.contents)
def test_producing_may_retract(self):
b = self.producing
with mock.patch('zeit.cms.workflow.interfaces.IPublishInfo') as pi:
pi().published = True
b.open(
'http://localhost/++skin++vivi/repository/online/2007/01/'
'island')
self.assertEllipsis('...<a...island/@@retract...', b.contents)
| Python | 0 |
fed1475564f7ca8a496d50446e4e5924befe8628 | Update function output type annotation | tensorflow/core/function/capture/free_vars_detect.py | tensorflow/core/function/capture/free_vars_detect.py | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An independent module to detect free vars inside a function."""
import types
from typing import List
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import naming
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.util import tf_inspect
def _parse_and_analyze(func):
"""Parse and analyze Python Function code."""
node, source = parser.parse_entity(func, future_features=())
node = qual_names.resolve(node)
entity_info = transformer.EntityInfo(
name=func.__name__,
source_code=source,
source_file=None,
future_features=(),
namespace={})
namer = naming.Namer({})
ctx = transformer.Context(entity_info, namer, None)
node = activity.resolve(node, ctx)
return node
def detect_function_free_vars(func: types.FunctionType) -> List[str]:
"""Detect free vars in any Python function."""
assert isinstance(
func, types.FunctionType
), f"The input should be of Python function type. Got type: {type(func)}."
node = _parse_and_analyze(func)
scope = anno.getanno(node, anno.Static.SCOPE)
free_vars_all = list(scope.free_vars)
globals_dict = func.__globals__
filtered = []
for var in free_vars_all:
base = str(var.qn[0])
if base in globals_dict:
obj = globals_dict[base]
if tf_inspect.ismodule(obj):
continue
if (tf_inspect.isclass(obj) or
tf_inspect.ismethod(obj) or
tf_inspect.isfunction(obj)):
if obj.__module__ != func.__module__:
continue
# Only keep free vars without subscript for simplicity
if not var.has_subscript():
filtered.append(str(var))
else:
if not var.has_subscript():
filtered.append(str(var))
return sorted(filtered)
| # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An independent module to detect free vars inside a function."""
import types
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import naming
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.util import tf_inspect
def _parse_and_analyze(func):
"""Parse and analyze Python Function code."""
node, source = parser.parse_entity(func, future_features=())
node = qual_names.resolve(node)
entity_info = transformer.EntityInfo(
name=func.__name__,
source_code=source,
source_file=None,
future_features=(),
namespace={})
namer = naming.Namer({})
ctx = transformer.Context(entity_info, namer, None)
node = activity.resolve(node, ctx)
return node
def detect_function_free_vars(
func: types.FunctionType) -> tuple[list[str], list[str], list[int]]:
"""Detect free vars in any Python function."""
assert isinstance(
func, types.FunctionType
), f"The input should be of Python function type. Got type: {type(func)}."
node = _parse_and_analyze(func)
scope = anno.getanno(node, anno.Static.SCOPE)
free_vars_all = list(scope.free_vars)
globals_dict = func.__globals__
filtered = []
for var in free_vars_all:
base = str(var.qn[0])
if base in globals_dict:
obj = globals_dict[base]
if tf_inspect.ismodule(obj):
continue
if (tf_inspect.isclass(obj) or
tf_inspect.ismethod(obj) or
tf_inspect.isfunction(obj)):
if obj.__module__ != func.__module__:
continue
# Only keep free vars without subscript for simplicity
if not var.has_subscript():
filtered.append(str(var))
else:
if not var.has_subscript():
filtered.append(str(var))
return sorted(filtered)
| Python | 0.000007 |
cbfbc2dbeeb8a03cd96ef2756185099a9be9b714 | Update data_provider_test.py | tensorflow_gan/examples/esrgan/data_provider_test.py | tensorflow_gan/examples/esrgan/data_provider_test.py | # coding=utf-8
# Copyright 2021 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfgan.examples.esrgan.data_provider"""
import collections
from absl.testing import absltest
import tensorflow as tf
import data_provider
hparams = collections.namedtuple('hparams', ['hr_dimension',
'scale',
'batch_size',
'data_dir'])
class DataProviderTest(tf.test.TestCase, absltest.TestCase):
def setUp(self):
super(DataProviderTest, self).setUp()
self.hparams = hparams(256, 4, 32, '/content/')
self.dataset = data_provider.get_div2k_data(self.hparams)
self.mock_lr = tf.random.normal([32, 64, 64, 3])
self.mock_hr = tf.random.normal([32, 256, 256, 3])
def test_dataset(self):
self.assertIsInstance(self.dataset, tf.data.Dataset)
with self.cached_session() as sess:
lr_image, hr_image = next(iter(self.dataset))
sess.run(tf.compat.v1.global_variables_initializer())
self.assertEqual(type(self.mock_lr), type(lr_image))
self.assertEqual(self.mock_lr.shape, lr_image.shape)
self.assertEqual(type(self.mock_hr), type(hr_image))
self.assertEqual(self.mock_hr.shape, hr_image.shape)
if __name__ == '__main__':
tf.test.main()
| # coding=utf-8
# Copyright 2021 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfgan.examples.esrgan.data_provider"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import numpy as np
import tensorflow as tf
import data_provider
import collections
Params = collections.namedtuple('HParams', ['hr_dimension',
'scale',
'batch_size',
'data_dir'])
class DataProviderTest(tf.test.TestCase, absltest.TestCase):
def setUp(self):
super(DataProviderTest, self).setUp()
self.HParams = Params(256, 4, 32, '/content/')
self.dataset = data_provider.get_div2k_data(self.HParams)
self.mock_lr = tf.random.normal([32, 64, 64, 3])
self.mock_hr = tf.random.normal([32, 256, 256, 3])
def test_dataset(self):
with self.cached_session() as sess:
self.assertIsInstance(self.dataset, tf.data.Dataset)
lr_image, hr_image = next(iter(self.dataset))
sess.run(tf.compat.v1.global_variables_initializer())
self.assertEqual(type(self.mock_lr), type(lr_image))
self.assertEqual(self.mock_lr.shape, lr_image.shape)
self.assertEqual(type(self.mock_hr), type(hr_image))
self.assertEqual(self.mock_hr.shape, hr_image.shape)
if __name__ == '__main__':
tf.test.main()
| Python | 0 |
ef0d0fa26bfd22c281c54bc348877afd0a7ee9d7 | Use regex to match user metrics | tests/integration/blueprints/metrics/test_metrics.py | tests/integration/blueprints/metrics/test_metrics.py | """
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import re
import pytest
# To be overridden by test parametrization
@pytest.fixture
def config_overrides():
return {}
@pytest.fixture
def client(admin_app, config_overrides, make_admin_app):
app = make_admin_app(**config_overrides)
with app.app_context():
yield app.test_client()
@pytest.mark.parametrize('config_overrides', [{'METRICS_ENABLED': True}])
def test_metrics(client):
response = client.get('/metrics')
assert response.status_code == 200
assert response.content_type == 'text/plain; version=0.0.4; charset=utf-8'
assert response.mimetype == 'text/plain'
# Not a full match as there can be other metrics, too.
regex = re.compile(
'users_active_count \\d+\n'
'users_uninitialized_count \\d+\n'
'users_suspended_count \\d+\n'
'users_deleted_count \\d+\n'
'users_total_count \\d+\n'
)
assert regex.search(response.get_data(as_text=True)) is not None
@pytest.mark.parametrize('config_overrides', [{'METRICS_ENABLED': False}])
def test_disabled_metrics(client):
response = client.get('/metrics')
assert response.status_code == 404
| """
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import pytest
# To be overridden by test parametrization
@pytest.fixture
def config_overrides():
return {}
@pytest.fixture
def client(admin_app, config_overrides, make_admin_app):
app = make_admin_app(**config_overrides)
with app.app_context():
yield app.test_client()
@pytest.mark.parametrize('config_overrides', [{'METRICS_ENABLED': True}])
def test_metrics(client):
response = client.get('/metrics')
assert response.status_code == 200
assert response.content_type == 'text/plain; version=0.0.4; charset=utf-8'
assert response.mimetype == 'text/plain'
assert response.get_data(as_text=True) == (
'users_active_count 0\n'
'users_uninitialized_count 0\n'
'users_suspended_count 0\n'
'users_deleted_count 0\n'
'users_total_count 0\n'
)
@pytest.mark.parametrize('config_overrides', [{'METRICS_ENABLED': False}])
def test_disabled_metrics(client):
response = client.get('/metrics')
assert response.status_code == 404
| Python | 0.000006 |
e71bbe6dff52d7977332ee37ffa9df5505173a0c | Use sets like they should be, and finish up changes for the night | test_utils/management/commands/relational_dumpdata.py | test_utils/management/commands/relational_dumpdata.py | from django.core.management.base import BaseCommand, CommandError
from django.core import serializers
from optparse import make_option
from django.db.models.fields.related import ForeignKey, ManyToManyField
from django.db.models import get_app, get_apps, get_models
def _relational_dumpdata(app, collected):
objects = []
for mod in get_models(app):
objects.extend(mod._default_manager.all())
#Got models, now get their relationships.
#Thanks to http://www.djangosnippets.org/snippets/918/
related = []
collected.add([(x.__class__, x.pk) for x in objects]) #Just used to track already gotten models
for obj in objects:
for f in obj._meta.fields :
if isinstance(f, ForeignKey):
new = getattr(obj, f.name) # instantiate object
if new and not (new.__class__, new.pk) in collected:
collected.add((new.__class__, new.pk))
related.append(new)
for f in obj._meta.many_to_many:
if isinstance(f, ManyToManyField):
for new in getattr(obj, f.name).all():
if new and not (new.__class__, new.pk) in collected:
collected.add((new.__class__, new.pk))
related.append(new)
if related != []:
objects.extend(related)
return (objects, collected)
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--format', default='json', dest='format',
help='Specifies the output serialization format for fixtures.'),
make_option('--indent', default=None, dest='indent', type='int',
help='Specifies the indent level to use when pretty-printing output'),
make_option('-e', '--exclude', dest='exclude',action='append', default=[],
help='App to exclude (use multiple --exclude to exclude multiple apps).'),
)
help = 'Output the contents of the database as a fixture of the given format.'
args = '[appname ...]'
def handle(self, *app_labels, **options):
format = options.get('format','json')
indent = options.get('indent',None)
exclude = options.get('exclude',[])
show_traceback = options.get('traceback', False)
excluded_apps = [get_app(app_label) for app_label in exclude]
if len(app_labels) == 0:
app_list = [app for app in get_apps() if app not in excluded_apps]
else:
app_list = [get_app(app_label) for app_label in app_labels]
# Check that the serialization format exists; this is a shortcut to
# avoid collating all the objects and _then_ failing.
try:
serializers.get_serializer(format)
except KeyError:
raise CommandError("Unknown serialization format: %s" % format)
objects = []
collected = set()
for app in app_list: #Yey for ghetto recusion
objects, collected = _relational_dumpdata(app, collected)
#****End New stuff
try:
return serializers.serialize(format, objects, indent=indent)
except Exception, e:
if show_traceback:
raise
raise CommandError("Unable to serialize database: %s" % e)
| from django.core.management.base import BaseCommand, CommandError
from django.core import serializers
from optparse import make_option
from django.db.models.fields.related import ForeignKey, ManyToManyField
from django.db.models import get_app, get_apps, get_models
def _relational_dumpdata(app, collected):
objects = []
for mod in get_models(app):
objects.extend(mod._default_manager.all())
#Got models, now get their relationships.
#Thanks to http://www.djangosnippets.org/snippets/918/
related = []
collected.add(s for s in set([(x.__class__, x.pk) for x in objects])) #Just used to track already gotten models
for obj in objects:
for f in obj._meta.fields :
if isinstance(f, ForeignKey):
new = getattr(obj, f.name) # instantiate object
if new and not (new.__class__, new.pk) in collected:
collected.add((new.__class__, new.pk))
related.append(new)
for f in obj._meta.many_to_many:
if isinstance(f, ManyToManyField):
for new in getattr(obj, f.name).all():
if new and not (new.__class__, new.pk) in collected:
collected.add((new.__class__, new.pk))
related.append(new)
if related != []:
objects.extend(related)
return (objects, collected)
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--format', default='json', dest='format',
help='Specifies the output serialization format for fixtures.'),
make_option('--indent', default=None, dest='indent', type='int',
help='Specifies the indent level to use when pretty-printing output'),
make_option('-e', '--exclude', dest='exclude',action='append', default=[],
help='App to exclude (use multiple --exclude to exclude multiple apps).'),
)
help = 'Output the contents of the database as a fixture of the given format.'
args = '[appname ...]'
def handle(self, *app_labels, **options):
format = options.get('format','json')
indent = options.get('indent',None)
exclude = options.get('exclude',[])
show_traceback = options.get('traceback', False)
excluded_apps = [get_app(app_label) for app_label in exclude]
if len(app_labels) == 0:
app_list = [app for app in get_apps() if app not in excluded_apps]
else:
app_list = [get_app(app_label) for app_label in app_labels]
# Check that the serialization format exists; this is a shortcut to
# avoid collating all the objects and _then_ failing.
if format not in serializers.get_public_serializer_formats():
raise CommandError("Unknown serialization format: %s" % format)
try:
serializers.get_serializer(format)
except KeyError:
raise CommandError("Unknown serialization format: %s" % format)
objects = []
collected = set()
for app in app_list:
objects, collected = _relational_dumpdata(app, collected)
#****End New stuff
try:
return serializers.serialize(format, objects, indent=indent)
except Exception, e:
if show_traceback:
raise
raise CommandError("Unable to serialize database: %s" % e)
| Python | 0 |
cc1b63e76a88fd589bfe3fce2f6cbe5becf995bc | use no_backprop_mode | tests/links_tests/model_tests/vgg_tests/test_vgg16.py | tests/links_tests/model_tests/vgg_tests/test_vgg16.py | import unittest
import numpy as np
import chainer
from chainer.initializers import Zero
from chainer import testing
from chainer.testing import attr
from chainer import Variable
from chainercv.links import VGG16
@testing.parameterize(
{'pick': 'prob', 'shapes': (1, 200), 'n_class': 200},
{'pick': 'pool5', 'shapes': (1, 512, 7, 7), 'n_class': None},
{'pick': ['conv5_3', 'conv4_2'],
'shapes': ((1, 512, 14, 14), (1, 512, 28, 28)), 'n_class': None},
)
class TestVGG16Call(unittest.TestCase):
def setUp(self):
self.link = VGG16(
n_class=self.n_class, pretrained_model=None,
initialW=Zero())
self.link.pick = self.pick
def check_call(self):
xp = self.link.xp
x1 = Variable(xp.asarray(np.random.uniform(
-1, 1, (1, 3, 224, 224)).astype(np.float32)))
with chainer.no_backprop_mode():
features = self.link(x1)
if isinstance(features, tuple):
for activation, shape in zip(features, self.shapes):
self.assertEqual(activation.shape, shape)
else:
self.assertEqual(features.shape, self.shapes)
self.assertEqual(features.dtype, np.float32)
@attr.slow
def test_call_cpu(self):
self.check_call()
@attr.gpu
@attr.slow
def test_call_gpu(self):
self.link.to_gpu()
self.check_call()
testing.run_module(__name__, __file__)
| import unittest
import numpy as np
from chainer.initializers import Zero
from chainer import testing
from chainer.testing import attr
from chainer import Variable
from chainercv.links import VGG16
@testing.parameterize(
{'pick': 'prob', 'shapes': (1, 200), 'n_class': 200},
{'pick': 'pool5', 'shapes': (1, 512, 7, 7), 'n_class': None},
{'pick': ['conv5_3', 'conv4_2'],
'shapes': ((1, 512, 14, 14), (1, 512, 28, 28)), 'n_class': None},
)
class TestVGG16Call(unittest.TestCase):
def setUp(self):
self.link = VGG16(
n_class=self.n_class, pretrained_model=None,
initialW=Zero())
self.link.pick = self.pick
def check_call(self):
xp = self.link.xp
x1 = Variable(xp.asarray(np.random.uniform(
-1, 1, (1, 3, 224, 224)).astype(np.float32)))
features = self.link(x1)
if isinstance(features, tuple):
for activation, shape in zip(features, self.shapes):
self.assertEqual(activation.shape, shape)
else:
self.assertEqual(features.shape, self.shapes)
self.assertEqual(features.dtype, np.float32)
@attr.slow
def test_call_cpu(self):
self.check_call()
@attr.gpu
@attr.slow
def test_call_gpu(self):
self.link.to_gpu()
self.check_call()
testing.run_module(__name__, __file__)
| Python | 0.000005 |
b635d3bb0a0de01539d66dda4555b306c59082ee | fix version number | constant2/__init__.py | constant2/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from ._constant2 import Constant
except: # pragma: no cover
pass
__version__ = "0.0.10"
__short_description__ = "provide extensive way of managing your constant variable."
__license__ = "MIT"
__author__ = "Sanhe Hu"
__author_email__ = "husanhe@gmail.com"
__maintainer__ = "Sanhe Hu"
__maintainer_email__ = "husanhe@gmail.com"
__github_username__ = "MacHu-GWU"
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from ._constant2 import Constant
except: # pragma: no cover
pass
__version__ = "0.0.9"
__short_description__ = "provide extensive way of managing your constant variable."
__license__ = "MIT"
__author__ = "Sanhe Hu"
__author_email__ = "husanhe@gmail.com"
__maintainer__ = "Sanhe Hu"
__maintainer_email__ = "husanhe@gmail.com"
__github_username__ = "MacHu-GWU"
| Python | 0.000014 |
28b7b5f43d5206609f789a94c614d6811ae87cef | Fix call to resolve nick protection | txircd/modules/extra/services/account_nick_protect.py | txircd/modules/extra/services/account_nick_protect.py | from twisted.internet import reactor
from twisted.plugin import IPlugin
from txircd.config import ConfigValidationError
from txircd.module_interface import IModuleData, ModuleData
from txircd.utils import now
from zope.interface import implements
from weakref import WeakKeyDictionary
from datetime import timedelta
class AccountNickProtect(ModuleData):
implements(IPlugin, IModuleData)
name = "AccountNickProtect"
blockedNickChangeUsers = WeakKeyDictionary()
def actions(self):
return [ ("welcome", 1, self.checkNickOnConnect),
("changenick", 1, self.checkNickOnNickChange),
("quit", 1, self.cancelTimerOnQuit),
("commandpermission-NICK", 10, self.checkCanChangeNick) ]
def verifyConfig(self, config):
if "account_nick_protect_seconds" in config:
if not isinstance(config["account_nick_protect_seconds"], int) or config["account_nick_protect_seconds"] < 1:
raise ConfigValidationError("account_nick_protect_seconds", "invalid number")
if "account_nick_recover_seconds" in config:
if not isinstance(config["account_nick_recover_seconds"], int) or config["account_nick_recover_seconds"] < 1:
raise ConfigValidationError("account_nick_recover_seconds", "invalid number")
def checkNickOnConnect(self, user):
if not self.userSignedIntoNickAccount(user):
self.applyNickProtection(user)
def checkNickOnNickChange(self, user, oldNick, fromServer):
self.cancelOldProtectTimer(user)
if not self.userSignedIntoNickAccount(user):
self.applyNickProtection(user)
def cancelTimerOnQuit(self, user, reason, fromServer):
self.cancelOldProtectTimer(user)
def checkCanChangeNick(self, user, data):
if user not in self.blockedNickChangeUsers:
return None
if self.blockedNickChangeUsers[user] > now():
del self.blockedNickChangeUsers[user]
return None
user.sendMessage("NOTICE", "You can't change nicknames yet.")
return False
def applyNickProtection(self, user):
if user.uuid[:3] != self.ircd.serverID:
return
protectDelay = self.ircd.config.get("account_nick_protect_seconds", 30)
user.sendMessage("NOTICE", "The nickname you're using is owned by an account to which you are not identified. Please identify to that account or change your nick in the next \x02{}\x02 seconds.".format(protectDelay))
user.cache["accountNickProtectTimer"] = reactor.callLater(protectDelay, self.resolveNickProtection, user, user.nick)
def resolveNickProtection(self, user, nick):
if user.nick != nick:
return
if self.userSignedIntoNickAccount(user):
return
user.changeNick(user.uuid)
recoverSeconds = self.ircd.config.get("account_nick_recover_seconds", 10)
if recoverSeconds > 0:
recoveryTime = timedelta(seconds = recoverSeconds)
self.blockedNickChangeUsers[user] = now() + recoveryTime
def cancelOldProtectTimer(self, user):
if "accountNickProtectTimer" not in user.cache:
return
if user.cache["accountNickProtectTimer"].active:
user.cache["accountNickProtectTimer"].cancel()
del user.cache["accountNickProtectTimer"]
def userSignedIntoNickAccount(self, user):
accountName = self.ircd.runActionUntilValue("accountfromnick", user.nick)
if accountName is None:
return True # Nick applies to all accounts and no-account users
userAccount = user.metadataValue("account")
if userAccount == accountName:
return True
return False
accountNickProtect = AccountNickProtect() | from twisted.internet import reactor
from twisted.plugin import IPlugin
from txircd.config import ConfigValidationError
from txircd.module_interface import IModuleData, ModuleData
from txircd.utils import now
from zope.interface import implements
from weakref import WeakKeyDictionary
from datetime import timedelta
class AccountNickProtect(ModuleData):
implements(IPlugin, IModuleData)
name = "AccountNickProtect"
blockedNickChangeUsers = WeakKeyDictionary()
def actions(self):
return [ ("welcome", 1, self.checkNickOnConnect),
("changenick", 1, self.checkNickOnNickChange),
("quit", 1, self.cancelTimerOnQuit),
("commandpermission-NICK", 10, self.checkCanChangeNick) ]
def verifyConfig(self, config):
if "account_nick_protect_seconds" in config:
if not isinstance(config["account_nick_protect_seconds"], int) or config["account_nick_protect_seconds"] < 1:
raise ConfigValidationError("account_nick_protect_seconds", "invalid number")
if "account_nick_recover_seconds" in config:
if not isinstance(config["account_nick_recover_seconds"], int) or config["account_nick_recover_seconds"] < 1:
raise ConfigValidationError("account_nick_recover_seconds", "invalid number")
def checkNickOnConnect(self, user):
if not self.userSignedIntoNickAccount(user):
self.applyNickProtection(user)
def checkNickOnNickChange(self, user, oldNick, fromServer):
self.cancelOldProtectTimer(user)
if not self.userSignedIntoNickAccount(user):
self.applyNickProtection(user)
def cancelTimerOnQuit(self, user, reason, fromServer):
self.cancelOldProtectTimer(user)
def checkCanChangeNick(self, user, data):
if user not in self.blockedNickChangeUsers:
return None
if self.blockedNickChangeUsers[user] > now():
del self.blockedNickChangeUsers[user]
return None
user.sendMessage("NOTICE", "You can't change nicknames yet.")
return False
def applyNickProtection(self, user):
if user.uuid[:3] != self.ircd.serverID:
return
protectDelay = self.ircd.config.get("account_nick_protect_seconds", 30)
user.sendMessage("NOTICE", "The nickname you're using is owned by an account to which you are not identified. Please identify to that account or change your nick in the next \x02{}\x02 seconds.".format(protectDelay))
user.cache["accountNickProtectTimer"] = reactor.callLater(protectDelay, self.resolveNickProtection, user.nick)
def resolveNickProtection(self, user, nick):
if user.nick != nick:
return
if self.userSignedIntoNickAccount(user):
return
user.changeNick(user.uuid)
recoverSeconds = self.ircd.config.get("account_nick_recover_seconds", 10)
if recoverSeconds > 0:
recoveryTime = timedelta(seconds = recoverSeconds)
self.blockedNickChangeUsers[user] = now() + recoveryTime
def cancelOldProtectTimer(self, user):
if "accountNickProtectTimer" not in user.cache:
return
if user.cache["accountNickProtectTimer"].active:
user.cache["accountNickProtectTimer"].cancel()
del user.cache["accountNickProtectTimer"]
def userSignedIntoNickAccount(self, user):
accountName = self.ircd.runActionUntilValue("accountfromnick", user.nick)
if accountName is None:
return True # Nick applies to all accounts and no-account users
userAccount = user.metadataValue("account")
if userAccount == accountName:
return True
return False
accountNickProtect = AccountNickProtect() | Python | 0 |
770c4fd0b282ee355d2ea3e662786113dd6b4e74 | add 1.4.2 (#26472) | var/spack/repos/builtin/packages/py-nipype/package.py | var/spack/repos/builtin/packages/py-nipype/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyNipype(PythonPackage):
"""Neuroimaging in Python: Pipelines and Interfaces."""
homepage = "https://nipy.org/nipype"
pypi = "nipype/nipype-1.6.0.tar.gz"
version('1.6.1', sha256='8428cfc633d8e3b8c5650e241e9eedcf637b7969bcd40f3423334d4c6b0992b5')
version('1.6.0', sha256='bc56ce63f74c9a9a23c6edeaf77631377e8ad2bea928c898cc89527a47f101cf')
version('1.4.2', sha256='069dcbb0217f13af6ee5a7f1e58424b9061290a3e10d7027d73bf44e26f820db')
depends_on('python@3.6:', when='@1.5:', type=('build', 'run'))
depends_on('python@3.5:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-click@6.6:', type=('build', 'run'))
depends_on('py-networkx@2:', when='@1.6:', type=('build', 'run'))
depends_on('py-networkx@1.9:', type=('build', 'run'))
depends_on('py-nibabel@2.1:', type=('build', 'run'))
depends_on('py-numpy@1.15.3:', when='^python@3.7:', type=('build', 'run'))
depends_on('py-numpy@1.13:', when='@1.5: ^python@:3.6', type=('build', 'run'))
depends_on('py-numpy@1.12:', when='^python@:3.6', type=('build', 'run'))
depends_on('py-packaging', type=('build', 'run'))
depends_on('py-prov@1.5.2:', type=('build', 'run'))
depends_on('py-pydot@1.2.3:', type=('build', 'run'))
depends_on('py-pydotplus', when='@:1.5', type=('build', 'run'))
depends_on('py-python-dateutil@2.2:', type=('build', 'run'))
depends_on('py-rdflib@5:', when='@1.5:', type=('build', 'run'))
depends_on('py-scipy@0.14:', type=('build', 'run'))
depends_on('py-simplejson@3.8:', type=('build', 'run'))
depends_on('py-traits@4.6:4,5.1:', type=('build', 'run'))
depends_on('py-filelock@3:', type=('build', 'run'))
depends_on('py-etelemetry@0.2:', when='@1.5:', type=('build', 'run'))
depends_on('py-etelemetry', type=('build', 'run'))
depends_on('py-sphinxcontrib-napoleon', type='test')
| # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyNipype(PythonPackage):
"""Neuroimaging in Python: Pipelines and Interfaces."""
homepage = "https://nipy.org/nipype"
pypi = "nipype/nipype-1.6.0.tar.gz"
version('1.6.1', sha256='8428cfc633d8e3b8c5650e241e9eedcf637b7969bcd40f3423334d4c6b0992b5')
version('1.6.0', sha256='bc56ce63f74c9a9a23c6edeaf77631377e8ad2bea928c898cc89527a47f101cf')
depends_on('python@3.6:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-click@6.6.0:', type=('build', 'run'))
depends_on('py-networkx@2.0:', type=('build', 'run'))
depends_on('py-nibabel@2.1.0:', type=('build', 'run'))
depends_on('py-numpy@1.13:', type=('build', 'run'), when='^python@:3.6')
depends_on('py-numpy@1.15.3:', type=('build', 'run'), when='^python@3.7:')
depends_on('py-packaging', type=('build', 'run'))
depends_on('py-prov@1.5.2:', type=('build', 'run'))
depends_on('py-pydot@1.2.3:', type=('build', 'run'))
depends_on('py-python-dateutil@2.2:', type=('build', 'run'))
depends_on('py-rdflib@5.0.0:', type=('build', 'run'))
depends_on('py-scipy@0.14:', type=('build', 'run'))
depends_on('py-simplejson@3.8.0:', type=('build', 'run'))
depends_on('py-traits@4.6:4,5.1:', type=('build', 'run'))
depends_on('py-filelock@3.0.0:', type=('build', 'run'))
depends_on('py-etelemetry@0.2.0:', type=('build', 'run'))
| Python | 0.000001 |
63d4d37c9194aacd783e911452a34ca78a477041 | add latest version 1.2.0 (#23528) | var/spack/repos/builtin/packages/py-vermin/package.py | var/spack/repos/builtin/packages/py-vermin/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class PyVermin(PythonPackage):
"""Concurrently detect the minimum Python versions needed to run code."""
homepage = "https://github.com/netromdk/vermin"
url = "https://github.com/netromdk/vermin/archive/v1.2.0.tar.gz"
maintainers = ['netromdk']
version('1.2.0', sha256='a3ab6dc6608b859f301b9a77d5cc0d03335aae10c49d47a91b82be5be48c4f1f')
version('1.1.1', sha256='d13b2281ba16c9d5b0913646483771789552230a9ed625e2cd92c5a112e4ae80')
version('1.1.0', sha256='62d9f1b6694f50c22343cead2ddb6e2b007d24243fb583f61ceed7540fbe660b')
version('1.0.3', sha256='1503be05b55cacde1278a1fe55304d8ee889ddef8ba16e120ac6686259bec95c')
version('1.0.2', sha256='e999d5f5455e1116b366cd1dcc6fecd254c7ae3606549a61bc044216f9bb5b55')
version('1.0.1', sha256='c06183ba653b9d5f6687a6686da8565fb127fab035f9127a5acb172b7c445079')
version('1.0.0', sha256='e598e9afcbe3fa6f3f3aa894da81ccb3954ec9c0783865ecead891ac6aa57207')
version('0.10.5', sha256='00601356e8e10688c52248ce0acc55d5b45417b462d5aa6887a6b073f0d33e0b')
version('0.10.4', sha256='bd765b84679fb3756b26f462d2aab4af3183fb65862520afc1517f6b39dea8bf')
version('0.10.0', sha256='3458a4d084bba5c95fd7208888aaf0e324a07ee092786ee4e5529f539ab4951f')
depends_on('python@2.7:', type=('build', 'run'))
depends_on('py-setuptools', type=('build', 'run'))
@run_after('build')
@on_package_attributes(run_tests=True)
def build_test(self):
make('test')
| # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class PyVermin(PythonPackage):
"""Concurrently detect the minimum Python versions needed to run code."""
homepage = "https://github.com/netromdk/vermin"
url = "https://github.com/netromdk/vermin/archive/v1.1.1.tar.gz"
maintainers = ['netromdk']
version('1.1.1', sha256='d13b2281ba16c9d5b0913646483771789552230a9ed625e2cd92c5a112e4ae80')
version('1.1.0', sha256='62d9f1b6694f50c22343cead2ddb6e2b007d24243fb583f61ceed7540fbe660b')
version('1.0.3', sha256='1503be05b55cacde1278a1fe55304d8ee889ddef8ba16e120ac6686259bec95c')
version('1.0.2', sha256='e999d5f5455e1116b366cd1dcc6fecd254c7ae3606549a61bc044216f9bb5b55')
version('1.0.1', sha256='c06183ba653b9d5f6687a6686da8565fb127fab035f9127a5acb172b7c445079')
version('1.0.0', sha256='e598e9afcbe3fa6f3f3aa894da81ccb3954ec9c0783865ecead891ac6aa57207')
version('0.10.5', sha256='00601356e8e10688c52248ce0acc55d5b45417b462d5aa6887a6b073f0d33e0b')
version('0.10.4', sha256='bd765b84679fb3756b26f462d2aab4af3183fb65862520afc1517f6b39dea8bf')
version('0.10.0', sha256='3458a4d084bba5c95fd7208888aaf0e324a07ee092786ee4e5529f539ab4951f')
depends_on('python@2.7:', type=('build', 'run'))
depends_on('py-setuptools', type=('build', 'run'))
@run_after('build')
@on_package_attributes(run_tests=True)
def build_test(self):
make('test')
| Python | 0 |
28c9c0349e0f86fbdee8a02b46386e42dbe702a2 | fix conflict | oct_turrets/turret.py | oct_turrets/turret.py | import time
import json
import traceback
from oct_turrets.base import BaseTurret
from oct_turrets.canon import Canon
class Turret(BaseTurret):
"""This class represent the classic turret for oct
"""
def init_commands(self):
"""Initialize the basics commandes for the turret
"""
self.commands['start'] = self.run
self.commands['status_request'] = self.send_status
def send_status(self, msg=None):
"""Reply to the master by sending the current status
"""
if not self.already_responded:
print("responding to master")
reply = self.build_status_message()
self.result_collector.send_json(reply)
self.already_responded = True
def start(self):
"""Start the turret and wait for the master to run the test
"""
print("starting turret")
self.status = "Ready"
while self.start_loop:
payload = self.master_publisher.recv_string()
payload = json.loads(payload)
self.exec_command(payload)
def run(self, msg=None):
"""The main run method
"""
print("Starting tests")
self.start_time = time.time()
self.start_loop = False
self.status = 'running'
self.send_status()
if 'rampup' in self.config:
rampup = float(self.config['rampup']) / float(self.config['canons'])
else:
rampup = 0
last_insert = 0
if rampup > 0 and rampup < 1:
timeout = rampup * 1000
else:
timeout = 1000
try:
while self.run_loop:
if len(self.canons) < self.config['canons'] and time.time() - last_insert >= rampup:
canon = Canon(self.start_time, self.script_module, self.uuid)
canon.daemon = True
self.canons.append(canon)
canon.start()
last_insert = time.time()
socks = dict(self.poller.poll(timeout))
if self.master_publisher in socks:
data = self.master_publisher.recv_string()
data = json.loads(data)
if 'command' in data and data['command'] == 'stop': # not managed, must break the loop
print("Exiting loop, premature stop")
self.run_loop = False
break
if self.local_result in socks:
results = self.local_result.recv_json()
results['turret_name'] = self.config['name']
self.result_collector.send_json(results)
for i in self.canons:
i.run_loop = False
for i in self.canons:
i.join()
except (Exception, RuntimeError, KeyboardInterrupt) as e:
self.status = "Aborted"
print(e)
self.send_status()
traceback.print_exc()
# data = self.build_status_message()
# self.result_collector.send_json(data)
# self.start_loop = True
# self.already_responded = False
def stop(self, msg=None):
"""The main stop method
"""
pass
| import time
import json
from oct_turrets.base import BaseTurret
from oct_turrets.canon import Canon
class Turret(BaseTurret):
"""This class represent the classic turret for oct
"""
def init_commands(self):
"""Initialize the basics commandes for the turret
"""
self.commands['start'] = self.run
self.commands['status_request'] = self.send_status
def send_status(self, msg=None):
"""Reply to the master by sending the current status
"""
if not self.already_responded:
print("responding to master")
reply = self.build_status_message()
self.result_collector.send_json(reply)
self.already_responded = True
def start(self):
"""Start the turret and wait for the master to run the test
"""
print("starting turret")
self.status = "Ready"
while self.start_loop:
payload = self.master_publisher.recv_string()
payload = json.loads(payload)
self.exec_command(payload)
def run(self, msg=None):
"""The main run method
"""
print("Starting tests")
self.start_time = time.time()
self.start_loop = False
self.status = 'running'
self.send_status()
if 'rampup' in self.config:
rampup = float(self.config['rampup']) / float(self.config['canons'])
else:
rampup = 0
last_insert = 0
print(rampup)
if rampup > 0 and rampup < 1:
timeout = rampup * 1000
else:
timeout = 1000
try:
while self.run_loop:
if len(self.canons) < self.config['canons'] and time.time() - last_insert >= rampup:
canon = Canon(self.start_time, self.script_module, self.uuid)
canon.daemon = True
self.canons.append(canon)
canon.start()
last_insert = time.time()
print(len(self.canons))
socks = dict(self.poller.poll(timeout))
if self.master_publisher in socks:
data = self.master_publisher.recv_string()
data = json.loads(data)
if 'command' in data and data['command'] == 'stop': # not managed, must break the loop
print("Exiting loop, premature stop")
self.run_loop = False
break
if self.local_result in socks:
results = self.local_result.recv_json()
results['turret_name'] = self.config['name']
self.result_collector.send_json(results)
for i in self.canons:
i.run_loop = False
for i in self.canons:
i.join()
except (Exception, RuntimeError, KeyboardInterrupt) as e:
self.status = "Aborted"
print(e)
self.send_status()
# data = self.build_status_message()
# self.result_collector.send_json(data)
# self.start_loop = True
# self.already_responded = False
def stop(self, msg=None):
"""The main stop method
"""
pass
| Python | 0.031708 |
c0627c6d8d11a9b9597b8fecd10b562d46a71521 | Send fio results to fio.sc.couchbase.com | perfrunner/tests/fio.py | perfrunner/tests/fio.py | from collections import defaultdict
import requests
from logger import logger
from perfrunner.helpers.misc import pretty_dict
from perfrunner.helpers.remote import RemoteHelper
from perfrunner.tests import PerfTest
class FIOTest(PerfTest):
TRACKER = 'fio.sc.couchbase.com'
TEMPLATE = {
'group': '{}, random mixed reads and writes, IOPS',
'metric': None,
'value': None,
}
def __init__(self, cluster_spec, test_config, verbose):
self.cluster_spec = cluster_spec
self.test_config = test_config
self.remote = RemoteHelper(cluster_spec, test_config, verbose)
@staticmethod
def _parse(results):
"""Terse output parsing is based on the following guide:
https://github.com/axboe/fio/blob/master/HOWTO
"""
stats = defaultdict(int)
for host, output in results.items():
for job in output.split():
stats[host] += int(job.split(';')[7]) # reads
stats[host] += int(job.split(';')[48]) # writes
return stats
def _post(self, data):
data = pretty_dict(data)
logger.info('Posting: {}'.format(data))
requests.post('http://{}/api/v1/benchmarks'.format(self.TRACKER),
data=data)
def _report_kpi(self, stats):
for host, iops in stats.items():
data = self.TEMPLATE.copy()
data['group'] = data['group'].format(self.cluster_spec.name.title())
data['metric'] = host
data['value'] = iops
self._post(data)
def run(self):
stats = self.remote.fio(self.test_config.fio['config'])
self._report_kpi(self._parse(stats))
| from collections import defaultdict
from logger import logger
from perfrunner.helpers.misc import pretty_dict
from perfrunner.helpers.remote import RemoteHelper
from perfrunner.tests import PerfTest
class FIOTest(PerfTest):
def __init__(self, cluster_spec, test_config, verbose):
self.cluster_spec = cluster_spec
self.test_config = test_config
self.remote = RemoteHelper(cluster_spec, test_config, verbose)
@staticmethod
def _parse(results):
"""Terse output parsing is based on the following guide:
https://github.com/axboe/fio/blob/master/HOWTO
"""
stats = defaultdict(int)
for host, output in results.items():
for job in output.split():
stats[host] += int(job.split(';')[7]) # reads
stats[host] += int(job.split(';')[48]) # writes
return stats
def run(self):
stats = self.remote.fio(self.test_config.fio['config'])
logger.info('IOPS: {}'.format(pretty_dict(self._parse(stats))))
| Python | 0 |
148b3db081ef85a6ac0e6d20a65fa05cdf005ad8 | Allow editing of existing patches | Plist-To-Patch.py | Plist-To-Patch.py | import plistlib
import os
import sys
import time
script_path = os.path.dirname(os.path.realpath(__file__))
def create_patch(add_plist = {}, rem_plist = {}, desc = ""):
new_plist = { "Add": add_plist, "Remove": rem_plist, "Description": desc }
return new_plist
def grab(prompt):
if sys.version_info >= (3, 0):
return input(prompt)
else:
return str(raw_input(prompt))
# OS Independent clear method
def cls():
os.system('cls' if os.name=='nt' else 'clear')
# Set Windows color to blue background, white foreground
if os.name=='nt':
os.system('COLOR 17')
def check_path(path):
# Add os checks for path escaping/quote stripping
if os.name == 'nt':
# Windows - remove quotes
path = path.replace('"', "")
else:
# Unix - remove quotes and space-escapes
path = path.replace("\\", "").replace('"', "")
# Remove trailing space if drag and dropped
if path[len(path)-1:] == " ":
path = path[:-1]
# Expand tilde
path = os.path.expanduser(path)
if not os.path.exists(path):
print("That file doesn't exist!")
return None
return path
def main():
add_plist = None
rem_plist = None
desc = ""
name = ""
existing_add = None
existing_rem = None
existing_des = None
cls()
print("This script can help make plist patches for Plist-Tool")
print("Each entry must have at least an Add or Remove section")
print("and a description.")
print(" ")
print("To leave a section empty (eg you're adding but not")
print("removing), simply press enter at the prompt.")
print(" ")
print("If you'd like to edit an existing patch, drag and")
print("drop it on the chat now - if you'd like to create")
edit_plist = grab("a new patch, just press enter: ")
print(" ")
if not edit_plist == "":
# We need to edit
edit_plist = check_path(edit_plist)
if not edit_plist:
exit(1)
test_plist = plistlib.readPlist(edit_plist)
existing_add = test_plist["Add"]
existing_rem = test_plist["Remove"]
existing_des = test_plist["Description"]
add_plist = grab("Please select the plist containing the information to add: ")
if not add_plist == "":
add_plist = check_path(add_plist)
if not add_plist:
exit(1)
add_plist = plistlib.readPlist(add_plist)
else:
add_plist = {}
if add_plist == {} and existing_add:
add_plist = existing_add
print(" ")
rem_plist = grab("Please select the plist containing the information to remove: ")
if not rem_plist == "":
rem_plist = check_path(rem_plist)
if not rem_plist:
exit(1)
rem_plist = plistlib.readPlist(rem_plist)
else:
rem_plist = {}
if rem_plist == {} and existing_rem:
rem_plist = existing_rem
print(" ")
if add_plist == {} and rem_plist == {}:
print("You need to at least add or remove something...")
exit(1)
while desc == "":
desc = grab("Please enter the description for the patch: ")
if existing_des and desc == "":
break
if desc == "" and existing_des:
desc = existing_des
plist_patch = create_patch(add_plist, rem_plist, desc)
if not plist_patch:
print("Something went wrong!")
exit(1)
print(" ")
if edit_plist == "":
while name == "":
name = grab("Please enter the name for your patch - It will be \nlocated in the same directory as this script: ")
if not name.lower().endswith(".plist"):
name = name + ".plist"
if os.path.exists(script_path + "/" + name):
print("That file already exists...\n")
name = ""
print("\nWriting plist...\n")
plistlib.writePlist(plist_patch, script_path + "/" + name)
else:
print("\nWriting plist...\n")
plistlib.writePlist(plist_patch, edit_plist)
print("Done!\n\n")
again = grab("Work on another patch? (y/n): ")
if again[:1].lower() == "y":
main()
else:
exit(0)
main() | import plistlib
import os
import sys
import time
script_path = os.path.dirname(os.path.realpath(__file__))
def create_patch(add, remove, description):
if add == "":
add_plist = {}
else:
add_plist = plistlib.readPlist(add)
if remove == "":
rem_plist = {}
else:
rem_plist = plistlib.readPlist(remove)
try:
new_plist = { "Add": add_plist, "Remove": rem_plist, "Description": desc }
except Exception:
new_plist = None
return new_plist
def grab(prompt):
if sys.version_info >= (3, 0):
return input(prompt)
else:
return str(raw_input(prompt))
# OS Independent clear method
def cls():
os.system('cls' if os.name=='nt' else 'clear')
# Set Windows color to blue background, white foreground
if os.name=='nt':
os.system('COLOR 17')
def check_path(path):
# Add os checks for path escaping/quote stripping
if os.name == 'nt':
# Windows - remove quotes
path = path.replace('"', "")
else:
# Unix - remove quotes and space-escapes
path = path.replace("\\", "").replace('"', "")
# Remove trailing space if drag and dropped
if path[len(path)-1:] == " ":
path = path[:-1]
# Expand tilde
path = os.path.expanduser(path)
if not os.path.exists(path):
print("That file doesn't exist!")
return None
return path
add_plist = None
rem_plist = None
desc = ""
name = ""
cls()
print("This script can help make plist patches for Plist-Tool")
print("Each entry must have at least an Add or Remove section")
print("and a description.")
print(" ")
print("To leave a section empty (eg you're adding but not")
print("removing), simply press enter at the prompt.")
print(" ")
add_plist = grab("Please select the plist containing the information to add: ")
if not add_plist == "":
add_plist = check_path(add_plist)
if not add_plist:
exit(1)
print(" ")
rem_plist = grab("Please select the plist containing the information to remove: ")
if not rem_plist == "":
rem_plist = check_path(rem_plist)
if not rem_plist:
exit(1)
print(" ")
if add_plist == "" and rem_plist == "":
print("You need to at least add or remove something...")
exit(1)
while desc == "":
desc = grab("Please enter the description for the patch: ")
plist_patch = create_patch(add_plist, rem_plist, desc)
if not plist_patch:
print("Something went wrong!")
exit(1)
print(" ")
while name == "":
name = grab("Please enter the name for your patch - It will be \nlocated in the same directory as this script: ")
if not name.lower().endswith(".plist"):
name = name + ".plist"
if os.path.exists(script_path + "/" + name):
print("That file already exists...\n")
name = ""
print("\nWriting plist...\n")
plistlib.writePlist(plist_patch, script_path + "/" + name)
print("Done!\n\n")
grab("Press enter to exit...")
exit(0)
| Python | 0 |
69038348a0e029d2b06c2753a0dec9b2552ed820 | Add license header to __init__.py | openquake/__init__.py | openquake/__init__.py | """
OpenGEM is an open-source platform for the calculation of hazard, risk,
and socio-economic impact. It is a project of the Global Earthquake Model,
nd may be extended by other organizations to address additional classes
of peril.
For more information, please see the website at http://www.globalquakemodel.org
This software may be downloaded at http://github.com/gem/openquake
The continuous integration server is at http://openquake.globalquakemodel.org
Up-to-date sphinx documentation is at http://openquake.globalquakemodel.org/docs
This software is licensed under the LGPL license, for more details
please see the LICENSE file.
Copyright (c) 2010, GEM Foundation.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
| """
OpenGEM is an open-source platform for the calculation of hazard, risk,
and socio-economic impact. It is a project of the Global Earthquake Model,
nd may be extended by other organizations to address additional classes
of peril.
For more information, please see the website at http://www.globalquakemodel.org
This software may be downloaded at http://github.com/gem/openquake
The continuous integration server is at http://openquake.globalquakemodel.org
Up-to-date sphinx documentation is at http://openquake.globalquakemodel.org/docs
This software is licensed under the LGPL license, for more details
please see the LICENSE file.
Copyright (c) 2010, GEM Foundation.
"""
| Python | 0.000018 |
9815f575f882ac54f4633a2899513dc492bd47e3 | Update optimizeHyper.py | oppa/optimizeHyper.py | oppa/optimizeHyper.py | import numpy as np
import math
#from macs.learnMACSparam import run as learnMACparam
from BayesianOptimization.bayes_opt.bayesian_optimization import BayesianOptimization
#function for testing Bayesian optimization.
def test_function(y):
return (y-50)**2
def optimized_function(function, error, *param):
"""
this function will wrap black box function for bayesian optimization.
now, we denote vector like X = { x1, x2, ... , xn } to capitalize
like this A = { a1, a2, ... , an }. so in our concept, X is a
input parameter of peak calling algorithms and y=f(X) is error rate
about those input. we can make abstraction of that functions.
:param function:
the function which you want to wrap.
e. g. : in MACS, can f : macs.learnMACSparam.run()
:param error:
result error rate of input some parameter vector X = {p1,p2,p3}
e. g. : in MACS, can X = { q value, mfold }
:param param:
input parameters or parameter that it will be input to some Peak
detection algorithm. it is denoted X = { p1, p2, p3 }
:return:
"""
return None
def run(function, Param_bound, init_point):
"""
Doing Bayesian optimization with some function. the function is just process that
input the file and parameter to Peak Detection algorithm and get some error.
X : parameters
Y : errors
f : X -> Y
in MACS case, for example, run(args) in learnMACSparam.py can be optimized-function.
or you can wrap that function. ( f : learnMACSparam.run() . . . etc )
and also, because we use "BayesianOptimization" module if you update or modify or read
this code, you should keep refer that module.
:param function:
function will be optimized.
:param Param_bound:
this Parameter would be boundary of parameter will be learned. and it must be
python tuple : (min,max)
:param init_point:
this parameter decide number of sample which randomly generated for first state.
:return:
"""
optimizer = BayesianOptimization(function, Param_bound, init_point)
"""
In the Bayesian Optimization Class`s field :
keys : Error Value of each parameters ( List : [] )
dim : number of parameters ( int : 0-n )
bounds : boundary of parameters to List ( List : [] )
X : Numpy array place holders ( Numpy Array )
Y : Numpy array place holders ( Numpy Array )
gp : Class Object of GaussianProcessRegressor ,
set the Kernel function and others ( Class GaussianProcessRegressor )
util : choose your utility function ( function )
res : output (result) dictionary . field is
self.res['max'] = {'max_val': None, 'max_params': None}
self.res['all'] = {'values': [], 'params': []}
and etc...
and you can do optimization by maximize() Method,
you can initialize data frame ( table of data structure ) by initialize_df method.
"""
optimizer.init(init_point)
optimizer.maximize(Param_bound, acq = 'ei')
optimizer.points_to_csv("result")
#code for test you just run this script
# number of random generate sample.
init_point = 3
Param_bound = {'y' : (0, 100.0)}
run(test_function, Param_bound, init_point)
| import numpy as np
import math
#from macs.learnMACSparam import run as learnMACparam
from BayesianOptimization.bayes_opt.bayesian_optimization import BayesianOptimization
#function for testing Bayesian optimization.
def test_function(y):
return (y-50)**2
def optimized_function(function, error, *param):
"""
this function will wrap black box function for bayesian optimization.
now, we denote vector like X = { x1, x2, ... , xn } to capitalize
like this A = { a1, a2, ... , an }. so in our concept, X is a
input parameter of peak calling algorithms and y=f(X) is error rate
about those input. we can make abstraction of that functions.
:param function:
the function which you want to wrap.
e. g. : in MACS, can f : macs.learnMACSparam.run()
:param error:
result error rate of input some parameter vector X = {p1,p2,p3}
e. g. : in MACS, can X = { q value, mfold }
:param param:
input parameters or parameter that it will be input to some Peak
detection algorithm. it is denoted X = { p1, p2, p3 }
:return:
"""
return None
def run(function, Param_bound, init_point):
"""
Doing Bayesian optimization with some function. the function is just process that
input the file and parameter to Peak Detection algorithm and get some error.
X : parameters
Y : errors
f : X -> Y
in MACS case, for example, run(args) in learnMACSparam.py can be optimized-function.
or you can wrap that function. ( f : learnMACSparam.run() . . . etc )
and also, because we use "BayesianOptimization" module if you update or modify or read
this code, you should keep refer that module.
:param function:
function will be optimized.
:param Param_bound:
this Parameter would be boundary of parameter will be learned. and it must be
python tuple : (min,max)
:return:
"""
optimizer = BayesianOptimization(function, Param_bound, init_point)
"""
In the Bayesian Optimization Class`s field :
keys : Error Value of each parameters ( List : [] )
dim : number of parameters ( int : 0-n )
bounds : boundary of parameters to List ( List : [] )
X : Numpy array place holders ( Numpy Array )
Y : Numpy array place holders ( Numpy Array )
gp : Class Object of GaussianProcessRegressor ,
set the Kernel function and others ( Class GaussianProcessRegressor )
util : choose your utility function ( function )
res : output (result) dictionary . field is
self.res['max'] = {'max_val': None, 'max_params': None}
self.res['all'] = {'values': [], 'params': []}
and etc...
and you can do optimization by maximize() Method,
you can initialize data frame ( table of data structure ) by initialize_df method.
"""
optimizer.init(init_point)
optimizer.maximize(Param_bound, acq = 'ei')
optimizer.points_to_csv("result")
#code for test you just run this script
# number of random generate sample.
init_point = 3
Param_bound = {'y' : (0, 100.0)}
run(test_function, Param_bound, init_point) | Python | 0.000001 |
bab1ad914ab9273aa8ab905edef2578b5c760f31 | add django installed apps init opps core | opps/core/__init__.py | opps/core/__init__.py | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
trans_app_label = _('Opps')
settings.INSTALLED_APPS += ('opps.article',
'opps.image',
'opps.channel',
'opps.source',
'redactor',
'tagging',)
settings.REDACTOR_OPTIONS = {'lang': 'en'}
settings.REDACTOR_UPLOAD = 'uploads/'
| # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
trans_app_label = _('Opps')
| Python | 0 |
fdea91164a145474d0ce093420aeace592cc57a6 | Update RESOURCE_TYPE_MAP and datacite format subjects | osf/metadata/utils.py | osf/metadata/utils.py | from website import settings
SUBJECT_SCHEME = 'bepress Digital Commons Three-Tiered Taxonomy'
RESOURCE_TYPE_MAP = {
'Audio/Video': 'Audiovisual',
'Dataset': 'Dataset',
'Image': 'Image',
'Model': 'Model',
'Software': 'Software',
'Book': 'Text',
'Funding Submission': 'Text',
'Journal Article': 'Text',
'Lesson': 'Text',
'Poster': 'Text',
'Preprint': 'Text',
'Presentation': 'Text',
'Research Tool': 'Text',
'Thesis': 'Text',
'Other': 'Text',
'(:unas)': 'Other'
}
def datacite_format_contributors(contributors):
"""
contributors_list: list of OSFUsers to format
returns: formatted json for datacite
"""
creators = []
for contributor in contributors:
name_identifiers = [
{
'nameIdentifier': contributor.absolute_url,
'nameIdentifierScheme': 'OSF',
'schemeURI': settings.DOMAIN
}
]
if contributor.external_identity.get('ORCID'):
verified = contributor.external_identity['ORCID'].values()[0] == 'VERIFIED'
if verified:
name_identifiers.append({
'nameIdentifier': contributor.external_identity['ORCID'].keys()[0],
'nameIdentifierScheme': 'ORCID',
'schemeURI': 'http://orcid.org/'
})
creators.append({
'creatorName': {
'creatorName': contributor.fullname,
'familyName': contributor.family_name,
'givenName': contributor.given_name
},
'nameIdentifiers': name_identifiers
})
return creators
def datacite_format_subjects(subjects):
return [
{
'subject': subject.bepress_subject.text if subject.bepress_subject else subject.text,
'subjectScheme': SUBJECT_SCHEME
}
for subject in subjects
]
def datacite_format_identifier(target):
identifier = target.get_identifier('doi')
if identifier:
return {
'identifier': identifier.value,
'identifierType': 'DOI'
}
def datacite_format_rights(license):
return {
'rights': license.name,
'rightsURI': license.url
}
| from website import settings
SUBJECT_SCHEME = 'bepress Digital Commons Three-Tiered Taxonomy'
RESOURCE_TYPE_MAP = {
'Audio/Video': 'Audiovisual',
'Dataset': 'Dataset',
'Image': 'Image',
'Model': 'Model',
'Software': 'Software',
'Book': 'Text',
'Funding Submission': 'Text',
'Journal Article': 'Text',
'Lesson': 'Text',
'Poster': 'Text',
'Preprint': 'Text',
'Presentation': 'Text',
'Research Tool': 'Text',
'Thesis': 'Text',
'Other': 'Text',
'(unas)': 'Other'
}
def datacite_format_contributors(contributors):
"""
contributors_list: list of OSFUsers to format
returns: formatted json for datacite
"""
creators = []
for contributor in contributors:
name_identifiers = [
{
'nameIdentifier': contributor.absolute_url,
'nameIdentifierScheme': 'OSF',
'schemeURI': settings.DOMAIN
}
]
if contributor.external_identity.get('ORCID'):
verified = contributor.external_identity['ORCID'].values()[0] == 'VERIFIED'
if verified:
name_identifiers.append({
'nameIdentifier': contributor.external_identity['ORCID'].keys()[0],
'nameIdentifierScheme': 'ORCID',
'schemeURI': 'http://orcid.org/'
})
creators.append({
'creatorName': {
'creatorName': contributor.fullname,
'familyName': contributor.family_name,
'givenName': contributor.given_name
},
'nameIdentifiers': name_identifiers
})
return creators
def datacite_format_subjects(subjects):
return [
{
'subject': subject,
'subjectScheme': SUBJECT_SCHEME
}
for subject in subjects
]
def datacite_format_identifier(target):
identifier = target.get_identifier('doi')
if identifier:
return {
'identifier': identifier.value,
'identifierType': 'DOI'
}
def datacite_format_rights(license):
return {
'rights': license.name,
'rightsURI': license.url
}
| Python | 0 |
ff60b8e2cb89e1c4d6c75af79d0199d142c6e2b4 | fix TypeError: memoryview: a bytes-like object is required, not 'str' | ouimeaux/subscribe.py | ouimeaux/subscribe.py | from collections import defaultdict
import logging
from xml.etree import cElementTree
from functools import partial
import gevent
from gevent.pywsgi import WSGIServer
from ouimeaux.utils import get_ip_address, requests_request
from ouimeaux.device.insight import Insight
from ouimeaux.device.maker import Maker
from ouimeaux.signals import subscription
from random import randint
log = logging.getLogger(__name__)
NS = "{urn:schemas-upnp-org:event-1-0}"
SUCCESS = '<html><body><h1>200 OK</h1></body></html>'
SUCCESS_BINARY = SUCCESS.encode()
class SubscriptionRegistry(object):
def __init__(self):
self._devices = {}
self._callbacks = defaultdict(list)
self.port = randint(8300, 8990)
def register(self, device):
if not device:
log.error("Received an invalid device: %r", device)
return
log.info("Subscribing to basic events from %r", device)
# Provide a function to register a callback when the device changes
# state
device.register_listener = partial(self.on, device, 'BinaryState')
self._devices[device.host] = device
self._resubscribe(device.basicevent.eventSubURL)
def _resubscribe(self, url, sid=None):
headers = {'TIMEOUT': 'Second-%d' % 1800}
if sid is not None:
headers['SID'] = sid
else:
host = get_ip_address()
headers.update({
"CALLBACK": '<http://%s:%d>'%(host, self.port),
"NT": "upnp:event"
})
response = requests_request(method="SUBSCRIBE", url=url,
headers=headers)
if response.status_code == 412 and sid:
# Invalid subscription ID. Send an UNSUBSCRIBE for safety and
# start over.
requests_request(method='UNSUBSCRIBE', url=url,
headers={'SID': sid})
return self._resubscribe(url)
timeout = int(response.headers.get('timeout', '1801').replace(
'Second-', ''))
sid = response.headers.get('sid', sid)
gevent.spawn_later(int(timeout * 0.75), self._resubscribe, url, sid)
def _handle(self, environ, start_response):
device = self._devices.get(environ['REMOTE_ADDR'])
if device is not None:
data = environ['wsgi.input'].read()
# trim garbage from end, if any
data = data.split("\n\n")[0]
doc = cElementTree.fromstring(data)
for propnode in doc.findall('./{0}property'.format(NS)):
for property_ in propnode.getchildren():
text = property_.text
if isinstance(device, Insight) and property_.tag=='BinaryState':
text = text.split('|')[0]
subscription.send(device, type=property_.tag, value=text)
self._event(device, property_.tag, text)
start_response('200 OK', [
('Content-Type', 'text/html'),
('Content-Length', str(len(SUCCESS))),
('Connection', 'close')
])
yield SUCCESS_BINARY
def _event(self, device, type_, value):
for t, callback in self._callbacks.get(device, ()):
if t == type_:
callback(value)
def on(self, device, type, callback):
self._callbacks[device].append((type, callback))
@property
def server(self):
"""
UDP server to listen for responses.
"""
server = getattr(self, "_server", None)
if server is None:
server = WSGIServer(('', self.port), self._handle, log=None)
self._server = server
return server
| from collections import defaultdict
import logging
from xml.etree import cElementTree
from functools import partial
import gevent
from gevent.pywsgi import WSGIServer
from ouimeaux.utils import get_ip_address, requests_request
from ouimeaux.device.insight import Insight
from ouimeaux.device.maker import Maker
from ouimeaux.signals import subscription
from random import randint
log = logging.getLogger(__name__)
NS = "{urn:schemas-upnp-org:event-1-0}"
SUCCESS = '<html><body><h1>200 OK</h1></body></html>'
class SubscriptionRegistry(object):
def __init__(self):
self._devices = {}
self._callbacks = defaultdict(list)
self.port = randint(8300, 8990)
def register(self, device):
if not device:
log.error("Received an invalid device: %r", device)
return
log.info("Subscribing to basic events from %r", device)
# Provide a function to register a callback when the device changes
# state
device.register_listener = partial(self.on, device, 'BinaryState')
self._devices[device.host] = device
self._resubscribe(device.basicevent.eventSubURL)
def _resubscribe(self, url, sid=None):
headers = {'TIMEOUT': 'Second-%d' % 1800}
if sid is not None:
headers['SID'] = sid
else:
host = get_ip_address()
headers.update({
"CALLBACK": '<http://%s:%d>'%(host, self.port),
"NT": "upnp:event"
})
response = requests_request(method="SUBSCRIBE", url=url,
headers=headers)
if response.status_code == 412 and sid:
# Invalid subscription ID. Send an UNSUBSCRIBE for safety and
# start over.
requests_request(method='UNSUBSCRIBE', url=url,
headers={'SID': sid})
return self._resubscribe(url)
timeout = int(response.headers.get('timeout', '1801').replace(
'Second-', ''))
sid = response.headers.get('sid', sid)
gevent.spawn_later(int(timeout * 0.75), self._resubscribe, url, sid)
def _handle(self, environ, start_response):
device = self._devices.get(environ['REMOTE_ADDR'])
if device is not None:
data = environ['wsgi.input'].read()
# trim garbage from end, if any
data = data.split("\n\n")[0]
doc = cElementTree.fromstring(data)
for propnode in doc.findall('./{0}property'.format(NS)):
for property_ in propnode.getchildren():
text = property_.text
if isinstance(device, Insight) and property_.tag=='BinaryState':
text = text.split('|')[0]
subscription.send(device, type=property_.tag, value=text)
self._event(device, property_.tag, text)
start_response('200 OK', [
('Content-Type', 'text/html'),
('Content-Length', str(len(SUCCESS))),
('Connection', 'close')
])
yield SUCCESS
def _event(self, device, type_, value):
for t, callback in self._callbacks.get(device, ()):
if t == type_:
callback(value)
def on(self, device, type, callback):
self._callbacks[device].append((type, callback))
@property
def server(self):
"""
UDP server to listen for responses.
"""
server = getattr(self, "_server", None)
if server is None:
server = WSGIServer(('', self.port), self._handle, log=None)
self._server = server
return server
| Python | 0.000294 |
f5275dba7285a97b05ac3abb756897ba75f119c5 | remove execute permission | p038_conut_and_say.py | p038_conut_and_say.py | #!/usr/bin/python
# -*- utf-8 -*-
class Solution:
def __init__(self, init='1'):
self._list = [init]
# @return a string
def countAndSay(self, n):
while len(self._list) < n:
self._list.append(self.say(self._list[-1]))
return self._list[n-1]
@staticmethod
def say(string):
ret = []
save = None
count = 1
for c in string:
if save is None:
save = c
elif c == save:
count += 1
else:
ret.append('%d%s' % (count, save))
save = c
count = 1
ret.append('%d%s' % (count, save))
return ''.join(ret)
if __name__ == '__main__':
solution = Solution()
print(solution.countAndSay(10))
print(solution._list)
| #!/usr/bin/python
# -*- utf-8 -*-
class Solution:
def __init__(self, init='1'):
self._list = [init]
# @return a string
def countAndSay(self, n):
while len(self._list) < n:
self._list.append(self.say(self._list[-1]))
return self._list[n-1]
@staticmethod
def say(string):
ret = []
save = None
count = 1
for c in string:
if save is None:
save = c
elif c == save:
count += 1
else:
ret.append('%d%s' % (count, save))
save = c
count = 1
ret.append('%d%s' % (count, save))
return ''.join(ret)
if __name__ == '__main__':
solution = Solution()
print(solution.countAndSay(10))
print(solution._list)
| Python | 0.000004 |
de29012d0bf48cf970ad37c62d7db960161f14c0 | Remove unused stat import | core/dovecot/start.py | core/dovecot/start.py | #!/usr/bin/python3
import os
import glob
import multiprocessing
import logging as log
import sys
from podop import run_server
from socrate import system, conf
log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
def start_podop():
os.setuid(8)
url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/dovecot/§"
run_server(0, "dovecot", "/tmp/podop.socket", [
("quota", "url", url ),
("auth", "url", url),
("sieve", "url", url),
])
# Actual startup script
os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
os.environ["REDIS_ADDRESS"] = system.get_host_address_from_environment("REDIS", "redis")
os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin")
os.environ["ANTISPAM_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM", "antispam:11334")
if os.environ["WEBMAIL"] != "none":
os.environ["WEBMAIL_ADDRESS"] = system.get_host_address_from_environment("WEBMAIL", "webmail")
for dovecot_file in glob.glob("/conf/*.conf"):
conf.jinja(dovecot_file, os.environ, os.path.join("/etc/dovecot", os.path.basename(dovecot_file)))
os.makedirs("/conf/bin", exist_ok=True)
for script_file in glob.glob("/conf/*.script"):
out_file = os.path.join("/conf/bin/", os.path.basename(script_file).replace('.script',''))
conf.jinja(script_file, os.environ, out_file)
os.chmod(out_file, 0o555)
# Run Podop, then postfix
multiprocessing.Process(target=start_podop).start()
os.system("chown mail:mail /mail")
os.system("chown -R mail:mail /var/lib/dovecot /conf")
os.execv("/usr/sbin/dovecot", ["dovecot", "-c", "/etc/dovecot/dovecot.conf", "-F"])
| #!/usr/bin/python3
import os
import stat
import glob
import multiprocessing
import logging as log
import sys
from podop import run_server
from socrate import system, conf
log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
def start_podop():
os.setuid(8)
url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/dovecot/§"
run_server(0, "dovecot", "/tmp/podop.socket", [
("quota", "url", url ),
("auth", "url", url),
("sieve", "url", url),
])
# Actual startup script
os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
os.environ["REDIS_ADDRESS"] = system.get_host_address_from_environment("REDIS", "redis")
os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin")
os.environ["ANTISPAM_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM", "antispam:11334")
if os.environ["WEBMAIL"] != "none":
os.environ["WEBMAIL_ADDRESS"] = system.get_host_address_from_environment("WEBMAIL", "webmail")
for dovecot_file in glob.glob("/conf/*.conf"):
conf.jinja(dovecot_file, os.environ, os.path.join("/etc/dovecot", os.path.basename(dovecot_file)))
os.makedirs("/conf/bin", exist_ok=True)
for script_file in glob.glob("/conf/*.script"):
out_file = os.path.join("/conf/bin/", os.path.basename(script_file).replace('.script',''))
conf.jinja(script_file, os.environ, out_file)
os.chmod(out_file, 0o555)
# Run Podop, then postfix
multiprocessing.Process(target=start_podop).start()
os.system("chown mail:mail /mail")
os.system("chown -R mail:mail /var/lib/dovecot /conf")
os.execv("/usr/sbin/dovecot", ["dovecot", "-c", "/etc/dovecot/dovecot.conf", "-F"])
| Python | 0 |
b61c907a49e20da6148828aa512c86b58c0312c1 | use the real index info in the sql pillow | corehq/pillows/sms.py | corehq/pillows/sms.py | from corehq.apps.change_feed import topics
from corehq.apps.change_feed.consumer.feed import KafkaChangeFeed
from corehq.elastic import get_es_new
from corehq.apps.sms.models import SMSLog
from corehq.pillows.mappings.sms_mapping import SMS_MAPPING, SMS_INDEX, SMS_META, SMS_TYPE
from dimagi.utils.decorators.memoized import memoized
from pillowtop.checkpoints.manager import PillowCheckpoint, PillowCheckpointEventHandler
from pillowtop.es_utils import ElasticsearchIndexInfo
from pillowtop.listener import AliasedElasticPillow
from pillowtop.pillow.interface import ConstructedPillow
from pillowtop.processors.elastic import ElasticProcessor
SMS_PILLOW_CHECKPOINT_ID = 'sql-sms-to-es'
SMS_PILLOW_KAFKA_CONSUMER_GROUP_ID = 'sql-sms-to-es'
ES_SMS_INDEX = SMS_INDEX
class SMSPillow(AliasedElasticPillow):
"""
Simple/Common Case properties Indexer
"""
document_class = SMSLog # while this index includes all users,
# I assume we don't care about querying on properties specfic to WebUsers
couch_filter = "sms/all_logs"
es_timeout = 60
es_alias = "smslogs"
es_type = SMS_TYPE
es_meta = SMS_META
es_index = ES_SMS_INDEX
default_mapping = SMS_MAPPING
@classmethod
@memoized
def calc_meta(cls):
#todo: actually do this correctly
"""
override of the meta calculator since we're separating out all the types,
so we just do a hash of the "prototype" instead to determined md5
"""
return cls.calc_mapping_hash({"es_meta": cls.es_meta, "mapping": cls.default_mapping})
def change_transport(self, doc_dict):
# SMS changes don't go to couch anymore. Let the SqlSMSPillow process
# changes from now on.
# Also, we explicitly need this to be a no-op because we're going to
# delete all sms from couch and don't want them to be deleted from
# elasticsearch.
return
def get_sql_sms_pillow(pillow_id):
checkpoint = PillowCheckpoint(SMS_PILLOW_CHECKPOINT_ID)
processor = ElasticProcessor(
elasticsearch=get_es_new(),
index_info=SMS_INDEX_INFO,
doc_prep_fn=lambda x: x
)
return ConstructedPillow(
name=pillow_id,
checkpoint=checkpoint,
change_feed=KafkaChangeFeed(topics=[topics.SMS], group_id=SMS_PILLOW_KAFKA_CONSUMER_GROUP_ID),
processor=processor,
change_processed_event_handler=PillowCheckpointEventHandler(
checkpoint=checkpoint, checkpoint_frequency=100,
),
)
| from corehq.apps.change_feed import topics
from corehq.apps.change_feed.consumer.feed import KafkaChangeFeed
from corehq.elastic import get_es_new
from corehq.apps.sms.models import SMSLog
from corehq.pillows.mappings.sms_mapping import SMS_MAPPING, SMS_INDEX, SMS_META, SMS_TYPE
from dimagi.utils.decorators.memoized import memoized
from pillowtop.checkpoints.manager import PillowCheckpoint, PillowCheckpointEventHandler
from pillowtop.es_utils import ElasticsearchIndexInfo
from pillowtop.listener import AliasedElasticPillow
from pillowtop.pillow.interface import ConstructedPillow
from pillowtop.processors.elastic import ElasticProcessor
SMS_PILLOW_CHECKPOINT_ID = 'sql-sms-to-es'
SMS_PILLOW_KAFKA_CONSUMER_GROUP_ID = 'sql-sms-to-es'
ES_SMS_INDEX = SMS_INDEX
class SMSPillow(AliasedElasticPillow):
"""
Simple/Common Case properties Indexer
"""
document_class = SMSLog # while this index includes all users,
# I assume we don't care about querying on properties specfic to WebUsers
couch_filter = "sms/all_logs"
es_timeout = 60
es_alias = "smslogs"
es_type = SMS_TYPE
es_meta = SMS_META
es_index = ES_SMS_INDEX
default_mapping = SMS_MAPPING
@classmethod
@memoized
def calc_meta(cls):
#todo: actually do this correctly
"""
override of the meta calculator since we're separating out all the types,
so we just do a hash of the "prototype" instead to determined md5
"""
return cls.calc_mapping_hash({"es_meta": cls.es_meta, "mapping": cls.default_mapping})
def change_transport(self, doc_dict):
# SMS changes don't go to couch anymore. Let the SqlSMSPillow process
# changes from now on.
# Also, we explicitly need this to be a no-op because we're going to
# delete all sms from couch and don't want them to be deleted from
# elasticsearch.
return
def get_sql_sms_pillow(pillow_id):
checkpoint = PillowCheckpoint(SMS_PILLOW_CHECKPOINT_ID)
processor = ElasticProcessor(
elasticsearch=get_es_new(),
index_info=ElasticsearchIndexInfo(index=ES_SMS_INDEX, type=ES_SMS_TYPE),
doc_prep_fn=lambda x: x
)
return ConstructedPillow(
name=pillow_id,
checkpoint=checkpoint,
change_feed=KafkaChangeFeed(topics=[topics.SMS], group_id=SMS_PILLOW_KAFKA_CONSUMER_GROUP_ID),
processor=processor,
change_processed_event_handler=PillowCheckpointEventHandler(
checkpoint=checkpoint, checkpoint_frequency=100,
),
)
| Python | 0 |
d48a0cb42aabf0c42debfddfcf09a8c2f954b9ff | Put project when create account.analytic.account for test. | purchase_line_with_delivery_service_info/tests/test_purchase_line_with_delivery_service_info.py | purchase_line_with_delivery_service_info/tests/test_purchase_line_with_delivery_service_info.py | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
import openerp.tests.common as common
class TestPurchaseLineWithDeliveryServiceInfo(common.TransactionCase):
def setUp(self):
super(TestPurchaseLineWithDeliveryServiceInfo, self).setUp()
self.sale_model = self.env['sale.order']
self.procurement_model = self.env['procurement.order']
account_vals = {'name': 'account procurement service project',
'date_start': '2016-01-15',
'date': '2016-02-20'}
self.account = self.env['account.analytic.account'].create(
account_vals)
project_vals = {'name': 'project procurement service project',
'analytic_account_id': self.account.id}
self.project = self.env['project.project'].create(project_vals)
sale_vals = {
'partner_id': self.env.ref('base.res_partner_1').id,
'partner_shipping_id': self.env.ref('base.res_partner_1').id,
'partner_invoice_id': self.env.ref('base.res_partner_1').id,
'pricelist_id': self.env.ref('product.list0').id,
'carrier_id': self.env.ref('delivery.normal_delivery_carrier').id,
'project_id': self.account.id}
sale_line_vals = {
'product_id': self.env.ref('product.product_product_6').id,
'name': self.env.ref('product.product_product_6').name,
'product_uos_qty': 1,
'product_uom': self.env.ref('product.product_product_6').uom_id.id,
'price_unit': self.env.ref('product.product_product_6').list_price}
sale_vals['order_line'] = [(0, 0, sale_line_vals)]
self.sale_order = self.sale_model.create(sale_vals)
self.sale_order.delivery_set()
for line in self.sale_order.order_line:
if line.product_id.type == 'service':
line.product_id.write(
{'route_ids':
[(6, 0,
[self.env.ref('stock.route_warehouse0_mto').id,
self.env.ref('purchase.route_warehouse0_buy').id])],
'seller_ids':
[(6, 0, [self.env.ref('base.res_partner_14').id])]})
self.service_product = line.product_id
line.write({'delivery_standard_price': 578.00})
def test_confirm_sale_with_delivery_service(self):
self.sale_order.action_button_confirm()
cond = [('origin', '=', self.sale_order.name),
('product_id', '=', self.service_product.id)]
procurement = self.procurement_model.search(cond)
self.assertEqual(
len(procurement), 1,
"Procurement not generated for the service product type")
procurement.run()
cond = [('group_id', '=', procurement.group_id.id),
('product_id', '=', self.service_product.id),
('state', '=', 'confirmed')]
procurement2 = self.procurement_model.search(cond)
self.assertEqual(
len(procurement2), 1,
"Procurement2 not generated for the service product type")
procurement2.run()
self.assertTrue(
bool(procurement2.purchase_id),
"Purchase no generated for procurement Service")
for line in procurement2.purchase_id.order_line:
if line.product_id.type == 'service':
self.assertEqual(
line.price_unit,
procurement2.sale_line_id.delivery_standard_price,
"Erroneous price on purchase order line")
| # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
import openerp.tests.common as common
class TestPurchaseLineWithDeliveryServiceInfo(common.TransactionCase):
def setUp(self):
super(TestPurchaseLineWithDeliveryServiceInfo, self).setUp()
self.sale_model = self.env['sale.order']
self.procurement_model = self.env['procurement.order']
account_vals = {'name': 'account procurement service project',
'date_start': '2016-01-15',
'date': '2016-02-20'}
self.account = self.env['account.analytic.account'].create(
account_vals)
sale_vals = {
'partner_id': self.env.ref('base.res_partner_1').id,
'partner_shipping_id': self.env.ref('base.res_partner_1').id,
'partner_invoice_id': self.env.ref('base.res_partner_1').id,
'pricelist_id': self.env.ref('product.list0').id,
'carrier_id': self.env.ref('delivery.normal_delivery_carrier').id,
'project_id': self.account.id}
sale_line_vals = {
'product_id': self.env.ref('product.product_product_6').id,
'name': self.env.ref('product.product_product_6').name,
'product_uos_qty': 1,
'product_uom': self.env.ref('product.product_product_6').uom_id.id,
'price_unit': self.env.ref('product.product_product_6').list_price}
sale_vals['order_line'] = [(0, 0, sale_line_vals)]
self.sale_order = self.sale_model.create(sale_vals)
self.sale_order.delivery_set()
for line in self.sale_order.order_line:
if line.product_id.type == 'service':
line.product_id.write(
{'route_ids':
[(6, 0,
[self.env.ref('stock.route_warehouse0_mto').id,
self.env.ref('purchase.route_warehouse0_buy').id])],
'seller_ids':
[(6, 0, [self.env.ref('base.res_partner_14').id])]})
self.service_product = line.product_id
line.write({'delivery_standard_price': 578.00})
def test_confirm_sale_with_delivery_service(self):
self.sale_order.action_button_confirm()
cond = [('origin', '=', self.sale_order.name),
('product_id', '=', self.service_product.id)]
procurement = self.procurement_model.search(cond)
self.assertEqual(
len(procurement), 1,
"Procurement not generated for the service product type")
procurement.run()
cond = [('group_id', '=', procurement.group_id.id),
('product_id', '=', self.service_product.id),
('state', '=', 'confirmed')]
procurement2 = self.procurement_model.search(cond)
self.assertEqual(
len(procurement2), 1,
"Procurement2 not generated for the service product type")
procurement2.run()
self.assertTrue(
bool(procurement2.purchase_id),
"Purchase no generated for procurement Service")
for line in procurement2.purchase_id.order_line:
if line.product_id.type == 'service':
self.assertEqual(
line.price_unit,
procurement2.sale_line_id.delivery_standard_price,
"Erroneous price on purchase order line")
| Python | 0 |
6593fe983b40a5d4c467cb2ea0847e10e5faae1c | Update Beck_Pang_First_Python_practice_2.7.py | Coding_practice/Beck_Pang_First_Python_practice_2.7.py | Coding_practice/Beck_Pang_First_Python_practice_2.7.py | """
Beck Pang 25/07/2014
First practice project for our summer robotics team
"""
import random
def name_to_number (name):
# This helper function converts the string name into a number between 0 an 4
# pre: take a name in String as a parameter
# post: return an represented number in integer
if (name == "rock"):
return 0
elif (name == "Spock"):
return 1
elif (name == "paper"):
return 2
elif (name == "lizard"):
return 3
elif (name == "scissors"):
return 4
else:
print ("This name is not included in this game.\n")
def number_to_name (number):
# pre: take a number in integer as a parameter
# post: return a name in String
if (number == 0):
return "rock"
elif (number == 1):
return "Spock"
elif (number == 2):
return "paper"
elif (number == 3):
return "lizard"
elif (number == 4):
return "scissors"
else:
return "no word found"
def rpsls (player_choice):
# This function operate the main functionality
# pre: take a player's choice in String as a parameter
# post: print the player and computer's choices in the console
# and show the result
print ("\n")
player_number = name_to_number(player_choice)
comp_number = random.randrange(5)
comp_name = number_to_name(comp_number)
print ("Player chooses " + player_choice + "\n")
print ("Computer chooses " + comp_name + "\n")
difference = (comp_number - player_number) % 5
if (difference == 0):
print ("Player and computer tie!")
elif (difference >= 2):
print ("Player wins!")
else:
print ("Computer wins!")
""" There is no main function in this game
Please play this game in the console.
"""
| """
Beck Pang 25/07/2014
First practice project for our summer robotics team
"""
import random
def name_to_number (name):
# This helper function converts the string name into a number between 0 an 4
# pre: take a name in String as a parameter
# post: return an represented number in integer
if (name == "rock"):
return 0
elif (name == "Spock"):
return 1
elif (name == "paper"):
return 2
elif (name == "lizard"):
return 3
elif (name ==hel "scissors"):
return 4
else:
print ("This name is not included in this game.\n")
def number_to_name (number):
# pre: take a number in integer as a parameter
# post: return a name in String
if (number == 0):
return "rock"
elif (number == 1):
return "Spock"
elif (number == 2):
return "paper"
elif (number == 3):
return "lizard"
elif (number == 4):
return "scissors"
else:
return "no word found"
def rpsls (player_choice):
# This function operate the main functionality
# pre: take a player's choice in String as a parameter
# post: print the player and computer's choices in the console
# and show the result
print ("\n")
player_number = name_to_number(player_choice)
comp_number = random.randrange(5)
comp_name = number_to_name(comp_number)
print ("Player chooses " + player_choice + "\n")
print ("Computer chooses " + comp_name + "\n")
difference = (comp_number - player_number) % 5
if (difference == 0):
print ("Player and computer tie!")
elif (difference >= 2):
print ("Player wins!")
else:
print ("Computer wins!")
""" There is no main function in this game
Please play this game in the console.
"""
| Python | 0.000001 |
7ad0a248ab019c1c99080da5860ecba56f5a7654 | switch to beautiful soup for extraction | ifind/common/position_content_extractor.py | ifind/common/position_content_extractor.py | #!/usr/bin/env python
# -*- coding: latin-1 -*-
__author__ = 'rose'
from BeautifulSoup import BeautifulSoup
from copy import deepcopy
class PositionContentExtractor(object):
def __init__(self, div_ids=None):
self.div_ids = div_ids
self.html = ''
self.html_soup = None
self.text = ''
def set_div_ids(self, ids):
self.div_ids = ids
self.process_html_page(self.html)
def process_html_page(self, html):
""" reads in the html, parses it, and removes the set of specified div ids, assigning the text to self.text
:param html: expects a valid html document
:return: None
"""
self.html = html
self.html_soup = BeautifulSoup(html)
self.text = self._remove_div_content()
def get_subtext(self, num_words=0, percentage=None):
"""
takes first num_words from text and return them as a string
:param text:
:return:
"""
words = self.text.split()
subtext = ' '
if(percentage):
num_words = round(self._calc_percentage(percentage,len(words)))
if(num_words):
if num_words == 0:#return all text if 0 assumes 0 means wants all
return self.text
if len(words) > num_words:
return subtext.join(words[0:num_words])
else:
return self.text
def _remove_div_content(self):
"""
returns a string with the content the html with the content of
divs in div_ids removed
:param div_ids: a list of the ids of the div to be removed
:return:None
"""
result = ''
#for all div ids find the elements in the beautiful soup tree and extract
#the corresponding div
#this would update self.html_soup which we want to keep whole html in
#so perform on a deep copy
soup_copy = deepcopy(self.html_soup)
for div_id in self.div_ids:
elem = soup_copy.find("div", {"id": div_id})
elem.extract()
#set the text of the class to be the result of removing the text from the divs
self.text = soup_copy.get_text()
def _calc_percentage(self, percentage, total_words):
if total_words == 0:
return 0
else:
return 100 * float(percentage)/float(total_words)
| #!/usr/bin/env python
# -*- coding: latin-1 -*-
__author__ = 'rose'
from BeautifulSoup import BeautifulSoup
from copy import deepcopy
class PositionContentExtractor(object):
def __init__(self, div_ids=None):
self.div_ids = div_ids
self.html = ''
self.html_soup = None
self.text = ''
def set_div_ids(self, ids):
self.div_ids = ids
self.process_html_page(self.html)
def process_html_page(self, html):
""" reads in the html, parses it, and removes the set of specified div ids, assigning the text to self.text
:param html: expects a valid html document
:return: None
"""
self.html = html
self.html_soup = BeautifulSoup(html)
self.text = self._remove_div_content()
def get_subtext(self, num_words=0, percentage=None):
"""
takes first num_words from text and return them as a string
:param text:
:return:
"""
words = self.text.split()
subtext = ' '
if(percentage):
num_words = round(self._calc_percentage(percentage,len(words)))
if(num_words):
if num_words == 0:#return all text if 0 assumes 0 means wants all
return self.text
if len(words) > num_words:
return subtext.join(words[0:num_words])
# for term in :
# print term
# subtext += ' '.join(term)
else:
return self.text
def _remove_div_content(self):
"""
returns a string with the content the html with the content of
divs in div_ids removed
:param div_ids: a list of the ids of the div to be removed
:return: a string with the divs content removed
"""
result = ''
for div_id in self.div_ids:
self.html_soup.find("div", {"id": div_id})
return result
def _calc_percentage(self, percentage, total_words):
if total_words == 0:
return 0
else:
return 100 * float(percentage)/float(total_words)
| Python | 0 |
157a5b7350928eab13170da7e0c06636ae1e9975 | Add option to load instrumentation key from env | applicationinsights/flask/ext.py | applicationinsights/flask/ext.py | from os import getenv
from applicationinsights import TelemetryClient
from applicationinsights.channel import AsynchronousSender
from applicationinsights.channel import AsynchronousQueue
from applicationinsights.channel import TelemetryChannel
from applicationinsights.logging import LoggingHandler
from applicationinsights.requests import WSGIApplication
CONF_PREFIX = "APPINSIGHTS"
CONF_KEY = CONF_PREFIX + "_INSTRUMENTATIONKEY"
CONF_ENDPOINT_URI = CONF_PREFIX + "_ENDPOINT_URI"
CONF_DISABLE_REQUEST_LOGGING = CONF_PREFIX + "_DISABLE_REQUEST_LOGGING"
CONF_DISABLE_TRACE_LOGGING = CONF_PREFIX + "_DISABLE_TRACE_LOGGING"
CONF_DISABLE_EXCEPTION_LOGGING = CONF_PREFIX + "_DISABLE_EXCEPTION_LOGGING"
class AppInsights(object):
def __init__(self, app=None):
self._key = None
self._endpoint_uri = None
self._channel = None
self._requests_middleware = None
self._trace_log_handler = None
self._exception_telemetry_client = None
if app:
self.init_app(app)
def init_app(self, app):
self._key = app.config.get(CONF_KEY) or getenv(CONF_KEY)
if not self._key:
return
self._endpoint_uri = app.config.get(CONF_ENDPOINT_URI)
if self._endpoint_uri:
sender = AsynchronousSender(self._endpoint_uri)
else:
sender = AsynchronousSender()
queue = AsynchronousQueue(sender)
self._channel = TelemetryChannel(None, queue)
self._init_request_logging(app)
self._init_trace_logging(app)
self._init_exception_logging(app)
def _init_request_logging(self, app):
enabled = not app.config.get(CONF_DISABLE_REQUEST_LOGGING, False)
if not enabled:
return
self._requests_middleware = WSGIApplication(
self._key, app.wsgi_app, telemetry_channel=self._channel)
app.wsgi_app = self._requests_middleware
def _init_trace_logging(self, app):
enabled = not app.config.get(CONF_DISABLE_TRACE_LOGGING, False)
if not enabled:
return
self._trace_log_handler = LoggingHandler(
self._key, telemetry_channel=self._channel)
app.logger.addHandler(self._trace_log_handler)
def _init_exception_logging(self, app):
enabled = not app.config.get(CONF_DISABLE_EXCEPTION_LOGGING, False)
if not enabled:
return
exception_telemetry_client = TelemetryClient(
self._key, telemetry_channel=self._channel)
@app.errorhandler(Exception)
def exception_handler(exception):
exception_telemetry_client.track_exception(
type=type(exception),
value=exception,
tb=exception.__traceback__)
raise exception
self._exception_telemetry_client = exception_telemetry_client
def flush(self):
if self._requests_middleware:
self._requests_middleware.flush()
if self._trace_log_handler:
self._trace_log_handler.flush()
if self._exception_telemetry_client:
self._exception_telemetry_client.flush()
| from applicationinsights import TelemetryClient
from applicationinsights.channel import AsynchronousSender
from applicationinsights.channel import AsynchronousQueue
from applicationinsights.channel import TelemetryChannel
from applicationinsights.logging import LoggingHandler
from applicationinsights.requests import WSGIApplication
CONF_PREFIX = "APPINSIGHTS"
CONF_KEY = CONF_PREFIX + "_INSTRUMENTATIONKEY"
CONF_ENDPOINT_URI = CONF_PREFIX + "_ENDPOINT_URI"
CONF_DISABLE_REQUEST_LOGGING = CONF_PREFIX + "_DISABLE_REQUEST_LOGGING"
CONF_DISABLE_TRACE_LOGGING = CONF_PREFIX + "_DISABLE_TRACE_LOGGING"
CONF_DISABLE_EXCEPTION_LOGGING = CONF_PREFIX + "_DISABLE_EXCEPTION_LOGGING"
class AppInsights(object):
def __init__(self, app=None):
self._key = None
self._endpoint_uri = None
self._channel = None
self._requests_middleware = None
self._trace_log_handler = None
self._exception_telemetry_client = None
if app:
self.init_app(app)
def init_app(self, app):
self._key = app.config.get(CONF_KEY)
if not self._key:
return
self._endpoint_uri = app.config.get(CONF_ENDPOINT_URI)
if self._endpoint_uri:
sender = AsynchronousSender(self._endpoint_uri)
else:
sender = AsynchronousSender()
queue = AsynchronousQueue(sender)
self._channel = TelemetryChannel(None, queue)
self._init_request_logging(app)
self._init_trace_logging(app)
self._init_exception_logging(app)
def _init_request_logging(self, app):
enabled = not app.config.get(CONF_DISABLE_REQUEST_LOGGING, False)
if not enabled:
return
self._requests_middleware = WSGIApplication(
self._key, app.wsgi_app, telemetry_channel=self._channel)
app.wsgi_app = self._requests_middleware
def _init_trace_logging(self, app):
enabled = not app.config.get(CONF_DISABLE_TRACE_LOGGING, False)
if not enabled:
return
self._trace_log_handler = LoggingHandler(
self._key, telemetry_channel=self._channel)
app.logger.addHandler(self._trace_log_handler)
def _init_exception_logging(self, app):
enabled = not app.config.get(CONF_DISABLE_EXCEPTION_LOGGING, False)
if not enabled:
return
exception_telemetry_client = TelemetryClient(
self._key, telemetry_channel=self._channel)
@app.errorhandler(Exception)
def exception_handler(exception):
exception_telemetry_client.track_exception(
type=type(exception),
value=exception,
tb=exception.__traceback__)
raise exception
self._exception_telemetry_client = exception_telemetry_client
def flush(self):
if self._requests_middleware:
self._requests_middleware.flush()
if self._trace_log_handler:
self._trace_log_handler.flush()
if self._exception_telemetry_client:
self._exception_telemetry_client.flush()
| Python | 0 |
8483de37aad2256266deb404ce4d9eaae31a8142 | Remove backend | kaggle-classification/keras_trainer/rnn.py | kaggle-classification/keras_trainer/rnn.py | """RNN"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Input, GRU, Dense, Embedding, Dropout, Bidirectional, TimeDistributed, Multiply, Flatten, Reshape, Dot
from keras.models import Model
from keras_trainer import base_model
from keras_trainer.custom_metrics import auc_roc
class RNNModel(base_model.BaseModel):
""" RNN
hparams:
embedding_dim
vocab_size
train_embedding
"""
def __init__(self, embeddings_matrix, hparams):
self.embeddings_matrix = embeddings_matrix
self.hparams = hparams
def get_model(self):
sequence_length = self.hparams.sequence_length
I = Input(shape=(sequence_length,), dtype='float32')
E = Embedding(
self.hparams.vocab_size,
self.hparams.embedding_dim,
weights=[self.embeddings_matrix],
input_length=sequence_length,
trainable=self.hparams.train_embedding)(
I)
H = Bidirectional(GRU(128, return_sequences=True))(E)
A = TimeDistributed(Dense(3), input_shape=(sequence_length, 256))(H)
A = Flatten()(A)
A = Dense(sequence_length, activation='softmax')(A)
X = Dot((1, 1))([H, A])
X = Dense(128, activation='relu')(X)
X = Dropout(self.hparams.dropout_rate)(X)
Output = Dense(6, activation='sigmoid')(X)
model = Model(inputs=I, outputs=Output)
model.compile(
optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy', auc_roc])
print(model.summary())
return model
| """RNN"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Input, GRU, Dense, Embedding, Dropout, Bidirectional, TimeDistributed, Multiply, Flatten, Reshape, Dot
from keras.models import Model
from keras_trainer import base_model
from keras_trainer.custom_metrics import auc_roc
import keras.backend as K
class RNNModel(base_model.BaseModel):
""" RNN
hparams:
embedding_dim
vocab_size
train_embedding
"""
def __init__(self, embeddings_matrix, hparams):
self.embeddings_matrix = embeddings_matrix
self.hparams = hparams
def get_model(self):
sequence_length = self.hparams.sequence_length
I = Input(shape=(sequence_length,), dtype='float32')
E = Embedding(
self.hparams.vocab_size,
self.hparams.embedding_dim,
weights=[self.embeddings_matrix],
input_length=sequence_length,
trainable=self.hparams.train_embedding)(
I)
H = Bidirectional(GRU(128, return_sequences=True))(E)
A = TimeDistributed(Dense(3), input_shape=(sequence_length, 256))(H)
A = Flatten()(A)
A = Dense(sequence_length, activation='softmax')(A)
X = Dot((1, 1))([H, A])
X = Dense(128, activation='relu')(X)
X = Dropout(self.hparams.dropout_rate)(X)
Output = Dense(6, activation='sigmoid')(X)
model = Model(inputs=I, outputs=Output)
model.compile(
optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy', auc_roc])
print(model.summary())
return model
| Python | 0.000001 |
fa022cf128f16e97aad4670cbb87b9178744e0d8 | Add unittest for page size=1 & page chunck size=1 | kboard/core/tests/test_utils_pagination.py | kboard/core/tests/test_utils_pagination.py | from django.core.paginator import Paginator
from django.test import TestCase
from core.utils import get_pages_nav_info
class TestUtilsPagiation(TestCase):
def get_pages_nav_info(PAGE_SIZE, NAV_PAGE_CHUNK_SIZE, TEST_LOAD_PAGE, OBJS_SIZE):
object_list = range(OBJS_SIZE)
paginator = Paginator(object_list, PAGE_SIZE)
page = paginator.page(TEST_LOAD_PAGE)
return get_pages_nav_info(page, nav_chunk_size=NAV_PAGE_CHUNK_SIZE)
def test_pages_nav_info(self):
page_nav_info = TestUtilsPagiation.get_pages_nav_info(PAGE_SIZE=5, NAV_PAGE_CHUNK_SIZE=5, TEST_LOAD_PAGE=10,
OBJS_SIZE=100)
check_elements = ('pre_nav_page', 'page_list', 'current_page_num', 'next_nav_page')
for check_element in check_elements:
self.assertIn(check_element, page_nav_info)
self.assertEqual(5, page_nav_info['pre_nav_page'])
self.assertEqual([6, 7, 8, 9, 10], page_nav_info['page_list'])
self.assertEqual(10, page_nav_info['current_page_num'])
self.assertEqual(11, page_nav_info['next_nav_page'])
def test_pre_and_next_nav_pages_are_not_exist_if_page_count_less_than_nav_page_chunck_size(self):
page_nav_info = TestUtilsPagiation.get_pages_nav_info(PAGE_SIZE=5, NAV_PAGE_CHUNK_SIZE=5, TEST_LOAD_PAGE=3,
OBJS_SIZE=17)
self.assertEqual(-1, page_nav_info['pre_nav_page'])
self.assertEqual(-1, page_nav_info['next_nav_page'])
def test_pre_nav_page_exist(self):
page_nav_info = TestUtilsPagiation.get_pages_nav_info(PAGE_SIZE=5, NAV_PAGE_CHUNK_SIZE=5, TEST_LOAD_PAGE=6,
OBJS_SIZE=31)
self.assertEqual(5, page_nav_info['pre_nav_page'])
def test_next_nav_page_exist(self):
page_nav_info = TestUtilsPagiation.get_pages_nav_info(PAGE_SIZE=5, NAV_PAGE_CHUNK_SIZE=5, TEST_LOAD_PAGE=1,
OBJS_SIZE=31)
self.assertEqual(6, page_nav_info['next_nav_page'])
def test_page_size_1_case(self):
page_nav_info = TestUtilsPagiation.get_pages_nav_info(PAGE_SIZE=1, NAV_PAGE_CHUNK_SIZE=5, TEST_LOAD_PAGE=8,
OBJS_SIZE=50)
self.assertEqual(5, page_nav_info['pre_nav_page'])
self.assertEqual([6, 7, 8, 9, 10], page_nav_info['page_list'])
self.assertEqual(8, page_nav_info['current_page_num'])
self.assertEqual(11, page_nav_info['next_nav_page'])
def test_page_chunk_size_1_case(self):
page_nav_info = TestUtilsPagiation.get_pages_nav_info(PAGE_SIZE=5, NAV_PAGE_CHUNK_SIZE=1, TEST_LOAD_PAGE=8,
OBJS_SIZE=50)
self.assertEqual(7, page_nav_info['pre_nav_page'])
self.assertEqual([8, ], page_nav_info['page_list'])
self.assertEqual(8, page_nav_info['current_page_num'])
self.assertEqual(9, page_nav_info['next_nav_page'])
| from django.core.paginator import Paginator
from django.test import TestCase
from core.utils import get_pages_nav_info
class TestUtilsPagiation(TestCase):
def get_pages_nav_info(PAGE_SIZE, NAV_PAGE_CHUNK_SIZE, TEST_LOAD_PAGE, OBJS_SIZE):
object_list = range(OBJS_SIZE)
paginator = Paginator(object_list, PAGE_SIZE)
page = paginator.page(TEST_LOAD_PAGE)
return get_pages_nav_info(page, nav_chunk_size=NAV_PAGE_CHUNK_SIZE)
def test_pages_nav_info(self):
page_nav_info = TestUtilsPagiation.get_pages_nav_info(PAGE_SIZE=5, NAV_PAGE_CHUNK_SIZE=5, TEST_LOAD_PAGE=10,
OBJS_SIZE=100)
check_elements = ('pre_nav_page', 'page_list', 'current_page_num', 'next_nav_page')
for check_element in check_elements:
self.assertIn(check_element, page_nav_info)
self.assertEqual(5, page_nav_info['pre_nav_page'])
self.assertEqual([6, 7, 8, 9, 10], page_nav_info['page_list'])
self.assertEqual(10, page_nav_info['current_page_num'])
self.assertEqual(11, page_nav_info['next_nav_page'])
def test_pre_and_next_nav_pages_are_not_exist_if_page_count_less_than_nav_page_chunck_size(self):
page_nav_info = TestUtilsPagiation.get_pages_nav_info(PAGE_SIZE=5, NAV_PAGE_CHUNK_SIZE=5, TEST_LOAD_PAGE=3,
OBJS_SIZE=17)
self.assertEqual(-1, page_nav_info['pre_nav_page'])
self.assertEqual(-1, page_nav_info['next_nav_page'])
def test_pre_nav_page_exist(self):
page_nav_info = TestUtilsPagiation.get_pages_nav_info(PAGE_SIZE=5, NAV_PAGE_CHUNK_SIZE=5, TEST_LOAD_PAGE=6,
OBJS_SIZE=31)
self.assertEqual(5, page_nav_info['pre_nav_page'])
def test_next_nav_page_exist(self):
page_nav_info = TestUtilsPagiation.get_pages_nav_info(PAGE_SIZE=5, NAV_PAGE_CHUNK_SIZE=5, TEST_LOAD_PAGE=1,
OBJS_SIZE=31)
self.assertEqual(6, page_nav_info['next_nav_page'])
| Python | 0 |
1075b77abfbd04238a95c4b3e070c80fb141ab8b | Rename get_view() to get_view_method() for clarity. | incuna_test_utils/testcases/integration.py | incuna_test_utils/testcases/integration.py | from django.core.exceptions import ImproperlyConfigured
from django.shortcuts import render
from .request import BaseRequestTestCase
class BaseIntegrationTestCase(BaseRequestTestCase):
"""
A TestCase that operates similarly to a Selenium test.
Contains methods that access pages and render them to strings full of
HTML. Can be used to assert the contents of templates as well as doing
normal TestCase things.
Must be subclassed with the following attributes in order to work:
* user_factory
* view (class-based or function-based view)
"""
def get_view_method(self):
"""
Returns the class's attached view, as a method.
Checks self.view exists, and throws an ImproperlyConfigured exception
if it doesn't. Otherwise, it returns the view as a method.
"""
try:
view = self.view
except AttributeError:
message = "This test must have a 'view' attribute."
raise ImproperlyConfigured(message)
try:
return view.as_view()
except AttributeError:
return view
def access_view(self, *args, **kwargs):
"""
Helper method that accesses the test's view.
Accepts an optional 'request' kwarg. If this isn't supplied,
access_view creates a basic request on your behalf.
Returns a HTTPResponse object with the request (created or otherwise)
attached.
"""
request = kwargs.pop('request', None)
if request is None:
request = self.create_request()
view_method = self.get_view_method()
response = view_method(request, *args, **kwargs)
# Add the request to the response.
# This is a weird-looking but compact way of ensuring we have access to
# the request everywhere we need it, without doing clunky things like
# returning tuples all the time.
response.request = request
return response
def render_to_str(self, response, request=None):
"""
Render a HTTPResponse into a string that holds the HTML content.
Accepts an optional request parameter, and looks for a request attached
to the response if the optional parameter isn't specified.
"""
if request is None:
request = response.request
response = render(request, response.template_name, response.context_data)
return str(response.content)
def access_view_and_render_response(self, *args, **kwargs):
"""
Accesses the view and returns a string of HTML.
Combines access_view, an assertion on the returned status, and
render_to_str.
Accepts an optional 'request' kwarg holding a HTTPRequest, but will
create a simple one if the parameter isn't supplied, and
'expected_status', an expected status code for the response, which
defaults to 200. Other args and kwargs are passed on to the view
method.
"""
request = kwargs.pop('request', None)
expected_status = kwargs.pop('expected_status', 200)
response = self.access_view(request, *args, **kwargs)
# Assert that the response has the correct status code before we go
# any further. Throwing accurately descriptive failures when something
# goes wrong is better than trying to run assertions on the content
# of a HTML response for some random 404 page.
self.assertEqual(expected_status, response.status_code)
# Render the response and return it.
return self.render_to_str(response)
def assert_count(self, needle, haystack, count):
"""
Assert that 'needle' occurs exactly 'count' times in 'haystack'.
Used as a snazzier, stricter version of unittest.assertIn.
Outputs a verbose error message when it fails.
"""
actual_count = haystack.count(needle)
# Build a verbose error message in case we need it.
plural = '' if count == 1 else 's'
message = 'Expected {count} instance{plural} of {needle}, but found {actual_count}, in {haystack}'
message = message.format_map(locals())
# Make the assertion.
self.assertEqual(count, actual_count, message)
| from django.core.exceptions import ImproperlyConfigured
from django.shortcuts import render
from .request import BaseRequestTestCase
class BaseIntegrationTestCase(BaseRequestTestCase):
"""
A TestCase that operates similarly to a Selenium test.
Contains methods that access pages and render them to strings full of
HTML. Can be used to assert the contents of templates as well as doing
normal TestCase things.
Must be subclassed with the following attributes in order to work:
* user_factory
* view (class-based or function-based view)
"""
def get_view(self):
"""
Returns the class's attached view, as a method.
Checks self.view exists, and throws an ImproperlyConfigured exception
if it doesn't. Otherwise, it returns the view as a method.
"""
try:
view = self.view
except AttributeError:
message = "This test must have a 'view' attribute."
raise ImproperlyConfigured(message)
try:
return view.as_view()
except AttributeError:
return view
def access_view(self, *args, **kwargs):
"""
Helper method that accesses the test's view.
Accepts an optional 'request' kwarg. If this isn't supplied,
access_view creates a basic request on your behalf.
Returns a HTTPResponse object with the request (created or otherwise)
attached.
"""
request = kwargs.pop('request', None)
if request is None:
request = self.create_request()
view = self.get_view()
response = view(request, *args, **kwargs)
# Add the request to the response.
# This is a weird-looking but compact way of ensuring we have access to
# the request everywhere we need it, without doing clunky things like
# returning tuples all the time.
response.request = request
return response
def render_to_str(self, response, request=None):
"""
Render a HTTPResponse into a string that holds the HTML content.
Accepts an optional request parameter, and looks for a request attached
to the response if the optional parameter isn't specified.
"""
if request is None:
request = response.request
response = render(request, response.template_name, response.context_data)
return str(response.content)
def access_view_and_render_response(self, *args, **kwargs):
"""
Accesses the view and returns a string of HTML.
Combines access_view, an assertion on the returned status, and
render_to_str.
Accepts an optional 'request' kwarg holding a HTTPRequest, but will
create a simple one if the parameter isn't supplied, and
'expected_status', an expected status code for the response, which
defaults to 200. Other args and kwargs are passed on to the view
method.
"""
request = kwargs.pop('request', None)
expected_status = kwargs.pop('expected_status', 200)
response = self.access_view(request, *args, **kwargs)
# Assert that the response has the correct status code before we go
# any further. Throwing accurately descriptive failures when something
# goes wrong is better than trying to run assertions on the content
# of a HTML response for some random 404 page.
self.assertEqual(expected_status, response.status_code)
# Render the response and return it.
return self.render_to_str(response)
def assert_count(self, needle, haystack, count):
"""
Assert that 'needle' occurs exactly 'count' times in 'haystack'.
Used as a snazzier, stricter version of unittest.assertIn.
Outputs a verbose error message when it fails.
"""
actual_count = haystack.count(needle)
# Build a verbose error message in case we need it.
plural = '' if count == 1 else 's'
message = 'Expected {count} instance{plural} of {needle}, but found {actual_count}, in {haystack}'
message = message.format_map(locals())
# Make the assertion.
self.assertEqual(count, actual_count, message)
| Python | 0 |
4bda6769c5e6a01e8a62b06ad310dc846fbc7cbf | fix an error handling bug I introduced | core/dbt/task/base.py | core/dbt/task/base.py | from abc import ABCMeta, abstractmethod
import os
import six
from dbt.config import RuntimeConfig, Project
from dbt.config.profile import read_profile, PROFILES_DIR
from dbt import flags
from dbt import tracking
from dbt.logger import GLOBAL_LOGGER as logger
import dbt.exceptions
class NoneConfig(object):
@classmethod
def from_args(cls, args):
return None
def read_profiles(profiles_dir=None):
"""This is only used for some error handling"""
if profiles_dir is None:
profiles_dir = PROFILES_DIR
raw_profiles = read_profile(profiles_dir)
if raw_profiles is None:
profiles = {}
else:
profiles = {k: v for (k, v) in raw_profiles.items() if k != 'config'}
return profiles
PROFILES_HELP_MESSAGE = """
For more information on configuring profiles, please consult the dbt docs:
https://docs.getdbt.com/docs/configure-your-profile
"""
@six.add_metaclass(ABCMeta)
class BaseTask(object):
ConfigType = NoneConfig
def __init__(self, args, config):
self.args = args
self.config = config
@classmethod
def from_args(cls, args):
try:
config = cls.ConfigType.from_args(args)
except dbt.exceptions.DbtProjectError as exc:
logger.info("Encountered an error while reading the project:")
logger.info(to_string(exc))
tracking.track_invalid_invocation(
args=args,
result_type=exc.result_type)
raise dbt.exceptions.RuntimeException('Could not run dbt')
except dbt.exceptions.DbtProfileError as exc:
logger.info("Encountered an error while reading profiles:")
logger.info(" ERROR {}".format(str(exc)))
all_profiles = read_profiles(args.profiles_dir).keys()
if len(all_profiles) > 0:
logger.info("Defined profiles:")
for profile in all_profiles:
logger.info(" - {}".format(profile))
else:
logger.info("There are no profiles defined in your "
"profiles.yml file")
logger.info(PROFILES_HELP_MESSAGE)
tracking.track_invalid_invocation(
args=args,
result_type=exc.result_type)
raise dbt.exceptions.RuntimeException('Could not run dbt')
return cls(args, config)
@abstractmethod
def run(self):
raise dbt.exceptions.NotImplementedException('Not Implemented')
def interpret_results(self, results):
return True
def get_nearest_project_dir():
root_path = os.path.abspath(os.sep)
cwd = os.getcwd()
while cwd != root_path:
project_file = os.path.join(cwd, "dbt_project.yml")
if os.path.exists(project_file):
return cwd
cwd = os.path.dirname(cwd)
return None
def move_to_nearest_project_dir():
nearest_project_dir = get_nearest_project_dir()
if nearest_project_dir is None:
raise dbt.exceptions.RuntimeException(
"fatal: Not a dbt project (or any of the parent directories). "
"Missing dbt_project.yml file"
)
os.chdir(nearest_project_dir)
class RequiresProjectTask(BaseTask):
@classmethod
def from_args(cls, args):
move_to_nearest_project_dir()
return super(RequiresProjectTask, cls).from_args(args)
class ConfiguredTask(RequiresProjectTask):
ConfigType = RuntimeConfig
class ProjectOnlyTask(RequiresProjectTask):
ConfigType = Project
| from abc import ABCMeta, abstractmethod
import os
import six
from dbt.config import RuntimeConfig, Project
from dbt.config.profile import read_profile, PROFILES_DIR
from dbt import flags
from dbt import tracking
from dbt.logger import GLOBAL_LOGGER as logger
import dbt.exceptions
class NoneConfig(object):
@classmethod
def from_args(cls, args):
return None
def read_profiles(profiles_dir=None):
"""This is only used for some error handling"""
if profiles_dir is None:
profiles_dir = PROFILES_DIR
raw_profiles = read_profile(profiles_dir)
if raw_profiles is None:
profiles = {}
else:
profiles = {k: v for (k, v) in raw_profiles.items() if k != 'config'}
return profiles
PROFILES_HELP_MESSAGE = """
For more information on configuring profiles, please consult the dbt docs:
https://docs.getdbt.com/docs/configure-your-profile
"""
@six.add_metaclass(ABCMeta)
class BaseTask(object):
ConfigType = NoneConfig
def __init__(self, args, config):
self.args = args
self.config = config
@classmethod
def from_args(cls, args):
try:
config = cls.ConfigType.from_args(args)
except DbtProjectError as exc:
logger.info("Encountered an error while reading the project:")
logger.info(to_string(exc))
tracking.track_invalid_invocation(
args=args,
result_type=exc.result_type)
raise dbt.exceptions.RuntimeException('Could not run dbt')
except DbtProfileError as exc:
logger.info("Encountered an error while reading profiles:")
logger.info(" ERROR {}".format(str(exc)))
all_profiles = read_profiles(args.profiles_dir).keys()
if len(all_profiles) > 0:
logger.info("Defined profiles:")
for profile in all_profiles:
logger.info(" - {}".format(profile))
else:
logger.info("There are no profiles defined in your "
"profiles.yml file")
logger.info(PROFILES_HELP_MESSAGE)
tracking.track_invalid_invocation(
args=args,
result_type=exc.result_type)
raise dbt.exceptions.RuntimeException('Could not run dbt')
return cls(args, config)
@abstractmethod
def run(self):
raise dbt.exceptions.NotImplementedException('Not Implemented')
def interpret_results(self, results):
return True
def get_nearest_project_dir():
root_path = os.path.abspath(os.sep)
cwd = os.getcwd()
while cwd != root_path:
project_file = os.path.join(cwd, "dbt_project.yml")
if os.path.exists(project_file):
return cwd
cwd = os.path.dirname(cwd)
return None
def move_to_nearest_project_dir():
nearest_project_dir = get_nearest_project_dir()
if nearest_project_dir is None:
raise dbt.exceptions.RuntimeException(
"fatal: Not a dbt project (or any of the parent directories). "
"Missing dbt_project.yml file"
)
os.chdir(nearest_project_dir)
class RequiresProjectTask(BaseTask):
@classmethod
def from_args(cls, args):
move_to_nearest_project_dir()
return super(RequiresProjectTask, cls).from_args(args)
class ConfiguredTask(RequiresProjectTask):
ConfigType = RuntimeConfig
class ProjectOnlyTask(RequiresProjectTask):
ConfigType = Project
| Python | 0.000002 |
7fd0ed0897ffedf117698502cdefac0436ac4f2c | remove MotechTab import | corehq/tabs/config.py | corehq/tabs/config.py | from corehq.apps.styleguide.tabs import SGExampleTab, SimpleCrispyFormSGExample, \
ControlsDemoSGExample
from corehq.tabs.tabclasses import DashboardTab, ProjectReportsTab, ProjectInfoTab, SetupTab, \
ProjectDataTab, ApplicationsTab, CloudcareTab, MessagingTab, ProjectUsersTab, \
AdminTab, IndicatorAdminTab, SMSAdminTab, AccountingTab, ProjectSettingsTab, \
MySettingsTab
MENU_TABS = (
DashboardTab,
ProjectInfoTab,
ProjectReportsTab,
IndicatorAdminTab,
ProjectDataTab,
SetupTab,
ProjectUsersTab,
ApplicationsTab,
CloudcareTab,
MessagingTab,
# invisible
ProjectSettingsTab,
MySettingsTab,
# Admin
AdminTab,
SMSAdminTab,
AccountingTab,
# Styleguide
SGExampleTab,
SimpleCrispyFormSGExample,
ControlsDemoSGExample,
)
| from corehq.apps.styleguide.tabs import SGExampleTab, SimpleCrispyFormSGExample, \
ControlsDemoSGExample
from corehq.tabs.tabclasses import DashboardTab, ProjectReportsTab, ProjectInfoTab, SetupTab, \
ProjectDataTab, ApplicationsTab, CloudcareTab, MessagingTab, ProjectUsersTab, \
AdminTab, IndicatorAdminTab, SMSAdminTab, AccountingTab, ProjectSettingsTab, \
MySettingsTab, MotechTab
MENU_TABS = (
DashboardTab,
ProjectInfoTab,
ProjectReportsTab,
IndicatorAdminTab,
ProjectDataTab,
SetupTab,
ProjectUsersTab,
ApplicationsTab,
CloudcareTab,
MessagingTab,
MotechTab,
# invisible
ProjectSettingsTab,
MySettingsTab,
# Admin
AdminTab,
SMSAdminTab,
AccountingTab,
# Styleguide
SGExampleTab,
SimpleCrispyFormSGExample,
ControlsDemoSGExample,
)
| Python | 0 |
0185a30a340fae956c0e5b9d9f354e56e2e2178a | update the wsgi file | crate_project/wsgi.py | crate_project/wsgi.py | import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "apps")))
import newrelic.agent
newrelic.agent.initialize()
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| import newrelic.agent
newrelic.agent.initialize()
import pinax.env
from django.core.wsgi import get_wsgi_application
# setup the environment for Django and Pinax
pinax.env.setup_environ(__file__)
# set application for WSGI processing
application = get_wsgi_application()
| Python | 0 |
a0fab69d12d64d4e5371fcb26a4ec70365a76fa6 | Move task results database to data dir | cref/app/web/tasks.py | cref/app/web/tasks.py | from celery import Celery
from cref.app.terminal import run_cref
app = Celery(
'tasks',
backend='db+sqlite:///data/results.sqlite',
broker='amqp://guest@localhost//'
)
@app.task
def predict_structure(sequence, params={}):
return run_cref(sequence)
| from celery import Celery
from cref.app.terminal import run_cref
app = Celery(
'tasks',
backend='db+sqlite:///results.sqlite',
broker='amqp://guest@localhost//'
)
@app.task
def predict_structure(sequence, params={}):
return run_cref(sequence)
| Python | 0.000004 |
6e287393ad87ad09f94f845d372b5835ad4ebaba | Increase plot range | examples/alpha250-4/adc-bram/test.py | examples/alpha250-4/adc-bram/test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import os
import time
from adc_bram import AdcBram
from koheron import connect
import matplotlib
matplotlib.use('TKAgg')
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
host = os.getenv('HOST', '192.168.1.50')
client = connect(host, 'adc-bram', restart=False)
driver = AdcBram(client)
print('ADC size = {}'.format(driver.adc_size))
driver.set_reference_clock(0) # External
time.sleep(5)
clk_200MHz = {'idx': 0, 'fs': 200E6}
clk_250MHz = {'idx': 1, 'fs': 250E6}
clock = clk_250MHz
driver.set_sampling_frequency(clock['idx'])
# driver.phase_shift(0)
t = np.arange(driver.adc_size) / clock['fs']
t_us = 1e6 * t
# Dynamic plot
fig = plt.figure()
ax = fig.add_subplot(111)
y = np.zeros(driver.adc_size)
line0 = Line2D([], [], color='blue', label='IN0')
line1 = Line2D([], [], color='green', label='IN1')
line2 = Line2D([], [], color='red', label='IN2')
line3 = Line2D([], [], color='cyan', label='IN3')
ax.add_line(line0)
ax.add_line(line1)
ax.add_line(line2)
ax.add_line(line3)
ax.set_xlabel('Time (us)')
ax.set_ylabel('ADC Raw data')
ax.set_xlim((t_us[0], t_us[-1]))
# ax.set_ylim((-2**15, 2**15))
ax.set_ylim((-32768, 32768))
ax.legend(loc='upper right')
fig.canvas.draw()
while True:
try:
driver.trigger_acquisition()
time.sleep(0.1)
driver.get_adc(0)
driver.get_adc(1)
line0.set_data(t_us, driver.adc0[0,:])
line1.set_data(t_us, driver.adc0[1,:])
line2.set_data(t_us, driver.adc1[0,:])
line3.set_data(t_us, driver.adc1[1,:])
fig.canvas.draw()
plt.pause(0.001)
# plt.pause(3600)
except KeyboardInterrupt:
break | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import os
import time
from adc_bram import AdcBram
from koheron import connect
import matplotlib
matplotlib.use('TKAgg')
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
host = os.getenv('HOST', '192.168.1.50')
client = connect(host, 'adc-bram', restart=False)
driver = AdcBram(client)
print('ADC size = {}'.format(driver.adc_size))
driver.set_reference_clock(0) # External
time.sleep(5)
clk_200MHz = {'idx': 0, 'fs': 200E6}
clk_250MHz = {'idx': 1, 'fs': 250E6}
clock = clk_250MHz
driver.set_sampling_frequency(clock['idx'])
# driver.phase_shift(0)
t = np.arange(driver.adc_size) / clock['fs']
t_us = 1e6 * t
# Dynamic plot
fig = plt.figure()
ax = fig.add_subplot(111)
y = np.zeros(driver.adc_size)
line0 = Line2D([], [], color='blue', label='IN0')
line1 = Line2D([], [], color='green', label='IN1')
line2 = Line2D([], [], color='red', label='IN2')
line3 = Line2D([], [], color='cyan', label='IN3')
ax.add_line(line0)
ax.add_line(line1)
ax.add_line(line2)
ax.add_line(line3)
ax.set_xlabel('Time (us)')
ax.set_ylabel('ADC Raw data')
ax.set_xlim((t_us[0], t_us[-1]))
# ax.set_ylim((-2**15, 2**15))
ax.set_ylim((-300, 300))
ax.legend(loc='upper right')
fig.canvas.draw()
while True:
try:
driver.trigger_acquisition()
time.sleep(0.1)
driver.get_adc(0)
driver.get_adc(1)
line0.set_data(t_us, driver.adc0[0,:])
line1.set_data(t_us, driver.adc0[1,:])
line2.set_data(t_us, driver.adc1[0,:])
line3.set_data(t_us, driver.adc1[1,:])
fig.canvas.draw()
plt.pause(0.001)
# plt.pause(3600)
except KeyboardInterrupt:
break | Python | 0.000001 |
63b828983b38eb00e68683c19c51f444102a030d | support p3k on python file | plugin/vim_bootstrap_updater.py | plugin/vim_bootstrap_updater.py | import os
try:
import urllib2
import urllib
except ImportError:
import urllib.request as urllib2
import urllib.parse as urllib
def vimrc_path(editor):
return os.path.expanduser('~/.%src' % editor)
def _generate_vimrc(editor, langs):
params = [('langs', l.strip()) for l in langs]
params.append(('editor', editor))
data = urllib.urlencode(params)
resp = urllib2.urlopen("https://vim-bootstrap.appspot.com/generate.vim",
data)
return resp.read()
def get_available_langs():
resp = urllib2.urlopen("https://vim-bootstrap.appspot.com/langs")
return resp.read().decode('utf-8')
def update(vimrc, editor, langs):
content = _generate_vimrc(editor, langs)
vimrc = os.path.expanduser(vimrc)
with open(vimrc, 'w') as fh:
fh.write(str(content))
return content
| import os
import urllib
import urllib2
def vimrc_path(editor):
return os.path.expanduser('~/.%src' % editor)
def _generate_vimrc(editor, langs):
params = [('langs', l.strip()) for l in langs]
params.append(('editor', editor))
data = urllib.urlencode(params)
resp = urllib2.urlopen("https://vim-bootstrap.appspot.com/generate.vim",
data)
return resp.read()
def get_available_langs():
resp = urllib2.urlopen("https://vim-bootstrap.appspot.com/langs")
return resp.read()
def update(vimrc, editor, langs):
content = _generate_vimrc(editor, langs)
vimrc = os.path.expanduser(vimrc)
with open(vimrc, 'w') as fh:
fh.write(str(content))
return content
| Python | 0 |
5851c7524e66cfe3ee7e59542224d097d6e01f9e | Remove 's' in path. | mojo/public/tools/download_archiecture_independent_frameworks.py | mojo/public/tools/download_archiecture_independent_frameworks.py | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import sys
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(CURRENT_PATH, "pylib"))
import gs
PREBUILT_FILE_PATH = os.path.join(CURRENT_PATH, "prebuilt", "frameworks")
FILES_TO_DOWNLOAD = [
"apptest.dartzip",
]
def download(tools_directory, version_file):
stamp_path = os.path.join(PREBUILT_FILE_PATH, "VERSION")
version_path = os.path.join(CURRENT_PATH, version_file)
with open(version_path) as version_file:
version = version_file.read().strip()
try:
with open(stamp_path) as stamp_file:
current_version = stamp_file.read().strip()
if current_version == version:
return 0 # Already have the right version.
except IOError:
pass # If the stamp file does not exist we need to download new binaries.
for file_name in FILES_TO_DOWNLOAD:
download_file(file_name, version, tools_directory)
with open(stamp_path, 'w') as stamp_file:
stamp_file.write(version)
return 0
def download_file(basename, version, tools_directory):
find_depot_tools_path = os.path.join(CURRENT_PATH, tools_directory)
sys.path.insert(0, find_depot_tools_path)
# pylint: disable=F0401
import find_depot_tools
depot_tools_path = find_depot_tools.add_depot_tools_to_path()
gs_path = "gs://mojo/file/" + version + "/" + basename
output_file = os.path.join(PREBUILT_FILE_PATH, basename)
gs.download_from_public_bucket(gs_path, output_file,
depot_tools_path)
def main():
parser = argparse.ArgumentParser(description="Downloads bundled frameworks "
"binaries from google storage.")
parser.add_argument("--tools-directory",
dest="tools_directory",
metavar="<tools-directory>",
type=str,
required=True,
help="Path to the directory containing "
"find_depot_tools.py, specified as a relative path "
"from the location of this file.")
parser.add_argument("--version-file",
dest="version_file",
metavar="<version-file>",
type=str,
default="../VERSION",
help="Path to the file containing the version of the "
"shell to be fetched, specified as a relative path "
"from the location of this file (default: "
"%(default)s).")
args = parser.parse_args()
return download(args.tools_directory, args.version_file)
if __name__ == "__main__":
sys.exit(main())
| #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import sys
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(CURRENT_PATH, "pylib"))
import gs
PREBUILT_FILE_PATH = os.path.join(CURRENT_PATH, "prebuilt", "frameworks")
FILES_TO_DOWNLOAD = [
"apptest.dartzip",
]
def download(tools_directory, version_file):
stamp_path = os.path.join(PREBUILT_FILE_PATH, "VERSION")
version_path = os.path.join(CURRENT_PATH, version_file)
with open(version_path) as version_file:
version = version_file.read().strip()
try:
with open(stamp_path) as stamp_file:
current_version = stamp_file.read().strip()
if current_version == version:
return 0 # Already have the right version.
except IOError:
pass # If the stamp file does not exist we need to download new binaries.
for file_name in FILES_TO_DOWNLOAD:
download_file(file_name, version, tools_directory)
with open(stamp_path, 'w') as stamp_file:
stamp_file.write(version)
return 0
def download_file(basename, version, tools_directory):
find_depot_tools_path = os.path.join(CURRENT_PATH, tools_directory)
sys.path.insert(0, find_depot_tools_path)
# pylint: disable=F0401
import find_depot_tools
depot_tools_path = find_depot_tools.add_depot_tools_to_path()
gs_path = "gs://mojo/files/" + version + "/" + basename
output_file = os.path.join(PREBUILT_FILE_PATH, basename)
gs.download_from_public_bucket(gs_path, output_file,
depot_tools_path)
def main():
parser = argparse.ArgumentParser(description="Downloads bundled frameworks "
"binaries from google storage.")
parser.add_argument("--tools-directory",
dest="tools_directory",
metavar="<tools-directory>",
type=str,
required=True,
help="Path to the directory containing "
"find_depot_tools.py, specified as a relative path "
"from the location of this file.")
parser.add_argument("--version-file",
dest="version_file",
metavar="<version-file>",
type=str,
default="../VERSION",
help="Path to the file containing the version of the "
"shell to be fetched, specified as a relative path "
"from the location of this file (default: "
"%(default)s).")
args = parser.parse_args()
return download(args.tools_directory, args.version_file)
if __name__ == "__main__":
sys.exit(main())
| Python | 0.000005 |
ff16e993beca5ff0aa490bb140a46e64d026a6c9 | Fix task banner with 'actionable' callback when using templates in name (#38165) | lib/ansible/plugins/callback/actionable.py | lib/ansible/plugins/callback/actionable.py | # (c) 2015, Andrew Gaffney <andrew@agaffney.org>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: actionable
type: stdout
short_description: shows only items that need attention
description:
- Use this callback when you dont care about OK nor Skipped.
- This callback suppresses any non Failed or Changed status.
version_added: "2.1"
extends_documentation_fragment:
- default_callback
requirements:
- set as stdout callback in configuration
'''
from ansible import constants as C
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'actionable'
def __init__(self):
self.super_ref = super(CallbackModule, self)
self.super_ref.__init__()
self.last_task = None
self.last_task_banner = None
self.shown_title = False
def v2_playbook_on_handler_task_start(self, task):
self.super_ref.v2_playbook_on_handler_task_start(task)
self.shown_title = True
def v2_playbook_on_task_start(self, task, is_conditional):
self.last_task = task
self.last_task_banner = self._get_task_banner(task)
self.shown_title = False
def display_task_banner(self):
if not self.shown_title:
self.super_ref.v2_playbook_on_task_start(self.last_task, None)
self.shown_title = True
def _print_task_banner(self, task):
self._display.banner(self.last_task_banner)
self._print_task_path(self.last_task)
self._last_task_banner = self.last_task._uuid
def _print_task_path(self, task):
if self._display.verbosity >= 2:
path = task.get_path()
if path:
self._display.display(u"task path: %s" % path, color=C.COLOR_DEBUG)
def _get_task_banner(self, task):
# args can be specified as no_log in several places: in the task or in
# the argument spec. We can check whether the task is no_log but the
# argument spec can't be because that is only run on the target
# machine and we haven't run it thereyet at this time.
#
# So we give people a config option to affect display of the args so
# that they can secure this if they feel that their stdout is insecure
# (shoulder surfing, logging stdout straight to a file, etc).
args = ''
if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
args = u', '.join(u'%s=%s' % a for a in task.args.items())
args = u' %s' % args
return u"TASK [%s%s]" % (task.get_name().strip(), args)
def v2_runner_on_failed(self, result, ignore_errors=False):
self.display_task_banner()
self.super_ref.v2_runner_on_failed(result, ignore_errors)
def v2_runner_on_ok(self, result):
if result._result.get('changed', False):
self.display_task_banner()
self.super_ref.v2_runner_on_ok(result)
def v2_runner_on_unreachable(self, result):
self.display_task_banner()
self.super_ref.v2_runner_on_unreachable(result)
def v2_runner_on_skipped(self, result):
pass
def v2_playbook_on_include(self, included_file):
pass
def v2_runner_item_on_ok(self, result):
if result._result.get('changed', False):
self.display_task_banner()
self.super_ref.v2_runner_item_on_ok(result)
def v2_runner_item_on_skipped(self, result):
pass
def v2_runner_item_on_failed(self, result):
self.display_task_banner()
self.super_ref.v2_runner_item_on_failed(result)
| # (c) 2015, Andrew Gaffney <andrew@agaffney.org>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: actionable
type: stdout
short_description: shows only items that need attention
description:
- Use this callback when you dont care about OK nor Skipped.
- This callback suppresses any non Failed or Changed status.
version_added: "2.1"
extends_documentation_fragment:
- default_callback
requirements:
- set as stdout callback in configuration
'''
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'actionable'
def __init__(self):
self.super_ref = super(CallbackModule, self)
self.super_ref.__init__()
self.last_task = None
self.shown_title = False
def v2_playbook_on_handler_task_start(self, task):
self.super_ref.v2_playbook_on_handler_task_start(task)
self.shown_title = True
def v2_playbook_on_task_start(self, task, is_conditional):
self.last_task = task
self.shown_title = False
def display_task_banner(self):
if not self.shown_title:
self.super_ref.v2_playbook_on_task_start(self.last_task, None)
self.shown_title = True
def v2_runner_on_failed(self, result, ignore_errors=False):
self.display_task_banner()
self.super_ref.v2_runner_on_failed(result, ignore_errors)
def v2_runner_on_ok(self, result):
if result._result.get('changed', False):
self.display_task_banner()
self.super_ref.v2_runner_on_ok(result)
def v2_runner_on_unreachable(self, result):
self.display_task_banner()
self.super_ref.v2_runner_on_unreachable(result)
def v2_runner_on_skipped(self, result):
pass
def v2_playbook_on_include(self, included_file):
pass
def v2_runner_item_on_ok(self, result):
if result._result.get('changed', False):
self.display_task_banner()
self.super_ref.v2_runner_item_on_ok(result)
def v2_runner_item_on_skipped(self, result):
pass
def v2_runner_item_on_failed(self, result):
self.display_task_banner()
self.super_ref.v2_runner_item_on_failed(result)
| Python | 0 |
ce70c151a8cbc70526e125f829a1fafdf390e9a7 | Make scalars is_active short circuit if apt (#621) | tensorboard/plugins/scalar/scalars_plugin.py | tensorboard/plugins/scalar/scalars_plugin.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The TensorBoard Scalars plugin.
See `http_api.md` in this directory for specifications of the routes for
this plugin.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import six
from six import StringIO
from werkzeug import wrappers
import tensorflow as tf
from tensorboard import plugin_util
from tensorboard.backend import http_util
from tensorboard.plugins import base_plugin
from tensorboard.plugins.scalar import metadata
class OutputFormat(object):
"""An enum used to list the valid output formats for API calls."""
JSON = 'json'
CSV = 'csv'
class ScalarsPlugin(base_plugin.TBPlugin):
"""Scalars Plugin for TensorBoard."""
plugin_name = metadata.PLUGIN_NAME
def __init__(self, context):
"""Instantiates ScalarsPlugin via TensorBoard core.
Args:
context: A base_plugin.TBContext instance.
"""
self._multiplexer = context.multiplexer
def get_plugin_apps(self):
return {
'/scalars': self.scalars_route,
'/tags': self.tags_route,
}
def is_active(self):
"""The scalars plugin is active iff any run has at least one scalar tag."""
if not self._multiplexer:
return False
return bool(self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME))
def index_impl(self):
"""Return {runName: {tagName: {displayName: ..., description: ...}}}."""
runs = self._multiplexer.Runs()
result = {run: {} for run in runs}
mapping = self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME)
for (run, tag_to_content) in six.iteritems(mapping):
for (tag, content) in six.iteritems(tag_to_content):
content = metadata.parse_plugin_metadata(content)
summary_metadata = self._multiplexer.SummaryMetadata(run, tag)
result[run][tag] = {'displayName': summary_metadata.display_name,
'description': plugin_util.markdown_to_safe_html(
summary_metadata.summary_description)}
return result
def scalars_impl(self, tag, run, output_format):
"""Result of the form `(body, mime_type)`."""
tensor_events = self._multiplexer.Tensors(run, tag)
values = [[tensor_event.wall_time,
tensor_event.step,
tf.make_ndarray(tensor_event.tensor_proto).item()]
for tensor_event in tensor_events]
if output_format == OutputFormat.CSV:
string_io = StringIO()
writer = csv.writer(string_io)
writer.writerow(['Wall time', 'Step', 'Value'])
writer.writerows(values)
return (string_io.getvalue(), 'text/csv')
else:
return (values, 'application/json')
@wrappers.Request.application
def tags_route(self, request):
index = self.index_impl()
return http_util.Respond(request, index, 'application/json')
@wrappers.Request.application
def scalars_route(self, request):
"""Given a tag and single run, return array of ScalarEvents."""
# TODO: return HTTP status code for malformed requests
tag = request.args.get('tag')
run = request.args.get('run')
output_format = request.args.get('format')
(body, mime_type) = self.scalars_impl(tag, run, output_format)
return http_util.Respond(request, body, mime_type)
| # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The TensorBoard Scalars plugin.
See `http_api.md` in this directory for specifications of the routes for
this plugin.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import six
from six import StringIO
from werkzeug import wrappers
import tensorflow as tf
from tensorboard import plugin_util
from tensorboard.backend import http_util
from tensorboard.plugins import base_plugin
from tensorboard.plugins.scalar import metadata
class OutputFormat(object):
"""An enum used to list the valid output formats for API calls."""
JSON = 'json'
CSV = 'csv'
class ScalarsPlugin(base_plugin.TBPlugin):
"""Scalars Plugin for TensorBoard."""
plugin_name = metadata.PLUGIN_NAME
def __init__(self, context):
"""Instantiates ScalarsPlugin via TensorBoard core.
Args:
context: A base_plugin.TBContext instance.
"""
self._multiplexer = context.multiplexer
def get_plugin_apps(self):
return {
'/scalars': self.scalars_route,
'/tags': self.tags_route,
}
def is_active(self):
"""The scalars plugin is active iff any run has at least one scalar tag."""
return bool(self._multiplexer) and any(self.index_impl().values())
def index_impl(self):
"""Return {runName: {tagName: {displayName: ..., description: ...}}}."""
runs = self._multiplexer.Runs()
result = {run: {} for run in runs}
mapping = self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME)
for (run, tag_to_content) in six.iteritems(mapping):
for (tag, content) in six.iteritems(tag_to_content):
content = metadata.parse_plugin_metadata(content)
summary_metadata = self._multiplexer.SummaryMetadata(run, tag)
result[run][tag] = {'displayName': summary_metadata.display_name,
'description': plugin_util.markdown_to_safe_html(
summary_metadata.summary_description)}
return result
def scalars_impl(self, tag, run, output_format):
"""Result of the form `(body, mime_type)`."""
tensor_events = self._multiplexer.Tensors(run, tag)
values = [[tensor_event.wall_time,
tensor_event.step,
tf.make_ndarray(tensor_event.tensor_proto).item()]
for tensor_event in tensor_events]
if output_format == OutputFormat.CSV:
string_io = StringIO()
writer = csv.writer(string_io)
writer.writerow(['Wall time', 'Step', 'Value'])
writer.writerows(values)
return (string_io.getvalue(), 'text/csv')
else:
return (values, 'application/json')
@wrappers.Request.application
def tags_route(self, request):
index = self.index_impl()
return http_util.Respond(request, index, 'application/json')
@wrappers.Request.application
def scalars_route(self, request):
"""Given a tag and single run, return array of ScalarEvents."""
# TODO: return HTTP status code for malformed requests
tag = request.args.get('tag')
run = request.args.get('run')
output_format = request.args.get('format')
(body, mime_type) = self.scalars_impl(tag, run, output_format)
return http_util.Respond(request, body, mime_type)
| Python | 0.000001 |
900d872d4d1f8a593f25ac982e48ac86660955fd | Store name unique | bazaar/listings/models.py | bazaar/listings/models.py | from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from ..fields import MoneyField
from ..goods.models import Product
@python_2_unicode_compatible
class Listing(models.Model):
title = models.CharField(max_length=100)
description = models.TextField(max_length=500, blank=True)
sales_units = models.IntegerField(default=1)
# TODO: this should become a gallery
image = models.ImageField(upload_to="listing_images")
product = models.ManyToManyField(Product, related_name="listings")
def __str__(self):
return self.title
@python_2_unicode_compatible
class Store(models.Model):
name = models.CharField(max_length=100, unique=True)
url = models.URLField(blank=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Publishing(models.Model):
external_id = models.CharField(max_length=128)
price = MoneyField()
available_units = models.IntegerField()
published = models.BooleanField(default=False)
last_update = models.DateTimeField(default=timezone.now)
listing = models.ForeignKey(Listing, related_name="publishings")
store = models.ForeignKey(Store, related_name="publishings")
def __str__(self):
return "Publishing %s on %s" % (self.external_id, self.store)
| from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from ..fields import MoneyField
from ..goods.models import Product
@python_2_unicode_compatible
class Listing(models.Model):
title = models.CharField(max_length=100)
description = models.TextField(max_length=500, blank=True)
sales_units = models.IntegerField(default=1)
# TODO: this should become a gallery
image = models.ImageField(upload_to="listing_images")
product = models.ManyToManyField(Product, related_name="listings")
def __str__(self):
return self.title
@python_2_unicode_compatible
class Store(models.Model):
name = models.CharField(max_length=100)
url = models.URLField(blank=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Publishing(models.Model):
external_id = models.CharField(max_length=128)
price = MoneyField()
available_units = models.IntegerField()
published = models.BooleanField(default=False)
last_update = models.DateTimeField(default=timezone.now)
listing = models.ForeignKey(Listing, related_name="publishings")
store = models.ForeignKey(Store, related_name="publishings")
def __str__(self):
return "Publishing %s on %s" % (self.external_id, self.store)
| Python | 0.999952 |
19df232461679b3156f9d5889d59f095e0b97d60 | Add CAN_DETECT | bears/yml/RAMLLintBear.py | bears/yml/RAMLLintBear.py | from coalib.bearlib.abstractions.Linter import linter
from coalib.bears.requirements.NpmRequirement import NpmRequirement
@linter(executable='ramllint',
output_format='regex',
output_regex=r'(?P<severity>error|warning|info).*\n (?P<message>.+) '
r'\[(?P<origin>.+)\]')
class RAMLLintBear:
"""
RAML Linter is a static analysis, linter-like, utility that will enforce
rules on a given RAML document, ensuring consistency and quality.
Note: Files should not have leading empty lines, else the bear fails to
identify the problems correctly.
"""
LANGUAGES = {"RAML"}
REQUIREMENTS = {NpmRequirement('ramllint', '1.2.2')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Syntax', 'Formatting'}
@staticmethod
def create_arguments(filename, file, config_file):
return filename,
| from coalib.bearlib.abstractions.Linter import linter
from coalib.bears.requirements.NpmRequirement import NpmRequirement
@linter(executable='ramllint',
output_format='regex',
output_regex=r'(?P<severity>error|warning|info).*\n (?P<message>.+) '
r'\[(?P<origin>.+)\]')
class RAMLLintBear:
"""
RAML Linter is a static analysis, linter-like, utility that will enforce
rules on a given RAML document, ensuring consistency and quality.
Note: Files should not have leading empty lines, else the bear fails to
identify the problems correctly.
"""
LANGUAGES = {"RAML"}
REQUIREMENTS = {NpmRequirement('ramllint', '1.2.2')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
@staticmethod
def create_arguments(filename, file, config_file):
return filename,
| Python | 0.998251 |
d399f1910df7a14b9f2f36ef1d08cb7bdb839781 | Revise to t_char_count_d and comments | lc0076_minimum_window_substring.py | lc0076_minimum_window_substring.py | """Leetcode 76. Minimum Window Substring
Hard
URL: https://leetcode.com/problems/minimum-window-substring/
Given a string S and a string T, find the minimum window in S which will contain
all the characters in T in complexity O(n).
Example:
Input: S = "ADOBECODEBANC", T = "ABC"
Output: "BANC"
Note:
- If there is no such window in S that covers all characters in T, return the
empty string "".
- If there is such window, you are guaranteed that there will always be only one
unique minimum window in S.
"""
class SolutionCharCountDictTwoPointers(object):
def minWindow(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
Time complexity: O(m+n), where
- m: lenght of s,
- n: lenght of t.
Space complexity: O(m+n).
"""
from collections import defaultdict
# Use dict to collect t's char->count.
t_char_count_d = defaultdict(int)
for c in t:
t_char_count_d[c] += 1
# Track min left & len, and t_counter.
min_left = 0
min_len = float('inf')
t_counter = len(t)
# Apply two pointers with left & right from head as a window.
left, right = 0, 0
# In s, move right to increase window to satify t.
while right < len(s):
# If right char exits in t, decrement t_counter.
if t_char_count_d[s[right]] > 0:
t_counter -= 1
# Decrement t_char_count_d and increment right.
t_char_count_d[s[right]] -= 1
right += 1
# While window satisfies t, move left to shorten it.
while t_counter == 0:
# Update min_len and min_left if improve min_len.
if right - left < min_len:
min_len = right - left
min_left = left
# Before increment left, add back t_char_count_d & t_counter.
t_char_count_d[s[left]] += 1
if t_char_count_d[s[left]] > 0:
t_counter += 1
left += 1
if min_len < float('inf'):
return s[min_left:(min_left + min_len)]
else:
return ''
def main():
# Output: "BANC"
s = "ADOBECODEBANC"
t = "ABC"
print SolutionCharCountDictTwoPointers().minWindow(s, t)
s = "ABBBBBBBBBA"
t = "AA"
print SolutionCharCountDictTwoPointers().minWindow(s, t)
if __name__ == '__main__':
main()
| """Leetcode 76. Minimum Window Substring
Hard
URL: https://leetcode.com/problems/minimum-window-substring/
Given a string S and a string T, find the minimum window in S which will contain
all the characters in T in complexity O(n).
Example:
Input: S = "ADOBECODEBANC", T = "ABC"
Output: "BANC"
Note:
- If there is no such window in S that covers all characters in T, return the
empty string "".
- If there is such window, you are guaranteed that there will always be only one
unique minimum window in S.
"""
class SolutionCharCountDictTwoPointers(object):
def minWindow(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
Time complexity: O(m+n), where
- m: lenght of s,
- n: lenght of t.
Space complexity: O(m+n).
"""
from collections import defaultdict
s_len, t_len = len(s), len(t)
# Use dict to collect char counts of t.
t_char_counts = defaultdict(int)
for c in t:
t_char_counts[c] += 1
# Track min left & len, and counter.
min_left = 0
min_len = float('inf')
counter = t_len
### s = "ADOBECODEBANC"; t = "ABC"
# Apply two pointers method with left & right from head as a window.
left, right = 0, 0
# In s, move right to increase window to satify t.
while right < s_len:
# If the char exits in t, decrement counter.
if t_char_counts[s[right]] > 0:
counter -= 1
# Decrement t_char_counts and increment right.
t_char_counts[s[right]] -= 1
right += 1
# While we found valid window, move left to shorten it.
while counter == 0:
# Update min_len and min_left if improve min_len.
if right - left < min_len:
min_len = right - left
min_left = left
# Before increment left, add back t_char_counts & counter.
t_char_counts[s[left]] += 1
if t_char_counts[s[left]] > 0:
counter += 1
left += 1
if min_len < float('inf'):
return s[min_left:(min_left + min_len)]
else:
return ''
def main():
# Output: "BANC"
s = "ADOBECODEBANC"
t = "ABC"
print SolutionCharCountDictTwoPointers().minWindow(s, t)
if __name__ == '__main__':
main()
| Python | 0.000001 |
9513011caa73cbfa4aec2b96070f482466dde490 | Fix urlshorten output | plugins/internet/url.py | plugins/internet/url.py | # -*- coding: utf-8 -*-
import bot
from html.parser import HTMLParser
class LinksParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.recording = 0
self.data = []
def handle_starttag(self, tag, attributes):
if tag != 'title':
return
if self.recording:
self.recording += 1
return
self.recording = 1
def handle_endtag(self, tag):
if tag == 'title' and self.recording:
self.recording -= 1
def handle_data(self, data):
if self.recording:
self.data.append(data)
class Module(bot.Module):
index = "url"
def register(self):
self.addcommand(
self.title,
"title",
"Get the title of a url.",
["url"])
self.addcommand(
self.urlshorten,
"urlshorten",
"Shorten a url using is.gd",
["-shorturl=custom ending", "-service=is.gd or v.gd", "url"]
)
def title(self, context, args):
try:
r = self.server.rget("http.url").request(args.getstr("url"),
timeout=4)
except self.server.rget("http.url").Error:
return "Error while trying to read that url."
p = LinksParser()
p.feed(r.read())
p.close()
return p.data[-1] if p.data else "No title found."
def urlshorten(self, context, args):
args.default("service", "is.gd")
args.default("shorturl", "")
shorturl = args.getstr("shorturl")
service = args.getstr("service")
if service in ['v.gd', 'is.gd']:
params = {
"url": args.getstr("url"),
"format": "simple",
"shorturl": shorturl
}
serviceurl = "http://" + service + "/create.php"
http = self.server.rget("http.url")
try:
return http.request(serviceurl,
timeout=4,
params=params).read()
except http.HTTPError as error:
return error.read().decode("utf-8")
else:
return "Service must be is.gd or v.gd."
bot.register.module(Module)
| # -*- coding: utf-8 -*-
import bot
from html.parser import HTMLParser
class LinksParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.recording = 0
self.data = []
def handle_starttag(self, tag, attributes):
if tag != 'title':
return
if self.recording:
self.recording += 1
return
self.recording = 1
def handle_endtag(self, tag):
if tag == 'title' and self.recording:
self.recording -= 1
def handle_data(self, data):
if self.recording:
self.data.append(data)
class Module(bot.Module):
index = "url"
def register(self):
self.addcommand(
self.title,
"title",
"Get the title of a url.",
["url"])
self.addcommand(
self.urlshorten,
"urlshorten",
"Shorten a url using is.gd",
["-shorturl=custom ending", "-service=is.gd or v.gd", "url"]
)
def title(self, context, args):
try:
r = self.server.rget("http.url").request(args.getstr("url"),
timeout=4)
except self.server.rget("http.url").Error:
return "Error while trying to read that url."
p = LinksParser()
p.feed(r.read())
p.close()
return p.data[-1] if p.data else "No title found."
def urlshorten(self, context, args):
args.default("service", "is.gd")
args.default("shorturl", "")
shorturl = args.getstr("shorturl")
service = args.getstr("service")
if service in ['v.gd', 'is.gd']:
params = {
"url": args.getstr("url"),
"format": "simple",
"shorturl": shorturl
}
serviceurl = "http://" + service + "/create.php"
http = self.server.rget("http.url")
try:
r = http.request(serviceurl,
timeout=4,
params=params)
except http.HTTPError as error:
r = error
return r.read().decode("utf-8")
else:
return "Service must be is.gd or v.gd."
bot.register.module(Module)
| Python | 0.999998 |
824a2a547218febf61aed8d99eff5ddeeaf6f5ca | Remove unused imports | polyaxon/libs/models.py | polyaxon/libs/models.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from django.db import models
from django.core.cache import cache
class DescribableModel(models.Model):
description = models.TextField(blank=True, null=True)
class Meta:
abstract = True
class DiffModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True, db_index=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class TypeModel(models.Model):
name = models.CharField(max_length=128, unique=True)
schema_definition = models.TextField()
class Meta:
abstract = True
def __str__(self):
return self.name
class Singleton(DiffModel):
"""A base model to represents a singleton."""
class Meta:
abstract = True
def set_cache(self):
cache.set(self.__class__.__name__, self)
def save(self, *args, **kwargs):
self.pk = 1
super(Singleton, self).save(*args, **kwargs)
self.set_cache()
def delete(self, *args, **kwargs):
pass
@classmethod
def may_be_update(cls, obj):
raise NotImplementedError
@classmethod
def load(cls):
raise NotImplementedError
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from django.core.validators import validate_slug
from django.db import models
from django.core.cache import cache
from libs.blacklist import validate_blacklist_name
class DescribableModel(models.Model):
description = models.TextField(blank=True, null=True)
class Meta:
abstract = True
class DiffModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True, db_index=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class TypeModel(models.Model):
name = models.CharField(max_length=128, unique=True)
schema_definition = models.TextField()
class Meta:
abstract = True
def __str__(self):
return self.name
class Singleton(DiffModel):
"""A base model to represents a singleton."""
class Meta:
abstract = True
def set_cache(self):
cache.set(self.__class__.__name__, self)
def save(self, *args, **kwargs):
self.pk = 1
super(Singleton, self).save(*args, **kwargs)
self.set_cache()
def delete(self, *args, **kwargs):
pass
@classmethod
def may_be_update(cls, obj):
raise NotImplementedError
@classmethod
def load(cls):
raise NotImplementedError
| Python | 0.000001 |
814cc6cef757c3eef775240c749a098b1288eef3 | Enable searching for an image in the admin | pombola/images/admin.py | pombola/images/admin.py | from django.contrib import admin
from django.contrib.contenttypes.generic import GenericTabularInline
from sorl.thumbnail import get_thumbnail
from sorl.thumbnail.admin import AdminImageMixin
from pombola.images import models
class ImageAdmin(AdminImageMixin, admin.ModelAdmin):
list_display = [ 'thumbnail', 'content_object', 'is_primary', 'source', ]
search_fields = ['person__legal_name', 'id', 'source']
def thumbnail(self, obj):
if obj.image:
im = get_thumbnail(obj.image, '100x100')
return '<img src="%s" />' % ( im.url )
else:
return "NO IMAGE FOUND"
thumbnail.allow_tags = True
class ImageAdminInline(AdminImageMixin, GenericTabularInline):
model = models.Image
extra = 0
can_delete = True
admin.site.register( models.Image, ImageAdmin )
| from django.contrib import admin
from django.contrib.contenttypes.generic import GenericTabularInline
from sorl.thumbnail import get_thumbnail
from sorl.thumbnail.admin import AdminImageMixin
from pombola.images import models
class ImageAdmin(AdminImageMixin, admin.ModelAdmin):
list_display = [ 'thumbnail', 'content_object', 'is_primary', 'source', ]
def thumbnail(self, obj):
if obj.image:
im = get_thumbnail(obj.image, '100x100')
return '<img src="%s" />' % ( im.url )
else:
return "NO IMAGE FOUND"
thumbnail.allow_tags = True
class ImageAdminInline(AdminImageMixin, GenericTabularInline):
model = models.Image
extra = 0
can_delete = True
admin.site.register( models.Image, ImageAdmin )
| Python | 0 |
6f0fddaad968078fd65ed25f81a3d8345d7924fb | Update Application Commands | kdr/cli.py | kdr/cli.py | import click
import cli_syncthing_adapter
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.version_option()
@click.group(
epilog="Run 'kdr COMMAND --help' for more information on a command.",
context_settings=CONTEXT_SETTINGS
#add_help_option=False,
#options_metavar="\b",
)
@click.pass_context
def main(ctx):
''' A tool to synchronize remote/local directories. '''
pass
#
# Subcommands start
#
@main.command()
def start():
''' Start KodeDrive daemon. '''
output = cli_syncthing_adapter.start()
click.echo("%s" % output)
@main.command()
def stop():
''' Stop KodeDrive daemon. '''
output = cli_syncthing_adapter.stop()
output = output.strip()
click.echo("%s" % output)
@main.command()
@click.option('-a', '--all', is_flag=True, help="Display all application information.")
@click.option('-s', '--status', is_flag=True, help="Return daemon status.")
@click.option('-k', '--key', is_flag=True, help="Display KodeDrive key.")
def info(**kwargs):
''' Display application information. '''
is_default = True
for opt in kwargs:
if kwargs[opt]:
is_default = False
if is_default:
click.echo(click.get_current_context().get_help())
else:
output = cli_syncthing_adapter.info(**kwargs)
click.echo("%s" % output)
@main.command()
@click.argument(
'path',
type=click.Path(exists=True, writable=True, resolve_path=True),
nargs=1, metavar="PATH",
)
def inspect(path):
''' Return information regarding directory. '''
return
@main.command()
@click.option(
'--path', type=click.Path(exists=True),
default=".", nargs=1, metavar="<PATH>",
help="Specify a folder.")
def ls(path):
''' List synchronized directories. '''
return
@main.command()
@click.argument('key', nargs=1)
@click.option(
'-t', '--tag', nargs=1, metavar="<TEXT>",
help="Associate this folder with a tag."
)
@click.option(
'-p', '--path',
type=click.Path(exists=True, writable=True, resolve_path=True),
default=".", nargs=1, metavar="<PATH>",
help="Specify which folder to link."
)
def link(key, tag, path):
''' Synchronize remote/local directory. '''
output = cli_syncthing_adapter.init(key, tag, path)
click.echo("%s" % output)
@main.command()
@click.argument(
'path',
type=click.Path(exists=True, writable=True, resolve_path=True),
nargs=1, metavar="PATH",
)
def unlink(**kwargs):
''' Stop synchronization of directory. '''
return
@main.command()
@click.argument(
'path', nargs=1,
type=click.Path(exists=True, writable=True, resolve_path=True),
)
def refresh(**kwargs):
''' Force synchronization of directory. '''
return
@main.command()
@click.argument('cur', nargs=1)
@click.argument('new', nargs=1)
def retag(cur, new):
''' Change tag associated with directory. '''
return
@main.command()
@click.argument('arg', nargs=1)
def test(arg):
''' Test random functions :) '''
cli_syncthing_adapter.test(arg)
"""
REFERENCE
@cli.command()
@click.argument('src', type=click.Path(exists=True), nargs=1)
@click.argument('dest', nargs=1)
def connect(src, dest):
''' Connect to remote server. '''
output = cli_syncthing_adapter.connect()
click.echo("%s" % output)
@click.group(invoke_without_command=True)
@click.option('-v', '--version', is_flag=True, help='Print version information and quit')
click.echo("%s '%s' is not a valid command." % ('kodedrive:', arg))
click.echo("See 'kodedrive --help'.")
"""
| import click
import cli_syncthing_adapter
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.version_option()
@click.group(
epilog="Run 'kdr COMMAND --help' for more information on a command.",
context_settings=CONTEXT_SETTINGS
#add_help_option=False,
#options_metavar="\b",
)
@click.pass_context
def main(ctx):
''' A tool to synchronize remote/local directories. '''
pass
#
# Subcommands start
#
@main.command()
def start():
''' Start KodeDrive daemon. '''
output = cli_syncthing_adapter.start()
click.echo("%s" % output)
@main.command()
def stop():
''' Stop KodeDrive daemon. '''
output = cli_syncthing_adapter.stop()
output = output.strip()
click.echo("%s" % output)
@main.command()
@click.option('-a', '--all', is_flag=True, help="Display all application information.")
@click.option('-s', '--status', is_flag=True, help="Return daemon status.")
@click.option('-k', '--key', is_flag=True, help="Display KodeDrive key.")
def info(**kwargs):
''' Display application information. '''
is_default = True
for opt in kwargs:
if kwargs[opt]:
is_default = False
if is_default:
click.echo(click.get_current_context().get_help())
else:
output = cli_syncthing_adapter.info(
all=kwargs['all'],
status=kwargs['status'],
key=kwargs['key']
)
click.echo("%s" % output)
@main.command()
@click.argument('name', nargs=1)
def inspect(name):
''' Return information regarding local directory. '''
return
@main.command()
@click.option(
'--path', type=click.Path(exists=True),
default=".", nargs=1, metavar="<PATH>",
help="Specify a folder.")
def ls(path):
''' List synchronized directories. '''
return
@main.command()
@click.argument('key', nargs=1)
@click.option('-n', '--name', nargs=1, metavar="<TEXT>", help="Associate this folder with a name.")
@click.option(
'-p', '--path', type=click.Path(exists=True, writable=True, resolve_path=True),
default=".", nargs=1, metavar="<PATH>",
help="Specify which folder to sync to."
)
def init(key, name, path):
''' Synchronize remote/local directory. '''
output = cli_syncthing_adapter.init(key, name, path)
click.echo("%s" % output)
@main.command()
@click.argument('arg', nargs=1)
def test(arg):
''' Test random functions :) '''
cli_syncthing_adapter.test(arg)
"""
REFERENCE
@cli.command()
@click.argument('src', type=click.Path(exists=True), nargs=1)
@click.argument('dest', nargs=1)
def connect(src, dest):
''' Connect to remote server. '''
output = cli_syncthing_adapter.connect()
click.echo("%s" % output)
@click.group(invoke_without_command=True)
@click.option('-v', '--version', is_flag=True, help='Print version information and quit')
click.echo("%s '%s' is not a valid command." % ('kodedrive:', arg))
click.echo("See 'kodedrive --help'.")
"""
| Python | 0 |
c99d5d30a698aafe3e554c48c9a47dd8be1a5575 | Use imap instead of map | library.py | library.py | import json
import logging
import os
import subprocess
import urllib
import grequests
import numpy
logging.basicConfig()
logger = logging.getLogger("recheck")
logger.setLevel(logging.DEBUG)
def get_change_ids(repo_path, subtree=None, since="6.months"):
"""Return array of change-Ids of merged patches.
returns list starting with most recent change
repo_path: file path of repo
since: how far back to look
"""
change_ids = []
cwd = os.getcwd()
os.chdir(repo_path)
command = "git log --no-merges --since=%s master" % since
if subtree:
command = command + " " + subtree
log = subprocess.check_output(command.split(' '))
os.chdir(cwd)
lines = log.splitlines()
for line in lines:
if line.startswith(" Change-Id: "):
change_id = line.split()[1]
if len(change_id) != 41 or change_id[0] != "I":
raise Exception("Invalid Change-Id: %s" % change_id)
change_ids.append(change_id)
return change_ids
def query_gerrit(template, change_ids, repo_name):
"""query gerrit."""
queries = []
template = "https://review.openstack.org" + template
for change_id in change_ids:
# ChangeIDs can be used in multiple branches/repos
patch_id = urllib.quote_plus("%s~master~" % repo_name) + change_id
queries.append(template % patch_id)
unsent = (grequests.get(query) for query in queries)
for r in grequests.imap(unsent, size=10):
try:
yield json.loads(r.text[4:])
except AttributeError:
# request must have failed, ignore it and move on
logger.debug("failed to parse gerrit response")
pass
def get_change_details(change_ids, repo_name):
"""get gerrit change details for a list of change_id.
Returns a generator
"""
return query_gerrit("/changes/%s/detail", change_ids, repo_name)
def get_latest_revision(change_ids, repo_name):
"""get latest revisions for a list of change_ids.
Returns a generator
"""
return query_gerrit("/changes/%s/revisions/current/review",
change_ids, repo_name)
def stats(values):
print "Average: %s" % numpy.mean(values)
print "median: %s" % numpy.median(values)
print "variance: %s" % numpy.var(values)
| import json
import logging
import os
import subprocess
import urllib
import grequests
import numpy
logging.basicConfig()
logger = logging.getLogger("recheck")
logger.setLevel(logging.DEBUG)
def get_change_ids(repo_path, subtree=None, since="6.months"):
"""Return array of change-Ids of merged patches.
returns list starting with most recent change
repo_path: file path of repo
since: how far back to look
"""
change_ids = []
cwd = os.getcwd()
os.chdir(repo_path)
command = "git log --no-merges --since=%s master" % since
if subtree:
command = command + " " + subtree
log = subprocess.check_output(command.split(' '))
os.chdir(cwd)
lines = log.splitlines()
for line in lines:
if line.startswith(" Change-Id: "):
change_id = line.split()[1]
if len(change_id) != 41 or change_id[0] != "I":
raise Exception("Invalid Change-Id: %s" % change_id)
change_ids.append(change_id)
return change_ids
def query_gerrit(template, change_ids, repo_name):
"""query gerrit."""
queries = []
template = "https://review.openstack.org" + template
for change_id in change_ids:
# ChangeIDs can be used in multiple branches/repos
patch_id = urllib.quote_plus("%s~master~" % repo_name) + change_id
queries.append(template % patch_id)
unsent = (grequests.get(query) for query in queries)
for r in grequests.map(unsent, size=10):
try:
yield json.loads(r.text[4:])
except AttributeError:
# request must have failed, ignore it and move on
logger.debug("failed to parse gerrit response")
pass
def get_change_details(change_ids, repo_name):
"""get gerrit change details for a list of change_id.
Returns a generator
"""
return query_gerrit("/changes/%s/detail", change_ids, repo_name)
def get_latest_revision(change_ids, repo_name):
"""get latest revisions for a list of change_ids.
Returns a generator
"""
return query_gerrit("/changes/%s/revisions/current/review",
change_ids, repo_name)
def stats(values):
print "Average: %s" % numpy.mean(values)
print "median: %s" % numpy.median(values)
print "variance: %s" % numpy.var(values)
| Python | 0 |
b0f5913d5f775062b8d5e253e1403b995b67c81a | Bump to version 3.2.0 | post_office/__init__.py | post_office/__init__.py | VERSION = (3, 2, 0)
from .backends import EmailBackend
default_app_config = 'post_office.apps.PostOfficeConfig'
| VERSION = (3, 2, 0, 'dev')
from .backends import EmailBackend
default_app_config = 'post_office.apps.PostOfficeConfig'
| Python | 0 |
f28732596487a2a0fc664c5444e618ce5c23eccd | fix usage | bin/extract_darkmatter.py | bin/extract_darkmatter.py | #!/usr/bin/env python
import argparse
import leveldb
import os
import shutil
import sys
from Bio import SeqIO
def main(args):
parser = argparse.ArgumentParser(description="Script to extract darkmatter - predicted proteins with no similarities")
parser.add_argument("-i", "--input", dest="input", help="Name of input genecall fasta file.")
parser.add_argument("-o", "--output", dest="output", help="Name of output darkmatter fasta file.")
parser.add_argument("-s", "--sims", dest="sims", help="Name of similarity file")
parser.add_argument("-d", "--db", dest="db", default=".", help="Directory to store LevelDB, default CWD")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="Print informational messages")
args = parser.parse_args()
if ('sims' not in args) or (os.stat(args.sims).st_size == 0):
print "Similarity file was omitted or is empty, copying %s to %s ... " % (args.input, args.output)
shutil.copyfile(args.input, args.output)
return 0
db = leveldb.LevelDB(args.db)
shdl = open(args.sims, 'rU')
if args.verbose:
print "Reading file %s ... " % args.sims
for line in shdl:
parts = line.strip().split('\t')
db.Put(parts[0], 1)
shdl.close()
if args.verbose:
print "Done"
print "Reading file %s ... " % args.input
ihdl = open(args.input, 'rU')
ohdl = open(args.output, 'w')
g_num = 0
d_num = 0
for rec in SeqIO.parse(ihdl, 'fasta'):
g_num += 1
try:
val = db.Get(rec.id)
except KeyError:
d_num += 1
ohdl.write("%s\n%s\n"%(rec.id, str(rec.seq).upper()))
ihdl.close()
ohdl.close()
if args.verbose:
print "Done: %d darkmatter genes found out of %d total" %(d_num, g_num)
return 0
if __name__ == "__main__":
sys.exit( main(sys.argv) )
| #!/usr/bin/env python
import argparse
import leveldb
import os
import shutil
import sys
from Bio import SeqIO
def main(args):
parser = argparse.ArgumentParser(description="Script to extract darkmatter - predicted proteins with no similarities")
parser.add_argument("-i", "--input", help="Name of input genecall fasta file.")
parser.add_argument("-o", "--output", help="Name of output darkmatter fasta file.")
parser.add_argument("-s", "--sims", dest="cfile", help="Name of similarity file")
parser.add_argument("-d", "--db", dest="db", default=".", help="Directory to store LevelDB, default CWD")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="Print informational messages")
args = parser.parse_args()
if ('sims' not in args) or (os.stat(args.sims).st_size == 0):
print "Similarity file was omitted or is empty, copying %s to %s ... " % (args.input, args.output)
shutil.copyfile(args.input, args.output)
return 0
db = leveldb.LevelDB(args.db)
shdl = open(args.sims, 'rU')
if args.verbose:
print "Reading file %s ... " % args.sims
for line in shdl:
parts = line.strip().split('\t')
db.Put(parts[0], 1)
shdl.close()
if args.verbose:
print "Done"
print "Reading file %s ... " % args.input
ihdl = open(args.input, 'rU')
ohdl = open(args.output, 'w')
g_num = 0
d_num = 0
for rec in SeqIO.parse(ihdl, 'fasta'):
g_num += 1
try:
val = db.Get(rec.id)
except KeyError:
d_num += 1
ohdl.write("%s\n%s\n"%(rec.id, str(rec.seq).upper()))
ihdl.close()
ohdl.close()
if args.verbose:
print "Done: %d darkmatter genes found out of %d total" %(d_num, g_num)
return 0
if __name__ == "__main__":
sys.exit( main(sys.argv) )
| Python | 0.000002 |
f93e23db1d5cedbdc75ef6b412f52f8b3800a270 | use more versatile valueByTag mechanism | applications/plugins/RigidScale/python/RigidScale/sml.py | applications/plugins/RigidScale/python/RigidScale/sml.py | import Sofa
import RigidScale.API
import SofaPython.sml
import Compliant.StructuralAPI
import Compliant.sml
printLog = True
def insertRigidScale(parentNode, solidModel, param):
""" create a RigidScale.API.ShearlessAffineBody from the solidModel
"""
if printLog:
Sofa.msg_info("RigidScale.sml", "insertRigidScale "+solidModel.name)
body = RigidScale.API.ShearlessAffineBody(parentNode, solidModel.name)
# massinfo = SofaPython.sml.getSolidRigidMassInfo(rigidModel, density)
# body.setFromRigidInfo(massinfo, offset=solidModel.position , inertia_forces = False )
if (not len(solidModel.mesh)==1):
Sofa.msg_warning("RigidScale.sml", "insertRigidScale support only single mesh solid (nb meshes={0}) - solid {1} ignored".format(len(solidModel.mesh), solidModel.name))
return None
body.setFromMesh(solidModel.mesh[0].source, voxelSize=SofaPython.units.length_from_SI(param.voxelSize), density=SofaPython.units.massDensity_from_SI(1000.), offset=solidModel.position)
body.addElasticBehavior("behavior", stiffness=SofaPython.units.elasticity_from_SI(param.rigidScaleStiffness), poissonCoef=0, numberOfGaussPoint=8)
cm = body.addCollisionMesh(solidModel.mesh[0].source, offset=solidModel.position)
cm.addVisualModel()
body.affineDofs.showObject=param.showAffine
body.affineDofs.showObjectScale=SofaPython.units.length_from_SI(param.showAffineScale)
return body
class SceneArticulatedRigidScale(SofaPython.sml.BaseScene):
""" Builds a (sub)scene from a model using compliant formulation
[tag] solid tagged with rigidScale are simulated as ShearlessAffineBody, more tags can be added to param.rigidScaleTags
[tag] mesh group tagged with rigidScalePosition are used to compute (barycenter) the positions of a rigidScale
Compliant joints are setup between the bones """
def __init__(self, parentNode, model):
SofaPython.sml.BaseScene.__init__(self, parentNode, model)
self.rigidScales = dict()
self.joints = dict()
## params
# the set of tags simulated as rigids
self.param.rigidScaleTags={"rigidScale"}
self.param.voxelSize = 0.005 # SI unit (m)
# simulation
self.param.rigidScaleStiffness = 10e3 # SI unit
# for tagged joints, values come from these dictionnaries if they contain one of the tag
self.param.jointIsComplianceByTag=dict()
self.param.jointIsComplianceByTag["default"]=False
self.param.jointComplianceByTag=dict()
self.param.jointComplianceByTag["default"]=1e-6
# visual
self.param.showAffine=False
self.param.showAffineScale=0.05 # SI unit (m)
self.param.showOffset=False
self.param.showOffsetScale=0.01 # SI unit (m)
def createScene(self):
self.node.createObject('RequiredPlugin', name='image')
self.node.createObject('RequiredPlugin', name='Flexible')
self.node.createObject('RequiredPlugin', name='Compliant')
self.node.createObject('RequiredPlugin', name='RigidScale')
# rigidScale
for tag in self.param.rigidScaleTags:
if tag in self.model.solidsByTag:
for solidModel in self.model.solidsByTag[tag]:
self.rigidScales[solidModel.id] = insertRigidScale(self.node, solidModel, self.param)
# joints
for jointModel in self.model.genericJoints.values():
self.joints[jointModel.id] = Compliant.sml.insertJoint(jointModel, self.rigidScales, self.param)
| import Sofa
import RigidScale.API
import SofaPython.sml
import Compliant.StructuralAPI
import Compliant.sml
printLog = True
def insertRigidScale(parentNode, solidModel, param):
""" create a RigidScale.API.ShearlessAffineBody from the solidModel
"""
if printLog:
Sofa.msg_info("RigidScale.sml", "insertRigidScale "+solidModel.name)
body = RigidScale.API.ShearlessAffineBody(parentNode, solidModel.name)
# massinfo = SofaPython.sml.getSolidRigidMassInfo(rigidModel, density)
# body.setFromRigidInfo(massinfo, offset=solidModel.position , inertia_forces = False )
if (not len(solidModel.mesh)==1):
Sofa.msg_warning("RigidScale.sml", "insertRigidScale support only single mesh solid (nb meshes={0}) - solid {1} ignored".format(len(solidModel.mesh), solidModel.name))
return None
body.setFromMesh(solidModel.mesh[0].source, voxelSize=SofaPython.units.length_from_SI(param.voxelSize), density=SofaPython.units.massDensity_from_SI(1000.), offset=solidModel.position)
body.addElasticBehavior("behavior", stiffness=SofaPython.units.elasticity_from_SI(param.rigidScaleStiffness), poissonCoef=0, numberOfGaussPoint=8)
cm = body.addCollisionMesh(solidModel.mesh[0].source, offset=solidModel.position)
cm.addVisualModel()
body.affineDofs.showObject=param.showAffine
body.affineDofs.showObjectScale=SofaPython.units.length_from_SI(param.showAffineScale)
return body
class SceneArticulatedRigidScale(SofaPython.sml.BaseScene):
""" Builds a (sub)scene from a model using compliant formulation
[tag] solid tagged with rigidScale are simulated as ShearlessAffineBody, more tags can be added to param.rigidScaleTags
[tag] mesh group tagged with rigidScalePosition are used to compute (barycenter) the positions of a rigidScale
Compliant joints are setup between the bones """
def __init__(self, parentNode, model):
SofaPython.sml.BaseScene.__init__(self, parentNode, model)
self.rigidScales = dict()
self.joints = dict()
## params
# the set of tags simulated as rigids
self.param.rigidScaleTags={"rigidScale"}
self.param.voxelSize = 0.005 # SI unit (m)
# simulation
self.param.jointIsCompliance = False
self.param.jointCompliance = 1e-6
self.param.rigidScaleStiffness = 10e3 # SI unit
# for tagged joints, values come from these dictionnaries if they contain one of the tag
self.param.jointIsComplianceByTag=dict()
self.param.jointComplianceByTag=dict()
# visual
self.param.showAffine=False
self.param.showAffineScale=0.05 # SI unit (m)
self.param.showOffset=False
self.param.showOffsetScale=0.01 # SI unit (m)
def createScene(self):
self.node.createObject('RequiredPlugin', name='image')
self.node.createObject('RequiredPlugin', name='Flexible')
self.node.createObject('RequiredPlugin', name='Compliant')
self.node.createObject('RequiredPlugin', name='RigidScale')
# rigidScale
for tag in self.param.rigidScaleTags:
if tag in self.model.solidsByTag:
for solidModel in self.model.solidsByTag[tag]:
self.rigidScales[solidModel.id] = insertRigidScale(self.node, solidModel, self.param)
# joints
for jointModel in self.model.genericJoints.values():
self.joints[jointModel.id] = Compliant.sml.insertJoint(jointModel, self.rigidScales, self.param)
| Python | 0 |
a5857bc5b019dda8baca03bd68f08b4a26a85911 | add import module in init file. | biokit/rtools/__init__.py | biokit/rtools/__init__.py | from .rtools import *
| Python | 0 | |
5ac8d6824a53c05ac233c9dcaf7d39171bafed31 | add params to the grid | fedoracommunity/mokshaapps/demos/controllers/root.py | fedoracommunity/mokshaapps/demos/controllers/root.py | # This file is part of Fedora Community.
# Copyright (C) 2008-2009 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from moksha.lib.base import Controller
from tg import expose, tmpl_context
from moksha.api.widgets import ContextAwareWidget, Grid
from moksha.api.widgets.containers import DashboardContainer
from moksha.lib.helpers import Category, MokshaApp
from tw.api import Widget, JSLink, js_function
from tg import config
orbited_host = config.get('orbited_host', 'localhost')
orbited_port = config.get('orbited_port', 9000)
if orbited_port:
orbited_url = '%s:%s' % (orbited_host, orbited_port)
else:
orbited_url = orbited_host
orbited_js = JSLink(link=orbited_url + '/static/Orbited.js')
kamaloka_protocol_js = JSLink(modname='fedoracommunity.mokshaapps.demos',
filename='js/amqp.protocol.js',
javascript=[orbited_js])
kamaloka_protocol_0_10_js = JSLink(modname='fedoracommunity.mokshaapps.demos',
filename='js/amqp.protocol_0_10.js',
javascript=[kamaloka_protocol_js])
kamaloka_qpid_js = JSLink(modname='fedoracommunity.mokshaapps.demos',
filename='js/qpid_amqp.js',
javascript=[kamaloka_protocol_0_10_js])
timeping_demo_app = MokshaApp('Timeping AMQP Demo', 'fedoracommunity.demos/timeping_demo',
content_id='timeping_demo',
params={'rows_per_page': 10,
'show_title': True,
'filters':{}
})
class DemoContainer(DashboardContainer, ContextAwareWidget):
layout = [Category('full_sized_demo_apps',
timeping_demo_app)
]
demo_container = DemoContainer('demo')
class TimepingGrid(Grid, ContextAwareWidget):
template='mako:fedoracommunity.mokshaapps.demos.templates.timeping_grid'
javascript=Grid.javascript + [kamaloka_qpid_js]
params=['orbited_port', 'orbited_host']
resource=None
resource_path=None
orbited_port=9000
orbited_host='localhost'
timeping_demo_grid = TimepingGrid('timeping_grid')
class RootController(Controller):
@expose('mako:moksha.templates.widget')
def index(self):
options = {}
tmpl_context.widget = demo_container
return {'options':options}
@expose('mako:moksha.templates.widget')
def timeping_demo(self, **kwds):
options = {'orbited_port': orbited_port,
'orbited_host': orbited_host}
tmpl_context.widget = timeping_demo_grid
return {'options':options}
| # This file is part of Fedora Community.
# Copyright (C) 2008-2009 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from moksha.lib.base import Controller
from tg import expose, tmpl_context
from moksha.api.widgets import ContextAwareWidget, Grid
from moksha.api.widgets.containers import DashboardContainer
from moksha.lib.helpers import Category, MokshaApp
from tw.api import Widget, JSLink, js_function
from tg import config
orbited_host = config.get('orbited_host', 'localhost')
orbited_port = config.get('orbited_port', 9000)
if orbited_port:
orbited_url = '%s:%s' % (orbited_host, orbited_port)
else:
orbited_url = orbited_host
orbited_js = JSLink(link=orbited_url + '/static/Orbited.js')
kamaloka_protocol_js = JSLink(modname='fedoracommunity.mokshaapps.demos',
filename='js/amqp.protocol.js',
javascript=[orbited_js])
kamaloka_protocol_0_10_js = JSLink(modname='fedoracommunity.mokshaapps.demos',
filename='js/amqp.protocol_0_10.js',
javascript=[kamaloka_protocol_js])
kamaloka_qpid_js = JSLink(modname='fedoracommunity.mokshaapps.demos',
filename='js/qpid_amqp.js',
javascript=[kamaloka_protocol_0_10_js])
timeping_demo_app = MokshaApp('Timeping AMQP Demo', 'fedoracommunity.demos/timeping_demo',
content_id='timeping_demo',
params={'rows_per_page': 10,
'show_title': True,
'filters':{}
})
class DemoContainer(DashboardContainer, ContextAwareWidget):
layout = [Category('full_sized_demo_apps',
timeping_demo_app)
]
demo_container = DemoContainer('demo')
class TimepingGrid(Grid, ContextAwareWidget):
template='mako:fedoracommunity.mokshaapps.demos.templates.timeping_grid'
javascript=Grid.javascript + [kamaloka_qpid_js]
params=[]
resource=None
resource_path=None
timeping_demo_grid = TimepingGrid('timeping_grid')
class RootController(Controller):
@expose('mako:moksha.templates.widget')
def index(self):
options = {}
tmpl_context.widget = demo_container
return {'options':options}
@expose('mako:moksha.templates.widget')
def timeping_demo(self, **kwds):
options = {'orbited_port': orbited_port,
'orbited_host': orbited_host}
tmpl_context.widget = timeping_demo_grid
return {'options':options}
| Python | 0.000001 |
ca856016d54e4ca19c9b6701f2a4f1061bfb2fda | Wrong column name | printer_tray/printer.py | printer_tray/printer.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Vaucher
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import cups
from openerp import models, fields, api
class Printer(models.Model):
_inherit = 'printing.printer'
tray_ids = fields.One2many(comodel_name='printing.tray',
inverse_name='printer_id',
string='Paper Sources')
@api.multi
def _prepare_update_from_cups(self, cups_connection, cups_printer):
vals = super(Printer, self)._prepare_update_from_cups(cups_connection,
cups_printer)
ppd_file_path = cups_connection.getPPD3(self.system_name)
if not ppd_file_path[2]:
return vals
ppd = cups.PPD(ppd_file_path[2])
option = ppd.findOption('InputSlot')
if not option:
return vals
vals_trays = []
tray_names = set(tray.system_name for tray in self.tray_ids)
for tray_option in option.choices:
if tray_option['choice'] not in tray_names:
tray_vals = {
'name': tray_option['text'],
'system_name': tray_option['choice'],
}
vals_trays.append((0, 0, tray_vals))
cups_trays = set(tray_option['choice'] for tray_option
in option.choices)
for tray in self.tray_ids:
if tray.system_name not in cups_trays:
vals_trays.append((2, tray.id))
vals['tray_ids'] = vals_trays
return vals
@api.multi
def print_options(self, report, format):
""" Hook to define Tray """
printing_act_obj = self.env['printing.report.xml.action']
options = super(Printer, self).print_options(report, format)
# Retrieve user default values
user = self.env.user
tray = user.printer_tray_id
# Retrieve report default values
if report.printer_tray_id:
tray = report.printer_tray_id
# Retrieve report-user specific values
action = printing_act_obj.search([('report_id', '=', report.id),
('user_id', '=', self.env.uid),
('action', '!=', 'user_default')],
limit=1)
if action and action.printer_tray_id:
tray = action.tray_id
if tray:
options['InputSlot'] = str(tray.system_name)
return options
| # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Vaucher
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import cups
from openerp import models, fields, api
class Printer(models.Model):
_inherit = 'printing.printer'
tray_ids = fields.One2many(comodel_name='printing.tray',
inverse_name='printer_id',
string='Paper Sources')
@api.multi
def _prepare_update_from_cups(self, cups_connection, cups_printer):
vals = super(Printer, self)._prepare_update_from_cups(cups_connection,
cups_printer)
ppd_file_path = cups_connection.getPPD3(self.system_name)
if not ppd_file_path[2]:
return vals
ppd = cups.PPD(ppd_file_path[2])
option = ppd.findOption('InputSlot')
if not option:
return vals
vals_trays = []
tray_names = set(tray.system_name for tray in self.tray_ids)
for tray_option in option.choices:
if tray_option['choice'] not in tray_names:
tray_vals = {
'name': tray_option['text'],
'system_name': tray_option['choice'],
}
vals_trays.append((0, 0, tray_vals))
cups_trays = set(tray_option['choice'] for tray_option
in option.choices)
for tray in self.tray_ids:
if tray.system_name not in cups_trays:
vals_trays.append((2, tray.id))
vals['tray_ids'] = vals_trays
return vals
@api.multi
def print_options(self, report, format):
""" Hook to define Tray """
printing_act_obj = self.env['printing.report.xml.action']
options = super(Printer, self).print_options(report, format)
# Retrieve user default values
user = self.env.user
tray = user.printer_tray_id
# Retrieve report default values
if report.printer_tray_id:
tray = report.printer_tray_id
# Retrieve report-user specific values
action = printing_act_obj.search([('report_id', '=', report.id),
('user_id', '=', self.env.uid),
('action', '!=', 'user_default')],
limit=1)
if action and action.tray_id:
tray = action.tray_id
if tray:
options['InputSlot'] = str(tray.system_name)
return options
| Python | 0.890483 |
1a2f3f74a398422fe70d6b482cdd779f728e9a21 | add 'continue' lines, change print to logging | scripts/migrations/034-update_subscriptions_ticket_and_mr_titles.py | scripts/migrations/034-update_subscriptions_ticket_and_mr_titles.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import re
import sys
from pylons import tmpl_context as c
from bson import ObjectId
from ming.odm import session
from ming.orm import ThreadLocalORMSession
from allura import model as M
from forgetracker import model as TM
log = logging.getLogger(__name__)
def main():
task = sys.argv[-1]
c.project = None
# Fix ticket artifcat titles
title = re.compile('^Ticket [0-9]')
subs_tickets = M.Mailbox.query.find(dict(artifact_title=title)).all()
log.info('Found total %d old artifact titles (tickets).', len(subs_tickets))
for sub in subs_tickets:
if not sub.artifact_index_id:
log.info('No artifact_index_id on %s', sub)
continue
ticket = TM.Ticket.query.get(_id = ObjectId(sub.artifact_index_id.split('#')[1]))
if not ticket:
log.info('Could not find ticket for %s', sub)
continue
new_title = 'Ticket #%d: %s' % (ticket.ticket_num, ticket.summary)
log.info('"%s" --> "%s"', sub.artifact_title, new_title)
if(task != 'diff'):
sub.artifact_title = new_title
session(sub).flush(sub)
# Fix merge request artifact titles
title = re.compile('^Merge request: ')
subs_mrs = M.Mailbox.query.find(dict(artifact_title=title)).all()
log.info('Found total %d old artifact titles (merge_requests).', len(subs_tickets))
for sub in subs_mrs:
if not sub.artifact_index_id:
log.info('No artifact_index_id on %s', sub)
continue
merge_request = M.MergeRequest.query.get(_id = ObjectId(sub.artifact_index_id.split('#')[1]))
if not merge_request:
log.info('Could not find merge request for %s', sub)
continue
new_title = 'Merge Request #%d: %s' % (merge_request.request_number, merge_request.summary)
log.info('"%s" --> "%s"', sub.artifact_title , new_title)
if task != 'diff':
sub.artifact_title = new_title
session(sub).flush(sub)
if __name__ == '__main__':
main()
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import re
import sys
from pylons import tmpl_context as c
from bson import ObjectId
from ming.odm import session
from ming.orm import ThreadLocalORMSession
from allura import model as M
from forgetracker import model as TM
log = logging.getLogger(__name__)
def main():
task = sys.argv[-1]
c.project = None
# Fix ticket artifcat titles
title = re.compile('^Ticket [0-9]')
subs_tickets = M.Mailbox.query.find(dict(artifact_title=title)).all()
print 'Found total %d old artifact titles (tickets).' % len(subs_tickets)
for sub in subs_tickets:
ticket = TM.Ticket.query.get(_id = ObjectId(sub.artifact_index_id.split('#')[1]))
if not ticket:
print 'Could not find ticket for %s' % sub
new_title = 'Ticket #%d: %s' % (ticket.ticket_num, ticket.summary)
print '"%s" --> "%s"' % (sub.artifact_title , new_title)
if(task != 'diff'):
sub.artifact_title = new_title
session(sub).flush(sub)
# Fix merge request artifact titles
title = re.compile('^Merge request: ')
subs_mrs = M.Mailbox.query.find(dict(artifact_title=title)).all()
print 'Found total %d old artifact titles (merge_requests).' % len(subs_tickets)
for sub in subs_mrs:
merge_request = M.MergeRequest.query.get(_id = ObjectId(sub.artifact_index_id.split('#')[1]))
if not merge_request:
print 'Could not find merge request for %s' % sub
new_title = 'Merge Request #%d: %s' % (merge_request.request_number, merge_request.summary)
print '"%s" --> "%s"' % (sub.artifact_title , new_title)
if(task != 'diff'):
sub.artifact_title = new_title
session(sub).flush(sub)
if __name__ == '__main__':
main()
| Python | 0.000001 |
94a07652c23f55a20f856550a1ceed549b6b8cd7 | Updated the expected matching strings. | test/global_variables/TestGlobalVariables.py | test/global_variables/TestGlobalVariables.py | """Show global variables and check that they do indeed have global scopes."""
import os, time
import unittest2
import lldb
from lldbtest import *
class GlobalVariablesTestCase(TestBase):
mydir = "global_variables"
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
def test_with_dsym(self):
"""Test 'frame variable -s -a' which omits args and shows scopes."""
self.buildDsym()
self.global_variables()
def test_with_dwarf(self):
"""Test 'frame variable -s -a' which omits args and shows scopes."""
self.buildDwarf()
self.global_variables()
def global_variables(self):
"""Test 'frame variable -s -a' which omits args and shows scopes."""
exe = os.path.join(os.getcwd(), "a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Break inside the main.
self.expect("breakpoint set -f main.c -l 20", BREAKPOINT_CREATED,
startstr = "Breakpoint created: 1: file ='main.c', line = 20, locations = 1")
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs = ['state is Stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list", BREAKPOINT_HIT_ONCE,
substrs = [' resolved, hit count = 1'])
# Check that GLOBAL scopes are indicated for the variables.
self.expect("frame variable -s -g -a", VARIABLES_DISPLAYED_CORRECTLY,
substrs = ['GLOBAL: (char const *) g_file_static_cstr',
'"g_file_static_cstr"',
'GLOBAL: (int) g_file_global_int = 42',
'GLOBAL: (char const *) g_file_global_cstr',
'"g_file_global_cstr"'])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
| """Show global variables and check that they do indeed have global scopes."""
import os, time
import unittest2
import lldb
from lldbtest import *
class GlobalVariablesTestCase(TestBase):
mydir = "global_variables"
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
def test_with_dsym(self):
"""Test 'frame variable -s -a' which omits args and shows scopes."""
self.buildDsym()
self.global_variables()
def test_with_dwarf(self):
"""Test 'frame variable -s -a' which omits args and shows scopes."""
self.buildDwarf()
self.global_variables()
def global_variables(self):
"""Test 'frame variable -s -a' which omits args and shows scopes."""
exe = os.path.join(os.getcwd(), "a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Break inside the main.
self.expect("breakpoint set -f main.c -l 20", BREAKPOINT_CREATED,
startstr = "Breakpoint created: 1: file ='main.c', line = 20, locations = 1")
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs = ['state is Stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list", BREAKPOINT_HIT_ONCE,
substrs = [' resolved, hit count = 1'])
# Check that GLOBAL scopes are indicated for the variables.
self.expect("frame variable -s -a", VARIABLES_DISPLAYED_CORRECTLY,
substrs = ['GLOBAL: g_file_static_cstr',
'"g_file_static_cstr"',
'GLOBAL: g_file_global_int',
'(int) 42',
'GLOBAL: g_file_global_cstr',
'"g_file_global_cstr"'])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
| Python | 0.999999 |
8599480ed93a0117f326689280c7a896d6bf697a | add version 3.1-4 to r-bayesm (#20807) | var/spack/repos/builtin/packages/r-bayesm/package.py | var/spack/repos/builtin/packages/r-bayesm/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RBayesm(RPackage):
"""Bayesian Inference for Marketing/Micro-Econometrics
Covers many important models used in marketing and micro-econometrics
applications. The package includes: Bayes Regression (univariate or
multivariate dep var), Bayes Seemingly Unrelated Regression (SUR), Binary
and Ordinal Probit, Multinomial Logit (MNL) and Multinomial Probit (MNP),
Multivariate Probit, Negative Binomial (Poisson) Regression, Multivariate
Mixtures of Normals (including clustering), Dirichlet Process Prior Density
Estimation with normal base, Hierarchical Linear Models with normal prior
and covariates, Hierarchical Linear Models with a mixture of normals prior
and covariates, Hierarchical Multinomial Logits with a mixture of normals
prior and covariates, Hierarchical Multinomial Logits with a Dirichlet
Process prior and covariates, Hierarchical Negative Binomial Regression
Models, Bayesian analysis of choice-based conjoint data, Bayesian treatment
of linear instrumental variables models, Analysis of Multivariate Ordinal
survey data with scale usage heterogeneity (as in Rossi et al, JASA (01)),
Bayesian Analysis of Aggregate Random Coefficient Logit Models as in BLP
(see Jiang, Manchanda, Rossi 2009) For further reference, consult our book,
Bayesian Statistics and Marketing by Rossi, Allenby and McCulloch (Wiley
2005) and Bayesian Non- and Semi-Parametric Methods and Applications
(Princeton U Press 2014)."""
homepage = "https://cloud.r-project.org/package=bayesm"
url = "https://cloud.r-project.org/src/contrib/bayesm_3.1-0.1.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/bayesm"
version('3.1-4', sha256='061b216c62bc72eab8d646ad4075f2f78823f9913344a781fa53ea7cf4a48f94')
version('3.1-3', sha256='51e4827eca8cd4cf3626f3c2282543df7c392b3ffb843f4bfb386fe104642a10')
version('3.1-2', sha256='a332f16e998ab10b17a2b1b9838d61660c36e914fe4d2e388a59f031d52ad736')
version('3.1-1', sha256='4854517dec30ab7c994de862aae1998c2d0c5e71265fd9eb7ed36891d4676078')
version('3.1-0.1', sha256='5879823b7fb6e6df0c0fe98faabc1044a4149bb65989062df4ade64e19d26411')
depends_on('r@3.2.0:', type=('build', 'run'))
depends_on('r-rcpp@0.12.0:', type=('build', 'run'))
depends_on('r-rcpparmadillo', type=('build', 'run'))
| # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RBayesm(RPackage):
"""Bayesian Inference for Marketing/Micro-Econometrics"""
homepage = "https://cloud.r-project.org/package=bayesm"
url = "https://cloud.r-project.org/src/contrib/bayesm_3.1-0.1.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/bayesm"
version('3.1-3', sha256='51e4827eca8cd4cf3626f3c2282543df7c392b3ffb843f4bfb386fe104642a10')
version('3.1-2', sha256='a332f16e998ab10b17a2b1b9838d61660c36e914fe4d2e388a59f031d52ad736')
version('3.1-1', sha256='4854517dec30ab7c994de862aae1998c2d0c5e71265fd9eb7ed36891d4676078')
version('3.1-0.1', sha256='5879823b7fb6e6df0c0fe98faabc1044a4149bb65989062df4ade64e19d26411')
depends_on('r@3.2.0:', type=('build', 'run'))
depends_on('r-rcpp@0.12.0:', type=('build', 'run'))
depends_on('r-rcpparmadillo', type=('build', 'run'))
| Python | 0 |
52e0f47a3ff67bd0c8a31c6755b384dedd70ee02 | update scalasca to latest version, simplify recipe (#11999) | var/spack/repos/builtin/packages/scalasca/package.py | var/spack/repos/builtin/packages/scalasca/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Scalasca(AutotoolsPackage):
"""Scalasca is a software tool that supports the performance optimization
of parallel programs by measuring and analyzing their runtime
behavior. The analysis identifies potential performance
bottlenecks - in particular those concerning communication and
synchronization - and offers guidance in exploring their causes.
"""
homepage = "http://www.scalasca.org"
url = "http://apps.fz-juelich.de/scalasca/releases/scalasca/2.1/dist/scalasca-2.1.tar.gz"
list_url = "https://scalasca.org/scalasca/front_content.php?idart=1072"
version('2.5', sha256='7dfa01e383bfb8a4fd3771c9ea98ff43772e415009d9f3c5f63b9e05f2dde0f6')
version('2.4', '4a895868258030f700a635eac93d36764f60c8c63673c7db419ea4bcc6b0b760')
version('2.3.1', 'a83ced912b9d2330004cb6b9cefa7585')
version('2.2.2', '2bafce988b0522d18072f7771e491ab9')
version('2.1', 'bab9c2b021e51e2ba187feec442b96e6')
depends_on("mpi")
# version 2.4+
depends_on('cubew@4.4:', when='@2.4:')
# version 2.3+
depends_on('otf2@2:', when='@2.3:')
# version 2.3
depends_on('cube@4.3', when='@2.3:2.3.99')
# version 2.1 - 2.2
depends_on('cube@4.2', when='@2.1:2.2.999')
depends_on('otf2@1.4', when='@2.1:2.2.999')
def url_for_version(self, version):
return 'http://apps.fz-juelich.de/scalasca/releases/scalasca/{0}/dist/scalasca-{1}.tar.gz'.format(version.up_to(2), version)
def configure_args(self):
spec = self.spec
config_args = ["--enable-shared"]
if spec.satisfies('@2.4:'):
config_args.append("--with-cube=%s" % spec['cubew'].prefix.bin)
else:
config_args.append("--with-cube=%s" % spec['cube'].prefix.bin)
config_args.append("--with-otf2=%s" % spec['otf2'].prefix.bin)
if self.spec['mpi'].name == 'openmpi':
config_args.append("--with-mpi=openmpi")
elif self.spec.satisfies('^mpich@3:'):
config_args.append("--with-mpi=mpich3")
return config_args
| # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Scalasca(AutotoolsPackage):
"""Scalasca is a software tool that supports the performance optimization
of parallel programs by measuring and analyzing their runtime
behavior. The analysis identifies potential performance
bottlenecks - in particular those concerning communication and
synchronization - and offers guidance in exploring their causes.
"""
homepage = "http://www.scalasca.org"
url = "http://apps.fz-juelich.de/scalasca/releases/scalasca/2.1/dist/scalasca-2.1.tar.gz"
version('2.4', '4a895868258030f700a635eac93d36764f60c8c63673c7db419ea4bcc6b0b760')
version('2.3.1', 'a83ced912b9d2330004cb6b9cefa7585')
version('2.2.2', '2bafce988b0522d18072f7771e491ab9')
version('2.1', 'bab9c2b021e51e2ba187feec442b96e6')
depends_on("mpi")
# version 2.4
depends_on('cubew@4.4:', when='@2.4:')
# version 2.3
depends_on('cube@4.3', when='@2.3:2.3.99')
depends_on('otf2@2:', when='@2.3:')
# version 2.1+
depends_on('cube@4.2', when='@2.1:2.2.999')
depends_on('otf2@1.4', when='@2.1:2.2.999')
def url_for_version(self, version):
return 'http://apps.fz-juelich.de/scalasca/releases/scalasca/{0}/dist/scalasca-{1}.tar.gz'.format(version.up_to(2), version)
def configure_args(self):
spec = self.spec
config_args = ["--enable-shared"]
if spec.satisfies('@2.4:'):
config_args.append("--with-cube=%s" % spec['cubew'].prefix.bin)
else:
config_args.append("--with-cube=%s" % spec['cube'].prefix.bin)
config_args.append("--with-otf2=%s" % spec['otf2'].prefix.bin)
if self.spec['mpi'].name == 'openmpi':
config_args.append("--with-mpi=openmpi")
elif self.spec.satisfies('^mpich@3:'):
config_args.append("--with-mpi=mpich3")
return config_args
| Python | 0 |
8ffe530025e38d06ffc567fb69e9b96874db3faa | Increase version | conveyor/__init__.py | conveyor/__init__.py | __version__ = "0.1.dev3"
| __version__ = "0.1.dev2"
| Python | 0 |
ce537832eb3d1c0a7ceec213abe1d52c189037c2 | fix a bug in the controller of new courses | course_controller.py | course_controller.py | import os
import urllib
from google.appengine.api import users
from google.appengine.ext import ndb
import jinja2
import webapp2
import main_controller
import app.models.course_model as coursemodel
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class Index(main_controller._BaseHandler):
def get(self):
courses = coursemodel.All()
self.template_values['courses'] = courses
template = JINJA_ENVIRONMENT.get_template('app/views/course/index.html')
self.response.write(template.render(self.template_values))
class Show(main_controller._BaseHandler):
def get(self):
my_key_string = self.request.get('key')
my_key = ndb.Key(urlsafe=my_key_string)
course = coursemodel.Get(key = my_key)
self.template_values['course'] = course
template = JINJA_ENVIRONMENT.get_template('app/views/course/show.html')
self.response.write(template.render(self.template_values))
class New(main_controller._BaseHandler):
def get(self):
template = JINJA_ENVIRONMENT.get_template('app/views/course/new.html')
self.response.write(template.render(self.template_values))
def post(self):
self.course = coursemodel.Insert(name=self.request.get('name'), description=self.request.get('description'), lang=self.request.get('lang'))
#TODO redirect to show web of the new object
self.redirect('/courses/show?key='+self.course.key.urlsafe())
class Edit(main_controller._BaseHandler):
def get(self):
my_key_string = self.request.get('key')
my_key = ndb.Key(urlsafe=my_key_string)
course = coursemodel.Get(my_key)
self.template_values['course'] = course
template = JINJA_ENVIRONMENT.get_template('app/views/course/edit.html')
self.response.write(template.render(self.template_values))
def post(self):
my_key_string = self.request.get('key')
my_key = ndb.Key(urlsafe=my_key_string)
self.course = coursemodel.Update(key = my_key, name=self.request.get('name'), description=self.request.get('description'), lang=self.request.get('lang'))
self.redirect('/courses/show?key='+self.course.key.urlsafe())
class Destroy(main_controller._BaseHandler):
def get(self):
my_key_string = self.request.get('key')
my_key = ndb.Key(urlsafe=my_key_string)
course = coursemodel.Delete(key = my_key)
self.redirect('/courses')
app = webapp2.WSGIApplication([
('/courses', Index),
('/courses/show', Show),
('/courses/new', New),
('/courses/edit', Edit),
('/courses/destroy', Destroy)
], debug=True) | import os
import urllib
from google.appengine.api import users
from google.appengine.ext import ndb
import jinja2
import webapp2
import main_controller
import app.models.course_model as coursemodel
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class Index(main_controller._BaseHandler):
def get(self):
courses = coursemodel.All()
self.template_values['courses'] = courses
template = JINJA_ENVIRONMENT.get_template('app/views/course/index.html')
self.response.write(template.render(self.template_values))
class Show(main_controller._BaseHandler):
def get(self):
my_key_string = self.request.get('key')
my_key = ndb.Key(urlsafe=my_key_string)
course = coursemodel.Get(key = my_key)
self.template_values['course'] = course
template = JINJA_ENVIRONMENT.get_template('app/views/course/show.html')
self.response.write(template.render(self.template_values))
class New(main_controller._BaseHandler):
def get(self):
template = JINJA_ENVIRONMENT.get_template('app/views/course/new.html')
self.response.write(template.render())
def post(self):
self.course = coursemodel.Insert(name=self.request.get('name'), description=self.request.get('description'), lang=self.request.get('lang'))
#TODO redirect to show web of the new object
self.redirect('/courses/show?key='+self.course.key.urlsafe())
class Edit(main_controller._BaseHandler):
def get(self):
my_key_string = self.request.get('key')
my_key = ndb.Key(urlsafe=my_key_string)
course = coursemodel.Get(my_key)
self.template_values['course'] = course
template = JINJA_ENVIRONMENT.get_template('app/views/course/edit.html')
self.response.write(template.render(self.template_values))
def post(self):
my_key_string = self.request.get('key')
my_key = ndb.Key(urlsafe=my_key_string)
self.course = coursemodel.Update(key = my_key, name=self.request.get('name'), description=self.request.get('description'), lang=self.request.get('lang'))
self.redirect('/courses/show?key='+self.course.key.urlsafe())
class Destroy(main_controller._BaseHandler):
def get(self):
my_key_string = self.request.get('key')
my_key = ndb.Key(urlsafe=my_key_string)
course = coursemodel.Delete(key = my_key)
self.redirect('/courses')
app = webapp2.WSGIApplication([
('/courses', Index),
('/courses/show', Show),
('/courses/new', New),
('/courses/edit', Edit),
('/courses/destroy', Destroy)
], debug=True) | Python | 0 |
20d5f5d5e10dcf118639b4ca538ef7537863145a | add cache 2 hours | dv_apps/dvobject_api/views_dataverses.py | dv_apps/dvobject_api/views_dataverses.py | import json
from collections import OrderedDict
from django.shortcuts import render
from django.http import Http404
from django.conf import settings
from django.http import JsonResponse, HttpResponse
from django.template.loader import render_to_string
from django.forms.models import model_to_dict
from django.views.decorators.cache import cache_page
from django.core import serializers
from dv_apps.dataverses.models import Dataverse
from dv_apps.dataverses.util import DataverseUtil
def get_pretty_val(request):
"""Quick check of url param to pretty print JSON"""
if request.GET.get('pretty', None) is not None:
return True
return False
@cache_page(60 * 60 * 2)
def view_single_dataverse_by_alias(request, alias):
try:
dv = Dataverse.objects.select_related('dvobject').get(alias=alias)
except Dataverse.DoesNotExist:
raise Http404
return view_single_dataverse(request, dv)
@cache_page(60 * 60 * 2)
def view_single_dataverse_by_id(request, dataverse_id):
try:
dv = Dataverse.objects.select_related('dvobject').get(dvobject__id =dataverse_id)
except Dataverse.DoesNotExist:
raise Http404
return view_single_dataverse(request, dv)
@cache_page(60 * 15)
def view_single_dataverse(request, dv):
"""
Show JSON for a single Dataverse
"""
if dv is None:
raise Http404
assert isinstance(dv, Dataverse), "dv must be a Dataverse object or None"
is_pretty = request.GET.get('pretty', None)
if is_pretty is not None:
is_pretty = True
resp_dict = OrderedDict()
resp_dict['status'] = "OK"
resp_dict['data'] = DataverseUtil(dv).as_json()
#model_to_dict(dv)
if is_pretty:
s = '<pre>%s</pre>' % json.dumps(resp_dict, indent=4)
return HttpResponse(s)
else:
return JsonResponse(resp_dict)#, content_type='application/json')
| import json
from collections import OrderedDict
from django.shortcuts import render
from django.http import Http404
from django.conf import settings
from django.http import JsonResponse, HttpResponse
from django.template.loader import render_to_string
from django.forms.models import model_to_dict
from django.views.decorators.cache import cache_page
from django.core import serializers
from dv_apps.dataverses.models import Dataverse
from dv_apps.dataverses.util import DataverseUtil
def get_pretty_val(request):
"""Quick check of url param to pretty print JSON"""
if request.GET.get('pretty', None) is not None:
return True
return False
def view_single_dataverse_by_alias(request, alias):
try:
dv = Dataverse.objects.select_related('dvobject').get(alias=alias)
except Dataverse.DoesNotExist:
raise Http404
return view_single_dataverse(request, dv)
def view_single_dataverse_by_id(request, dataverse_id):
try:
dv = Dataverse.objects.select_related('dvobject').get(dvobject__id =dataverse_id)
except Dataverse.DoesNotExist:
raise Http404
return view_single_dataverse(request, dv)
@cache_page(60 * 15)
def view_single_dataverse(request, dv):
"""
Show JSON for a single Dataverse
"""
if dv is None:
raise Http404
assert isinstance(dv, Dataverse), "dv must be a Dataverse object or None"
is_pretty = request.GET.get('pretty', None)
if is_pretty is not None:
is_pretty = True
resp_dict = OrderedDict()
resp_dict['status'] = "OK"
resp_dict['data'] = DataverseUtil(dv).as_json()
#model_to_dict(dv)
if is_pretty:
s = '<pre>%s</pre>' % json.dumps(resp_dict, indent=4)
return HttpResponse(s)
else:
return JsonResponse(resp_dict)#, content_type='application/json')
| Python | 0 |
4e515f070f844569b84eeb77f7e7eda883bc861e | fix class name | easy_my_coop/wizard/update_share_line.py | easy_my_coop/wizard/update_share_line.py | # -*- coding: utf-8 -*-
from openerp import api, fields, models, _
from openerp.exceptions import UserError
class ShareLineUpdateInfo(models.TransientModel):
_name = "share.line.update.info"
@api.model
def _get_share_line(self):
active_id = self.env.context.get('active_id')
return self.env['share.line'].browse(active_id)
@api.model
def _get_effective_date(self):
share_line = self._get_share_line()
return share_line.effective_date
effective_date = fields.Date(string="effective date",
required=True,
default=_get_effective_date)
cooperator = fields.Many2one(related='share_line.partner_id',
string="Cooperator")
share_line = fields.Many2one('share.line',
string="Share line",
default=_get_share_line)
@api.multi
def update(self):
line = self.share_line
cooperator = line.partner_id
sub_reg = self.env['subscription.register'].search(
[('partner_id', '=', cooperator.id),
('share_product_id', '=', line.share_product_id.id),
('quantity', '=', line.share_number),
('date', '=', line.effective_date)])
if sub_reg:
if len(sub_reg) > 1:
raise UserError(_("Error the update return more than one"
" subscription register lines."))
else:
line.effective_date = self.effective_date
sub_reg.date = self.effective_date
return True
| # -*- coding: utf-8 -*-
from openerp import api, fields, models, _
from openerp.exceptions import UserError
class PartnerUpdateInfo(models.TransientModel):
_name = "share.line.update.info"
@api.model
def _get_share_line(self):
active_id = self.env.context.get('active_id')
return self.env['share.line'].browse(active_id)
@api.model
def _get_effective_date(self):
share_line = self._get_share_line()
return share_line.effective_date
effective_date = fields.Date(string="effective date",
required=True,
default=_get_effective_date)
cooperator = fields.Many2one(related='share_line.partner_id',
string="Cooperator")
share_line = fields.Many2one('share.line',
string="Share line",
default=_get_share_line)
@api.multi
def update(self):
line = self.share_line
cooperator = line.partner_id
sub_reg = self.env['subscription.register'].search(
[('partner_id', '=', cooperator.id),
('share_product_id', '=', line.share_product_id.id),
('quantity', '=', line.share_number),
('date', '=', line.effective_date)])
if sub_reg:
if len(sub_reg) > 1:
raise UserError(_("Error the update return more than one"
" subscription register lines."))
else:
line.effective_date = self.effective_date
sub_reg.date = self.effective_date
return True
| Python | 0.000051 |
38db4b0a23e2c2aaf858d0b2bd9d5ae4df819e66 | Move imports in mythicbeastsdns component (#28033) | homeassistant/components/mythicbeastsdns/__init__.py | homeassistant/components/mythicbeastsdns/__init__.py | """Support for Mythic Beasts Dynamic DNS service."""
from datetime import timedelta
import logging
import mbddns
import voluptuous as vol
from homeassistant.const import (
CONF_DOMAIN,
CONF_HOST,
CONF_PASSWORD,
CONF_SCAN_INTERVAL,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_time_interval
_LOGGER = logging.getLogger(__name__)
DOMAIN = "mythicbeastsdns"
DEFAULT_INTERVAL = timedelta(minutes=10)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_DOMAIN): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL): vol.All(
cv.time_period, cv.positive_timedelta
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Initialize the Mythic Beasts component."""
domain = config[DOMAIN][CONF_DOMAIN]
password = config[DOMAIN][CONF_PASSWORD]
host = config[DOMAIN][CONF_HOST]
update_interval = config[DOMAIN][CONF_SCAN_INTERVAL]
session = async_get_clientsession(hass)
result = await mbddns.update(domain, password, host, session=session)
if not result:
return False
async def update_domain_interval(now):
"""Update the DNS entry."""
await mbddns.update(domain, password, host, session=session)
async_track_time_interval(hass, update_domain_interval, update_interval)
return True
| """Support for Mythic Beasts Dynamic DNS service."""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
CONF_DOMAIN,
CONF_HOST,
CONF_PASSWORD,
CONF_SCAN_INTERVAL,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.event import async_track_time_interval
_LOGGER = logging.getLogger(__name__)
DOMAIN = "mythicbeastsdns"
DEFAULT_INTERVAL = timedelta(minutes=10)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_DOMAIN): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL): vol.All(
cv.time_period, cv.positive_timedelta
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Initialize the Mythic Beasts component."""
import mbddns
domain = config[DOMAIN][CONF_DOMAIN]
password = config[DOMAIN][CONF_PASSWORD]
host = config[DOMAIN][CONF_HOST]
update_interval = config[DOMAIN][CONF_SCAN_INTERVAL]
session = async_get_clientsession(hass)
result = await mbddns.update(domain, password, host, session=session)
if not result:
return False
async def update_domain_interval(now):
"""Update the DNS entry."""
await mbddns.update(domain, password, host, session=session)
async_track_time_interval(hass, update_domain_interval, update_interval)
return True
| Python | 0 |
5616573372638f2b195714cf02db8a7a02a4678f | Correct column name | luigi/tasks/rfam/pgload_go_term_mapping.py | luigi/tasks/rfam/pgload_go_term_mapping.py | # -*- coding: utf-8 -*-
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tasks.utils.pgloader import PGLoader
from tasks.go_terms.pgload_go_terms import PGLoadGoTerms
from .go_term_mapping_csv import RfamGoTermsCSV
from .pgload_families import RfamPGLoadFamilies
CONTROL_FILE = """LOAD CSV
FROM '{filename}' WITH ENCODING ISO-8859-14
HAVING FIELDS
(
go_term_id,
rfam_model_id
)
INTO {db_url}
TARGET COLUMNS
(
go_term_id,
rfam_model_id
)
SET
search_path = '{search_path}'
WITH
skip header = 1,
fields escaped by double-quote,
fields terminated by ','
BEFORE LOAD DO
$$
create table if not exists load_rfam_go_terms (
go_term_id character varying(10) COLLATE pg_catalog."default" NOT NULL,
rfam_model_id character varying(20) COLLATE pg_catalog."default" NOT NULL
);
$$,
$$
truncate table load_rfam_go_terms;
$$
AFTER LOAD DO
$$ insert into rfam_go_terms (
go_term_id,
rfam_model_id
) (
select
go_term_id,
rfam_model_id
from load_rfam_go_terms
)
ON CONFLICT (go_term_id, rfam_model_id) DO UPDATE SET
go_term_id = excluded.go_term_id,
rfam_model_id = excluded.rfam_model_id
;
$$,
$$
drop table load_rfam_go_terms;
$$
;
"""
class RfamPGLoadGoTerms(PGLoader): # pylint: disable=R0904
"""
This will run pgloader on the Rfam go term mapping CSV file. The importing
will update any existing mappings and will not produce duplicates.
"""
def requires(self):
return [
RfamGoTermsCSV(),
PGLoadGoTerms(),
RfamPGLoadFamilies(),
]
def control_file(self):
filename = RfamGoTermsCSV().output().fn
return CONTROL_FILE.format(
filename=filename,
db_url=self.db_url(table='load_rfam_go_terms'),
search_path=self.db_search_path(),
)
| # -*- coding: utf-8 -*-
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tasks.utils.pgloader import PGLoader
from tasks.go_terms.pgload_go_terms import PGLoadGoTerms
from .go_term_mapping_csv import RfamGoTermsCSV
from .pgload_families import RfamPGLoadFamilies
CONTROL_FILE = """LOAD CSV
FROM '{filename}' WITH ENCODING ISO-8859-14
HAVING FIELDS
(
go_term_id,
rfam_model_id
)
INTO {db_url}
TARGET COLUMNS
(
go_term_id,
name
)
SET
search_path = '{search_path}'
WITH
skip header = 1,
fields escaped by double-quote,
fields terminated by ','
BEFORE LOAD DO
$$
create table if not exists load_rfam_go_terms (
go_term_id character varying(10) COLLATE pg_catalog."default" NOT NULL,
rfam_model_id character varying(20) COLLATE pg_catalog."default" NOT NULL
);
$$,
$$
truncate table load_rfam_go_terms;
$$
AFTER LOAD DO
$$ insert into rfam_go_terms (
go_term_id,
rfam_model_id
) (
select
go_term_id,
rfam_model_id
from load_rfam_go_terms
)
ON CONFLICT (go_term_id, rfam_model_id) DO UPDATE SET
go_term_id = excluded.go_term_id,
rfam_model_id = excluded.rfam_model_id
;
$$,
$$
drop table load_rfam_go_terms;
$$
;
"""
class RfamPGLoadGoTerms(PGLoader): # pylint: disable=R0904
"""
This will run pgloader on the Rfam go term mapping CSV file. The importing
will update any existing mappings and will not produce duplicates.
"""
def requires(self):
return [
RfamGoTermsCSV(),
PGLoadGoTerms(),
RfamPGLoadFamilies(),
]
def control_file(self):
filename = RfamGoTermsCSV().output().fn
return CONTROL_FILE.format(
filename=filename,
db_url=self.db_url(table='load_rfam_go_terms'),
search_path=self.db_search_path(),
)
| Python | 0 |
1f3325519a72cb98669185149b03b11c1ec25f70 | Fix line number convention | bears/c_languages/CPPLintBear.py | bears/c_languages/CPPLintBear.py | import sys
from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.PipRequirement import PipRequirement
from coalib.settings.Setting import typed_list
@linter(executable='cpplint',
normalize_line_numbers=True,
use_stdout=False,
use_stderr=True,
output_format='regex',
output_regex=r'.+:(?P<line>\d+): (?P<message>.+)')
class CPPLintBear:
"""
Check C++ code for Google's C++ style guide.
For more information, consult <https://github.com/theandrewdavis/cpplint>.
"""
LANGUAGES = {'C++'}
REQUIREMENTS = {PipRequirement('cpplint', '1.3')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Formatting'}
@staticmethod
def create_arguments(filename, file, config_file,
max_line_length: int = 79,
cpplint_ignore: typed_list(str) = (),
cpplint_include: typed_list(str) = (),
):
"""
:param max_line_length:
Maximum number of characters for a line.
When set to 0 allows infinite line length.
:param cpplint_ignore:
List of checkers to ignore.
:param cpplint_include:
List of checkers to explicitly enable.
"""
if not max_line_length:
max_line_length = sys.maxsize
ignore = ','.join('-'+part.strip() for part in cpplint_ignore)
include = ','.join('+'+part.strip() for part in cpplint_include)
return ('--filter=' + ignore + ',' + include,
'--linelength=' + str(max_line_length),
filename)
| import sys
from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.PipRequirement import PipRequirement
from coalib.settings.Setting import typed_list
@linter(executable='cpplint',
use_stdout=False,
use_stderr=True,
output_format='regex',
output_regex=r'.+:(?P<line>\d+): (?P<message>.+)')
class CPPLintBear:
"""
Check C++ code for Google's C++ style guide.
For more information, consult <https://github.com/theandrewdavis/cpplint>.
"""
LANGUAGES = {'C++'}
REQUIREMENTS = {PipRequirement('cpplint', '1.3')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Formatting'}
@staticmethod
def create_arguments(filename, file, config_file,
max_line_length: int = 79,
cpplint_ignore: typed_list(str) = (),
cpplint_include: typed_list(str) = (),
):
"""
:param max_line_length:
Maximum number of characters for a line.
When set to 0 allows infinite line length.
:param cpplint_ignore:
List of checkers to ignore.
:param cpplint_include:
List of checkers to explicitly enable.
"""
if not max_line_length:
max_line_length = sys.maxsize
ignore = ','.join('-'+part.strip() for part in cpplint_ignore)
include = ','.join('+'+part.strip() for part in cpplint_include)
return ('--filter=' + ignore + ',' + include,
'--linelength=' + str(max_line_length),
filename)
| Python | 0 |
58c6868cc95a44100f18f20dfe91764727263005 | Write bytes to fobj if we open it in 'wb' mode | django_babel/management/commands/babel.py | django_babel/management/commands/babel.py | # -*- coding: utf-8 -*-
import os
from distutils.dist import Distribution
from optparse import make_option
from subprocess import call
from django.core.management.base import LabelCommand, CommandError
from django.conf import settings
class Command(LabelCommand):
args = '[makemessages] [compilemessages]'
option_list = LabelCommand.option_list + (
make_option(
'--locale', '-l',
default=None, dest='locale', action='append',
help='Creates or updates the message files for the given locale(s)'
' (e.g pt_BR). Can be used multiple times.'),
make_option('--domain', '-d',
default='django', dest='domain',
help='The domain of the message files (default: "django").'),
make_option('--mapping-file', '-F',
default=None, dest='mapping_file',
help='Mapping file')
)
def handle_label(self, command, **options):
if command not in ('makemessages', 'compilemessages'):
raise CommandError(
"You must either apply 'makemessages' or 'compilemessages'"
)
if command == 'makemessages':
self.handle_makemessages(**options)
if command == 'compilemessages':
self.handle_compilemessages(**options)
def handle_makemessages(self, **options):
locale_paths = list(settings.LOCALE_PATHS)
domain = options.pop('domain')
locales = options.pop('locale')
# support for mapping file specification via setup.cfg
# TODO: Try to support all possible options.
distribution = Distribution()
distribution.parse_config_files(distribution.find_config_files())
mapping_file = options.pop('mapping_file', None)
has_extract = 'extract_messages' in distribution.command_options
if mapping_file is None and has_extract:
opts = distribution.command_options['extract_messages']
try:
mapping_file = opts['mapping_file'][1]
except (IndexError, KeyError):
mapping_file = None
for path in locale_paths:
potfile = os.path.join(path, '%s.pot' % domain)
if not os.path.exists(path):
os.makedirs(path)
if not os.path.exists(potfile):
with open(potfile, 'wb') as fobj:
fobj.write(b'')
cmd = ['pybabel', 'extract', '-o', potfile]
if mapping_file is not None:
cmd.extend(['-F', mapping_file])
cmd.append(os.path.dirname(os.path.relpath(path)))
call(cmd)
for locale in locales:
pofile = os.path.join(
os.path.dirname(potfile),
locale,
'LC_MESSAGES',
'%s.po' % domain)
if not os.path.isdir(os.path.dirname(pofile)):
os.makedirs(os.path.dirname(pofile))
if not os.path.exists(pofile):
with open(pofile, 'wb') as fobj:
fobj.write(b'')
cmd = ['pybabel', 'update', '-D', domain,
'-i', potfile,
'-d', os.path.relpath(path),
'-l', locale]
call(cmd)
def handle_compilemessages(self, **options):
locale_paths = list(settings.LOCALE_PATHS)
domain = options.pop('domain')
locales = options.pop('locale')
for path in locale_paths:
for locale in locales:
po_file = os.path.join(
path, locale, 'LC_MESSAGES', domain + '.po'
)
if os.path.exists(po_file):
cmd = ['pybabel', 'compile', '-D', domain,
'-d', path, '-l', locale]
call(cmd)
| # -*- coding: utf-8 -*-
import os
from distutils.dist import Distribution
from optparse import make_option
from subprocess import call
from django.core.management.base import LabelCommand, CommandError
from django.conf import settings
class Command(LabelCommand):
args = '[makemessages] [compilemessages]'
option_list = LabelCommand.option_list + (
make_option(
'--locale', '-l',
default=None, dest='locale', action='append',
help='Creates or updates the message files for the given locale(s)'
' (e.g pt_BR). Can be used multiple times.'),
make_option('--domain', '-d',
default='django', dest='domain',
help='The domain of the message files (default: "django").'),
make_option('--mapping-file', '-F',
default=None, dest='mapping_file',
help='Mapping file')
)
def handle_label(self, command, **options):
if command not in ('makemessages', 'compilemessages'):
raise CommandError(
"You must either apply 'makemessages' or 'compilemessages'"
)
if command == 'makemessages':
self.handle_makemessages(**options)
if command == 'compilemessages':
self.handle_compilemessages(**options)
def handle_makemessages(self, **options):
locale_paths = list(settings.LOCALE_PATHS)
domain = options.pop('domain')
locales = options.pop('locale')
# support for mapping file specification via setup.cfg
# TODO: Try to support all possible options.
distribution = Distribution()
distribution.parse_config_files(distribution.find_config_files())
mapping_file = options.pop('mapping_file', None)
has_extract = 'extract_messages' in distribution.command_options
if mapping_file is None and has_extract:
opts = distribution.command_options['extract_messages']
try:
mapping_file = opts['mapping_file'][1]
except (IndexError, KeyError):
mapping_file = None
for path in locale_paths:
potfile = os.path.join(path, '%s.pot' % domain)
if not os.path.exists(path):
os.makedirs(path)
if not os.path.exists(potfile):
with open(potfile, 'wb') as fobj:
fobj.write('')
cmd = ['pybabel', 'extract', '-o', potfile]
if mapping_file is not None:
cmd.extend(['-F', mapping_file])
cmd.append(os.path.dirname(os.path.relpath(path)))
call(cmd)
for locale in locales:
pofile = os.path.join(
os.path.dirname(potfile),
locale,
'LC_MESSAGES',
'%s.po' % domain)
if not os.path.isdir(os.path.dirname(pofile)):
os.makedirs(os.path.dirname(pofile))
if not os.path.exists(pofile):
with open(pofile, 'wb') as fobj:
fobj.write('')
cmd = ['pybabel', 'update', '-D', domain,
'-i', potfile,
'-d', os.path.relpath(path),
'-l', locale]
call(cmd)
def handle_compilemessages(self, **options):
locale_paths = list(settings.LOCALE_PATHS)
domain = options.pop('domain')
locales = options.pop('locale')
for path in locale_paths:
for locale in locales:
po_file = os.path.join(
path, locale, 'LC_MESSAGES', domain + '.po'
)
if os.path.exists(po_file):
cmd = ['pybabel', 'compile', '-D', domain,
'-d', path, '-l', locale]
call(cmd)
| Python | 0.000001 |
99e0b2e29ec5baa525dce54a3bfcf69710f5a59b | Fix UserProfile creation | accounts/models.py | accounts/models.py | from django.conf import settings
from django.db import transaction
from django.db.models.signals import post_save
from django.contrib.sites.models import RequestSite
from django.contrib.auth.models import User
from django.db import models
from registration import models as regmodels
from registration.signals import user_registered
from sanitizer.models import SanitizedTextField
from operator import itemgetter
from editor.models import Question, Exam, EditorTag
class RegistrationManager(regmodels.RegistrationManager):
def create_inactive_user(self, username, first_name, last_name, email, password,
site, send_email=True):
"""
Create a new, inactive ``User``, generate a
``RegistrationProfile`` and email its activation key to the
``User``, returning the new ``User``.
By default, an activation email will be sent to the new
user. To disable this, pass ``send_email=False``.
"""
new_user = User.objects.create_user(username, email, password)
if first_name:
new_user.first_name = first_name
if last_name:
new_user.last_name = last_name
new_user.is_active = False
new_user.save()
registration_profile = self.create_profile(new_user)
if send_email:
registration_profile.send_activation_email(site)
return new_user
create_inactive_user = transaction.commit_on_success(create_inactive_user)
class RegistrationProfile(regmodels.RegistrationProfile):
objects = RegistrationManager()
class UserProfile(models.Model):
user = models.OneToOneField(User)
language = models.CharField(max_length=100,default='en-GB')
bio = SanitizedTextField(default='',allowed_tags=settings.SANITIZER_ALLOWED_TAGS,allowed_attributes=settings.SANITIZER_ALLOWED_ATTRIBUTES)
favourite_questions = models.ManyToManyField(Question,blank=True,related_name='fans')
favourite_exams = models.ManyToManyField(Exam,blank=True,related_name='fans')
def sorted_tags(self):
qs = self.user.own_questions
tags = EditorTag.objects.filter(question__author=self.user).distinct()
tag_counts = [(tag,len(qs.filter(tags__id=tag.id))) for tag in tags]
tag_counts.sort(key=itemgetter(1),reverse=True)
return tag_counts
def createUserProfile(sender, instance, user_created, **kwargs):
"""Create a UserProfile object each time a User is created ; and link it.
"""
if user_created:
UserProfile.objects.create(user=instance)
post_save.connect(createUserProfile, sender=User)
| from django.conf import settings
from django.db import transaction
from django.db.models.signals import post_save
from django.contrib.sites.models import RequestSite
from django.contrib.auth.models import User
from django.db import models
from registration import models as regmodels
from registration.signals import user_registered
from sanitizer.models import SanitizedTextField
from operator import itemgetter
from editor.models import Question, Exam, EditorTag
class RegistrationManager(regmodels.RegistrationManager):
def create_inactive_user(self, username, first_name, last_name, email, password,
site, send_email=True):
"""
Create a new, inactive ``User``, generate a
``RegistrationProfile`` and email its activation key to the
``User``, returning the new ``User``.
By default, an activation email will be sent to the new
user. To disable this, pass ``send_email=False``.
"""
new_user = User.objects.create_user(username, email, password)
if first_name:
new_user.first_name = first_name
if last_name:
new_user.last_name = last_name
new_user.is_active = False
new_user.save()
registration_profile = self.create_profile(new_user)
if send_email:
registration_profile.send_activation_email(site)
return new_user
create_inactive_user = transaction.commit_on_success(create_inactive_user)
class RegistrationProfile(regmodels.RegistrationProfile):
objects = RegistrationManager()
class UserProfile(models.Model):
user = models.OneToOneField(User)
language = models.CharField(max_length=100,default='en-GB')
bio = SanitizedTextField(default='',allowed_tags=settings.SANITIZER_ALLOWED_TAGS,allowed_attributes=settings.SANITIZER_ALLOWED_ATTRIBUTES)
favourite_questions = models.ManyToManyField(Question,blank=True,related_name='fans')
favourite_exams = models.ManyToManyField(Exam,blank=True,related_name='fans')
def sorted_tags(self):
qs = self.user.own_questions
tags = EditorTag.objects.filter(question__author=self.user).distinct()
tag_counts = [(tag,len(qs.filter(tags__id=tag.id))) for tag in tags]
tag_counts.sort(key=itemgetter(1),reverse=True)
return tag_counts
def createUserProfile(sender, instance, **kwargs):
"""Create a UserProfile object each time a User is created ; and link it.
"""
UserProfile.objects.get_or_create(user=instance)
post_save.connect(createUserProfile, sender=User)
| Python | 0.000006 |
df7e5f56fdb2a9bc34a0fdf62b5847ee4183d32e | Update import_gist.py | lib/import_gist/bin/import_gist.py | lib/import_gist/bin/import_gist.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def import_gist(url_gist):
'''
import custom functions from gist.github.com
usage: mod_name = import_gist(url_gist)
params:
url_gist: url of gist. be sure to append '/raw/' to the gist url to load script, not html
e.g. https://gist.githubusercontent.com/cosacog/67ac95feef8a2a1cd373d43a86fe2c9c/raw/
'''
import os,sys, urllib, tempfile
import urllib.request
fname_func = 'tmp_func.py' # temporary file name of .py
tmp_dir = tempfile.mkdtemp()
# check url_gist
# append '/' at the end
if url_gist[-1] is not '/':
url_gist = url_gist + '/'
# append 'raw/' at the end
if url_gist[-5:] != '/raw/':
url_gist = url_gist + 'raw/'
urllib.request.urlretrieve(url_gist, filename=os.path.join(tmp_dir,fname_func))
sys.path.append(tmp_dir)
import tmp_func as mod_func
sys.path.remove(tmp_dir)
return mod_func
if __name__ =='__main__':
print("I'm sorry. There is no main script.")
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
def import_gist(url_gist):
'''
import custom functions from gist.github.com
usage: mod_name = import_gist(url_gist)
params:
url_gist: url of gist. be sure to append '/raw/' to the gist url to load script, not html
e.g. https://gist.githubusercontent.com/cosacog/67ac95feef8a2a1cd373d43a86fe2c9c/raw/
'''
import os,sys, urllib, tempfile
fname_func = 'tmp_func.py' # temporary file name of .py
tmp_dir = tempfile.mkdtemp()
# check url_gist
# append '/' at the end
if url_gist[-1] is not '/':
url_gist = url_gist + '/'
# append 'raw/' at the end
if url_gist[-5:] != '/raw/':
url_gist = url_gist + 'raw/'
urllib.request.urlretrieve(url_gist, filename=os.path.join(tmp_dir,fname_func))
sys.path.append(tmp_dir)
import tmp_func as mod_func
sys.path.remove(tmp_dir)
return mod_func
if __name__ =='__main__':
print("I'm sorry. There is no main script.")
| Python | 0.000001 |
7ef1afc579c62fa0c713d8db0bf17eb09b498a0b | Add unittest for random_integers | tests/cupy_tests/random_tests/test_sample.py | tests/cupy_tests/random_tests/test_sample.py | import mock
import unittest
from cupy import cuda
from cupy import testing
from cupy import random
@testing.gpu
class TestRandint(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
device_id = cuda.Device().id
self.m = mock.Mock()
self.m.interval.return_value = 0
random.generator._random_states = {device_id : self.m}
def test_value_error(self):
with self.assertRaises(ValueError):
random.randint(100, 1)
def test_high_and_size_are_none(self):
random.randint(3)
self.m.interval.assert_called_with(3, None)
def test_size_is_none(self):
random.randint(3, 5)
self.m.interval.assert_called_with(2, None)
def test_high_is_none(self):
random.randint(3, None, (1, 2, 3))
self.m.interval.assert_called_with(3, (1, 2, 3))
def test_no_none(self):
random.randint(3, 5, (1, 2, 3))
self.m.interval.assert_called_with(2, (1, 2, 3))
@testing.gpu
class TestRandomIntegers(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
random.sample_.randint = mock.Mock()
def test_normal(self):
random.random_integers(3, 5)
random.sample_.randint.assert_called_with(3, 6, None)
def test_high_is_none(self):
random.random_integers(3, None)
random.sample_.randint.assert_called_with(1, 4, None)
def test_size_is_not_none(self):
random.random_integers(3, 5, (1, 2, 3))
random.sample_.randint.assert_called_with(3, 6, (1, 2, 3))
| import mock
import unittest
from cupy import cuda
from cupy import testing
from cupy import random
@testing.gpu
class TestRandint(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
device_id = cuda.Device().id
self.m = mock.Mock()
self.m.interval.return_value = 0
random.generator._random_states = {device_id : self.m}
def test_value_error(self):
with self.assertRaises(ValueError):
random.randint(100, 1)
def test_high_and_size_are_none(self):
random.randint(3)
self.m.interval.assert_called_with(3, None)
def test_size_is_none(self):
random.randint(3, 5)
self.m.interval.assert_called_with(2, None)
def test_high_is_none(self):
random.randint(3, None, (1, 2, 3))
self.m.interval.assert_called_with(3, (1, 2, 3))
def test_no_none(self):
random.randint(3, 5, (1, 2, 3))
self.m.interval.assert_called_with(2, (1, 2, 3))
| Python | 0.000013 |
5aa2a3e4b724784bbedaa5a436893e5ce28f7c45 | Bump version to 0.2.3 | fluentcms_emailtemplates/__init__.py | fluentcms_emailtemplates/__init__.py | # following PEP 440
__version__ = "0.2.3"
| # following PEP 440
__version__ = "0.2.2"
| Python | 0.000001 |
eda0e5d60ca30a284c0b6b4fc209e595b5484941 | Fix _import_from() on Python 2 | dev/_import.py | dev/_import.py | # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import imp
import sys
import os
from . import build_root, package_name, package_root
if sys.version_info < (3,):
getcwd = os.getcwdu
else:
getcwd = os.getcwd
def _import_from(mod, path, mod_dir=None, allow_error=False):
"""
Imports a module from a specific path
:param mod:
A unicode string of the module name
:param path:
A unicode string to the directory containing the module
:param mod_dir:
If the sub directory of "path" is different than the "mod" name,
pass the sub directory as a unicode string
:param allow_error:
If an ImportError should be raised when the module can't be imported
:return:
None if not loaded, otherwise the module
"""
if mod_dir is None:
mod_dir = mod.replace('.', os.sep)
if not os.path.exists(path):
return None
if not os.path.exists(os.path.join(path, mod_dir)) \
and not os.path.exists(os.path.join(path, mod_dir + '.py')):
return None
if os.sep in mod_dir:
append, mod_dir = mod_dir.rsplit(os.sep, 1)
path = os.path.join(path, append)
try:
mod_info = imp.find_module(mod_dir, [path])
return imp.load_module(mod, *mod_info)
except ImportError:
if allow_error:
raise
return None
def _preload(require_oscrypto, print_info):
"""
Preloads asn1crypto and optionally oscrypto from a local source checkout,
or from a normal install
:param require_oscrypto:
A bool if oscrypto needs to be preloaded
:param print_info:
A bool if info about asn1crypto and oscrypto should be printed
"""
if print_info:
print('Working dir: ' + getcwd())
print('Python ' + sys.version.replace('\n', ''))
asn1crypto = None
oscrypto = None
if require_oscrypto:
# Some CI services don't use the package name for the dir
if package_name == 'oscrypto':
oscrypto_dir = package_root
else:
oscrypto_dir = os.path.join(build_root, 'oscrypto')
oscrypto_tests = None
if os.path.exists(oscrypto_dir):
oscrypto_tests = _import_from('oscrypto_tests', oscrypto_dir, 'tests')
if oscrypto_tests is None:
import oscrypto_tests
asn1crypto, oscrypto = oscrypto_tests.local_oscrypto()
else:
if package_name == 'asn1crypto':
asn1crypto_dir = package_root
else:
asn1crypto_dir = os.path.join(build_root, 'asn1crypto')
if os.path.exists(asn1crypto_dir):
asn1crypto = _import_from('asn1crypto', asn1crypto_dir)
if asn1crypto is None:
import asn1crypto
if print_info:
print(
'\nasn1crypto: %s, %s' % (
asn1crypto.__version__,
os.path.dirname(asn1crypto.__file__)
)
)
if require_oscrypto:
print(
'oscrypto: %s backend, %s, %s' % (
oscrypto.backend(),
oscrypto.__version__,
os.path.dirname(oscrypto.__file__)
)
)
| # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import imp
import sys
import os
from . import build_root, package_name, package_root
if sys.version_info < (3,):
getcwd = os.getcwdu
else:
getcwd = os.getcwd
def _import_from(mod, path, mod_dir=None, allow_error=False):
"""
Imports a module from a specific path
:param mod:
A unicode string of the module name
:param path:
A unicode string to the directory containing the module
:param mod_dir:
If the sub directory of "path" is different than the "mod" name,
pass the sub directory as a unicode string
:param allow_error:
If an ImportError should be raised when the module can't be imported
:return:
None if not loaded, otherwise the module
"""
if mod_dir is None:
mod_dir = mod.replace('.', os.sep)
if not os.path.exists(path):
return None
if not os.path.exists(os.path.join(path, mod_dir)) \
and not os.path.exists(os.path.join(path, mod_dir + '.py')):
return None
try:
mod_info = imp.find_module(mod_dir, [path])
return imp.load_module(mod, *mod_info)
except ImportError:
if allow_error:
raise
return None
def _preload(require_oscrypto, print_info):
"""
Preloads asn1crypto and optionally oscrypto from a local source checkout,
or from a normal install
:param require_oscrypto:
A bool if oscrypto needs to be preloaded
:param print_info:
A bool if info about asn1crypto and oscrypto should be printed
"""
if print_info:
print('Working dir: ' + getcwd())
print('Python ' + sys.version.replace('\n', ''))
asn1crypto = None
oscrypto = None
if require_oscrypto:
# Some CI services don't use the package name for the dir
if package_name == 'oscrypto':
oscrypto_dir = package_root
else:
oscrypto_dir = os.path.join(build_root, 'oscrypto')
oscrypto_tests = None
if os.path.exists(oscrypto_dir):
oscrypto_tests = _import_from('oscrypto_tests', oscrypto_dir, 'tests')
if oscrypto_tests is None:
import oscrypto_tests
asn1crypto, oscrypto = oscrypto_tests.local_oscrypto()
else:
if package_name == 'asn1crypto':
asn1crypto_dir = package_root
else:
asn1crypto_dir = os.path.join(build_root, 'asn1crypto')
if os.path.exists(asn1crypto_dir):
asn1crypto = _import_from('asn1crypto', asn1crypto_dir)
if asn1crypto is None:
import asn1crypto
if print_info:
print(
'\nasn1crypto: %s, %s' % (
asn1crypto.__version__,
os.path.dirname(asn1crypto.__file__)
)
)
if require_oscrypto:
print(
'oscrypto: %s backend, %s, %s' % (
oscrypto.backend(),
oscrypto.__version__,
os.path.dirname(oscrypto.__file__)
)
)
| Python | 0.997305 |
64ae848095215715ea7448c517011d64403dee85 | Remove useless import | geotrek/api/mobile/views/trekking.py | geotrek/api/mobile/views/trekking.py | from __future__ import unicode_literals
from django.conf import settings
from django.db.models import F
from geotrek.api.mobile.serializers import trekking as api_serializers
from geotrek.api.mobile import viewsets as api_viewsets
from geotrek.api.v2.functions import Transform, Length, StartPoint
from geotrek.trekking import models as trekking_models
class TrekViewSet(api_viewsets.GeotrekViewset):
serializer_class = api_serializers.TrekListSerializer
serializer_detail_class = api_serializers.TrekDetailSerializer
filter_fields = ('difficulty', 'themes', 'networks', 'practice')
def get_queryset(self, *args, **kwargs):
queryset = trekking_models.Trek.objects.existing()\
.select_related('topo_object', 'difficulty', 'practice') \
.prefetch_related('topo_object__aggregations', 'themes', 'networks', 'attachments', 'information_desks') \
.order_by('pk').annotate(length_2d_m=Length('geom'))
if self.action == 'list':
queryset = queryset.annotate(start_point=Transform(StartPoint('geom'), settings.API_SRID))
else:
queryset = queryset.annotate(geom2d_transformed=Transform(F('geom'), settings.API_SRID))
return queryset
class POIViewSet(api_viewsets.GeotrekViewset):
serializer_class = api_serializers.POIListSerializer
serializer_detail_class = api_serializers.POIListSerializer
queryset = trekking_models.POI.objects.existing() \
.select_related('topo_object', 'type', ) \
.prefetch_related('topo_object__aggregations', 'attachments') \
.annotate(geom2d_transformed=Transform(F('geom'), settings.API_SRID),
geom3d_transformed=Transform(F('geom_3d'), settings.API_SRID)) \
.order_by('pk') # Required for reliable pagination
filter_fields = ('type',)
| from __future__ import unicode_literals
from django.conf import settings
from django.db.models import F
from rest_framework_extensions.mixins import DetailSerializerMixin
from geotrek.api.mobile.serializers import trekking as api_serializers
from geotrek.api.mobile import viewsets as api_viewsets
from geotrek.api.v2.functions import Transform, Length, StartPoint
from geotrek.trekking import models as trekking_models
class TrekViewSet(api_viewsets.GeotrekViewset):
serializer_class = api_serializers.TrekListSerializer
serializer_detail_class = api_serializers.TrekDetailSerializer
filter_fields = ('difficulty', 'themes', 'networks', 'practice')
def get_queryset(self, *args, **kwargs):
queryset = trekking_models.Trek.objects.existing()\
.select_related('topo_object', 'difficulty', 'practice') \
.prefetch_related('topo_object__aggregations', 'themes', 'networks', 'attachments', 'information_desks') \
.order_by('pk').annotate(length_2d_m=Length('geom'))
if self.action == 'list':
queryset = queryset.annotate(start_point=Transform(StartPoint('geom'), settings.API_SRID))
else:
queryset = queryset.annotate(geom2d_transformed=Transform(F('geom'), settings.API_SRID))
return queryset
class POIViewSet(api_viewsets.GeotrekViewset):
serializer_class = api_serializers.POIListSerializer
serializer_detail_class = api_serializers.POIListSerializer
queryset = trekking_models.POI.objects.existing() \
.select_related('topo_object', 'type', ) \
.prefetch_related('topo_object__aggregations', 'attachments') \
.annotate(geom2d_transformed=Transform(F('geom'), settings.API_SRID),
geom3d_transformed=Transform(F('geom_3d'), settings.API_SRID)) \
.order_by('pk') # Required for reliable pagination
filter_fields = ('type',)
| Python | 0.000004 |
ee487dd220612dfff9cb5e63c602490371e2bdac | Update repo_metadata.py | gitmostwanted/tasks/repo_metadata.py | gitmostwanted/tasks/repo_metadata.py | from gitmostwanted.app import app, db, celery
from gitmostwanted.models.repo import Repo, RepoMean
from gitmostwanted.lib.github import api
from sqlalchemy.sql import func, expression
from datetime import datetime, timedelta
@celery.task()
def metadata_maturity(num_months):
repos = Repo.query\
.filter(Repo.created_at <= datetime.now() + timedelta(days=num_months * 30 * -1))\
.filter(Repo.mature.is_(False))
for repo in repos:
repo.mature = True
db.session.commit()
return repos.count()
@celery.task()
def metadata_refresh(num_days):
repos = Repo.query\
.filter(
Repo.checked_at.is_(None) |
(Repo.checked_at <= datetime.now() + timedelta(days=num_days * -1))
)\
.yield_per(25)\
.limit(300) # GitHub allows only 3000 calls per day within a token
for repo in repos:
repo.checked_at = datetime.now()
details, code = api.repo_info(repo.full_name)
if not details:
if 400 <= code < 500:
repo.worth -= 1
app.logger.info(
'{0} is not found, the "worth" has been decreased by 1'.format(repo.full_name)
)
continue
for key in ['description', 'language', 'homepage', 'stargazers_count']:
if getattr(repo, key) != details[key]:
setattr(repo, key, details[key])
db.session.commit()
return repos.count()
@celery.task()
def metadata_trend(num_days):
results = db.session.query(
RepoMean.repo_id, func.substring_index(
func.group_concat(
RepoMean.value.op('ORDER BY')(expression.desc(RepoMean.created_at))
), ',', 2)
)\
.filter(RepoMean.created_at >= datetime.now() + timedelta(days=num_days * -1))\
.group_by(RepoMean.repo_id)\
.all()
for result in filter(lambda x: ',' in x[1], results):
curr, prev = result[1].split(',')
if curr < prev:
app.logger.info(
'Mean value of {0} is {1}, previous was {2}. The "worth" has been decreased by 1'
.format(result[0], curr, prev)
)
db.session.query(Repo)\
.filter(Repo.id == result[0])\
.update({Repo.worth: Repo.worth - 1})
db.session.commit()
@celery.task()
def metadata_erase():
cnt = Repo.query.filter(Repo.worth < 5).delete()
db.session.commit()
return cnt
| from gitmostwanted.app import app, db, celery
from gitmostwanted.models.repo import Repo, RepoMean
from gitmostwanted.lib.github import api
from sqlalchemy.sql import func, expression
from datetime import datetime, timedelta
@celery.task()
def metadata_maturity(num_months):
repos = Repo.query\
.filter(Repo.created_at <= datetime.now() + timedelta(days=num_months * 30 * -1))\
.filter(Repo.mature.is_(False))
for repo in repos:
repo.mature = True
db.session.commit()
return repos.count()
@celery.task()
def metadata_refresh(num_days):
repos = Repo.query\
.filter(
Repo.checked_at.is_(None) |
(Repo.checked_at <= datetime.now() + timedelta(days=num_days * -1))
)\
.yield_per(25)\
.limit(300) # GitHub allows only 3000 calls per day within a token
for repo in repos:
repo.checked_at = datetime.now()
details, code = api.repo_info(repo.full_name)
if not details:
if 400 <= code < 500:
repo.worth -= 1
app.logger.info(
'{0} is not found, the "worth" has been decreased by 1'.format(repo.full_name)
)
continue
for key in ['description', 'language', 'homepage', 'stargazers_count']:
if getattr(repo, key) != details[key]:
setattr(repo, key, details[key])
db.session.commit()
return repos.count()
@celery.task()
def metadata_trend(num_days):
results = db.session.query(
RepoMean.repo_id, func.substring_index(
func.group_concat(
RepoMean.value.op('ORDER BY')(expression.desc(RepoMean.created_at))
), ',', 2)
)\
.filter(RepoMean.created_at >= datetime.now() + timedelta(days=num_days * -1))\
.group_by(RepoMean.repo_id)\
.all()
for result in filter(lambda x: ',' in x[1], results):
curr, prev = result[1].split(',')
if curr < prev:
app.logger.info(
'Mean value of {0} is {1}, previous was {2}. The "worth" has been decreased by 1'
.format(result[0], curr, prev)
)
db.session.query(Repo)\
.filter(Repo.id == result[0])\
.update({Repo.worth: Repo.worth - 1})
db.session.commit()
@celery.task()
def metadata_erase():
cnt = Repo.query.filter(Repo.worth < 0).delete()
db.session.commit()
return cnt
| Python | 0.000002 |
6105e355cf0275e00f284ac6658454905a9b9a07 | change import of tfpark | python/chronos/src/bigdl/chronos/forecaster/tfpark_forecaster.py | python/chronos/src/bigdl/chronos/forecaster/tfpark_forecaster.py | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABCMeta, abstractmethod
from bigdl.orca.tfpark import KerasModel as TFParkKerasModel
import tensorflow as tf
from bigdl.chronos.forecaster.abstract import Forecaster
class TFParkForecaster(TFParkKerasModel, Forecaster, metaclass=ABCMeta):
"""
Base class for TFPark KerasModel based Forecast models.
"""
def __init__(self):
"""
Build a tf.keras model.
Turns the tf.keras model returned from _build into a tfpark.KerasModel
"""
self.model = self._build()
assert (isinstance(self.model, tf.keras.Model))
super().__init__(self.model)
@abstractmethod
def _build(self):
"""
Build a tf.keras model.
:return: a tf.keras model (compiled)
"""
pass
| #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABCMeta, abstractmethod
from zoo.tfpark import KerasModel as TFParkKerasModel
import tensorflow as tf
from bigdl.chronos.forecaster.abstract import Forecaster
class TFParkForecaster(TFParkKerasModel, Forecaster, metaclass=ABCMeta):
"""
Base class for TFPark KerasModel based Forecast models.
"""
def __init__(self):
"""
Build a tf.keras model.
Turns the tf.keras model returned from _build into a tfpark.KerasModel
"""
self.model = self._build()
assert (isinstance(self.model, tf.keras.Model))
super().__init__(self.model)
@abstractmethod
def _build(self):
"""
Build a tf.keras model.
:return: a tf.keras model (compiled)
"""
pass
| Python | 0 |
60ecc08395eb266f09aa8587bf38aceb59a2b968 | Update Scramble_String.py | Array/Scramble_String.py | Array/Scramble_String.py | """
Given a string s1, we may represent it as a binary tree by partitioning it to two non-empty substrings recursively.
Below is one possible representation of s1 = "great":
great
/ \
gr eat
/ \ / \
g r e at
/ \
a t
To scramble the string, we may choose any non-leaf node and swap its two children.
For example, if we choose the node "gr" and swap its two children, it produces a scrambled string "rgeat".
rgeat
/ \
rg eat
/ \ / \
r g e at
/ \
a t
We say that "rgeat" is a scrambled string of "great".
Similarly, if we continue to swap the children of nodes "eat" and "at", it produces a scrambled string "rgtae".
rgtae
/ \
rg tae
/ \ / \
r g ta e
/ \
t a
We say that "rgtae" is a scrambled string of "great".
Given two strings s1 and s2 of the same length, determine if s2 is a scrambled string of s1.
"""
class Solution:
# @return a boolean
def isScramble(self, s1, s2):
if len(s1) != len(s2):
return False
if s1 == s2:
return True
length = len(list(s1))
if sorted(s1) != sorted(s2):
return False
for i in xrange(1,length):
if self.isScramble(s1[:i],s2[:i]) and self.isScramble(s1[i:],s2[i:]):
return True
if self.isScramble(s1[:i], s2[-i:]) and self.isScramble(s1[i:],s2[:-i]):
return True
return False
# Note:
# Condition: 1) length_s1 != length_s2
# 2) s1 == s2, s1与s2完全相等
# 3) sorted(s1) 与 sorted(s2)是不是相等
# 4) 比较s1[:i] s2[:i] and s1[i:],s2[i:]
# 5) 比较s1[:i], s2[length_s2-i:] and s1[i:],s2[length_s2:-i]
| Given a string s1, we may represent it as a binary tree by partitioning it to two non-empty substrings recursively.
Below is one possible representation of s1 = "great":
great
/ \
gr eat
/ \ / \
g r e at
/ \
a t
To scramble the string, we may choose any non-leaf node and swap its two children.
For example, if we choose the node "gr" and swap its two children, it produces a scrambled string "rgeat".
rgeat
/ \
rg eat
/ \ / \
r g e at
/ \
a t
We say that "rgeat" is a scrambled string of "great".
Similarly, if we continue to swap the children of nodes "eat" and "at", it produces a scrambled string "rgtae".
rgtae
/ \
rg tae
/ \ / \
r g ta e
/ \
t a
We say that "rgtae" is a scrambled string of "great".
Given two strings s1 and s2 of the same length, determine if s2 is a scrambled string of s1.
class Solution:
# @return a boolean
def isScramble(self, s1, s2):
if len(s1) != len(s2):
return False
if s1 == s2:
return True
length = len(list(s1))
if sorted(s1) != sorted(s2):
return False
for i in xrange(1,length):
if self.isScramble(s1[:i],s2[:i]) and self.isScramble(s1[i:],s2[i:]):
return True
if self.isScramble(s1[:i], s2[-i:]) and self.isScramble(s1[i:],s2[:-i]):
return True
return False
# Note:
# Condition: 1) length_s1 != length_s2
# 2) s1 == s2, s1与s2完全相等
# 3) sorted(s1) 与 sorted(s2)是不是相等
# 4) 比较s1[:i] s2[:i] and s1[i:],s2[i:]
# 5) 比较s1[:i], s2[length_s2-i:] and s1[i:],s2[length_s2:-i]
| Python | 0.000002 |
04def6c69d5ee35edb1b2d7d2d10f7aa18b2eb47 | Move fermi activation flag out of route mapper. | palm/blink_factory.py | palm/blink_factory.py | import numpy
from palm.base.model_factory import ModelFactory
from palm.blink_model import BlinkModel
from palm.blink_state_enumerator import SingleDarkState, DoubleDarkState,\
SingleDarkStateEnumeratorFactory,\
DoubleDarkStateEnumeratorFactory
from palm.blink_route_mapper import Route, SingleDarkRouteMapperFactory,\
DoubleDarkRouteMapperFactory
class SingleDarkBlinkFactory(ModelFactory):
'''
This factory class creates an aggregated kinetic model with
the following microstate topology:
I --> A
A <--> D
A --> B
'''
def __init__(self, fermi_activation=False, MAX_A=10):
self.state_factory = SingleDarkState
self.route_factory = Route
self.fermi_activation = fermi_activation
self.MAX_A = MAX_A
def create_model(self, parameter_set):
self.parameter_set = parameter_set
N = self.parameter_set.get_parameter('N')
state_enumerator_factory = SingleDarkStateEnumeratorFactory(
N, self.state_factory, self.MAX_A)
state_enumerator = state_enumerator_factory.create_state_enumerator()
route_mapper_factory = SingleDarkRouteMapperFactory(
parameter_set=self.parameter_set,
route_factory=self.route_factory,
max_A=self.MAX_A)
route_mapper = route_mapper_factory.create_route_mapper()
new_model = BlinkModel(state_enumerator, route_mapper,
self.parameter_set, self.fermi_activation)
return new_model
class DoubleDarkBlinkFactory(ModelFactory):
'''
This factory class creates an aggregated kinetic model with
the following microstate topology:
I --> A
A <--> D1
A <--> D2
A --> B
'''
def __init__(self, fermi_activation=False, MAX_A=10):
self.state_factory = DoubleDarkState
self.route_factory = Route
self.fermi_activation = fermi_activation
self.MAX_A = MAX_A
def create_model(self, parameter_set):
self.parameter_set = parameter_set
N = self.parameter_set.get_parameter('N')
state_enumerator_factory = DoubleDarkStateEnumeratorFactory(
N, self.state_factory, self.MAX_A)
state_enumerator = state_enumerator_factory.create_state_enumerator()
route_mapper_factory = DoubleDarkRouteMapperFactory(
parameter_set=self.parameter_set,
route_factory=self.route_factory,
max_A=self.MAX_A)
route_mapper = route_mapper_factory.create_route_mapper()
new_model = BlinkModel(state_enumerator, route_mapper,
self.parameter_set)
return new_model
| import numpy
from palm.base.model_factory import ModelFactory
from palm.blink_model import BlinkModel
from palm.blink_state_enumerator import SingleDarkState, DoubleDarkState,\
SingleDarkStateEnumeratorFactory,\
DoubleDarkStateEnumeratorFactory
from palm.blink_route_mapper import Route, SingleDarkRouteMapperFactory,\
DoubleDarkRouteMapperFactory
class SingleDarkBlinkFactory(ModelFactory):
'''
This factory class creates an aggregated kinetic model with
the following microstate topology:
I --> A
A <--> D
A --> B
'''
def __init__(self, fermi_activation=False, MAX_A=10):
self.state_factory = SingleDarkState
self.route_factory = Route
self.fermi_activation = fermi_activation
self.MAX_A = MAX_A
def create_model(self, parameter_set):
self.parameter_set = parameter_set
N = self.parameter_set.get_parameter('N')
state_enumerator_factory = SingleDarkStateEnumeratorFactory(
N, self.state_factory, self.MAX_A)
state_enumerator = state_enumerator_factory.create_state_enumerator()
route_mapper_factory = SingleDarkRouteMapperFactory(
parameter_set=self.parameter_set,
route_factory=self.route_factory,
max_A=self.MAX_A)
route_mapper = route_mapper_factory.create_route_mapper()
new_model = BlinkModel(state_enumerator, route_mapper,
self.parameter_set, self.fermi_activation)
return new_model
class DoubleDarkBlinkFactory(ModelFactory):
'''
This factory class creates an aggregated kinetic model with
the following microstate topology:
I --> A
A <--> D1
A <--> D2
A --> B
'''
def __init__(self, fermi_activation=False, MAX_A=10):
self.state_factory = DoubleDarkState
self.route_factory = Route
self.fermi_activation = fermi_activation
self.MAX_A = MAX_A
def create_model(self, parameter_set):
self.parameter_set = parameter_set
N = self.parameter_set.get_parameter('N')
state_enumerator_factory = DoubleDarkStateEnumeratorFactory(
N, self.state_factory, self.MAX_A)
state_enumerator = state_enumerator_factory.create_state_enumerator()
route_mapper_factory = DoubleDarkRouteMapperFactory(
parameter_set=self.parameter_set,
route_factory=self.route_factory,
max_A=self.MAX_A,
fermi_activation=self.fermi_activation)
route_mapper = route_mapper_factory.create_route_mapper()
new_model = BlinkModel(state_enumerator, route_mapper,
self.parameter_set)
return new_model
| Python | 0 |
f89f170aa8d672a891547ed7ac77d806cda697c4 | Update TestWebserver.py | emission/tests/netTests/TestWebserver.py | emission/tests/netTests/TestWebserver.py | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import *
import unittest
import json
import sys
import os
import uuid
import logging
import time
# Our imports
import emission.tests.common as etc
import emission.net.api.cfc_webapp as enacw
import importlib
class TestWebserver(unittest.TestCase):
def setUp(self):
import shutil
self.webserver_conf_path = "conf/net/api/webserver.conf"
shutil.copyfile(
"%s.sample" % self.webserver_conf_path, self.webserver_conf_path
)
with open(self.webserver_conf_path, "w") as fd:
fd.write(
json.dumps(
{
"paths": {
"static_path": "webapp/www",
"python_path": "main",
"log_base_dir": ".",
"log_file": "debug.log",
"404_redirect": "http://somewhere.else",
},
"server": {
"host": "0.0.0.0",
"port": "8080",
"timeout": "3600",
"auth": "skip",
"aggregate_call_auth": "no_auth",
},
}
)
)
logging.debug("Finished setting up %s" % self.webserver_conf_path)
with open(self.webserver_conf_path) as fd:
logging.debug("Current values are %s" % json.load(fd))
def tearDown(self):
os.remove(self.webserver_conf_path)
def test404Redirect(self):
from emission.net.api.bottle import response
importlib.reload(enacw)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get_header("Location"), None)
enacw.error404("")
self.assertEqual(response.status_code, 301)
self.assertEqual(response.get_header("Location"), "http://somewhere.else")
def testResolveAuth(self):
self.assertEqual(enacw.resolve_auth("skip"),"skip")
self.assertEqual(enacw.resolve_auth("token_list"),"token_list")
self.assertEqual(enacw.resolve_auth("dynamic"),"token_list")
self.assertNotEqual(enacw.resolve_auth("dynamic"),"skip")
from unittest import mock
@mock.patch.dict(os.environ, {"STUDY_CONFIG":"nrel-commute"}, clear=True)
def test_ResolveAuthWithEnvVar(self):
importlib.reload(enacw)
self.assertEqual(enacw.resolve_auth("dynamic"),"skip")
if __name__ == "__main__":
etc.configLogging()
unittest.main()
| from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import *
import unittest
import json
import sys
import os
import uuid
import logging
import time
# Our imports
import emission.tests.common as etc
import emission.net.api.cfc_webapp as enacw
import importlib
class TestWebserver(unittest.TestCase):
def setUp(self):
import shutil
self.webserver_conf_path = "conf/net/api/webserver.conf"
shutil.copyfile(
"%s.sample" % self.webserver_conf_path, self.webserver_conf_path
)
with open(self.webserver_conf_path, "w") as fd:
fd.write(
json.dumps(
{
"paths": {
"static_path": "webapp/www",
"python_path": "main",
"log_base_dir": ".",
"log_file": "debug.log",
"404_redirect": "http://somewhere.else",
},
"server": {
"host": "0.0.0.0",
"port": "8080",
"timeout": "3600",
"auth": "skip",
"aggregate_call_auth": "no_auth",
},
}
)
)
logging.debug("Finished setting up %s" % self.webserver_conf_path)
with open(self.webserver_conf_path) as fd:
logging.debug("Current values are %s" % json.load(fd))
def tearDown(self):
os.remove(self.webserver_conf_path)
def test404Redirect(self):
from emission.net.api.bottle import response
importlib.reload(enacw)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get_header("Location"), None)
enacw.error404("")
self.assertEqual(response.status_code, 301)
self.assertEqual(response.get_header("Location"), "http://somewhere.else")
def testResolveAuth(self):
import emission.net.api.cfc_webapp as enacw
self.assertEqual(enacw.resolve_auth("skip"),"skip")
self.assertEqual(enacw.resolve_auth("token_list"),"token_list")
self.assertEqual(enacw.resolve_auth("dynamic"),"token_list")
self.assertNotEqual(enacw.resolve_auth("dynamic"),"skip")
from unittest import mock
@mock.patch.dict(os.environ, {"STUDY_CONFIG":"nrel-commute"}, clear=True)
def test_ResolveAuthWithEnvVar(self):
importlib.reload(enacw)
self.assertEqual(enacw.resolve_auth("dynamic"),"skip")
if __name__ == "__main__":
etc.configLogging()
unittest.main()
| Python | 0.000001 |
37e8452ad999f42746be395d193a306f9a893dbf | Update rpc.py | ncclient/operations/third_party/juniper/rpc.py | ncclient/operations/third_party/juniper/rpc.py | from ncclient.xml_ import *
from ncclient.operations.rpc import RPC
from ncclient.operations.rpc import RPCReply
from ncclient.operations.rpc import RPCError
class GetConfiguration(RPC):
def request(self, format='xml', filter=None):
node = new_ele('get-configuration', {'format':format})
if filter is not None:
node.append(filter)
return self._request(node)
class LoadConfiguration(RPC):
def request(self, format='xml', action='merge',
target='candidate', config=None):
if config is not None:
if type(config) == list:
config = '\n'.join(config)
if action == 'set':
format = 'text'
node = new_ele('load-configuration', {'action':action, 'format':format})
if format == 'xml':
config_node = sub_ele(node, 'configuration')
config_node.append(config)
if format == 'text' and not action == 'set':
config_node = sub_ele(node, 'configuration-text').text = config
if action == 'set' and format == 'text':
config_node = sub_ele(node, 'configuration-set').text = config
return self._request(node)
class CompareConfiguration(RPC):
def request(self, rollback=0):
node = new_ele('get-configuration', {'compare':'rollback', 'rollback':str(rollback)})
return self._request(node)
class ExecuteRpc(RPC):
def request(self, rpc):
if isinstance(rpc, str):
rpc = to_ele(rpc)
return self._request(rpc)
class Command(RPC):
def request(self, command=None, format='xml'):
node = new_ele('command', {'format':format})
node.text = command
return self._request(node)
class Reboot(RPC):
def request(self):
node = new_ele('request-reboot')
return self._request(node)
class Halt(RPC):
def request(self):
node = new_ele('request-halt')
return self._request(node)
| from ncclient.xml_ import *
from ncclient.operations.rpc import RPC
from ncclient.operations.rpc import RPCReply
from ncclient.operations.rpc import RPCError
class GetConfiguration(RPC):
def request(self, format='xml', filter=None):
node = new_ele('get-configuration', {'format':format})
if filter is not None:
node.append(filter)
return self._request(node)
class LoadConfiguration(RPC):
def request(self, format='xml', action='merge',
target='candidate', config=None):
if config is not None:
if type(config) == list:
config = '\n'.join(config)
if action == 'set':
format = 'text'
node = new_ele('load-configuration', {'action':action, 'format':format})
if format == 'xml':
config_node = sub_ele(node, 'configuration')
config_node.append(config)
if format == 'text' and not action == 'set':
config_node = sub_ele(node, 'configuration-text').text = config
if action == 'set' and format == 'text':
config_node = sub_ele(node, 'configuration-set').text = config
print to_xml(node)
return self._request(node)
class CompareConfiguration(RPC):
def request(self, rollback=0):
node = new_ele('get-configuration', {'compare':'rollback', 'rollback':str(rollback)})
return self._request(node)
class ExecuteRpc(RPC):
def request(self, rpc):
if isinstance(rpc, str):
rpc = to_ele(rpc)
return self._request(rpc)
class Command(RPC):
def request(self, command=None, format='xml'):
node = new_ele('command', {'format':format})
node.text = command
return self._request(node)
class Reboot(RPC):
def request(self):
node = new_ele('request-reboot')
return self._request(node)
class Halt(RPC):
def request(self):
node = new_ele('request-halt')
return self._request(node)
| Python | 0.000001 |
8fc2ace83f7c25d5245f0496c3e52adcc2fd71c7 | Fix typo in ResolverNode move alias. | tfx/components/common_nodes/resolver_node.py | tfx/components/common_nodes/resolver_node.py | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deprecated location for the TFX Resolver.
The new location is `tfx.dsl.components.common.resolver.Resolver`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Dict, Text, Type
from tfx import types
from tfx.dsl.components.common import resolver
from tfx.utils import deprecation_utils
from tfx.utils import json_utils
def _make_deprecated_resolver_node_alias():
"""Make ResolverNode alias class.
Make the deprecation shim for ResolverNode. Needed to conform to the
convention expected by `tfx.utils.deprecation_utils` and to translate renamed
constructor arguments.
Returns:
Deprecated ResolverNode alias class.
"""
parent_deprecated_class = deprecation_utils.deprecated_alias( # pylint: disable=invalid-name
deprecated_name='tfx.components.common_nodes.resolver_node.ResolverNode',
name='tfx.dsl.components.common.resolver.Resolver',
func_or_class=resolver.Resolver)
class _NewDeprecatedClass(parent_deprecated_class):
"""Deprecated ResolverNode alias constructor.
This class location is DEPRECATED and is provided temporarily for
compatibility. Please use `tfx.dsl.components.common.resolver.Resolver`
instead.
"""
def __init__(self,
instance_name: Text,
resolver_class: Type[resolver.ResolverStrategy],
resolver_configs: Dict[Text, json_utils.JsonableType] = None,
**kwargs: types.Channel):
"""Forwarding shim for deprecated ResolverNode alias constructor.
Args:
instance_name: the name of the Resolver instance.
resolver_class: a ResolverStrategy subclass which contains the artifact
resolution logic.
resolver_configs: a dict of key to Jsonable type representing
configuration that will be used to construct the resolver strategy.
**kwargs: a key -> Channel dict, describing what are the Channels to be
resolved. This is set by user through keyword args.
"""
super(ResolverNode, self).__init__(
instance_name=instance_name,
strategy_class=resolver_class,
config=resolver_configs,
**kwargs)
return _NewDeprecatedClass
# Constant to access resolver class from resolver exec_properties.
RESOLVER_CLASS = resolver.RESOLVER_STRATEGY_CLASS
# Constant to access resolver config from resolver exec_properties.
RESOLVER_CONFIGS = resolver.RESOLVER_CONFIG
RESOLVER_CLASS_LIST = resolver.RESOLVER_STRATEGY_CLASS_LIST
RESOLVER_CONFIG_LIST = resolver.RESOLVER_CONFIG_LIST
ResolverNode = _make_deprecated_resolver_node_alias()
| # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deprecated location for the TFX Resolver.
The new location is `tfx.dsl.components.common.resolver.Resolver`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Dict, Text, Type
from tfx import types
from tfx.dsl.components.common import resolver
from tfx.utils import deprecation_utils
from tfx.utils import json_utils
def _make_deprecated_resolver_node_alias():
"""Make ResolverNode alias class.
Make the deprecation shim for ResolverNode. Needed to conform to the
convention expected by `tfx.utils.deprecation_utils` and to translate renamed
constructor arguments.
Returns:
Deprecated ResolverNode alias class.
"""
parent_deprecated_class = deprecation_utils.deprecated_alias( # pylint: disable=invalid-name
deprecated_name='tfx.components.common_nodes.resolver_node.ResolverNode',
name='tfx.dsl.components.common.resolver.ResolverNode',
func_or_class=resolver.Resolver)
class _NewDeprecatedClass(parent_deprecated_class):
"""Deprecated ResolverNode alias constructor.
This class location is DEPRECATED and is provided temporarily for
compatibility. Please use `tfx.dsl.components.common.resolver.Resolver`
instead.
"""
def __init__(self,
instance_name: Text,
resolver_class: Type[resolver.ResolverStrategy],
resolver_configs: Dict[Text, json_utils.JsonableType] = None,
**kwargs: types.Channel):
"""Forwarding shim for deprecated ResolverNode alias constructor.
Args:
instance_name: the name of the Resolver instance.
resolver_class: a ResolverStrategy subclass which contains the artifact
resolution logic.
resolver_configs: a dict of key to Jsonable type representing
configuration that will be used to construct the resolver strategy.
**kwargs: a key -> Channel dict, describing what are the Channels to be
resolved. This is set by user through keyword args.
"""
super(ResolverNode, self).__init__(
instance_name=instance_name,
strategy_class=resolver_class,
config=resolver_configs,
**kwargs)
return _NewDeprecatedClass
# Constant to access resolver class from resolver exec_properties.
RESOLVER_CLASS = resolver.RESOLVER_STRATEGY_CLASS
# Constant to access resolver config from resolver exec_properties.
RESOLVER_CONFIGS = resolver.RESOLVER_CONFIG
RESOLVER_CLASS_LIST = resolver.RESOLVER_STRATEGY_CLASS_LIST
RESOLVER_CONFIG_LIST = resolver.RESOLVER_CONFIG_LIST
ResolverNode = _make_deprecated_resolver_node_alias()
| Python | 0.999044 |
3c33b9d7ea3736329d3e0939b042db08e6365eb5 | Move experiments to ``experiments`` module | dallinger/version.py | dallinger/version.py | """Dallinger version number."""
__version__ = "2.7.0"
| """Dallinger version number."""
__version__ = "3.0.0a1"
| Python | 0.000007 |
0516ca2a5bfaa162a44f407c13b55ca9487897fe | refresh group/keywords every hour | hortiradar/database/tasks_workers.py | hortiradar/database/tasks_workers.py | from configparser import ConfigParser
from time import time
from redis import StrictRedis
import ujson as json
from keywords import get_frog, get_keywords
from selderij import app
from tasks_master import insert_tweet
keywords = get_keywords()
keywords_sync_time = time()
config = ConfigParser()
config.read("tasks_workers.ini")
posprob_minimum = config["workers"].getfloat("posprob_minimum")
redis = StrictRedis()
rt_cache_time = 60 * 60 * 6
@app.task
def find_keywords_and_groups(id_str, text, retweet_id_str):
"""Find the keywords and associated groups in the tweet."""
global keywords, keywords_sync_time
if (time() - keywords_sync_time) > 60 * 60:
keywords = get_keywords()
keywords_sync_time = time()
# First check if retweets are already processed in the cache
if retweet_id_str:
key = "t:%s" % retweet_id_str
rt = redis.get(key)
if rt:
kw, groups, tokens = json.loads(rt)
insert_tweet.apply_async((id_str, kw, groups, tokens), queue="master")
redis.expire(key, rt_cache_time)
return
frog = get_frog()
tokens = frog.process(text) # a list of dictionaries with frog's analysis per token
kw = []
groups = []
for t in tokens:
lemma = t["lemma"].lower()
k = keywords.get(lemma, None)
if k is not None:
if t["posprob"] > posprob_minimum:
if not t["pos"].startswith(k.pos + "("):
continue
kw.append(lemma)
groups += k.groups
kw, groups = list(set(kw)), list(set(groups))
insert_tweet.apply_async((id_str, kw, groups, tokens), queue="master")
# put retweets in the cache
if retweet_id_str:
data = [kw, groups, tokens]
redis.set(key, json.dumps(data), ex=rt_cache_time)
| from configparser import ConfigParser
from redis import StrictRedis
import ujson as json
from keywords import get_frog, get_keywords
from selderij import app
from tasks_master import insert_tweet
keywords = get_keywords()
config = ConfigParser()
config.read("tasks_workers.ini")
posprob_minimum = config["workers"].getfloat("posprob_minimum")
redis = StrictRedis()
rt_cache_time = 60 * 60 * 6
@app.task
def find_keywords_and_groups(id_str, text, retweet_id_str):
"""Find the keywords and associated groups in the tweet."""
# First check if retweets are already processed in the cache
if retweet_id_str:
key = "t:%s" % retweet_id_str
rt = redis.get(key)
if rt:
kw, groups, tokens = json.loads(rt)
insert_tweet.apply_async((id_str, kw, groups, tokens), queue="master")
redis.expire(key, rt_cache_time)
return
frog = get_frog()
tokens = frog.process(text) # a list of dictionaries with frog's analysis per token
kw = []
groups = []
for t in tokens:
lemma = t["lemma"].lower()
k = keywords.get(lemma, None)
if k is not None:
if t["posprob"] > posprob_minimum:
if not t["pos"].startswith(k.pos + "("):
continue
kw.append(lemma)
groups += k.groups
kw, groups = list(set(kw)), list(set(groups))
insert_tweet.apply_async((id_str, kw, groups, tokens), queue="master")
# put retweets in the cache
if retweet_id_str:
data = [kw, groups, tokens]
redis.set(key, json.dumps(data), ex=rt_cache_time)
| Python | 0 |
2ec6aa75d80dce2dc68d1369173b9a361864409b | update save() | alogator/models.py | alogator/models.py | from django.db import models
from django.core.mail import send_mail
from django.utils import timezone
class LogActor(models.Model):
email = models.CharField(max_length=100,
blank=True,
null=True,
help_text='Alogator will send a messages to this email address.')
active = models.BooleanField(default=True)
mute = models.BooleanField(default=False, help_text="suppress for notification")
slackHook = models.URLField(null=True, blank=True)
slackChannel = models.CharField(max_length=50, null=True, blank=True)
postHook = models.URLField(null=True, blank=True)
def __unicode__(self):
return 'email to: %s' % (self.email)
def getMutedFilename(self):
return "/tmp/alogator_actor_%s_muted" % self.id
def save(self, *args, **kwargs):
try:
orig = self.__class__.objects.get(pk=self.pk)
except DoesNotExist:
pass
else:
if orig.mute and not self.mute:
try:
f = open(self.getMutedFilename(), 'r')
content = f.read()
except:
content = "Muted file " + self.getMutedFilename() + " does not exist."
send_mail(
'ALOGATOR: Muged logs for: %s' % self.getMutedFilename(),
content,
'debug@arteria.ch',
[self.email],
fail_silently=True
)
f = open(self.getMutedFilename(), 'a')
f.flush()
super(LogActor, self).save(*args, **kwargs) # Call the "real" save() method.
class LogSensor(models.Model):
pattern = models.CharField(max_length=100, blank=True, null=True)
caseSensitive = models.BooleanField(default=False)
actor = models.ForeignKey(LogActor)
inactivityThreshold = models.IntegerField(default=0, null=True, blank=True)
inactive = models.BooleanField(default=False)
def __unicode__(self):
return 'search for: %s' % (self.pattern)
class LogFile(models.Model):
path = models.CharField(max_length=1000, blank=True, null=True)
lastModified = models.DateTimeField(default=timezone.now, blank=True)
lastPosition = models.IntegerField(default=0)
lastSize = models.IntegerField(default=0)
sensors = models.ManyToManyField(LogSensor, blank=True)
def __unicode__(self):
return self.path
| from django.db import models
from django.core.mail import send_mail
from django.utils import timezone
class LogActor(models.Model):
email = models.CharField(max_length=100,
blank=True,
null=True,
help_text='Alogator will send a messages to this email address.')
active = models.BooleanField(default=True)
mute = models.BooleanField(default=False, help_text="suppress for notification")
slackHook = models.URLField(null=True, blank=True)
slackChannel = models.CharField(max_length=50, null=True, blank=True)
postHook = models.URLField(null=True, blank=True)
def __unicode__(self):
return 'email to: %s' % (self.email)
def getMutedFilename(self):
return "/tmp/alogator_actor_%s_muted" % self.id
def save(self, *args, **kwargs):
if self.__class__.objects.get(pk=self.pk).mute and not self.mute:
try:
f = open(self.getMutedFilename(), 'r')
content = f.read()
except:
content = "Muted file " + self.getMutedFilename() + " does not exist."
send_mail(
'ALOGATOR: Muged logs for: %s' % self.getMutedFilename(),
content,
'debug@arteria.ch',
[self.email],
fail_silently=True
)
f = open(self.getMutedFilename(), 'a')
f.flush()
super(LogActor, self).save(*args, **kwargs) # Call the "real" save() method.
class LogSensor(models.Model):
pattern = models.CharField(max_length=100, blank=True, null=True)
caseSensitive = models.BooleanField(default=False)
actor = models.ForeignKey(LogActor)
inactivityThreshold = models.IntegerField(default=0, null=True, blank=True)
inactive = models.BooleanField(default=False)
def __unicode__(self):
return 'search for: %s' % (self.pattern)
class LogFile(models.Model):
path = models.CharField(max_length=1000, blank=True, null=True)
lastModified = models.DateTimeField(default=timezone.now, blank=True)
lastPosition = models.IntegerField(default=0)
lastSize = models.IntegerField(default=0)
sensors = models.ManyToManyField(LogSensor, blank=True)
def __unicode__(self):
return self.path
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.