| commit (string, 40 chars) | subject (string, 1–3.25k chars) | old_file (string, 4–311 chars) | new_file (string, 4–311 chars) | old_contents (string, 0–26.3k chars) | lang (string, 3 classes) | proba (float64, 0–1) | diff (string, 0–7.82k chars) |
|---|---|---|---|---|---|---|---|
b628eb4f737b7cb3c3becb17a6545ad400aab1a0
|
Simplify the NOPASS test for PUBDEV-2981
|
h2o-py/dynamic_tests/testdir_algos/kmeans/pyunit_NOPASS_PUBDEV_2981_kmeans_hanging.py
|
h2o-py/dynamic_tests/testdir_algos/kmeans/pyunit_NOPASS_PUBDEV_2981_kmeans_hanging.py
|
from __future__ import print_function
import sys
from builtins import range
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.kmeans import H2OKMeansEstimator


class Test_PUBDEV_2981_kmeans:
    """
    PUBDEV-2981: Sometimes algos just hang and seem to be doing nothing.
    This class is created to train a kmeans model with different parameter settings and re-create the hanging
    for debugging purposes.
    """
    # parameters denoting filenames of interest
    training1_filenames = "smalldata/gridsearch/kmeans_8_centers_3_coords.csv"
    test_name = "pyunit_PUBDEV_2981_kmeans.py"  # name of this test

    # store information about training/test data sets
    x_indices = []       # store predictor indices in the data set
    training1_data = []  # store training data sets
    test_failed = 0      # count total number of tests that have failed

    def __init__(self):
        self.setup_data()

    def setup_data(self):
        """
        This function performs all initializations necessary:
        load the data sets and set the training set indices
        """
        self.training1_data = h2o.import_file(path=pyunit_utils.locate(self.training1_filenames))
        self.x_indices = list(range(self.training1_data.ncol))

    def test_kmeans_hangup(self):
        """
        Train a kmeans model with some parameters that will make the system hang.
        """
        print("*******************************************************************************************")
        h2o.cluster_info()

        good_params_list = {'seed': 1464837706, 'max_iterations': 50, 'init': 'Furthest', 'k': 5}
        good_model_params = {'max_runtime_secs': 0.005857068399999999}
        good_model = H2OKMeansEstimator(**good_params_list)
        good_model.train(x=self.x_indices, training_frame=self.training1_data, **good_model_params)

        bad_params_list = {'seed': 1464837574, 'max_iterations': 10, 'k': 10, 'init': 'Furthest'}
        bad_model_params = {'max_runtime_secs': 0.00519726792}
        bad_model = H2OKMeansEstimator(**bad_params_list)
        bad_model.train(x=self.x_indices, training_frame=self.training1_data, **bad_model_params)

        print("good_model._model_json['output']['model_summary'] type is {0}. "
              "bad_model._model_json['output']['model_summary'] type is "
              "{1}".format(type(good_model._model_json['output']['model_summary']),
                           type(bad_model._model_json['output']['model_summary'])))
        print("They are not equal for some reason....")


def test_PUBDEV_2980_for_kmeans():
    """
    Create and instantiate class and perform tests specified for kmeans
    :return: None
    """
    test_kmeans_grid = Test_PUBDEV_2981_kmeans()
    test_kmeans_grid.test_kmeans_hangup()
    sys.stdout.flush()

    if test_kmeans_grid.test_failed:  # exit with error if any tests have failed
        sys.exit(1)


if __name__ == "__main__":
    pyunit_utils.standalone_test(test_PUBDEV_2980_for_kmeans)
else:
    test_PUBDEV_2980_for_kmeans()
|
Python
| 0.000095
|
@@ -1755,24 +1755,9 @@
0.00
-5857068399999999
+1
%7D%0A
@@ -1927,692 +1927,23 @@
-bad_params_list = %7B'seed': 1464837574, 'max_iterations': 10, 'k': 10, 'init': 'Furthest'%7D%0A bad_model_params= %7B'max_runtime_secs': 0.00519726792%7D%0A bad_model = H2OKMeansEstimator(**bad_params_list)%0A bad_model.train(x=self.x_indices, training_frame=self.training1_data, **bad_model_params)%0A%0A print(%22good_model._model_json%5B'output'%5D%5B'model_summary'%5D type is %7B0%7D. %22%0A %22bad_model._model_json%5B'output'%5D%5B'model_summary'%5D type is %22%0A %22%7B1%7D%22.format(type(good_model._model_json%5B'output'%5D%5B'model_summary'%5D),%0A type(bad_model._model_json%5B'output'%5D%5B'model_summary'%5D)))%0A print(%22They are not equal for some reason...
+print(%22Finished
.%22)%0A
|
b4fdb95ef8a88cfd2d283698ac005ce8d9ec3468
|
Create fetch-wms-urls.py
|
scripts/fetch-wms-urls.py
|
scripts/fetch-wms-urls.py
|
Python
| 0.000002
|
@@ -0,0 +1,335 @@
+#!/usr/bin/python%0A%0A%0Aimport requests%0Aimport json%0A%0Aurl = %22http://129.24.196.43/apps/my_app/search/datasets.json?version=3&model_run_uuid=20f303cd-624d-413d-b485-6113319003d4&model_set=outputs&model_set_type=vis%22%0A%0Ar = requests.get(url)%0Adata = json.loads(r.text)%0Afor i in data%5B%22results%22%5D:%0A full = i%5B%22services%22%5D%5B0%5D%5B%22wms%22%5D%0A print full%0A
|
|
6d4efa0bd1199bbe900a8913b829ca7201dde6ab
|
Add migration to add new Juniper SASS vars to sites
|
openedx/core/djangoapps/appsembler/sites/migrations/0003_add_juniper_new_sass_vars.py
|
openedx/core/djangoapps/appsembler/sites/migrations/0003_add_juniper_new_sass_vars.py
|
Python
| 0
|
@@ -0,0 +1,1843 @@
+# -*- coding: utf-8 -*-%0A%0A%0Aimport json%0Afrom django.db import migrations, models%0A%0A%0Adef add_juniper_new_sass_vars(apps, schema_editor):%0A %22%22%22%0A This migration adds all the new SASS variabled added during the initial%0A pass of the Tahoe Juniper release upgrade.%0A %22%22%22%0A new_sass_var_keys = %7B%0A %22$base-container-width%22: %22calcRem(1200)%22,%0A %22$base-learning-container-width%22: %22calcRem(1000)%22,%0A %22$courseware-content-container-side-padding%22: %22calcRem(100)%22,%0A %22$courseware-content-container-sidebar-width%22: %22calcRem(240)%22,%0A %22$courseware-content-container-width%22: %22$base-learning-container-width%22,%0A %22$site-nav-width%22: %22$base-container-width%22,%0A %22$inline-link-color%22: %22$brand-primary-color%22,%0A %22$light-border-color%22: %22#dedede%22,%0A %22$font-size-base-courseware%22: %22calcRem(18)%22,%0A %22$line-height-base-courseware%22: %22200%25%22,%0A %22$in-app-container-border-radius%22: %22calcRem(15)%22,%0A %22$login-register-container-width%22: %22calcRem(480)%22,%0A %7D%0A SiteConfiguration = apps.get_model('site_configuration', 'SiteConfiguration')%0A sites = SiteConfiguration.objects.all()%0A for site in sites:%0A for sass_var, sass_value in new_sass_var_keys.items():%0A exists = False%0A for key, val in site.sass_variables:%0A if key == sass_var:%0A exists = True%0A break%0A%0A if not exists:%0A site.sass_variables.append(%5Bsass_var, %5Bsass_value, sass_value%5D%5D)%0A%0A site.save()%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('appsembler_sites', '0001_initial'),%0A ('appsembler_sites', '0002_add_hide_linked_accounts_sass_var'),%0A ('site_configuration', '0004_auto_20161120_2325'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(add_juniper_new_sass_vars),%0A %5D%0A
|
|
46351669c279764e1b070943366d7c0ea84a243a
|
Build pipeline directly in build/action_maketokenizer.py. Review URL: http://codereview.chromium.org/67086
|
webkit/build/action_maketokenizer.py
|
webkit/build/action_maketokenizer.py
|
#!/usr/bin/python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# usage: action_maketokenizer.py OUTPUTS -- INPUTS
#
# Multiple INPUTS may be listed. The sections are separated by -- arguments.
#
# OUTPUTS must contain a single item: a path to tokenizer.cpp.
#
# INPUTS must contain exactly two items. The first item must be the path to
# maketokenizer. The second item must be the path to tokenizer.flex.

import os
import subprocess
import sys


def SplitArgsIntoSections(args):
  sections = []

  while len(args) > 0:
    if not '--' in args:
      # If there is no '--' left, everything remaining is an entire section.
      dashes = len(args)
    else:
      dashes = args.index('--')

    sections.append(args[:dashes])

    # Next time through the loop, look at everything after this '--'.
    if dashes + 1 == len(args):
      # If the '--' is at the end of the list, we won't come back through the
      # loop again. Add an empty section now corresponding to the nothingness
      # following the final '--'.
      args = []
      sections.append(args)
    else:
      args = args[dashes + 1:]

  return sections


def main(args):
  sections = SplitArgsIntoSections(args[1:])
  assert len(sections) == 2
  (outputs, inputs) = sections

  assert len(outputs) == 1
  output = outputs[0]

  assert len(inputs) == 2
  maketokenizer = inputs[0]
  flex_input = inputs[1]

  # Build up the command.
  command = 'flex -t %s | perl %s > %s' % (flex_input, maketokenizer, output)

  # Do it. check_call is new in 2.5, so simulate its behavior with call and
  # assert.
  # TODO(mark): Don't use shell=True, build up the pipeline directly.
  p = subprocess.Popen(command, shell=True)
  return_code = p.wait()
  assert return_code == 0

  return return_code


if __name__ == '__main__':
  sys.exit(main(sys.argv))
|
Python
| 0.000001
|
@@ -1503,271 +1503,204 @@
#
-Build up the command.%0A command = 'flex -t %25s %7C perl %25s %3E %25s' %25 (flex_input, maketokenizer, output)%0A%0A # Do it. check_call is new in 2.5, so simulate its behavior with call and%0A # assert.%0A # TODO(mark): Don't use shell=True, build up the pipeline directly.
+Do it. check_call is new in 2.5, so simulate its behavior with call and%0A # assert.%0A outfile = open(output, 'wb')%0A p1 = subprocess.Popen(%5B'flex', '-t', flex_input%5D, stdout=subprocess.PIPE)
%0A p
+2
= s
@@ -1719,46 +1719,117 @@
pen(
-command, shell=Tru
+%5B'perl', maketokenizer%5D, stdin=p1.stdout,%0A stdout=outfil
e)%0A
+%0A
r
-eturn_code
+1 = p1.wait()%0A r2
= p
+2
.wai
@@ -1842,26 +1842,34 @@
assert r
-eturn_code
+1 == 0%0A assert r2
== 0%0A%0A
@@ -1880,19 +1880,9 @@
urn
-return_code
+0
%0A%0A%0Ai
|
163c214f8d714e3f1dc08324f9d48a34f813d9fe
|
Add agency creation script.
|
regscrape/regscrape_lib/commands/create_agencies.py
|
regscrape/regscrape_lib/commands/create_agencies.py
|
Python
| 0
|
@@ -0,0 +1,1165 @@
+def run():%0A from regscrape_lib.util import get_db%0A from regscrape_lib.search import get_agencies%0A from pymongo.errors import DuplicateKeyError%0A %0A db = get_db()%0A new = 0%0A %0A print 'Fetching agencies...'%0A agencies = get_agencies()%0A%0A print 'Saving agencies...'%0A%0A stop_words = %5B'the', 'and', 'of', 'on', 'in', 'for'%5D%0A for agency in agencies:%0A name_parts = agency.name.split(' ')%0A capitalized_parts = %5Bname_parts%5B0%5D.title()%5D + %5Bword.title() if word.lower() not in stop_words else word.lower() for word in name_parts%5B1:%5D%5D%0A name = ' '.join(capitalized_parts)%0A%0A record = %7B%0A '_id': agency.abbr,%0A 'name': name%0A %7D%0A%0A result = db.agencies.update(%0A %7B%0A '_id': record%5B'_id'%5D%0A %7D,%0A %7B%0A '$set': %7B'name': record%5B'name'%5D%7D%0A %7D,%0A upsert=True,%0A safe=True%0A )%0A new += 1 if 'updatedExisting' in result and not result%5B'updatedExisting'%5D else 0%0A %0A print 'Iterated over %25s agencies, of which %25s were new.' %25 (len(agencies), new)%0A %0A return %7B'total': len(agencies), 'new': new%7D%0A
|
|
392125f2b3fae38b4f4d32877ad2abaa60ea6ffd
|
Add pony/orm/examples/demo.py
|
pony/orm/examples/demo.py
|
pony/orm/examples/demo.py
|
Python
| 0
|
@@ -0,0 +1,2358 @@
+from decimal import Decimal%0Afrom pony.orm import *%0A%0Adb = Database(%22sqlite%22, %22demo.sqlite%22, create_db=True)%0A%0Aclass Customer(db.Entity):%0A id = PrimaryKey(int, auto=True)%0A name = Required(unicode)%0A email = Required(unicode, unique=True)%0A orders = Set(%22Order%22)%0A%0Aclass Order(db.Entity):%0A id = PrimaryKey(int, auto=True)%0A total_price = Required(Decimal)%0A customer = Required(Customer)%0A items = Set(%22OrderItem%22)%0A%0Aclass Product(db.Entity):%0A id = PrimaryKey(int, auto=True)%0A name = Required(unicode)%0A price = Required(Decimal)%0A items = Set(%22OrderItem%22)%0A%0Aclass OrderItem(db.Entity):%0A quantity = Required(int, default=1)%0A order = Required(Order)%0A product = Required(Product)%0A PrimaryKey(order, product)%0A%0Asql_debug(True)%0Adb.generate_mapping(create_tables=True)%0A# db.generate_mapping(check_tables=True)%0A%0Adef populate_database():%0A c1 = Customer(name='John Smith', email='john@example.com')%0A c2 = Customer(name='Matthew Reed', email='matthew@example.com')%0A c3 = Customer(name='Chuan Qin', email='chuanqin@example.com')%0A c4 = Customer(name='Rebecca Lawson', email='rebecca@example.com')%0A c5 = Customer(name='Oliver Blakey', email='oliver@example.com')%0A%0A p1 = Product(name='Kindle Fire HD', price=Decimal('284.00'))%0A p2 = Product(name='Apple iPad with Retina Display', price=Decimal('478.50'))%0A p3 = Product(name='SanDisk Cruzer 16 GB USB Flash Drive', price=Decimal('9.99'))%0A p4 = Product(name='Kingston DataTraveler 16GB USB 2.0', price=Decimal('9.98'))%0A p5 = Product(name='Samsung 840 Series 120GB SATA III SSD', price=Decimal('98.95'))%0A p6 = Product(name='Crucial m4 256GB SSD SATA 6Gb/s', price=Decimal('188.67'))%0A%0A o1 = Order(customer=c1, total_price=Decimal('292.00'))%0A OrderItem(order=o1, product=p1)%0A OrderItem(order=o1, product=p4, quantity=2)%0A%0A o2 = Order(customer=c1, total_price=Decimal('478.50'))%0A OrderItem(order=o2, product=p2)%0A%0A o3 = Order(customer=c2, total_price=Decimal('680.50'))%0A OrderItem(order=o3, product=p2)%0A OrderItem(order=o3, product=p4, quantity=2)%0A OrderItem(order=o3, product=p6)%0A%0A o4 = Order(customer=c3, total_price=Decimal('99.80'))%0A OrderItem(order=o4, product=p4, quantity=10)%0A%0A o5 = Order(customer=c4, total_price=Decimal('722.00'))%0A OrderItem(order=o5, product=p1)%0A OrderItem(order=o5, product=p2)%0A%0A commit()%0A%0A
|
|
d5ed0cf979fa393d45e2f719d3096618e0f723aa
|
Add utils.py file for util functions
|
utils.py
|
utils.py
|
Python
| 0.000001
|
@@ -0,0 +1,621 @@
+%22%22%22Utilities.%22%22%22%0A%0Aimport logging%0A%0Adef configure_logging(to_stderr=True, to_file=True, file_name='main.log'):%0A %22%22%22Configure logging destinations.%22%22%22%0A root_logger = logging.getLogger()%0A root_logger.setLevel(logging.INFO)%0A%0A format_str = '%25(asctime)s - %25(levelname)s - %25(message)s'%0A formatter = logging.Formatter(format_str)%0A%0A if to_stderr:%0A stderr_handler = logging.StreamHandler()%0A stderr_handler.setFormatter(formatter)%0A root_logger.addHandler(stderr_handler)%0A%0A if to_file:%0A file_handler = logging.FileHandler(file_name)%0A file_handler.setFormatter(formatter)%0A root_logger.addHandler(file_handler)%0A
|
|
f2b97f029e61bd70b9f4ef5d79c875132907e45e
|
add missing file.
|
gunicorn/monkey.py
|
gunicorn/monkey.py
|
Python
| 0.000001
|
@@ -0,0 +1,1937 @@
+# -*- coding: utf-8 -%0A#%0A# This file is part of gunicorn released under the MIT license.%0A# See the NOTICE for more information.%0A%0Adef patch_django():%0A %22%22%22 monkey patch django.%0A%0A This patch make sure that we use real threads to get the ident which%0A is going to happen if we are using gevent or eventlet.%0A %22%22%22%0A%0A try:%0A from django.db import DEFAULT_DB_ALIAS%0A from django.db.backends import BaseDatabaseWrapper, DatabaseError%0A%0A if %22validate_thread_sharing%22 in BaseDatabaseWrapper.__dict__:%0A import thread%0A _get_ident = thread.get_ident%0A%0A def _init(self, settings_dict, alias=DEFAULT_DB_ALIAS,%0A allow_thread_sharing=False):%0A self.connection = None%0A self.queries = %5B%5D%0A self.settings_dict = settings_dict%0A self.alias = alias%0A self.use_debug_cursor = None%0A%0A # Transaction related attributes%0A self.transaction_state = %5B%5D%0A self.savepoint_state = 0%0A self._dirty = None%0A self._thread_ident = _get_ident()%0A self.allow_thread_sharing = allow_thread_sharing%0A%0A%0A def _validate_thread_sharing(self):%0A if (not self.allow_thread_sharing%0A and self._thread_ident != _get_ident()):%0A raise DatabaseError(%22DatabaseWrapper objects created in a %22%0A %22thread can only be used in that same thread. The object %22%0A %22with alias '%25s' was created in thread id %25s and this is %22%0A %22thread id %25s.%22%0A %25 (self.alias, self._thread_ident, _get_ident()))%0A%0A BaseDatabaseWrapper.__init__ = _init%0A BaseDatabaseWrapper.validate_thread_sharing = _validate_thread_sharing%0A%0A except ImportError, e:%0A patch_django_db_backends = None%0A%0A
|
|
b0f8c27325c9b4cbc5cd5bc83ece6f3d7569f7da
|
Add gzip stream
|
gzipinputstream.py
|
gzipinputstream.py
|
Python
| 0
|
@@ -0,0 +1,2836 @@
+import zlib%0Aimport string%0A%0ABLOCK_SIZE = 16384%0A%22%22%22Read block size%22%22%22%0A%0AWINDOW_BUFFER_SIZE = 16 + zlib.MAX_WBITS%0A%22%22%22zlib window buffer size, set to gzip's format%22%22%22%0A%0A%0Aclass GzipInputStream(object):%0A %22%22%22%0A Simple class that allow streaming reads from GZip files.%0A Python 2.x gzip.GZipFile relies on .seek() and .tell(), so it%0A doesn't support this (@see: http://bo4.me/YKWSsL).%0A Adapted from: http://effbot.org/librarybook/zlib-example-4.py%0A %22%22%22%0A%0A def __init__(self, fileobj):%0A %22%22%22%0A Initialize with the given file-like object.%0A @param fileobj: file-like object,%0A %22%22%22%0A self._file = fileobj%0A self._zip = zlib.decompressobj(WINDOW_BUFFER_SIZE)%0A self._offset = 0 # position in unzipped stream%0A self._data = %22%22%0A%0A def __fill(self, num_bytes):%0A %22%22%22%0A Fill the internal buffer with 'num_bytes' of data.%0A @param num_bytes: int, number of bytes to read in (0 = everything)%0A %22%22%22%0A%0A if not self._zip:%0A return%0A%0A while not num_bytes or len(self._data) %3C num_bytes:%0A data = self._file.read(BLOCK_SIZE)%0A if not data:%0A self._data = self._data + self._zip.flush()%0A self._zip = None # no more data%0A break%0A%0A self._data = self._data + self._zip.decompress(data)%0A%0A def __iter__(self):%0A return self%0A%0A def seek(self, offset, whence=0):%0A if whence == 0:%0A position = offset%0A elif whence == 1:%0A position = self._offset + offset%0A else:%0A raise IOError(%22Illegal argument%22)%0A if position %3C self._offset:%0A raise IOError(%22Cannot seek backwards%22)%0A%0A # skip forward, in blocks%0A while position %3E self._offset:%0A if not self.read(min(position - self._offset, BLOCK_SIZE)):%0A break%0A%0A def tell(self):%0A return self._offset%0A%0A def read(self, size=0):%0A self.__fill(size)%0A if size:%0A data = self._data%5B:size%5D%0A self._data = self._data%5Bsize:%5D%0A else:%0A data = self._data%0A self._data = %22%22%0A self._offset = self._offset + len(data)%0A return data%0A%0A def next(self):%0A line = self.readline()%0A if not line:%0A raise StopIteration()%0A return line%0A%0A def readline(self):%0A # make sure we have an entire line%0A while self._zip and %22%5Cn%22 not in self._data:%0A self.__fill(len(self._data) + 512)%0A%0A pos = string.find(self._data, %22%5Cn%22) + 1%0A if pos %3C= 0:%0A return self.read()%0A return self.read(pos)%0A%0A def readlines(self):%0A lines = %5B%5D%0A while True:%0A line = self.readline()%0A if not line:%0A break%0A lines.append(line)%0A return lines%0A
|
|
95e5b80117b090ae0458df18e062bad50b0c0b5a
|
add module init file
|
io_exporter_zombye/__init__.py
|
io_exporter_zombye/__init__.py
|
Python
| 0.000001
|
@@ -0,0 +1,1432 @@
+# The MIT License (MIT)%0A#%0A# Copyright (c) 2015 Georg Sch%C3%A4fer%0A#%0A# Permission is hereby granted, free of charge, to any person obtaining a copy%0A# of this software and associated documentation files (the %22Software%22), to deal%0A# in the Software without restriction, including without limitation the rights%0A# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell%0A# copies of the Software, and to permit persons to whom the Software is%0A# furnished to do so, subject to the following conditions:%0A#%0A# The above copyright notice and this permission notice shall be included in all%0A# copies or substantial portions of the Software.%0A#%0A# THE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR%0A# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,%0A# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE%0A# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER%0A# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,%0A# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE%0A# SOFTWARE.%0A%0Abl_info = %7B%0A%09%22name%22 : %22Zombye Model Exporter%22,%0A%09%22author%22 : %22Georg Sch%C3%A4fer%22,%0A%09%22version%22 : (0, 1),%0A%09%22blender%22 : (2, 7, 5),%0A%09%22location%22 : %22File %3E Export %3E Zombye Model (.zmdl)%22,%0A%09%22description%22 : %22The script exports meshes, armatures and animations to the Zombye Model format (zmdl)%22,%0A%09%22category%22 : %22Import-Export%22%0A%7D%0A
|
|
19df1ece66f815d0aaaae5e7273117b2da9541ac
|
Create mpu9250_i2c_modi.py
|
mpu9250_i2c_modi.py
|
mpu9250_i2c_modi.py
|
Python
| 0
|
@@ -0,0 +1,1378 @@
+import smbus%0Aimport time,timeit%0A#import RPi.GPIO as GPIO%0A%0A# Global varible%0Ai2c = smbus.SMBus(1)%0Aaddr = 0x68%0A%0Ac_t0 = time.clock()%0At_t0 = time.time()%0A%0Atry:%0A%09device_id = i2c.read_byte_data(addr,0x75)%0A%09print %22Device ID:%22 + str(hex(device_id))%0A%09print %22MPU9250 I2C Connected.%22%0Aexcept:%0A%09print %22Connect failed%22%0A%0A%0Ai2c.write_byte_data(0x68, 0x6a, 0x00)%0Ai2c.write_byte_data(0x68, 0x37, 0x02)%0Ai2c.write_byte_data(0x0c, 0x0a, 0x16)%0A%0A%0A# Open File%0Af = open(%22IMU_LOG_9axis.txt%22, %22w%22)%0A%0A%0A# Loop area%0Adef read_write_mpu9250():%0A%09count = 1%0A%0A%09while True:%0A%09%09if count %3C= 500:%0A%0A%09%09%09def smbus_data_get():%0A%09%09%09%09i2c.write_byte_data(0x0c, 0x0a, 0x16)%0A%09%09%09%09#temp_out = i2c.read_i2c_block_data(addr, 0x41, 2)%0A%09%09%09%09xyz_g_offset = i2c.read_i2c_block_data(addr, 0x13, 6)%0A%09%09%09%09xyz_a_out = i2c.read_i2c_block_data(addr, 0x3B, 6)%0A%09%09%09%09xyz_g_out = i2c.read_i2c_block_data(addr, 0x43, 6)%0A%09%09%09%09xyz_a_offset = i2c .read_i2c_block_data(addr, 0x77, 6)%0A%0A%09%09%09%09xyz_mag = i2c.read_i2c_block_data(0x0c, 0x03, 6)%0A%0A%09%09%09%09c_t1 = time.clock() - c_t0%0A%09%09%09%09t_t1 = time.time() - t_t0%0A%0A%0A%09%09%09smbus_data_get()%09%0A%0A%09%09%09print %3E%3E f, count%0A%09%09%09print %3E%3E f, c_t1, t_t1%0A%09%09%09print %3E%3E f, xyz_a_out%0A%09%09%09print %3E%3E f, xyz_a_offset%0A%09%09%09print %3E%3E f, xyz_g_out%0A%09%09%09print %3E%3E f, xyz_g_offset%0A%09%09%09print %3E%3E f, xyz_mag%0A%0A%09%09%09count += 1%0A%09%09else:%0A%09%09%09f.close()%0A%09%09%09i2c.write_byte_data(addr, 0x6A, 0x07)%0A%09%09%09break%0A%0Aprint timeit.timeit(read_write_mpu9250, number = 1)%0Aprint %22Process End%22%0A
|
|
7acd91331d97a9a4c2190c7d6c8844bd4b7ccfe3
|
add cache to diagnostics/__init__.py
|
dask/diagnostics/__init__.py
|
dask/diagnostics/__init__.py
|
from .profile import Profiler
from .progress import ProgressBar
|
Python
| 0.000013
|
@@ -57,8 +57,33 @@
ressBar%0A
+from .cache import Cache%0A
|
63318185d5477fbf99e570e5ccaba303ebe26493
|
add testcases
|
jsmapper/tests/test_mapping.py
|
jsmapper/tests/test_mapping.py
|
Python
| 0.000013
|
@@ -0,0 +1,890 @@
+# -*- coding: utf-8 -*-%0A%0Afrom nose.tools import (%0A eq_,%0A ok_,%0A)%0A%0Afrom ..mapping import (%0A Mapping,%0A MappingProperty,%0A object_property,%0A)%0Afrom ..schema import JSONSchema%0Afrom ..types import (%0A Integer,%0A String,%0A)%0A%0A%0Adef test_object_property():%0A schema = JSONSchema()%0A%0A @object_property(name='property')%0A def prop():%0A return schema%0A%0A ok_(isinstance(prop, MappingProperty))%0A eq_(prop.name, 'property')%0A eq_(prop.schema, schema)%0A%0A%0Aclass Base(Mapping):%0A foo = JSONSchema(type=Integer())%0A bar = JSONSchema(type=Integer())%0A%0A%0Aclass Extended(Base):%0A foo = JSONSchema(type=Integer())%0A baz = JSONSchema(type=String())%0A%0A%0Adef test_inheritance():%0A eq_(%7BBase.foo, Base.bar%7D,%0A set(prop.schema for prop in Base._properties()))%0A eq_(%7BExtended.foo, Extended.bar, Extended.baz%7D,%0A set(prop.schema for prop in Extended._properties()))%0A
|
|
dab55518bacc70c2f44ca11787c346b821343316
|
Update cifar10 example
|
examples/cifar10_cnn.py
|
examples/cifar10_cnn.py
|
from __future__ import absolute_import
from __future__ import print_function
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD, Adadelta, Adagrad
from keras.utils import np_utils, generic_utils
from six.moves import range

'''
    Train a (fairly simple) deep CNN on the CIFAR10 small images dataset.

    GPU run command:
        THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python cifar10_cnn.py

    It gets down to 0.65 test logloss in 25 epochs, and down to 0.55 after 50 epochs.
    (it's still underfitting at that point, though).

    Note: the data was pickled with Python 2, and some encoding issues might prevent you
    from loading it in Python 3. You might have to load it in Python 2,
    save it in a different format, load it in Python 3 and repickle it.
'''

batch_size = 32
nb_classes = 10
nb_epoch = 200
data_augmentation = True

# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)

model = Sequential()

model.add(Convolution2D(32, 3, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(Convolution2D(32, 32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))

model.add(Convolution2D(64, 32, 3, 3, border_mode='full'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(64*8*8, 512))
model.add(Activation('relu'))
model.add(Dropout(0.5))

model.add(Dense(512, nb_classes))
model.add(Activation('softmax'))

# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)

if not data_augmentation:
    print("Not using data augmentation or normalization")

    X_train = X_train.astype("float32")
    X_test = X_test.astype("float32")
    X_train /= 255
    X_test /= 255
    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=10)
    score = model.evaluate(X_test, Y_test, batch_size=batch_size)
    print('Test score:', score)

else:
    print("Using real time data augmentation")

    # this will do preprocessing and realtime data augmentation
    datagen = ImageDataGenerator(
        featurewise_center=True,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=True,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=20,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.2,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.2,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images

    # compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied)
    datagen.fit(X_train)

    for e in range(nb_epoch):
        print('-'*40)
        print('Epoch', e)
        print('-'*40)
        print("Training...")
        # batch train with realtime data augmentation
        progbar = generic_utils.Progbar(X_train.shape[0])
        for X_batch, Y_batch in datagen.flow(X_train, Y_train):
            loss = model.train(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[("train loss", loss)])

        print("Testing...")
        # test time!
        progbar = generic_utils.Progbar(X_test.shape[0])
        for X_batch, Y_batch in datagen.flow(X_test, Y_test):
            score = model.test(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[("test loss", score)])
|
Python
| 0
|
@@ -2587,18 +2587,24 @@
b_epoch=
-10
+nb_epoch
)%0A sc
@@ -4066,16 +4066,25 @@
el.train
+_on_batch
(X_batch
@@ -4366,16 +4366,25 @@
del.test
+_on_batch
(X_batch
|
190df1378844c6294c6f48ad6cb0272f2146fc48
|
Add example of force https
|
examples/force_https.py
|
examples/force_https.py
|
Python
| 0.000357
|
@@ -0,0 +1,355 @@
+%22%22%22An example of using a middleware to require HTTPS connections.%0A requires https://github.com/falconry/falcon-require-https to be installed via%0A pip install falcon-require-https%0A%22%22%22%0Aimport hug%0Afrom falcon_require_https import RequireHTTPS%0A%0Ahug.API(__name__).http.add_middleware(RequireHTTPS())%0A%0A%0A@hug.get()%0Adef my_endpoint():%0A return 'Success!'%0A
|
|
c7e8f255d5ad85dc03f5f302f49295d491ac11a1
|
Create app.py
|
app.py
|
app.py
|
Python
| 0.000003
|
@@ -0,0 +1,2639 @@
+#!/usr/bin/env python%0A%0Afrom __future__ import print_function%0Afrom future.standard_library import install_aliases%0Ainstall_aliases()%0A%0Afrom urllib.parse import urlparse, urlencode%0Afrom urllib.request import urlopen, Request%0Afrom urllib.error import HTTPError%0A%0Aimport json%0Aimport os%0A%0Afrom flask import Flask%0Afrom flask import request%0Afrom flask import make_response%0A%0A# Flask app should start in global layout%0Aapp = Flask(__name__)%0A%0A%0A@app.route('/webhook', methods=%5B'POST'%5D)%0Adef webhook():%0A req = request.get_json(silent=True, force=True)%0A%0A print(%22Request:%22)%0A print(json.dumps(req, indent=4))%0A%0A res = processRequest(req)%0A%0A res = json.dumps(res, indent=4)%0A # print(res)%0A r = make_response(res)%0A r.headers%5B'Content-Type'%5D = 'application/json'%0A return r%0A%0A%0Adef processRequest(req):%0A if req.get(%22result%22).get(%22action%22) != %22yahooWeatherForecast%22:%0A return %7B%7D%0A baseurl = %22https://query.yahooapis.com/v1/public/yql?%22%0A yql_query = makeYqlQuery(req)%0A if yql_query is None:%0A return %7B%7D%0A yql_url = baseurl + urlencode(%7B'q': yql_query%7D) + %22&format=json%22%0A result = urlopen(yql_url).read()%0A data = json.loads(result)%0A res = makeWebhookResult(data)%0A return res%0A%0A%0Adef makeYqlQuery(req):%0A result = req.get(%22result%22)%0A parameters = result.get(%22parameters%22)%0A city = parameters.get(%22geo-city%22)%0A if city is None:%0A return None%0A%0A return %22select * from weather.forecast where woeid in (select woeid from geo.places(1) where text='%22 + city + %22')%22%0A%0A%0Adef makeWebhookResult(data):%0A query = data.get('query')%0A if query is None:%0A return %7B%7D%0A%0A result = query.get('results')%0A if result is None:%0A return %7B%7D%0A%0A channel = result.get('channel')%0A if channel is None:%0A return %7B%7D%0A%0A item = channel.get('item')%0A location = channel.get('location')%0A units = channel.get('units')%0A if (location is None) or (item is None) or (units is None):%0A return %7B%7D%0A%0A condition = item.get('condition')%0A if condition is None:%0A return %7B%7D%0A%0A # print(json.dumps(item, indent=4))%0A%0A speech = %22Today in %22 + location.get('city') + %22: %22 + condition.get('text') + %5C%0A %22, the temperature is %22 + condition.get('temp') + %22 %22 + units.get('temperature')%0A%0A print(%22Response:%22)%0A print(speech)%0A%0A return %7B%0A %22speech%22: speech,%0A %22displayText%22: speech,%0A # %22data%22: data,%0A # %22contextOut%22: %5B%5D,%0A %22source%22: %22apiai-weather-webhook-sample%22%0A %7D%0A%0A%0Aif __name__ == '__main__':%0A port = int(os.getenv('PORT', 5000))%0A%0A print(%22Starting app on port %25d%22 %25 port)%0A%0A app.run(debug=False, port=port, host='0.0.0.0')%0A %0A
|
|
2b74fccbed0a63a503d59ac46fe90d0916abe39c
|
Add sublime script
|
bdo.py
|
bdo.py
|
Python
| 0.000008
|
@@ -0,0 +1,613 @@
+import sublime, sublime_plugin, subprocess, threading, time%0A%0Aclass Bdo(sublime_plugin.TextCommand):%0A def run(self, cmd):%0A sublime.active_window().show_input_panel(%22bdo %22, %22update%22, self.execute, None, None)%0A def execute(self, cmd):%0A output = subprocess.Popen(%0A %22echo %22 + cmd + %22 %7C nc -w 10 localhost 9090%22,%0A shell=True, stdout=subprocess.PIPE).stdout.read()%0A print output%0A if len(output) %3E 0:%0A view = sublime.active_window().new_file()%0A edit = view.begin_edit()%0A view.insert(edit, 0, output)%0A view.end_edit(edit)%0A
|
|
876365a7f19a3786db15dc7debbd2686fa5d02ef
|
Add WmataError class and start of Wmata class.
|
wmata.py
|
wmata.py
|
Python
| 0
|
@@ -0,0 +1,376 @@
+import datetime%0Aimport urllib%0Aimport json%0A%0Aclass WmataError(Exception):%0A pass%0A%0Aclass Wmata(object):%0A%0A base_url = 'http://api.wmata.com/%25(svc)s.svc/json/%25(endpoint)s'%0A # By default, we'll use the WMATA demonstration key%0A api_key = 'kfgpmgvfgacx98de9q3xazww' %0A%0A def __init__(self, api_key=None):%0A if api_key is not None:%0A self.api_key = api_key%0A
|
|
473516ce881711ee515606a02d3199e195d0c167
|
allow reports that extend basictabular to specify whether to run couchdb queries with stale='update_after'
|
corehq/apps/reports/basic.py
|
corehq/apps/reports/basic.py
|
from corehq.apps.reports.datatables import (DataTablesHeader, DataTablesColumn,
                                            DTSortType)
from corehq.apps.reports.generic import GenericTabularReport
from couchdbkit_aggregate import KeyView, AggregateView
from dimagi.utils.couch.database import get_db

__all__ = ['Column', 'BasicTabularReport']


class Column(object):
    """
    Unified interface for a report column that lets you specify the
    DataTablesColumn arguments (UI) and CouchDB KeyView arguments (model) at
    once.
    """
    def __init__(self, name, calculate_fn=None, *args, **kwargs):
        couch_args = (
            # args specific to KeyView constructor
            'key', 'couch_view', 'startkey_fn', 'endkey_fn', 'reduce_fn',
            # pass-through db.view() args
        )

        couch_kwargs = {}
        for arg in couch_args:
            try:
                couch_kwargs[arg] = kwargs.pop(arg)
            except KeyError:
                pass

        if 'key' in couch_kwargs:
            if 'sort_type' not in kwargs:
                kwargs['sort_type'] = DTSortType.NUMERIC
                kwargs['sortable'] = True
            key = couch_kwargs.pop('key')
            self.view = KeyView(key, **couch_kwargs)
        elif calculate_fn:
            kwargs['sortable'] = False
            self.view = FunctionView(calculate_fn)
        else:
            raise Exception("Must specify either key or calculate_fn.")

        self.data_tables_column = DataTablesColumn(name, *args, **kwargs)


class FunctionView(object):
    def __init__(self, calculate_fn):
        if isinstance(calculate_fn, type):
            calculate_fn = calculate_fn()
        self.calculate_fn = calculate_fn

    def view(self, key, object):
        return self.calculate_fn(key, object)


class ColumnCollector(type):
    """
    Metaclass that collects Columns and translates them to KeyViews of an
    AggregateView.
    """
    def __new__(cls, name, bases, attrs):
        columns = {}
        for attr_name, attr in attrs.items():
            if isinstance(attr, Column):
                columns[attr_name] = attr

        class MyAggregateView(AggregateView):
            pass

        # patch MyAggregateView's key_views attribute since we can't define the
        # class declaratively
        function_views = {}
        for slug, column in columns.items():
            if hasattr(column, 'view') and isinstance(column.view, KeyView):
                MyAggregateView.key_views[slug] = column.view
            else:
                function_views[slug] = column.view

        attrs['columns'] = columns
        attrs['function_views'] = function_views
        attrs['View'] = MyAggregateView

        return super(ColumnCollector, cls).__new__(cls, name, bases, attrs)


class BasicTabularReport(GenericTabularReport):
    __metaclass__ = ColumnCollector

    @property
    def fields(self):
        return [cls.__module__ + '.' + cls.__name__
                for cls in self.field_classes]

    @property
    def headers(self):
        return DataTablesHeader(*[self.columns[c].data_tables_column
                                  for c in self.default_column_order])

    @property
    def rows(self):
        startkey, endkey = self.start_and_end_keys

        kwargs = {
            'db': get_db(),
            'couch_view': getattr(self, 'couch_view', None),
            'startkey': startkey,
            'endkey': endkey
        }

        for key in self.keys:
            row = self.View.view(key, **kwargs)
            yield [row[c] if c in self.View.key_views
                   else self.function_views[c].view(key, self)
                   for c in self.default_column_order]
|
Python
| 0
|
@@ -2839,16 +2839,41 @@
ollector
+%0A update_after = False
%0A%0A @p
@@ -3206,32 +3206,104 @@
def rows(self):%0A
+ kwargs = %7B'stale': 'update_after'%7D if self.update_after else %7B%7D%0A
startkey
@@ -3351,19 +3351,24 @@
kwargs
- =
+.update(
%7B%0A
@@ -3522,16 +3522,17 @@
%7D
+)
%0A%0A
|
2a3f45c9a3b3b4f513b113e68b160e0780b78cb8
|
Revert "build: get rid of the sparse expand stuff"
|
tools/releasetools/build_image.py
|
tools/releasetools/build_image.py
|
#!/usr/bin/env python
#
# Copyright (C) 2011 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Build image output_image_file from input_directory and properties_file.

Usage: build_image input_directory properties_file output_image_file
"""
import os
import os.path
import subprocess
import sys


def RunCommand(cmd):
  """ Echo and run the given command
  Args:
    cmd: the command represented as a list of strings.
  Returns:
    The exit code.
  """
  print "Running: ", " ".join(cmd)
  p = subprocess.Popen(cmd)
  p.communicate()
  return p.returncode


def BuildImage(in_dir, prop_dict, out_file):
  """Build an image to out_file from in_dir with property prop_dict.

  Args:
    in_dir: path of input directory.
    prop_dict: property dictionary.
    out_file: path of the output image file.

  Returns:
    True iff the image is built successfully.
  """
  build_command = []
  fs_type = prop_dict.get("fs_type", "")
  run_fsck = False
  if fs_type.startswith("ext"):
    build_command = ["mkuserimg.sh"]
    if "extfs_sparse_flag" in prop_dict:
      build_command.append(prop_dict["extfs_sparse_flag"])
      #run_fsck = True
    build_command.extend([in_dir, out_file, fs_type,
                          prop_dict["mount_point"]])
    if "partition_size" in prop_dict:
      build_command.append(prop_dict["partition_size"])
    if "selinux_fc" in prop_dict:
      build_command.append(prop_dict["selinux_fc"])
  else:
    build_command = ["mkyaffs2image", "-f"]
    if prop_dict.get("mkyaffs2_extra_flags", None):
      build_command.extend(prop_dict["mkyaffs2_extra_flags"].split())
    build_command.append(in_dir)
    build_command.append(out_file)
    if "selinux_fc" in prop_dict:
      build_command.append(prop_dict["selinux_fc"])
      build_command.append(prop_dict["mount_point"])

  exit_code = RunCommand(build_command)
  if exit_code != 0:
    return False

  if run_fsck:
    # Inflate the sparse image
    unsparse_image = os.path.join(
        os.path.dirname(out_file), "unsparse_" + os.path.basename(out_file))
    inflate_command = ["simg2img", out_file, unsparse_image]
    exit_code = RunCommand(inflate_command)
    if exit_code != 0:
      os.remove(unsparse_image)
      return False

    # Run e2fsck on the inflated image file
    e2fsck_command = ["e2fsck", "-f", "-n", unsparse_image]
    exit_code = RunCommand(e2fsck_command)
    os.remove(unsparse_image)

  return exit_code == 0


def ImagePropFromGlobalDict(glob_dict, mount_point):
  """Build an image property dictionary from the global dictionary.

  Args:
    glob_dict: the global dictionary from the build system.
    mount_point: such as "system", "data" etc.
  """
  d = {}

  def copy_prop(src_p, dest_p):
    if src_p in glob_dict:
      d[dest_p] = str(glob_dict[src_p])

  common_props = (
      "extfs_sparse_flag",
      "mkyaffs2_extra_flags",
      "selinux_fc",
      )
  for p in common_props:
    copy_prop(p, p)

  d["mount_point"] = mount_point
  if mount_point == "system":
    copy_prop("fs_type", "fs_type")
    copy_prop("system_size", "partition_size")
  elif mount_point == "data":
    copy_prop("fs_type", "fs_type")
    copy_prop("userdata_size", "partition_size")
  elif mount_point == "cache":
    copy_prop("cache_fs_type", "fs_type")
    copy_prop("cache_size", "partition_size")

  return d


def LoadGlobalDict(filename):
  """Load "name=value" pairs from filename"""
  d = {}
  f = open(filename)
  for line in f:
    line = line.strip()
    if not line or line.startswith("#"):
      continue
    k, v = line.split("=", 1)
    d[k] = v
  f.close()
  return d


def main(argv):
  if len(argv) != 3:
    print __doc__
    sys.exit(1)

  in_dir = argv[0]
  glob_dict_file = argv[1]
  out_file = argv[2]

  glob_dict = LoadGlobalDict(glob_dict_file)
  image_filename = os.path.basename(out_file)
  mount_point = ""
  if image_filename == "system.img":
    mount_point = "system"
  elif image_filename == "userdata.img":
    mount_point = "data"
  elif image_filename == "cache.img":
    mount_point = "cache"
  else:
    print >> sys.stderr, "error: unknown image file name ", image_filename
    exit(1)

  image_properties = ImagePropFromGlobalDict(glob_dict, mount_point)

  if not BuildImage(in_dir, image_properties, out_file):
    print >> sys.stderr, "error: failed to build %s from %s" % (out_file, in_dir)
    exit(1)


if __name__ == '__main__':
  main(sys.argv[1:])
|
Python
| 0
|
@@ -1648,17 +1648,16 @@
)%0A
-#
run_fsck
|
6cf1dbcdf1ffa57136d9476eb43d2b858c4ad6ea
|
use 'settings' for system libpng.
|
third_party/libpng/libpng.gyp
|
third_party/libpng/libpng.gyp
|
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

{
  'includes': [
    '../../build/common.gypi',
  ],
  'variables': {
    'use_system_libpng%': 0,
  },
  'conditions': [
    ['use_system_libpng==0', {
      'targets': [
        {
          'target_name': 'libpng',
          'type': '<(library)',
          'dependencies': [
            '../zlib/zlib.gyp:zlib',
          ],
          'defines': [
            'CHROME_PNG_WRITE_SUPPORT',
            'PNG_USER_CONFIG',
          ],
          'msvs_guid': 'C564F145-9172-42C3-BFCB-6014CA97DBCD',
          'sources': [
            'png.c',
            'png.h',
            'pngconf.h',
            'pngerror.c',
            'pnggccrd.c',
            'pngget.c',
            'pngmem.c',
            'pngpread.c',
            'pngread.c',
            'pngrio.c',
            'pngrtran.c',
            'pngrutil.c',
            'pngset.c',
            'pngtrans.c',
            'pngusr.h',
            'pngvcrd.c',
            'pngwio.c',
            'pngwrite.c',
            'pngwtran.c',
            'pngwutil.c',
          ],
          'direct_dependent_settings': {
            'include_dirs': [
              '.',
            ],
            'defines': [
              'CHROME_PNG_WRITE_SUPPORT',
              'PNG_USER_CONFIG',
            ],
          },
          'export_dependent_settings': [
            '../zlib/zlib.gyp:zlib',
          ],
          'conditions': [
            ['OS!="win"', {'product_name': 'png'}],
          ],
        },
      ]
    }, {
      'targets': [
        {
          'target_name': 'libpng',
          'type': '<(library)',
          'dependencies': [
            '../zlib/zlib.gyp:zlib',
          ],
          'defines': [
            'USE_SYSTEM_LIBPNG',
          ],
          'direct_dependent_settings': {
            'cflags': [
              '<!@(pkg-config --cflags libpng)',
            ],
          },
          'link_settings': {
            'ldflags': [
              '<!@(pkg-config --libs-only-L --libs-only-other libpng)',
            ],
            'libraries': [
              '<!@(pkg-config --libs-only-l libpng)',
            ],
          },
        },
      ],
    }],
  ],
}
|
Python
| 0.000029
|
@@ -1711,34 +1711,32 @@
'type': '
-%3C(library)
+settings
',%0A
|
43a59b0d883e84005f9b8687ac3aa4ed449f9b78
|
Fix Padatious to load intents and entities Doesn't make sense to add intents since no sample lines are provided
|
mycroft/skills/padatious_service.py
|
mycroft/skills/padatious_service.py
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from subprocess import call
from threading import Event
from time import time as get_time, sleep

from os.path import expanduser, isfile

from pkg_resources import get_distribution

from mycroft.configuration import ConfigurationManager
from mycroft.messagebus.message import Message
from mycroft.skills.core import FallbackSkill
from mycroft.util.log import LOG

PADATIOUS_VERSION = '0.3.2'  # Also update in requirements.txt


class PadatiousService(FallbackSkill):
    def __init__(self, emitter):
        FallbackSkill.__init__(self)
        self.config = ConfigurationManager.get()['padatious']
        intent_cache = expanduser(self.config['intent_cache'])

        try:
            from padatious import IntentContainer
        except ImportError:
            LOG.error('Padatious not installed. Please re-run dev_setup.sh')
            try:
                call(['notify-send', 'Padatious not installed',
                      'Please run build_host_setup and dev_setup again'])
            except OSError:
                pass
            return

        ver = get_distribution('padatious').version
        if ver != PADATIOUS_VERSION:
            LOG.warning('Using Padatious v' + ver + '. Please re-run ' +
                        'dev_setup.sh to install ' + PADATIOUS_VERSION)

        self.container = IntentContainer(intent_cache)

        self.emitter = emitter
        self.emitter.on('padatious:register_intent', self.register_intent)
        self.emitter.on('padatious:register_entity', self.register_entity)
        self.register_fallback(self.handle_fallback, 5)
        self.finished_training_event = Event()

        self.train_delay = self.config['train_delay']
        self.train_time = get_time() + self.train_delay

        self.wait_and_train()

    def wait_and_train(self):
        sleep(self.train_delay)
        if self.train_time < 0.0:
            return

        if self.train_time <= get_time() + 0.01:
            self.train_time = -1.0

            self.finished_training_event.clear()
            LOG.info('Training...')
            self.container.train()
            LOG.info('Training complete.')
            self.finished_training_event.set()

    def _register_object(self, message, object_name, register_func):
        file_name = message.data['file_name']
        name = message.data['name']

        LOG.debug('Registering Padatious ' + object_name + ': ' + name)

        if not isfile(file_name):
            LOG.warning('Could not find file ' + file_name)
            return

        register_func(name, file_name)
        self.train_time = get_time() + self.train_delay
        self.wait_and_train()

    def register_intent(self, message):
        self._register_object(message, 'intent', self.container.add_intent)

    def register_entity(self, message):
        self._register_object(message, 'entity', self.container.add_entity)

    def handle_fallback(self, message):
        utt = message.data.get('utterance')
        LOG.debug("Padatious fallback attempt: " + utt)

        if not self.finished_training_event.is_set():
            LOG.debug('Waiting for training to finish...')
            self.finished_training_event.wait()

        data = self.container.calc_intent(utt)

        if data.conf < 0.5:
            return False

        data.matches['utterance'] = utt

        self.emitter.emit(Message(data.name, data=data.matches))
        return True
|
Python
| 0
|
@@ -3326,18 +3326,19 @@
ntainer.
+lo
a
-d
d_intent
@@ -3444,18 +3444,19 @@
ntainer.
+lo
a
-d
d_entity
|
6f3847fb1408064250c6e3ce128bc85868c15467
|
Update Windows Clang version to r201604
|
tools/clang/scripts/update.py
|
tools/clang/scripts/update.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Windows can't run .sh files, so this is a Python implementation of
update.sh. This script should replace update.sh on all platforms eventually."""

import os
import re
import subprocess
import sys

# Do NOT CHANGE this if you don't know what you're doing -- see
# https://code.google.com/p/chromium/wiki/UpdatingClang
# Reverting problematic clang rolls is safe, though.
# Note: this revision is only used for Windows. Other platforms use update.sh.
LLVM_WINDOWS_REVISION = '201116'

# Path constants. (All of these should be absolute paths.)
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
CHROMIUM_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', '..', '..'))
LLVM_DIR = os.path.join(CHROMIUM_DIR, 'third_party', 'llvm')
LLVM_BUILD_DIR = os.path.join(CHROMIUM_DIR, 'third_party', 'llvm-build',
                              'Release+Asserts')
CLANG_DIR = os.path.join(LLVM_DIR, 'tools', 'clang')
COMPILER_RT_DIR = os.path.join(LLVM_DIR, 'projects', 'compiler-rt')
STAMP_FILE = os.path.join(LLVM_BUILD_DIR, 'cr_build_revision')

LLVM_REPO_URL='https://llvm.org/svn/llvm-project'
if 'LLVM_REPO_URL' in os.environ:
  LLVM_REPO_URL = os.environ['LLVM_REPO_URL']


def ReadStampFile():
  """Return the contents of the stamp file, or '' if it doesn't exist."""
  try:
    with open(STAMP_FILE, 'r') as f:
      return f.read();
  except IOError:
    return ''


def WriteStampFile(s):
  """Write s to the stamp file."""
  if not os.path.exists(LLVM_BUILD_DIR):
    os.makedirs(LLVM_BUILD_DIR)
  with open(STAMP_FILE, 'w') as f:
    f.write(s)


def DeleteFiles(dir, pattern):
  """Delete all files in dir matching pattern."""
  n = 0
  regex = re.compile(r'^' + pattern + r'$')
  for root, _, files in os.walk(dir):
    for f in files:
      if regex.match(f):
        os.remove(os.path.join(root, f))
        n += 1
  return n


def ClobberChromiumBuildFiles():
  """Clobber Chromium build files."""
  print 'Clobbering Chromium build files...'
  n = 0
  dirs = [
    os.path.join(CHROMIUM_DIR, 'out/Debug'),
    os.path.join(CHROMIUM_DIR, 'out/Release'),
  ]
  for d in dirs:
    if not os.path.exists(d):
      continue
    n += DeleteFiles(d, r'.*\.o')
    n += DeleteFiles(d, r'.*\.obj')
    n += DeleteFiles(d, r'stamp.untar')
  print 'Removed %d files.' % (n)


def RunCommand(command, tries=1):
  """Run a command, possibly with multiple retries."""
  for i in range(0, tries):
    print 'Running %s (try #%d)' % (str(command), i + 1)
    if subprocess.call(command, shell=True) == 0:
      return
    print 'Failed.'
  sys.exit(1)


def Checkout(name, url, dir):
  """Checkout the SVN module at url into dir. Use name for the log message."""
  print "Checking out %s r%s into '%s'" % (name, LLVM_WINDOWS_REVISION, dir)
  RunCommand(['svn', 'checkout', '--force',
              url + '@' + LLVM_WINDOWS_REVISION, dir], tries=2)


vs_version = None
def GetVSVersion():
  global vs_version
  if not vs_version:
    # TODO(hans): Find a less hacky way to find the MSVS installation.
    sys.path.append(os.path.join(CHROMIUM_DIR, 'tools', 'gyp', 'pylib'))
    import gyp.MSVSVersion
    # We request VS 2013 because Clang won't build with 2010, and 2013 will be
    # the default for Chromium soon anyway.
    vs_version = gyp.MSVSVersion.SelectVisualStudioVersion('2013')
  return vs_version


def UpdateClang():
  print 'Updating Clang to %s...' % (LLVM_WINDOWS_REVISION)
  if ReadStampFile() == LLVM_WINDOWS_REVISION:
    print 'Already up to date.'
    return 0

  ClobberChromiumBuildFiles()

  # Reset the stamp file in case the build is unsuccessful.
  WriteStampFile('')

  Checkout('LLVM', LLVM_REPO_URL + '/llvm/trunk', LLVM_DIR)
  Checkout('Clang', LLVM_REPO_URL + '/cfe/trunk', CLANG_DIR)
  Checkout('compiler-rt', LLVM_REPO_URL + '/compiler-rt/trunk', COMPILER_RT_DIR)

  if not os.path.exists(LLVM_BUILD_DIR):
    os.makedirs(LLVM_BUILD_DIR)
  os.chdir(LLVM_BUILD_DIR)

  if not re.search(r'cmake', os.environ['PATH'], flags=re.IGNORECASE):
    # If CMake is not on the path, try looking in a standard location.
    os.environ['PATH'] += os.pathsep + 'C:\\Program Files (x86)\\CMake 2.8\\bin'

  RunCommand(GetVSVersion().SetupScript('x64') +
             ['&&', 'cmake', '-GNinja', '-DCMAKE_BUILD_TYPE=Release',
              '-DLLVM_ENABLE_ASSERTIONS=ON', LLVM_DIR])
  RunCommand(GetVSVersion().SetupScript('x64') + ['&&', 'ninja', 'all'])

  WriteStampFile(LLVM_WINDOWS_REVISION)
  print 'Clang update was successful.'
  return 0


def main():
  if not sys.platform in ['win32', 'cygwin']:
    # For non-Windows, fall back to update.sh.
    # TODO(hans): Make update.py replace update.sh completely.
    # This script is called by gclient. gclient opens its hooks subprocesses
    # with (stdout=subprocess.PIPE, stderr=subprocess.STDOUT) and then does
    # custom output processing that breaks printing '\r' characters for
    # single-line updating status messages as printed by curl and wget.
    # Work around this by setting stderr of the update.sh process to stdin (!):
    # gclient doesn't redirect stdin, and while stdin itself is read-only, a
    # dup()ed sys.stdin is writable, try
    # fd2 = os.dup(sys.stdin.fileno()); os.write(fd2, 'hi')
    # TODO: Fix gclient instead, http://crbug.com/95350
    return subprocess.call(
        [os.path.join(os.path.dirname(__file__), 'update.sh')] + sys.argv[1:],
        stderr=os.fdopen(os.dup(sys.stdin.fileno())))

  if not re.search('clang=1', os.environ.get('GYP_DEFINES', '')):
    print 'Skipping Clang update (clang=1 was not set in GYP_DEFINES).'
    return 0

  return UpdateClang()


if __name__ == '__main__':
  sys.exit(main())
|
Python
| 0
|
@@ -667,11 +667,11 @@
'201
-116
+604
'%0A%0A#
|
e7180497f2cb59414d06b944ccea5ceffd3c8e51
|
fix build on windows
|
tools/grit/grit/tool/build.py
|
tools/grit/grit/tool/build.py
|
#!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

'''The 'grit build' tool along with integration for this tool with the
SCons build system.
'''

import os
import getopt
import types
import sys

from grit import grd_reader
from grit import util
from grit.tool import interface
from grit import shortcuts


def ParseDefine(define):
  '''Parses a define that is either like "NAME" or "NAME=VAL" and
  returns its components, using True as the default value. Values of
  "1" and "0" are transformed to True and False respectively.
  '''
  parts = [part.strip() for part in define.split('=')]
  assert len(parts) >= 1
  name = parts[0]
  val = True
  if len(parts) > 1:
    val = parts[1]
  if val == "1": val = True
  elif val == "0": val = False
  return (name, val)


class RcBuilder(interface.Tool):
  '''A tool that builds RC files and resource header files for compilation.

Usage: grit build [-o OUTPUTDIR] [-D NAME[=VAL]]*

All output options for this tool are specified in the input file (see
'grit help' for details on how to specify the input file - it is a global
option).

Options:

  -o OUTPUTDIR      Specify what directory output paths are relative to.
                    Defaults to the current directory.

  -D NAME[=VAL]     Specify a C-preprocessor-like define NAME with optional
                    value VAL (defaults to 1) which will be used to control
                    conditional inclusion of resources.

Conditional inclusion of resources only affects the output of files which
control which resources get linked into a binary, e.g. it affects .rc files
meant for compilation but it does not affect resource header files (that define
IDs). This helps ensure that values of IDs stay the same, that all messages
are exported to translation interchange files (e.g. XMB files), etc.
'''

  def ShortDescription(self):
    return 'A tool that builds RC files for compilation.'

  def Run(self, opts, args):
    self.output_directory = '.'
    (own_opts, args) = getopt.getopt(args, 'o:D:')
    for (key, val) in own_opts:
      if key == '-o':
        self.output_directory = val
      elif key == '-D':
        name, val = ParseDefine(val)
        self.defines[name] = val
    if len(args):
      print "This tool takes no tool-specific arguments."
      return 2
    self.SetOptions(opts)
    if self.scons_targets:
      self.VerboseOut('Using SCons targets to identify files to output.\n')
    else:
      self.VerboseOut('Output directory: %s (absolute path: %s)\n' %
                      (self.output_directory,
                       os.path.abspath(self.output_directory)))
    self.res = grd_reader.Parse(opts.input, debug=opts.extra_verbose)
    self.res.RunGatherers(recursive = True)
    self.Process()
    return 0

  def __init__(self):
    # Default file-creation function is built-in file(). Only done to allow
    # overriding by unit test.
    self.fo_create = file

    # key/value pairs of C-preprocessor like defines that are used for
    # conditional output of resources
    self.defines = {}

    # self.res is a fully-populated resource tree if Run()
    # has been called, otherwise None.
    self.res = None

    # Set to a list of filenames for the output nodes that are relative
    # to the current working directory. They are in the same order as the
    # output nodes in the file.
    self.scons_targets = None

  # static method
  def ProcessNode(node, output_node, outfile):
    '''Processes a node in-order, calling its formatter before and after
    recursing to its children.

    Args:
      node: grit.node.base.Node subclass
      output_node: grit.node.io.File
      outfile: open filehandle
    '''
    base_dir = util.dirname(output_node.GetOutputFilename())

    try:
      formatter = node.ItemFormatter(output_node.GetType())
      if formatter:
        outfile.write(formatter.Format(node, output_node.GetLanguage(),
                                       begin_item=True, output_dir=base_dir))
    except:
      print u"Error processing node %s" % unicode(node)
      raise

    for child in node.children:
      RcBuilder.ProcessNode(child, output_node, outfile)

    try:
      if formatter:
        outfile.write(formatter.Format(node, output_node.GetLanguage(),
                                       begin_item=False, output_dir=base_dir))
    except:
      print u"Error processing node %s" % unicode(node)
      raise
  ProcessNode = staticmethod(ProcessNode)

  def Process(self):
    # Update filenames with those provided by SCons if we're being invoked
    # from SCons. The list of SCons targets also includes all <structure>
    # node outputs, but it starts with our output files, in the order they
    # occur in the .grd
    if self.scons_targets:
      assert len(self.scons_targets) >= len(self.res.GetOutputFiles())
      outfiles = self.res.GetOutputFiles()
      for ix in range(len(outfiles)):
        outfiles[ix].output_filename = os.path.abspath(
          self.scons_targets[ix])
    else:
      for output in self.res.GetOutputFiles():
        output.output_filename = os.path.abspath(os.path.join(
          self.output_directory, output.GetFilename()))

    for output in self.res.GetOutputFiles():
      self.VerboseOut('Creating %s...' % output.GetFilename())

      # Don't build data package files on mac/windows because it's not used and
      # there are project dependency issues. We still need to create the file
      # to satisfy build dependencies.
      if output.GetType() == 'data_package' and sys.platform != 'linux2':
        f = open(output.GetOutputFilename(), 'wb')
        f.close()

      # Microsoft's RC compiler can only deal with single-byte or double-byte
      # files (no UTF-8), so we make all RC files UTF-16 to support all
      # character sets.
      if output.GetType() in ['rc_header']:
        encoding = 'cp1252'
        outname = output.GetOutputFilename()
        oldname = outname + '.tmp'
        if os.access(oldname, os.F_OK):
          os.remove(oldname)
        try:
          os.rename(outname, oldname)
        except OSError:
          oldname = None
      else:
        encoding = 'utf_16'
      outfile = self.fo_create(output.GetOutputFilename(), 'wb')
      if output.GetType() != 'data_package':
        outfile = util.WrapOutputStream(outfile, encoding)

      # Set the context, for conditional inclusion of resources
      self.res.SetOutputContext(output.GetLanguage(), self.defines)

      # TODO(joi) Handle this more gracefully
      import grit.format.rc_header
      grit.format.rc_header.Item.ids_ = {}

      # Iterate in-order through entire resource tree, calling formatters on
      # the entry into a node and on exit out of it.
      self.ProcessNode(self.res, output, outfile)
      outfile.close()

      if output.GetType() in ['rc_header'] and oldname:
        if open(oldname).read() != open(outname).read():
          os.remove(oldname)
        else:
          os.remove(outname)
          os.rename(oldname, outname)
      self.VerboseOut(' done.\n')

    # Print warnings if there are any duplicate shortcuts.
    print '\n'.join(shortcuts.GenerateDuplicateShortcutsWarnings(
      self.res.UberClique(), self.res.GetTcProject()))

    # Print out any fallback warnings, and missing translation errors, and
    # exit with an error code if there are missing translations in a non-pseudo
    # build
    print (self.res.UberClique().MissingTranslationsReport().
           encode('ascii', 'replace'))
    if self.res.UberClique().HasMissingTranslations():
      sys.exit(-1)
|
Python
| 0.000001
|
@@ -5770,16 +5770,33 @@
.close()
+%0A continue
%0A%0A
|
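With the hunk above applied, the data-package branch of Process() reads as follows (a reconstruction from the diff, using grit's two-space indentation; not part of the dataset record). The new continue skips the encoding and formatting work once the placeholder file has been written:

      if output.GetType() == 'data_package' and sys.platform != 'linux2':
        f = open(output.GetOutputFilename(), 'wb')
        f.close()
        continue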
a4ab01d64c505b786e6fef217829fb56c3d6b6ce
|
Add management script to generate hansard appearance scores.
|
mzalendo/scorecards/management/commands/scorecard_update_person_hansard_appearances.py
|
mzalendo/scorecards/management/commands/scorecard_update_person_hansard_appearances.py
|
Python
| 0
|
@@ -0,0 +1,1819 @@
+import datetime%0A%0Afrom django.core.management.base import NoArgsCommand%0Afrom django.core.exceptions import ImproperlyConfigured%0A%0Aclass Command(NoArgsCommand):%0A help = 'Create/update hansard scorecard entry for all mps'%0A args = ''%0A%0A def handle_noargs(self, **options):%0A # Imports are here to avoid an import loop created when the Hansard%0A # search indexes are checked%0A from core.models import Person%0A from scorecards.models import Category, Entry%0A%0A # create the category%0A try:%0A category = Category.objects.get(slug=%22hansard-appearances%22)%0A except Category.DoesNotExist:%0A raise ImproperlyConfigured(%22Please create a scorecard category with the slug 'hansard-appearances'%22)%0A%0A # Find all the people we should score for%0A people = Person.objects.all().is_mp()%0A %0A lower_limit = datetime.date.today() - datetime.timedelta(183)%0A%0A for person in people:%0A # NOTE: We could certainly do all this in a single query.%0A hansard_count = person.hansard_entries.filter(sitting__start_date__gte=lower_limit).count()%0A%0A try:%0A entry = person.scorecard_entries.get(category=category)%0A except Entry.DoesNotExist:%0A entry = Entry(content_object=person, category=category)%0A%0A if hansard_count %3C 6:%0A entry.score = -1%0A entry.remark = %22Hardly ever speaks in parliament%22%0A elif hansard_count %3C 60:%0A entry.score = 0%0A entry.remark = %22Sometimes speaks in parliament%22%0A else:%0A entry.score = 1%0A entry.remark = %22Frequently speaks in parliament%22%0A %0A entry.date = datetime.date.today()%0A%0A entry.save()%0A%0A %0A %0A%0A
|
|
4a404709081515fa0cc91683b5a9ad8f6a68eae6
|
Add a migration to drop mandatory assessment methods from brief data
|
migrations/versions/630_remove_mandatory_assessment_methods_.py
|
migrations/versions/630_remove_mandatory_assessment_methods_.py
|
Python
| 0
|
@@ -0,0 +1,1777 @@
+%22%22%22Remove mandatory assessment methods from briefs%0A%0ARevision ID: 630%0ARevises: 620%0ACreate Date: 2016-06-03 15:26:53.890401%0A%0A%22%22%22%0A%0A# revision identifiers, used by Alembic.%0Arevision = '630'%0Adown_revision = '620'%0A%0Afrom alembic import op%0Aimport sqlalchemy as sa%0Afrom sqlalchemy.sql import table, column%0Afrom sqlalchemy.dialects import postgresql%0A%0A%0Abriefs = table(%0A 'briefs',%0A column('id', sa.Integer),%0A column('lot_id', sa.Integer),%0A column('data', postgresql.JSON),%0A)%0A%0A%0Adef upgrade():%0A conn = op.get_bind()%0A for brief in conn.execute(briefs.select()):%0A if brief.data.get('evaluationType') is None:%0A continue%0A%0A optional_methods = list(%5B%0A method for method in brief.data%5B'evaluationType'%5D%0A if method not in %5B'Work history', 'Written proposal'%5D%0A %5D)%0A%0A if brief.data%5B'evaluationType'%5D != optional_methods:%0A if optional_methods:%0A brief.data%5B'evaluationType'%5D = optional_methods%0A else:%0A brief.data.pop('evaluationType')%0A%0A conn.execute(briefs.update().where(briefs.c.id == brief.id).values(%0A data=brief.data%0A ))%0A%0A%0Adef downgrade():%0A conn = op.get_bind()%0A for brief in conn.execute(briefs.select()):%0A # Add written proposal to all outcomes and research participants briefs%0A if brief.lot_id in %5B5, 8%5D:%0A brief.data%5B'evaluationType'%5D = %5B'Written proposal'%5D + brief.data.get('evaluationType', %5B%5D)%0A # Add work history to all specialists briefs%0A elif brief.lot_id == 6:%0A brief.data%5B'evaluationType'%5D = %5B'Work history'%5D + brief.data.get('evaluationType', %5B%5D)%0A%0A conn.execute(briefs.update().where(briefs.c.id == brief.id).values(%0A data=brief.data%0A ))%0A
|
|
def036cfb47f1ae9e0efaa5ff238a3b159ab9405
|
Create Quotation from Service Order Template
|
netforce_service/netforce_service/models/service_create_quot.py
|
netforce_service/netforce_service/models/service_create_quot.py
|
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
class CreateQuot(Model):
_name = "service.create.quot"
_transient = True
_fields = {
"contact_id": fields.Many2One("contact", "Contact", required=True),
"job_template_id": fields.Many2One("job.template", "Service Order Template", required=True),
}
_defaults = {
"job_template_id": lambda self, ctx: ctx.get("refer_id"),
}
def create_quot(self, ids, context={}):
obj = self.browse(ids[0])
vals = {
"contact_id": obj.contact_id.id,
"job_template_id": obj.job_template_id.id,
"lines": [],
}
tmpl = obj.job_template_id
for line in tmpl.lines:
prod = line.product_id
line_vals = {
"product_id": prod.id,
"description": line.description,
"qty": line.qty,
"uom_id": line.uom_id.id,
"unit_price": line.unit_price,
"tax_id": prod.sale_tax_id.id if prod else None,
}
vals["lines"].append(("create", line_vals))
quot_id = get_model("sale.quot").create(vals)
quot = get_model("sale.quot").browse(quot_id)
return {
"next": {
"name": "quot",
"mode": "form",
"active_id": quot_id,
},
"flash": "Quotation %s created from service order template %s" % (quot.number, tmpl.name),
}
CreateQuot.register()
|
Python
| 0
|
@@ -1784,24 +1784,50 @@
template_id%0A
+ line_sequence = 1%0A
for
@@ -1899,32 +1899,75 @@
line_vals = %7B%0A
+ %22sequence%22: line_sequence,%0A
@@ -2225,16 +2225,55 @@
e None,%0A
+ %22amount%22: line.amount,%0A
@@ -2334,16 +2334,47 @@
_vals))%0A
+ line_sequence += 1%0A
|
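Reconstructed from the hunks above (an editorial sketch, not part of the record): after this commit, the line loop in create_quot() numbers each quotation line and carries the template line's amount across:

        line_sequence = 1
        for line in tmpl.lines:
            prod = line.product_id
            line_vals = {
                "sequence": line_sequence,
                "product_id": prod.id,
                "description": line.description,
                "qty": line.qty,
                "uom_id": line.uom_id.id,
                "unit_price": line.unit_price,
                "tax_id": prod.sale_tax_id.id if prod else None,
                "amount": line.amount,
            }
            vals["lines"].append(("create", line_vals))
            line_sequence += 1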
d44fc89f27be0e618d02202b5d067466079be16d
|
add tool to download and extract latest firmware
|
download-mcuimg.py
|
download-mcuimg.py
|
Python
| 0
|
@@ -0,0 +1,876 @@
+#! /usr/bin/env python3%0A%0Aimport urllib.request%0Afrom pprint import pprint%0Aimport zipfile%0Aimport json%0A%0Aprint('Downloading release info..')%0Arelease_info = json.loads(urllib.request.urlopen('https://api.github.com/repos/wipy/wipy/releases/latest').read().decode('utf-8'))%0Awith open('mcuimg.txt', 'w') as f:%0A pprint(release_info, f)%0A%0Aprint('TAG: %7B%7D'.format(release_info%5B'tag_name'%5D))%0Aprint('NAME: %7B%7D'.format(release_info%5B'name'%5D))%0A%0Azip_url = release_info%5B'assets'%5D%5B0%5D%5B'browser_download_url'%5D%0A%0Aprint('Downloading ZIP from: %7B%7D'.format(zip_url))%0Awith open('Binaries.zip', 'wb') as f:%0A f.write(urllib.request.urlopen(zip_url).read())%0A%0Aprint('Extracting mcuimg.bin...')%0Awith zipfile.ZipFile('Binaries.zip', 'r') as archive:%0A with open('mcuimg.bin', 'wb') as f:%0A f.write(archive.open('mcuimg.bin').read())%0A%0Aprint('perform firmware upgrade with %22wipy-ftp.py upgrade%22...')%0A
|
|
d430411fae7c20ff72e60fd63e7854de64da7e71
|
Update ceph_help.py
|
tendrl/node_agent/flows/import_cluster/ceph_help.py
|
tendrl/node_agent/flows/import_cluster/ceph_help.py
|
import subprocess
from tendrl.commons.event import Event
from tendrl.commons.message import Message
from tendrl.commons.utils import ansible_module_runner
import yaml
def import_ceph(integration_id, request_id, flow_id):
attributes = {}
if tendrl_ns.config.data['package_source_type'] == 'pip':
name = "git+https://github.com/Tendrl/ceph-integration.git@v1.2"
attributes["name"] = name
attributes["editable"] = "false"
ansible_module_path = "core/packaging/language/pip.py"
elif tendrl_ns.config.data['package_source_type'] == 'rpm':
name = "tendrl-ceph-integration"
ansible_module_path = "core/packaging/os/yum.py"
attributes["name"] = name
else:
return False
try:
runner = ansible_module_runner.AnsibleRunner(
ansible_module_path,
tendrl_ns.config.data['tendrl_ansible_exec_file'],
**attributes
)
result, err = runner.run()
except ansible_module_runner.AnsibleExecutableGenerationFailed:
return False
Event(
Message(
priority="info",
publisher=tendrl_ns.publisher_id,
payload={
"message": "Installed storage binaries on node %s" %
tendrl_ns.node_context.fqdn
},
request_id=request_id,
flow_id=flow_id,
cluster_id=integration_id,
)
)
with open("/etc/tendrl/ceph-integration/ceph-integration_logging"
".yaml", 'w+') as f:
f.write(logging_file)
config_data = {"etcd_port": tendrl_ns.config.data['etcd_port'],
"etcd_connection": tendrl_ns.config.data['etcd_connection'],
"tendrl_ansible_exec_file": "$HOME/.tendrl/ceph-integration/ansible_exec",
"log_cfg_path":"/etc/tendrl/ceph-integration/ceph-integration_logging"
".yaml", "log_level": "DEBUG",
"logging_socket_path": "/var/run/tendrl/message.sock"}
with open("/etc/tendrl/ceph-integration/ceph-integration"
".conf.yaml", 'w') as outfile:
yaml.dump(config_data, outfile, default_flow_style=False)
ceph_integration_context = "/etc/tendrl/ceph-integration/integration_id"
with open(ceph_integration_context, 'wb+') as f:
f.write(integration_id)
Event(
Message(
priority="info",
publisher=tendrl_ns.publisher_id,
payload={
"message": "Created ceph integration configuration file"
},
request_id=request_id,
flow_id=flow_id,
cluster_id=integration_id,
)
)
subprocess.Popen(["nohup", "tendrl-ceph-integration", "&"])
Event(
Message(
priority="info",
publisher=tendrl_ns.publisher_id,
payload={
"message": "Started ceph integration daemon on node %s" %
tendrl_ns.node_context.fqdn
},
request_id=request_id,
flow_id=flow_id,
cluster_id=integration_id,
)
)
logging_file = """version: 1
disable_existing_loggers: False
formatters:
simple:
format: "%(asctime)s - %(pathname)s - %(filename)s:%(lineno)s - %(funcName)20s() - %(levelname)s - %(message)s"
datefmt: "%Y-%m-%dT%H:%M:%S%z"
handlers:
console:
class: logging.StreamHandler
level: DEBUG
formatter: simple
stream: ext://sys.stdout
info_file_handler:
class: logging.handlers.TimedRotatingFileHandler
level: INFO
formatter: simple
filename: /var/log/tendrl/ceph-integration/ceph-integration_info.log
error_file_handler:
class: logging.handlers.RotatingFileHandler
level: ERROR
formatter: simple
filename: /var/log/tendrl/ceph-integration/ceph-integration_errors.log
maxBytes: 10485760 # 10MB
backupCount: 20
encoding: utf8
loggers:
my_module:
level: ERROR
handlers: [console]
propagate: no
root:
level: INFO
handlers: [console, info_file_handler, error_file_handler]
"""
|
Python
| 0.000001
|
@@ -370,16 +370,18 @@
git@v1.2
+.1
%22%0A
|
50845b24028f4aef96370abae41c7833fda8846c
|
modify report name to identify execution context
|
tensorflow/python/data/benchmarks/benchmark_base.py
|
tensorflow/python/data/benchmarks/benchmark_base.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utilities for tf.data benchmarking functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.platform import test
class DatasetBenchmarkBase(test.Benchmark):
"""Base class for dataset benchmarks."""
def run_benchmark(self,
dataset,
num_elements,
iters=1,
warmup=True,
apply_default_optimizations=False):
"""Benchmarks the dataset.
Runs the dataset `iters` times. In each iteration, the benchmark measures
the time it takes to go through `num_elements` elements of the dataset.
Args:
dataset: Dataset to benchmark.
num_elements: Number of dataset elements to iterate through each benchmark
iteration.
iters: Number of times to repeat the timing.
warmup: If true, warms up the session caches by running an untimed run.
apply_default_optimizations: Determines whether default optimizations
should be applied.
Returns:
A float, representing the per-element wall time of the dataset in seconds.
This is the median time (with respect to `iters`) it takes for the dataset
to go through `num_elements` elements, divided by `num_elements.`
"""
# The options that have been applied to the dataset are preserved so that
# they are not overwritten while benchmarking.
options = dataset.options()
options.experimental_optimization.apply_default_optimizations = (
apply_default_optimizations)
dataset = dataset.with_options(options)
# NOTE: We use `dataset.skip()` to perform the iterations in C++, avoiding
# the overhead of having to execute a TensorFlow op for each step of the input
# pipeline. Note that this relies on the underlying implementation of `skip`
# to execute upstream computation. If it is optimized in the future,
# we will have to change this code.
dataset = dataset.skip(num_elements - 1)
if context.executing_eagerly():
deltas = []
for _ in range(iters):
if warmup:
iterator = iter(dataset)
next(iterator)
iterator = iter(dataset)
start = time.time()
next(iterator)
end = time.time()
deltas.append(end - start)
return np.median(deltas) / float(num_elements)
iterator = dataset_ops.make_initializable_iterator(dataset)
next_element = iterator.get_next()
next_element = nest.flatten(next_element)[0]
deltas = []
for _ in range(iters):
with session.Session() as sess:
if warmup:
# Run once to warm up the session caches.
sess.run(iterator.initializer)
sess.run(next_element.op)
sess.run(iterator.initializer)
start = time.time()
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
return np.median(deltas) / float(num_elements)
def run_and_report_benchmark(self,
dataset,
num_elements,
name,
iters=5,
extras=None,
warmup=True,
apply_default_optimizations=False):
# Measure the per-element wall time.
wall_time = self.run_benchmark(dataset, num_elements, iters, warmup,
apply_default_optimizations)
if extras is None:
extras = {}
extras["num_elements"] = num_elements
self.report_benchmark(
wall_time=wall_time, iters=iters, name=name, extras=extras)
|
Python
| 0.000001
|
@@ -4435,24 +4435,143 @@
imizations)%0A
+ if context.executing_eagerly():%0A name = %22%7B%7D_eager%22.format(name)%0A else:%0A name = %22%7B%7D_graph%22.format(name)
%0A if extr
|
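Applying the hunk above, run_and_report_benchmark() gains a name suffix between the timing call and the extras handling, so eager and graph executions report as separate benchmark entries (reconstruction from the diff):

    wall_time = self.run_benchmark(dataset, num_elements, iters, warmup,
                                   apply_default_optimizations)
    if context.executing_eagerly():
      name = "{}_eager".format(name)
    else:
      name = "{}_graph".format(name)

    if extras is None:
      extras = {}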
373bdc41b35f75a15430eb2f9a03a8ab38d401e8
|
Test for upcast with parent unbound method.
|
tests/basics/subclass_native6.py
|
tests/basics/subclass_native6.py
|
Python
| 0
|
@@ -0,0 +1,183 @@
+# Calling native base class unbound method with subclass instance.%0A%0Aclass mylist(list):%0A pass%0A%0A%0Al = mylist((1, 2, 3))%0Aassert type(l) is mylist%0Aprint(l)%0A%0Alist.append(l, 4)%0Aprint(l)%0A
|
|
9d3dd8f1921165bc0c28b94257a2266202b326bb
|
Return alliance info from key_info and characters
|
evelink/account.py
|
evelink/account.py
|
from evelink import api
from evelink import constants
class Account(object):
"""Wrapper around /account/ of the EVE API.
Note that a valid API key is required.
"""
def __init__(self, api):
self.api = api
@api.auto_call('account/AccountStatus')
def status(self, api_result=None):
"""Returns the account's subscription status."""
_str, _int, _float, _bool, _ts = api.elem_getters(api_result.result)
result = {
'paid_ts': _ts('paidUntil'),
'create_ts': _ts('createDate'),
'logins': _int('logonCount'),
'minutes_played': _int('logonMinutes'),
}
return api.APIResult(result, api_result.timestamp, api_result.expires)
@api.auto_call('account/APIKeyInfo')
def key_info(self, api_result=None):
"""Returns the details of the API key being used to auth."""
key = api_result.result.find('key')
result = {
'access_mask': int(key.attrib['accessMask']),
'type': constants.APIKey.key_types[key.attrib['type']],
'expire_ts': api.parse_ts(key.attrib['expires']) if key.attrib['expires'] else None,
'characters': {},
}
rowset = key.find('rowset')
for row in rowset.findall('row'):
character = {
'id': int(row.attrib['characterID']),
'name': row.attrib['characterName'],
'corp': {
'id': int(row.attrib['corporationID']),
'name': row.attrib['corporationName'],
},
}
result['characters'][character['id']] = character
return api.APIResult(result, api_result.timestamp, api_result.expires)
@api.auto_call('account/Characters')
def characters(self, api_result=None):
"""Returns all of the characters on an account."""
rowset = api_result.result.find('rowset')
result = {}
for row in rowset.findall('row'):
character = {
'id': int(row.attrib['characterID']),
'name': row.attrib['name'],
'corp': {
'id': int(row.attrib['corporationID']),
'name': row.attrib['corporationName'],
},
}
result[character['id']] = character
return api.APIResult(result, api_result.timestamp, api_result.expires)
|
Python
| 0
|
@@ -1591,32 +1591,246 @@
,%0A %7D%0A
+ if row.attrib%5B'allianceID'%5D:%0A character%5B'alliance'%5D = %7B%0A 'id': int(row.attrib%5B'allianceID'%5D),%0A 'name': row.attrib%5B'allianceName'%5D,%0A %7D%0A
resu
@@ -2505,32 +2505,246 @@
,%0A %7D%0A
+ if row.attrib%5B'allianceID'%5D:%0A character%5B'alliance'%5D = %7B%0A 'id': int(row.attrib%5B'allianceID'%5D),%0A 'name': row.attrib%5B'allianceName'%5D,%0A %7D%0A
resu
|
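The two hunks insert the same five-line block into both key_info() and characters(). Reconstructed for characters() (editorial sketch from the diff): each character dict gains an 'alliance' entry whenever the row carries a non-empty allianceID attribute:

        for row in rowset.findall('row'):
            character = {
                'id': int(row.attrib['characterID']),
                'name': row.attrib['name'],
                'corp': {
                    'id': int(row.attrib['corporationID']),
                    'name': row.attrib['corporationName'],
                },
            }
            if row.attrib['allianceID']:
                character['alliance'] = {
                    'id': int(row.attrib['allianceID']),
                    'name': row.attrib['allianceName'],
                }
            result[character['id']] = character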
9ffa7abeccbce24b037a644612681fd397e9d13a
|
add dict example
|
trypython/basic/dict_preserved_insert_order_py37.py
|
trypython/basic/dict_preserved_insert_order_py37.py
|
Python
| 0
|
@@ -0,0 +1,995 @@
+%22%22%22%0APython 3.7 %E3%81%A7 %E8%BE%9E%E6%9B%B8%E3%81%AE%E6%8C%BF%E5%85%A5%E9%A0%86%E5%BA%8F%E3%81%8C%E4%BF%9D%E6%8C%81%E3%81%95%E3%82%8C%E3%82%8B%E3%81%93%E3%81%A8%E3%82%92%E7%A2%BA%E8%AA%8D%E3%81%99%E3%82%8B%E3%82%B5%E3%83%B3%E3%83%97%E3%83%AB%E3%81%A7%E3%81%99%E3%80%82%0A%0AREFERENCES:: http://bit.ly/2VIggXP%0A http://bit.ly/2VySRIe%0A http://bit.ly/2VFhjI4%0A http://bit.ly/2VEq058%0A http://bit.ly/2VBKrzK%0A%22%22%22%0Afrom trypython.common.commoncls import SampleBase%0Afrom trypython.common.commonfunc import pr%0A%0A%0Aclass Sample(SampleBase):%0A def exec(self):%0A %22%22%22%E3%82%B3%E3%83%BC%E3%83%89%E3%81%AE%E3%83%8D%E3%82%BF%E3%81%AF http://bit.ly/2VBKrzK%E3%80%80%E3%81%8B%E3%82%89%E6%8B%9D%E5%80%9F%22%22%22%0A languages = %5B'Python', 'Ruby', 'Perl', 'Python', 'JavaScript'%5D%0A%0A # sorted + set + list.index %E3%82%92%E7%B5%84%E3%81%BF%E5%90%88%E3%82%8F%E3%81%9B%E3%81%A6 %E9%A0%86%E5%BA%8F%E3%82%AD%E3%83%BC%E3%83%97 %E3%81%97%E3%81%AA%E3%81%8C%E3%82%89%E9%87%8D%E8%A4%87%E5%89%8A%E9%99%A4 (from http://bit.ly/2VBKrzK)%0A pr('sorted + set + list.index', sorted(set(languages), key=languages.index))%0A%0A # python 3.7 %E3%81%8B%E3%82%89%E3%81%AF dict %E3%81%AB%E3%81%A6%E6%8C%BF%E5%85%A5%E9%A0%86%E5%BA%8F%E3%81%8C%E4%BF%9D%E6%8C%81%E3%81%95%E3%82%8C%E3%82%8B%E3%81%93%E3%81%A8%E3%81%8CPython %E8%A8%80%E8%AA%9E%E4%BB%95%E6%A7%98%E3%81%AE%E4%B8%80%E9%83%A8%E3%81%A8%E3%81%AA%E3%81%A3%E3%81%9F%E3%81%AE%E3%81%A7%E3%80%81%E3%81%93%E3%82%8C%E3%81%A7%E3%82%82%E8%89%AF%E3%81%84%0A pr('dict (python 3.7)', list(dict.fromkeys(languages)))%0A%0A # %E3%81%A1%E3%81%AA%E3%81%BF%E3%81%AB %E9%A0%86%E5%BA%8F%E4%BF%9D%E8%A8%BC %E3%82%92%E3%81%97%E3%81%AA%E3%81%84 set() %E3%82%92%E4%BD%BF%E3%81%86%E3%81%A8%E9%87%8D%E8%A4%87%E5%89%8A%E9%99%A4%E3%81%AF%E3%81%A7%E3%81%8D%E3%82%8B%E3%81%91%E3%81%A9%E3%80%81%E5%BD%93%E7%84%B6%E9%A0%86%E5%BA%8F%E3%81%AF%E3%82%AD%E3%83%BC%E3%83%97%E3%81%A7%E3%81%8D%E3%81%AA%E3%81%84%0A pr('set (python 3.7)', list(set(languages)))%0A%0A%0Adef go():%0A obj = Sample()%0A obj.exec()%0A%0A%0Aif __name__ == '__main__':%0A go()%0A
|
|
f1b91a52b52dfab3b350191ede23731f0a30f4c4
|
Add pythonrc
|
python/pythonrc.py
|
python/pythonrc.py
|
Python
| 0.000004
|
@@ -0,0 +1,601 @@
+#!/usr/bin/env python%0A%0A# Inspired by https://github.com/dag/dotfiles/blob/master/python/.pythonrc%0A%0Aimport os%0Aimport readline%0A%0Areadline.parse_and_bind('tab: complete')%0Ahistory = os.path.expanduser(%22~/.pythonhist%22)%0A%0Aif os.path.exists(history):%0A try:%0A readline.read_history_file(history)%0A except IOError, e:%0A print %22Failed to read %25r: %25s%22 %25 (history, e)%0A%0Areadline.set_history_length(1024 * 5)%0A%0Adef write_history(history):%0A def wrapped():%0A import readline%0A readline.write_history_file(history)%0A return wrapped%0A%0Aimport atexit%0Aatexit.register(write_history(history))%0A
|
|
12ad56d1360d6140093f2871c32593751b8ae052
|
Add modeset_event.py
|
py/tests/modeset_event.py
|
py/tests/modeset_event.py
|
Python
| 0
|
@@ -0,0 +1,1839 @@
+#!/usr/bin/python3%0A%0Aimport pykms%0Aimport selectors%0Aimport sys%0A%0Adef readdrm(fileobj, mask):%0A for ev in card.read_events():%0A ev.data(ev)%0A%0Adef waitevent(sel):%0A events = sel.select(1)%0A if not events:%0A print(%22Error: timeout receiving event%22)%0A else:%0A for key, mask in events:%0A key.data(key.fileobj, mask)%0A%0Adef eventhandler(event):%0A print(%22Received %25s event successfully (seq %25d time %25f)%22 %25%0A (event.type, event.seq, event.time))%0A%0Acard = pykms.Card()%0Asel = selectors.DefaultSelector()%0Asel.register(card.fd, selectors.EVENT_READ, readdrm)%0A%0Ares = pykms.ResourceManager(card)%0Aconn = res.reserve_connector()%0Acrtc = res.reserve_crtc(conn)%0Applane = res.reserve_primary_plane(crtc)%0A%0Amode = conn.get_default_mode()%0Amodeb = mode.to_blob(card)%0A%0Afor format in pplane.formats:%0A if format == pykms.PixelFormat.XRGB8888:%0A break%0A if format == pykms.PixelFormat.RGB565:%0A break%0A%0Afb = pykms.DumbFramebuffer(card, mode.hdisplay, mode.vdisplay, format);%0Apykms.draw_test_pattern(fb);%0A%0A# Disable request%0Acard.disable_planes()%0A%0Aprint(%22Setting %25s to %25s using %25s%22 %25 (conn.fullname, mode.name, format))%0A%0Areq = pykms.AtomicReq(card)%0A%0Areq.add(conn, %22CRTC_ID%22, crtc.id)%0Areq.add(crtc, %7B%22ACTIVE%22: 1,%0A %22MODE_ID%22: modeb.id%7D)%0Areq.add(pplane, %7B%22FB_ID%22: fb.id,%0A %22CRTC_ID%22: crtc.id,%0A %22SRC_X%22: 0 %3C%3C 16,%0A %22SRC_Y%22: 0 %3C%3C 16,%0A %22SRC_W%22: mode.hdisplay %3C%3C 16,%0A %22SRC_H%22: mode.vdisplay %3C%3C 16,%0A %22CRTC_X%22: 0,%0A %22CRTC_Y%22: 0,%0A %22CRTC_W%22: mode.hdisplay,%0A %22CRTC_H%22: mode.vdisplay%7D)%0A%0Aret = req.test(True)%0Aif ret != 0:%0A print(%22Atomic test failed: %25d%22 %25 ret)%0A sys.exit()%0A%0Areq.commit(eventhandler, allow_modeset = True)%0Awaitevent(sel)%0A%0Ainput(%22press enter to exit%5Cn%22)%0A
|
|
2ab8680c1a5e420de3f6b82db9a994eaeace164f
|
Add a snippet.
|
python/unicode/unicode.py
|
python/unicode/unicode.py
|
Python
| 0.000002
|
@@ -0,0 +1,1034 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0A# Copyright (c) 2013 J%C3%A9r%C3%A9mie DECOCK (http://www.jdhp.org)%0A%0A# DEFINE%0Astr1 = %22Hello!%22%0Aunicode_obj1 = u%22%C2%A1Buenos d%C3%ADas!%22%0Aunicode_obj2 = u%22%E4%BD%A0%E5%A5%BD%EF%BC%81%22%0A%0A# PRINT%0A%0Aprint%0Aprint str1%0Aprint unicode_obj1%0Aprint unicode_obj2%0A%0A# CONCAT%0A%0Aprint%0Aprint str1 + %22 %22 + unicode_obj1 + %22 %22 + unicode_obj2,%0Aprint type(str1 + %22 %22 + unicode_obj1 + %22 %22 + unicode_obj2)%0A%0A# PRINT TYPE%0A%0Aprint%0Aprint str1, type(str1)%0Aprint unicode_obj1, type(unicode_obj1)%0Aprint unicode_obj2, type(unicode_obj2)%0A%0A# LENGTH%0A%0Aprint%0Aprint %22len(%22, unicode_obj2, %22) = %22, len(unicode_obj2)%0Aprint u%22len(%7B0%7D) = %7B1%7D%22.format(unicode_obj2, len(unicode_obj2))%0Aprint u%22len(%25s) = %25s%22 %25 (unicode_obj2, len(unicode_obj2))%0A%0A# UNICODE TO ASCII (%E4%BD%A0%E5%A5%BD%EF%BC%81 -%3E )%0A%0A%0A# ASCII TO UNICODE ( -%3E %E4%BD%A0%E5%A5%BD%EF%BC%81)%0A%0A%0A# UNICODE TO HEX ASCII (%E4%BD%A0%E5%A5%BD%EF%BC%81 -%3E hex)%0A%0Aprint%0Ahex_str = unicode_obj2.encode(%22utf-8%22).encode(%22hex%22)%0Aprint %22%7B0%7D %7B1%7D (len: %7B2%7D)%22.format(hex_str, type(hex_str), len(hex_str))%0A%0Ahex_list = %5Bunicode_char.encode(%22utf-8%22).encode(%22hex%22) for unicode_char in unicode_obj2%5D%0Aprint(hex_list)%0A%0A
|
|
f6acf955904765f57ba15837fd6440a524590268
|
add migrations
|
ureport/polls/migrations/0024_auto_20160118_0934.py
|
ureport/polls/migrations/0024_auto_20160118_0934.py
|
Python
| 0.000001
|
@@ -0,0 +1,392 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('polls', '0023_populate_flow_date'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='poll',%0A name='poll_date',%0A field=models.DateTimeField(),%0A ),%0A %5D%0A
|
|
5390abc3f53f18515cd9a658d6286ac8a9b09d81
|
Create parrot_trouble.py
|
Python/CodingBat/parrot_trouble.py
|
Python/CodingBat/parrot_trouble.py
|
Python
| 0.005895
|
@@ -0,0 +1,155 @@
+# http://codingbat.com/prob/p166884%0A%0Adef parrot_trouble(talking, hour):%0A if talking and (hour %3C 7 or hour %3E 20):%0A return True%0A else:%0A return False%0A
|
|
7c4df6bfa4d8d2370c96ffd9efe0017447629a5d
|
add dep-free baseclass for typing
|
graphistry/Plottable.py
|
graphistry/Plottable.py
|
Python
| 0
|
@@ -0,0 +1,757 @@
+from typing import Iterable, List, Optional, Union%0Afrom typing_extensions import Protocol%0Aimport pandas as pd%0A%0Aclass Plottable(Protocol):%0A @property%0A def _point_title(self) -%3E Optional%5Bstr%5D:%0A return None%0A%0A @property%0A def _point_label(self) -%3E Optional%5Bstr%5D:%0A return None%0A%0A @property%0A def _nodes(self) -%3E Optional%5Bpd.DataFrame%5D:%0A return None%0A%0A @property%0A def _edges(self) -%3E Optional%5Bpd.DataFrame%5D:%0A return None%0A%0A def nodes(self, nodes: pd.DataFrame, node: Optional%5Bstr%5D) -%3E 'Plottable':%0A return self%0A%0A def edges(self, nodes: pd.DataFrame, source: Optional%5Bstr%5D, destination: Optional%5Bstr%5D) -%3E 'Plottable':%0A return self%0A%0A def bind(self, **kwargs) -%3E 'Plottable':%0A return self%0A
|
|
14a9296056c4dede324465791052119890f40725
|
add a TransactionTestCase to cover the flush command
|
tests/functionals/test_transactiontestcase.py
|
tests/functionals/test_transactiontestcase.py
|
Python
| 0
|
@@ -0,0 +1,657 @@
+from django.test import TransactionTestCase%0A%0Afrom tests.north_app.models import Author%0Afrom tests.north_app.models import Book%0A%0A%0Aclass BookTestCase(TransactionTestCase):%0A def setUp(self):%0A self.author = Author.objects.create(name=%22George R. R. Martin%22)%0A self.book1 = Book.objects.create(%0A author=self.author,%0A title=%22A Game of Thrones%22,%0A pages=1234)%0A self.book2 = Book.objects.create(%0A author=self.author,%0A title=%22A Clash of Kings%22,%0A pages=1235)%0A%0A def test_delete_book(self):%0A self.book1.delete()%0A%0A self.assertEquals(self.author.book_set.count(), 1)%0A
|
|
9b345bba13b572ebdd52c6dca534a7cf95e11335
|
Add examples
|
examples/colors.py
|
examples/colors.py
|
Python
| 0
|
@@ -0,0 +1,1239 @@
+from PIL import Image, ImageDraw%0Afrom time import sleep%0A%0AOFF_TARGET = True%0A%0Aif OFF_TARGET:%0A from matrixtoolkit import Adafruit_RGBmatrix%0Aelse:%0A from rgbmatrix import Adafruit_RGBmatrix%0A%0A%0Aclass drawer():%0A %22%22%22%0A handles controls what is being drawn%0A %22%22%22%0A%0A def __init__(self):%0A # this config switch is optional as scale is by default 6%0A if OFF_TARGET:%0A self.matrix = Adafruit_RGBmatrix(32, 4, scale=5)%0A else:%0A self.matrix = Adafruit_RGBmatrix(32, 4)%0A self.alive = True%0A%0A def run(self):%0A if OFF_TARGET:%0A self.matrix.start(self.main, self.kill)%0A else:%0A self.main()%0A%0A def main(self):%0A self.image = Image.new('RGB', (64, 32))%0A draw = ImageDraw.Draw(self.image)%0A try:%0A while self.alive:%0A self.matrix.Fill((0, 255, 0))%0A except KeyboardInterrupt:%0A # hook in to make sure any future deconstructors are called%0A self.kill()%0A%0A def updateMatrix(self, image):%0A self.matrix.SetImage(image if OFF_TARGET else%0A image.im.id, 0, 0)%0A%0A def kill(self):%0A self.alive = False%0A%0Aif __name__ == '__main__':%0A d = drawer()%0A d.run()%0A
|
|
6fdf7cc68e05ce6e8e18306eca7d8e36d1a166ea
|
Add Client class to abstract from different datbase clients
|
hotline/db/db_client.py
|
hotline/db/db_client.py
|
Python
| 0
|
@@ -0,0 +1,697 @@
+import importlib%0Aimport os%0A%0Aclass DBClient:%0A%0A db_defaults = %7B'mongo': 'mongodb://localhost:27017/',%0A 'redis': 'redis://localhost:6379',%0A 'postgresql': 'postgresql://localhost:5432'%0A %7D%0A%0A def __init__(self, url=None, db_type=None, db_name=None):%0A self.db_type = db_type%0A self.url = url or DBClient.db_defaults%5Bdb_type%5D%0A db_module = importlib.import_module('db.db_%7B0%7D'.format(db_type))%0A self.client = getattr(db_module, '%7B0%7DClient'.format(db_type.capitalize()))(self.url)%0A%0A def connect(self):%0A pass%0A%0A# Update later to remove default db_type 'mongo'%0Adb_client = DBClient(db_type='mongo')%0Adb_client.connect()%0A
|
|
aac6b16b3c532d74d788cbad942af6a147a06f4b
|
add broadcast org
|
migrations/versions/0331_add_broadcast_org.py
|
migrations/versions/0331_add_broadcast_org.py
|
Python
| 0
|
@@ -0,0 +1,2204 @@
+%22%22%22%0A%0ARevision ID: 0331_add_broadcast_org%0ARevises: 0330_broadcast_invite_email%0ACreate Date: 2020-09-23 10:11:01.094412%0A%0A%22%22%22%0Afrom alembic import op%0Aimport sqlalchemy as sa%0Aimport os%0A%0Arevision = '0331_add_broadcast_org'%0Adown_revision = '0330_broadcast_invite_email'%0A%0Aenvironment = os.environ%5B'NOTIFY_ENVIRONMENT'%5D%0A%0Aorganisation_id = '38e4bf69-93b0-445d-acee-53ea53fe02df'%0A%0A%0Adef upgrade():%0A # we've already done this manually on production%0A if environment != %22production%22:%0A insert_sql = %22%22%22%0A INSERT INTO organisation%0A (%0A id,%0A name,%0A active,%0A created_at,%0A agreement_signed,%0A crown,%0A organisation_type%0A )%0A VALUES (%0A :id,%0A :name,%0A :active,%0A current_timestamp,%0A :agreement_signed,%0A :crown,%0A :organisation_type%0A )%0A %22%22%22%0A update_service_set_broadcast_org_sql = %22%22%22%0A UPDATE services%0A SET organisation_id = :organisation_id%0A WHERE id in (%0A SELECT service_id%0A FROM service_permissions%0A WHERE permission = 'broadcast'%0A )%0A %22%22%22%0A conn = op.get_bind()%0A conn.execute(%0A sa.text(insert_sql),%0A id=organisation_id,%0A name=f'Broadcast Services (%7Benvironment%7D)',%0A active=True,%0A agreement_signed=None,%0A crown=None,%0A organisation_type='central',%0A )%0A conn.execute(%0A sa.text(update_service_set_broadcast_org_sql),%0A organisation_id=organisation_id%0A )%0A%0A%0Adef downgrade():%0A update_service_remove_org_sql = %22%22%22%0A UPDATE services%0A SET organisation_id = NULL, updated_at = current_timestamp%0A WHERE organisation_id = :organisation_id%0A %22%22%22%0A delete_sql = %22%22%22%0A DELETE FROM organisation%0A WHERE id = :organisation_id%0A %22%22%22%0A conn = op.get_bind()%0A conn.execute(sa.text(update_service_remove_org_sql), organisation_id=organisation_id)%0A conn.execute(sa.text(delete_sql), organisation_id=organisation_id)%0A
|
|
353edcdcfae15f06b998a4ad1481b3ad99e514bd
|
Remove easeventuid migration.
|
migrations/versions/127_remove_easeventuid.py
|
migrations/versions/127_remove_easeventuid.py
|
Python
| 0
|
@@ -0,0 +1,716 @@
+%22%22%22remove easeventuid%0A%0ARevision ID: 581e91bd7141%0ARevises: 262436681c4%0ACreate Date: 2015-01-10 00:57:50.944460%0A%0A%22%22%22%0A%0A# revision identifiers, used by Alembic.%0Arevision = '581e91bd7141'%0Adown_revision = '262436681c4'%0A%0Afrom alembic import op%0A%0A%0Adef upgrade():%0A from inbox.ignition import main_engine%0A engine = main_engine()%0A%0A if not engine.has_table('easeventuid'):%0A return%0A%0A op.drop_constraint('easeventuid_ibfk_1', 'easeventuid', type_='foreignkey')%0A op.drop_constraint('easeventuid_ibfk_2', 'easeventuid', type_='foreignkey')%0A op.drop_constraint('easeventuid_ibfk_3', 'easeventuid', type_='foreignkey')%0A%0A op.drop_table('easeventuid')%0A%0A%0Adef downgrade():%0A raise Exception('No going back.')%0A
|
|
5f7344b8a99880bec7195b951b495970116f0b0d
|
Initialize P2_blankRowInserter
|
books/AutomateTheBoringStuffWithPython/Chapter12/PracticeProjects/P2_blankRowInserter.py
|
books/AutomateTheBoringStuffWithPython/Chapter12/PracticeProjects/P2_blankRowInserter.py
|
Python
| 0.000064
|
@@ -0,0 +1,255 @@
+# Create a program blankRowInserter.py that takes two integers and a filename%0A# string as command line arguments. Let%E2%80%99s call the first integer N and the second%0A# integer M. Starting at row N, the program should insert M blank rows into the%0A# spreadsheet.%0A
|
|
37f5ddd7e8802b5d5213b5cadb905c39abe92dfc
|
Add test.
|
tests/adapter/mongo/test_case_group_handling.py
|
tests/adapter/mongo/test_case_group_handling.py
|
Python
| 0
|
@@ -0,0 +1,724 @@
+import pytest%0Aimport copy%0Aimport pymongo%0Aimport logging%0A%0Alogger = logging.getLogger(__name__)%0A%0A%0Adef test_init_case_group(adapter, institute_obj):%0A # given a database and an institute%0A owner = institute_obj%5B%22_id%22%5D%0A%0A # when attempting to create a case group%0A result = adapter.init_case_group(owner)%0A%0A # the result is ok%0A assert result%0A%0A%0Adef test_remove_case_group(adapter, institute_obj):%0A # given a database and an institute%0A owner = institute_obj%5B%22_id%22%5D%0A%0A # when successfully creating a case group%0A resulting_id = adapter.init_case_group(owner)%0A assert resulting_id%0A%0A # when removing it again%0A result = adapter.remove_case_group(resulting_id)%0A%0A # the result is ok%0A assert result%0A
|
|
28ad4d2770921c7d148b00ed0533b9051fb08122
|
enable utils.get to get any url with or without selector/username/password
|
utils.py
|
utils.py
|
#! /usr/bin/env python
import httplib, mimetypes, base64
def encode_multipart_formdata(fields, files):
"""
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be uploaded as files
Return (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
CRLF = '\r\n'
L = []
for (key, value) in fields:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
L.append(value)
for (key, filename, value) in files:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
L.append('Content-Type: %s' % mimetypes.guess_type(filename)[0] or 'application/octet-stream')
L.append('')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
def post_multipart(host, selector, fields, files, username, password):
"""
Post fields and files to an http host as multipart/form-data.
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be uploaded as files
Return the server's response page.
"""
base64string = base64.encodestring('%s:%s' % (username, password))[:-1]
content_type, body = encode_multipart_formdata(fields, files)
h = httplib.HTTPConnection(host)
headers = { 'Authorization': 'Basic %s' % base64string,
'Content-Type': content_type,
'Content-Length': str(len(body)) }
h.request('POST', selector, body, headers)
return h.getresponse().read()
def get(host, selector, username, password):
base64string = base64.encodestring('%s:%s' % (username, password))[:-1]
h = httplib.HTTPConnection(host)
headers = { 'Authorization': 'Basic %s' % base64string }
h.request('GET', selector, "", headers)
return h.getresponse().read()
|
Python
| 0
|
@@ -1898,16 +1898,19 @@
selector
+=%22%22
, userna
@@ -1903,32 +1903,37 @@
tor=%22%22, username
+=None
, password):%0A
@@ -1922,27 +1922,53 @@
ne, password
-):%0A
+=None):%0A if username:%0A
base64st
@@ -2060,32 +2060,53 @@
onnection(host)%0A
+ if username:%0A
headers = %7B
@@ -2150,16 +2150,47 @@
tring %7D%0A
+ else:%0A headers = %7B%7D%0A
h.re
|
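The resulting get() after this diff (reconstructed; Python 2, matching the rest of the module) makes selector, username, and password optional and only builds an Authorization header when credentials are supplied:

def get(host, selector="", username=None, password=None):
    if username:
        base64string = base64.encodestring('%s:%s' % (username, password))[:-1]
    h = httplib.HTTPConnection(host)
    if username:
        headers = { 'Authorization': 'Basic %s' % base64string }
    else:
        headers = {}
    h.request('GET', selector, "", headers)
    return h.getresponse().read()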
ecbc691307c43ad06d7f539f008fccbff690d538
|
Add unit tests for the precomputed_io module
|
unit_tests/test_precomputed_io.py
|
unit_tests/test_precomputed_io.py
|
Python
| 0
|
@@ -0,0 +1,2013 @@
+# Copyright (c) 2018 CEA%0A# Author: Yann Leprince %3Cyann.leprince@cea.fr%3E%0A#%0A# This software is made available under the MIT licence, see LICENCE.txt.%0A%0Aimport numpy as np%0Aimport pytest%0A%0Afrom neuroglancer_scripts.accessor import get_accessor_for_url%0Afrom neuroglancer_scripts.chunk_encoding import InvalidInfoError%0Afrom neuroglancer_scripts.precomputed_io import (%0A get_IO_for_existing_dataset,%0A get_IO_for_new_dataset,%0A)%0A%0A%0ADUMMY_INFO = %7B%0A %22type%22: %22image%22,%0A %22data_type%22: %22uint16%22,%0A %22num_channels%22: 1,%0A %22scales%22: %5B%0A %7B%0A %22key%22: %22key%22,%0A %22size%22: %5B8, 3, 15%5D,%0A %22resolution%22: %5B1e6, 1e6, 1e6%5D,%0A %22voxel_offset%22: %5B0, 0, 0%5D,%0A %22chunk_sizes%22: %5B%5B8, 8, 8%5D%5D,%0A %22encoding%22: %22raw%22,%0A %7D%0A %5D%0A%7D%0A%0A%0Adef test_precomputed_IO_chunk_roundtrip(tmpdir):%0A accessor = get_accessor_for_url(str(tmpdir))%0A # Minimal info file%0A io = get_IO_for_new_dataset(DUMMY_INFO, accessor)%0A dummy_chunk = np.arange(8 * 3 * 7, dtype=%22uint16%22).reshape(1, 7, 3, 8)%0A chunk_coords = (0, 8, 0, 3, 8, 15)%0A io.write_chunk(dummy_chunk, %22key%22, chunk_coords)%0A assert np.array_equal(io.read_chunk(%22key%22, chunk_coords), dummy_chunk)%0A%0A io2 = get_IO_for_existing_dataset(accessor)%0A assert io2.info == DUMMY_INFO%0A assert np.array_equal(io2.read_chunk(%22key%22, chunk_coords), dummy_chunk)%0A%0A%0Adef test_precomputed_IO_info_error(tmpdir):%0A with (tmpdir / %22info%22).open(%22w%22) as f:%0A f.write(%22invalid JSON%22)%0A accessor = get_accessor_for_url(str(tmpdir))%0A with pytest.raises(InvalidInfoError):%0A get_IO_for_existing_dataset(accessor)%0A%0A%0Adef test_precomputed_IO_validate_chunk_coords(tmpdir):%0A accessor = get_accessor_for_url(str(tmpdir))%0A # Minimal info file%0A io = get_IO_for_new_dataset(DUMMY_INFO, accessor)%0A good_chunk_coords = (0, 8, 0, 3, 0, 8)%0A bad_chunk_coords = (0, 8, 1, 4, 0, 8)%0A assert io.validate_chunk_coords(%22key%22, good_chunk_coords) is True%0A assert io.validate_chunk_coords(%22key%22, bad_chunk_coords) is False%0A
|
|
2224963d6dc413b2bf18fe585c98f85e41686175
|
Use the new mailset proper for the python fake service
|
py-fake-service/app/pixelated_user_agent.py
|
py-fake-service/app/pixelated_user_agent.py
|
from flask import Flask, request, Response, redirect
import json
import datetime
import requests
from adapter import MailService
from search import SearchQuery
app = Flask(__name__, static_url_path='', static_folder='../../web-ui/app')
client = None
converter = None
account = None
loaded = False
mail_service = MailService()
def respond_json(entity):
response = json.dumps(entity)
return Response(response=response, mimetype="application/json")
@app.route('/disabled_features')
def disabled_features():
return respond_json([])
@app.route('/mails', methods=['POST'])
def save_draft_or_send():
mail = request.json
if mail['ident']:
ident = mail_service.send(mail)
else:
ident = mail_service.save_draft(mail)
return respond_json({'ident': ident})
@app.route('/mails', methods=['PUT'])
def update_draft():
mail = request.json
ident = mail_service.update_draft(mail)
return respond_json({'ident': ident})
@app.route('/mails')
def mails():
query = SearchQuery.compile(request.args.get('q', ''))
page = request.args.get('p', '')
window_size = request.args.get('w', '')
fetched_mails = mail_service.mails(query, page, window_size)
mails = [mail.__dict__ for mail in fetched_mails]
response = {
"stats": {
"total": len(mails),
"read": 0,
"starred": 0,
"replied": 0
},
"mails": mails
}
return respond_json(response)
@app.route('/mail/<int:mail_id>', methods=['DELETE'])
def delete_mails(mail_id):
mail_service.delete_mail(mail_id)
return respond_json(None)
@app.route('/tags')
def tags():
tags = mail_service.tagsset.all_tags()
return respond_json([tag.__dict__ for tag in tags])
@app.route('/mail/<int:mail_id>')
def mail(mail_id):
return respond_json(mail_service.mail(mail_id).__dict__)
@app.route('/mail/<int:mail_id>/tags', methods=['POST'])
def mail_tags(mail_id):
new_tags = request.json['newtags']
mail_service.update_tags_for(mail_id, new_tags)
return respond_json(request.json['newtags'])
@app.route('/mail/<int:mail_id>/read', methods=['POST'])
def mark_mail_as_read(mail_id):
mail_service.mark_as_read(mail_id)
return ""
@app.route('/contacts')
def contacts():
contacts_query = request.args.get('q')
return respond_json(
{'contacts': mail_service.search_contacts(contacts_query)})
@app.route('/draft_reply_for/<int:mail_id>')
def draft_reply_for(mail_id):
mail = mail_service.draft_reply_for(mail_id)
if mail:
return respond_json(mail.__dict__)
else:
return respond_json(None)
@app.route('/control/mailset/<mailset>/load', methods=['POST'])
def load_mailset(mailset):
import os
from tarfile import TarFile
mbox_root = os.path.join(os.environ['HOME'], 'mailsets')
if not os.path.isdir(os.path.join(mbox_root)):
os.mkdir(mbox_root)
if len(os.listdir(mbox_root)) == 0:
response = requests.get(
'https://example.wazokazi.is:8154/go/static/mediumtagged.tar.gz',
verify=False)
mbox_archive_path = os.path.join(mbox_root, 'mediumtagged.tar.gz')
mbox_archive = open(mbox_archive_path, 'w')
mbox_archive.write(response.content)
mbox_archive.close()
tarfile = TarFile(name=mbox_archive_path)
tarfile.extractall(path=mbox_root)
mail_service.load_mailset()
return respond_json(None)
@app.route('/')
def index():
global loaded
if not loaded:
load_mailset('mediumtagged')
loaded = True
return app.send_static_file('index.html')
def setup():
app.run(host="0.0.0.0", debug=True, port=4567)
if __name__ == '__main__':
setup()
|
Python
| 0
|
@@ -3055,16 +3055,19 @@
/static/
+py-
mediumta
|
de4f3d3b31b5336cb541c0e6d17f198799c4dc53
|
Remove unnecessary argument
|
graphitepager/config.py
|
graphitepager/config.py
|
import os
import yaml
from alerts import Alert
def contents_of_file(filename):
open_file = open(filename)
contents = open_file.read()
open_file.close()
return contents
def get_config(path):
return Config(path)
class Config(object):
def __init__(self, path):
alert_yml = contents_of_file(path)
self._data = yaml.load(alert_yml)
def data(self, key):
return self._data[key]
def get(self, key, default=None):
return os.environ.get(key, self._data.get(key.lower(), default))
def has(self, key):
value = None
_key = key.lower()
if _key in self._data:
value = self._data[_key]
elif key in os.environ:
value = os.environ.get(key, None)
return value is not None and value != ''
def get_alerts(self, config):
alerts = []
doc_url = self.config.data('docs_url')
for alert_string in self.config.data('alerts'):
alerts.append(Alert(alert_string, doc_url))
return alerts
def has_keys(self, keys):
for key in keys:
if self.has(key) is False:
return False
return True
|
Python
| 0.043649
|
@@ -834,16 +834,8 @@
self
-, config
):%0A
|
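After the diff, the method is simply (reconstruction from the hunk above):

    def get_alerts(self):
        alerts = []
        doc_url = self.config.data('docs_url')
        for alert_string in self.config.data('alerts'):
            alerts.append(Alert(alert_string, doc_url))
        return alerts

Note that the body still reads self.config, an attribute Config.__init__ never sets; the commit removes only the unused parameter, not that latent lookup.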
3ce64bd781b59fffe42a59155a6f81f641647653
|
add package information
|
source/src/info.py
|
source/src/info.py
|
Python
| 0
|
@@ -0,0 +1,263 @@
+# -*- coding: utf-8 -*-%0A%0A%22%22%22%0ABase module variables%0A%22%22%22%0A%0A__version__ = '0.2.00'%0A__author__ = 'Joke Durnez'%0A__license__ = 'MIT'%0A__email__ = 'joke.durnez@gmail.com'%0A__status__ = 'Prototype'%0A__url__ = 'https://www.neuropowertools.org'%0A__packagename__ = 'neurodesign'%0A
|
|
117ddac033b0b337ced9589851e74056740cdb3e
|
patch to create workflow for existing leave applications
|
erpnext/patches/v10_0/workflow_leave_application.py
|
erpnext/patches/v10_0/workflow_leave_application.py
|
Python
| 0
|
@@ -0,0 +1,1315 @@
+# Copyright (c) 2017, Frappe and Contributors%0A# License: GNU General Public License v3. See license.txt%0A%0Afrom __future__ import unicode_literals%0Aimport frappe%0A%0Adef execute():%0A%09frappe.reload_doc(%22hr%22, %22doctype%22, %22leave_application%22)%0A%09frappe.reload_doc(%22workflow%22, %22doctype%22, %22workflow%22)%0A%0A%09doc = frappe.get_doc(%7B%0A%09%09'doctype': 'Workflow State',%0A%09%09'workflow_state_name': 'Open',%0A%09%09'style': 'Warning'%0A%09%09%7D).insert(ignore_permissions=True)%0A%0A%09doc = frappe.get_doc(%7B%0A%09%09'doctype': 'Workflow',%0A%09%09'workflow_name': 'Leave Approval',%0A%09%09'document_type': 'Leave Application',%0A%09%09'is_active': 1,%0A%09%09'workflow_state_field': 'workflow_state',%0A%09%09'states': %5B%7B%0A%09%09%09%22state%22: 'Open',%0A%09%09%09%22doc_status%22: 0,%0A%09%09%09%22allow_edit%22: 'Employee'%0A%09%09%7D, %7B%0A%09%09%09%22state%22: 'Approved',%0A%09%09%09%22doc_status%22: 1,%0A%09%09%09%22allow_edit%22: 'Leave Approver'%0A%09%09%7D, %7B%0A%09%09%09%22state%22: 'Rejected',%0A%09%09%09%22doc_status%22: 1,%0A%09%09%09%22allow_edit%22: 'Leave Approver'%0A%09%09%7D%5D,%0A%09%09'transitions': %5B%7B%0A%09%09%09%22state%22: 'Open',%0A%09%09%09%22action%22: 'Approve',%0A%09%09%09%22next_state%22: 'Approved',%0A%09%09%09%22allowed%22: 'Leave Approver'%0A%09%09%7D,%0A%09%09%7B%0A%09%09%09%22state%22: 'Open',%0A%09%09%09%22action%22: 'Reject',%0A%09%09%09%22next_state%22: 'Rejected',%0A%09%09%09%22allowed%22: 'Leave Approver'%0A%09%09%7D%5D%0A%09%7D).insert(ignore_permissions=True)%0A%0A%09frappe.db.sql(%22%22%22update %60tabLeave Application%60 set workflow_state = status%22%22%22)%0A%09frappe.db.sql(%22%22%22alter table %60tabLeave Application%60 drop column status%22%22%22)%0A
|
|
3aaa64c7ca9721e74fd52d3274a91fdd4c4cb678
|
add initial test cron
|
cron.py
|
cron.py
|
Python
| 0
|
@@ -0,0 +1,1513 @@
+import boto3%0Aimport credstash%0Aimport gspread%0Aimport json%0Afrom oauth2client.service_account import ServiceAccountCredentials%0Afrom oauth2client import file, client, tools%0Afrom models.v1.assets.asset import Asset%0Afrom models.v1.asset_groups.asset_group import AssetGroup%0Afrom models.v1.services.service import Service%0A%0Adef event(event, context):%0A print('event: %7B%7D'.format(event))%0A # get our gdrive creds%0A # and auth to google%0A gcreds_json=credstash.getSecret(%0A name=%22serviceapi.gdrive%22,%0A context=%7B'app': 'serviceapi'%7D,%0A region=%22us-east-1%22%0A )%0A scopes = %5B'https://www.googleapis.com/auth/drive.metadata.readonly',%0A 'https://www.googleapis.com/auth/drive.file ',%0A 'https://www.googleapis.com/auth/drive'%5D%0A credentials = ServiceAccountCredentials.from_json_keyfile_dict(json.loads(gcreds_json),scopes)%0A gs = gspread.authorize(credentials)%0A%0A # get rras%0A rras=gs.open(%22Mozilla Information Security Risk Register%22).worksheet(%22RRA3%22)%0A heading_keys=%5B%5D%0A for r in range(1,rras.row_count):%0A if r==1:%0A row_keys=rras.row_values(r)%0A for key in row_keys:%0A #lowercase and underscore the keys to fields%0A heading_keys.append(key.lower().replace(' ','_'))%0A%0A elif r %3E88:%0A row=rras.row_values(r)%0A if len(row)==0:%0A break%0A else:%0A print (json.dumps(dict(zip(heading_keys, row)),indent=4))%0A
|
|
49b3c91ffdbd04fbce523599320820278bb5d8aa
|
Add data file.
|
data.py
|
data.py
|
Python
| 0.000001
|
@@ -0,0 +1,293 @@
+# Ignore this file%0A%0A%7B'paper_abstract': 'An abstract',%0A 'authors': %5B%7B'first_names': 'XX',%0A 'surname': 'XXX',%0A 'address': 'XXX',%0A 'country': 'XXX',%0A 'email_address': 'xxx@XXX',%0A 'institution': 'XXX'%7D%5D,%0A 'title': ''%7D%0A%0A
|
|
885ed1e8e3256352d2fde771bef57997809c3c1e
|
Remove monthly_billing table from the database
|
migrations/versions/0209_remove_monthly_billing_.py
|
migrations/versions/0209_remove_monthly_billing_.py
|
Python
| 0.000002
|
@@ -0,0 +1,1728 @@
+%22%22%22%0A%0ARevision ID: 0209_remove_monthly_billing%0ARevises: 84c3b6eb16b3%0ACreate Date: 2018-07-27 14:46:30.109811%0A%0A%22%22%22%0Afrom alembic import op%0Aimport sqlalchemy as sa%0Afrom sqlalchemy.dialects import postgresql%0A%0Arevision = '0209_remove_monthly_billing'%0Adown_revision = '84c3b6eb16b3'%0A%0A%0Adef upgrade():%0A # ### commands auto generated by Alembic - please adjust! ###%0A op.drop_index('ix_monthly_billing_service_id', table_name='monthly_billing')%0A op.drop_table('monthly_billing')%0A # ### end Alembic commands ###%0A%0A%0Adef downgrade():%0A # ### commands auto generated by Alembic - please adjust! ###%0A op.create_table('monthly_billing',%0A sa.Column('id', postgresql.UUID(), autoincrement=False, nullable=False),%0A sa.Column('service_id', postgresql.UUID(), autoincrement=False, nullable=False),%0A sa.Column('notification_type', postgresql.ENUM('email', 'sms', 'letter', name='notification_type'), autoincrement=False, nullable=False),%0A sa.Column('monthly_totals', postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=False),%0A sa.Column('updated_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),%0A sa.Column('start_date', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),%0A sa.Column('end_date', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),%0A sa.ForeignKeyConstraint(%5B'service_id'%5D, %5B'services.id'%5D, name='monthly_billing_service_id_fkey'),%0A sa.PrimaryKeyConstraint('id', name='monthly_billing_pkey'),%0A sa.UniqueConstraint('service_id', 'start_date', 'notification_type', name='uix_monthly_billing')%0A )%0A op.create_index('ix_monthly_billing_service_id', 'monthly_billing', %5B'service_id'%5D, unique=False)%0A # ### end Alembic commands ###%0A
|
|
324f670e747af0b949bc2c9fb503c875b7f20a7b
|
Initialize 06.sameName3
|
books/AutomateTheBoringStuffWithPython/Chapter03/06.sameName3.py
|
books/AutomateTheBoringStuffWithPython/Chapter03/06.sameName3.py
|
Python
| 0.999999
|
@@ -0,0 +1,357 @@
+# This program demonstrates global and local variable rules%0Adef spam():%0A global eggs%0A eggs = 'spam' # this is the global (global statement)%0A%0A%0Adef bacon():%0A eggs = 'bacon' # this is a local (assignment)%0A%0A%0Adef ham():%0A print(eggs) # this is the global (no assignment)%0A%0A%0Aeggs = 42 # this is the global (outside all functions)%0Aspam()%0Aprint(eggs)%0A
|
|
6987558cefb1179c4501ee5f43e39618f67c49c7
|
Initialize P02_writeCSV
|
books/AutomateTheBoringStuffWithPython/Chapter14/P02_writeCSV.py
|
books/AutomateTheBoringStuffWithPython/Chapter14/P02_writeCSV.py
|
Python
| 0.000004
|
@@ -0,0 +1,381 @@
+# This program uses the csv module to manipulate .csv files%0A%0Aimport csv%0A%0A# Writer Objects%0AoutputFile = open(%22output.csv%22, %22w%22, newline='')%0AoutputWriter = csv.writer(outputFile)%0Aprint(outputWriter.writerow(%5B'spam', 'eggs', 'bacon', 'ham'%5D))%0Aprint(outputWriter.writerow(%5B'Hello, world!', 'eggs', 'bacon', 'ham'%5D))%0Aprint(outputWriter.writerow(%5B1, 2, 3.141592, 4%5D))%0AoutputFile.close()%0A
|
|
b6aacfff8a400f4cc671790a827a778bbbc74635
|
Update customer alerts to avoid is_available_to_buy
|
oscar/apps/customer/alerts/utils.py
|
oscar/apps/customer/alerts/utils.py
|
import logging
from django.core import mail
from django.conf import settings
from django.template import loader, Context
from django.contrib.sites.models import Site
from django.db.models import get_model, Max
from oscar.apps.customer.notifications import services
ProductAlert = get_model('customer', 'ProductAlert')
Product = get_model('catalogue', 'Product')
logger = logging.getLogger(__file__)
def send_alerts():
"""
Send out product alerts
"""
products = Product.objects.filter(
productalert__status=ProductAlert.ACTIVE
).distinct()
logger.info("Found %d products with active alerts", products.count())
for product in products:
if product.is_available_to_buy:
send_product_alerts(product)
def send_alert_confirmation(alert):
"""
Send an alert confirmation email.
"""
ctx = Context({
'alert': alert,
'site': Site.objects.get_current(),
})
subject_tpl = loader.get_template('customer/alerts/emails/confirmation_subject.txt')
body_tpl = loader.get_template('customer/alerts/emails/confirmation_body.txt')
mail.send_mail(
subject_tpl.render(ctx).strip(),
body_tpl.render(ctx),
settings.OSCAR_FROM_EMAIL,
[alert.email],
)
def send_product_alerts(product):
"""
Check for notifications for this product and send email to users
if the product is back in stock. Add a little 'hurry' note if the
amount of in-stock items is less then the number of notifications.
"""
stockrecords = product.stockrecords.all()
num_stockrecords = len(stockrecords)
if not num_stockrecords:
return
logger.info("Sending alerts for '%s'", product)
alerts = ProductAlert.objects.filter(
product=product,
status=ProductAlert.ACTIVE,
)
# Determine 'hurry mode'
num_alerts = alerts.count()
if num_stockrecords == 1:
num_in_stock = stockrecords[0].num_in_stock
hurry_mode = num_alerts < num_in_stock
else:
result = stockrecords.aggregate(max_in_stock=Max('num_in_stock'))
hurry_mode = num_alerts < result['max_in_stock']
# Load templates
message_tpl = loader.get_template('customer/alerts/message.html')
email_subject_tpl = loader.get_template('customer/alerts/emails/alert_subject.txt')
email_body_tpl = loader.get_template('customer/alerts/emails/alert_body.txt')
emails = []
num_notifications = 0
for alert in alerts:
ctx = Context({
'alert': alert,
'site': Site.objects.get_current(),
'hurry': hurry_mode,
})
if alert.user:
# Send a site notification
num_notifications += 1
services.notify_user(alert.user, message_tpl.render(ctx))
# Build email and add to list
emails.append(
mail.EmailMessage(
email_subject_tpl.render(ctx).strip(),
email_body_tpl.render(ctx),
settings.OSCAR_FROM_EMAIL,
[alert.get_email_address()],
)
)
alert.close()
# Send all emails in one go to prevent multiple SMTP
# connections to be opened
if emails:
connection = mail.get_connection()
connection.open()
connection.send_messages(emails)
connection.close()
logger.info("Sent %d notifications and %d emails", num_notifications, len(emails))
|
Python
| 0.000004
|
@@ -259,16 +259,57 @@
services
+%0Afrom oscar.core.loading import get_class
%0A%0AProduc
@@ -398,16 +398,69 @@
roduct')
+%0ASelector = get_class('partner.strategy', 'Selector')
%0A%0Alogger
@@ -767,52 +767,8 @@
ts:%0A
- if product.is_available_to_buy:%0A
@@ -2514,28 +2514,283 @@
-for alert in alerts:
+selector = Selector()%0A for alert in alerts:%0A # Check if the product is available to this user%0A strategy = selector.strategy(user=alert.user)%0A data = strategy.fetch(product)%0A if not data%5B'availability'%5D.is_available_to_buy:%0A continue%0A
%0A
|
b8ddb1b64ef2216add5b0b136b09b72d91506767
|
Add initial msgpack renderer
|
salt/renderers/msgpack.py
|
salt/renderers/msgpack.py
|
Python
| 0
|
@@ -0,0 +1,596 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import absolute_import%0A%0A# Import third party libs%0Aimport msgpack%0A%0A%0Adef render(msgpack_data, saltenv='base', sls='', **kws):%0A '''%0A Accepts JSON as a string or as a file object and runs it through the JSON%0A parser.%0A%0A :rtype: A Python data structure%0A '''%0A if not isinstance(msgpack_data, basestring):%0A msgpack_data = msgpack_data.read()%0A%0A if msgpack_data.startswith('#!'):%0A msgpack_data = msgpack_data%5B(msgpack_data.find('%5Cn') + 1):%5D%0A if not msgpack_data.strip():%0A return %7B%7D%0A return msgpack.loads(msgpack_data)%0A
|
|
e665e9cb374fd67baec7ec598bfd352e04192210
|
add gripper class to pick up pieces with electromagnet
|
raspberryturk/embedded/motion/gripper.py
|
raspberryturk/embedded/motion/gripper.py
|
Python
| 0
|
@@ -0,0 +1,1146 @@
+import RPi.GPIO as GPIO%0Afrom time import sleep%0A%0Aelectromagnet_pin = 40%0Aservo_pin = 38%0A%0Aclass Gripper(object):%0A def __init__(self):%0A self.previous_z = None%0A GPIO.setmode(GPIO.BOARD)%0A GPIO.setup(servo_pin, GPIO.OUT)%0A GPIO.setup(electromagnet_pin, GPIO.OUT)%0A%0A def calibrate(self):%0A self.move(100)%0A%0A def move(self, z):%0A z = max(0.0, min(z, 100.0))%0A dc = (z * 0.067) + 4.0%0A p = GPIO.PWM(servo_pin, 50.0)%0A p.start(dc)%0A if self.previous_z is None:%0A t = 10.0%0A else:%0A t = (abs(self.previous_z - z) / 10.0) + 0.5%0A sleep(t)%0A p.stop()%0A del p%0A self.previous_z = z%0A%0A def electromagnet(self, on):%0A output = GPIO.HIGH if on else GPIO.LOW%0A GPIO.output(electromagnet_pin, output)%0A%0A def pickup(self, z):%0A self.move(z)%0A sleep(0.4)%0A self.electromagnet(True)%0A sleep(0.2)%0A self.move(100)%0A%0A def dropoff(self, z):%0A self.move(z)%0A sleep(0.2)%0A self.electromagnet(False)%0A sleep(0.4)%0A self.move(100)%0A%0A def cleanup(self):%0A GPIO.cleanup()%0A
|
|
009bf3d7ffc6545cb2d37a36bc327de2bebd283d
|
Comment admin ought to have a better preview; #444
|
judge/admin/comments.py
|
judge/admin/comments.py
|
from django.forms import ModelForm
from django.utils.html import format_html
from django.utils.translation import ungettext, ugettext_lazy as _
from reversion.admin import VersionAdmin
from judge.models import Comment
from judge.widgets import MathJaxAdminPagedownWidget, HeavySelect2Widget
class CommentForm(ModelForm):
class Meta:
widgets = {
'author': HeavySelect2Widget(data_view='profile_select2'),
'parent': HeavySelect2Widget(data_view='comment_select2'),
}
if MathJaxAdminPagedownWidget is not None:
widgets['body'] = MathJaxAdminPagedownWidget
class CommentAdmin(VersionAdmin):
fieldsets = (
(None, {'fields': ('author', 'page', 'parent', 'score', 'hidden')}),
('Content', {'fields': ('title', 'body')}),
)
list_display = ['title', 'author', 'linked_page', 'time']
search_fields = ['author__user__username', 'author__name', 'page', 'title', 'body']
actions = ['hide_comment', 'unhide_comment']
list_filter = ['hidden']
actions_on_top = True
actions_on_bottom = True
form = CommentForm
def get_queryset(self, request):
return Comment.objects.order_by('-time')
def hide_comment(self, request, queryset):
count = queryset.update(hidden=True)
self.message_user(request, ungettext('%d comment successfully hidden.',
'%d comments successfully hidden.',
count) % count)
hide_comment.short_description = _('Hide comments')
def unhide_comment(self, request, queryset):
count = queryset.update(hidden=False)
self.message_user(request, ungettext('%d comment successfully unhidden.',
'%d comments successfully unhidden.',
count) % count)
unhide_comment.short_description = _('Unhide comments')
def linked_page(self, obj):
link = obj.link
if link is not None:
return format_html('<a href="{0}">{1}</a>', link, obj.page)
else:
return format_html('{0}', obj.page)
linked_page.short_description = _('Associated page')
linked_page.allow_tags = True
linked_page.admin_order_field = 'page'
def save_model(self, request, obj, form, change):
super(CommentAdmin, self).save_model(request, obj, form, change)
if obj.hidden:
obj.get_descendants().update(hidden=obj.hidden)
|
Python
| 0
|
@@ -28,16 +28,53 @@
delForm%0A
+from django.urls import reverse_lazy%0A
from dja
@@ -107,16 +107,16 @@
at_html%0A
-
from dja
@@ -275,23 +275,28 @@
import
-MathJax
+HeavyPreview
AdminPag
@@ -288,33 +288,33 @@
PreviewAdminPage
-d
+D
ownWidget, Heavy
@@ -558,23 +558,28 @@
if
-MathJax
+HeavyPreview
AdminPag
@@ -571,33 +571,33 @@
PreviewAdminPage
-d
+D
ownWidget is not
@@ -637,15 +637,20 @@
%5D =
-MathJax
+HeavyPreview
Admi
@@ -654,17 +654,17 @@
dminPage
-d
+D
ownWidge
@@ -660,24 +660,65 @@
geDownWidget
+(preview=reverse_lazy('comment_preview'))
%0A%0A%0Aclass Com
|
a65d9a9ba0ab594903c7e811ff5bd7400c7cbedd
|
add support for cdn region.
|
dj/scripts/rax_uploader.py
|
dj/scripts/rax_uploader.py
|
# rax_uploader.py
# rackspace cdn (openhatch) specific code
# caled from post_rak.py
# that is a lie. it is currently called from post_yt.py.
import argparse
import os
import pyrax
pyrax.set_setting("identity_type", "rackspace")
from pyrax.exceptions import PyraxException
import urllib
# The following 2 imports are wrapped in try/except so that
# this code will run without any additional files.
try:
# ProgressFile is a subclass of the python open class
# as data is read, it prints a visible progress bar
from progressfile import ProgressFile
except ImportError:
# or just use python's open for testing
ProgressFile = open
try:
# read credentials from a file
from pw import rax
except ImportError:
# https://mycloud.rackspace.com/account#settings
# Username:
# API Key:
# you can fill in your credentials here
# but better to put in pw.py
rax={
"testact":{
'user': "abc",
"api_key": "123"
}
}
def auth(upload_user="test"):
auth = rax[upload_user] ## from dict of credentials
pyrax.set_credentials( username=auth['user'], password=auth['api_key'])
return pyrax.cloudfiles
class Uploader(object):
# input attributes:
pathname = '' # path to video file to upload
user = 'testact' # key to lookup user/pw in rax{} typically stored in pw.py
bucket_id = "example" # archive/s3 bucket, or container ID for rax
key_id = "" # orbject name (the key in a key value store)
debug_mode = False
# return attributes:
ret_text = '' # TODO: return error text
new_url = ''
def upload(self):
cf = auth(self.user)
if self.debug_mode:
print "cf.get_all_containers", cf.get_all_containers()
container = cf.get_container(self.bucket_id)
# check if object already exists:
# if same name and same md5, don't bother re-uploading.
try:
obj = container.get_object(self.key_id)
already_there = obj.etag == pyrax.utils.get_checksum(
self.pathname,)
ret = True
except pyrax.exceptions.NoSuchObject as e:
already_there = False
if not already_there:
done=False
while not done:
pf = ProgressFile(self.pathname, 'r')
try:
# actually upload
obj = container.upload_file(pf, obj_name = self.key_id)
if self.debug_mode:
import code
code.interact(local=locals())
done = True
ret = True
except pyrax.exceptions.ClientException as e:
print "caught pyrax.exceptions.ClientException as e"
print e
print e.code, e.details, e.message
if e.code==408:
print "looping..."
continue
print e
# self.ret_text = "rax error: %s" % ( e.body )
import code
code.interact(local=locals())
except Exception as e:
print "caught Exception as e"
"""
HTTPSConnectionPool(host='storage101.ord1.clouddrive.com', port=443): Max retries exceeded with url: /v1/MossoCloudFS_fd6d6695-7fe7-4f77-9b4a-da7696e71dc2/fosdem/veyepar/debian/debconf14/dv/plenary/2014-08-23/16_00_03.ogv (Caused by <class 'socket.error'>: [Errno 104] Connection reset by peer)
HTTPSConnectionPool(host='storage101.ord1.clouddrive.com', port=443): Max retries exceeded with url: /v1/MossoCloudFS_fd6d6695-7fe7-4f77-9b4a-da7696e71dc2/fosdem/veyepar/debian/debconf14/dv/room338/2014-08-25/10_02_05.ogv (Caused by <class 'socket.error'>: [Errno 32] Broken pipe)
"""
print e
# self.ret_text = "rax error: %s" % ( e.body )
import code
code.interact(local=locals())
ret = False
# urllib.quote
# filenames may have chars that need to be quoted for a URL.
# cdn_streaming because.. video? (not sure really)
# self.new_url = container.cdn_streaming_uri +"/"+ urllib.quote(obj.name)
self.new_url = container.cdn_uri +"/"+ urllib.quote(obj.name)
return ret
def pars_args():
parser = argparse.ArgumentParser()
parser.add_argument('--pathname', default=
'/home/carl/Videos/veyepar/test_client/test_show/webm/Lets_make_a_Test.webm',
help='pathname of file to upload.')
parser.add_argument('--user', default="testact",
help="key to lookup credintials from pw.py")
parser.add_argument('--container', default="example",
help="container to upload to.")
parser.add_argument('--obj_name',
help="key in key:value")
parser.add_argument('--debug',
help="Drops to a >>> prompt after upload")
args = parser.parse_args()
return args
if __name__ == '__main__':
args = pars_args()
u = Uploader()
# sensible values for testing.
u.pathname = args.pathname
u.user = args.user
u.bucket_id = args.container # define this on rackspace gui
if args.obj_name is None:
u.key_id = os.path.split(u.pathname)[1]
else:
u.key_id = args.obj_name
u.debug_mode = args.debug
ret = u.upload()
print u.new_url
|
Python
| 0
|
@@ -1412,16 +1412,36 @@
n pw.py%0A
+ region = %22DFW%22%0A%0A
buck
@@ -1698,24 +1698,74 @@
oad(self):%0A%0A
+ pyrax.set_setting(%22region%22, self.region)%0A%0A
cf =
@@ -4648,95 +4648,54 @@
e',
-default=%0A '/home/carl/Videos/veyepar/test_client/test_show/webm/Lets_make_a_Test.webm'
+%0A default=os.path.abspath(__file__)
,%0A
@@ -5028,24 +5028,140 @@
ey:value%22)%0A%0A
+ parser.add_argument('--region', default=%22ORD%22,%0A help=%22http://www.rackspace.com/about/datacenters/%22)%0A%0A
parser.a
@@ -5423,24 +5423,24 @@
gs.pathname%0A
-
u.user =
@@ -5450,16 +5450,43 @@
gs.user%0A
+ u.region = args.region%0A
u.bu
|
59837bda53b958c7fdb50a3b2808a42fd667cd96
|
Create z07-dnn_autoencoder_iris.py
|
skflow-examples/z07-dnn_autoencoder_iris.py
|
skflow-examples/z07-dnn_autoencoder_iris.py
|
Python
| 0.000016
|
@@ -0,0 +1,1216 @@
+# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0Afrom __future__ import absolute_import%0Afrom __future__ import division%0Afrom __future__ import print_function%0A%0Aimport random%0A%0Aimport tensorflow as tf%0Afrom tensorflow.contrib import learn%0Afrom tensorflow.contrib.learn import datasets%0A%0A# Load Iris Data%0Airis = datasets.load_iris()%0A%0A# Initialize a deep neural network autoencoder%0A# You can also add noise and add dropout if needed%0A# Details see TensorFlowDNNAutoencoder documentation.%0Aautoencoder = learn.TensorFlowDNNAutoencoder(hidden_units=%5B10, 20%5D)%0A%0A# Fit with Iris data%0Atransformed = autoencoder.fit_transform(iris.data)%0A%0Aprint(transformed)%0A
|
|
96eba676abeb8c70dcaddb692133a9314e2255c3
|
Add harvester for pcom
|
scrapi/harvesters/pcom.py
|
scrapi/harvesters/pcom.py
|
Python
| 0
|
@@ -0,0 +1,715 @@
+'''%0AHarvester for the DigitalCommons@PCOM for the SHARE project%0A%0AExample API call: http://digitalcommons.pcom.edu/do/oai/?verb=ListRecords&metadataPrefix=oai_dc%0A'''%0Afrom __future__ import unicode_literals%0A%0Afrom scrapi.base import OAIHarvester%0A%0A%0Aclass PcomHarvester(OAIHarvester):%0A short_name = 'pcom'%0A long_name = 'DigitalCommons@PCOM'%0A url = 'http://digitalcommons.pcom.edu'%0A%0A base_url = 'http://digitalcommons.pcom.edu/do/oai/'%0A property_list = %5B'date', 'source', 'identifier', 'type', 'format', 'setSpec'%5D%0A timezone_granularity = True%0A%0A approved_sets = %5Bu'biomed', u'pa_systematic_reviews', u'psychology_dissertations',%0A u'scholarly_papers', u'research_day', u'posters'%5D%0A
|
|
aefd4009393e5ebf05ea9e485a1723776689ed70
|
add node and node to container link
|
tests/acceptance/mapping/node_at.py
|
tests/acceptance/mapping/node_at.py
|
Python
| 0
|
@@ -0,0 +1,2858 @@
+# Ariane CLI Python 3%0A# Node acceptance tests%0A#%0A# Copyright (C) 2015 echinopsii%0A#%0A# This program is free software: you can redistribute it and/or modify%0A# it under the terms of the GNU Affero General Public License as%0A# published by the Free Software Foundation, either version 3 of the%0A# License, or (at your option) any later version.%0A#%0A# This program is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU Affero General Public License for more details.%0A#%0A# You should have received a copy of the GNU Affero General Public License%0A# along with this program. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0Aimport unittest%0Afrom ariane_clip3.mapping import MappingService, Node, Container, NodeService%0A%0A__author__ = 'mffrench'%0A%0A%0Aclass NodeTest(unittest.TestCase):%0A%0A def setUp(self):%0A args = %7B'type': 'REST', 'base_url': 'http://localhost:6969/ariane/', 'user': 'yoda', 'password': 'secret'%7D%0A MappingService(args)%0A self.container = Container(name=%22test_container%22, gate_uri=%22ssh://my_host/docker/test_container%22,%0A primary_admin_gate_name=%22container name space (pid)%22, company=%22Docker%22,%0A product=%22Docker%22, c_type=%22container%22)%0A self.container.save()%0A%0A def test_create_remove_node_1(self):%0A node = Node(name=%22mysqld%22, container_id=self.container.cid)%0A node.save()%0A self.assertIsNotNone(node.nid)%0A self.container.__sync__()%0A self.assertTrue(node.nid in self.container.nodes_id)%0A self.assertIsNone(node.remove())%0A self.container.__sync__()%0A self.assertFalse(node.nid in self.container.nodes_id)%0A self.container.remove()%0A%0A def test_create_remove_node_2(self):%0A node = Node(name=%22mysqld%22, container=self.container)%0A node.save()%0A self.assertIsNotNone(node.nid)%0A self.assertTrue(node.nid in self.container.nodes_id)%0A self.assertIsNone(node.remove())%0A self.assertFalse(node.nid in self.container.nodes_id)%0A self.container.remove()%0A%0A def test_find_node_by_id(self):%0A node = Node(name=%22mysqld%22, container_id=self.container.cid)%0A node.save()%0A self.assertIsNotNone(NodeService.find_node(nid=node.nid))%0A node.remove()%0A self.assertIsNone(NodeService.find_node(nid=node.nid))%0A%0A def test_find_node_by_endpoint(self):%0A pass%0A%0A def test_get_nodes(self):%0A init_node_count = NodeService.get_nodes().__len__()%0A node = Node(name=%22mysqld%22, container_id=self.container.cid)%0A node.save()%0A self.assertEqual(NodeService.get_nodes().__len__(), init_node_count + 1)%0A node.remove()%0A self.assertEqual(NodeService.get_nodes().__len__(), init_node_count)%0A%0A
|
|
84ae279c0044e63e00c7d21823c3159e34c73d03
|
Add a memory test script
|
scripts/uvfits_memtest.py
|
scripts/uvfits_memtest.py
|
Python
| 0.000003
|
@@ -0,0 +1,1586 @@
+#!/usr/bin/env python2.7%0A# -*- mode: python; coding: utf-8 -*-%0A%0Afrom __future__ import print_function, division, absolute_import%0A%0Afrom memory_profiler import profile%0Aimport numpy as np%0Afrom astropy import constants as const%0Afrom astropy.io import fits%0Afrom pyuvdata import UVData%0A%0A%0A@profile%0Adef read_uvfits():%0A filename = '/Volumes/Data1/mwa_uvfits/1066571272.uvfits'%0A%0A # first test uvdata.read_uvfits. First read metadata then full data%0A uv_obj = UVData()%0A uv_obj.read_uvfits(filename, metadata_only=True)%0A uv_obj.read_uvfits_data(filename)%0A del(uv_obj)%0A%0A # now test details with astropy%0A hdu_list = fits.open(filename, memmap=True)%0A vis_hdu = hdu_list%5B0%5D%0A%0A # only read in times, then uvws, then visibilities%0A time0_array = vis_hdu.data.par('date')%0A uvw_array = (np.array(np.stack((vis_hdu.data.par('UU'),%0A vis_hdu.data.par('VV'),%0A vis_hdu.data.par('WW')))) * const.c.to('m/s').value).T%0A%0A if vis_hdu.header%5B'NAXIS'%5D == 7:%0A%0A data_array = (vis_hdu.data.data%5B:, 0, 0, :, :, :, 0%5D +%0A 1j * vis_hdu.data.data%5B:, 0, 0, :, :, :, 1%5D)%0A else:%0A data_array = (vis_hdu.data.data%5B:, 0, 0, :, :, 0%5D +%0A 1j * vis_hdu.data.data%5B:, 0, 0, :, :, 1%5D)%0A data_array = data_array%5B:, np.newaxis, :, :%5D%0A%0A # test for releasing resources%0A del(time0_array)%0A del(uvw_array)%0A del(data_array)%0A%0A # release file handles%0A del(vis_hdu)%0A del(hdu_list)%0A del(filename)%0A%0A return%0A%0A%0Aif __name__ == '__main__':%0A read_uvfits()%0A
|
|
6bb1d3939a076d7b7fe799cdac8885a5f67219e3
|
add ex32
|
ex32.py
|
ex32.py
|
Python
| 0.000637
|
@@ -0,0 +1,827 @@
+the_count = %5B1, 2, 3, 4, 5%5D%0Afruits = %5B'apples', 'oranges', 'pears', 'apricots'%5D%0Achange = %5B1, 'pennies', 2, 'dimes', 3, 'quarters'%5D%0A%0A# this first kind of for-loop goes through a list for number in the_count%0Afor number in the_count:%0A print %22This is count %25d%22 %25 number%0A%0A#same as above%0Afor fruit in fruits:%0A print %22A fruit of type : %25s%22 %25fruit%0A%0A#also we can go through mixed lists too%0A# notice we have to use %25r since we don't know what's in it%0Afor i in change:%0A print %22I got %25r%22 %25 i%0A%0A# we can also build lists,forst start with an empty one%0Aelements = %5B%5D%0A%0A#then use the range function to do 0 to 5 counts%0A%0Afor i in range(0,6):%0A print %22Adding %25d to the list.%22 %25 i%0A # append is a function that list understand%0A elements.append(i)%0A%0A# now we can print them out too%0Afor i in elements:%0A print %22Element was: %25d%22 %25 i%0A%0A
|
|
46a71071ed4982b02d0e49818a678dc2744c1b23
|
Bump version number to 1.0
|
flask/__init__.py
|
flask/__init__.py
|
# -*- coding: utf-8 -*-
"""
flask
~~~~~
A microframework based on Werkzeug. It's extensively documented
and follows best practice patterns.
:copyright: © 2010 by the Pallets team.
:license: BSD, see LICENSE for more details.
"""
__version__ = '1.0-dev'
# utilities we import from Werkzeug and Jinja2 that are unused
# in the module but are exported as public interface.
from werkzeug.exceptions import abort
from werkzeug.utils import redirect
from jinja2 import Markup, escape
from .app import Flask, Request, Response
from .config import Config
from .helpers import url_for, flash, send_file, send_from_directory, \
get_flashed_messages, get_template_attribute, make_response, safe_join, \
stream_with_context
from .globals import current_app, g, request, session, _request_ctx_stack, \
_app_ctx_stack
from .ctx import has_request_context, has_app_context, \
after_this_request, copy_current_request_context
from .blueprints import Blueprint
from .templating import render_template, render_template_string
# the signals
from .signals import signals_available, template_rendered, request_started, \
request_finished, got_request_exception, request_tearing_down, \
appcontext_tearing_down, appcontext_pushed, \
appcontext_popped, message_flashed, before_render_template
# We're not exposing the actual json module but a convenient wrapper around
# it.
from . import json
# This was the only thing that Flask used to export at one point and it had
# a more generic name.
jsonify = json.jsonify
# backwards compat, goes away in 1.0
from .sessions import SecureCookieSession as Session
json_available = True
|
Python
| 0.000074
|
@@ -272,12 +272,8 @@
'1.0
--dev
'%0A%0A#
|
3d2ad56b5d24405eb6f261b32b0347a7e7d8785a
|
use make_muc_presence
|
tests/twisted/muc/test-muc-alias.py
|
tests/twisted/muc/test-muc-alias.py
|
"""
Test that our alias is used to create MUC JIDs.
Mash-up of vcard/test-set-alias.py and muc/test-muc.py.
"""
import dbus
from twisted.words.xish import domish
from gabbletest import go, make_result_iq, acknowledge_iq, exec_test
from servicetest import call_async, lazy, match, EventPattern
def test(q, bus, conn, stream):
conn.Connect()
_, iq_event = q.expect_many(
EventPattern('dbus-signal', signal='StatusChanged', args=[0, 1]),
EventPattern('stream-iq', to=None, query_ns='vcard-temp',
query_name='vCard'))
acknowledge_iq(stream, iq_event.stanza)
conn.Aliasing.SetAliases({1: 'lala'})
iq_event = q.expect('stream-iq', iq_type='set', query_ns='vcard-temp',
query_name='vCard')
acknowledge_iq(stream, iq_event.stanza)
event = q.expect('dbus-signal', signal='AliasesChanged',
args=[[(1, u'lala')]])
# Need to call this asynchronously as it involves Gabble sending us a
# query.
call_async(q, conn, 'RequestHandles', 2, ['chat@conf.localhost'])
event = q.expect('stream-iq', to='conf.localhost',
query_ns='http://jabber.org/protocol/disco#info')
result = make_result_iq(stream, event.stanza)
feature = result.firstChildElement().addElement('feature')
feature['var'] = 'http://jabber.org/protocol/muc'
stream.send(result)
event = q.expect('dbus-return', method='RequestHandles')
room_handle = event.value[0][0]
call_async(q, conn, 'RequestChannel',
'org.freedesktop.Telepathy.Channel.Type.Text', 2, room_handle, True)
gfc, _, _ = q.expect_many(
EventPattern('dbus-signal', signal='GroupFlagsChanged'),
EventPattern('dbus-signal', signal='MembersChanged',
args=[u'', [], [], [], [2], 0, 0]),
EventPattern('stream-presence', to='chat@conf.localhost/lala'))
assert gfc.args[1] == 0
# Send presence for other member of room.
presence = domish.Element((None, 'presence'))
presence['from'] = 'chat@conf.localhost/bob'
x = presence.addElement(('http://jabber.org/protocol/muc#user', 'x'))
item = x.addElement('item')
item['affiliation'] = 'owner'
item['role'] = 'moderator'
stream.send(presence)
# Send presence for own membership of room.
presence = domish.Element((None, 'presence'))
presence['from'] = 'chat@conf.localhost/lala'
x = presence.addElement(('http://jabber.org/protocol/muc#user', 'x'))
item = x.addElement('item')
item['affiliation'] = 'none'
item['role'] = 'participant'
stream.send(presence)
event = q.expect('dbus-signal', signal='MembersChanged',
args=[u'', [2, 3], [], [], [], 0, 0])
assert conn.InspectHandles(1, [2]) == ['chat@conf.localhost/lala']
assert conn.InspectHandles(1, [3]) == ['chat@conf.localhost/bob']
event = q.expect('dbus-return', method='RequestChannel')
conn.Disconnect()
q.expect('dbus-signal', signal='StatusChanged', args=[2, 1])
if __name__ == '__main__':
exec_test(test)
|
Python
| 0.000008
|
@@ -226,16 +226,35 @@
xec_test
+, make_muc_presence
%0Afrom se
@@ -1938,298 +1938,89 @@
-presence = domish.Element((None, 'presence'))%0A presence%5B'from'%5D = 'chat@conf.localhost/bob'%0A x = presence.addElement(('http://jabber.org/protocol/muc#user', 'x'))%0A item = x.addElement('item')%0A item%5B'affiliation'%5D = 'owner'%0A item%5B'role'%5D = 'moderator'%0A stream.send(presence
+stream.send(make_muc_presence('owner', 'moderator', 'chat@conf.localhost', 'bob')
)%0A%0A
@@ -2074,300 +2074,91 @@
-presence = domish.Element((N
+stream.send(make_muc_presence('n
one
+'
, 'p
-resence'))%0A presence%5B'from'%5D = 'chat@conf.localhost/lala'%0A x = presence.addElement(('http://jabber.org/protocol/muc#user', 'x'))%0A item = x.addElement('item')%0A item%5B'affiliation'%5D = 'none'%0A item%5B'role'%5D = 'participant'%0A stream.send(presence
+articipant', 'chat@conf.localhost', 'lala')
)%0A%0A
|
4c9af992891ac5d39be50f9876a807f131a922e4
|
prepare the organism annotation details
|
gfftools/gff_db.py
|
gfftools/gff_db.py
|
Python
| 0.000001
|
@@ -0,0 +1,2215 @@
+#!/usr/bin/env python%0A%22%22%22%0AFetch the details about the features explained in a GFF type file.%0A%0AUsage: python feature_info.py in.gff%0A%0ARequirements:%0A gfftools : https://github.com/vipints/genomeutils/blob/master/gfftools%0A%22%22%22%0A%0Aimport re%0Aimport sys%0Aimport GFFParser%0A%0Adef Intron_det(TDB):%0A %22%22%22%0A get the intron feature details MaxIntronLength MinIntronLength%0A %22%22%22%0A%0A intron_size = dict() %0A exon_size = dict() %0A for ent1 in TDB:%0A for idx, tid in enumerate(ent1%5B'transcripts'%5D):%0A%0A if not ent1%5B'exons'%5D%5Bidx%5D.any():%0A continue%0A%0A exon_cnt = len(ent1%5B'exons'%5D%5Bidx%5D)%0A if exon_cnt %3E 1:%0A%0A intron_start = 0 %0A for xq, excod in enumerate(ent1%5B'exons'%5D%5Bidx%5D): %0A %0A if xq %3E 0: %0A #print intron_start, excod%5B0%5D-1 %0A if excod%5B0%5D-intron_start==1:%0A continue%0A # intron size %0A intron_size%5Bexcod%5B0%5D-intron_start%5D = 1 %0A #print tid, excod%5B0%5D-intron_start%0A%0A intron_start = excod%5B1%5D+1%0A exon_size%5Bintron_start-excod%5B0%5D%5D = 1%0A %0A # sort the intron_size based on the keys %0A if intron_size:%0A keys_int = sorted(intron_size)%0A print 'MinIntronLength', int(keys_int%5B0%5D), int(keys_int%5B1%5D), int(keys_int%5B2%5D)%0A print 'MaxIntronLength', int(keys_int%5B-1%5D), int(keys_int%5B-2%5D), int(keys_int%5B-3%5D)%0A print %0A keys_ex = sorted(exon_size)%0A print 'MinExonLength', int(keys_ex%5B0%5D), int(keys_ex%5B1%5D), int(keys_ex%5B2%5D) %0A print 'MaxExonLength', int(keys_ex%5B-1%5D), int(keys_ex%5B-2%5D), int(keys_ex%5B-3%5D) %0A else:%0A print %22Error in feature mapping, please check the source of parent child features%22 %0A print %22May be the sources are different for parents and child features of the parent Gene%22%0A%0Adef __main__():%0A%0A try:%0A query_file = sys.argv%5B1%5D%0A except:%0A print __doc__%0A sys.exit(-1)%0A%0A # get the annotated transcripts %0A Transdb = GFFParser.Parse(query_file) %0A%0A # extract different features %0A Intron_det(Transdb)%0A%0Aif __name__ == %22__main__%22: %0A __main__() %0A
|
|
a7787c60af7059f3b1a4dc3773da04dfc72631e2
|
Add tools.ping
|
grab/tools/ping.py
|
grab/tools/ping.py
|
Python
| 0.000001
|
@@ -0,0 +1,1876 @@
+from grab import Grab%0Aimport logging%0Aimport os%0Afrom grab.tools import html%0Afrom grab.tools.pwork import make_work%0Afrom grab.tools.encoding import smart_str%0A%0APING_XML = %22%22%22%3C?xml version=%221.0%22?%3E%0A%3CmethodCall%3E%0A %09%3CmethodName%3EweblogUpdates.ping%3C/methodName%3E%0A %09%3Cparams%3E%0A %3Cparam%3E%3Cvalue%3E%25(name)s%3C/value%3E%3C/param%3E%0A %3Cparam%3E%3Cvalue%3E%25(url)s%3C/value%3E%3C/param%3E%0A %09%3C/params%3E%0A%3C/methodCall%3E%0A%22%22%22%0A%0ASERVER_LIST = %22%22%22%0Ahttp://audiorpc.weblogs.com/RPC2%0Ahttp://blogsearch.google.com.ua/ping/RPC2%0Ahttp://blogsearch.google.com/ping/RPC2%0Ahttp://blogsearch.google.ru/ping/RPC2%0Ahttp://ping.blogs.yandex.ru/RPC2%0Ahttp://ping.myblog.jp/%0Ahttp://rpc.weblogs.com/RPC2%0Ahttp://xping.pubsub.com/ping%0A%22%22%22.strip().splitlines()%0A%0Adef ping(name, url, grab, thread_number=10):%0A %22%22%22%0A Do XMLRPC ping of given site.%0A %22%22%22%0A %0A name = smart_str(name)%0A url = smart_str(url)%0A%0A def worker(rpc_url):%0A post = PING_XML %25 %7B%0A 'url': html.escape(url),%0A 'name': html.escape(name),%0A %7D%0A ok = False%0A try:%0A grab.go(rpc_url, post=post)%0A except Exception, ex:%0A logging.error(unicode(ex))%0A else:%0A if not '%3Cboolean%3E0' in grab.response.body:%0A logging.error('%25s : FAIL' %25 rpc_url)%0A logging.error(grab.response.body%5B:1000%5D)%0A else:%0A ok = True%0A return rpc_url, ok%0A%0A results = %5B%5D%0A for rpc_url, ok in make_work(worker, SERVER_LIST, thread_number):%0A results.append((rpc_url, ok))%0A return results%0A%0A%0Aif __name__ == '__main__':%0A #logging.basicConfig(level=logging.DEBUG)%0A g = Grab(timeout=15)%0A g.setup_proxylist('/web/proxy.txt', 'http', auto_change=True) %0A items = ping('seobeginner.ru', 'http://feeds2.feedburner.com/seobeginner',%0A g, thread_number=30)%0A print 'RESULT:'%0A for rpc, ok in items:%0A print rpc, ok%0A
|
|
f31a735ce54f74e7a38edaeb7c404a2dec8e5584
|
add script
|
MeowRTC/cloud/stack/python/scripts/sample-p2p.py
|
MeowRTC/cloud/stack/python/scripts/sample-p2p.py
|
Python
| 0.000001
|
@@ -0,0 +1,1814 @@
+import logging%0Aimport jinja2%0Aimport webapp2%0Aimport os%0Aimport random%0Aimport json%0Afrom google.appengine.api import channel%0A%0Ajinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))%0A%0Alogging.getLogger().setLevel(logging.DEBUG)%0A%0Ausers = set()%0A%0Adef random_string():%0A str = ''%0A for _ in range(4):%0A str += random.choice('0123456789')%0A logging.info(str)%0A return str%0A%0Aclass MainPage(webapp2.RequestHandler):%0A def get(self):%0A user = self.request.get('user')%0A if len(user) == 0:%0A user = random_string()%0A token = channel.create_channel(user)%0A template_values = %7B'token': token, 'user': user%7D%0A template = jinja_environment.get_template('index.html')%0A self.response.out.write(template.render(template_values))%0A%0Aclass DisconnectPage(webapp2.RequestHandler):%0A def post(self):%0A user = self.request.get('from')%0A logging.info(%22Disconnect: %22 + user)%0A try:%0A users.remove(user)%0A except KeyError:%0A logging.info('User not logged in')%0A%0Aclass ConnectPage(webapp2.RequestHandler):%0A def post(self):%0A user = self.request.get('from')%0A logging.info(%22Connect: %22 + user)%0A users.add(user)%0A%0Aclass MessagePage(webapp2.RequestHandler):%0A def post(self):%0A msg = json.loads(self.request.body)%0A for command in msg:%0A to_user = msg%5Bcommand%5D%5B'to'%5D%0A from_user = msg%5Bcommand%5D%5B'from'%5D%0A logging.info(from_user + ' -%3E ' + to_user + %22: %22 + command)%0A if to_user in users:%0A channel.send_message(to_user, self.request.body)%0A else:%0A logging.info('User not found')%0A channel.send_message(from_user, '%7B%22ERROR%22:%22User not found ' + to_user + '%22%7D')%0A%0Aapp = webapp2.WSGIApplication(%5B('/', MainPage), ('/message', MessagePage), ('/_ah/channel/connected/', ConnectPage), ('/_ah/channel/disconnected/', DisconnectPage)%5D, debug=True)%0A
|
|
4c60f8f643fe05b69ca475242d8c46b02697d5d4
|
Add example for type-checking chain
|
examples/howto/type_chain.py
|
examples/howto/type_chain.py
|
Python
| 0.000001
|
@@ -0,0 +1,757 @@
+from thinc.api import chain, ReLu, MaxPool, Softmax, chain%0A%0A# This example should be run with mypy. This is an example of type-level checking %0A# for network validity.%0A#%0A# We first define an invalid network.%0A# It's invalid because MaxPool expects Floats3d as input, while ReLu produces%0A# Floats2d as output. chain has type-logic to verify input and output types%0A# line up.%0A#%0A# You should see the error an error,%0A# examples/howto/type_chain.py:10: error: Cannot infer type argument 2 of %22chain%22%0Abad_model = chain(ReLu(10), MaxPool(), Softmax())%0A%0A# Now let's try it with a network that does work, just to be sure.%0Agood_model = chain(ReLu(10), ReLu(10), Softmax())%0A%0A# Finally we can reveal_type on the good model, to see what it thinks.%0Areveal_type(good_model)%0A
|
|
f6bdab51054b08d203251b0e7e73ed7818613c8d
|
add models.py file so test runner will recognize the app
|
eca_catalogue/text/models.py
|
eca_catalogue/text/models.py
|
Python
| 0
|
@@ -0,0 +1,20 @@
+# Hello test runner%0A
|
|
c38a801cdf42788ba6d3ce9d009593ee68c4225f
|
version bump for 0.25.19.6.
|
oneflow/__init__.py
|
oneflow/__init__.py
|
VERSION = '0.25.19.5'
|
Python
| 0
|
@@ -13,12 +13,12 @@
0.25.19.
-5
+6
'%0A%0A
|
8851a11d7881dc16cf132e0bf88c94f82b7e5eaa
|
version bump for 0.25.19.8.
|
oneflow/__init__.py
|
oneflow/__init__.py
|
VERSION = '0.25.19.7'
|
Python
| 0
|
@@ -17,8 +17,8 @@
.19.
-7
+8
'%0A%0A
|
52f5946e7feccd90e0aa2e1f4674a7b51837e69d
|
version bump for 0.21.7.1.
|
oneflow/__init__.py
|
oneflow/__init__.py
|
VERSION = '0.21.7'
|
Python
| 0
|
@@ -11,11 +11,13 @@
'0.21.7
+.1
'%0A%0A
|
4ebe44abb3ae91b3b93154d3f82635dd09cbce93
|
version bump for 0.25.14.8.
|
oneflow/__init__.py
|
oneflow/__init__.py
|
VERSION = '0.25.14.7'
|
Python
| 0
|
@@ -17,8 +17,8 @@
.14.
-7
+8
'%0A%0A
|
ce792a1b167b268ba1f798b3b08e08679d962d02
|
Create distributeOnSurface.py
|
af_scripts/tmp/distributeOnSurface.py
|
af_scripts/tmp/distributeOnSurface.py
|
Python
| 0
|
@@ -0,0 +1,2132 @@
+import random%0Aimport maya.mel%0A%0A%0Aclass distributeOnSurface(object):%0A %0A def __init__(self):%0A pass%0A %0A def _UI(self):%0A if cmds.window('dosWin',exists=True):%0A cmds.deleteUI('dosWin',window=True)%0A w=300%0A w2=180%0A cmds.window('dosWin',t=%22Distribute On Surface%22,s=0,rtf=1,mb=1,mxb=0,mnb=0,w=w)%0A cmds.columnLayout(%22mainColumn%22,p=%22BSMainWin%22,columnAttach=('both', 2), rowSpacing=10, columnWidth=w)%0A %0A cmds.rowLayout(%22srcTgtNamesRow%22,p=%22mainColumn%22,w=w,numberOfColumns=3,columnWidth3=(w2,30,w2),%0A adjustableColumn=2, columnAlign3=%5B('center'),('center'),('center')%5D,%0A columnAttach=%5B(1, 'both', 1), (2, 'both', 0), (3, 'both',5)%5D)%0A cmds.textScrollList(%22srcList%22,p=%22srcTgtNamesRow%22,w=w2,numberOfRows=1, allowMultiSelection=False)%0A pm.popupMenu(%22srcListPopUp%22,p=%22srcList%22)%0A pm.menuItem(p=%22srcListPopUp%22,l=%22Add Source Geo%22,c=self.srcList)%0A %0A cmds.textScrollList(%22tgtList%22,p=%22srcTgtNamesRow%22,w=w2,numberOfRows=1, allowMultiSelection=False)%0A pm.popupMenu(%22tgtListPopUp%22,p=%22tgtList%22)%0A pm.menuItem(p=%22tgtListPopUp%22,l=%22Add Base Geo%22,c=self.tgtList)%0A %0A cmds.showWindow('dosWin')%0A%0A%0A%0Asrc_obj = 'pCone1'%0Atgt_obj = 'pPlane1'%0Adel_trans = %5Bcmds.delete(x) for x in cmds.ls(sl=True,fl=True,dag=1,lf=1) if cmds.nodeType(x) != 'follicle'%5D%0Afols = %5Bx for x in cmds.ls(sl=True,fl=True,dag=1,lf=1) if cmds.nodeType(x) == 'follicle'%5D%0Acmds.select(fols,r=1)%0Amaya.mel.eval('randomizeFollicles 0.05')%0A%0Arand_uv = 0.05%0Arand_l = 0.5%0Arand_offset = 1%0A%0Adup_objs = %5B%5D%0Afor fol in fols:%0A dup_obj = pm.duplicate(src_obj,n='%7B0%7D_dup'.format(src_obj))%5B0%5D%0A dup_objs.append(dup_obj)%0A pm.parent(dup_obj,fol)%0A for attr in %5B'tx','ty','tz','rx','ry','rz'%5D:%0A pm.setAttr('%7B0%7D.%7B1%7D'.format(dup_obj,attr),0)%0A%0A# Random length %0Afor obj in dup_objs:%0A lenght_var = random.uniform(-rand_l,rand_l)%0A pm.setAttr('%7B0%7D.sz'.format(obj),(1+lenght_var))%0A%0A# Random offset%0Afor obj in dup_objs:%0A offset_var = random.uniform(-rand_offset,rand_offset)%0A pm.setAttr('%7B0%7D.tz'.format(obj),(offset_var))%0A
|
|
e484b67372be22dae78a526c21e62661e4602913
|
Add todo files with incomplete fixes
|
test/todo.py
|
test/todo.py
|
Python
| 0
|
@@ -0,0 +1,54 @@
+raise KeyError, key%0A%0Adef foo(a%0A , b):%0A pass%0A
|
|
c669d498fa81ffb399d7d7c42654f5ac69428a28
|
Update templates.py
|
infra/templates.py
|
infra/templates.py
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
JENKINS_TEMPLATE = """\
// Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////////
def libfuzzerBuild = fileLoader.fromGit('infra/libfuzzer-pipeline.groovy',
'https://github.com/google/oss-fuzz.git')
libfuzzerBuild {
git = "put git url here"
}
"""
DOCKER_TEMPLATE = """\
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
FROM ossfuzz/base-libfuzzer
MAINTAINER your@email.com
RUN apt-get install -y make autoconf automake libtool
RUN git clone <git_url> # or use other version control
COPY build.sh /src/
"""
BUILD_TEMPLATE = """\
#!/bin/bash -eu
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
cd /src/%s
# build the target.
# e.g.
#
# ./autogen.sh
# ./configure
# make clean all
# build your fuzzer(s)
# e.g.
# $CXX $CXXFLAGS -std=c++11 -Iinclude \\
# /path/to/name_of_fuzzer.cc -o /out/name_of_fuzzer \\
# -lfuzzer /path/to/library.a $FUZZER_LDFLAGS
"""
|
Python
| 0.000001
|
@@ -3213,13 +3213,18 @@
ake
-clean
+-j$(nproc)
all
|
47f7fa72ab3ba75ad4182592f6413702fd509ba7
|
Create middleware to redirect users when accessing certain paths
|
iogt/middleware.py
|
iogt/middleware.py
|
Python
| 0
|
@@ -0,0 +1,1030 @@
+from django.conf import settings%0Afrom django.http import HttpResponsePermanentRedirect%0A%0A%0Aclass SSLRedirectMiddleware(object):%0A def process_request(self, request):%0A HTTPS_PATHS = getattr(settings, 'HTTPS_PATHS', %5B%5D)%0A response_should_be_secure = self.response_should_be_secure(%0A request, HTTPS_PATHS)%0A request_is_secure = self.request_is_secure(request)%0A if response_should_be_secure and not request_is_secure:%0A return HttpResponsePermanentRedirect(%0A %22https://%7B%7D%7B%7D%22.format(request.get_host(),%0A request.get_full_path())%0A )%0A%0A def response_should_be_secure(self, request, HTTPS_PATHS):%0A for path in HTTPS_PATHS:%0A if request.path.startswith(u'/%7B%7D'.format(path)):%0A return True%0A return False%0A%0A def request_is_secure(self, request):%0A if 'HTTP_X_FORWARDED_PROTO' in request.META:%0A return request.META%5B'HTTP_X_FORWARDED_PROTO'%5D == 'https'%0A%0A return False%0A
|
|
eeab27ecc6843136938e7607a619baef8626118a
|
Make contest_ranking visible
|
judge/views/contests.py
|
judge/views/contests.py
|
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import render_to_response
from django.template import RequestContext
from judge.comments import comment_form, contest_comments
from judge.models import Contest
__all__ = ['contest_list', 'contest']
def contest_list(request):
if request.user.is_authenticated() and request.user.profile.is_admin:
contests = Contest.objects.all()
else:
contests = Contest.objects.filter(is_public=True)
return render_to_response('contests.jade', {
'contests': contests,
'title': 'Contests'
}, context_instance=RequestContext(request))
def contest(request, key):
try:
contest = Contest.objects.get(code=key)
if not contest.is_public and not request.user.has_perm('judge.see_private_contest'):
raise ObjectDoesNotExist()
form = comment_form(request, 'p:' + key)
if form is None:
return HttpResponseRedirect(request.path)
return render_to_response('contest.jade', {'contest': contest,
'title': contest.name,
'comment_list': contest_comments(contest),
'comment_form': form},
context_instance=RequestContext(request))
except ObjectDoesNotExist:
return render_to_response('message.jade', {'message': 'Could not find a contest with the key "%s".' % key,
'title': 'No such contest'},
context_instance=RequestContext(request))
def contest_ranking(request, key):
return Http404()
|
Python
| 0.000073
|
@@ -320,16 +320,35 @@
contest'
+, 'contest_ranking'
%5D%0A%0A%0Adef
|
65e4659ccd3f22f817403dc39869626873f9fb34
|
Add test_runner.py
|
test/test_runner.py
|
test/test_runner.py
|
Python
| 0.000014
|
@@ -0,0 +1,404 @@
+from microscopes.lda.definition import model_definition%0Afrom microscopes.lda import model, runner%0Afrom microscopes.common.rng import rng%0A%0A%0Adef test_runner_simple():%0A defn = model_definition(n=10, v=20)%0A r = rng()%0A data = %5B%5B1, 2, 3, 4, 5%5D, %5B2, 3, 4%5D%5D%0A view = data%0A latent = model.initialize(defn=defn, data=view, r=r)%0A rnr = runner.runner(defn, view, latent)%0A rnr.run(r=r, niters=10)%0A
|
|
2bd52d823c9ae039dd0cc0adbabe7a47f003138e
|
Add unit tests
|
examples/ppo/unit_tests.py
|
examples/ppo/unit_tests.py
|
Python
| 0.000001
|
@@ -0,0 +1,2345 @@
+%0Aimport jax%0Aimport flax%0Afrom flax import nn%0Aimport numpy as onp%0A%0Aimport numpy.testing as onp_testing%0Afrom absl.testing import absltest%0A%0A#test GAE%0Afrom main import gae_advantages%0Aclass TestGAE(absltest.TestCase):%0A def test_gae_random(self):%0A # create random data, simulating 4 parallel envs and 20 time_steps%0A envs, steps = 10, 100%0A rewards = onp.random.choice(%5B-1., 0., 1.%5D, size=(steps, envs),%0A p=%5B0.01, 0.98, 0.01%5D)%0A terminal_masks = onp.ones(shape=(steps, envs), dtype=onp.float64)%0A values = onp.random.random(size=(steps + 1, envs))%0A discount = 0.99%0A gae_param = 0.95%0A adv = gae_advantages(rewards, terminal_masks, values, discount, gae_param)%0A self.assertEqual(adv.shape, (steps, envs))%0A # test the property A_%7Bt%7D = %5Cdelta_t + %5Cgamma*%5Clambda*A_%7Bt+1%7D%0A # for each agent separately%0A for e in range(envs):%0A for t in range(steps-1):%0A delta = rewards%5Bt, e%5D + discount * values%5Bt+1, e%5D - values%5Bt, e%5D%0A lhs = adv%5Bt, e%5D%0A rhs = delta + discount * gae_param * adv%5Bt+1, e%5D%0A onp_testing.assert_almost_equal(lhs, rhs)%0A%0A#test environment and preprocessing%0Afrom remote import RemoteSimulator, rcv_action_send_exp%0Afrom env import create_env%0Aclass TestEnvironmentPreprocessing(absltest.TestCase):%0A def test_creation(self):%0A frame_shape = (84, 84, 4)%0A env = create_env()%0A obs = env.reset()%0A self.assertTrue(obs.shape == frame_shape)%0A%0A def test_step(self):%0A frame_shape = (84, 84, 4)%0A env = create_env()%0A obs = env.reset()%0A actions = %5B1, 2, 3, 0%5D%0A for a in actions:%0A obs, reward, done, info = env.step(a)%0A self.assertTrue(obs.shape == frame_shape)%0A self.assertTrue(reward %3C= 1. and reward %3E= -1.)%0A self.assertTrue(isinstance(done, bool))%0A self.assertTrue(isinstance(info, dict))%0A%0A#test creation of the model and optimizer%0Afrom models import create_model, create_optimizer%0Aclass TestCreation(absltest.TestCase):%0A def test_create(self):%0A key = jax.random.PRNGKey(0)%0A key, subkey = jax.random.split(key)%0A policy_model = create_model(subkey)%0A policy_optimizer = create_optimizer(policy_model, learning_rate=1e-3)%0A self.assertTrue(isinstance(policy_model, nn.base.Model))%0A self.assertTrue(isinstance(policy_optimizer, flax.optim.base.Optimizer))%0A%0Aif __name__ == '__main__':%0A absltest.main()
|
|
4c76f9bc68451eb41338173ffbda460098d1e24e
|
make form reprocessing more error tolerant and verbose
|
corehq/apps/cleanup/management/commands/reprocess_error_forms.py
|
corehq/apps/cleanup/management/commands/reprocess_error_forms.py
|
from django.core.management.base import BaseCommand, CommandError, LabelCommand
from corehq.apps.cleanup.xforms import iter_problem_forms, reprocess_form_cases
from optparse import make_option
from dimagi.utils.parsing import string_to_datetime
class Command(BaseCommand):
args = '<domain> <since>'
help = ('Reprocesses all documents tagged as errors and tries to '
'regenerate the appropriate case blocks for them. Can pass in '
'a domain and date to process forms received after that date or '
'just a domain to process all problem forms in the domain.')
option_list = LabelCommand.option_list + \
(make_option('--dryrun', action='store_true', dest='dryrun', default=False,
help="Don't do the actual reprocessing, just print the ids that would be affected"),)
def handle(self, *args, **options):
domain = since = None
if len(args) == 1:
domain = args[0]
elif len(args) == 2:
domain = args[0]
since = string_to_datetime(args[1])
else:
raise CommandError('Usage: %s\n%s' % (self.args, self.help))
for form in iter_problem_forms(domain, since):
print "%s\t%s\t%s\t%s\t%s" % (form._id, form.received_on,
form.xmlns,
form.xpath('form/meta/username'),
form.problem.strip())
if not options["dryrun"]:
reprocess_form_cases(form)
|
Python
| 0.000004
|
@@ -1145,16 +1145,90 @@
help))%0A%0A
+ succeeded = %5B%5D%0A failed = %5B%5D%0A error_messages = set()%0A
@@ -1274,16 +1274,16 @@
since):%0A
-
@@ -1540,16 +1540,41 @@
yrun%22%5D:%0A
+ try:%0A
@@ -1608,9 +1608,466 @@
s(form)%0A
+ except Exception, e:%0A failed.append(form._id)%0A error_messages.add(str(e))%0A else:%0A succeeded.append(form._id)%0A%0A print %22%25s / %25s forms successfully processed, %25s failures%22 %25 %5C%0A (len(succeeded), len(succeeded) + len(failed), len(failed))%0A if error_messages:%0A print r%22The following errors were seen: %5Cn%25s%22 %25 (r%22%5Cn%22.join(error_messages))
%0A
|
6f2d36199b20069c9d7d5af3170f33f9147321ac
|
use deferred of request to allow adding callback from get_media_requests
|
scrapy/trunk/scrapy/contrib/pipeline/media.py
|
scrapy/trunk/scrapy/contrib/pipeline/media.py
|
from twisted.internet import defer
from scrapy.utils.misc import mustbe_deferred, defer_result
from scrapy.core import log
from scrapy.core.engine import scrapyengine
from scrapy.core.exceptions import DropItem, NotConfigured
from scrapy.http import Request
from scrapy.spider import spiders
from scrapy.conf import settings
class DomainInfo(object):
def __init__(self, domain):
self.domain = domain
self.spider = spiders.fromdomain(domain)
self.downloading = {}
self.downloaded = {}
self.waiting = {}
self.extra = {}
class MediaPipeline(object):
def __init__(self):
self.domaininfo = {}
def open_domain(self, domain):
self.domaininfo[domain] = DomainInfo(domain)
def close_domain(self, domain):
del self.domaininfo[domain]
def process_item(self, domain, response, item):
info = self.domaininfo[domain]
requests = self.get_media_requests(item, info)
assert requests is None or hasattr(requests, '__iter__'), \
'get_media_requests should return None or iterable'
def _bugtrap(_failure, request):
log.msg('Unhandled ERROR in MediaPipeline.item_media_{downloaded,failed} for %s: %s' % (request, _failure), log.ERROR, domain=domain)
lst = []
for request in requests or ():
dfd = self._enqueue(request, info)
dfd.addCallbacks(
callback=self.item_media_downloaded,
callbackArgs=(item, request, info),
errback=self.item_media_failed,
errbackArgs=(item, request, info),
)
dfd.addErrback(_bugtrap, request)
lst.append(dfd)
dlst = defer.DeferredList(lst, consumeErrors=False)
dlst.addBoth(lambda _: self.item_completed(item, info))
return dlst
def _enqueue(self, request, info):
fp = request.fingerprint()
if fp in info.downloaded:
return defer_result(info.downloaded[fp])
if fp not in info.downloading:
self._download(request, info, fp)
wad = defer.Deferred()
waiting = info.waiting.setdefault(fp, []).append(wad)
return wad
def _download(self, request, info, fp):
def _bugtrap(_failure):
log.msg('Unhandled ERROR in MediaPipeline._downloaded: %s' % (_failure), log.ERROR, domain=info.domain)
result = self.media_to_download(request, info)
if result is not None:
dwld = defer_result(result)
else:
dwld = mustbe_deferred(self.download, request, info)
dwld.addCallbacks(
callback=self.media_downloaded,
callbackArgs=(request, info),
errback=self.media_failed,
errbackArgs=(request, info),
)
dwld.addBoth(self._downloaded, info, fp)
dwld.addErrback(_bugtrap)
info.downloading[fp] = (request, dwld)
def _downloaded(self, result, info, fp):
info.downloaded[fp] = result # cache result
waiting = info.waiting[fp] # client list
del info.waiting[fp]
del info.downloading[fp]
for wad in waiting:
defer_result(result).chainDeferred(wad)
### Overridable Interface
def download(self, request, info):
""" Defines how to request the download of media
Default gives high priority to media requests and uses the scheduler,
so it shouldn't be necessary to override.
This method is called only if the result for the request isn't cached;
the request fingerprint is used as the cache key.
"""
return scrapyengine.schedule(request, info.spider, priority=0)
def media_to_download(self, request, info):
""" Ongoing request hook pre-cache
This method is called every time a media item is requested for download, and
only once for the same request because the return value is cached as the media
result.
returning a non-None value implies:
- the return value is cached and piped into `item_media_downloaded` or `item_media_failed`
- downloading is prevented, i.e. the `download` method isn't called.
- `media_downloaded` or `media_failed` isn't called.
"""
def get_media_requests(self, item, info):
""" Return a list of Request objects to download for this item
Should return None or an iterable
Defaults return None (no media to download)
"""
def media_downloaded(self, response, request, info):
""" Method called on success download of media request
Return value is cached and used as input for `item_media_downloaded` method.
Default implementation returns None.
WARNING: returning the response object can eat your memory.
"""
def media_failed(self, failure, request, info):
""" Method called when media request failed due to any kind of download error.
Return value is cached and used as input for `item_media_failed` method.
Default implementation returns same Failure object.
"""
return failure
def item_media_downloaded(self, result, item, request, info):
""" Method to handle result of requested media for item.
result is the return value of `media_downloaded` hook, or the non-Failure instance
returned by `media_failed` hook.
The return value of this method isn't important; returning None is recommended.
"""
def item_media_failed(self, failure, item, request, info):
""" Method to handle failed result of requested media for item.
result is the returned Failure instance of `media_failed` hook, or Failure instance
of an exception raised by `media_downloaded` hook.
The return value of this method isn't important; returning None is recommended.
"""
def item_completed(self, item, info):
""" Method called when all media requests for a single item has returned a result or failure.
The return value of this method is used as output of pipeline stage.
`item_completed` can return item itself or raise DropItem exception.
Default returns item
"""
return item
|
Python
| 0.000001
|
@@ -2140,16 +2140,36 @@
wad =
+ request.deferred or
defer.D
|
b30a159d216113ce122506359ccf1d11767422bb
|
Handle non-string keys
|
src/sentry/utils/data_scrubber.py
|
src/sentry/utils/data_scrubber.py
|
"""
sentry.utils.data_scrubber
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import re
import six
from sentry.constants import DEFAULT_SCRUBBED_FIELDS
def varmap(func, var, context=None, name=None):
"""
Executes ``func(key_name, value)`` on all values
recursively discovering dict and list scoped
values.
"""
if context is None:
context = set()
objid = id(var)
if objid in context:
return func(name, '<...>')
context.add(objid)
if isinstance(var, dict):
ret = dict((k, varmap(func, v, context, k)) for k, v in six.iteritems(var))
elif isinstance(var, (list, tuple)):
ret = [varmap(func, f, context, name) for f in var]
else:
ret = func(name, var)
context.remove(objid)
return ret
class SensitiveDataFilter(object):
"""
Asterisk out things that look like passwords, credit card numbers,
and API keys in frames, http, and basic extra data.
"""
MASK = '*' * 8
VALUES_RE = re.compile(r'\b(?:\d[ -]*?){13,16}\b')
def __init__(self, fields=None):
if fields:
self.fields = DEFAULT_SCRUBBED_FIELDS + tuple(fields)
else:
self.fields = DEFAULT_SCRUBBED_FIELDS
def apply(self, data):
# TODO(dcramer): move this into each interface
if 'sentry.interfaces.Stacktrace' in data:
self.filter_stacktrace(data['sentry.interfaces.Stacktrace'])
if 'sentry.interfaces.Exception' in data:
for exc in data['sentry.interfaces.Exception']['values']:
if exc.get('stacktrace'):
self.filter_stacktrace(exc['stacktrace'])
if 'sentry.interfaces.Http' in data:
self.filter_http(data['sentry.interfaces.Http'])
if 'extra' in data:
data['extra'] = varmap(self.sanitize, data['extra'])
def sanitize(self, key, value):
if value is None:
return
if isinstance(value, six.string_types) and self.VALUES_RE.search(value):
return self.MASK
if not key: # key can be a NoneType
return value
key = key.lower()
for field in self.fields:
if field in key:
# store mask as a fixed length for security
return self.MASK
return value
def filter_stacktrace(self, data):
if 'frames' not in data:
return
for frame in data['frames']:
if 'vars' not in frame:
continue
frame['vars'] = varmap(self.sanitize, frame['vars'])
def filter_http(self, data):
for n in ('data', 'cookies', 'headers', 'env', 'query_string'):
if n not in data:
continue
if isinstance(data[n], six.string_types) and '=' in data[n]:
# at this point we've assumed it's a standard HTTP query
querybits = []
for bit in data[n].split('&'):
chunk = bit.split('=')
if len(chunk) == 2:
querybits.append((chunk[0], self.sanitize(*chunk)))
else:
querybits.append(chunk)
data[n] = '&'.join('='.join(k) for k in querybits)
else:
data[n] = varmap(self.sanitize, data[n])
|
Python
| 0.025655
|
@@ -2209,37 +2209,36 @@
not
-key: # key can be a NoneType
+isinstance(key, basestring):
%0A
|
d344c91008198927d45cd7a3330915bd9e8fd89f
|
Add The Fucking Weather module from yano
|
willie/modules/fuckingweather.py
|
willie/modules/fuckingweather.py
|
Python
| 0
|
@@ -0,0 +1,855 @@
+%22%22%22%0Afuckingweather.py - Willie module for The Fucking Weather%0ACopyright 2013 Michael Yanovich%0ACopyright 2013 Edward Powell%0A%0ALicensed under the Eiffel Forum License 2.%0A%0Ahttp://willie.dftba.net%0A%22%22%22%0Afrom willie import web%0Aimport re%0A%0Adef fucking_weather(willie, trigger):%0A text = trigger.group(2)%0A if not text:%0A willie.reply(%22INVALID FUCKING PLACE. PLEASE ENTER A FUCKING ZIP CODE, OR A FUCKING CITY-STATE PAIR.%22)%0A return%0A text = web.quote(text)%0A page = web.get(%22http://thefuckingweather.com/?where=%25s%22 %25 (text))%0A re_mark = re.compile('%3Cp class=%22remark%22%3E(.*?)%3C/p%3E')%0A results = re_mark.findall(page)%0A if results:%0A willie.reply(results%5B0%5D)%0A else:%0A willie.reply(%22I CAN'T GET THE FUCKING WEATHER.%22)%0Afucking_weather.commands = %5B'fucking_weather', 'fw'%5D%0Afucking_weather.rate = 30%0Afucking_weather.priority = 'low'%0A
|
|
27c955963bfc640f5e91d103a4aff8e2a897597c
|
Implement profiling
|
jtgpy/profiling.py
|
jtgpy/profiling.py
|
Python
| 0.000015
|
@@ -0,0 +1,671 @@
+from datetime import datetime%0Aimport logging%0A%0A%0Adef log_time(target, message, log_method=None, target_args=None, target_kwargs=None):%0A%09%22%22%22Execute target and log the start/elapsed time before and after execution%22%22%22%0A%0A%09logger = logging.info%0A%09if log_method is not None:%0A%09%09logger = log_method%0A%0A%09start_time = datetime.now()%0A%09logger('Started %7Bmessage%7D at %7Bstart_time%7D'.format(message=message, start_time=start_time))%0A%0A%09if target_args is None: target_args=%5B%5D%0A%09if target_kwargs is None: target_kwargs=%7B%7D%0A%09output = target(*target_args, **target_kwargs)%0A%0A%09logger('Finished %7Bmessage%7D in %7Belapsed_time%7D'.format(message=message, elapsed_time=datetime.now() - start_time))%0A%09return output
|
|
d9a6ea57ad7bb1d7f3716fe16a49a8a24edceb67
|
Fix migrations for python3
|
registrations/migrations/0010_auto_20180212_0802.py
|
registrations/migrations/0010_auto_20180212_0802.py
|
Python
| 0.000012
|
@@ -0,0 +1,1356 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.9.1 on 2018-02-12 08:02%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('registrations', '0009_auto_20171027_0928'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='registration',%0A name='reg_type',%0A field=models.CharField(choices=%5B('momconnect_prebirth', 'MomConnect pregnancy registration'), ('momconnect_postbirth', 'MomConnect baby registration'), ('whatsapp_prebirth', 'WhatsApp MomConnect pregnancy registration'), ('nurseconnect', 'Nurseconnect registration'), ('whatsapp_nurseconnect', 'WhatsApp Nurseconnect registration'), ('pmtct_prebirth', 'PMTCT pregnancy registration'), ('whatsapp_pmtct_prebirth', 'WhatsApp PMTCT pregnancy registration'), ('pmtct_postbirth', 'PMTCT baby registration'), ('whatsapp_pmtct_postbirth', 'WhatsApp PMTCT baby registration'), ('loss_general', 'Loss general registration')%5D, max_length=30),%0A ),%0A migrations.AlterField(%0A model_name='source',%0A name='authority',%0A field=models.CharField(choices=%5B('patient', 'Patient'), ('advisor', 'Trusted Advisor'), ('hw_partial', 'Health Worker Partial'), ('hw_full', 'Health Worker Full')%5D, max_length=30),%0A ),%0A %5D%0A
|
|
e3c6ebd09af5292c496e3a69af499d2a507ce7dd
|
add demo main file
|
demos/blog/main.py
|
demos/blog/main.py
|
Python
| 0
|
@@ -0,0 +1,2493 @@
+import asyncio%0Aimport logging%0Aimport pathlib%0Aimport yaml%0Aimport aiopg.sa%0A%0Aimport aiohttp_jinja2%0Aimport jinja2%0Afrom aiohttp import web%0A%0Aimport aiohttp_admin%0Afrom aiohttp_admin.backends.sa import SAResource%0Aimport db%0A%0APROJ_ROOT = pathlib.Path(__file__).parent.parent%0ATEMPLATES_ROOT = pathlib.Path(__file__).parent / 'templates'%0A%0A%0Aclass SiteHandler:%0A%0A def __init__(self, pg):%0A self.pg = pg%0A%0A @aiohttp_jinja2.template('index.html')%0A async def index(self, request):%0A return %7B%7D%0A%0A%0Adef setup_admin(app, pg):%0A template_folder = str(TEMPLATES_ROOT)%0A admin = aiohttp_admin.setup(app=app, template_folder=template_folder)%0A%0A admin.add_resource(SAResource(pg, db.post, url='posts'))%0A admin.add_resource(SAResource(pg, db.tag, url='tags'))%0A admin.add_resource(SAResource(pg, db.comment, url='comments'))%0A return admin%0A%0A%0Aasync def setup_pg(app, conf, loop):%0A # create connection to the database%0A pg = await init_postgres(conf%5B'postgres'%5D, loop)%0A%0A async def close_pg(app):%0A pg.close()%0A await pg.wait_closed()%0A%0A app.on_cleanup.append(close_pg)%0A return pg%0A%0A%0Aasync def init(loop):%0A # load config from yaml file%0A conf = load_config(str(PROJ_ROOT / 'config' / 'polls.yaml'))%0A%0A # setup application and extensions%0A app = web.Application(loop=loop)%0A pg = await setup_pg(app, conf, loop)%0A%0A # init modules%0A aiohttp_jinja2.setup(%0A app, loader=jinja2.FileSystemLoader(str(TEMPLATES_ROOT)))%0A%0A setup_admin(app, pg)%0A%0A # setup views and routes%0A handler = SiteHandler(pg)%0A add_route = app.router.add_route%0A add_route('GET', '/', handler.index)%0A app.router.add_static('/static', path=str(PROJ_ROOT / 'static'))%0A%0A host, port = conf%5B'host'%5D, conf%5B'port'%5D%0A return app, host, port%0A%0A%0Adef load_config(fname):%0A with open(fname, 'rt') as f:%0A data = yaml.load(f)%0A # TODO: add config validation%0A return data%0A%0A%0Aasync def init_postgres(conf, loop):%0A engine = await aiopg.sa.create_engine(%0A database=conf%5B'database'%5D,%0A user=conf%5B'user'%5D,%0A password=conf%5B'password'%5D,%0A host=conf%5B'host'%5D,%0A port=conf%5B'port'%5D,%0A minsize=conf%5B'minsize'%5D,%0A maxsize=conf%5B'maxsize'%5D,%0A loop=loop)%0A return engine%0A%0A%0Adef main():%0A # init logging%0A logging.basicConfig(level=logging.DEBUG)%0A%0A loop = asyncio.get_event_loop()%0A app, host, port = loop.run_until_complete(init(loop))%0A web.run_app(app, host=host, port=port)%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
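One detail in this demo worth flagging: load_config calls yaml.load with no explicit Loader, which newer PyYAML releases warn about and which can construct arbitrary Python objects from a hostile file. A safer variant of that one function, assumed to be a drop-in for the demo's plain-mapping config:

import yaml

def load_config(fname):
    # safe_load restricts parsing to standard YAML tags, avoiding the
    # arbitrary-object construction that bare yaml.load allows.
    with open(fname, 'rt') as f:
        return yaml.safe_load(f)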
c4ee87fa4398eca3193331888086cb437436722e
|
Add some tests for hil_client
|
test/hil_client_test.py
|
test/hil_client_test.py
|
Python
| 0
|
@@ -0,0 +1,2458 @@
+%22%22%22%0AGeneral info about these tests%0A%0AThe tests assusme that the nodes are in the %3Cfrom_project%3E which is set to be the%0A%22slurm%22 project, since that is what we are testing here.%0A%0AIf all tests pass successfully, then nodes are back in their original state.%0A%0AClass TestHILReserve moves nodes out of the slurm project and into the free pool;%0Aand TestHILRelease puts nodes back into the slurm project from the free pool%0A%0Arun the tests like this%0Apy.test %3Cpath to testfile%3E%0Apy.test hil_client_test%0A%22%22%22%0A%0Aimport inspect%0Aimport sys%0Aimport pytest%0Aimport requests%0Afrom os.path import realpath, dirname, isfile, join%0Aimport uuid%0A%0Alibdir = realpath(join(dirname(inspect.getfile(inspect.currentframe())), '../common'))%0Asys.path.append(libdir)%0A%0Aimport hil_slurm_client%0A%0A%0A# Some constants useful for tests%0Anodelist = %5B'slurm-compute1', 'slurm-compute2', 'slurm-compute3'%5D%0Ahil_client = hil_slurm_client.hil_init()%0Ato_project = 'slurm'%0Afrom_project = 'slurm'%0A%0Abad_hil_client = hil_slurm_client.hil_client_connect('http://127.3.2.1',%0A 'baduser', 'badpassword')%0A%0A%0Aclass TestHILReserve:%0A %22%22%22Tests various hil_reserve cases%22%22%22%0A%0A def test_hil_reserve_success(self):%0A %22%22%22test the regular success scenario%22%22%22%0A%0A # should raise an error if %3Cfrom_project%3E doesn't add up.%0A with pytest.raises(hil_slurm_client.ProjectMismatchError):%0A random_project = str(uuid.uuid4())%0A hil_slurm_client.hil_reserve_nodes(nodelist, random_project, hil_client)%0A%0A # should run without any errors%0A hil_slurm_client.hil_reserve_nodes(nodelist, from_project, hil_client)%0A%0A # should raise error if a bad hil_client is passed%0A with pytest.raises(requests.ConnectionError):%0A hil_slurm_client.hil_reserve_nodes(nodelist, from_project, bad_hil_client)%0A%0A%0Aclass TestHILRelease:%0A %22%22%22Test various hil_release cases%22%22%22%0A def test_hil_release(self):%0A # should raise error if a bad hil_client is passed%0A with pytest.raises(requests.ConnectionError):%0A hil_slurm_client.hil_free_nodes(nodelist, to_project, bad_hil_client)%0A%0A # calling it with a functioning hil_client should work%0A hil_slurm_client.hil_free_nodes(nodelist, to_project, hil_client)%0A%0A # At this point, nodes are already owned by the %3Cto_project%3E%0A # calling it again should have no affect.%0A hil_slurm_client.hil_free_nodes(nodelist, to_project, hil_client)%0A%0A
|
|
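The tests above lean on pytest.raises as a context manager. A self-contained sketch of that pattern with a stand-in function, since hil_slurm_client itself is not needed to demonstrate it:

import pytest

def reserve_nodes(nodelist, project):
    # Stand-in for hil_reserve_nodes: reject a mismatched project.
    if project != 'slurm':
        raise ValueError('project mismatch')

def test_reserve_rejects_wrong_project():
    # The with-block passes only if the named exception is raised inside it.
    with pytest.raises(ValueError):
        reserve_nodes(['slurm-compute1'], 'not-slurm')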
085817b8444470c1e17ea25d4a406ce9fbcae2fc
|
fix previous fix. hello monday morning
|
test/test_freeze_app.py
|
test/test_freeze_app.py
|
# coding: utf-8
"""
Test CLI following the recipe at http://dustinrcollins.com/testing-python-command-line-apps
"""
import os
from unittest import TestCase
from tempfile import mkdtemp
from shutil import rmtree
from copy import copy
from six import StringIO
from dataset import connect
from dataset.util import FreezeException
from dataset.freeze.config import Configuration, Export
from dataset.freeze.app import create_parser, freeze_with_config, freeze_export
from sample_data import TEST_DATA
class FreezeAppTestCase(TestCase):
"""
Base TestCase class, sets up a CLI parser
"""
def setUp(self):
parser = create_parser()
self.parser = parser
self.d = mkdtemp()
self.db_path = os.path.abspath(os.path.join(self.d, 'db.sqlite'))
self.db = 'sqlite:///' + self.db_path
_db = connect(self.db)
tbl = _db['weather']
for i, row in enumerate(TEST_DATA):
_row = copy(row)
_row['count'] = i
_row['bool'] = True
_row['none'] = None
tbl.insert(_row)
def tearDown(self):
rmtree(self.d, ignore_errors=True)
def test_with_config(self):
cfg = Configuration(os.path.join(os.path.dirname(__file__), 'Freezefile.yaml'))
cfg.data['common']['database'] = self.db
cfg.data['common']['prefix'] = self.d
cfg.data['common']['query'] = 'SELECT * FROM weather'
cfg.data['exports'] = [
{'filename': '{{identity:count}}.json', 'mode': 'item', 'transform': {'bool': 'identity'}},
{'filename': 'weather.json', 'format': 'tabson'},
{'filename': 'weather.csv', 'fileobj': StringIO(), 'format': 'csv'},
{'filename': 'weather.json', 'fileobj': StringIO(), 'format': 'tabson'},
{'filename': 'weather.json', 'format': 'tabson', 'callback': 'read'},
{'skip': True}]
freeze_with_config(cfg, db=self.db)
self.assertRaises(FreezeException, freeze_export, Export(cfg.data['common'], {'query': 'SELECT * FROM notable'}))
def test_unicode_path(self):
cfg = Configuration(os.path.join(os.path.dirname(__file__), 'Freezefile.yaml'))
cfg.data['common']['database'] = self.db
cfg.data['common']['prefix'] = os.path.join(self.d, u'über')
cfg.data['common']['query'] = 'SELECT * FROM weather'
cfg.data['exports'] = [{'filename': 'weather.csv', 'format': 'csv'}]
freeze_with_config(cfg, db=self.db)
if __name__ == '__main__':
unittest.main()
|
Python
| 0.999999
|
@@ -123,37 +123,23 @@
os%0A
-from unittest import TestCase
+import unittest
%0Afro
@@ -504,16 +504,25 @@
estCase(
+unittest.
TestCase
|
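The diff here is small but instructive: the file's entry point calls unittest.main(), yet only TestCase had been imported, so the name unittest was never bound and running the script directly raised NameError. The committed fix switches to a module import; reduced to its essentials:

import unittest  # module import: both unittest.TestCase and
                 # unittest.main() below resolve against it

class SmokeTest(unittest.TestCase):
    def test_ok(self):
        self.assertTrue(True)

if __name__ == '__main__':
    unittest.main()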
bf609b8a30c70bc26a1f2b41313e50859d2a3af6
|
Fix get_logs file
|
myriadeploy/get_logs.py
|
myriadeploy/get_logs.py
|
#!/usr/bin/env python
import myriadeploy
import subprocess
import argparse
# parse args
parser = argparse.ArgumentParser(description='collect logs from workers')
parser.add_argument("--worker", type=int, help='worker id')
parser.add_argument("config", metavar='C', type=str, help='configuration file')
arguments = parser.parse_args()
def get_host_port_path(node, default_path):
if len(node) == 2:
(hostname, port) = node
if default_path is None:
raise Exception("Path not specified for node %s" % str(node))
else:
path = default_path
else:
(hostname, port, path) = node
return (hostname, port, path)
def mkdir_if_not_exists(description):
args = ["mkdir", "-p", description]
return subprocess.call(args)
def get_std_logs_from_worker(hostname, dirname, username,
worker_id, description):
mkdir_if_not_exists(description)
if hostname == 'localhost':
uri = "%s/worker_%s_stdout" % (dirname, worker_id)
else:
uri = "%s@%s:%s/worker_%s_stdout" % (
username, hostname, dirname, worker_id)
args = ["scp", uri, "%s/worker_%s_stdout" % (description, worker_id,)]
return subprocess.call(args)
def get_error_logs_from_worker(hostname, dirname, username,
worker_id, description):
mkdir_if_not_exists(description)
if hostname == 'localhost':
uri = "%s/worker_%s_stderr" % (dirname, worker_id)
else:
uri = "%s@%s:%s/worker_%s_stderr" % (
username, hostname, dirname, worker_id)
args = ["scp", uri, "%s/worker_%s_stderr"
% (description, worker_id,)]
return subprocess.call(args)
def get_profiling_logs_from_worker(hostname, dirname,
username, worker_id, description):
mkdir_if_not_exists(description)
if hostname == 'localhost':
uri = "%s/profile.log" % (dirname)
else:
uri = "%s@%s:%s/profile.log" % (
username, hostname, dirname)
args = ["scp", uri, "%s/worker_%s_profile"
% (description, worker_id,)]
return subprocess.call(args)
def get_logs_from_master(hostname, dirname, username, description):
mkdir_if_not_exists(description)
if hostname == 'localhost':
uri = "%s/master_stdout" % (dirname)
else:
uri = "%s@%s:%s/master_stdout" % (username, hostname, dirname)
args = ["scp", uri, "%s/master_stdout" % (description)]
return subprocess.call(args)
def get_error_logs_from_master(hostname, dirname, username, description):
if hostname == 'localhost':
uri = "%s/master_stderr" % (dirname)
else:
uri = "%s@%s:%s/master_stderr" % (username, hostname, dirname)
args = ["scp", uri, "%s/master_stderr" % (description)]
return subprocess.call(args)
def getlog(config_file, from_worker_id=None):
''' get configuration'''
config = myriadeploy.read_config_file(config_file)
"""Copies the master and worker catalogs to the remote hosts."""
description = config['description']
default_path = config['path']
master = config['master']
workers = config['workers']
username = config['username']
# get logs from master
if from_worker_id is None or from_worker_id == 0:
(hostname, _, path) = get_host_port_path(master, default_path)
if get_logs_from_master(hostname, "%s/%s-files"
% (path, description), username, description):
raise Exception("Error on getting logs from master %s"
% (hostname,))
if get_error_logs_from_master(hostname, "%s/%s-files"
% (path, description), username, description):
raise Exception("Error on getting error logs from master %s"
% (hostname,))
for (i, worker) in enumerate(workers):
# Workers are numbered from 1, not 0
worker_id = i + 1
# get logs from workers
if from_worker_id is None or from_worker_id == worker_id:
(hostname, _, path) = get_host_port_path(worker, default_path)
if get_std_logs_from_worker(hostname, "%s/%s-files"
% (path, description), username, worker_id, description):
raise Exception("Error on getting logs from worker %d %s"
% (worker_id, hostname))
if get_error_logs_from_worker(hostname, "%s/%s-files"
% (path, description), username, worker_id, description):
raise Exception("Error on getting error logs from worker %d %s"
% (worker_id, hostname))
if get_profiling_logs_from_worker(hostname, "%s/%s-files"
% (path, description), username, worker_id, description):
raise Exception("Error on getting profiling logs from \
worker %d %s" % (worker_id, hostname))
def main():
if arguments.worker:
getlog(arguments.config, arguments.worker)
else:
getlog(arguments.config)
if __name__ == "__main__":
main()
|
Python
| 0.000002
|
@@ -619,24 +619,27 @@
, port, path
+, _
) = node%0A
|
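The fix lives in get_host_port_path: worker entries in the config apparently grew a fourth field, so the three-name unpacking raised ValueError. The patched line discards the extra value into _. A minimal reproduction of both sides, with a hypothetical node tuple:

node = ('worker-1', 8001, '/srv/myria', 'extra-field')

# Before the fix: ValueError: too many values to unpack
# (hostname, port, path) = node

# After the fix: the trailing field is unpacked and ignored.
(hostname, port, path, _) = node
print(hostname, port, path)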
9293f4972e497636aab0d7ebc98e2a742a20a10c
|
Fix typo
|
acos_client/v30/slb/virtual_port.py
|
acos_client/v30/slb/virtual_port.py
|
# Copyright 2014, Jeff Buttars, A10 Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import acos_client.errors as ae
import acos_client.v30.base as base
class VirtualPort(base.BaseV30):
# Protocols
TCP = "tcp"
UDP = "udp"
OTHERS = "others"
DIAMETER = "diameter"
DNS_TCP = "dns-tcp"
DNS_UDP = "dns-udp"
FAST_HTTP = "fast-http"
FIX = "fix"
FTP = "ftp"
FTP_PROXY = "ftp-proxy"
HTTP = "http"
HTTPS = "https"
MLB = "mlb"
MMS = "mms"
MYSQL = "mysql"
MSSQL = "mssql"
RADIUS = "radius"
RTSP = "rtsp"
SIP = "sip"
SIP_TCP = "sip-tcp"
SIPS = "sips"
SMPP_TCP = "smpp-tcp"
SPDY = "spdy"
SPDYS = "spdys"
SMTP = "smtp"
SSL_PROXY = "ssl-proxy"
TCP_PROXY = "tcp-proxy"
TFTP = "tftp"
GENERIC_PROXY = "tcp-proxy"
url_server_tmpl = '/slb/virtual-server/{name}/port/'
url_port_tmpl = '{port_number}+{protocol}'
def all(self, virtual_server_name, **kwargs):
url = self.url_server_tmpl.format(name=virtual_server_name)
return self._get(url, **kwargs)
def get(self, virtual_server_name, name, protocol, port):
url = self.url_server_tmpl.format(name=virtual_server_name)
url += self.url_port_tmpl.format(
port_number=port, protocol=protocol
)
return self._get(url)
def _set(self, virtual_server_name, name, protocol, port,
service_group_name,
s_pers_name=None, c_pers_name=None, stats=0, update=False,
exclude_minimize=[],
**kwargs):
params = {
"port": self.minimal_dict({
"name": name,
"service-group": service_group_name,
"protocol": protocol,
"port-number": int(port),
"template-persist-source-ip": s_pers_name,
"template-persist-cookie": c_pers_name,
"extended-stats": stats
}, exclude=exclude_minimize)
}
url = self.url_server_tmpl.format(name=virtual_server_name)
if update:
url += self.url_port_tmpl.format(
port_number=port, protocol=protocol
)
return self._post(url, params, **kwargs)
def create(self, virtual_server_name, name, protocol, port,
service_group_name,
s_pers_name=None, c_pers_name=None, status=1, **kwargs):
return self._set(virtual_server_name,
name, protocol, port, service_group_name,
s_pers_name, c_pers_name, status, **kwargs)
def update(self, virtual_server_name, name, protocol, port,
service_group_name,
s_pers_name=None, c_pers_name=None, status=1, **kwargs):
vp = self.get(virtual_server_name, protocol, port)
if vp is None:
raise ae.NotFound()
exclu = ['template-persist-source-ip', 'template-persist-cookie']
return self._set(virtual_server_name,
name, protocol, port, service_group_name,
s_pers_name, c_pers_name, status, True,
exclude_minimize=exclu,
**kwargs)
def delete(self, virtual_server_name, name, protocol, port):
url = self.url_server_tmpl.format(name=virtual_server_name)
url += self.url_port_tmpl.format(port_number=port, protocol=protocol)
return self._delete(url)
|
Python
| 0.999999
|
@@ -3342,24 +3342,30 @@
server_name,
+ name,
protocol, p
|
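The "typo" is a dropped positional argument: update() called self.get(virtual_server_name, protocol, port) against a four-parameter signature, so protocol slid into the name slot and the call failed before ever reaching the device. A stripped-down reproduction of the mismatch:

class VirtualPort:
    def get(self, virtual_server_name, name, protocol, port):
        return (virtual_server_name, name, protocol, port)

vp = VirtualPort()
# Before the fix, the three-argument call raised:
#   TypeError: get() missing 1 required positional argument: 'port'
# vp.get('vs1', 'tcp', 80)

print(vp.get('vs1', 'vport1', 'tcp', 80))  # the patched call shape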
4f036669d604a902530e00ecc800b9baca6e69d1
|
Initialize stream getter for current dates games
|
streams.py
|
streams.py
|
Python
| 0
|
@@ -0,0 +1,103 @@
+import praw%0Aimport collections%0A%0Ar = praw.Reddit('Getter for stream links from /r/nbastreams by /u/me')%0A
|
|
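The stub targets the old praw 3-style constructor, which took the user-agent string positionally. Assuming that API generation (the method names below are praw 3's, not praw 4+'s, and the subreddit is inferred from the user-agent string), a plausible next step for this getter:

import praw

r = praw.Reddit('Getter for stream links from /r/nbastreams by /u/me')
# praw 3.x: fetch the subreddit and walk its hot listing for game threads.
for submission in r.get_subreddit('nbastreams').get_hot(limit=10):
    print(submission.title)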
50383f51794babceb89503d091d520c5c3032db3
|
add gc testing code
|
tests/_andre/test_gc.py
|
tests/_andre/test_gc.py
|
Python
| 0.000001
|
@@ -0,0 +1,439 @@
+%0A__author__ = %22Andre Merzky%22%0A__copyright__ = %22Copyright 2012-2013, The SAGA Project%22%0A__license__ = %22MIT%22%0A%0A%0Aimport gc%0Aimport sys%0Aimport saga%0Afrom pprint import pprint as pp%0A%0Atry :%0A%0A if True :%0A js1 = saga.job.Service ('ssh://localhost/bin/sh')%0A print sys.getrefcount (js1)%0A pp (gc.get_referrers (js1))%0A%0Aexcept saga.SagaException as e :%0A print str(e)%0A%0A%0A# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4%0A%0A
|
|
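The snippet above is Python 2 (bare print statements) and needs an SSH-reachable saga job service, but the refcount/referrer inspection it performs works on any object. A Python 3 sketch of the same probing, with the usual caveat about getrefcount:

import gc
import sys
from pprint import pprint as pp

class Service:
    pass

js1 = Service()
# getrefcount counts the temporary reference created by the call
# itself, so a freshly bound object typically reports 2.
print(sys.getrefcount(js1))
# get_referrers lists the objects (frames, dicts, ...) holding js1.
pp(gc.get_referrers(js1))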
4158244e5e7d09ea6602d465a24e1a218c4d174a
|
Make function name more clearer (#406)
|
gittip/elsewhere/__init__.py
|
gittip/elsewhere/__init__.py
|
import random
from aspen import log
from aspen.utils import typecheck
from gittip import db
from psycopg2 import IntegrityError
class RunawayTrain(Exception):
pass
def get_a_participant_id():
"""Return a random participant_id.
The returned value is guaranteed to have been reserved in the database.
"""
seatbelt = 0
while 1:
participant_id = hex(int(random.random() * 16**12))[2:].zfill(12)
try:
db.execute( "INSERT INTO participants (id) VALUES (%s)"
, (participant_id,)
)
except IntegrityError: # Collision, try again with another value.
seatbelt += 1
if seatbelt > 100:
raise RunawayTrain
else:
break
return participant_id.decode('US-ASCII')
def upsert(platform, user_id, username, user_info):
"""Given str, unicode, unicode, and dict, return unicode and boolean.
Platform is the name of a platform that we support (ASCII blah). User_id is
an immutable unique identifier for the given user on the given platform
Username is the user's login/user_id on the given platform. It is only
used here for logging. Specifically, we don't reserve their username for
them on Gittip if they're new here. We give them a random participant_id
here, and they'll have a chance to change it if/when they opt in. User_id
and username may or may not be the same. User_info is a dictionary of
profile info per the named platform. All platform dicts must have an id key
that corresponds to the primary key in the underlying table in our own db.
The return value is a tuple: (participant_id [unicode], is_claimed
[boolean], is_locked [boolean], balance [Decimal]).
"""
typecheck( platform, str
, user_id, (int, unicode)
, username, unicode
, user_info, dict
)
user_id = unicode(user_id)
# Record the user info in our database.
# =====================================
INSERT = """\
INSERT INTO elsewhere
(platform, user_id)
VALUES (%s, %s)
"""
try:
db.execute(INSERT, (platform, user_id,))
except IntegrityError:
pass # That login is already in our db.
UPDATE = """\
UPDATE elsewhere
SET user_info=%s
WHERE user_id=%s
RETURNING participant_id
"""
for k, v in user_info.items():
# Cast everything to unicode. I believe hstore can take any type of
# value, but psycopg2 can't.
# https://postgres.heroku.com/blog/past/2012/3/14/introducing_keyvalue_data_storage_in_heroku_postgres/
# http://initd.org/psycopg/docs/extras.html#hstore-data-type
user_info[k] = unicode(v)
rec = db.fetchone(UPDATE, (user_info, user_id))
# Find a participant.
# ===================
if rec is not None and rec['participant_id'] is not None:
# There is already a Gittip participant associated with this account.
participant_id = rec['participant_id']
new_participant = False
else:
# This is the first time we've seen this user. Let's create a new
# participant for them.
participant_id = get_a_participant_id()
new_participant = True
# Associate the elsewhere account with the Gittip participant.
# ============================================================
ASSOCIATE = """\
UPDATE elsewhere
SET participant_id=%s
WHERE platform=%s
AND user_id=%s
AND ( (participant_id IS NULL)
OR (participant_id=%s)
)
RETURNING participant_id, is_locked
"""
log(u"Associating %s (%s) on %s with %s on Gittip."
% (username, user_id, platform, participant_id))
rows = db.fetchall( ASSOCIATE
, (participant_id, platform, user_id, participant_id)
)
rows = list(rows)
nrows = len(rows)
assert nrows in (0, 1)
if nrows == 1:
is_locked = rows[0]['is_locked']
else:
# Against all odds, the account was otherwise associated with another
# participant while we weren't looking. Maybe someone paid them money
# at *just* the right moment. If we created a new participant then back
# that out.
if new_participant:
db.execute( "DELETE FROM participants WHERE id=%s"
, (participant_id,)
)
rec = db.fetchone( "SELECT participant_id, is_locked "
"FROM elsewhere "
"WHERE platform=%s AND user_id=%s"
, (platform, user_id)
)
if rec is not None:
# Use the participant associated with this account.
participant_id = rec['participant_id']
is_locked = rec['is_locked']
assert participant_id is not None
else:
# Okay, now this is just screwy. The participant disappeared right
# at the last moment! Log it and fail.
raise Exception("We're bailing on associating %s user %s (%s) with"
" a Gittip participant."
% (platform, username, user_id))
rec = db.fetchone( "SELECT claimed_time, balance FROM participants "
"WHERE id=%s"
, (participant_id,)
)
assert rec is not None
return ( participant_id
, rec['claimed_time'] is not None
, is_locked
, rec['balance']
)
|
Python
| 0.000721
|
@@ -172,24 +172,31 @@
%0A%0Adef get_a_
+random_
participant_
@@ -3296,16 +3296,23 @@
= get_a_
+random_
particip
|
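Beyond the rename, the id scheme itself rewards a second look: twelve hex digits derived from random.random(), left-padded so short draws keep a fixed width, then reserved in the database by a retry loop that bails after repeated collisions. The generation step, isolated from the database:

import random

participant_id = hex(int(random.random() * 16 ** 12))[2:].zfill(12)
# hex() prefixes '0x', hence the [2:]; zfill pads draws that come out
# shorter than twelve digits.
assert len(participant_id) == 12
print(participant_id)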
b04e10af3c0ecc3258b476dbe58c758ece888349
|
add migration file required by new paperclip/mapentity
|
geotrek/common/migrations/0002_auto_20170323_1433.py
|
geotrek/common/migrations/0002_auto_20170323_1433.py
|
Python
| 0
|
@@ -0,0 +1,659 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0Aimport embed_video.fields%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('common', '0001_initial'),%0A %5D%0A%0A operations = %5B%0A migrations.AddField(%0A model_name='attachment',%0A name='attachment_link',%0A field=models.URLField(verbose_name='Picture URL', blank=True),%0A ),%0A migrations.AlterField(%0A model_name='attachment',%0A name='attachment_video',%0A field=embed_video.fields.EmbedVideoField(verbose_name='Video URL', blank=True),%0A ),%0A %5D%0A
|