commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
666d9c467806782827edac4b2c0c13d494e41250
|
Add a test for the status server
|
jobmon/test/test_status_server.py
|
jobmon/test/test_status_server.py
|
Python
| 0.000001
|
@@ -0,0 +1,1189 @@
+import os%0Aimport select%0Aimport socket%0Aimport time%0Aimport unittest%0A%0Afrom jobmon.protocol import *%0Afrom jobmon import protocol, status_server, transport%0A%0APORT = 9999%0A%0Aclass StatusRecorder:%0A def __init__(self):%0A self.records = %5B%5D%0A%0A def process_start(self, job):%0A self.records.append(('started', job))%0A%0A def process_stop(self, job):%0A self.records.append(('stopped', job))%0A%0Aclass TestCommandServer(unittest.TestCase):%0A def test_command_server(self):%0A status_recorder = StatusRecorder()%0A status_svr = status_server.StatusServer(status_recorder)%0A status_svr.start()%0A%0A status_peer = status_svr.get_peer()%0A%0A try:%0A status_peer.send(protocol.Event('some_job', %0A protocol.EVENT_STARTJOB))%0A%0A status_peer.send(protocol.Event('some_job',%0A protocol.EVENT_STOPJOB))%0A%0A time.sleep(5) # Give the server time to process all events%0A%0A self.assertEqual(status_recorder.records,%0A %5B('started', 'some_job'),%0A ('stopped', 'some_job')%5D)%0A finally:%0A status_peer.close()%0A status_svr.terminate()%0A
|
|
fd62cb27d848eea1c30928bfe7a727c88d7f3035
|
Fix Sphinx rendering for publisher client. (#5822)
|
pubsub/google/cloud/pubsub_v1/publisher/client.py
|
pubsub/google/cloud/pubsub_v1/publisher/client.py
|
# Copyright 2017, Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import copy
import os
import pkg_resources
import grpc
import six
from google.api_core import grpc_helpers
from google.cloud.pubsub_v1 import _gapic
from google.cloud.pubsub_v1 import types
from google.cloud.pubsub_v1.gapic import publisher_client
from google.cloud.pubsub_v1.publisher._batch import thread
__version__ = pkg_resources.get_distribution('google-cloud-pubsub').version
@_gapic.add_methods(publisher_client.PublisherClient, blacklist=('publish',))
class Client(object):
_batch_class = thread.Batch
"""A publisher client for Google Cloud Pub/Sub.
This creates an object that is capable of publishing messages.
Generally, you can instantiate this client with no arguments, and you
get sensible defaults.
Args:
batch_settings (~google.cloud.pubsub_v1.types.BatchSettings): The
settings for batch publishing.
kwargs (dict): Any additional arguments provided are sent as keyword
arguments to the underlying
:class:`~.gapic.pubsub.v1.publisher_client.PublisherClient`.
Generally, you should not need to set additional keyword arguments.
Before being passed along to the GAPIC constructor, a channel may
be added if ``credentials`` are passed explicitly or if the
Pub / Sub emulator is detected as running.
"""
def __init__(self, batch_settings=(), **kwargs):
# Sanity check: Is our goal to use the emulator?
# If so, create a grpc insecure channel with the emulator host
# as the target.
if os.environ.get('PUBSUB_EMULATOR_HOST'):
kwargs['channel'] = grpc.insecure_channel(
target=os.environ.get('PUBSUB_EMULATOR_HOST'),
)
# Use a custom channel.
# We need this in order to set appropriate default message size and
# keepalive options.
if 'channel' not in kwargs:
kwargs['channel'] = grpc_helpers.create_channel(
credentials=kwargs.pop('credentials', None),
target=self.target,
scopes=publisher_client.PublisherClient._DEFAULT_SCOPES,
options={
'grpc.max_send_message_length': -1,
'grpc.max_receive_message_length': -1,
}.items(),
)
# Add the metrics headers, and instantiate the underlying GAPIC
# client.
self.api = publisher_client.PublisherClient(**kwargs)
self.batch_settings = types.BatchSettings(*batch_settings)
# The batches on the publisher client are responsible for holding
# messages. One batch exists for each topic.
self._batch_lock = self._batch_class.make_lock()
self._batches = {}
@property
def target(self):
"""Return the target (where the API is).
Returns:
str: The location of the API.
"""
return publisher_client.PublisherClient.SERVICE_ADDRESS
def _batch(self, topic, create=False, autocommit=True):
"""Return the current batch for the provided topic.
This will create a new batch if ``create=True`` or if no batch
currently exists.
Args:
topic (str): A string representing the topic.
create (bool): Whether to create a new batch. Defaults to
:data:`False`. If :data:`True`, this will create a new batch
even if one already exists.
autocommit (bool): Whether to autocommit this batch. This is
primarily useful for debugging and testing, since it allows
the caller to avoid some side effects that batch creation
might have (e.g. spawning a worker to publish a batch).
Returns:
~.pubsub_v1._batch.Batch: The batch object.
"""
# If there is no matching batch yet, then potentially create one
# and place it on the batches dictionary.
with self._batch_lock:
if not create:
batch = self._batches.get(topic)
if batch is None:
create = True
if create:
batch = self._batch_class(
autocommit=autocommit,
client=self,
settings=self.batch_settings,
topic=topic,
)
self._batches[topic] = batch
return batch
def publish(self, topic, data, **attrs):
"""Publish a single message.
.. note::
Messages in Pub/Sub are blobs of bytes. They are *binary* data,
not text. You must send data as a bytestring
(``bytes`` in Python 3; ``str`` in Python 2), and this library
will raise an exception if you send a text string.
The reason that this is so important (and why we do not try to
coerce for you) is because Pub/Sub is also platform independent
and there is no way to know how to decode messages properly on
the other side; therefore, encoding and decoding is a required
exercise for the developer.
Add the given message to this object; this will cause it to be
published once the batch either has enough messages or a sufficient
period of time has elapsed.
Example:
>>> from google.cloud.pubsub_v1 import publisher_client
>>> client = publisher_client.PublisherClient()
>>> topic = client.topic_path('[PROJECT]', '[TOPIC]')
>>> data = b'The rain in Wales falls mainly on the snails.'
>>> response = client.publish(topic, data, username='guido')
Args:
topic (str): The topic to publish messages to.
data (bytes): A bytestring representing the message body. This
must be a bytestring.
attrs (Mapping[str, str]): A dictionary of attributes to be
sent as metadata. (These may be text strings or byte strings.)
Returns:
~concurrent.futures.Future: An object conforming to the
``concurrent.futures.Future`` interface.
"""
# Sanity check: Is the data being sent as a bytestring?
# If it is literally anything else, complain loudly about it.
if not isinstance(data, six.binary_type):
raise TypeError(
'Data being published to Pub/Sub must be sent '
'as a bytestring.'
)
# Coerce all attributes to text strings.
for k, v in copy.copy(attrs).items():
if isinstance(v, six.text_type):
continue
if isinstance(v, six.binary_type):
attrs[k] = v.decode('utf-8')
continue
raise TypeError(
'All attributes being published to Pub/Sub must '
'be sent as text strings.'
)
# Create the Pub/Sub message object.
message = types.PubsubMessage(data=data, attributes=attrs)
# Delegate the publishing to the batch.
batch = self._batch(topic)
future = None
while future is None:
future = batch.publish(message)
if future is None:
batch = self._batch(topic, create=True)
return future
|
Python
| 0
|
@@ -1124,40 +1124,8 @@
t):%0A
- _batch_class = thread.Batch%0A
@@ -1948,24 +1948,57 @@
ng.%0A %22%22%22%0A
+ _batch_class = thread.Batch%0A%0A
def __in
|
09ea74a9b3b3f518c67f719c3525b14058b528af
|
add files
|
declination.py
|
declination.py
|
Python
| 0.000002
|
@@ -0,0 +1,1062 @@
+# -*- coding: utf-8 -*-%0D%0A%22%22%22%0D%0ACreated on Sun Jul 20 21:11:52 2014%0D%0A%0D%0A@author: SB%0D%0A%0D%0A%0D%0A###############################################################################%0D%0AThis function gets the declination using webservices hosted %0D%0Aby the National Oceanic and Atmospheric Administration (NOAA)%0D%0A%0D%0ADeclination is a function of latitude and longitude and date%0D%0AThere are some limits on the numbers of times you can use the webservice per%0D%0Asecond etc.%0D%0A###############################################################################%0D%0A%22%22%22%0D%0A%0D%0Aimport requests%0D%0Afrom xml.etree import ElementTree%0D%0A%0D%0Adef calc_declination(longitude, latitude, year):%0D%0A%0D%0A longitude = %22%25.4f%22%25longitude%0D%0A latitude = %22%25.4f%22%25latitude%0D%0A startYear = %22%25.4f%22%25year%0D%0A%0D%0A URL = %22http://www.ngdc.noaa.gov/geomag-web/calculators/calculate%5C%0D%0ADeclination?lat1=%22 + latitude + %22&lon1=%22 + longitude + %22&startYear=%22 + startYear + %22&resultFormat=xml%22%0D%0A%0D%0A XMLresponse = requests.get(URL)%0D%0A declination = ElementTree.fromstring(XMLresponse.content)%5B0%5D%5B4%5D.text%0D%0A return float(declination)%0D%0A
|
|
1a9f379ed121945a79eff5c2fdd468c98f0381d7
|
add Jeu.py
|
Jeu.py
|
Jeu.py
|
Python
| 0.000001
|
@@ -0,0 +1,409 @@
+import pygame%0Aimport sys%0Afrom pygame.locals import *%0A%0Aclass Jeu:%0A%0A def Jeu(self):%0A self.__score = 0%0A print('Jeu creer')%0A%0A def augmenter_Score(self):%0A return 0%0A%0A def recup_score(self):%0A return 0%0A%0A def score(self):%0A return 0%0A%0A def tableau_jeu(self):%0A return 0%0A%0A def test_collision(self):%0A return 0%0A%0A def spawn_pomme(self):%0A return 0%0A%0A
|
|
63d7639f6c0e470575820be2b51444f34aa4bf2d
|
add flask app
|
app.py
|
app.py
|
Python
| 0.000003
|
@@ -0,0 +1,686 @@
+import jinja2%0Afrom flask import Flask, jsonify, make_response%0A%0Afrom pdf_getter import main%0A%0A%0Aapp = Flask(__name__)%0A%0A%0A@app.route('/planning', methods=%5B'GET'%5D)%0Adef get_planning():%0A pdf_filename = main()%0A if pdf_filename:%0A binary_pdf = open(%22./planning.pdf%22, %22rb%22)%0A binary_pdf = binary_pdf.read()%0A response = make_response(binary_pdf)%0A response.headers%5B'Content-Type'%5D = 'application/pdf'%0A response.headers%5B'Content-Disposition'%5D = 'inline; filename=planning.pdf'%0A return response%0A else:%0A jsonify(%22Error: There is an error, please contact the admin%22)%0A%0A%0Aif __name__ == '__main__':%0A app.run(host='0.0.0.0', port=8080, debug=False)
|
|
ec78a2b7551838ab05dce6c2c93c8c42b76fc850
|
Add utility functions (1)
|
src/btc_utilities.py
|
src/btc_utilities.py
|
Python
| 0
|
@@ -0,0 +1,1823 @@
+# Brain Tumor Classification%0A# Script for Utility Functions%0A# Author: Qixun Qu%0A# Create on: 2017/10/11%0A# Modify on: 2017/10/11%0A%0A# ,,, ,,,%0A# ;%22 '; ;' %22,%0A# ; @.ss$$$$$$s.@ ;%0A# %60s$$$$$$$$$$$$$$$'%0A# $$$$$$$$$$$$$$$$$$%0A# $$$$P%22%22Y$$$Y%22%22W$$$$$%0A# $$$$ p%22$$$%22q $$$$$%0A# $$$$ .$$$$$. $$$$'%0A# $$$DaU$$O$$DaU$$$'%0A# '$$$$'.%5E.'$$$$'%0A# '&$$$$$&'%0A%0A%0Aimport numpy as np%0Afrom math import factorial%0A%0A%0Adef compute_hist(volume):%0A rvolume = np.round(volume)%0A bins = np.arange(np.min(rvolume), np.max(rvolume))%0A hist = np.histogram(rvolume, bins=bins, density=True)%0A%0A x = hist%5B1%5D%5B1:%5D%0A y = hist%5B0%5D%0A%0A i = np.where(y %3E 0)%0A bg_index = i%5B0%5D%5B1%5D + int((i%5B0%5D%5B-1%5D - i%5B0%5D%5B1%5D) / 20.)%0A bg = x%5Bbg_index%5D%0A%0A x = x%5Bbg_index:%5D%0A y = y%5Bbg_index:%5D%0A%0A ysg = savitzky_golay(y, window_size=31, order=4)%0A%0A return x, y, ysg, bg%0A%0A%0Adef savitzky_golay(y, window_size, order, deriv=0, rate=1):%0A try:%0A window_size = np.abs(np.int(window_size))%0A order = np.abs(np.int(order))%0A except ValueError:%0A raise ValueError(%22window_size and order have to be of type int%22)%0A%0A if window_size %25 2 != 1 or window_size %3C 1:%0A raise TypeError(%22window_size size must be a positive odd number%22)%0A if window_size %3C order + 2:%0A raise TypeError(%22window_size is too small for the polynomials order%22)%0A%0A order_range = range(order + 1)%0A half_window = (window_size - 1) // 2%0A%0A b = np.mat(%5B%5Bk ** i for i in order_range%5D for k in range(-half_window, half_window + 1)%5D)%0A m = np.linalg.pinv(b).A%5Bderiv%5D * rate**deriv * factorial(deriv)%0A%0A firstvals = y%5B0%5D - np.abs(y%5B1:half_window + 1%5D%5B::-1%5D - y%5B0%5D)%0A lastvals = y%5B-1%5D + np.abs(y%5B-half_window - 1:-1%5D%5B::-1%5D - y%5B-1%5D)%0A%0A y = np.concatenate((firstvals, y, lastvals))%0A%0A return np.convolve(m%5B::-1%5D, y, mode='valid')%0A
|
|
800df7bcaa57e0935c0836f1f49de6407c55c212
|
add tests for clock exercise
|
day2/clock_test.py
|
day2/clock_test.py
|
Python
| 0.000001
|
@@ -0,0 +1,1271 @@
+import unittest%0Aimport clock%0A%0Aclass ClockTest(unittest.TestCase):%0A%0A def test_on_the_hour(self):%0A self.assertEqual(%2208:00%22, Clock.at(8).__str__() )%0A self.assertEqual(%2209:00%22, Clock.at(9).__str__() )%0A%0A def test_past_the_hour(self):%0A self.assertEqual(%2211:09%22, Clock.at(11, 9).__str__() )%0A%0A def test_add_a_few_minutes(self):%0A clock = Clock.at(10) + 3%0A self.assertEqual(%2210:03%22, clock.__str__())%0A%0A def test_add_over_an_hour(self):%0A clock = Clock.at(10) + 61%0A self.assertEqual(%2211:01%22, clock.__str__())%0A%0A def test_wrap_around_at_midnight(self):%0A clock = Clock.at(23, 30) + 60%0A self.assertEqual(%2200:30%22, clock.__str__())%0A%0A def test_subtract_minutes(self):%0A clock = Clock.at(10) - 90%0A self.assertEqual(%2208:30%22, clock.__str__())%0A%0A def test_equivalent_clocks(self):%0A clock1 = Clock.at(15, 37)%0A clock2 = Clock.at(15, 37)%0A self.assertEqual(clock1, clock2)%0A%0A def test_inequivalent_clocks(self):%0A clock1 = Clock.at(15, 37)%0A clock2 = Clock.at(15, 36)%0A clock3 = Clock.at(14, 37)%0A self.assertNotEqual(clock1, clock2)%0A self.assertNotEqual(clock1, clock3)%0A%0A def test_wrap_around_backwards(self):%0A clock = Clock.at(0, 30) - 60%0A self.assertEqual(%2223:30%22, clock.__str__())%0A%0Aif __name__ == '__main__':%0A unittest.main() %0A%0A
|
|
659af8d29ec1839217c74522d8fa5bcf61e48451
|
FIX category
|
l10n_it_CEE_balance/__openerp__.py
|
l10n_it_CEE_balance/__openerp__.py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Italian OpenERP Community (<http://www.openerp-italia.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Italy - Generic Chart of Accounts",
"version" : "0.1",
"depends" : ['l10n_it',],
"author" : "OpenERP Italian Community",
"description": """
Riclassificazione IV normativa UE per un piano dei conti italiano di un'impresa generica (compreso in l10n_it)
""",
"license": "AGPL-3",
"category" : "Localisation",
'website': 'http://www.openerp-italia.org/',
'init_xml': [
],
'update_xml': [
'data/account.account.type.csv',
'data/account.account.csv',
'account_view.xml',
],
'demo_xml': [
],
'installable': True,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Python
| 0.000001
|
@@ -1379,16 +1379,22 @@
lisation
+/Italy
%22,%0A '
|
e920d0dc413dacd2b1084c2cdcacb4bae3bc3d8b
|
Create problem10.py
|
W2/L4/problem10.py
|
W2/L4/problem10.py
|
Python
| 0.000029
|
@@ -0,0 +1,245 @@
+#L4 PROBLEM 10 %0A%0Adef isVowel(char):%0A if char == 'a' or char == 'e' or char == 'i' or char == 'o' or char == 'u' or char == 'A' or char == 'E' or char == 'I' or char == 'O' or char == 'U':%0A return True %0A else: %0A return False %0A
|
|
799599e1b841db8c9eff9de4cdf7dbc5cb396c37
|
Create dml.py
|
dml.py
|
dml.py
|
Python
| 0.000003
|
@@ -0,0 +1,4 @@
+dml%0A
|
|
349e63385ea71d1a69e094aeee1268c44e951b53
|
Update code funny
|
Strings/funny-string.py
|
Strings/funny-string.py
|
Python
| 0.000001
|
@@ -0,0 +1,376 @@
+# Funny String%0A# Developer: Murillo Grubler%0A# Link: https://www.hackerrank.com/challenges/funny-string/problem%0A%0Aimport string%0A%0A# ca - xz%0A# xc - cx%0A# zx - ac%0A%0A#acxz - 3%0A#acxza%0A%0Adef funnyString(s):%0A str_inverted = s%5B::-1%5D%0A for i in range(len(s) - 1):%0A print(s%5Bi%5D)%0A%0A return %22%22%0A%0Aq = int(input().strip())%0Afor a0 in range(q):%0A print(funnyString(input().strip()))%0A
|
|
45b0e958aa377afed2c62bf1e6f7c4933ccde39b
|
Add a test for main
|
test/test_main.py
|
test/test_main.py
|
Python
| 0
|
@@ -0,0 +1,853 @@
+from git_lang_guesser import main%0Afrom git_lang_guesser import git_requester%0A%0A%0ALANGUAGE = %22language%22%0A%0Atest_username = %22TestUser%22%0Aexample_data = %5B%0A %7BLANGUAGE: %22HTML%22%7D,%0A %7BLANGUAGE: %22Java%22%7D,%0A %7BLANGUAGE: %22Python%22%7D,%0A %7BLANGUAGE: %22Python%22%7D,%0A %7BLANGUAGE: %22C%22%7D,%0A%5D%0Aexpected_count = %7B%0A %22HTML%22: 1,%0A %22Java%22: 1,%0A %22Python%22: 2,%0A %22C%22: 1,%0A%7D%0Aexpected_favourite = %22Python%22%0A%0A%0Aclass TestDoGuess(object):%0A%0A def test_basic(self, monkeypatch, capsys):%0A %22%22%22Test that basic usage works%22%22%22%0A%0A def mock_request(username):%0A assert(username == test_username)%0A return example_data%0A monkeypatch.setattr(git_requester, %22get_public_repos_for_user%22, mock_request)%0A%0A main.do_guess(username=test_username, list_all=False)%0A%0A out, err = capsys.readouterr()%0A%0A assert(out.strip() == expected_favourite)%0A
|
|
c80a015151fb6648aa34e7b79cd29f4cd2c97560
|
add a example
|
test_disco_job.py
|
test_disco_job.py
|
Python
| 0.000425
|
@@ -0,0 +1,1014 @@
+from DiscoJob import DiscoJob%0A%0A%0Aimport logging%0A%0Aconfig = %7B%0A %22split_size%22: 1, #MB%0A %22input_uri%22: %22mongodb://localhost/test.modforty%22,%0A %22create_input_splits%22: True,%0A %22split_key%22: %7B'_id' : 1%7D,%0A %22output_uri%22:%22mongodb://localhost/test.out%22,%0A #%22job_output_key%22:%22I am the key%22,%0A %22job_output_value%22:%22I ame the value%22,%0A %22job_wait%22:True%0A %7D%0A%0Adef map(record, params):%0A yield record.get('name', %22NoName%22), 1%0A%0Adef reduce(iter, params):%0A from disco.util import kvgroup%0A for word, counts in kvgroup(sorted(iter)):%0A yield word, sum(counts)%0A%0Aif __name__ == '__main__':%0A%0A%0A '''%0A job = Job().run(%0A #input=%5B%22mongodb://localhost/test.modforty%22%5D,%0A input= do_split(config),%0A map=map,%0A reduce=reduce,%0A map_input_stream = mongodb_input_stream,%0A reduce_output_stream=mongodb_output_stream)%0A%0A job.wait(show=True)%0A '''%0A%0A DiscoJob(config = config,map = map,reduce = reduce).run()%0A %0A%0A
|
|
ee78590d2a6f0a509b08bf1b59b3f27560375524
|
add conftest
|
tests/conftest.py
|
tests/conftest.py
|
Python
| 0.000001
|
@@ -0,0 +1,205 @@
+from __future__ import print_function%0Aimport numpy as np%0A%0A%0Adef pytest_runtest_setup(item):%0A seed = np.random.randint(1000)%0A print(%22Seed used in np.random.seed(): %25d%22 %25 seed)%0A np.random.seed(seed)%0A
|
|
854be3d354886a8459d0987b54ad65d509db8001
|
Fix format of external link for pypi page description
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pytest-flask
============
A set of `py.test <http://pytest.org>`_ fixtures to test Flask
extensions and applications.
Features
--------
Plugin provides some fixtures to simplify app testing:
- ``client`` - an instance of ``app.test_client``,
- ``client_class`` - ``client`` fixture for class-based tests,
- ``config`` - you application config,
- ``live_server`` - runs an application in the background (useful for tests
with `Selenium <http://www.seleniumhq.org>` and other headless browsers),
- ``accept_json``, ``accept_jsonp``, ``accept_any`` - accept headers
suitable to use as parameters in ``client``.
To pass options to your application use the ``pytest.mark.app`` marker:
.. code:: python
@pytest.mark.app(debug=False)
def test_app(app):
assert not app.debug, 'Ensure the app not in debug mode'
During tests execution the application has pushed context, e.g. ``url_for``,
``session`` and other context bound objects are available without context
managers:
.. code:: python
def test_app(client):
assert client.get(url_for('myview')).status_code == 200
Response object has a ``json`` property to test a view that returns
a JSON response:
.. code:: python
@api.route('/ping')
def ping():
return jsonify(ping='pong')
def test_api_ping(client):
res = client.get(url_for('api.ping'))
assert res.json == {'ping': 'pong'}
If you want your tests done via Selenium or other headless browser use
the ``live_server`` fixture. The server's URL can be retrieved using
the ``url_for`` function:
.. code:: python
@pytest.mark.usefixtures('live_server')
class TestLiveServer:
def test_server_is_up_and_running(self):
res = urllib2.urlopen(url_for('index', _external=True))
assert b'OK' in res.read()
assert res.code == 200
Quick Start
-----------
To start using a plugin define you application fixture in ``conftest.py``:
.. code:: python
from myapp import create_app
@pytest.fixture
def app():
app = create_app()
return app
And run your test suite:
.. code:: bash
$ pip install pytest-flask
$ py.test
Contributing
------------
Don't hesitate to create a `GitHub issue
<https://github.com/vitalk/pytest-flask/issues>`_ for any **bug** or
**suggestion**.
"""
import os
import codecs
from setuptools import setup
from setuptools import find_packages
version = "0.5.0"
def read(*parts):
"""Reads the content of the file located at path created from *parts*."""
try:
return codecs.open(os.path.join(*parts), 'r', encoding='utf-8').read()
except IOError:
return ''
requirements = read('requirements', 'main.txt').splitlines()
setup(
name='pytest-flask',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# http://packaging.python.org/en/latest/tutorial.html#version
version=version,
author='Vital Kudzelka',
author_email='vital.kudzelka@gmail.com',
url='https://github.com/vitalk/pytest-flask',
download_url='https://github.com/vitalk/pytest-flask/tarball/%s' % version,
description='A set of py.test fixtures to test Flask applications.',
long_description=__doc__,
license='MIT',
packages=find_packages(exclude=['docs', 'tests']),
zip_safe=False,
platforms='any',
install_requires=requirements,
tests_require=[],
keywords='pytest flask testing',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Plugins',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Libraries :: Python Modules',
],
# The following makes a plugin available to pytest
entry_points = {
'pytest11': [
'flask = pytest_flask.plugin',
]
},
)
|
Python
| 0
|
@@ -512,16 +512,17 @@
hq.org%3E%60
+_
and oth
|
e8853997b7ba28da48e1620fd1466fbe8ca1d0c0
|
Add setup.py
|
setup.py
|
setup.py
|
Python
| 0
|
@@ -0,0 +1,989 @@
+#!/usr/bin/env python%0A%0Afrom distutils.core import setup%0A%0Afrom pyarrfs import pyarrfs%0A%0Aver = pyarrfs.__version__%0Along_desc = open(%22README%22).read()%0Ashort_desc = long_desc.split('%5Cn')%5B0%5D.split(' - ')%5B1%5D.strip()%0A%0Asetup(%0A name = 'pyarrfs',%0A version = pyarrfs.__version__,%0A description = short_desc,%0A long_description = long_desc,%0A author = pyarrfs.__author__,%0A license = pyarrfs.__license__,%0A author_email = pyarrfs.__author_email__,%0A url = pyarrfs.__url__,%0A scripts = %5B'pyarrfs'%5D,%0A keywords = %5B'rar', 'fuse'%5D,%0A classifiers = %5B%0A 'Development Status :: 4 - Beta',%0A 'Intended Audience :: End Users/Desktop',%0A 'Intended Audience :: System Administrators',%0A 'License :: OSI Approved :: MIT License',%0A 'Natural Language :: English',%0A 'Operating System :: POSIX :: Linux',%0A 'Programming Language :: Python :: 2',%0A 'Topic :: System :: Archiving :: Compression',%0A 'Topic :: System :: Filesystems'%0A %5D%0A)%0A%0A
|
|
94cd57176b3251dd44a5e46346df5d9363b90f80
|
Bump version for next release
|
setup.py
|
setup.py
|
#!/usr/bin/env python
"""Installation script."""
import logging
import os
from setuptools import setup
# inline:
# import git
NAME = 'tulip'
VERSION_FILE = '{name}/_version.py'.format(name=NAME)
MAJOR = 1
MINOR = 3
MICRO = 0
VERSION = '{major}.{minor}.{micro}'.format(
major=MAJOR, minor=MINOR, micro=MICRO)
VERSION_TEXT = (
'# This file was generated from setup.py\n'
"version = '{version}'\n")
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)
classifiers = [
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering']
package_data = {
'tulip.spec': ['parsetab.py']}
def package_jtlv():
"""Detect `jtlv`, or note its absence."""
path = os.path.join('tulip', 'interfaces', 'jtlv_grgame.jar')
if os.path.exists(path):
print('Found optional JTLV-based solver.')
package_data['tulip.interfaces'] = ['jtlv_grgame.jar']
else:
print('The jtlv synthesis tool was not found. '
'Try extern/get-jtlv.sh to get it.\n'
'It is an optional alternative to `omega`, '
'the default GR(1) solver of TuLiP.')
def git_version(version):
"""Return version with local version identifier."""
import git
repo = git.Repo('.git')
repo.git.status()
sha = repo.head.commit.hexsha
if repo.is_dirty():
return '{v}.dev0+{sha}.dirty'.format(
v=version, sha=sha)
# commit is clean
# is it release of `version` ?
try:
tag = repo.git.describe(
match='v[0-9]*', exact_match=True,
tags=True, dirty=True)
except git.GitCommandError:
return '{v}.dev0+{sha}'.format(
v=version, sha=sha)
assert tag == 'v' + version, (tag, version)
return version
def run_setup():
"""Build parser, get version from `git`, install."""
# Build PLY table, to be installed as tulip package data
try:
import tulip.spec.lexyacc
tabmodule = tulip.spec.lexyacc.TABMODULE.split('.')[-1]
outputdir = 'tulip/spec'
parser = tulip.spec.lexyacc.Parser()
parser.build(tabmodule, outputdir=outputdir,
write_tables=True,
debug=True, debuglog=logger)
plytable_build_failed = False
except Exception as e:
print('Failed to build PLY tables: {e}'.format(e=e))
plytable_build_failed = True
# version
try:
version = git_version(VERSION)
except AssertionError:
raise
except Exception:
print('No git info: Assume release.')
version = VERSION
s = VERSION_TEXT.format(version=version)
with open(VERSION_FILE, 'w') as f:
f.write(s)
# setup
package_jtlv()
setup(
name=NAME,
version=version,
description='Temporal Logic Planning (TuLiP) Toolbox',
author='Caltech Control and Dynamical Systems',
author_email='tulip@tulip-control.org',
url='http://tulip-control.org',
bugtrack_url=('http://github.com/tulip-control/'
'tulip-control/issues'),
license='BSD',
classifiers=classifiers,
install_requires=[
'networkx >= 1.8, <= 1.10',
'numpy >= 1.7',
'omega >= 0.0.9, < 0.1.0',
'ply >= 3.4',
'polytope >= 0.1.2',
'pydot >= 1.2.0',
'scipy'],
tests_require=[
'nose',
'matplotlib',
'mock'],
packages=[
'tulip', 'tulip.transys', 'tulip.transys.export',
'tulip.abstract', 'tulip.spec',
'tulip.interfaces'],
package_dir={'tulip': 'tulip'},
package_data=package_data)
# ply failed ?
if plytable_build_failed:
print('!' * 65 +
' Failed to build PLY table. ' +
'Please run setup.py again.' +
'!' * 65)
if __name__ == '__main__':
run_setup()
|
Python
| 0
|
@@ -218,17 +218,17 @@
MICRO =
-0
+1
%0AVERSION
|
b0ada080d8c8890152a57168e6b7b449a5588f10
|
Add setup.py for PyPI
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,784 @@
+#!/usr/bin/env python%0Afrom setuptools import setup, find_packages%0A%0Asetup(%0A name='clocktower',%0A version='0.1.0',%0A author='Kyle Conroy',%0A author_email='kyle@twilio.com',%0A url='https://github.com/derferman/clocktower',%0A description='Download websites from Wayback Machine',%0A install_requires=%5B'lxml'%5D,%0A data_files=%5B%5D,%0A classifiers=%5B%0A %22Development Status :: 5 - Production/Stable%22,%0A %22Intended Audience :: Developers%22,%0A %22License :: OSI Approved :: MIT License%22,%0A %22Operating System :: OS Independent%22,%0A %22Programming Language :: Python :: 2.7%22,%0A %22Topic :: Software Development :: Libraries :: Python Modules%22,%0A %5D,%0A entry_points=%7B%0A 'console_scripts': %5B%0A 'clocktower = clocktower:main'%5D%0A %7D,%0A)%0A
|
|
9c6ba2717fb71755d31d8c7b7066730171be8b20
|
bump version, add missing dependency - Django
|
setup.py
|
setup.py
|
import os
from setuptools import setup
f = open(os.path.join(os.path.dirname(__file__), 'README.md'))
readme = f.read()
f.close()
setup(
name='django-ajaximage',
version='0.1.16',
description='Add ajax image upload functionality with a progress bar to file input fields within Django admin. Images are optionally resized.',
long_description=readme,
author="Bradley Griffiths",
author_email='bradley.griffiths@gmail.com',
url='https://github.com/bradleyg/django-ajaximage',
packages=['ajaximage'],
include_package_data=True,
install_requires=['setuptools', 'pillow'],
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
)
|
Python
| 0
|
@@ -180,16 +180,24 @@
='0.1.16
+-rohanza
',%0A d
@@ -589,30 +589,49 @@
es=%5B
-'setuptools', 'p
+%0A 'Django',%0A 'P
illow'
+,%0A
%5D,%0A
|
fac7c76ea056179653574dffd297f927f76daa42
|
add setuptools routine
|
setup.py
|
setup.py
|
Python
| 0
|
@@ -0,0 +1,852 @@
+import os%0Afrom setuptools import setup%0A%0Asetup(%0A name = %22opti_ssr%22,%0A version = %220.0.4%22,%0A py_modules=%5B'opti_ssr', 'ssr_network', 'opti_network'%5D,%0A author = %22Felix Immohr, Fiete Winter%22,%0A author_email = %22test@te.st, fiete.winter@gmail.com%22,%0A description = (%22Using the OptiTrack system for different applications %22%0A %22of the SoundScape Renderer%22),%0A license = %22MIT%22,%0A keywords = %22optitrack motive natnet ssr soundscaperenderer%22.split(),%0A url = %22%22,%0A long_description=open('README').read(),%0A platforms='any',%0A classifiers=%5B%0A %22Development Status :: 3 - Alpha%22,%0A %22License :: OSI Approved :: MIT License%22,%0A %22Programming Language :: Python%22,%0A %22Programming Language :: Python :: 2%22,%0A %22Programming Language :: Python :: 3%22,%0A %22Topic :: Scientific/Engineering%22,%0A %5D,%0A)%0A
|
|
230cae4f6cce8e064b5b74f87ec09181e41f57c2
|
Add MDR setup file
|
setup.py
|
setup.py
|
Python
| 0
|
@@ -0,0 +1,1764 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0Afrom setuptools import setup, find_packages%0A%0Adef calculate_version():%0A initpy = open('mdr/_version.py').read().split('%5Cn')%0A version = list(filter(lambda x: '__version__' in x, initpy))%5B0%5D.split('%5C'')%5B1%5D%0A return version%0A%0Apackage_version = calculate_version()%0A%0Asetup(%0A name='scikit-MDR',%0A version=package_version,%0A author='Randal S. Olson',%0A author_email='rso@randalolson.com',%0A packages=find_packages(),%0A url='https://github.com/rhiever/scikit-mdr',%0A license='License :: OSI Approved :: MIT License',%0A #entry_points=%7B'console_scripts': %5B'mdr=mdr:main', %5D%7D,%0A description=('A sklearn-compatible Python implementation of Multifactor Dimensionality Reduction (MDR) for feature construction.'),%0A long_description='''%0AA sklearn-compatible Python implementation of Multifactor Dimensionality Reduction (MDR) for feature construction.%0A%0AContact%0A=============%0AIf you have any questions or comments about scikit-MDR, please feel free to contact me via:%0A%0AE-mail: rso@randalolson.com%0A%0Aor Twitter: https://twitter.com/randal_olson%0A%0AThis project is hosted at https://github.com/rhiever/scikit-mdr%0A''',%0A zip_safe=True,%0A install_requires=%5B'numpy', 'scipy', 'pandas', 'scikit-learn'%5D,%0A classifiers=%5B%0A 'Intended Audience :: Science/Research',%0A 'License :: OSI Approved :: MIT License',%0A 'Programming Language :: Python :: 2',%0A 'Programming Language :: Python :: 2.7',%0A 'Programming Language :: Python :: 3',%0A 'Programming Language :: Python :: 3.4',%0A 'Programming Language :: Python :: 3.5',%0A %5D,%0A keywords=%5B'bioinformatics', 'GWAS', 'feature construction', 'single nucleotide polymorphisms', 'epistasis', 'dimesionality reduction'%5D,%0A)%0A
|
|
612517b15a40af29fa57d3a5507ba778ff32fa51
|
Add setup.py
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,506 @@
+from setuptools import setup%0A%0Asetup(name='pattern_finder_gpu',%0A version='1.0',%0A description='Brute force OpenCL based pattern localization in images that supports masking and weighting.',%0A url='https://github.com/HearSys/pattern_finder_gpu',%0A author='Samuel John (H%C3%B6rSys GmbH)',%0A author_email='john.samuel@hoersys.de',%0A license='MIT',%0A packages=%5B'pattern_finder_gpu'%5D,%0A install_requires=%5B'pyopencl', 'numpy', 'scipy', 'matplotlib', 'skimage'%5D,%0A zip_safe=False)%0A
|
|
380fb1e3b5b8fbde868f7fffbe0a6f22fc037e55
|
Remove download_url from setup.py
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import ah_bootstrap
import pkg_resources
from setuptools import setup
from astropy_helpers.setup_helpers import register_commands, get_package_info
from astropy_helpers.version_helpers import generate_version_py
NAME = 'astropy_helpers'
VERSION = '2.0.dev'
RELEASE = 'dev' not in VERSION
DOWNLOAD_BASE_URL = 'http://pypi.io/packages/source/a/astropy-helpers'
generate_version_py(NAME, VERSION, RELEASE, False, uses_git=not RELEASE)
# Use the updated version including the git rev count
from astropy_helpers.version import version as VERSION
cmdclass = register_commands(NAME, VERSION, RELEASE)
# This package actually doesn't use the Astropy test command
del cmdclass['test']
setup(
name=pkg_resources.safe_name(NAME), # astropy_helpers -> astropy-helpers
version=VERSION,
description='Utilities for building and installing Astropy, Astropy '
'affiliated packages, and their respective documentation.',
author='The Astropy Developers',
author_email='astropy.team@gmail.com',
license='BSD',
url=' https://github.com/astropy/astropy-helpers',
long_description=open('README.rst').read(),
download_url='{0}/astropy-helpers-{1}.tar.gz'.format(DOWNLOAD_BASE_URL,
VERSION),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Framework :: Setuptools Plugin',
'Framework :: Sphinx :: Extension',
'Framework :: Sphinx :: Theme',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Archiving :: Packaging'
],
cmdclass=cmdclass,
zip_safe=False,
**get_package_info(exclude=['astropy_helpers.tests'])
)
|
Python
| 0
|
@@ -372,79 +372,8 @@
SION
-%0ADOWNLOAD_BASE_URL = 'http://pypi.io/packages/source/a/astropy-helpers'
%0A%0Age
@@ -1152,151 +1152,8 @@
(),%0A
- download_url='%7B0%7D/astropy-helpers-%7B1%7D.tar.gz'.format(DOWNLOAD_BASE_URL,%0A VERSION),%0A
|
a9f0d310e967bea276f01cd558f48fc102ea24fc
|
add setup file for installation as module
|
setup.py
|
setup.py
|
Python
| 0
|
@@ -0,0 +1,439 @@
+from setuptools import setup%0A%0Asetup(name='minedatabase',%0A version='0.1',%0A description='Metabolic In silico Network Expansions',%0A url='http://github.com/JamesJeffryes/mine-database',%0A author='James Jeffryes',%0A author_email='jamesgjeffryes@gmail.com',%0A license='MIT',%0A packages=%5B'minedatabase',%0A 'minedatabase.NP_Score'%5D,%0A install_requires=%5B'pymongo'%5D,%0A extras_require=%7B%7D,%0A )%0A
|
|
77700d265fbf3fafa21b335585b8060a0b025143
|
add setup.py
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,1098 @@
+# Copyright 2010 Gregory Szorc%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0Afrom setuptools import setup, find_packages%0A%0Asetup(%0A name = 'lua-protobuf',%0A version = '0.0.1',%0A packages = %5B 'lua_protobuf' %5D,%0A scripts = %5B'protoc-gen-lua'%5D,%0A install_requires = %5B 'protobuf%3E=2.3.0' %5D,%0A author = 'Gregory Szorc',%0A author_email = 'gregory.szorc@gmail.com',%0A description = 'Lua protocol buffer code generator',%0A license = 'Apache 2.0',%0A url = 'http://github.com/indygreg/lua-protobuf'%0A)%0A
|
|
092bf8bc2e558420ca51384a3dd1019ab1115ad2
|
Fix conditional dependencies when using wheels
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
from djangocms_link import __version__
INSTALL_REQUIRES = [
#'Django-Select2',
]
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Communications',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
]
setup(
name='djangocms-link',
version=__version__,
description='Link Plugin for django CMS',
author='Divio AG',
author_email='info@divio.ch',
url='https://github.com/divio/djangocms-link',
packages=['djangocms_link', 'djangocms_link.migrations', 'djangocms_link.migrations_django'],
install_requires=INSTALL_REQUIRES,
license='LICENSE.txt',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
long_description=open('README.md').read(),
include_package_data=True,
zip_safe=False
)
|
Python
| 0.000266
|
@@ -113,55 +113,8 @@
_%0A%0A%0A
-INSTALL_REQUIRES = %5B%0A #'Django-Select2',%0A%5D%0A%0A
CLAS
@@ -996,24 +996,262 @@
res=
-INSTALL_REQUIRES
+%5B%5D,%0A extras_require=%7B%0A %22:python_version=='3.3'%22: %5B'django-select2-py3'%5D,%0A %22:python_version=='3.4'%22: %5B'django-select2-py3'%5D,%0A %22:python_version=='2.6'%22: %5B'django-select2'%5D,%0A %22:python_version=='2.7'%22: %5B'django-select2'%5D,%0A %7D
,%0A
|
a9e2e225e083575c66c33986db06b496c1596449
|
Create setup.py
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,326 @@
+from setuptools import setup, find_packages%0A%0Asetup(%0A name='wwsync',%0A version='1.0',%0A author='Matt Bachmann',%0A url='https://github.com/Bachmann1234/weight-watchers-sync',%0A description='Syncs Weight Watcher food log to Fitbit',%0A license='MIT',%0A packages=find_packages(),%0A install_requires=%5B'requests==2.9.1'%5D%0A)%0A%0A
|
|
265e1b6177552e13f332b1f39885433789f27d94
|
add setup.py
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,1013 @@
+#!/usr/bin/env python%0A%0Afrom setuptools import setup%0Aimport re%0A%0Awith open('jook/__init__.py', 'r') as fobj:%0A version = re.search(r'%5E__version__%5Cs*=%5Cs*%5B%5C'%22%5D(%5B%5E%5C'%22%5D*)%5B%5C'%22%5D',%0A fobj.read(), re.MULTILINE).group(1)%0A%0Awith open('README.rst', 'r') as fobj:%0A long_description = fobj.read()%0A%0Asetup(%0A name='Jook',%0A version=version,%0A description='A Jamf Pro webhook simulator',%0A long_description=long_description,%0A url='https://github.com/brysontyrrell/Jook',%0A author='Bryson Tyrrell',%0A author_email='bryson.tyrrell@gmail.com',%0A license='MIT',%0A classifiers=%5B%0A 'Development Status :: 1 - Planning',%0A 'Intended Audience :: Developers',%0A 'Intended Audience :: Information Technology',%0A 'License :: OSI Approved :: MIT License',%0A 'Programming Language :: Python :: 2.7'%0A %5D,%0A keywords='jamf webhooks testing',%0A packages=%5B'jook'%5D,%0A install_requires=%5B%0A 'dicttoxml%3E=1.7',%0A 'requests%3E=2.11'%0A %5D,%0A zip_safe=False%0A)%0A
|
|
90faa98cf8d0a11e8cfd1ff1b91b505a43f956c5
|
Add setup.py
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,1345 @@
+%22%22%22Module to prepare you python environment.%22%22%22%0Aimport importlib%0A%0A%0Adef run_package(package):%0A %22%22%22Try to run the package just installed.%22%22%22%0A try:%0A globals()%5Bpackage%5D = importlib.import_module(package)%0A print(%22================================%22)%0A except Exception:%0A print(%22We can't install %7B%7D%22.format(package))%0A print(%22================================%22)%0A%0A%0Adef install_and_import(package):%0A %22%22%22Install all requiered packages.%22%22%22%0A print(%22checking for %7B%7D%22.format(str(package)))%0A try:%0A importlib.import_module(package)%0A print(%22%7B%7D is already installed%22.format(package))%0A except ImportError:%0A print(%22We'll install %7B%7D before continuing%22.format(package))%0A import pip%0A pip.main(%5B'--trusted-host', 'pypi.python.org', 'install', package%5D)%0A print(%22installing %7B%7D...%22.format(package))%0A finally:%0A run_package(package)%0A%0A%0Adef get_all_packages():%0A %22%22%22%22Get all packages in requirement.txt.%22%22%22%0A lst_packages = list()%0A with open('requirement.txt') as fp:%0A for line in fp:%0A lst_packages.append(line.split(%22=%22)%5B0%5D.lower())%0A%0A return lst_packages%0A%0A%0Aif __name__ == '__main__':%0A lst_install_requires = get_all_packages()%0A for module in lst_install_requires:%0A install_and_import(module)%0A print('You are ready to use the module')%0A
|
|
1bf649207f850b21e99eb6a9479ecdb1cb03a93d
|
Update version to 0.7.1
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from setuptools import setup
install_requires = ['Django>=1.5']
try:
from collections import OrderedDict
except ImportError:
install_requires.append('ordereddict>=1.1')
setup(
name='django-auth-policy',
version='0.7',
description='Enforces a couple of common authentication policies for the '
'Django web framework.',
author='Fox-IT B.V.',
author_email='fox@fox-it.com',
maintainer='Rudolph Froger',
maintainer_email='rudolphfroger@estrate.nl',
url='https://github.com/rudolphfroger/django-auth-policy',
license='BSD',
packages=['django_auth_policy'],
package_data={'django_auth_policy': ['locale/*/LC_MESSAGES/*.mo',
'locale/*/LC_MESSAGES/*.po']},
install_requires=install_requires,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
'Topic :: Security',
'Topic :: Internet :: WWW/HTTP :: Session',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
|
Python
| 0.000001
|
@@ -251,16 +251,18 @@
ion='0.7
+.1
',%0A d
|
b37b2976398f52032379c916714e4b6360614e78
|
create packaging file
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,550 @@
+#!/usr/bin/env python%0A%0Aimport os%0Aimport sys%0A%0Atry:%0A from setuptools import setup%0Aexcept ImportError:%0A from distutils.core import setup%0A%0Aif sys.argv%5B-1%5D == 'publish':%0A os.system('python setup.py sdist upload')%0A sys.exit()%0A%0Awith open('README.rst') as f:%0A readme = f.read()%0Awith open('LICENSE') as f:%0A license = f.read()%0A%0Asetup(%0A name='threetaps',%0A version=threetaps.version,%0A description='3taps Python API Client.',%0A long_description=readme,%0A author='Michael Kolodny',%0A packages=%5B'threetaps'%5D,%0A license=license,%0A)%0A
|
|
18688b67e2dfc36ff6b1c28a618a289f46cc494d
|
Add Slide class
|
slide.py
|
slide.py
|
Python
| 0
|
@@ -0,0 +1,151 @@
+class Slide:%0A %22%22%22 Stores data to be studied. %22%22%22%0A%0A def __init__(self, prompt, answer):%0A self.prompt = prompt%0A self.answer = answer%0A
|
|
b418ff779c79afd0eca85ed1479ba633f25ce73c
|
Fix variable referenced before assginment in vmwareapi code.
|
nova/tests/test_vmwareapi_vm_util.py
|
nova/tests/test_vmwareapi_vm_util.py
|
Python
| 0.000001
|
@@ -0,0 +1,1841 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4%0A#%0A# Copyright 2013 Canonical Corp.%0A# All Rights Reserved.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22); you may%0A# not use this file except in compliance with the License. You may obtain%0A# a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS, WITHOUT%0A# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the%0A# License for the specific language governing permissions and limitations%0A# under the License.%0A%0Afrom nova import exception%0Afrom nova import test%0Afrom nova.virt.vmwareapi import fake%0Afrom nova.virt.vmwareapi import vm_util%0A%0A%0Aclass fake_session(object):%0A def __init__(self, ret=None):%0A self.ret = ret%0A%0A def _call_method(self, *args):%0A return self.ret%0A%0A%0Aclass VMwareVMUtilTestCase(test.TestCase):%0A def setUp(self):%0A super(VMwareVMUtilTestCase, self).setUp()%0A%0A def tearDown(self):%0A super(VMwareVMUtilTestCase, self).tearDown()%0A%0A def test_get_datastore_ref_and_name(self):%0A result = vm_util.get_datastore_ref_and_name(%0A fake_session(%5Bfake.Datastore()%5D))%0A%0A self.assertEquals(result%5B1%5D, %22fake-ds%22)%0A self.assertEquals(result%5B2%5D, 1024 * 1024 * 1024)%0A self.assertEquals(result%5B3%5D, 1024 * 1024 * 500)%0A%0A def test_get_datastore_ref_and_name_without_datastore(self):%0A%0A self.assertRaises(exception.DatastoreNotFound,%0A vm_util.get_datastore_ref_and_name,%0A fake_session(), host=%22fake-host%22)%0A%0A self.assertRaises(exception.DatastoreNotFound,%0A vm_util.get_datastore_ref_and_name,%0A fake_session(), cluster=%22fake-cluster%22)%0A
|
|
c3f1e723e13598deb53a8454787204f00841c34a
|
Add extension method type
|
numba/typesystem/exttypes/methods.py
|
numba/typesystem/exttypes/methods.py
|
Python
| 0.000001
|
@@ -0,0 +1,677 @@
+%0A%0Afrom numba.typesystem import *%0A%0A#------------------------------------------------------------------------%0A# Extension Method Types%0A#------------------------------------------------------------------------%0A%0Aclass ExtMethodType(NumbaType, minitypes.FunctionType):%0A %22%22%22%0A Extension method type, a FunctionType plus the following fields:%0A%0A is_class: is classmethod?%0A is_static: is staticmethod?%0A %22%22%22%0A%0A def __init__(self, return_type, args, name=None,%0A is_class=False, is_static=False, **kwds):%0A super(ExtMethodType, self).__init__(return_type, args, name, **kwds)%0A self.is_class = is_class%0A self.is_static = is_static
|
|
7162926576b6136c17a4f1d889d7ecd2541a763c
|
Add examples_plot_features.py
|
examples/plot_features.py
|
examples/plot_features.py
|
Python
| 0
|
@@ -0,0 +1,3206 @@
+#!/usr/bin/env python%0A%0A# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project%0A# All rights reserved.%0A#%0A# This file is part of NeuroM %3Chttps://github.com/BlueBrain/NeuroM%3E%0A#%0A# Redistribution and use in source and binary forms, with or without%0A# modification, are permitted provided that the following conditions are met:%0A#%0A# 1. Redistributions of source code must retain the above copyright%0A# notice, this list of conditions and the following disclaimer.%0A# 2. Redistributions in binary form must reproduce the above copyright%0A# notice, this list of conditions and the following disclaimer in the%0A# documentation and/or other materials provided with the distribution.%0A# 3. Neither the name of the copyright holder nor the names of%0A# its contributors may be used to endorse or promote products%0A# derived from this software without specific prior written permission.%0A#%0A# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS %22AS IS%22 AND%0A# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED%0A# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE%0A# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY%0A# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES%0A# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;%0A# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND%0A# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT%0A# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS%0A# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.%0A'''Plot a selection of features from a morphology population'''%0A%0Afrom neurom import ezy%0Afrom neurom.analysis import morphtree as mt%0Afrom collections import defaultdict%0Aimport json%0Aimport matplotlib.pyplot as plt%0A%0Anrns = ezy.load_neurons('../morphsyn/Synthesizer/build/L23MC/')%0Asim_params = json.load(open('../morphsyn/Synthesizer/data/L23MC.json'))%0A%0A%0ANEURITES_ = (ezy.TreeType.axon,%0A ezy.TreeType.apical_dendrite,%0A ezy.TreeType.basal_dendrite)%0A%0AGET_FEATURE = %7B%0A 'trunk_azimuth': lambda nrn, typ: %5Bmt.trunk_azimuth(n, nrn.soma)%0A for n in nrn.neurites if n.type == typ%5D,%0A 'trunk_elevation': lambda nrn, typ: %5Bmt.trunk_elevation(n, nrn.soma)%0A for n in nrn.neurites if n.type == typ%5D%0A%7D%0A%0AFEATURES = GET_FEATURE.keys()%0A%0Astuff = defaultdict(lambda: defaultdict(list))%0A%0A# unpack data into arrays%0Afor nrn in nrns:%0A for t in NEURITES_:%0A for feat in FEATURES:%0A stuff%5Bfeat%5D%5Bstr(t).split('.')%5B1%5D%5D.extend(%0A GET_FEATURE%5Bfeat%5D(nrn, t)%0A )%0A%0A# Then access the arrays of azimuths with tr_azimuth%5Bkey%5D%0A# where the keys are string representations of the tree types.%0A%0Afor feat, d in stuff.iteritems():%0A for typ, data in d.iteritems():%0A print typ, feat%0A print 'Params:', sim_params%5B'components'%5D%5Btyp%5D%5Bfeat%5D%0A%0A num_bins = 100%0A n, bins, patches = plt.hist(data, num_bins, normed=1, facecolor='green', alpha=0.5)%0A plt.show()%0A
|
|
ebceea82af38e4c7f11678841bfbce3635a66f7d
|
Add twitter_parser.py
|
newsman/scraper/twitter_parser.py
|
newsman/scraper/twitter_parser.py
|
Python
| 0.00285
|
@@ -0,0 +1,620 @@
+#!/usr/bin/env python %0A#-*- coding: utf-8 -*- %0A%0A%22%22%22%0ATwitter parser parses specific twitter account in real time%0A%22%22%22%0A# @author chengdujin%0A# @contact chengdujin@gmail.com%0A# @created Nov. 19, 2013%0A%0A%0Aimport sys %0Areload(sys) %0Asys.setdefaultencoding('UTF-8')%0A%0Aimport twitter%0Aimport urllib2%0A%0Aaccess_token_key = %2224129666-M47Q6pDLZXLQy1UITxkijkTdKfkvTcBpleidNPjac%22%0Aaccess_token_secret = %220zHhqV5gmrmsnjiOEOBCvqxORwsjVC5ax4mM3dCDZ7RLk%22%0Aconsumer_key = %22hySdhZgpj5gF12kRWMoVpQ%22%0Aconsumer_secret = %222AkrRg89SdJL0qHkHwuP933fiBaNTioChMpxRdoicUQ%22%0A%0Aapi = twitter.Api(consumer_key, consumer_secret, access_token_key, access_token_secret)%0A
|
|
3912e9ab49e10f2490da36b17e8525d2c97c1844
|
add fabric
|
fabfile.py
|
fabfile.py
|
Python
| 0.000001
|
@@ -0,0 +1,533 @@
+#!/usr/bin/env python3%0A# -*- coding: utf-8 -*-%0A%0A%0Afrom fabric.api import run, env, cd%0A%0A%0A# the user to use for the remote commands%0Aenv.user = 'answeror'%0A# the servers where the commands are executed%0Aenv.hosts = %5B'aip.io'%5D%0A%0A%0Adef deploy():%0A run('pyenv virtualenvwrapper')%0A run('workon aip')%0A with cd('/www/aip/repo'):%0A run('git pull')%0A run('python setup.py develop')%0A # and finally touch the .wsgi file so that mod_wsgi triggers%0A # a reload of the application%0A run('touch /www/aip/repo/application.wsgi')%0A
|
|
643b4867627feb2810257f99b0b7865b43bb6454
|
Add fabfile to deploy documentation
|
fabfile.py
|
fabfile.py
|
Python
| 0.000005
|
@@ -0,0 +1,849 @@
+# -*- coding: utf-8 -*-%0A%22%22%22%0A fabfile%0A%0A Fab file to build and push documentation to github%0A%0A :copyright: %C2%A9 2013-2014 by Openlabs Technologies & Consulting (P) Limited%0A :license: BSD, see LICENSE for more details.%0A%22%22%22%0Aimport time%0Aimport getpass%0A%0Aimport os%0Afrom fabric.api import (%0A local, lcd, cd, sudo, execute, settings, env, run, prompt%0A)%0Afrom fabric.decorators import hosts%0A%0A%0A%0Adef upload_documentation():%0A %22%22%22%0A Build and upload the documentation HTML to github%0A %22%22%22%0A # Build the documentation%0A local('grunt ngdocs')%0A%0A # Checkout to gh-pages branch%0A local('git checkout gh-pages')%0A%0A # Copy back the files from docs folder%0A local('cp -a docs/* .')%0A%0A # Add the relevant files%0A local('git add .')%0A local('git commit -m %22Build documentation%22')%0A local('git push')%0A local('git checkout develop')%0A%0A
|
|
b63f6a83f86808df0a2fea66b47478e3c5ec0994
|
Create neymanPearson2.py
|
scripts/neymanPearson2.py
|
scripts/neymanPearson2.py
|
Python
| 0.999614
|
@@ -0,0 +1,1325 @@
+# Convert Neyman-Pearson testing paradigm(Fig 5.15 (a)) to python/JAX%0A# Author: Garvit9000c%0A%0Aimport jax.scipy.stats.multivariate_normal as gaussprob%0Aimport jax.numpy as jnp%0Aimport matplotlib.pyplot as plt%0A%0A#constants%0Api=jnp.pi%0Asigma=1.5%0Axmin = -4%0Axmax = 8%0Aymin = 0%0Aymax = 0.3%0Ares = 0.01%0A%0A#Domain%0Ax=jnp.arange(xmin,xmax,res)%0A%0A#functions%0Ay1=gaussprob.pdf(x, 0, sigma**2)%0Ay2=gaussprob.pdf(x, 4, sigma**2)%0A%0A#Axes Limits%0Aplt.ylim(ymin,ymax)%0Aplt.xlim(xmin,xmax)%0A%0A#Ploting Curve%0Aplt.plot(x,y1,'b') #Curve_B%0Aplt.plot(x,y2,'r') #Curve_A%0A%0A%0Aplt.vlines(x=2.3, ymin=0, ymax=0.5, linewidth=1.5, color='k')%0Aplt.xticks(%5B2.3%5D,%5B'$X%5E*$'%5D,size=18)%0Aplt.yticks(%5B%5D)%0A%0A#Shading %CE%B1 Region%0Ax1=jnp.arange(2.3,xmax,res)%0Ay_1=gaussprob.pdf(x1, 0, sigma**2)%0Aplt.fill_between(x1,y_1, 0, alpha=0.50)%0A%0A#Shading %CE%B2 Region%0Ax2=jnp.arange(xmin,2.3,res)%0Ay_2=gaussprob.pdf(x2, 4, sigma**2)%0Aplt.fill_between(x2,y_2, 0, alpha=0.50)%0A%0A#Axis Arrows%0Aplt.arrow(0,0.07,1.2,-0.05,color='black',head_width=0.02,head_length=0.2) #%CE%B2 %0Aplt.arrow(4,0.07,-1.2,-0.05,color='black',head_width=0.02,head_length=0.2)#%CE%B1%0A%0A#label%0Aplt.text(-0.4, 0.07, '%CE%B2', fontsize=15) #%CE%B2%0Aplt.text(4, 0.07, '%CE%B1', fontsize=15) #%CE%B1%0Aplt.text(-0.2, 0.28, '$H_0$', fontsize=15) #H0%0Aplt.text(3.8,0.28, '$H_1$', fontsize=15) #H1%0A%0Aplt.savefig('../figures/neymanPearson2.pdf', dpi=300)%0Aplt.show()%0A
|
|
b0bcff5bf333d866982214a4621be5359d8ec69b
|
Version bump to 0.8.5
|
namebench.py
|
namebench.py
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple DNS server comparison benchmarking tool.
Designed to assist system administrators in selection and prioritization.
"""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import datetime
import optparse
import sys
import tempfile
# Make it easy to import 3rd party utilities without editing their imports.
sys.path.append('lib/third_party')
from lib import benchmark
from lib import config
from lib import history_parser
from lib import nameserver_list
from lib import conn_quality
VERSION = '0.8.4'
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option('-r', '--runs', dest='run_count', default=1, type='int',
help='Number of test runs to perform on each nameserver.')
parser.add_option('-c', '--config', dest='config', default='namebench.cfg',
help='Config file to use.')
parser.add_option('-o', '--output', dest='output_file', default='output.csv',
help='Filename to write query results to (CSV format).')
parser.add_option('-j', '--threads', dest='thread_count',
help='# of threads to use')
parser.add_option('-y', '--timeout', dest='timeout', type='float',
help='# of seconds general requests timeout in.')
parser.add_option('-Y', '--health_timeout', dest='health_timeout',
type='float', help='health check timeout (in seconds)')
parser.add_option('-f', '--filename', dest='data_file',
default='data/alexa-top-10000-global.txt',
help='File containing a list of domain names to query.')
parser.add_option('-i', '--import', dest='import_file',
help=('Import history from safari, google_chrome, '
'internet_explorer, opera, squid, or a file path.'))
parser.add_option('-t', '--tests', dest='test_count', type='int',
help='Number of queries per run.')
parser.add_option('-x', '--select_mode', dest='select_mode',
default='weighted',
help='Selection algorithm to use (weighted, random, chunk)')
parser.add_option('-s', '--num_servers', dest='num_servers',
type='int', help='Number of nameservers to include in test')
parser.add_option('-S', '--no_secondary', dest='no_secondary',
action='store_true', help='Disable secondary servers')
parser.add_option('-O', '--only', dest='only',
action='store_true',
help='Only test nameservers passed as arguments')
(cli_options, args) = parser.parse_args()
(opt, primary_ns, secondary_ns) = config.ProcessConfiguration(cli_options)
for arg in args:
if '.' in arg:
primary_ns.append((arg, arg))
include_internal = True
if opt.only:
include_internal = False
if not primary_ns:
print 'If you use --only, you must provide nameservers to use.'
sys.exit(1)
print('namebench %s - %s (%s) on %s' %
(VERSION, opt.import_file or opt.data_file, opt.select_mode,
datetime.datetime.now()))
print ('threads=%s tests=%s runs=%s timeout=%s health_timeout=%s servers=%s' %
(opt.thread_count, opt.test_count, opt.run_count, opt.timeout,
opt.health_timeout, opt.num_servers))
print '-' * 78
if opt.import_file:
importer = history_parser.HistoryParser()
history = importer.Parse(opt.import_file)
if history:
print '- Imported %s records from %s' % (len(history), opt.import_file)
else:
print '- Could not import anything from %s' % opt.import_file
sys.exit(2)
else:
history = None
nameservers = nameserver_list.NameServers(primary_ns, secondary_ns,
num_servers=opt.num_servers,
include_internal=include_internal,
timeout=opt.timeout,
health_timeout=opt.health_timeout)
cq = conn_quality.ConnectionQuality()
(intercepted, congestion) = cq.CheckConnectionQuality()
if intercepted:
print 'XXX[ OHNO! ]XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
print 'XX Someone upstream of this machine is doing evil things and XX'
print 'XX intercepting all outgoing nameserver requests. The results XX'
print 'XX of this program will be useless. Get your ISP to fix it. XX'
print 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
print ''
if congestion > 1:
nameservers.ApplyCongestionFactor(congestion)
if len(nameservers) > 1:
nameservers.thread_count = int(opt.thread_count)
nameservers.cache_dir = tempfile.gettempdir()
nameservers.FindAndRemoveUndesirables()
print ''
print 'Final list of nameservers to benchmark:'
print '-' * 78
for ns in nameservers.SortByFastest():
if ns.warnings:
add_text = '# ' + ', '.join(ns.warnings)
else:
add_text = ''
print ' %-16.16s %-18.18s %-4.4sms %s' % (ns.ip, ns.name, ns.check_duration, add_text)
print ''
bmark = benchmark.NameBench(nameservers,
run_count=opt.run_count,
test_count=opt.test_count)
if history:
bmark.CreateTests(history, select_mode=opt.select_mode)
else:
bmark.CreateTestsFromFile(opt.data_file, select_mode=opt.select_mode)
bmark.Run()
bmark.DisplayResults()
if opt.output_file:
print ''
print '* Saving detailed results to %s' % opt.output_file
bmark.SaveResultsToCsv(opt.output_file)
best = bmark.BestOverallNameServer()
nearest = [x for x in bmark.NearestNameServers(3) if x.ip != best.ip][0:2]
print ''
print 'Recommended Configuration (fastest + nearest):'
print '----------------------------------------------'
for ns in [best] + nearest:
if ns.warnings:
warning = '(%s)' % ', '.join(ns.warnings)
else:
warning = ''
print 'nameserver %-15.15s # %s %s' % (ns.ip, ns.name, warning)
|
Python
| 0
|
@@ -1129,17 +1129,17 @@
= '0.8.
-4
+5
'%0A%0Aif __
|
e7a41ed29f6ec097e19f4c9beec9821a2804585c
|
Add organizations.py
|
octokit/resources/organizations.py
|
octokit/resources/organizations.py
|
Python
| 0.000002
|
@@ -0,0 +1,97 @@
+# encoding: utf-8%0A%0A%22%22%22Methods for the Organizations API%0Ahttp://developer.github.com/v3/orgs/%0A%22%22%22%0A
|
|
337c48648f3a891642fc58c7161fdb48e705160f
|
add timer
|
timer.py
|
timer.py
|
Python
| 0.000008
|
@@ -0,0 +1,1922 @@
+# Copyright (c) 2015, Bartlomiej Puget %3Clarhard@gmail.com%3E%0A# All rights reserved.%0A#%0A# Redistribution and use in source and binary forms, with or without%0A# modification, are permitted provided that the following conditions are met:%0A#%0A# * Redistributions of source code must retain the above copyright notice,%0A# this list of conditions and the following disclaimer.%0A#%0A# * Redistributions in binary form must reproduce the above copyright notice,%0A# this list of conditions and the following disclaimer in the documentation%0A# and/or other materials provided with the distribution.%0A#%0A# * Neither the name of the Bartlomiej Puget nor the names of its%0A# contributors may be used to endorse or promote products derived from this%0A# software without specific prior written permission.%0A#%0A# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS %22AS IS%22%0A# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE%0A# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE%0A# DISCLAIMED. IN NO EVENT SHALL BARTLOMIEJ PUGET BE LIABLE FOR ANY DIRECT,%0A# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,%0A# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,%0A# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY%0A# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING%0A# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,%0A# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.%0A%0Afrom datetime import datetime%0A%0A%0Aclass Timer:%0A def __init__(self, message=None):%0A self.begin = None%0A self.message = message or %22::: timer :::%22%0A%0A def __enter__(self):%0A self.begin = datetime.now()%0A%0A def __exit__(self, exc_type, exc_value, exc_traceback):%0A end = datetime.now()%0A print(self.message, str(end - self.begin))%0A
|
|
cbde3323e790717fa593d75ca282e2875bb67dca
|
Add stacker ecs hook
|
stacker/hooks/ecs.py
|
stacker/hooks/ecs.py
|
Python
| 0
|
@@ -0,0 +1,1284 @@
+# A lot of this code exists to deal w/ the broken ECS connect_to_region%0A# function, and will be removed once this pull request is accepted:%0A# https://github.com/boto/boto/pull/3143%0Aimport logging%0A%0Alogger = logging.getLogger(__name__)%0A%0Afrom boto.regioninfo import get_regions%0Afrom boto.ec2containerservice.layer1 import EC2ContainerServiceConnection%0A%0A%0Adef regions():%0A return get_regions('ec2containerservice',%0A connection_cls=EC2ContainerServiceConnection)%0A%0A%0Adef connect_to_region(region_name, **kw_params):%0A for region in regions():%0A if region.name == region_name:%0A return region.connect(**kw_params)%0A return None%0A%0A%0Adef create_clusters(region, namespace, mappings, parameters, **kwargs):%0A %22%22%22 Creates ECS clusters.%0A%0A Expects a 'clusters' argument, which should contain a list of cluster%0A names to create.%0A %22%22%22%0A conn = connect_to_region(region)%0A try:%0A clusters = kwargs%5B'clusters'%5D%0A except KeyError:%0A logger.error(%22setup_clusters hook missing 'clusters' argument%22)%0A return False%0A%0A if isinstance(clusters, basestring):%0A clusters = %5Bclusters%5D%0A%0A for cluster in clusters:%0A logger.debug(%22Creating ECS cluster: %25s%22, cluster)%0A conn.create_cluster(cluster)%0A return True%0A
|
|
421b99ffb0cf84a8ccea0c4c0fe2496f895603a0
|
Add migration
|
bluebottle/initiatives/migrations/0011_auto_20190522_0931.py
|
bluebottle/initiatives/migrations/0011_auto_20190522_0931.py
|
Python
| 0.000002
|
@@ -0,0 +1,562 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.15 on 2019-05-22 07:31%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('initiatives', '0010_auto_20190521_0954'),%0A %5D%0A%0A operations = %5B%0A migrations.RemoveField(%0A model_name='initiativeplatformsettings',%0A name='facebook_at_work_url',%0A ),%0A migrations.RemoveField(%0A model_name='initiativeplatformsettings',%0A name='share_options',%0A ),%0A %5D%0A
|
|
51e516f260858e699ee828ac6fc91af39c67254c
|
Add script for automatically updating schemas
|
update-schemas.py
|
update-schemas.py
|
Python
| 0
|
@@ -0,0 +1,1701 @@
+#!/usr/bin/env python%0A%0Aimport os%0Aimport re%0Aimport sys%0Aimport subprocess as sp%0A%0A%0Adef get_schemas(pattern):%0A%0A cmd = %5B'git', 'grep', '--name-only'%5D%0A output = sp.check_output(cmd + %5Bpattern, '--', 'schemas'%5D).decode('utf8')%0A names = output.split()%0A print(names)%0A%0A dedupe = dict()%0A%0A for name in names:%0A version = re.findall(r'%5Cd%5C.%5Cd.%5Cd', name)%5B0%5D%0A basepath = name.split('-')%5B0%5D%0A if basepath in dedupe and dedupe%5Bbasepath%5D %3E version:%0A continue%0A%0A dedupe%5Bbasepath%5D = version%0A%0A return %5B'%7B%7D-%7B%7D.yaml'.format(x, y) for x,y in dedupe.items()%5D%0A%0A%0Adef update_version(string):%0A%0A groups = re.search(r'((%5Cd)%5C.(%5Cd)%5C.(%5Cd))', string).groups()%0A bumped = int(groups%5B2%5D) + 1%0A%0A new_version = '%7B%7D.%7B%7D.%7B%7D'.format(groups%5B1%5D, bumped, groups%5B3%5D)%0A return re.sub(r'((%5Cd)%5C.(%5Cd)%5C.(%5Cd))', new_version, string)%0A%0A%0Adef create_updated_schema(schema, pattern, new_pattern):%0A%0A name = os.path.splitext(os.path.basename(schema))%5B0%5D%0A updated = update_version(name)%0A new_schema = re.sub(name, updated, schema)%0A%0A with open(new_schema, 'w') as new_file:%0A with open(schema, 'r') as old_file:%0A for line in old_file:%0A line = line.replace(pattern, new_pattern)%0A line = line.replace(name, updated)%0A new_file.write(line)%0A%0A%0Adef main():%0A%0A if len(sys.argv) != 2:%0A name = os.path.basename(sys.argv%5B0%5D)%0A sys.stderr.write('USAGE: %7B%7D %3Cpattern%3E%5Cn'.format(name))%0A exit(1)%0A%0A pattern = sys.argv%5B1%5D%0A new_pattern = update_version(pattern)%0A schemas = get_schemas(pattern)%0A%0A for s in schemas:%0A create_updated_schema(s, pattern, new_pattern)%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
7e8ffcef7111fc3cd2f1d58831afb09741d9d8fc
|
Create my-calendar-i.py
|
Python/my-calendar-i.py
|
Python/my-calendar-i.py
|
Python
| 0.000177
|
@@ -0,0 +1,2013 @@
+# Time: O(n%5E2)%0A# Space: O(n)%0A%0A# Implement a MyCalendar class to store your events.%0A# A new event can be added if adding the event will not cause a double booking.%0A#%0A# Your class will have the method, book(int start, int end).%0A# Formally, this represents a booking on the half open interval %5Bstart, end),%0A# the range of real numbers x such that start %3C= x %3C end.%0A#%0A# A double booking happens when two events have some non-empty intersection%0A# (ie., there is some time that is common to both events.)%0A#%0A# For each call to the method MyCalendar.book,%0A# return true if the event can be added to the calendar successfully without causing a double booking.%0A# Otherwise, return false and do not add the event to the calendar.%0A#%0A# Your class will be called like this: MyCalendar cal = new MyCalendar(); MyCalendar.book(start, end)%0A# Example 1:%0A# MyCalendar();%0A# MyCalendar.book(10, 20); // returns true%0A# MyCalendar.book(15, 25); // returns false%0A# MyCalendar.book(20, 30); // returns true%0A# Explanation: %0A# The first event can be booked. The second can't because time 15 is already booked by another event.%0A# The third event can be booked, as the first event takes every time less than 20, but not including 20.%0A#%0A# Note:%0A# - The number of calls to MyCalendar.book per test case will be at most 1000.%0A# - In calls to MyCalendar.book(start, end), start and end are integers in the range %5B0, 10%5E9%5D.%0A %0Aclass MyCalendar(object):%0A%0A def __init__(self):%0A self.__calendar = %5B%5D%0A%0A%0A def book(self, start, end):%0A %22%22%22%0A :type start: int%0A :type end: int%0A :rtype: bool%0A %22%22%22%0A for i, j in self.__calendar:%0A if start %3C j and end %3E i:%0A return False%0A self.__calendar.append((start, end))%0A return True %0A%0A%0A# Your MyCalendar object will be instantiated and called as such:%0A# obj = MyCalendar()%0A# param_1 = obj.book(start,end)%0A
|
|
e1993d4d3c3199dce2be2b475a9236e95732a0f0
|
Verify computer executing the script is of OS X 10.6.1 or higher type
|
dodge.py
|
dodge.py
|
Python
| 0.000035
|
@@ -0,0 +1,1496 @@
+import platform%0A%0A%0Aclass OSXDodger(object):%0A allowed_version = %2210.12.1%22%0A%0A def __init__(self, applications_dir):%0A self.app_dir = applications_dir%0A%0A def load_applications(self):%0A %22%22%22%0A Read all applications in the %60/Applications/%60 dir%0A %22%22%22%0A pass%0A%0A def select_applications(self):%0A %22%22%22%0A Allow user to select an application they want%0A not to appear on the Dock%0A %22%22%22%0A pass%0A%0A def load_dodger_filer(self):%0A %22%22%22%0A Load the file to modify for the application%0A chosen by the user in %60select_applications%60%0A%0A The file to be loaded for is %60info.plist%60%0A %22%22%22%0A pass%0A%0A def dodge_application(self):%0A %22%22%22%0A Remive the application from the Dock%0A %22%22%22%0A pass%0A%0A @classmethod%0A def pc_is_macintosh(cls):%0A %22%22%22%0A Check if it is an %60Apple Computer%60 i.e a Mac%0A @return bool%0A %22%22%22%0A system = platform.system().lower()%0A sys_version = int((platform.mac_ver())%5B0%5D.replace(%22.%22, %22%22))%0A allowed_version = int(cls.allowed_version.replace(%22.%22, %22%22))%0A%0A if (system == %22darwin%22) and (sys_version %3E= allowed_version):%0A return True%0A else:%0A print(%22%5CnSorry :(%22)%0A print(%22FAILED. OsX-dock-dodger is only applicable to computers %22 +%0A %22running OS X %7B%7D or higher%22.format(cls.allowed_version))%0A return False%0A%0Adodge = OSXDodger(%22/Applications/%22)%0Adodge.pc_is_macintosh()%0A
|
|
e965147ef7bc89e6c8885d1521d92305604de6f8
|
add problem
|
others/find_array_index_of_sum.py
|
others/find_array_index_of_sum.py
|
Python
| 0.044376
|
@@ -0,0 +1,626 @@
+%22%22%22%0A%E9%A2%98%E7%9B%AE%EF%BC%9A%E7%BB%99%E5%AE%9A%E4%B8%80%E4%B8%AA%E6%95%B4%E6%95%B0%E6%95%B0%E7%BB%84%E5%92%8C%E4%B8%80%E4%B8%AA%E6%95%B4%E6%95%B0%EF%BC%8C%E8%BF%94%E5%9B%9E%E4%B8%A4%E4%B8%AA%E6%95%B0%E7%BB%84%E7%9A%84%E7%B4%A2%E5%BC%95%EF%BC%8C%E8%BF%99%E4%B8%A4%E4%B8%AA%E7%B4%A2%E5%BC%95%E6%8C%87%E5%90%91%E7%9A%84%E6%95%B0%E5%AD%97%E7%9A%84%E5%8A%A0%E5%92%8C%E7%AD%89%E4%BA%8E%E6%8C%87%E5%AE%9A%E7%9A%84%E6%95%B4%E6%95%B0%E3%80%82%E9%9C%80%E8%A6%81%E6%9C%80%E4%BC%98%E7%9A%84%E7%AE%97%E6%B3%95%EF%BC%8C%E5%88%86%E6%9E%90%E7%AE%97%E6%B3%95%E7%9A%84%E7%A9%BA%E9%97%B4%E5%92%8C%E6%97%B6%E9%97%B4%E5%A4%8D%E6%9D%82%E5%BA%A6%0A%22%22%22%0A%0Aif __name__ == %22__main__%22:%0A numbers = %5B1,2,3,4,5,6,7,8,9%5D%0A n = 10%0A%0A index_map = %7B%7D%0A supplment_map = %7B%7D%0A index = 0%0A for i in numbers:%0A index_map%5Bi%5D = index%0A supplment_map%5Bi%5D = n - i%0A index = index + 1%0A%0A result = %5B%5D%0A for k,v in index_map.items():%0A suppliment_element = supplment_map.get(k)%0A if suppliment_element is not None:%0A supp_index = index_map.get(suppliment_element)%0A if supp_index is not None:%0A result.append((v, supp_index))%0A%0A print(result)%0A
|
|
ef8b909beb4de8435c20ed0b45bca9478d476ed8
|
Add python script to get coordinates from the csv
|
geocode.py
|
geocode.py
|
Python
| 0.000001
|
@@ -0,0 +1,984 @@
+#! /bin/python3%0A%0Aimport csv%0Aimport time%0Afrom geopy.geocoders.googlev3 import GoogleV3%0A%0Ageocoder = GoogleV3(api_key=%22AIzaSyAy6XiyZG-6u99q-qacOz-dtT9ILbYzb-4%22)%0Awith open(%22../ReadingBusesOrig.csv%22) as cf:%0A with open(%22../out.csv%22, %22a%22) as cw:%0A reader = csv.DictReader(cf)%0A writer = csv.DictWriter(cw, %5B%22latitude%22, %22longitude%22, %22date%22%5D)%0A startrow = 0%0A for i in range(0, startrow):%0A row = reader%5Bi%5D%0A location = geocoder.geocode(row%5B'Place of Event'%5D, components=%7B%0A %22locality%22: %22Reading%22,%0A %22country%22: %22GB%22%0A %7D)%0A%0A print(%22Resolved Address: %22 + str(location.address))%0A print(%22Latitude: %22 + str(location.latitude))%0A print(%22Longitude: %22 + str(location.longitude))%0A print('%5Cn')%0A writer.writerow(%7B%0A %22latitude%22: location.latitude, %22longitude%22: location.longitude, %22date%22: row%5B'Accident Date'%5D%0A %7D)%0A time.sleep(0.2)%0A
|
|
bb928a0c0a4ddc11b05771e9eaa33f1058cc022a
|
Add pageset for ugamsolutions.com
|
tools/skp/page_sets/skia_ugamsolutions_desktop.py
|
tools/skp/page_sets/skia_ugamsolutions_desktop.py
|
Python
| 0
|
@@ -0,0 +1,1252 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.%0A# Use of this source code is governed by a BSD-style license that can be%0A# found in the LICENSE file.%0A# pylint: disable=W0401,W0614%0A%0A%0Afrom telemetry.page import page as page_module%0Afrom telemetry.page import page_set as page_set_module%0A%0A%0Aclass SkiaBuildbotDesktopPage(page_module.Page):%0A%0A def __init__(self, url, page_set):%0A super(SkiaBuildbotDesktopPage, self).__init__(%0A url=url,%0A page_set=page_set,%0A credentials_path='data/credentials.json')%0A self.user_agent_type = 'desktop'%0A self.archive_data_file = 'data/skia_ugamsolutions_desktop.json'%0A%0A def RunNavigateSteps(self, action_runner):%0A action_runner.NavigateToPage(self)%0A action_runner.Wait(15)%0A%0A%0Aclass SkiaUgamsolutionsDesktopPageSet(page_set_module.PageSet):%0A%0A %22%22%22 Pages designed to represent the median, not highly optimized web %22%22%22%0A%0A def __init__(self):%0A super(SkiaUgamsolutionsDesktopPageSet, self).__init__(%0A user_agent_type='desktop',%0A archive_data_file='data/skia_ugamsolutions_desktop.json')%0A%0A urls_list = %5B%0A # Why: for crbug.com/447291%0A 'http://www.ugamsolutions.com',%0A %5D%0A%0A for url in urls_list:%0A self.AddUserStory(SkiaBuildbotDesktopPage(url, self))%0A
|
|
68e5bdc3c3a8a59f820ea15e706e85e14f2a654b
|
Add mgmt cmd to fix bad loc-type references
|
corehq/apps/locations/management/commands/fix_loc_type_reference.py
|
corehq/apps/locations/management/commands/fix_loc_type_reference.py
|
Python
| 0
|
@@ -0,0 +1,2103 @@
+from optparse import make_option%0A%0Afrom django.core.management.base import BaseCommand%0A%0Afrom corehq.apps.locations.models import SQLLocation, LocationType%0A%0A%0Aclass Command(BaseCommand):%0A help = %22Make %22%0A%0A option_list = (%0A make_option('--dry_run',%0A action='store_true',%0A dest='dry_run',%0A default=False,%0A help='Just check what domains have problems'),%0A make_option('--noinput',%0A action='store_true',%0A dest='noinput',%0A default=False,%0A help='Skip important confirmation warnings.'),%0A )%0A%0A def handle(self, *args, **options):%0A domains = (SQLLocation.objects%0A .order_by('domain')%0A .distinct('domain')%0A .values_list('domain', flat=True))%0A for domain in domains:%0A if has_bad_location_types(domain):%0A print %22%7B%7D has bad location types%22.format(domain)%0A if not options%5B'dry_run'%5D:%0A if options%5B'noinput'%5D or raw_input(%22fix? (y/N)%22).lower() == 'y':%0A fix_domain(domain)%0A%0A%0Adef fix_domain(domain):%0A locs_w_bad_types = (SQLLocation.objects%0A .filter(domain=domain)%0A .exclude(location_type__domain=domain))%0A print %22found %7B%7D locs with bad types%22.format(locs_w_bad_types.count())%0A bad_types = LocationType.objects.filter(sqllocation__in=locs_w_bad_types).distinct()%0A assert domain not in bad_types.values_list('domain', flat=True)%0A%0A bad_to_good = %7B%7D%0A for bad_type in bad_types:%0A good_type = LocationType.objects.get(domain=domain, code=bad_type.code)%0A bad_to_good%5Bbad_type.code%5D = good_type%0A print %22successfully found corresponding loctypes on the domain for each misreferenced loctype%22%0A%0A for loc in locs_w_bad_types:%0A loc.location_type = bad_to_good%5Bloc.location_type.code%5D%0A loc.save()%0A%0A%0Adef has_bad_location_types(domain):%0A return (SQLLocation.objects%0A .filter(domain=domain)%0A .exclude(location_type__domain=domain)%0A .exists())%0A
|
|
50216cbe96f5b24f7de03e47e7b567f5ed32541b
|
Update forward compatibility horizon to 2020-12-20
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2020, 12, 19)
# Name of the env var holding an integer day offset added to the horizon above.
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
# Cached packed (year, month, day) integer; populated at import time by
# _update_forward_compatibility_date_number() below.
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
  """Recompute the cached date number used by `forward_compatible`.

  Args:
    date_to_override: Optional `datetime.date`. When given, it replaces the
      compiled-in horizon (used by `forward_compatibility_horizon`).  When
      absent, the horizon plus the optional env-var day delta is used.
  """
  global _FORWARD_COMPATIBILITY_DATE_NUMBER

  if date_to_override:
    effective_date = date_to_override
  else:
    effective_date = _FORWARD_COMPATIBILITY_HORIZON
    # The env-var delta only applies when no explicit override was given.
    delta_env = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
    if delta_env:
      effective_date += datetime.timedelta(days=int(delta_env))

  # Refuse to move the horizon backwards; keep the current cached value.
  if effective_date < _FORWARD_COMPATIBILITY_HORIZON:
    logging.warning("Trying to set the forward compatibility date to the past"
                    " date %s. This will be ignored by TensorFlow." %
                    (effective_date))
    return
  _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
      effective_date.year, effective_date.month, effective_date.day)
# Prime the cached date number once at import time.
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=08, date=01):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 08, 02):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Yields:
Nothing.
"""
try:
_update_forward_compatibility_date_number(datetime.date(year, month, day))
yield
finally:
_update_forward_compatibility_date_number()
|
Python
| 0
|
@@ -1449,10 +1449,10 @@
12,
-19
+20
)%0A_F
|
502c994fa469bcd40cf2216c169b827db220cb6b
|
Disable spaceport benchmark on linux.
|
tools/perf/benchmarks/spaceport.py
|
tools/perf/benchmarks/spaceport.py
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs spaceport.io's PerfMarks benchmark."""
import logging
import os
from telemetry import benchmark
from telemetry import page as page_module
from telemetry.core import util
from telemetry.page import page_set
from telemetry.page import page_test
from telemetry.value import list_of_scalar_values
from telemetry.value import scalar
# Human-readable descriptions of each PerfMarks sub-test, keyed by the chart
# name the suite reports; attached to the emitted values for display.
DESCRIPTIONS = {
    'canvasDrawImageFullClear':
        'Using a canvas element to render. Bitmaps are blitted to the canvas '
        'using the "drawImage" function and the canvas is fully cleared at '
        'the beginning of each frame.',
    'canvasDrawImageFullClearAlign':
        'Same as canvasDrawImageFullClear except all "x" and "y" values are '
        'rounded to the nearest integer. This can be more efficient on '
        'translate on certain browsers.',
    'canvasDrawImagePartialClear':
        'Using a canvas element to render. Bitmaps are blitted to the canvas '
        'using the "drawImage" function and pixels drawn in the last frame '
        'are cleared to the clear color at the beginning of each frame. '
        'This is generally slower on hardware accelerated implementations, '
        'but sometimes faster on CPU-based implementations.',
    'canvasDrawImagePartialClearAlign':
        'Same as canvasDrawImageFullClearAlign but only partially clearing '
        'the canvas each frame.',
    'css2dBackground':
        'Using div elements that have a background image specified using CSS '
        'styles. These div elements are translated, scaled, and rotated using '
        'CSS-2D transforms.',
    'css2dImg':
        'Same as css2dBackground, but using img elements instead of div '
        'elements.',
    'css3dBackground':
        'Same as css2dBackground, but using CSS-3D transforms.',
    # Fixed: previously read 'Same as css2dImage but using CSS-3D tranforms.'
    # ('css2dImage' is not a chart name, and 'tranforms' was a typo).
    'css3dImg':
        'Same as css2dImg, but using CSS-3D transforms.',
}
class _SpaceportMeasurement(page_test.PageTest):
  """Drives spaceport.io's PerfMarks suite in a tab and records its scores."""

  def __init__(self):
    super(_SpaceportMeasurement, self).__init__()

  def CustomizeBrowserOptions(self, options):
    # Disable vsync so rendering throughput is not capped at the display rate.
    options.AppendExtraBrowserArgs('--disable-gpu-vsync')

  def ValidateAndMeasurePage(self, page, tab, results):
    """Runs the suite and reports one scalar per sub-test plus a summary list.

    The page reports each result through console.log as 'chart.trace: value',
    so console.log is monkey-patched to collect them into window.__results.
    """
    tab.WaitForJavaScriptExpression(
        '!document.getElementById("start-performance-tests").disabled', 60)
    # Fixed: the guard below used to be 'if (!key_val.length == 2) return;',
    # which due to operator precedence never fires ('!length' is a boolean,
    # never equal to 2), letting unrelated console lines pollute __results
    # and confuse the result-count polling loop.
    tab.ExecuteJavaScript("""
        window.__results = {};
        window.console.log = function(str) {
          if (!str) return;
          var key_val = str.split(': ');
          if (key_val.length != 2) return;
          __results[key_val[0]] = key_val[1];
        };
        document.getElementById('start-performance-tests').click();
        """)
    num_results = 0
    num_tests_in_spaceport = 24
    while num_results < num_tests_in_spaceport:
      # Poll until another sub-test has reported; allow 3 minutes per step.
      tab.WaitForJavaScriptExpression(
          'Object.keys(window.__results).length > %d' % num_results, 180)
      num_results = tab.EvaluateJavaScript(
          'Object.keys(window.__results).length')
      logging.info('Completed test %d of %d' %
                   (num_results, num_tests_in_spaceport))

    # NOTE(review): eval() on page-produced JSON is risky; json.loads would be
    # the safer parser here -- confirm before changing, as it alters behavior
    # for non-dict payloads.
    result_dict = eval(tab.EvaluateJavaScript(
        'JSON.stringify(window.__results)'))

    for key in result_dict:
      chart, trace = key.split('.', 1)
      results.AddValue(scalar.ScalarValue(
          results.current_page, '%s.%s' % (chart, trace),
          'objects (bigger is better)', float(result_dict[key]),
          important=False, description=DESCRIPTIONS.get(chart)))

    results.AddValue(list_of_scalar_values.ListOfScalarValues(
        results.current_page, 'Score', 'objects (bigger is better)',
        [float(x) for x in result_dict.values()],
        description='Combined score for all parts of the spaceport benchmark.'))
# crbug.com/166703: This test frequently times out on Windows.
@benchmark.Disabled('mac', 'win')
class Spaceport(benchmark.Benchmark):
  """spaceport.io's PerfMarks benchmark.

  http://spaceport.io/community/perfmarks

  Performs three animations (rotate, translate, scale) with a variety of
  rendering methods (css, webgl, canvas, etc) and reports how many objects
  can be animated simultaneously while still achieving 30 FPS.
  """
  test = _SpaceportMeasurement

  @classmethod
  def Name(cls):
    return 'spaceport'

  def CreatePageSet(self, options):
    # The benchmark page is checked into the Chromium tree under third_party.
    src_root = util.GetChromiumSrcDir()
    spaceport_dir = os.path.join(
        src_root, 'chrome', 'test', 'data', 'third_party', 'spaceport')
    stories = page_set.PageSet(file_path=spaceport_dir)
    stories.AddUserStory(
        page_module.Page('file://index.html', stories, stories.base_dir))
    return stories
|
Python
| 0.000001
|
@@ -3907,16 +3907,70 @@
indows.%0A
+# crbug.com/469224: Test is failing on linux as well.%0A
@benchma
@@ -3993,16 +3993,25 @@
', 'win'
+, 'linux'
)%0Aclass
|
0ca7ec8da8fffbfe057038e832bb12a33384c07b
|
add date format to date column in ctable mapping
|
custom/apps/gsid/ctable_mappings.py
|
custom/apps/gsid/ctable_mappings.py
|
from ctable.fixtures import CtableMappingFixture
from ctable.models import ColumnDef, KeyMatcher
class PatientSummaryMapping(CtableMappingFixture):
    """Ctable mapping that flattens the gsid/patient_summary couch view."""
    name = 'patient_summary'
    domains = ['gsid']
    couch_view = 'gsid/patient_summary'
    schedule_active = True

    @property
    def columns(self):
        """Column definitions, in output order, for the patient summary table."""
        def key_col(col_name, index, col_type="string"):
            # Most columns are components of the emitted view key.
            return ColumnDef(name=col_name, data_type=col_type,
                             value_source="key", value_index=index)

        leading = [
            key_col("domain", 0),
            key_col("disease_name", 1),
            key_col("test_version", 2),
            key_col("country", 3),
            key_col("province", 4),
            key_col("district", 5),
            key_col("clinic", 6),
            key_col("gender", 7),
            key_col("date", 8, col_type="date"),
            key_col("diagnosis", 9),
        ]
        # Age is the only column sourced from the reduced view value.
        aggregated = [
            ColumnDef(name="age", data_type="integer", value_source="value",
                      value_attribute="sum"),
        ]
        trailing = [
            key_col("lot_number", 10, col_type="integer"),
            key_col("gps", 11),
            key_col("gps_country", 12),
            key_col("gps_province", 13),
            key_col("gps_district", 14),
        ]
        return leading + aggregated + trailing
|
Python
| 0.000001
|
@@ -1167,16 +1167,40 @@
_index=8
+, date_format=%22%25Y-%25m-%25d%22
),%0A
|
5d6dabcad4f2467f07765f1e28752b5cbba61d53
|
add xfailing test for #693
|
test/util/test_options.py
|
test/util/test_options.py
|
Python
| 0
|
@@ -0,0 +1,686 @@
+from __future__ import absolute_import, division, print_function%0A%0Aimport os%0Aimport pytest%0A%0Afrom dials.util.options import OptionParser%0Afrom dials.util.options import flatten_datablocks%0A%0Apytestmark = pytest.mark.skipif(%0A not os.access('/dls/i04/data/2019/cm23004-1/20190109/Eiger', os.R_OK),%0A reason='Test images not available')%0A%0A@pytest.mark.xfail%0Adef test_not_master_h5():%0A data_h5 = '/dls/i04/data/2019/cm23004-1/20190109/Eiger/gw/Thaum/Thau_4/Thau_4_1_000001.h5'%0A parser = OptionParser(read_datablocks=True, read_datablocks_from_images=True)%0A params, options = parser.parse_args(%5Bdata_h5%5D)%0A datablocks = flatten_datablocks(params.input.datablock)%0A assert len(datablocks) == 0%0A
|
|
00bba22c2fb62b378efb40d86ff633881c55991d
|
Add tests
|
tests/test_commit_view.py
|
tests/test_commit_view.py
|
Python
| 0.000001
|
@@ -0,0 +1,2624 @@
+import os%0Afrom textwrap import dedent%0A%0Aimport sublime%0A%0Afrom unittesting import DeferrableTestCase%0Afrom GitSavvy.tests.mockito import unstub, when%0Afrom GitSavvy.tests.parameterized import parameterized as p%0A%0Afrom GitSavvy.core.commands.commit import extract_commit_message, GsCommitCommand%0A%0A%0Aexamples = %5B%0A (%0A dedent(%22%22%22%5C%0A%0A %22%22%22.rstrip()),%0A %22%22%0A ),%0A (%0A dedent(%22%22%22%5C%0A%0A ## To make a commit, ...%0A %22%22%22.rstrip()),%0A %22%22%0A ),%0A (%0A dedent(%22%22%22%5C%0A The subject%0A ## To make a commit, ...%0A %22%22%22.rstrip()),%0A %22The subject%22%0A ),%0A (%0A dedent(%22%22%22%5C%0A The subject%0A b%0A c%0A d%0A ## To make a commit, ...%0A %22%22%22.rstrip()),%0A dedent(%22%22%22%5C%0A The subject%0A b%0A c%0A d%0A %22%22%22.rstrip())%0A ),%0A%0A%5D%0A%0A%0Aclass TestExtractCommitMessage(DeferrableTestCase):%0A @classmethod%0A def setUpClass(cls):%0A sublime.run_command(%22new_window%22)%0A cls.window = sublime.active_window()%0A s = sublime.load_settings(%22Preferences.sublime-settings%22)%0A s.set(%22close_windows_when_empty%22, False)%0A%0A @classmethod%0A def tearDownClass(self):%0A self.window.run_command('close_window')%0A%0A def tearDown(self):%0A unstub()%0A%0A @p.expand(examples)%0A def test_a(self, VIEW_CONTENT, output):%0A view = self.window.new_file()%0A self.addCleanup(view.close)%0A%0A view.set_syntax_file(%22Packages/GitSavvy/syntax/make_commit.sublime-syntax%22)%0A view.run_command('append', %7B'characters': VIEW_CONTENT%7D)%0A view.set_scratch(True)%0A%0A self.assertEqual(output, extract_commit_message(view).strip())%0A%0A def test_basic_default_commit_view(self):%0A view = self.window.new_file()%0A self.addCleanup(view.close)%0A exists = os.path.exists%0A when(os.path).exists(...).thenAnswer(exists)%0A when(os.path).exists(%22/foo%22).thenReturn(True)%0A when(GsCommitCommand).git(%22diff%22, ...).thenReturn(dedent(%22%22%22%5C%0A diff --git a/bar/test.txt b/bar/test.txt%0A index 
9303f2c..5a9ce64 100644%0A --- a/bar/test.txt%0A +++ b/bar/test.txt%0A @@ -1,14 +1,22 @@%0A This is a diff%0A %22%22%22.rstrip()))%0A%0A self.window.run_command(%22gs_commit%22, %7B%22repo_path%22: %22/foo%22%7D)%0A yield self.window.active_view().name() == %22COMMIT: foo%22%0A%0A commit_view = self.window.active_view()%0A self.assertTrue(commit_view.find_by_selector(%22meta.dropped.git.commit%22))%0A self.assertTrue(commit_view.find_by_selector(%22git-savvy.diff%22))%0A%0A self.assertEquals(%22%22, extract_commit_message(commit_view).rstrip())%0A
|
|
c972873f6fd1c85776101ecca16fa8ae48c48c14
|
Fix movie_queue unit test again
|
tests/test_movie_queue.py
|
tests/test_movie_queue.py
|
from flexget.plugins.filter.movie_queue import queue_add, queue_get
from tests import FlexGetBase
class TestMovieQueue(FlexGetBase):
    """Exercise the movie_queue plugin actions: accept, add, remove, forget."""

    __yaml__ = """
        templates:
          global:
            mock:
              - {title: 'MovieInQueue', imdb_id: 'tt1931533', tmdb_id: 603, movie_name: MovieInQueue}
            accept_all: yes
            seen: local
        tasks:
          movie_queue_accept:
            movie_queue: accept
          movie_queue_add:
            movie_queue: add
          movie_queue_add_properties:
            movie_queue:
              action: add
              quality: 720p
          movie_queue_remove:
            movie_queue: remove
          movie_queue_forget:
            movie_queue: forget
    """

    def test_movie_queue_accept(self):
        """A queued movie is accepted exactly once; the seen plugin blocks repeats."""
        queue_add(title=u'MovieInQueue', imdb_id=u'tt1931533', tmdb_id=603)
        self.execute_task('movie_queue_accept')
        assert len(self.task.entries) == 1
        entry = self.task.entries[0]
        assert entry.get('imdb_id', eval_lazy=False) == 'tt1931533'
        assert entry.get('tmdb_id', eval_lazy=False) == 603
        # Running the task again must not accept the same movie a second time.
        self.execute_task('movie_queue_accept')
        assert len(self.task.entries) == 0, 'Movie should only be accepted once'

    def test_movie_queue_add(self):
        """The add action queues the mocked movie with the default 'any' quality."""
        self.execute_task('movie_queue_add')
        assert len(self.task.entries) == 1
        queue = queue_get()
        assert len(queue) == 1
        entry = queue[0]
        assert entry.imdb_id == 'tt1931533'
        assert entry.tmdb_id == 603
        assert entry.quality == 'any'

    def test_movie_queue_add_properties(self):
        """The add action honours an explicit quality requirement."""
        self.execute_task('movie_queue_add_properties')
        assert len(self.task.entries) == 1
        queue = queue_get()
        assert len(queue) == 1
        entry = queue[0]
        assert entry.imdb_id == 'tt1931533'
        # Fix: the mocked movie's tmdb_id is 603 (see __yaml__ mock above),
        # not 86838 as the previous assertion claimed.
        assert entry.tmdb_id == 603
        assert entry.quality == '720p'

    def test_movie_queue_remove(self):
        """The remove action drops the matching movie and keeps unrelated entries."""
        queue_add(title=u'MovieInQueue', imdb_id=u'tt1931533', tmdb_id=603)
        queue_add(title=u'KeepMe', imdb_id=u'tt1933533', tmdb_id=604)
        self.execute_task('movie_queue_remove')
        assert len(self.task.entries) == 1
        queue = queue_get()
        assert len(queue) == 1
        entry = queue[0]
        assert entry.imdb_id == 'tt1933533'
        assert entry.tmdb_id == 604

    def test_movie_queue_forget(self):
        """The forget action clears the downloaded flag but keeps the queue entry."""
        queue_add(title=u'MovieInQueue', imdb_id=u'tt1931533', tmdb_id=603)
        self.execute_task('movie_queue_accept')
        assert len(queue_get(downloaded=True)) == 1
        self.execute_task('movie_queue_forget')
        assert not queue_get(downloaded=True)
        assert len(queue_get()) == 1
|
Python
| 0.000006
|
@@ -1908,13 +1908,11 @@
==
-86838
+603
%0A
|
d1eac6f370f4a06151870be25cb362370d9ec53d
|
Add salt/utils/cli.py
|
salt/utils/cli.py
|
salt/utils/cli.py
|
Python
| 0
|
@@ -0,0 +1,2681 @@
+# -*- coding: utf-8 -*-%0A'''%0AFunctions used for CLI argument handling%0A'''%0A%0A# Import python libs%0Aimport re%0Aimport yaml%0A%0A# Import salt libs%0Afrom salt._compat import string_types, integer_types%0A%0A#KWARG_REGEX = re.compile(r'%5E(%5B%5E%5Cd%5CW%5D%5B%5Cw-%5D*)=(?!=)(.*)$', re.UNICODE) # python 3%0AKWARG_REGEX = re.compile(r'%5E(%5B%5E%5Cd%5CW%5D%5B%5Cw-%5D*)=(?!=)(.*)$')%0A%0A%0Adef parse_cli(args):%0A '''%0A Parse out the args and kwargs from a list of CLI args%0A '''%0A _args = %5B%5D%0A _kwargs = %7B%7D%0A for arg in args:%0A if isinstance(arg, string_types):%0A arg_name, arg_value = parse_kwarg(arg)%0A if arg_name:%0A _kwargs%5Barg_name%5D = yamlify_arg(arg_value)%0A else:%0A _args.append(yamlify_arg(arg))%0A return _args, _kwargs%0A%0A%0Adef parse_kwarg(string_):%0A '''%0A Parses the string and looks for the following kwarg format:%0A%0A %22%7Bargument name%7D=%7Bargument value%7D%22%0A%0A For example: %22my_message=Hello world%22%0A%0A Returns the kwarg name and value, or (None, None) if the regex was not%0A matched.%0A '''%0A try:%0A return KWARG_REGEX.match(string_).groups()%0A except AttributeError:%0A return None, None%0A%0A%0Adef yamlify_arg(arg):%0A '''%0A yaml.safe_load the arg unless it has a newline in it.%0A '''%0A if not isinstance(arg, string_types):%0A return arg%0A try:%0A original_arg = str(arg)%0A if isinstance(arg, string_types):%0A if '#' in arg:%0A # Don't yamlify this argument or the '#' and everything after%0A # it will be interpreted as a comment.%0A return arg%0A if '%5Cn' not in arg:%0A arg = yaml.safe_load(arg)%0A print('arg = %7B0%7D'.format(arg))%0A if isinstance(arg, dict):%0A # dicts must be wrapped in curly braces%0A if (isinstance(original_arg, string_types) and%0A not original_arg.startswith('%7B')):%0A return original_arg%0A else:%0A return arg%0A elif isinstance(arg, (list, float, integer_types, string_types)):%0A # yaml.safe_load will load '%7C' as '', don't let it do that.%0A if arg == '' and original_arg 
in ('%7C',):%0A return original_arg%0A # yaml.safe_load will treat '#' as a comment, so a value of '#'%0A # will become None. Keep this value from being stomped as well.%0A elif arg is None and original_arg.strip().startswith('#'):%0A return original_arg%0A else:%0A return arg%0A else:%0A # we don't support this type%0A return original_arg%0A except Exception:%0A # In case anything goes wrong...%0A return original_arg%0A
|
|
13e6f6967298173d4979c1bc4eb9d7ec6b1f9354
|
add a wrapper, not finished yet
|
gpudb.py
|
gpudb.py
|
Python
| 0
|
@@ -0,0 +1,2353 @@
+#! /usr/bin/python%0A%0Aimport sys%0Aimport os%0Aimport shutil%0A%0Adef dbHelp():%0A print %22Command:%22%0A print %22%5Ctcreate DBName: create the database%22%0A print %22%5Ctdelete DBName: delete the database%22%0A print %22%5Ctlist: list the table infomation in the database%22%0A print %22%5Cttranslate SQL: translate SQL into CUDA file%22%0A print %22%5Ctexecute SQL: translate and execute given SQL on GPU%22%0A print %22%5Ctload TableName data: load data into the given table%22%0A print %22%5Ctexit%22%0A%0Adef dbCreate(dbName):%0A%0A ret = 0%0A dbTop = %22database%22%0A%0A if not os.path.exists(dbTop):%0A os.makedirs(dbTop)%0A%0A dbPath = dbTop + %22/%22 + dbName%0A%0A if os.path.exists(dbPath):%0A return -1%0A%0A os.makedirs(dbPath)%0A%0A cmd = 'python XML2MapReduce/main.py ' + schemaFile%0A ret = os.system(cmd)%0A%0A if ret !=0 :%0A exit(-1)%0A%0A cmd = 'make -C src/GPUCODE/ loader &%3E /dev/null'%0A ret = os.system(cmd)%0A%0A if ret != 0:%0A exit(-1)%0A%0A cmd = 'cp src/GPUCODE/gpuDBLoader ' + dbPath%0A ret = os.system(cmd)%0A%0A if ret != 0:%0A exit(-1)%0A%0A return 0%0A%0Adef dbDelete(dbName):%0A%0A dbTop = %22database%22%0A%0A dbPath = dbTop + %22/%22 + dbName%0A if os.path.exists(dbPath):%0A shutil.rmtree(dbPath)%0A%0Aif len(sys.argv) != 2:%0A print %22./gpudb.py schemaFile%22%0A exit(-1)%0A%0AschemaFile = sys.argv%5B1%5D%0A%0Awhile 1:%0A ret = 0%0A dbCreated = 0%0A dbName = %22%22%0A%0A cmd = raw_input(%22%3E%22)%0A cmdA = cmd.lstrip().rstrip().split()%0A%0A if len(cmdA) == 0:%0A continue%0A%0A if cmdA%5B0%5D.upper() == %22HELP%22:%0A dbHelp()%0A%0A elif cmdA%5B0%5D.upper() == %22?%22:%0A dbHelp()%0A%0A elif cmdA%5B0%5D.upper() == %22EXIT%22:%0A break%0A%0A elif cmdA%5B0%5D.upper() == %22CREATE%22:%0A%0A if dbCreated !=0:%0A print %22Already created database. 
Delete first.%22%0A continue%0A%0A%0A if len(cmdA) !=2:%0A print %22usage: create DBName%22%0A%0A else:%0A ret = dbCreate(cmdA%5B1%5D.upper())%0A if ret == -1:%0A print cmdA%5B1%5D + %22 already exists%22%0A else:%0A dbCreated = 1%0A dbName = cmdA%5B1%5D.upper()%0A print cmdA%5B1%5D + %22 has been successfully created%22%0A %0A%0A%0A elif cmdA%5B0%5D.upper() == %22DELETE%22:%0A if len(cmdA) !=2:%0A print %22usage: delete DBName%22%0A%0A dbCreated = 0%0A dbDelete(cmdA%5B1%5D.upper())%0A%0A else:%0A print %22Unknown command%22%0A%0Aos.system(%22clear%22)%0A%0A
|
|
3a14fa67e4d35fa2865d20e08c03272cff7fcd4e
|
test wechat post
|
hello.py
|
hello.py
|
Python
| 0
|
@@ -0,0 +1,284 @@
+from flask import Flask, request%0A%0Aapp = Flask(__name__)%0A%0A%0A@app.route('/')%0Adef index():%0A return '%3Ch1%3EHello World!%3C/h1%3E'%0A%0A@app.route('/kindle_push', methods=%5B'POST'%5D)%0Adef kindle_push():%0A print(request.data)%0A return 'success'%0A%0A%0Aif __name__ == '__main__':%0A app.run(debug=True)
|
|
ffdcc9d523daa5a610de1534f3ed10f4d629aaf2
|
add 'inspect' filter
|
filter_plugins/inspect.py
|
filter_plugins/inspect.py
|
Python
| 0.000026
|
@@ -0,0 +1,377 @@
+class FilterModule(object):%0A ''' A comment '''%0A%0A def filters(self):%0A return %7B%0A 'inspect': self.inspect,%0A %7D%0A%0A def inspect(self, input_value, verbose=None):%0A if (type(input_value) is list) and verbose:%0A return %22%5B%7B%7D%5D%22.format(%22,%22.join(%5Bstr(type(i)) for i in input_value%5D))%0A else:%0A return str(type(input_value))%0A
|
|
737936b91a4a908a02338373b716161f487e44c9
|
add factories
|
skrill/tests/factories.py
|
skrill/tests/factories.py
|
Python
| 0.998819
|
@@ -0,0 +1,504 @@
+from decimal import Decimal%0Aimport random%0A%0Afrom django.contrib.auth.models import User%0A%0Aimport factory%0A%0Afrom skrill.models import PaymentRequest%0Afrom skrill.settings import ISO4217%0A%0A%0Aclass UserFactory(factory.Factory):%0A FACTORY_FOR = User%0A%0A username = factory.Sequence(lambda n: %22Test User %25s%22 %25 n)%0A%0A%0Aclass PaymentRequestFactory(factory.Factory):%0A FACTORY_FOR = PaymentRequest%0A%0A user = UserFactory()%0A amount = Decimal(random.randrange(10000))/100%0A currency = random.choice(ISO4217)%5B0%5D%0A%0A
|
|
5b54313e08ddf7176583f4776c804a482b111de1
|
add test
|
mysqlutil/test/test_privileges.py
|
mysqlutil/test/test_privileges.py
|
Python
| 0.000009
|
@@ -0,0 +1,2010 @@
+#!/usr/bin/env python%0A# coding: utf-8%0A%0Aimport logging%0Aimport unittest%0A%0Afrom pykit import mysqlutil%0Afrom pykit import ututil%0A%0Add = ututil.dd%0A%0Alogger = logging.getLogger(__name__)%0A%0A%0Aclass TestPrivileges(unittest.TestCase):%0A%0A def test_load_dump(self):%0A ks = (%0A %22ALL%22,%0A %22ALTER%22,%0A %22ALTER ROUTINE%22,%0A %22CREATE%22,%0A %22CREATE ROUTINE%22,%0A %22CREATE TABLESPACE%22,%0A %22CREATE TEMPORARY TABLES%22,%0A %22CREATE USER%22,%0A %22CREATE VIEW%22,%0A %22DELETE%22,%0A %22DROP%22,%0A %22EVENT%22,%0A %22EXECUTE%22,%0A %22FILE%22,%0A %22GRANT OPTION%22,%0A %22INDEX%22,%0A %22INSERT%22,%0A %22LOCK TABLES%22,%0A %22PROCESS%22,%0A %22PROXY%22,%0A %22REFERENCES%22,%0A %22RELOAD%22,%0A %22REPLICATION CLIENT%22,%0A %22REPLICATION SLAVE%22,%0A %22SELECT%22,%0A %22SHOW DATABASES%22,%0A %22SHOW VIEW%22,%0A %22SHUTDOWN%22,%0A %22SUPER%22,%0A %22TRIGGER%22,%0A %22UPDATE%22,%0A %22USAGE%22,%0A )%0A%0A for k in ks:%0A self.assertEqual((k,), mysqlutil.privileges%5Bk%5D)%0A self.assertEqual((k,), mysqlutil.privileges%5Bk.replace(' ', '_')%5D)%0A%0A shortcuts = %7B%0A 'replicator': (%0A 'REPLICATION CLIENT',%0A 'REPLICATION SLAVE',%0A 'SELECT',%0A ),%0A 'monitor': (%0A 'SELECT',%0A ),%0A 'business': (%0A 'CREATE',%0A 'DROP',%0A 'REFERENCES',%0A 'ALTER',%0A 'DELETE',%0A 'INDEX',%0A 'INSERT',%0A 'SELECT',%0A 'UPDATE',%0A ),%0A 'readwrite': (%0A 'DELETE',%0A 'INSERT',%0A 'SELECT',%0A 'UPDATE',%0A ),%0A %7D%0A%0A for k, expected in shortcuts.items():%0A self.assertEqual(expected, mysqlutil.privileges%5Bk%5D)%0A
|
|
860580119cc6ae9241e866275eccc7d71ae95e8c
|
Build fis assets on deploy updated.
|
fapistrano/plugins/fis.py
|
fapistrano/plugins/fis.py
|
Python
| 0
|
@@ -0,0 +1,504 @@
+# -*- coding: utf-8 -*-%0A%0Afrom fabric.api import show, run, env, cd%0A%0Afrom .. import signal%0A%0Adef init():%0A signal.register('deploy.updated', build_fis_assets)%0A%0Adef build_fis_assets():%0A with show('output'):%0A run('''%0A fis release --file %25(releases_path)s/%25(new_release)s/%25(fis_conf)s %5C%0A --dest %25(releases_path)s/%25(new_release)s/%25(fis_dest)s %5C%0A --root %25(releases_path)s/%25(new_release)s/%25(fis_source)s %5C%0A --optimize %5C%0A --pack %5C%0A --md5%0A ''' %25 env)%0A
|
|
471f5738e82fbb57c1028bdf2f1556edb0b074ed
|
Rename concurrent log handler
|
src/diamond/logging/handlers/concurrentloghandler.py
|
src/diamond/logging/handlers/concurrentloghandler.py
|
Python
| 0.000006
|
@@ -0,0 +1,384 @@
+# coding=utf-8%0A%0Afrom concurrent_log_handler import ConcurrentRotatingFileHandler as CRFH%0Aimport sys%0A%0A%0Aclass ConcurrentRotatingFileHandler(TRFH):%0A%0A def flush(self):%0A try:%0A super(ConcurrentRotatingFileHandler, self).flush()%0A except IOError:%0A sys.stderr.write('ConcurrentRotatingFileHandler received a IOError!')%0A sys.stderr.flush()%0A%0A%0A%0A%0A
|
|
910fb34ec6ef8544d4de5d8baf52fbd1c2c48027
|
Create Hello World Example
|
hello.py
|
hello.py
|
Python
| 0.000077
|
@@ -0,0 +1,160 @@
+from flask import Flask%0Aapp = Flask(__name__)%0A%0A@app.route('/')%0Adef hello_world():%0A return '%3Ch1%3EHello 391 team!%3Ch1%3E'%0A%0Aif __name__ == '__main__':%0A app.run()
|
|
1290fc572bfc862ab7c8ee579257e00486a3c921
|
add a really tiny wsgi
|
hrf.wsgi
|
hrf.wsgi
|
Python
| 0.000175
|
@@ -0,0 +1,39 @@
+from hrf.hrf import app as application%0A
|
|
c242589075aa4fc9af0a17461f235348b53284a1
|
Add sample code decorator
|
decorator/cli-sample-decorator.py
|
decorator/cli-sample-decorator.py
|
Python
| 0
|
@@ -0,0 +1,392 @@
+#!/usr/bin/env python%0A%0Aclass Deco(object):%0A%0A def __init__(self, func):%0A self.func = func%0A%0A def __call__(self, *args, **kwargs):%0A result = self.func(*args, **kwargs)%0A # multiple it by itself%0A result = result * result%0A return result%0A%0A@Deco%0Adef process(x=0, y=0):%0A return x+y%0A%0Aprint process(1, 1) # 4%0Aprint process(2, 2) # 16%0Aprint process(3, 3) # 36%0A
|
|
bf6cfcaa1ac20c1cb65d2d803f64f35026c099f3
|
Add Event class as well as connect and fire methods.
|
event.py
|
event.py
|
Python
| 0
|
@@ -0,0 +1,254 @@
+class Event:%0A def __init__(self):%0A self.listeners = %5B%5D%0A%0A def connect(self, listener):%0A self.listeners.append(listener)%0A%0A def fire(self, *args, **kwargs):%0A for listener in self.listeners:%0A listener(*args, **kwargs)
|
|
8d38a72548f3bfc62bac9f49d537fa2cdee7a6df
|
Add vanilla sequential face detection example.
|
face1.py
|
face1.py
|
Python
| 0
|
@@ -0,0 +1,1770 @@
+%22%22%22Sequential, vanilla face detection.%22%22%22%0A%0Aimport datetime%0Aimport sys%0Aimport cv2%0Aimport numpy as np%0A%0Aimport util%0Aimport cascade%0A%0ADEVICE = int(sys.argv%5B1%5D)%0AWIDTH = int(sys.argv%5B2%5D)%0AHEIGHT = int(sys.argv%5B3%5D)%0ADURATION = float(sys.argv%5B4%5D) # In seconds.%0A%0A# Create the OpenCV video capture object.%0Acap = cv2.VideoCapture(DEVICE)%0Acap.set(3, WIDTH)%0Acap.set(4, HEIGHT)%0A%0A# Monitor framerates.%0Aframerate = util.RateTicker((1,5,10))%0A%0A# Allow view window to be resizeable.%0Acv2.namedWindow('face detection', cv2.cv.CV_WINDOW_NORMAL)%0A%0Aend = datetime.datetime.now() + datetime.timedelta(seconds=DURATION)%0Awhile end %3E datetime.datetime.now():%0A%0A hello, image = cap.read()%0A%0A size = np.shape(image)%5B:2%5D%0A result = list()%0A for classi in cascade.classifiers:%0A rects = classi.detectMultiScale(%0A image,%0A scaleFactor=1.3,%0A minNeighbors=3,%0A minSize=tuple(%5Bx/20 for x in size%5D),%0A maxSize=tuple(%5Bx/2 for x in size%5D),%0A )%0A if len(rects):%0A for a,b,c,d in rects:%0A result.append((a,b,c,d, cascade.colors%5Bclassi%5D))%0A for x1, y1, x2, y2, color in result:%0A cv2.rectangle(%0A image,%0A (x1, y1), (x1+x2, y1+y2), %0A color=color,%0A thickness=2,%0A )%0A scale = 0.85%0A for org, text in (%0A ((20, int(30*scale)), '%25dx%25d'%25(size%5B1%5D, size%5B0%5D)),%0A ((20, int(60*scale)), '%25.2f, %25.2f, %25.2f'%25framerate.tick()),%0A ):%0A cv2.putText(%0A image,%0A text=text,%0A org=org,%0A fontFace=cv2.FONT_HERSHEY_SIMPLEX,%0A fontScale=scale,%0A color=(0,255,0),%0A thickness=2,%0A )%0A cv2.imshow('face detection', image)%0A cv2.waitKey(1)%0A%0A# The end.%0A
|
|
78ce22e302d749e56352e6ec8f592dca4e2287f6
|
Add IATISerializer
|
akvo/api/serializers.py
|
akvo/api/serializers.py
|
Python
| 0
|
@@ -0,0 +1,990 @@
+# -*- coding: utf-8 -*-%0A%0A# Akvo RSR is covered by the GNU Affero General Public License.%0A# See more details in the license.txt file located at the root folder of the Akvo RSR module.%0A# For additional details on the GNU license please see %3C http://www.gnu.org/licenses/agpl.html %3E.%0Afrom lxml import etree%0Aimport os%0A%0Afrom tastypie.serializers import Serializer%0A%0Aclass IATISerializer(Serializer):%0A%0A def from_etree(self, data):%0A %22%22%22 transform the iati-activity XML into %22tastypie compliant%22 XML using the 'iati-xslt.xml' stylesheet%0A %22%22%22%0A if data.tag == 'iati-activity':%0A with open(os.path.join(os.path.dirname(__file__),'xml', 'iati-xslt.xml'), 'r') as f:%0A iati_xslt = f.read()%0A etree_xml = etree.XML(iati_xslt)%0A etree_xslt = etree.XSLT(etree_xml)%0A tasty_xml = etree_xslt(data)%0A return self.from_etree(tasty_xml.getroot())%0A else:%0A return super(IATISerializer, self).from_etree(data)
|
|
18934774b331dfc6ad6b12cab005a97621f042e8
|
redundant phrase in docstring
|
examples/bicluster/plot_bicluster_newsgroups.py
|
examples/bicluster/plot_bicluster_newsgroups.py
|
"""
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets of words used more
often in those subsets of documents.
For each bicluster, its most common document categories and its ten
most important words get printed.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by
MiniBatchKMeans.
"""
from __future__ import print_function
print(__doc__)
from time import time
import re
from collections import Counter
import numpy as np
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
    """Tokenize ``doc``, mapping every number-like token to "#NUMBER"."""
    words = re.findall(u'(?u)\\b\\w\\w+\\b', doc)
    # A token is "number-like" when it starts with a digit or underscore.
    return ["#NUMBER" if word[0] in "0123456789_" else word
            for word in words]
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=5000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
print("")
print("Biclusters:")
print("-----------")
# Summarize each bicluster: its dominant document categories and the words
# whose tf-idf mass is most concentrated inside it relative to the rest of
# the corpus.
# Fix: use range instead of the Python-2-only xrange; with ~20 categories
# the materialized list on Python 2 is negligible, and the code now also
# runs on Python 3.
for cluster in range(len(categories)):
    n_rows, n_cols = cocluster.get_shape(cluster)
    cluster_docs, cluster_words = cocluster.get_indices(cluster)
    # Skip degenerate (empty) biclusters.
    if not len(cluster_docs) or not len(cluster_words):
        continue

    # categories: percentage share of the three most common true labels
    # among the documents in this bicluster
    cluster_categories = list(document_names[i] for i in cluster_docs)
    counter = Counter(cluster_categories)
    cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100,
                                               name)
                           for name, c in counter.most_common()[:3])

    # words: score = tf-idf mass inside the bicluster minus mass outside it;
    # the nine highest-scoring words are reported
    out_of_cluster_docs = cocluster.row_labels_ != cluster
    out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
    word_col = X[:, cluster_words]
    word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
                           word_col[out_of_cluster_docs, :].sum(axis=0))
    word_scores = word_scores.ravel()
    important_words = list(feature_names[cluster_words[i]]
                           for i in word_scores.argsort()[:-10:-1])

    print("bicluster {} : {} documents, {} words".format(
        str(cluster).zfill(2), n_rows, n_cols))
    print("categories : {}".format(cat_string))
    print("words : {}\n".format(', '.join(important_words)))
|
Python
| 0.998893
|
@@ -899,14 +899,8 @@
ure
-score
than
@@ -917,17 +917,17 @@
found by
-%0A
+
MiniBatc
|
6cbd414a670e4b25e8e1e8df9c0e32bee541478c
|
Create ember.py
|
ember.py
|
ember.py
|
Python
| 0.000002
|
@@ -0,0 +1,1057 @@
+from django import template%0A%0Aregister = template.Library()%0A%0A%0A@register.tag(name='linkto')%0Adef do_linkto(parser, token):%0A nodelist = parser.parse(('endlinkto',))%0A args = token.split_contents()%5B1:%5D%0A if not args:%0A raise template.TemplateSyntaxError(%22%7B0%7D tag requires at least one argument%22.format(token.contents.split()%5B0%5D))%0A parser.delete_first_token()%0A return LinkToNode(nodelist, *args)%0A%0A%0Aclass LinkToNode(template.Node):%0A def __init__(self, nodelist, *args):%0A self.args = args%0A self.nodelist = nodelist%0A%0A def render(self, context):%0A output = self.nodelist.render(context)%0A return %22%7B%7B#linkTo %22 + %22 %22.join(self.args) + '%7D%7D' + output + %22%7B%7B/linkTo%7D%7D%22%0A%0A%0A@register.tag(name='ember')%0Adef do_ember(parser, token):%0A tokens = token.split_contents()%0A args = %22 %22.join(tokens%5B1:%5D)%0A #parser.delete_first_token()%0A return EmberTagNode(args)%0A%0A%0Aclass EmberTagNode(template.Node):%0A def __init__(self, args):%0A self.args = args%0A%0A def render(self, context):%0A return %22%7B%7B%22 + self.args + %22%7D%7D%22%0A
|
|
6d80c414944bfdd6632b6068d98805a6f67353fb
|
add test script for interfaces
|
_examples/iface/test.py
|
_examples/iface/test.py
|
Python
| 0.000002
|
@@ -0,0 +1,401 @@
+# Copyright 2015 The go-python Authors. All rights reserved.%0A# Use of this source code is governed by a BSD-style%0A# license that can be found in the LICENSE file.%0A%0A## py2/py3 compat%0Afrom __future__ import print_function%0A%0Aimport iface%0A%0A### test docs%0Aprint(%22doc(iface): %25r%22 %25 (iface.__doc__,))%0A%0Aprint(%22t = iface.T()%22)%0At = iface.T()%0Aprint(%22t.F()%22)%0At.F()%0A%0Aprint(%22iface.CallIface(t)%22)%0Aiface.CallIface(t)%0A%0A
|
|
bb4f81f86d5b7bc5869da9f29cc5ea6013d4b4cf
|
Add plot speed analysis
|
analysis/plot_speeds.py
|
analysis/plot_speeds.py
|
Python
| 0.000002
|
@@ -0,0 +1,2246 @@
+%22%22%22Plots the speed readings.%22%22%22%0A%0A#from dateutil import parser as dateparser%0Afrom matplotlib import pyplot%0Aimport collections%0Aimport datetime%0Aimport json%0Aimport sys%0A%0A%0A%0Adef main():%0A %22%22%22Main function.%22%22%22%0A if sys.version_info.major %3C= 2:%0A print('Please use Python 3')%0A sys.exit(1)%0A if len(sys.argv) != 2:%0A print('Usage: %7B%7D %3Clog file%3E'.format(sys.argv%5B0%5D))%0A sys.exit(1)%0A%0A with open(sys.argv%5B1%5D) as file_:%0A lines = file_.readlines()%0A%0A first_stamp = timestamp(lines%5B0%5D)%0A speeds = collections.defaultdict(lambda: %5B%5D)%0A times = collections.defaultdict(lambda: %5B%5D)%0A acceleration_times = %5B%5D%0A not_moving_times = %5B%5D%0A run_times = %5B%5D%0A stop_times = %5B%5D%0A%0A for line in lines:%0A if 'speed_m_s' in line:%0A data = json.loads(line%5Bline.find('%7B'):%5D)%0A speeds%5Bdata%5B'device_id'%5D%5D.append(data%5B'speed_m_s'%5D)%0A times%5Bdata%5B'device_id'%5D%5D.append(timestamp(line) - first_stamp)%0A elif 'not moving according' in line:%0A not_moving_times.append(timestamp(line) - first_stamp)%0A elif 'Received run command' in line:%0A run_times.append(timestamp(line) - first_stamp)%0A elif 'Received stop command' in line or 'No waypoints, stopping' in line:%0A stop_times.append(timestamp(line) - first_stamp)%0A%0A for device, speeds in speeds.items():%0A pyplot.scatter(times%5Bdevice%5D, speeds)%0A pyplot.scatter(not_moving_times, %5B0.25%5D * len(not_moving_times), marker='x', color='blue')%0A pyplot.scatter(run_times, %5B0.3%5D * len(run_times), marker='x', color='green')%0A pyplot.scatter(stop_times, %5B0.35%5D * len(stop_times), marker='x', color='red')%0A pyplot.title(device)%0A pyplot.draw()%0A pyplot.show()%0A%0A%0Adef timestamp(line):%0A %22%22%22Returns the timestamp of a log line.%22%22%22%0A # 2016-08-22 09:57:28,343%0A year = int(line%5B0:4%5D)%0A month = int(line%5B5:7%5D)%0A day = int(line%5B8:10%5D)%0A hour = int(line%5B11:13%5D)%0A minute = 
int(line%5B14:16%5D)%0A seconds = int(line%5B17:19%5D)%0A comma = line.find(',')%0A millis = float(line%5Bcomma + 1:line.find(':', comma)%5D)%0A dt = datetime.datetime(year, month, day, hour, minute, seconds)%0A return dt.timestamp() + millis / 1000.%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
d19899981a77faa9c2017b6991c8e0c4ca4b7b97
|
Create db_migrate.py
|
fade/db_migrate.py
|
fade/db_migrate.py
|
Python
| 0.000003
|
@@ -0,0 +1,1007 @@
+#!flask/bin/python%0A%22%22%22%0A See LICENSE.txt file for copyright and license details.%0A%22%22%22%0A%0Aimport imp%0Afrom migrate.versioning import api%0Afrom app import db%0Afrom config import SQLALCHEMY_DATABASE_URI%0Afrom config import SQLALCHEMY_MIGRATE_REPO%0A%0Av = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)%0Amigration = SQLALCHEMY_MIGRATE_REPO + ('/versions/%2503d_migration.py' %25 (v+1))%0Atmp_module = imp.new_module('old_model')%0Aold_model = api.create_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)%0Aexec(old_model, tmp_module.__dict__)%0Ascript = api.make_update_script_for_model(SQLALCHEMY_DATABASE_URI,%0A SQLALCHEMY_MIGRATE_REPO,%0A tmp_module.meta, db.metadata)%0Aopen(migration, %22wt%22).write(script)%0Aapi.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)%0Av = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)%0Aprint('New migration saved as ' + migration)%0Aprint('Current database version: ' + str(v))%0A
|
|
1646916ce204b51a2900fd9edd4626e145f0d873
|
Create objects_impl.py
|
CoreConceptsPy/objects_impl.py
|
CoreConceptsPy/objects_impl.py
|
Python
| 0.000048
|
@@ -0,0 +1,1173 @@
+from coreconcepts import AObjects%0Afrom osgeo import gdal, gdal_array%0Aimport ogr, os, osr%0Afrom gdalconst import *%0Aimport numpy as np%0A%0A%0Aclass ArcShpObjects(AObjects):%0A @staticmethod%0A def getBounds (obj):%0A #Get geometery%0A geom = obj.GetGeometryRef()%0A env = geom.GetEnvelope()%0A #Return bounds in form (MinX, MaxX, MinY, MaxY)%0A return env%0A%0A @staticmethod%0A def hasRelation (obj1, obj2, relType):%0A #Get geometeries%0A assert relType in %5B'Intersects','Equals','Disjoint','Touches','Crosses','Within','Contains','Overlaps'%5D%0A geom1 = obj1.GetGeometryRef()%0A geom2 = obj2.GetGeometryRef()%0A #Possible relations are: Intersects, Equals, Disjoint, Touches, Crosses, Within, Contains, Overlaps%0A if getattr(geom1,relType)(geom2): #getattr is equivalent to geom1.relType%0A return True%0A else:%0A return False%0A %0A %0A%0A @staticmethod%0A def getProperty (obj, prop):%0A #Get index of property - note: index 13 is building name%0A index = obj.GetFieldIndex(prop)%0A #Return value as a string%0A value = obj.GetFieldAsString(index)%0A return value%0A
|
|
fa1e8f21347d41712b03817e4fac5a07a1a991eb
|
Add `btheadphone.py` to find headphones' BTADDR
|
bin/btheadphone.py
|
bin/btheadphone.py
|
Python
| 0
|
@@ -0,0 +1,1857 @@
+#!/usr/bin/env python%0Aimport dbus%0Afrom xml.etree import ElementTree%0A%0Abluez_bus = 'org.bluez'%0Abluez_root = '/org/bluez'%0A%0Aintro_iface = 'org.freedesktop.DBus.Introspectable'%0Aprops_iface = 'org.freedesktop.DBus.Properties'%0Adev_iface = 'org.bluez.Device1'%0A%0Aa2dp_uuid = '0000110d-0000-1000-8000-00805f9b34fb'%0Aavrcp_uuid = '0000110e-0000-1000-8000-00805f9b34fb'%0A%0Asvc_audio = int('200000', 16)%0Adev_major_av = int('400', 16)%0Adev_minor_headset = int('4', 16)%0Adev_minor_headphone = int('18', 16)%0A%0A%0Adef btobj(path):%0A return dbus.SystemBus().get_object(bluez_bus, path)%0A%0A%0A# from: https://unix.stackexchange.com/a/203678/2582 (kind of)%0Adef kids(obj):%0A prefix = obj.object_path.rstrip('/')%0A for node in intro(obj, 'node'):%0A yield %22%25s/%25s%22 %25 (prefix, node)%0A%0A%0Adef interfaces(obj):%0A return list(intro(obj, 'interface'))%0A%0A%0Adef intro(obj, tag=None):%0A iface = dbus.Interface(obj, intro_iface)%0A tree = ElementTree.fromstring(iface.Introspect())%0A if not tag:%0A return tree%0A return %5Be.attrib%5B'name'%5D for e in tree if e.tag == tag%5D%0A%0A%0Adef devprop(obj, name, default=None):%0A return prop(obj, dev_iface).get(name, default)%0A%0A%0Adef prop(obj, iface, attr=None):%0A attrs = obj.GetAll(iface, dbus_interface=props_iface)%0A if attr:%0A return attrs.get(attr)%0A return attrs%0A%0A%0Adef uuids(obj):%0A return devprop(obj, 'UUIDs', %5B%5D)%0A%0A%0Adef hasbits(n, bits):%0A return (n & bits) == bits%0A%0A%0Adef is_headphones(obj):%0A cls = devprop(obj, 'Class', 0)%0A if not hasbits(cls, svc_audio):%0A return False%0A if not hasbits(cls, dev_major_av):%0A return False%0A return hasbits(cls, dev_minor_headset) or hasbits(cls, dev_minor_headphone)%0A%0A%0Abluez = btobj(bluez_root)%0Ahci = btobj(list(kids(bluez))%5B0%5D)%0Adevs = list(kids(hci))%0Aheadphones = %5Bd for d in devs if is_headphones(btobj(d))%5D%0Abth = btobj(headphones%5B0%5D)%0Aprint(devprop(bth, 'Address'))%0A
|
|
e7b5f2a91cbf55666e29ec6ac4775e8cee1cc574
|
Add ob_atom class
|
maxwellbloch/ob_atom.py
|
maxwellbloch/ob_atom.py
|
Python
| 0.9994
|
@@ -0,0 +1,1547 @@
+# -*- coding: utf-8 -*-%0A%0Aimport json%0A%0Afrom maxwellbloch import ob_base%0A%0Aclass OBAtom(ob_base.OBBase):%0A%0A def __init__(self, num_states=0, energies=%5B%5D, decays=%5B%5D, fields=%5B%5D):%0A%0A self.num_states = num_states%0A self.energies = energies%0A self.decays = decays%0A%0A self.build_fields(fields)%0A%0A def __repr__(self):%0A return (%22Atom(num_states=%7B0%7D, %22 +%0A %22energies=%7B1%7D, %22 +%0A %22decays=%7B2%7D, %22 +%0A %22fields=%7B3%7D)%22).format(self.num_states,%0A self.energies,%0A self.decays,%0A self.fields)%0A%0A def add_field(self, field_dict):%0A self.fields.append(field.Field(**field_dict))%0A%0A def build_fields(self, field_dicts):%0A self.fields = %5B%5D%0A for f in field_dicts:%0A self.add_field(f)%0A return self.fields%0A%0A def to_json_str(self):%0A %22%22%22 Return a JSON string representation of the Atom object.%0A%0A Returns:%0A (string) JSON representation of the Atom object.%0A %22%22%22%0A%0A json_dict = %7B %22num_states%22: self.num_states,%0A %22energies%22: self.energies,%0A %22decays%22: self.decays,%0A %22fields%22: %5Bf.__dict__ for f in self.fields%5D %7D%0A%0A return json.dumps(json_dict)%0A%0A @classmethod%0A def from_json_str(cls, json_str):%0A json_dict = json.loads(json_str)%0A return cls(**json_dict)%0A%0Adef main():%0A%0A print(OBAtom())%0A%0Aif __name__ == '__main__':%0A status = main()
|
|
e6b1a590a47a806e0a717d94c2dc112c7671bb2c
|
add script to merge Aaron's unWISE astrometric offsets into the WISE tile file
|
py/legacyanalysis/wise-offsets.py
|
py/legacyanalysis/wise-offsets.py
|
Python
| 0
|
@@ -0,0 +1,1179 @@
+from astrometry.util.fits import fits_table%0A%0A'''%0AThis is a little script for merging Aaron's astrometric offsets into our%0AWISE tile file.%0A'''%0A%0A#/project/projectdirs/cosmo/work/wise/outputs/merge/neo4/fulldepth/fulldepth_neo4_index.fits%0A%0AW = fits_table('legacypipe/data/wise-tiles.fits')%0A%0Aoffsets = fits_table('fulldepth_neo4_index.fits')%0A%0Aoff1 = offsets%5Boffsets.band == 1%5D%0Aoff2 = offsets%5Boffsets.band == 2%5D%0A%0Aname_map_1 = dict(%5B(tile,i) for i,tile in enumerate(off1.coadd_id)%5D)%0AW.crpix_w1 = off1.crpix%5Bnp.array(%5Bname_map_1%5Btile%5D for tile in W.coadd_id%5D)%5D%0Ara = off1.ra %5Bnp.array(%5Bname_map_1%5Btile%5D for tile in W.coadd_id%5D)%5D%0Adec = off1.dec%5Bnp.array(%5Bname_map_1%5Btile%5D for tile in W.coadd_id%5D)%5D%0Adiff = np.mean(np.hypot(W.ra - ra, W.dec - dec))%0Aprint('Mean difference RA,Dec:', diff)%0A%0Aname_map_2 = dict(%5B(tile,i) for i,tile in enumerate(off2.coadd_id)%5D)%0AW.crpix_w2 = off2.crpix%5Bnp.array(%5Bname_map_2%5Btile%5D for tile in W.coadd_id%5D)%5D%0Ara = off2.ra %5Bnp.array(%5Bname_map_2%5Btile%5D for tile in W.coadd_id%5D)%5D%0Adec = off2.dec%5Bnp.array(%5Bname_map_2%5Btile%5D for tile in W.coadd_id%5D)%5D%0Adiff = np.mean(np.hypot(W.ra - ra, W.dec - dec))%0Aprint('Mean difference RA,Dec:', diff)%0A%0AW.writeto('wise-tiles.fits')%0A
|
|
fbebb94cd621f0d7b37dbe46272fe9a09a9905a7
|
Add cram package.
|
var/spack/packages/cram/package.py
|
var/spack/packages/cram/package.py
|
Python
| 0
|
@@ -0,0 +1,439 @@
+from spack import *%0A%0Aclass Cram(Package):%0A %22%22%22Cram runs many small MPI jobs inside one large MPI job.%22%22%22%0A homepage = %22https://github.com/scalability-llnl/cram%22%0A url = %22http://github.com/scalability-llnl/cram/archive/v1.0.1.tar.gz%22%0A%0A version('1.0.1', 'c73711e945cf5dc603e44395f6647f5e')%0A%0A depends_on(%22mpi%22)%0A%0A def install(self, spec, prefix):%0A cmake(%22.%22, *std_cmake_args)%0A make()%0A make(%22install%22)%0A
|
|
6b0aaaea18c755afee0bb3ccd16b27e74c18b3da
|
Make sure IPC is communicating in bytes
|
flexget/ipc.py
|
flexget/ipc.py
|
from __future__ import unicode_literals, division, absolute_import
import argparse
import contextlib
import logging
import socket
import threading
import time
from flexget.scheduler import BufferQueue
from flexget.utils import json
from flexget.utils.tools import console
log = logging.getLogger('ipc')
def remote_execute(port, task, options):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(None)
try:
s.connect(('127.0.0.1', port))
if isinstance(options, argparse.Namespace):
options = options.__dict__
options.pop('__parent__', None)
s.sendall(json.dumps({'task': task, 'options': options}) + '\n')
for line in s.makefile():
console(line.rstrip())
except socket.error as e:
log.error('Socket error while sending execution to daemon: %s' % e)
except Exception as e:
log.exception('Unhandled error while sending execution to daemone.')
finally:
s.close()
class IPCServer(threading.Thread):
def __init__(self, manager, port):
super(IPCServer, self).__init__()
self.daemon = True
self.manager = manager
self.host = '127.0.0.1'
self.port = port
self._shutdown = False
def run(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind((self.host, self.port))
except socket.error as e:
log.error('Error binding to socket %s' % e)
return
while not self._shutdown:
try:
s.settimeout(None)
s.listen(1)
conn, addr = s.accept()
conn.settimeout(60)
with contextlib.closing(conn) as conn:
buffer = b''
while True:
data = conn.recv(1024)
if not data:
log.error('Connection with client ended prematurely.')
continue
buffer += data
if b'\n' in data:
break
try:
args = json.loads(buffer)
except ValueError:
log.error('Error decoding ipc message.')
continue
bufferqueue = BufferQueue()
if self.manager.scheduler.run_queue.qsize() > 0:
conn.sendall('There is already a task executing. This task will execute next.\n')
log.info('Executing task `%s` for client at %s.' % (args['task'], addr))
self.manager.scheduler.execute(args['task'], options=args['options'], output=bufferqueue)
for line in bufferqueue:
conn.sendall(line)
except socket.error as e:
log.error('Socket error while communicating with client: %s' % e)
except Exception as e:
log.exception('Unhandled exception while communicating with client.')
def shutdown(self):
self._shutdown = True
|
Python
| 0.000002
|
@@ -737,16 +737,32 @@
le(line.
+decode('utf-8').
rstrip()
@@ -2868,16 +2868,32 @@
all(line
+.encode('utf-8')
)%0A
|
e7a5f511a87ce80b4db9939904163d19e58738c4
|
Add send game_version script
|
game_sent_game_version.py
|
game_sent_game_version.py
|
Python
| 0
|
@@ -0,0 +1,1669 @@
+#!/usr/bin/env python%0A#%0A# igcollect - Sent currently deployed game version.%0A#%0A# Copyright (c) 2016 InnoGames GmbH%0A#%0A%0Aimport os%0Aimport re%0Aimport socket%0A%0A# All possible revision files for all games.%0Arevision_file = %5B%0A '/www/ds/revision',%0A '/www/grepo/branch',%0A '/www/foe/version',%0A '/www/onyx/branch',%0A%5D%0A%0Aif __name__ == '__main__':%0A # Game name can be found from hostname, I think.%0A hostname = socket.gethostname()%0A%0A # Hostname suffix should be the shortcode.%0A game_shortcode = hostname.split('.')%5B-1%5D%0A%0A # Game market should be first two characters of hostname.%0A game_market = hostname%5B:2%5D%0A%0A # World name should be first characters followed by numbers.%0A regex = re.search('%5E%5Ba-z%5D+%5B0-9%5D+', hostname)%0A game_worldname = regex.group(0)%0A%0A # If all goes well, this variable will be set to revision.%0A revision = None%0A%0A for filename in revision_file:%0A if os.path.exists(filename):%0A with open(filename, 'r') as fh:%0A revision = fh.readlines()%5B0%5D%0A%0A # For Tribalwars the version starts after the first space%0A if filename.startswith('/www/ds'):%0A revision = revision.split(' ')%5B-1%5D%0A%0A # For Elvenar stip the 'v' from the beginning.%0A if filename.startswith('/www/onyx'):%0A if revision.startswith('v'):%0A revision = revision%5B1:%5D%0A%0A if revision:%0A # Replace all the dots.%0A revision = revision.replace('.', '_')%0A%0A # Print data if revision was valid.%0A print 'games.%7B%7D.%7B%7D.%7B%7D.version.%7B%7D'.format(%0A game_shortcode, game_market, game_worldname, revision)%0A%0A
|
|
cad82b50c8115cd92b6c1bbc1c18c9a825bad368
|
Add interactive graph with terms
|
graph.py
|
graph.py
|
Python
| 0
|
@@ -0,0 +1,1352 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0Aimport matplotlib%0Aimport matplotlib.pyplot as plt%0Afrom unicodecsv import UnicodeReader%0A%0ANEW_ANNOTATIONS = 'new_annotations.map'%0A%0A%0Adef graph():%0A readlist = %5B%5D%0A with open(NEW_ANNOTATIONS, 'r') as infile:%0A reader = UnicodeReader(infile)%0A readlist += %5Bre for re in reader%5D%0A%0A labels = %5B%5D%0A values = %5B%5D%0A wikipages = %5B%5D%0A for row in readlist:%0A labels.append(row%5B1%5D)%0A wikipages.append(row%5B2%5D)%0A values.append(float(row%5B3%5D))%0A%0A fig = plt.figure()%0A ax = fig.add_subplot(1, 1, 1, axisbg='white')%0A%0A line, = ax.plot(values, 'ro', picker=5)%0A%0A def onpick(event):%0A thisline = event.artist%0A xdata = thisline.get_xdata()%0A ydata = thisline.get_ydata()%0A ind = event.ind%0A%0A n = int(xdata%5Bind%5D)%0A v = ydata%5Bind%5D%0A%0A try:%0A ax.texts.pop()%0A except IndexError:%0A pass%0A%0A # text = matplotlib.text.Annotation(labels%5Bn%5D, xy=(n, v+0.005))%0A%0A # # ax.texts.remove(text)%0A # ax.add_artist(text)%0A%0A text = labels%5Bn%5D + '%5Cn' + wikipages%5Bn%5D%0A ax.annotate(text, xy=(n+0.005, v+0.005))%0A%0A print 'onpick points:', zip(xdata%5Bind%5D, ydata%5Bind%5D)%0A fig.canvas.draw()%0A%0A fig.canvas.mpl_connect('pick_event', onpick)%0A%0A plt.show()%0A%0Aif __name__ == '__main__':%0A graph()%0A
|
|
408af1320637063bacdd105cf00cdf71ef1ff9b2
|
Add labeled implementation
|
multi.py
|
multi.py
|
Python
| 0
|
@@ -0,0 +1,1112 @@
+from keras.layers import Input, LSTM, TimeDistributed, Dense, merge, Dropout%0Afrom keras.models import Model%0A%0Avocab_size = 200%0Aseq_len = 25%0Alabel_size = 10%0Abatch_size = 50%0Alstm_size = 512%0A%0Achar_input = Input(batch_shape=(batch_size, seq_len, vocab_size), name='char_input')%0Alabel_input = Input(batch_shape=(batch_size, seq_len, label_size), name='label_input')%0Ax = merge(%5Bchar_input, label_input%5D, mode='concat', concat_axis=-1) # checkif concat actually works as expected%0A%0Alstm_layer = LSTM(lstm_size, return_sequences=True, stateful=True)(x)%0Alstm_layer = Dropout(0.2)(lstm_layer)%0Alstm_layer = LSTM(lstm_size, return_sequences=True, stateful=True)(lstm_layer)%0Alstm_layer = Dropout(0.2)(lstm_layer)%0Alstm_layer = LSTM(lstm_size, return_sequences=True, stateful=True)(lstm_layer)%0Alstm_layer = Dropout(0.2)(lstm_layer)%0A%0Achar_output = TimeDistributed(Dense(vocab_size, activation='softmax'), name='char_output')(lstm_layer)%0Alabel_output = TimeDistributed(Dense(label_size, activation='softmax'), name='label_output')(lstm_layer)%0A%0Amodel = Model(%5Bchar_input, label_input%5D, %5Bchar_output, label_output%5D)%0Amodel.summary()%0A
|
|
51e06c31a722953f4aa7a1c0bb71b8ddd77a78d7
|
Switch to WeakKeyDict
|
pyqtgraph/tests/test_stability.py
|
pyqtgraph/tests/test_stability.py
|
"""
PyQt/PySide stress test:
Create lots of random widgets and graphics items, connect them together randomly,
the tear them down repeatedly.
The purpose of this is to attempt to generate segmentation faults.
"""
from pyqtgraph.Qt import QtTest
import pyqtgraph as pg
from random import seed, randint
import sys, gc, weakref
app = pg.mkQApp()
seed(12345)
widgetTypes = [
pg.PlotWidget,
pg.ImageView,
pg.GraphicsView,
pg.QtGui.QWidget,
pg.QtGui.QTreeWidget,
pg.QtGui.QPushButton,
]
itemTypes = [
pg.PlotCurveItem,
pg.ImageItem,
pg.PlotDataItem,
pg.ViewBox,
pg.QtGui.QGraphicsRectItem
]
widgets = []
items = []
allWidgets = weakref.WeakSet()
def crashtest():
global allWidgets
try:
gc.disable()
actions = [
createWidget,
#setParent,
forgetWidget,
showWidget,
processEvents,
#raiseException,
#addReference,
]
thread = WorkThread()
thread.start()
while True:
try:
action = randItem(actions)
action()
print('[%d widgets alive, %d zombie]' % (len(allWidgets), len(allWidgets) - len(widgets)))
except KeyboardInterrupt:
print("Caught interrupt; send another to exit.")
try:
for i in range(100):
QtTest.QTest.qWait(100)
except KeyboardInterrupt:
thread.terminate()
break
except:
sys.excepthook(*sys.exc_info())
finally:
gc.enable()
class WorkThread(pg.QtCore.QThread):
'''Intended to give the gc an opportunity to run from a non-gui thread.'''
def run(self):
i = 0
while True:
i += 1
if (i % 1000000) == 0:
print('--worker--')
def randItem(items):
return items[randint(0, len(items)-1)]
def p(msg):
print(msg)
sys.stdout.flush()
def createWidget():
p('create widget')
global widgets, allWidgets
if len(widgets) > 50:
return
widget = randItem(widgetTypes)()
widget.setWindowTitle(widget.__class__.__name__)
widgets.append(widget)
allWidgets.add(widget)
p(" %s" % widget)
return widget
def setParent():
p('set parent')
global widgets
if len(widgets) < 2:
return
child = parent = None
while child is parent:
child = randItem(widgets)
parent = randItem(widgets)
p(" %s parent of %s" % (parent, child))
child.setParent(parent)
def forgetWidget():
p('forget widget')
global widgets
if len(widgets) < 1:
return
widget = randItem(widgets)
p(' %s' % widget)
widgets.remove(widget)
def showWidget():
p('show widget')
global widgets
if len(widgets) < 1:
return
widget = randItem(widgets)
p(' %s' % widget)
widget.show()
def processEvents():
p('process events')
QtTest.QTest.qWait(25)
class TstException(Exception):
pass
def raiseException():
p('raise exception')
raise TstException("A test exception")
def addReference():
p('add reference')
global widgets
if len(widgets) < 1:
return
obj1 = randItem(widgets)
obj2 = randItem(widgets)
p(' %s -> %s' % (obj1, obj2))
obj1._testref = obj2
if __name__ == '__main__':
test_stability()
|
Python
| 0.000003
|
@@ -698,11 +698,21 @@
Weak
-Set
+KeyDictionary
()%0A%0A
@@ -2360,20 +2360,22 @@
gets
-.add(
+%5B'
widget
-)
+'%5D = 1
%0A
|
43fd2b8bd64e684ee47c91f582c8c2578e420105
|
Test check_visitors
|
test/plugins/test_common.py
|
test/plugins/test_common.py
|
Python
| 0
|
@@ -0,0 +1,820 @@
+%22%22%22Tests of edx_lint/pylint/common.py%22%22%22%0A%0Aimport pytest%0A%0Afrom edx_lint.pylint.common import check_visitors%0A%0A# pylint: disable=missing-docstring, unused-variable%0A%0Adef test_check_good_visitors():%0A @check_visitors%0A class ItsRight(object):%0A def visit_call(self):%0A pass # pragma: no cover%0A%0A def this_isnt_checked(self):%0A pass # pragma: no cover%0A%0A def visit_classdef(self):%0A pass # pragma: no cover%0A%0A%0Adef test_check_bad_visitors():%0A msg = %22Method visit_xyzzy doesn't correspond to a node class%22%0A with pytest.raises(Exception, match=msg):%0A @check_visitors%0A class ItsNotRight(object):%0A def visit_xyzzy(self):%0A pass # pragma: no cover%0A
|
|
c841fccb6099fb00b0a66a11c837bf2afd32e2f1
|
Create shell_cmd_test.py
|
shell_cmd_test.py
|
shell_cmd_test.py
|
Python
| 0.000013
|
@@ -0,0 +1,254 @@
+import subprocess%0A%0Asubprocess.call(%5B'say', ' Hello World from Python.'%5D)%0A%0Alisting_holder = subprocess.call(%5B'ls','-l'%5D)%0Aif listing_holder %3E 0:%0A%09print %22here the files listing resulting from the shell command ran from within python: %5Cn %25s%22 %25listing_holder%0A
|
|
6988dab0256ce6b6e0d5cbb4b3ac06727956ee37
|
Create a new file to calculate features from sets of points
|
emission/analysis/point_features.py
|
emission/analysis/point_features.py
|
Python
| 0
|
@@ -0,0 +1,761 @@
+# Standard imports%0Aimport math%0Aimport logging%0Aimport numpy as np%0Aimport emission.core.common as ec%0A%0Adef calSpeed(point1, point2):%0A distanceDelta = ec.calDistance(%5Bpoint1.mLongitude, point1.mLatitude%5D, %5Bpoint2.mLongitude, point2.mLatitude%5D)%0A timeDelta = point2.mTime - point1.mTime%0A # print %22Distance delta = %25s and time delta = %25s%22 %25 (distanceDelta, timeDelta)%0A # assert(timeDelta != 0)%0A if (timeDelta == 0):%0A logging.debug(%22timeDelta = 0, distanceDelta = %25s, returning speed = 0%22)%0A assert(distanceDelta %3C 0.01)%0A return 0%0A%0A # TODO: Once we perform the conversions from ms to secs as part of the%0A # usercache -%3E timeseries switch, we need to remove this division by 1000%0A return distanceDelta/(float(timeDelta)/1000)%0A
|
|
9033018854a29b09cdb7b6d9f01c371fced19e05
|
hello test
|
hello.py
|
hello.py
|
Python
| 0.999995
|
@@ -0,0 +1,428 @@
+# -*- coding: utf-8 -*-%0A%22%22%22%0ACreated on Sun Sep 27 11:16:02 2015%0A%0A@author: Nathan Ford%0A%22%22%22%0A%0A%0A%0A#%25%25%0A%0Adef later():%0A %22%22%22Print %22Laters World%22 and return None%22%22%22%0A print(%22Laters World%22)%0A%0A# main program starts here%0A%0Alater()%0A%0A%0A#%25%25%0A%0A# Demo file for Spyder Tutorial%0A# Hans Fangohr, University of Southampton, UK%0A%0Adef hello():%0A %22%22%22Print %22Hello World%22 and return None%22%22%22%0A print(%22Hello World%22)%0A%0A# main program starts here%0Ahello()%0A%0A
|
|
bef97246f77aaa1030b17e1213850c95786f51d7
|
Add state tests
|
app/tests/test_state.py
|
app/tests/test_state.py
|
Python
| 0.000002
|
@@ -0,0 +1,1429 @@
+import unittest%0Aimport time%0A%0Afrom app.state import state, ProgramNotFound%0A%0A%0Aclass TestState(unittest.TestCase):%0A%0A def tearDown(self):%0A if state._process is not None:%0A state._process.terminate()%0A%0A def test_start_all(self):%0A programs= %5B%22ascii_text%22, %22cheertree%22, %22cross%22, %22demo%22, %22dna%22, %0A %22game_of_life%22, %22matrix%22, %22psychedelia%22, %22rain%22, %22rainbow%22, %0A %22random_blinky%22, %22random_sparkles%22, %22simple%22, %22snow%22, %22trig%22%5D%0A for program in programs:%0A with self.subTest(program=program):%0A r = state.start_program(program)%0A self.assertTrue(state._process.is_alive())%0A%0A def test_start_not_found(self):%0A with self.assertRaises(ProgramNotFound):%0A state.start_program(%22does_not_exist%22)%0A%0A def test_start_with_good_params(self):%0A state.start_program(%22demo%22, %7B%22brightness%22: 0.5, %22rotation%22: 0%7D)%0A self.assertTrue(state._process.is_alive())%0A%0A def test_start_with_bad_brightness(self):%0A with self.assertRaises(ValueError):%0A state.start_program(%22demo%22, %7B%22brightness%22: 1.1%7D)%0A%0A def test_start_with_bad_rotation(self):%0A with self.assertRaises(ValueError):%0A state.start_program(%22demo%22, %7B%22rotation%22: 91%7D)%0A%0A def test_stop_program(self):%0A state.start_program(%22demo%22)%0A state.stop_program()%0A time.sleep(0.1)%0A self.assertFalse(state._process.is_alive())
|
|
23f69eff15b189423e0c1f36d2f71f708e4522e7
|
Add missing sys import
|
rst2pdf/opt_imports.py
|
rst2pdf/opt_imports.py
|
# -*- coding: utf-8 -*-
#$HeadURL$
#$LastChangedDate$
#$LastChangedRevision$
# See LICENSE.txt for licensing terms
'''
opt_imports.py contains logic for handling optional imports.
'''
import os
from log import log
PyHyphenHyphenator = None
DCWHyphenator = None
try:
import wordaxe
from wordaxe import version as wordaxe_version
from wordaxe.rl.paragraph import Paragraph
from wordaxe.rl.styles import ParagraphStyle, getSampleStyleSheet
# PyHnjHyphenator is broken for non-ascii characters, so
# let's not use it and avoid useless crashes (http://is.gd/19efQ)
#from wordaxe.PyHnjHyphenator import PyHnjHyphenator
# If basehyphenator doesn't load, wordaxe is broken
# pyhyphenator and DCW *may* not load.
from wordaxe.BaseHyphenator import BaseHyphenator
try:
from wordaxe.plugins.PyHyphenHyphenator \
import PyHyphenHyphenator
except:
pass
try:
from wordaxe.DCWHyphenator import DCWHyphenator
except:
pass
except ImportError:
# log.warning("No support for hyphenation, install wordaxe")
wordaxe = None
wordaxe_version = None
BaseHyphenator = None
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.platypus.paragraph import Paragraph
try:
import sphinx
except ImportError:
sphinx = None
try:
import psyco
except ImportError:
class psyco(object):
@staticmethod
def full():
pass
try:
import aafigure
import aafigure.pdf
except ImportError:
aafigure = None
try:
from json import loads as json_loads
except ImportError:
from simplejson import loads as json_loads
try:
from reportlab.platypus.flowables import NullDraw
except ImportError: # Probably RL 2.1
from reportlab.platypus.flowables import Flowable as NullDraw
try:
from matplotlib import mathtext
except ImportError:
mathtext = None
class LazyImports(object):
''' Only import some things if we need them.
'''
def __getattr__(self, name):
if name.startswith('_load_'):
raise AttributeError
value = None
if not os.environ.get('DISABLE_' + name.upper()):
func = getattr(self, '_load_' + name)
try:
value = func()
except ImportError:
pass
# Cache the result once we have it
setattr(self, name, value)
return value
def _load_pdfinfo(self):
try:
from pyPdf import pdf
except ImportError:
import pdfrw as pdf
return pdf
def _load_PILImage(self):
try:
from PIL import Image as PILImage
except ImportError:
import Image as PILImage
return PILImage
def _load_PMImage(self):
from PythonMagick import Image
return Image
def _load_gfx(self):
import gfx
return gfx
def _load_svglib(self):
from svglib import svglib
return svglib
def _load_uniconvertor(self):
for p in sys.path:
d = os.path.join(p, 'uniconvertor')
if os.path.isdir(d):
sys.path.append(d)
from app.io import load
from app.plugins import plugins
import app
from uniconvsaver import save
app.init_lib()
plugins.load_plugin_configuration()
break
else:
raise ImportError
return load, plugins, save
LazyImports = LazyImports()
|
Python
| 0.000059
|
@@ -191,16 +191,27 @@
mport os
+%0Aimport sys
%0A%0Afrom l
|
54033b438900e17d8e3e852222dc25c981cdb7e7
|
add model_helper, forgot to commit it previously
|
models/_model_helper.py
|
models/_model_helper.py
|
Python
| 0.000002
|
@@ -0,0 +1,767 @@
+%0Achebyshev2coeffs = %5B2., 0., -1.%5D%0Achebyshev4coeffs = %5B8., 0., -8., 0., 1.%5D%0Achebyshev6coeffs = %5B32., 0., -48., 0., 18., 0., -1.%5D%0Achebyshev8coeffs = %5B128., 0., -256., 0., 160., 0., -32., 0., 1.%5D%0Achebyshev16coeffs = %5B32768., 0., -131072., 0., 212992., 0., -180224., 0., 84480., 0., -21504., 0., 2688., 0., -128., 0., 1%5D%0A%0Adef chebyshev(trial, target, M=61):%0A from mystic.math import polyeval%0A result=0.0%0A x=-1.0%0A dx = 2.0 / (M-1)%0A for i in range(M):%0A px = polyeval(trial, x)%0A if px%3C-1 or px%3E1:%0A result += (1 - px) * (1 - px)%0A x += dx%0A%0A px = polyeval(trial, 1.2) - polyeval(target, 1.2)%0A if px%3C0: result += px*px%0A%0A px = polyeval(trial, -1.2) - polyeval(target, -1.2)%0A if px%3C0: result += px*px%0A%0A return result%0A%0A
|
|
91bab4277a8875f7248af698773938d54e19724f
|
Create InputNeuronGroup_Liquid.py
|
examples/InputNeuronGroup_Liquid.py
|
examples/InputNeuronGroup_Liquid.py
|
Python
| 0
|
@@ -0,0 +1,1912 @@
+'''%0AExample of a spike generator (only outputs spikes)%0A%0AIn this example spikes are generated and sent through UDP packages. At the end of the simulation a raster plot of the %0Aspikes is created.%0A%0A'''%0A%0Afrom brian import *%0Aimport numpy%0A%0Afrom brian_multiprocess_udp import BrianConnectUDP%0A%0Anumber_of_neurons_total = 45%0Anumber_of_neurons_spiking = 30%0A%0A%0Adef main_NeuronGroup(input_Neuron_Group, simulation_clock):%0A print %22main_NeuronGroup!%22 #DEBUG!%0A%0A simclock = simulation_clock%0A%0A delta_t=5%0A %0A random_list=numpy.random.randint(number_of_neurons_total,size=number_of_neurons_spiking)%0A random_list.sort()%0A%0A spiketimes = %5B(i, delta_t*ms) for i in random_list%5D%0A %0A SpikesOut = SpikeGeneratorGroup(number_of_neurons_total, spiketimes, period=300*ms, clock=simclock) # the maximum clock of the input spikes is limited here (period)%0A%0A%0A MSpkOut=SpikeMonitor(SpikesOut) # Spikes sent by UDP%0A%0A return (%5BSpikesOut%5D,%5B%5D,%5BMSpkOut%5D)%0A%0Adef post_simulation_function(input_NG, simulation_NG, simulation_SYN, simulation_MN):%0A %22%22%22%0A input_NG: the neuron group that receives the input spikes%0A simulation_NG: the neuron groups list passed to the system by the user function (main_NeuronGroup)%0A simulation_SYN: the synapses list passed to the system by the user function (main_NeuronGroup)%0A simulation_MN: the monitors list passed to the system by the user function (main_NeuronGroup)%0A%0A This way it is possible to plot, save or do whatever you want with these objects after the end of the simulation!%0A %22%22%22%0A figure()%0A raster_plot(simulation_MN%5B0%5D)%0A title(%22Spikes Sent by UDP%22)%0A show(block=True) %0A%0Aif __name__==%22__main__%22:%0A%0A my_simulation = BrianConnectUDP(main_NeuronGroup, NumOfNeuronsOutput=number_of_neurons_total, post_simulation_function=post_simulation_function,%0A output_addresses=%5B(%22127.0.0.1%22, 11111)%5D, simclock_dt=5, TotalSimulationTime=10000, brian_address=0)%0A
|
|
a11d33f5e1df23f044cac709ebbbb5d369d0e6ca
|
Add first test for update_language_list function
|
tests/test_add_language/test_update_language_list.py
|
tests/test_add_language/test_update_language_list.py
|
Python
| 0
|
@@ -0,0 +1,909 @@
+# test_update_language_list%0A%0Afrom __future__ import unicode_literals%0A%0Aimport json%0Aimport os%0Aimport os.path%0A%0Aimport nose.tools as nose%0A%0Aimport yvs.shared as yvs%0Aimport utilities.add_language as add_lang%0Afrom tests.test_add_language import set_up, tear_down%0Afrom tests.test_add_language.decorators import redirect_stdout%0A%0A%0A@nose.with_setup(set_up, tear_down)%0A@redirect_stdout%0Adef test_update_languge_list_add(out):%0A %22%22%22should add new languages to language list%22%22%22%0A add_lang.update_language_list('kln', 'Klingon')%0A langs_path = os.path.join(yvs.PACKAGED_DATA_DIR_PATH, 'languages.json')%0A with open(langs_path, 'r') as langs_file:%0A langs = json.load(langs_file)%0A klingon_lang = None%0A for lang in langs:%0A if lang%5B'id'%5D == 'kln':%0A klingon_lang = lang%0A nose.assert_is_not_none(klingon_lang)%0A nose.assert_equal(klingon_lang%5B'name'%5D, 'Klingon')%0A
|
|
bd5fc565c5106d609a7dc65a663515920e29caa4
|
Add example of multi-layer chart
|
altair/vegalite/v2/examples/multiple_marks.py
|
altair/vegalite/v2/examples/multiple_marks.py
|
Python
| 0.000001
|
@@ -0,0 +1,382 @@
+%22%22%22%0AMultiple Marks%0A==============%0AThis example demonstrates creating a single chart with multiple markers%0Arepresenting the same data.%0A%22%22%22%0Aimport altair as alt%0Afrom vega_datasets import data%0A%0Astocks = data.stocks()%0A%0Achart = alt.LayerChart(stocks).encode(%0A x='date:T',%0A y='price:Q',%0A color='symbol:N'%0A).add_layers(%0A alt.Chart().mark_point(),%0A alt.Chart().mark_line()%0A)%0A
|
|
41acbf471edce3babeed4a59a7f5f2a923d6fed6
|
Create sampe_1.py
|
apps/mongodb/sampe_1.py
|
apps/mongodb/sampe_1.py
|
Python
| 0.000038
|
@@ -0,0 +1,170 @@
+import pandas as pd%0Aimport bson%0A%0AFILE=%22/folder/file.bson%22%0A%0Awith open(FILE,'rb') as f:%0A data = bson.decode_all(f.read())%0A%0Amain_df=pd.DataFrame(data)%0Amain_df.describe()%0A
|
|
e7c6a1d5ca6c6ebd85976698e8c00ca761747b59
|
ADD FEATURE : simple C/C++ compiler
|
apps/simple_compiler.py
|
apps/simple_compiler.py
|
Python
| 0
|
@@ -0,0 +1,1527 @@
+from apps.decorators import on_command%0Afrom apps.slackutils import cat_token%0Afrom subprocess import check_output, CalledProcessError, STDOUT%0Aimport os%0Aimport re%0A%0A%0A@on_command(%5B'!%EC%BB%B4%ED%8C%8C%EC%9D%BC'%5D)%0Adef run(robot, channel, tokens, user, command):%0A '''C, C++, Python %EC%86%8C%EC%8A%A4 %EC%8B%A4%ED%96%89%EC%8B%9C%EC%BC%9C%EB%93%9C%EB%A6%BC'''%0A msg = ''%0A if len(tokens) %3C 2:%0A return channel, '%EC%9E%90%EC%84%B8%ED%95%9C %EC%82%AC%EC%9A%A9%EB%B0%A9%EB%B2%95%EC%9D%80...'%0A if tokens%5B0%5D.lower() in %5B'c', 'c++'%5D:%0A source = cat_token(tokens, 1)%0A source = re.sub('&', '&', source)%0A source = re.sub('<', '%3C', source)%0A source = re.sub('>', '%3E', source)%0A source = re.sub(r'(#.*%3E)', r'%5C1%5Cn', source)%0A if tokens%5B0%5D.lower() == 'c':%0A open(user + '.c', 'w').write(source)%0A msg += check_output(%5B'gcc', user + '.c', '-o', user + '.out'%5D).decode('utf-8')%0A os.remove(user + '.c')%0A else:%0A open(user + '.cpp', 'w').write(source)%0A try:%0A msg += check_output(%5B'g++', '-std=c++11' ,user + '.cpp', '-o', user + '.out'%5D, stderr=STDOUT).decode('utf-8')%0A except CalledProcessError as e:%0A msg += e.output.decode('utf-8')%0A return channel, msg%0A os.remove(user + '.cpp')%0A try:%0A msg += check_output(%5B'./' + user + '.out'%5D).decode('utf-8')%0A except CalledProcessError as e:%0A msg += '%3E :warning: WARNING : Your program returned exit status %60' + str(e.args%5B0%5D) +'%60%5Cn'%0A msg += e.output.decode('utf-8')%0A os.remove(user + '.out')%0A return channel, msg%0A%0A
|
|
ccae8514f184d612c515f1c17b8832b89b0982db
|
Create phylo.py
|
phylo.py
|
phylo.py
|
Python
| 0.001615
|
@@ -0,0 +1,1479 @@
+def phy_descend(parent, dictionary, out=%7B%7D):%0A if parent not in out:%0A out%5Bparent%5D = %7B%7D%0A for i in dictionary.keys():%0A if dictionary%5Bi%5D == parent: phy_descend(i, dictionary, out%5Bparent%5D)%0A return out%0A%0Adef phy_ancestry(child, dictionary, out=%5B%5D):%0A if child in dictionary.keys():%0A out.append(child)%0A phy_ancestry(dictionary%5Bchild%5D, dictionary, out)%0A return out%0A%0Adef phy_stratus(dictio, layer=0):%0A if layer == 0: stratus = dictio.keys()%0A else:%0A stratus = %5B%5D%0A for i in dictio.keys():%0A stratus = stratus + phy_stratus(dictio%5Bi%5D, layer - 1)%0A return stratus%0Aclean = %5B%5D%0A%0Adef phy_toclean(family, indent=0, preceding='', printed=%5B%5D):%0A if None in family.keys(): family = family%5BNone%5D%0A print(%22%7C%22)%0A if len(family) %3E 1: preceding += '%7C'%0A else: preceding += ''%0A%0A for parent in family:%0A clean.append(preceding + '%7C__' + parent)%0A phy_toclean(family%5Bparent%5D, indent + 1, preceding + ' ', printed + %5Bparent%5D)%0A%0Adef cleanlines(clean=%5B%5D):%0A clean = clean%5B::-1%5D%0A for n in range(0,len(clean)):%0A newline = ''%0A for i in range(len(clean%5Bn%5D)):%0A tc = clean%5Bn%5D%5Bi%5D%0A if clean%5Bn%5D%5Bi:i+2%5D == '%7C%7C': tc = ''%0A if clean%5Bn%5D%5Bi:i+2%5D == '%7C ' and %5C%0A (len(clean%5Bn-1%5D) %3C i or %5C%0A (len(clean%5Bn-1%5D) %3E i and clean%5Bn-1%5D%5Bi%5D == ' ')): tc = ' '%0A else: newline = newline + tc%0A clean%5Bn%5D = newline%0A for i in clean%5B::-1%5D: print i%0A
|
|
5bed3cf9ec4ccbc94529a4d7b37802f5340803a6
|
add utilities for unicode-friendly csv file reading & writing
|
dimagi/utils/csv.py
|
dimagi/utils/csv.py
|
Python
| 0
|
@@ -0,0 +1,1955 @@
+%22%22%22 %0Aextend csv.writer and csv.reader to support Unicode%0Afrom http://docs.python.org/library/csv.html%0A%22%22%22%0Afrom __future__ import absolute_import%0A%0Aimport csv%0Aimport codecs%0Aimport cStringIO%0A%0Aclass UTF8Recoder:%0A %22%22%22%0A Iterator that reads an encoded stream and reencodes the input to UTF-8%0A %22%22%22%0A %0A def __init__(self, f, encoding):%0A self.reader = codecs.getreader(encoding)(f)%0A%0A def __iter__(self):%0A return self%0A%0A def next(self):%0A return self.reader.next().encode(%22utf-8%22)%0A%0Aclass UnicodeReader:%0A %22%22%22%0A A CSV reader which will iterate over lines in the CSV file %22f%22,%0A which is encoded in the given encoding.%0A %22%22%22%0A%0A def __init__(self, f, dialect=csv.excel, encoding=%22utf-8%22, **kwds):%0A f = UTF8Recoder(f, encoding)%0A self.reader = csv.reader(f, dialect=dialect, **kwds)%0A%0A def next(self):%0A row = self.reader.next()%0A return %5Bunicode(s, %22utf-8%22) for s in row%5D%0A%0A def __iter__(self):%0A return self%0A%0Aclass UnicodeWriter:%0A %22%22%22%0A A CSV writer which will write rows to CSV file %22f%22,%0A which is encoded in the given encoding.%0A %22%22%22%0A def __init__(self, f, dialect=csv.excel, encoding=%22utf-8%22, **kwds):%0A # Redirect output to a queue%0A self.queue = cStringIO.StringIO()%0A self.writer = csv.writer(self.queue, dialect=dialect, **kwds)%0A self.stream = f%0A self.encoder = codecs.getincrementalencoder(encoding)()%0A%0A def writerow(self, row):%0A self.writer.writerow(%5Bunicode(s).encode(%22utf-8%22) for s in row%5D)%0A # Fetch UTF-8 output from the queue ...%0A data = self.queue.getvalue()%0A data = data.decode(%22utf-8%22)%0A # ... and reencode it into the target encoding%0A data = self.encoder.encode(data)%0A # write to the target stream%0A self.stream.write(data)%0A # empty queue%0A self.queue.truncate(0)%0A%0A def writerows(self, rows):%0A for row in rows:%0A self.writerow(row)%0A
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.