commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
58627bd4cbe100a7cbd526be38cd69e8605984cd | Add json-encoder example | examples/json-encoder.py | examples/json-encoder.py | #!/usr/bin/env python3
from pycnic.core import WSGI, Handler
import datetime
import json
class DateTimeEncoder(json.JSONEncoder):
    """JSON encoder that serialises ``datetime`` objects as ISO-8601 strings."""

    def default(self, o):
        # Only datetimes get special handling; anything else is deferred to
        # the base implementation, which raises TypeError for unserialisable
        # objects.
        if isinstance(o, datetime.datetime):
            return o.isoformat()
        return super().default(o)
class Hello(Handler):
    """Route handler returning a greeting plus the current timestamp."""

    def get(self, name="World"):
        # The datetime value is serialised by the app's configured JSON
        # encoder class.
        payload = {"message": "Hello, {name}!".format(name=name)}
        payload["date"] = datetime.datetime.now()
        return payload
class app(WSGI):
    """WSGI application wiring URL routes to handlers.

    ``json_cls`` makes datetime values in handler responses serialisable
    instead of raising TypeError.
    """
    debug = True
    json_cls = DateTimeEncoder
    routes = [
        ("/", Hello()),
        # Raw string: "\w" is an invalid escape sequence in a plain string
        # literal (DeprecationWarning since Python 3.6); the raw prefix keeps
        # the runtime value byte-identical.
        (r"/([\w]+)", Hello())
    ]
if __name__ == "__main__":
from wsgiref.simple_server import make_server
try:
print("Serving on 0.0.0.0:8080...")
make_server('0.0.0.0', 8080, app).serve_forever()
except KeyboardInterrupt:
pass
print("Done")
| Python | 0.000082 | |
ebfe2faa5fcf66f3f1ece597922d4a72b59c3e43 | Create B_Averages_ocean.py | Cas_1/B_Averages_ocean.py | Cas_1/B_Averages_ocean.py | #Averages of U,V,W,T,S and ETA
import numpy as np
import matplotlib.pyplot as plt
from xmitgcm import open_mdsdataset


def _report_mean(da, description, unit):
    """Print *description*, then the global mean of DataArray *da* and its unit."""
    print(description)
    print(da.mean().values, unit)


# Case 1 : 38 iterations
dir0 = '/homedata/bderembl/runmit/test_southatlgyre'

ds0 = open_mdsdataset(dir0, prefix=['Eta', 'U', 'V', 'W', 'T', 'S'])
print(ds0)

_report_mean(ds0['Eta'], 'Average of Ocean Surface Height Anomaly ', 'm')
_report_mean(ds0['T'], 'Average of Ocean Temperature', '°C')
_report_mean(ds0['S'], 'Average of Ocean Salinity', 'psu')
# In MITgcm output, U is the zonal (east-west) component and V the
# meridional (north-south) component; the original labels were swapped.
_report_mean(ds0['U'], 'Average of Zonal component of Ocean Velocity', 'm/s')
_report_mean(ds0['V'], 'Average of Meridional component of Ocean Velocity', 'm/s')
_report_mean(ds0['W'], 'Average of Vertical component of Ocean Velocity', 'm/s')

# To exclude land cells from the averages, mask by the open-cell fraction
# before averaging, e.g.:
#   ds0.Eta.where(ds0.hFacC > 0).mean().values
# (hFacW for U, hFacS for V; the appropriate fraction for W should be
# confirmed — the original commented code used hFacS.)
| Python | 0.001485 | |
8ada83d3140b871d7699988996ff7427c0526c9b | Remove extraneous logging from benchmarks | tensorflow/python/data/benchmarks/benchmark_base.py | tensorflow/python/data/benchmarks/benchmark_base.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utilities for tf.data benchmarking functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.platform import test
# TODO(b/119837791): Add eager benchmarks.
class DatasetBenchmarkBase(test.Benchmark):
  """Base class for dataset benchmarks."""

  def run_benchmark(self, dataset, num_elements, iters=1):
    """Benchmarks the dataset.

    Runs the dataset `iters` times. In each iteration, the benchmark measures
    the time it takes to go through `num_elements` elements of the dataset.

    Args:
      dataset: Dataset to benchmark.
      num_elements: Number of dataset elements to iterate through each benchmark
        iteration.
      iters: Number of times to repeat the timing.

    Returns:
      A float, representing the per-element wall time of the dataset in seconds.
      This is the median time (with respect to `iters`) it takes for the dataset
      to go through `num_elements` elements, divided by `num_elements.`
    """
    # Disable default optimizations so the benchmark measures the dataset as
    # written rather than a rewritten/optimized version of it.
    options = dataset_ops.Options()
    options.experimental_optimization.apply_default_optimizations = False
    dataset = dataset.with_options(options)
    # NOTE: We use `dataset.skip()` to perform the iterations in C++, avoiding
    # the overhead of multiple `session.run()` calls. Note that this relies on
    # the underlying implementation of `skip`: if it is optimized in the future,
    # we will have to change this code.
    dataset = dataset.skip(num_elements - 1)
    iterator = dataset_ops.make_initializable_iterator(dataset)
    next_element = iterator.get_next()
    # Time only the first component of the (possibly nested) element.
    next_element = nest.flatten(next_element)[0]
    deltas = []
    for _ in range(iters):
      with session.Session() as sess:
        # Run once to warm up the session caches.
        sess.run(iterator.initializer)
        sess.run(next_element)
        # Timed run: a single session.run that consumes all `num_elements`
        # elements (the first num_elements-1 are skipped in C++).
        sess.run(iterator.initializer)
        start = time.time()
        sess.run(next_element.op)
        end = time.time()
        deltas.append(end - start)
    return np.median(deltas) / float(num_elements)

  def run_and_report_benchmark(self,
                               dataset,
                               num_elements,
                               name,
                               iters=5,
                               extras=None):
    """Runs `run_benchmark` and reports the result via `report_benchmark`."""
    # Measure the per-element wall time.
    wall_time = self.run_benchmark(dataset, num_elements, iters)
    if extras is None:
      extras = {}
    extras["num_elements"] = num_elements
    # 'mode' represents the mechanism used for iterating over dataset elements.
    # NOTE(review): no 'mode' suffix is applied to `name` in this version —
    # confirm whether the comment above is stale.
    self.report_benchmark(
        wall_time=wall_time, iters=iters, name=name, extras=extras)
| # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utilities for tf.data benchmarking functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.platform import test
# TODO(b/119837791): Add eager benchmarks.
class DatasetBenchmarkBase(test.Benchmark):
"""Base class for dataset benchmarks."""
def run_benchmark(self, dataset, num_elements, iters=1):
"""Benchmarks the dataset.
Runs the dataset `iters` times. In each iteration, the benchmark measures
the time it takes to go through `num_elements` elements of the dataset.
Args:
dataset: Dataset to benchmark.
num_elements: Number of dataset elements to iterate through each benchmark
iteration.
iters: Number of times to repeat the timing.
Returns:
A float, representing the per-element wall time of the dataset in seconds.
This is the median time (with respect to `iters`) it takes for the dataset
to go through `num_elements` elements, divided by `num_elements.`
"""
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
# NOTE: We use `dataset.skip()` to perform the iterations in C++, avoiding
# the overhead of multiple `session.run()` calls. Note that this relies on
# the underlying implementation of `skip`: if it is optimized in the future,
# we will have to change this code.
dataset = dataset.skip(num_elements - 1)
iterator = dataset_ops.make_initializable_iterator(dataset)
next_element = iterator.get_next()
next_element = nest.flatten(next_element)[0]
deltas = []
for _ in range(iters):
with session.Session() as sess:
# Run once to warm up the session caches.
sess.run(iterator.initializer)
sess.run(next_element)
sess.run(iterator.initializer)
start = time.time()
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
return np.median(deltas) / float(num_elements)
def run_and_report_benchmark(self,
dataset,
num_elements,
name,
iters=5,
extras=None):
# Measure the per-element wall time.
wall_time = self.run_benchmark(dataset, num_elements, iters)
if extras is None:
extras = {}
extras["elements_per_second"] = 1 / wall_time
extras["num_elements"] = num_elements
# 'mode' represents the mechanism used for iterating over dataset elements.
name = "%s_mode_cpp" % name
self.report_benchmark(
wall_time=wall_time, iters=iters, name=name, extras=extras)
| Python | 0.000001 |
9afe19676cbb87985939bd0099301a7003a38b7f | check for monitoring file and directory count | samples/folder_check.py | samples/folder_check.py | #!/usr/bin/env python
import json,os,time
PLUGIN_VERSION="1"
HEARTBEAT="true"
#set this value to 1 if the file count needs to be recursive
INCLUDE_RECURSIVE_FILES=None
FOLDER_NAME="/"
THRESHOLD_COUNT=10
def get_data():
folder_checks_data = {}
folder_checks_data['plugin_version'] = PLUGIN_VERSION
folder_checks_data['heartbeat_required'] = HEARTBEAT
try:
if INCLUDE_RECURSIVE_FILES:
file_count = sum([len(files) for r, d, files in os.walk(FOLDER_NAME)])
directory_count = sum([len(d) for r, d, files in os.walk(FOLDER_NAME)])
else:
path, dirs, files = next(os.walk(FOLDER_NAME))
file_count = len(files)
directory_count = len(dirs)
folder_checks_data['file_count'] = file_count
folder_checks_data['directory_count'] = directory_count
#logical conditions
if file_count > THRESHOLD_COUNT:
folder_checks_data['status']=0
folder_checks_data['msg']='File Count Exceeds the threshold'
return folder_checks_data
if directory_count > THRESHOLD_COUNT:
folder_checks_data['status']=0
folder_checks_data['msg']='Directory Count Exceeds the threshold'
return folder_checks_data
if file_count > THRESHOLD_COUNT and directory_count > THRESHOLD_COUNT:
folder_checks_data['status']=0
folder_checks_data['msg']='Folder / Directory Counts Exceeded the threshold'
except Exception as e:
folder_checks_data['status']=0
folder_checks_data['msg']=str(e)
return folder_checks_data
if __name__ == "__main__":
data = get_data()
print(json.dumps(data,indent=4)) | Python | 0 | |
d881ee2866bb422a266871c1b426d76c669025da | Test for CASSANDRA-8741 | nodetool_test.py | nodetool_test.py | from ccmlib.node import NodetoolError
from dtest import Tester
from tools import require
class TestNodetool(Tester):
@require("8741")
def test_decommission_after_drain_is_invalid(self):
"""
@jira_ticket CASSANDRA-8741
Running a decommission after a drain should generate
an unsupported operation message and exit with an error
code (which we receive as a NodetoolError exception).
"""
cluster = self.cluster
cluster.populate([3]).start()
version = cluster.version()
node = cluster.nodelist()[0]
node.drain(block_on_log=True)
try:
node.decommission()
self.assertFalse("Expected nodetool error")
except NodetoolError as e:
if version >= "2.1":
self.assertEqual('', e.stderr)
self.assertTrue('Unsupported operation' in e.stdout)
else:
self.assertEqual('', e.stdout)
self.assertTrue('Unsupported operation' in e.stderr)
| Python | 0 | |
125b9ce4bc5b5966f3730f3d99dba122b1d295eb | use session.request instead of session.{method} | src/sentry/http.py | src/sentry/http.py | """
sentry.utils.http
~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import sentry
import six
import socket
import requests
import warnings
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from ipaddr import IPNetwork
from requests.adapters import HTTPAdapter
from requests.exceptions import SSLError
# In case SSL is unavailable (light builds) we can't import this here.
try:
from OpenSSL.SSL import ZeroReturnError
except ImportError:
class ZeroReturnError(Exception):
pass
from urlparse import urlparse
USER_AGENT = 'sentry/{version} (https://getsentry.com)'.format(
version=sentry.VERSION,
)
DISALLOWED_IPS = set((IPNetwork(i) for i in settings.SENTRY_DISALLOWED_IPS))
def get_server_hostname():
    """Return this Sentry server's hostname, parsed from SENTRY_URL_PREFIX."""
    # TODO(dcramer): Ideally this would parse at runtime, but we currently
    # change the URL prefix when runner initializes which may be post-import
    return urlparse(settings.SENTRY_URL_PREFIX).hostname
def is_valid_url(url):
    """
    Tests a URL to ensure it doesn't appear to be a blacklisted IP range.
    """
    parsed = urlparse(url)
    hostname = parsed.hostname
    if not hostname:
        return False

    server_hostname = get_server_hostname()

    # Requests addressed to this server itself are always allowed.
    if hostname == server_hostname:
        return True

    try:
        resolved = socket.gethostbyname(hostname)
    except socket.gaierror:
        # Unresolvable hosts are rejected outright.
        return False

    if resolved == server_hostname:
        return True

    network = IPNetwork(resolved)
    return not any(network in blocked for blocked in DISALLOWED_IPS)
class BlacklistAdapter(HTTPAdapter):
    """Transport adapter that refuses to send requests to blacklisted hosts."""

    def send(self, request, *args, **kwargs):
        # Reject the request before any connection is attempted.
        if not is_valid_url(request.url):
            raise SuspiciousOperation('%s matches the URL blacklist' % (request.url,))
        return super(BlacklistAdapter, self).send(request, *args, **kwargs)
def build_session():
    """Create a requests session with the Sentry UA and URL blacklisting."""
    session = requests.Session()
    session.headers['User-Agent'] = USER_AGENT
    # Route both schemes through the blacklist-enforcing adapter.
    for scheme in ('https://', 'http://'):
        session.mount(scheme, BlacklistAdapter())
    return session
def safe_urlopen(url, method=None, params=None, data=None, json=None,
                 headers=None, allow_redirects=False, timeout=30,
                 verify_ssl=True, user_agent=None):
    """
    A slightly safer version of ``urlib2.urlopen`` which prevents redirection
    and ensures the URL isn't attempting to hit a blacklisted IP range.
    """
    if user_agent is not None:
        # Deprecated parameter kept for backwards compatibility.
        warnings.warn('user_agent is no longer used with safe_urlopen')

    session = build_session()

    kwargs = {}

    if json:
        kwargs['json'] = json
        # JSON bodies default to an explicit JSON content type.
        if not headers:
            headers = {}
        headers.setdefault('Content-Type', 'application/json')

    # NOTE(review): falsy payloads (e.g. empty dict/string) are silently
    # dropped by these truthiness checks — confirm that is intended.
    if data:
        kwargs['data'] = data

    if params:
        kwargs['params'] = params

    if headers:
        kwargs['headers'] = headers

    # Default to POST when a body is supplied, otherwise GET.
    if method is None:
        method = 'POST' if (data or json) else 'GET'

    try:
        response = session.request(
            method=method,
            url=url,
            allow_redirects=allow_redirects,
            timeout=timeout,
            verify=verify_ssl,
            **kwargs
        )
    # Our version of requests does not transform ZeroReturnError into an
    # appropriately generically catchable exception
    except ZeroReturnError as exc:
        import sys
        exc_tb = sys.exc_info()[2]
        # Re-raise as requests' SSLError, preserving the traceback.
        six.reraise(SSLError, exc, exc_tb)
        del exc_tb

    # requests' attempts to use chardet internally when no encoding is found
    # and we want to avoid that slow behavior
    if not response.encoding:
        response.encoding = 'utf-8'

    return response
def safe_urlread(response):
    """Return the body of *response* as bytes."""
    return response.content
| """
sentry.utils.http
~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import sentry
import six
import socket
import requests
import warnings
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from ipaddr import IPNetwork
from requests.adapters import HTTPAdapter
from requests.exceptions import SSLError
# In case SSL is unavailable (light builds) we can't import this here.
try:
from OpenSSL.SSL import ZeroReturnError
except ImportError:
class ZeroReturnError(Exception):
pass
from urlparse import urlparse
USER_AGENT = 'sentry/{version} (https://getsentry.com)'.format(
version=sentry.VERSION,
)
DISALLOWED_IPS = set((IPNetwork(i) for i in settings.SENTRY_DISALLOWED_IPS))
def get_server_hostname():
# TODO(dcramer): Ideally this would parse at runtime, but we currently
# change the URL prefix when runner initializes which may be post-import
return urlparse(settings.SENTRY_URL_PREFIX).hostname
def is_valid_url(url):
"""
Tests a URL to ensure it doesn't appear to be a blacklisted IP range.
"""
parsed = urlparse(url)
if not parsed.hostname:
return False
server_hostname = get_server_hostname()
if parsed.hostname == server_hostname:
return True
try:
ip_address = socket.gethostbyname(parsed.hostname)
except socket.gaierror:
return False
if ip_address == server_hostname:
return True
ip_network = IPNetwork(ip_address)
for addr in DISALLOWED_IPS:
if ip_network in addr:
return False
return True
class BlacklistAdapter(HTTPAdapter):
def send(self, request, *args, **kwargs):
if not is_valid_url(request.url):
raise SuspiciousOperation('%s matches the URL blacklist' % (request.url,))
return super(BlacklistAdapter, self).send(request, *args, **kwargs)
def build_session():
session = requests.Session()
session.headers.update({'User-Agent': USER_AGENT})
session.mount('https://', BlacklistAdapter())
session.mount('http://', BlacklistAdapter())
return session
def safe_urlopen(url, method=None, params=None, data=None, json=None,
headers=None, allow_redirects=False, timeout=30,
verify_ssl=True, user_agent=None):
"""
A slightly safer version of ``urlib2.urlopen`` which prevents redirection
and ensures the URL isn't attempting to hit a blacklisted IP range.
"""
if user_agent is not None:
warnings.warn('user_agent is no longer used with safe_urlopen')
session = build_session()
kwargs = {}
if json:
kwargs['json'] = json
if not headers:
headers = {}
headers.setdefault('Content-Type', 'application/json')
if data:
kwargs['data'] = data
if params:
kwargs['params'] = params
if headers:
kwargs['headers'] = headers
if method is None:
method = 'POST' if (data or json) else 'GET'
try:
response = getattr(session, method.lower())(
url,
allow_redirects=allow_redirects,
timeout=timeout,
verify=verify_ssl,
**kwargs
)
# Our version of requests does not transform ZeroReturnError into an
# appropriately generically catchable exception
except ZeroReturnError as exc:
import sys
exc_tb = sys.exc_info()[2]
six.reraise(SSLError, exc, exc_tb)
del exc_tb
# requests' attempts to use chardet internally when no encoding is found
# and we want to avoid that slow behavior
if not response.encoding:
response.encoding = 'utf-8'
return response
def safe_urlread(response):
return response.content
| Python | 0 |
72cbdd0c1cf804eecb8f503f86e6be237719bf99 | add echo client for testing | network/echo-server/echo-client/main.py | network/echo-server/echo-client/main.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2016 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import socket
def main():
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(('127.0.0.1', 5555))
client_socket.send('This is a echo test')
data = client_socket.recv(4096)
if data:
print 'got data:', data
client_socket.close()
if __name__ == '__main__':
main()
| Python | 0.000003 | |
be96a2f7e3aeb59727ba88913cc6fda97bf8a423 | Add some unit tests | InvenTree/company/test_views.py | InvenTree/company/test_views.py | """ Unit tests for Company views (see views.py) """
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from .models import SupplierPart
class CompanyViewTest(TestCase):
    """Tests for Company-related views."""

    # Database fixtures loaded before each test.
    fixtures = [
        'category',
        'part',
        'location',
        'company',
        'supplier_part',
    ]

    def setUp(self):
        super().setUp()

        # Create a user and log in; the views require authentication.
        User = get_user_model()
        User.objects.create_user('username', 'user@email.com', 'password')
        self.client.login(username='username', password='password')

    def test_supplier_part_delete(self):
        """ Test the SupplierPartDelete view """
        url = reverse('supplier-part-delete')

        # Get form using 'part' argument
        response = self.client.get(url, {'part': '1'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        # Get form using 'parts' argument
        response = self.client.get(url + '?parts[]=1&parts[]=2', HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        # POST to delete two parts
        n = SupplierPart.objects.count()
        response = self.client.post(
            url,
            {
                'supplier-part-2': 'supplier-part-2',
                'supplier-part-3': 'supplier-part-3',
                'confirm_delete': True
            },
            HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertEqual(response.status_code, 200)

        # Exactly two SupplierPart records should have been removed.
        self.assertEqual(n - 2, SupplierPart.objects.count())
| Python | 0.000001 | |
725832be85b7b0455cb735ce8a054007209d9645 | test scan scraper | src/hsimage.py | src/hsimage.py | import sys
from PIL import Image

# Descramble a scanned image laid out as a 5x5 grid of shuffled tiles.
img = Image.open(sys.argv[1])
width, height = img.size

xblock = 5
yblock = 5
w_width = width / xblock
w_height = height / yblock

# Tile bounding boxes, indexed as xb * yblock + yb (column-major).
blockmap = [(xb * w_width, yb * w_height, (xb + 1) * w_width, (yb + 1) * w_height)
            for xb in xrange(xblock) for yb in xrange(yblock)]

# newblockmap[i] = blockmap[PERMUTATION[i]]: source tile for each destination
# slot, transcribed from the scanner's scramble pattern.  (Equivalently:
# destination row r, column c comes from source row [2, 4, 0, 3, 1][r],
# source column 4 - c.)  Replaces 25 hand-written assignments.
PERMUTATION = [
    14, 13, 12, 11, 10,
    24, 23, 22, 21, 20,
     4,  3,  2,  1,  0,
    19, 18, 17, 16, 15,
     9,  8,  7,  6,  5,
]
newblockmap = [blockmap[j] for j in PERMUTATION]

result = Image.new(img.mode, (width, height))
for box, sbox in zip(blockmap, newblockmap):
    result.paste(img.crop(sbox), box)

# NOTE: overwrites the input file in place.
result.save(sys.argv[1])
| Python | 0.000001 | |
dede46a2d5ad1504991b05b8edab4d1ffd781f46 | fix out of range error in tracker remover plugin | searx/plugins/tracker_url_remover.py | searx/plugins/tracker_url_remover.py | '''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with searx. If not, see < http://www.gnu.org/licenses/ >.
(C) 2015 by Adam Tauber, <asciimoo@gmail.com>
'''
from flask_babel import gettext
import re
from searx.url_utils import urlunparse, parse_qsl, urlencode
regexes = {re.compile(r'utm_[^&]+'),
re.compile(r'(wkey|wemail)[^&]*'),
re.compile(r'&$')}
name = gettext('Tracker URL remover')
description = gettext('Remove trackers arguments from the returned URL')
default_on = True
preference_section = 'privacy'
def on_result(request, search, result):
    """Strip tracker query parameters (utm_*, wkey/wemail) from result URLs.

    Always returns True so the result is kept; only its URL is rewritten.
    """
    if 'parsed_url' not in result:
        return True

    query = result['parsed_url'].query
    if query == "":
        return True

    parsed_query = parse_qsl(query)
    # Filter in a single pass instead of popping by adjusted index while
    # iterating, and rebuild the URL at most once, after all parameters
    # have been inspected.
    cleaned = [(param_name, value) for param_name, value in parsed_query
               if not any(reg.match(param_name) for reg in regexes)]
    if len(cleaned) != len(parsed_query):
        result['parsed_url'] = result['parsed_url']._replace(query=urlencode(cleaned))
        result['url'] = urlunparse(result['parsed_url'])
    return True
| '''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with searx. If not, see < http://www.gnu.org/licenses/ >.
(C) 2015 by Adam Tauber, <asciimoo@gmail.com>
'''
from flask_babel import gettext
import re
from searx.url_utils import urlunparse, parse_qsl, urlencode
regexes = {re.compile(r'utm_[^&]+'),
re.compile(r'(wkey|wemail)[^&]*'),
re.compile(r'&$')}
name = gettext('Tracker URL remover')
description = gettext('Remove trackers arguments from the returned URL')
default_on = True
preference_section = 'privacy'
def on_result(request, search, result):
if 'parsed_url' not in result:
return True
query = result['parsed_url'].query
if query == "":
return True
parsed_query = parse_qsl(query)
changed = False
for i, (param_name, _) in enumerate(list(parsed_query)):
for reg in regexes:
if reg.match(param_name):
parsed_query.pop(i)
changed = True
break
if changed:
result['parsed_url'] = result['parsed_url']._replace(query=urlencode(parsed_query))
result['url'] = urlunparse(result['parsed_url'])
return True
| Python | 0 |
a6fb8c86e14722527ff004ca1378458df252f8c0 | add doxygen module | modules/doxygen.py | modules/doxygen.py | """Doxygen module.
Create project's documentation.
Website: http://www.doxygen.org
"""
import os
import shlex
def doxygen(loader, variant=None, *args):
    """Run doxygen using the config file of the selected build variant.

    Replaces the current process with doxygen via execvp (does not return).

    Args:
        loader: object exposing the project configuration via ``config``.
        variant: build variant name; defaults to $PROJECT_VARIANT or the
            configured ``default_variant``.
        *args: extra arguments; a single string is shell-split.
    """
    if len(args) == 1:
        args = shlex.split(args[0])
    # NOTE(review): `args` is parsed but never used below — confirm whether
    # it was meant to be appended to the doxygen invocation.
    if variant is None:
        variant = os.environ.get('PROJECT_VARIANT',
                                 loader.config.get('default_variant'))
    config = loader.config.get('configuration', {})
    config = config.get(variant, {})
    # Raises KeyError if the variant has no doxygen/config_file entry.
    binargs = ['doxygen', config['doxygen']['config_file']]
    os.execvp(binargs[0], binargs)

# Commands exported by this module.
commands = (doxygen,)
| Python | 0.000001 | |
7a74f85fc76af2df62bb92ff2997ab1b84caa3a0 | Test dummy IRC bot | tests/test_irc_bot_dummy.py | tests/test_irc_bot_dummy.py | """
:Copyright: 2007-2021 Jochen Kupperschmidt
:License: MIT, see LICENSE for details.
"""
import pytest
from syslog2irc.irc import create_bot, IrcChannel, IrcConfig
from syslog2irc.signals import irc_channel_joined
@pytest.fixture
def config():
    """Provide an IRC configuration with two channels and no server."""
    channels = {IrcChannel('#one'), IrcChannel('#two')}
    return IrcConfig(
        server=None,  # no server configured
        nickname='nick',
        realname='Nick',
        channels=channels,
    )
@pytest.fixture
def bot(config):
    """Create a bot for the test and disconnect it afterwards."""
    bot = create_bot(config)
    yield bot
    # Teardown: runs after the test using this fixture finishes.
    bot.disconnect('Done.')
def test_fake_channel_joins(bot):
    """The bot should emit one join signal per configured channel."""
    received_signal_data = []

    @irc_channel_joined.connect
    def handle_irc_channel_joined(sender, **data):
        # Record every `irc_channel_joined` signal emitted during start().
        received_signal_data.append(data)

    bot.start()

    # NOTE(review): channels are configured as a set but a deterministic
    # order is asserted — presumably the bot sorts them; confirm.
    assert received_signal_data == [
        {'channel_name': '#one'},
        {'channel_name': '#two'},
    ]
| Python | 0.000001 | |
3be6fadffbce4cdf5d45f4b34035b55db6abe2fc | add script for creating otu-tree table | ottreeindex/scripts/create_otu_table.py | ottreeindex/scripts/create_otu_table.py | # Collects the OTU - tree relationships across phylesystem
# Prints to file which is then inserted into postgres with COPY
# This is much faster than many inserts
from peyotl.api.phylesystem_api import PhylesystemAPI
from peyotl.phylesystem.phylesystem_umbrella import Phylesystem
from peyotl import gen_otu_dict, iter_node
from peyotl.manip import iter_trees
import setup_db
import psycopg2 as psy
import argparse
import yaml
def create_phylesystem_obj():
    """Return a phylesystem object backed by the local checkout."""
    # create connection to local phylesystem
    phylesystem_api_wrapper = PhylesystemAPI(get_from='local')
    phylesystem = phylesystem_api_wrapper.phylesystem_obj
    return phylesystem
def getTreeID(cursor,study_id,tree_label):
sqlstring = ('SELECT id FROM {tablename} '
'WHERE study_id=%s and tree_label=%s;'
.format(tablename='tree')
)
data = (study_id,tree_label)
print ' SQL: ',cursor.mogrify(sqlstring,data)
cursor.execute(sqlstring,data)
return cursor.fetchone()[0]
def print_otu_file(connection, cursor, phy, nstudies=None):
    """Write (tree id, OTT id) pairs for all mapped OTUs to tree_otu.csv.

    The file is later bulk-loaded into postgres with COPY.

    Args:
        connection: open database connection (unused here, kept for callers).
        cursor: database cursor used to resolve tree ids.
        phy: phylesystem wrapper whose studies are iterated.
        nstudies: if given, stop after this many studies.

    Returns:
        The name of the CSV file written.
    """
    filename = "tree_otu.csv"
    with open(filename, 'w') as f:
        # datafile format is 'ottid'\t'treeid' where treeid is not
        # the treeid (string) in the nexson, but the treeid (int) from
        # the database for faster indexing
        counter = 0
        for study_id, n in phy.iter_study_objs():
            print study_id
            otu_dict = gen_otu_dict(n)
            mapped_otus = {}
            # iterate over the OTUs in the study, collecting
            # the mapped ones
            for oid, o in otu_dict.items():
                label = o['^ot:originalLabel']
                ottname = o.get('^ot:ottTaxonName')
                if ottname is not None:
                    ottID = o.get('^ot:ottId')
                    otu_props = [ottname, ottID]
                    mapped_otus[oid] = otu_props
                    print oid, ottID, label, ottname
            # now iterate over trees and collect OTUs used in
            # each tree
            for trees_group_id, tree_label, tree in iter_trees(n):
                tree_id = getTreeID(cursor, study_id, tree_label)
                if (tree_id is None):
                    raise LookupError('tree_id for study {s}, tree {t}'
                                      ' not found'.format(s=study_id, t=tree_label))
                for node_id, node in iter_node(tree):
                    oid = node.get('@otu')
                    # no @otu property on internal nodes
                    if oid is not None:
                        otu_props = mapped_otus.get(oid)
                        if otu_props is not None:
                            ottname = otu_props[0]
                            ottID = otu_props[1]
                            print tree_label, oid, ottID, ottname
                            # NOTE(review): the comment above says
                            # tab-separated, but the row written is
                            # comma-separated — confirm which format the
                            # COPY command expects.
                            f.write('{t},{o}\n'.format(t=tree_id, o=ottID))
            counter += 1
            if (nstudies and counter >= nstudies):
                f.close()
                break
    return filename
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='create otu-tree table')
parser.add_argument('configfile',
help='path to the config file'
)
parser.add_argument('-n',
dest='nstudies',
type=int,
help='load only n studies; if absent, load all studies'
)
args = parser.parse_args()
# read config variables
config_dict={}
with open(args.configfile,'r') as f:
config_dict = yaml.safe_load(f)
connection, cursor = setup_db.connect(config_dict)
phy = create_phylesystem_obj()
print_otu_file(connection,cursor,phy,args.nstudies)
| Python | 0 | |
a8ec11719ccc158fd457ed02f2b8459d1b452975 | Create tweets.py | tweets.py | tweets.py | import sqlite3
def main(cursor):
    """Print every row of the ``tweets`` table in a readable block format.

    Rows are expected to be indexable as (id, date, text, geo, ...);
    extra columns are ignored.
    """
    cursor.execute("select * from tweets")
    for tweet in cursor.fetchall():
        tdate = tweet[1]
        text = tweet[2]
        geo = tweet[3]
        # Unused locals `tid` and `t` from the original were removed.
        # Parenthesised print of a single pre-formatted string behaves
        # identically under Python 2 and Python 3.
        print('-----tweet: %s ' % text)
        print('------date: %s' % tdate)
        print('-------geo: %s' % geo)
        print('----length: %s' % len(text))
        print('\n')
if __name__ == '__main__':
    conn = sqlite3.connect('tweets.db')
    # Return TEXT columns as bytestrings rather than unicode
    # (presumably to avoid decode errors on Python 2 — confirm).
    conn.text_factory = str
    cur = conn.cursor()
    main(cur)
| Python | 0.000008 | |
0fa30986e1f97331f96444e0b3b0f86cbe20c68a | Add tests for JsonBackend __init__ and commit methods | shadho/backend/json/tests/test_db.py | shadho/backend/json/tests/test_db.py | import pytest
from shadho.backend.base.tests.test_db import TestBaseBackend
from shadho.backend.json.db import JsonBackend
import json
import os
import shutil
class TestJsonBackend(object):
    """Unit tests for the JSON-file-backed shadho backend."""

    def test_init(self):
        """Ensure that initialization sets up the db and filepath."""
        # Default initialization: file lands in the CWD with default tuning.
        b = JsonBackend()
        assert b.path == os.path.join(os.getcwd(), 'shadho.json')
        assert b.db == {'models': {},
                        'domains': {},
                        'results': {},
                        'values': {}}
        assert b.commit_frequency == 10
        assert b.update_frequency == 10

        # Custom initialization: explicit file name and frequencies.
        b = JsonBackend(path='foo.bar',
                        commit_frequency=42,
                        update_frequency=42)
        assert b.path == os.path.join(os.getcwd(), 'foo.bar')
        assert b.db == {'models': {},
                        'domains': {},
                        'results': {},
                        'values': {}}
        assert b.commit_frequency == 42
        assert b.update_frequency == 42

        # A directory path gets the default file name appended.
        b = JsonBackend(path='/tmp')
        assert b.path == os.path.join('/tmp', 'shadho.json')
        assert b.db == {'models': {},
                        'domains': {},
                        'results': {},
                        'values': {}}
        assert b.commit_frequency == 10
        assert b.update_frequency == 10

    def test_commit(self):
        """Ensure that commit writes to file and the file is loadable."""
        # tempfile.mkdtemp, not shutil.mkdtemp: shutil has no mkdtemp and the
        # original call raised AttributeError before any assertion ran.
        temp = tempfile.mkdtemp()
        # Fixed typo: the backend writes 'shadho.json' (see test_init), so the
        # old 'shahdo.json' spelling checked a file that could never exist.
        fpath = os.path.join(temp, 'shadho.json')

        # Constructing the backend is expected to create the file on disk —
        # presumably __init__ commits once; confirm against JsonBackend.
        b = JsonBackend(path=temp)
        assert os.path.isfile(fpath)
        with open(fpath, 'r') as f:
            db = json.load(f)
        assert db == {'models': {},
                      'domains': {},
                      'results': {},
                      'values': {}}

        shutil.rmtree(temp)

    def test_count(self):
        """Ensure that the correct counts are returned for object classes"""
        # TODO: not yet implemented.
| Python | 0.000002 | |
871f79a0b2bd235df457e3a1dc502d5c18bd934a | Add some generic python utilities as a basis for scripts | tools/build/common_utils.py | tools/build/common_utils.py | from __future__ import print_function
import os
def game_root_path():
    """Return the absolute path of the repository root, two levels above this file."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.abspath(os.path.join(here, os.pardir, os.pardir))
def files_with_type(root, type):
    """Return full paths of entries directly under *root* ending in ``.<type>``."""
    suffix = '.' + type
    joined = (os.path.join(root, name) for name in os.listdir(root))
    return [path for path in joined if path.endswith(suffix)]
def sha1_of_file(filepath):
    """Return the hex SHA-1 digest of *filepath*, or '' if the file is missing.

    The file is hashed in fixed-size chunks so large downloads never have to
    be held in memory all at once (the original read the whole file).
    """
    import hashlib
    if not os.path.exists(filepath):
        return ''
    digest = hashlib.sha1()
    with open(filepath, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()
def fetch_file(url, target_path, sha1):
    """Download *url* to *target_path* and verify it against a SHA-1 digest.

    Returns True if a file with the expected digest is already present, or
    once the download verifies. If *sha1* is None the download is kept and
    its actual digest is printed instead of being checked. On a mismatch the
    partial download is deleted and False is returned.
    """
    if sha1_of_file(target_path) == sha1:
        return True  # Already downloaded
    import urllib
    if hasattr(urllib, 'urlretrieve'):
        # Python 2
        urllib.urlretrieve(url, target_path)
    else:
        # Python 3
        import urllib.request
        urllib.request.urlretrieve(url, target_path)
    # Identity comparison is the correct test against None (was `== None`).
    if sha1 is None:
        print('sha1 of ' + target_path + ': ' + sha1_of_file(target_path))
    elif sha1_of_file(target_path) != sha1:
        if os.path.exists(target_path):
            os.remove(target_path)
        return False
    return True
def python27_path():
    """Return a best-guess path to a Python 2.7 interpreter for this platform.

    Reuses the running interpreter when it already is 2.7; otherwise falls
    back to the conventional install location per platform, or '' when the
    platform is unrecognised.
    """
    import sys
    if sys.version_info.major == 2 and sys.version_info.minor == 7:
        return sys.executable
    if sys.platform.startswith("linux") or sys.platform == "darwin":
        return '/usr/local/bin/python2.7'
    if sys.platform == "win32":
        return 'C:\Python27\python.exe'
    return ''
if __name__ == '__main__':
    # Smoke test: print the computed repository root when run directly.
    print('Game root path: ' + game_root_path())
| Python | 0.000001 | |
7d22c38348ccd411871942ef0dd43ed57794de16 | include benchmark code | bench.py | bench.py | from statistics import mean
import heapq
import importlib
import time
import numpy as np
# One million random floats shared by every benchmark below.
r = np.random.random(1000*1000)
# module name -> (merge fn, n-largest fn, n-smallest fn or None).
# cytoolz exposes merge_sorted/topk and has no nsmallest equivalent.
mergers = {
    'heapq': ('merge', 'nlargest', 'nsmallest'),
    'cyheapq': ('merge', 'nlargest', 'nsmallest'),
    'cytoolz': ('merge_sorted', 'topk', None),
}
mods = list(mergers.keys())
# Width used to right-align module names in the printed results.
name_max_len = max(map(len, mods))
def test(runs, loops, f, *args):
times = []
for _ in range(runs):
start = time.monotonic()
for _ in range(loops):
f(*args)
stop = time.monotonic()
times.append(stop-start)
times.sort()
return mean(times[1:-2])
# Run each benchmark category against every registered implementation.
for t in ('merge', 'nlargest', 'nsmallest'):
    print('---', t, '---')
    for mod, (merge, nlargest, nsmallest) in sorted(mergers.items()):
        module = importlib.import_module(mod)
        merge = getattr(module, merge)
        nlargest = getattr(module, nlargest)
        # cytoolz maps nsmallest to None, so guard the attribute lookup.
        nsmallest = getattr(module, nsmallest) if nsmallest else None
        # Fresh list copies so no implementation benefits from prior runs.
        a = list(r)
        b = list(r)
        if t == 'merge':
            # NOTE(review): the merge result is never consumed; for lazy
            # implementations this may time only iterator construction —
            # confirm that is the intended comparison.
            print(mod.rjust(name_max_len), 'merge', test(5, 100000, merge, a, a, b, b))
        elif t == 'nlargest':
            print(mod.rjust(name_max_len), 'nlargest', test(5, 5, nlargest, 10, a))
        elif t == 'nsmallest' and nsmallest:
            print(mod.rjust(name_max_len), 'nsmallest', test(5, 5, nsmallest, 10, a))
| Python | 0.000001 | |
5b2aebb9b9f9fafe291f0890f03c44abd661ca68 | add celery_work | celery_work.py | celery_work.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from celery import Celery, platforms
from app import create_app
def make_celery(app):
    """Create a Celery instance whose tasks run inside *app*'s application context.

    The Celery object is configured from the Flask app's config
    (CELERY_RESULT_BACKEND / CELERY_BROKER_URL) and its base Task class is
    replaced so every task body executes within ``app.app_context()``.
    """
    # Build the Celery object from the Flask app's configuration.
    celery = Celery(
        app.import_name,
        backend=app.config['CELERY_RESULT_BACKEND'],
        broker=app.config['CELERY_BROKER_URL'])
    # Mirror the full Flask config into the Celery config.
    celery.conf.update(app.config)
    TaskBase = celery.Task
    # Celery's C_FORCE_ROOT flag: allow the worker to run as the root user.
    platforms.C_FORCE_ROOT = True

    class ContextTask(TaskBase):
        abstract = True

        def __call__(self, *args, **kwargs):
            """Execute the task body inside the Flask application context."""
            # Pushing the app context makes Flask extensions (DB, mail, ...)
            # that depend on it usable inside the worker process.
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)

    # Replace the base task class so every task gets the app context.
    celery.Task = ContextTask
    return celery
# Each Celery worker process builds its own Flask application instance; the
# config name comes from FLASK_CONFIG, defaulting to 'default'.
flask_app = create_app(os.getenv('FLASK_CONFIG') or 'default')
# Bind the Celery object to that app so task bodies run in its context.
celery = make_celery(flask_app)
9c53e59ee0c4e5418b54d47c932454b7b907dc03 | Revert escape nickname, desc, etc in user profile | seahub/profile/forms.py | seahub/profile/forms.py | # encoding: utf-8
from django import forms
from seahub.profile.models import Profile, DetailedProfile
class ProfileForm(forms.Form):
    """Form for editing a user's basic profile: nickname and intro text."""
    nickname = forms.CharField(max_length=64, required=False)
    intro = forms.CharField(max_length=256, required=False)

    def save(self, username):
        """Create or update *username*'s profile from the cleaned form data."""
        nickname = self.cleaned_data['nickname']
        intro = self.cleaned_data['intro']
        Profile.objects.add_or_update(username, nickname, intro)
class DetailedProfileForm(ProfileForm):
    """Profile form extended with department and telephone fields."""
    department = forms.CharField(max_length=512, required=False)
    telephone = forms.CharField(max_length=100, required=False)

    def save(self, username):
        """Save the basic profile, then the detailed (department/phone) part."""
        super(DetailedProfileForm, self).save(username)
        department = self.cleaned_data['department']
        telephone = self.cleaned_data['telephone']
        DetailedProfile.objects.add_or_update(username, department, telephone)
| # encoding: utf-8
from django import forms
from django.utils.html import escape
from seahub.profile.models import Profile, DetailedProfile
class ProfileForm(forms.Form):
    """Form for editing a user's basic profile: nickname and intro text."""
    nickname = forms.CharField(max_length=64, required=False)
    intro = forms.CharField(max_length=256, required=False)

    def save(self, username):
        """Persist *username*'s profile; values are HTML-escaped before storage."""
        nickname = escape(self.cleaned_data['nickname'])
        intro = escape(self.cleaned_data['intro'])
        Profile.objects.add_or_update(username, nickname, intro)
class DetailedProfileForm(ProfileForm):
    """Profile form extended with department and telephone fields."""
    department = forms.CharField(max_length=512, required=False)
    telephone = forms.CharField(max_length=100, required=False)

    def save(self, username):
        """Save the basic profile, then the HTML-escaped detailed fields."""
        super(DetailedProfileForm, self).save(username)
        department = escape(self.cleaned_data['department'])
        telephone = escape(self.cleaned_data['telephone'])
        DetailedProfile.objects.add_or_update(username, department, telephone)
| Python | 0 |
b23ec502b89ab70b9e8edd1868f4e9717392b7b2 | Add missing migrations | account/migrations/0004_auto_20170416_1821.py | account/migrations/0004_auto_20170416_1821.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-16 18:21
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: updates PasswordHistory's human-readable
    # names to "password history" / "password histories". No schema change.

    dependencies = [
        ('account', '0003_passwordexpiry_passwordhistory'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='passwordhistory',
            options={'verbose_name': 'password history', 'verbose_name_plural': 'password histories'},
        ),
    ]
| Python | 0.000029 | |
9a691ae746c5b501ed37792383600da1ba381b20 | Add exitcode.py | bin/exitcode.py | bin/exitcode.py | #!/usr/bin/env python
#
# Copyright 2010 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# exitcode.py
#
# This program parses kickstart invocation records looking for failures.
# If failures are found, it prints a message and exits with a non-zero
# exit code. If no failures are found, it exits with 0.
#
# This program also renames the .out and .err file to .out.XXX and .err.XXX
# where XXX is a sequence number. This sequence number is incremented each
# time the program is run with the same kickstart.out argument.
#
# Since XML parsers are slow, this program doesn't parse the full invocation
# XML, but rather looks for the <status> tag in the XML and extracts the raw
# exitcode using simple string manipulations. This turns out to be much
# faster than using an XML parser. On .out files with 1000 invocation
# records this program runs in about 30 milliseconds and uses less than
# 4 MB of physical memory.
#
import sys
import re
import os
from optparse import OptionParser
__author__ = "Gideon Juve <juve@usc.edu>"
def fail(message=None):
    """Print an optional failure message and exit with status 1 (Python 2 syntax)."""
    if message: print "fail: %s" % message
    sys.exit(1)
def rename(outfile):
    """Move a kickstart ``.out`` file to the next free ``.out.NNN`` slot.

    The matching ``.err`` file, if present, is moved to the same sequence
    number. Returns the new ``.out`` path. If *outfile* is already a renamed
    ``.out.NNN`` file it is returned unchanged; exits via fail() when the
    name does not end in ``.out`` or all 1000 sequence slots are taken.
    """
    # Already renamed (e.g. by a previous run): leave it alone.
    if re.search("\.out\.[0-9]{3}$", outfile):
        return outfile

    if not outfile.endswith(".out"):
        fail("%s does not look like a kickstart .out file" % outfile)

    # Pick the first unused sequence number in 000..999.
    retry = None
    for seq in range(0, 1000):
        if not os.path.isfile("%s.%03d" % (outfile, seq)):
            retry = seq
            break

    # unlikely to occur
    if retry is None:
        fail("%s has been renamed too many times!" % (outfile))

    base = outfile[:-4]  # strip the trailing ".out"

    # foo.out -> foo.out.NNN
    renamed = "%s.out.%03d" % (base, retry)
    os.rename(outfile, renamed)

    # foo.err -> foo.err.NNN, when a matching .err exists
    errpath = "%s.err" % (base)
    if os.path.isfile(errpath):
        os.rename(errpath, "%s.err.%03d" % (base, retry))

    return renamed
def exitcode(outfile):
    """Count successful invocation records in a kickstart ``.out`` file.

    Scans the raw text for <status> elements and inspects each element's
    'raw' exit-code attribute. Exits via fail() when the file is empty or
    malformed, when any task has a non-zero raw status, or when no task
    succeeded at all. Returns the number of successful invocations.
    """
    with open(outfile) as handle:
        txt = handle.read()

    if len(txt) == 0:
        fail("kickstart produced no output")

    # Plain string scanning instead of XML parsing keeps this fast (see the
    # file header): only the raw attribute inside each <status> is needed.
    raw_pattern = re.compile(r'raw="(-?[0-9]+)"')
    succeeded = 0
    cursor = 0
    while True:
        begin = txt.find("<status", cursor)
        if begin < 0:
            break
        end = txt.find("</status>", begin)
        if end < 0:
            fail("mismatched <status>")
        cursor = end + len("</status>")
        match = raw_pattern.search(txt[begin:cursor])
        if match is None:
            fail("<status> was missing valid 'raw' attribute")
        raw = int(match.group(1))
        if raw != 0:
            fail("task exited with raw status %d" % raw)
        succeeded = succeeded + 1

    # Require at least one task to succeed
    if succeeded == 0:
        fail("no tasks succeeded")

    return succeeded
def main():
    """Parse command-line options, rename the kickstart files, and verify
    that every task (and DAGMan itself) exited successfully.

    Exits 0 when all checks pass; otherwise exits non-zero via fail().
    """
    usage = "Usage: %prog [options] kickstart.out"
    parser = OptionParser(usage)
    parser.add_option("-t", "--tasks", action="store", type="int",
        dest="tasks", metavar="N",
        help="Number of tasks expected. If less than N tasks succeeded, then exitcode will fail.")
    parser.add_option("-r", "--return", action="store", type="int",
        dest="exitcode", default=0, metavar="R",
        help="Return code reported by DAGMan. This can be specified in a DAG using the $RETURN variable.")
    parser.add_option("-n", "--no-rename", action="store_false",
        dest="rename", default=True,
        help="Don't rename kickstart.out and .err to .out.XXX and .err.XXX. Useful for testing.")
    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.error("please specify kickstart.out")
    outfile = args[0]
    if not os.path.isfile(outfile):
        fail("%s does not exist" % outfile)
    # if we are renaming, then rename (the sequence-numbered copy is what we
    # scan from here on)
    if options.rename:
        outfile = rename(outfile)
    # check supplied exitcode first
    if options.exitcode != 0:
        fail("dagman reported non-zero exitcode: %d" % options.exitcode)
    # check exitcodes of all tasks
    succeeded = exitcode(outfile)
    # if we know how many tasks to expect, check that they all succeeded
    if options.tasks and options.tasks>=0 and succeeded != options.tasks:
        fail("wrong number of successful tasks: wanted %d got %d" % \
            (options.tasks,succeeded))
    # If we reach this, then it was OK
    sys.exit(0)
if __name__ == "__main__":
main() | Python | 0.000195 | |
ae94990bc8b790b5307ccaee992f09fefc045692 | add Tester lockedNormal | python/medic/plugins/Tester/lockedNormal.py | python/medic/plugins/Tester/lockedNormal.py | from medic.core import testerBase
from maya import OpenMaya
class LockedNormal(testerBase.TesterBase):
    """Medic tester flagging mesh vertices that have locked normals.

    Match() limits the check to mesh nodes; Test() collects offending
    vertices into a component; Fix() unlocks their normals.
    """
    Name = "LockedNormal"
    Description = "vertex(s) which has locked normal"
    Fixable = True

    def __init__(self):
        super(LockedNormal, self).__init__()

    def Match(self, node):
        # Only nodes whose MObject supports the mesh function set apply.
        return node.object().hasFn(OpenMaya.MFn.kMesh)

    def Test(self, node):
        it = None
        mesh = None
        try:
            it = OpenMaya.MItMeshVertex(node.object())
            mesh = OpenMaya.MFnMesh(node.object())
        except:
            # Node could not be wrapped as a mesh: report nothing to fix.
            return (False, None)

        result = False

        # Accumulate offending vertices into a single vertex component.
        comp = OpenMaya.MFnSingleIndexedComponent()
        comp_obj = comp.create(OpenMaya.MFn.kMeshVertComponent)

        while (not it.isDone()):
            normal_indices = OpenMaya.MIntArray()
            it.getNormalIndices(normal_indices)
            # A vertex is flagged if ANY of its per-face normals is locked.
            for i in range(normal_indices.length()):
                if mesh.isNormalLocked(normal_indices[i]):
                    result = True
                    comp.addElement(it.index())
                    break
            it.next()

        return (result, comp_obj if result else None)

    def Fix(self, node, component, parameterParser):
        # Referenced nodes cannot be edited in this scene file.
        if node.dg().isFromReferencedFile():
            return False

        # Gather every normal index belonging to the flagged vertices, then
        # unlock them in one call.
        target_normal_indices = OpenMaya.MIntArray()
        mesh = OpenMaya.MFnMesh(node.object())

        it = OpenMaya.MItMeshVertex(node.getPath(), component)
        while (not it.isDone()):
            normal_indices = OpenMaya.MIntArray()
            it.getNormalIndices(normal_indices)
            for i in range(normal_indices.length()):
                target_normal_indices.append(normal_indices[i])
            it.next()

        mesh.unlockVertexNormals(target_normal_indices)

        return True


# Entry point class picked up by the medic plugin loader.
Tester = LockedNormal
| Python | 0 | |
6852efdb02925a4e300611b49a693c59ca13e7b0 | Add wrapper for new vulkan worker (#6) | python/src/main/python/drivers/worker_vk.py | python/src/main/python/drivers/worker_vk.py | #!/usr/bin/env python3
# Copyright 2018 The GraphicsFuzz Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import shutil
import sys
import time
import subprocess
from subprocess import CalledProcessError
HERE = os.path.abspath(__file__)
# Set path to higher-level directory for access to dependencies
sys.path.append(
os.path.dirname(os.path.dirname(HERE))
)
import vulkanize
from fuzzer_service import FuzzerService
import fuzzer_service.ttypes as tt
from thrift.transport import THttpClient, TTransport
from thrift.Thrift import TApplicationException
from thrift.protocol import TBinaryProtocol
################################################################################
def writeToFile(content, filename):
    """Write *content* to *filename*, replacing any existing file."""
    out = open(filename, 'w')
    try:
        out.write(content)
    finally:
        out.close()
################################################################################
def remove(f):
    """Delete *f* whether it is a regular file or a directory tree.

    Paths that are neither (including nonexistent ones) are ignored.
    """
    if os.path.isfile(f):
        os.remove(f)
    elif os.path.isdir(f):
        shutil.rmtree(f)
################################################################################
def adb(adbargs, serial=None):
    """Run an adb command, optionally targeting a specific device *serial*.

    Returns the subprocess return code. The command line is assembled by
    string concatenation and executed with shell=True, so arguments are
    assumed to be trusted (they come from this script / its CLI flags).
    """
    adbcmd = 'adb'
    if serial:
        adbcmd += ' -s {}'.format(serial)
    adbcmd += ' ' + adbargs
    p = subprocess.run(adbcmd, shell=True)
    return p.returncode
################################################################################
def prepareVertFile():
    """Write a default pass-through vertex shader (test.vert) to the CWD.

    Existing files are left untouched so a caller-provided shader wins.
    """
    vertFilename = 'test.vert'
    vertFileDefaultContent = '''#version 310 es
layout(location=0) in highp vec4 a_position;
void main (void) {
  gl_Position = a_position;
}
'''
    if not os.path.isfile(vertFilename):
        writeToFile(vertFileDefaultContent, vertFilename)
################################################################################
def getImageVulkanAndroid(frag):
    """Render fragment shader file *frag* on the attached Android device.

    Pushes the vulkanized shader files to the device, launches the sample
    app, polls (up to 10s) for the DONE marker file, then pulls image.ppm
    and converts it to image.png. Returns True on success, False on timeout.
    """
    print('## ' + frag)

    # Start from a clean slate locally and on the device.
    remove('image.ppm')
    remove('image.png')

    vulkanize.vulkanize(frag, 'test')

    adb('shell rm -rf /sdcard/graphicsfuzz/*')
    prepareVertFile()
    adb('push test.vert test.frag test.json /sdcard/graphicsfuzz/')

    # clean logcat
    adb('logcat -b crash -b system -c')

    runtestcmd = 'shell am start'
    runtestcmd += ' -n vulkan.samples.T15_draw_cube/android.app.NativeActivity'

    print('* Will run: ' + runtestcmd)
    adb(runtestcmd)

    # Wait for DONE file, or timeout
    timeoutSec = 10
    deadline = time.time() + timeoutSec

    done = False
    while time.time() < deadline:
        retcode = adb('shell test -f /sdcard/graphicsfuzz/DONE')
        if retcode == 0:
            done = True
            break
        else:
            time.sleep(0.1)

    if not done:
        return False

    # Get the image
    adb('pull /sdcard/graphicsfuzz/image.ppm')

    # convert it (ImageMagick must be on PATH)
    retcode = subprocess.run('convert image.ppm image.png', shell=True)

    return True
################################################################################
def doImageJob(imageJob):
    """Execute one image job on the device and build its thrift result.

    Writes the job's shader and uniforms to disk, renders on the device, and
    returns a tt.ImageJobResult whose status is SUCCESS (with PNG bytes) or
    CRASH (with the adb logcat appended to the log).
    """
    name = imageJob.name.replace('.frag','')
    fragFile = name + '.frag'
    jsonFile = name + '.json'
    png = 'image.png'

    res = tt.ImageJobResult()

    # Set nice defaults to fields we will not update anyway
    res.passSanityCheck = True
    res.log = 'Start: ' + name + '\n'

    writeToFile(imageJob.fragmentSource, fragFile)
    writeToFile(imageJob.uniformsInfo, jsonFile)
    # Stale image from an earlier job must not be mistaken for this one's.
    if os.path.isfile(png):
        os.remove(png)

    getimage = getImageVulkanAndroid(fragFile)

    if not getimage:
        res.status = tt.JobStatus.CRASH
        # adb log: capture crash/system buffers for diagnosis.
        adb('logcat -b crash -b system -d > logcat.txt')

        res.log += '\n#### ADB LOGCAT START\n'
        with open('logcat.txt', 'r') as f:
            res.log += f.read()
        res.log += '\n#### ADB LOGCAT END\n'

    else:
        res.status = tt.JobStatus.SUCCESS
        with open(png, 'rb') as f:
            res.PNG = f.read()

    return res
################################################################################
def get_service(server, args):
    """Connect to the fuzzer server and register this worker.

    Opens a thrift-over-HTTP transport, requests a worker token (exiting on
    a token error), creates a working directory named after the token and
    chdirs into it. Returns (service, token), or (None, None) when the
    server is unreachable.
    """
    try:
        httpClient = THttpClient.THttpClient(server)
        transport = TTransport.TBufferedTransport(httpClient)
        protocol = TBinaryProtocol.TBinaryProtocol(transport)
        service = FuzzerService.Client(protocol)
        transport.open()

        # Get token
        # TODO: grab information from worker
        platforminfo = '''
        {
          "clientplatform": "Wrapper on vulkan"
        }
        '''

        tryToken = args.token
        print("Call getToken()")
        tokenRes = service.getToken(platforminfo, tryToken)
        assert type(tokenRes) != None
        if tokenRes.token == None:
            print('Token error: ' + tt.TokenError._VALUES_TO_NAMES[tokenRes.error])
            exit(1)
        token = tokenRes.token

        print("Got token: " + token)
        assert(token == args.token)

        if not os.path.exists(args.token):
            os.makedirs(args.token)

        # Set working dir: all job files are created under the token dir.
        os.chdir(args.token)

        return service, token

    except (TApplicationException, ConnectionRefusedError, ConnectionResetError) as exception:
        return None, None
################################################################################
# Main

parser = argparse.ArgumentParser()

parser.add_argument(
    'token',
    help='Worker token to identify to the server')

parser.add_argument(
    '--adbID',
    help='adb (Android Debug Bridge) ID of the device to run tests on. Run "adb devices" to list these IDs')

parser.add_argument(
    '--server',
    default='http://localhost:8080',
    help='Server URL (default: http://localhost:8080 )')

args = parser.parse_args()

print('token: ' + args.token)

server = args.server + '/request'
print('server: ' + server)

# Set device ID: adb reads ANDROID_SERIAL to pick the target device.
if args.adbID:
    os.environ["ANDROID_SERIAL"] = args.adbID

# Prepare device
adb('shell mkdir -p /sdcard/graphicsfuzz/')

service = None

# Main loop: fetch jobs from the server, execute them, report results.
# Reconnects (with a 1s backoff) whenever the server becomes unreachable.
while True:

    if not(service):
        service, token = get_service(server, args)

        if not(service):
            print("Cannot connect to server, retry in a second...")
            time.sleep(1)
            continue

    try:
        job = service.getJob(token)

        if job.noJob != None:
            print("No job")

        elif job.skipJob != None:
            # Server asked to skip: acknowledge without doing any work.
            print("Skip job")
            service.jobDone(token, job)

        else:
            assert(job.imageJob != None)
            print("#### Image job: " + job.imageJob.name)
            job.imageJob.result = doImageJob(job.imageJob)
            print("Send back, results status: {}".format(job.imageJob.result.status))
            service.jobDone(token, job)

    except (TApplicationException, ConnectionError) as exception:
        print("Connection to server lost. Re-initialising client.")
        service = None

    time.sleep(1)
| Python | 0 | |
088ec16cf33d4be4b396976d9e9ab1a5f17045fc | make contrib an app | adhocracy4/contrib/apps.py | adhocracy4/contrib/apps.py | from django.apps import AppConfig
class OrganisationsConfig(AppConfig):
    # Django AppConfig for the adhocracy4 contrib app.
    # NOTE(review): the class name says "Organisations" but name/label below
    # configure the contrib app — looks like a copy-paste leftover. Renaming
    # the class would require updating any references to it (e.g. settings),
    # so it is only flagged here.
    name = 'adhocracy4.contrib'
    label = 'a4contrib'
e020f81593268899a04cce726823c512b8b54762 | copy over the PlotContainerEditor to the more appropriately named and located ComponentEditor. | enthought/enable2/component_editor.py | enthought/enable2/component_editor.py | """ Defines a Traits editor for displaying an Enable component.
"""
#-------------------------------------------------------------------------------
# Written by: David C. Morrill
# Date: 01/26/2007
# (c) Copyright 2007 by Enthought, Inc.
#----------------------------------------------------------------------------
from enthought.enable2.api import ColorTrait
from enthought.etsconfig.api import ETSConfig
from enthought.traits.ui.api import BasicEditorFactory
if ETSConfig.toolkit == 'wx':
from enthought.traits.ui.wx.editor import Editor
from enthought.enable2.wx_backend.api import Window
elif ETSConfig.toolkit == 'qt4':
from enthought.traits.ui.qt4.editor import Editor
from enthought.enable2.qt4_backend.api import Window
else:
Editor = object
Window = None
class _ComponentEditor( Editor ):
#---------------------------------------------------------------------------
# Trait definitions:
#---------------------------------------------------------------------------
# The plot editor is scrollable (overrides Traits UI Editor).
scrollable = True
#---------------------------------------------------------------------------
# Finishes initializing the editor by creating the underlying toolkit
# widget:
#---------------------------------------------------------------------------
def init( self, parent ):
""" Finishes initializing the editor by creating the underlying toolkit
widget.
"""
self._window = Window( parent, component=self.value )
self.control = self._window.control
self._window.bg_color = self.factory.bgcolor
#---------------------------------------------------------------------------
# Updates the editor when the object trait changes externally to the editor:
#---------------------------------------------------------------------------
def update_editor( self ):
""" Updates the editor when the object trait changes externally to the
editor.
"""
pass
class ComponentEditor( BasicEditorFactory ):
""" wxPython editor factory for Enable components.
"""
#---------------------------------------------------------------------------
# Trait definitions:
#---------------------------------------------------------------------------
# The class used to create all editor styles (overrides BasicEditorFactory).
klass = _ComponentEditor
# The background color for the window
bgcolor = ColorTrait('sys_window')
| Python | 0 | |
06164dbeb1ec113b24ca25a41e624793d878875f | implement a transferrable voting algorithm | instant_runoff_voting.py | instant_runoff_voting.py | from collections import defaultdict, Counter
def runoff(voters):
    """
    Calculate an election winner from a list of voter selections using an
    Instant Runoff Voting algorithm. https://en.wikipedia.org/wiki/Instant-runoff_voting

    Each ballot in *voters* lists candidates in descending order of
    preference. Each round, every ballot counts for its highest-ranked
    candidate still in the running. A candidate with more than half the
    votes wins. Otherwise every candidate tied for the fewest votes is
    eliminated and the ballots are re-tallied. If all remaining candidates
    are tied, there is no winner.

    Returns the symbol of the winning candidate, or None for a complete tie.

    (The previous implementation tallied column N of every ballot in round
    N — accumulating totals across rounds — instead of each voter's highest
    remaining preference, and could compare against None in max(); this
    version follows the documented IRV procedure.)
    """
    eliminated = set()
    while True:
        # Each ballot counts for its top-ranked still-standing candidate.
        tally = Counter(
            next(choice for choice in ballot if choice not in eliminated)
            for ballot in voters
        )
        total = sum(tally.values())
        leader = max(tally, key=tally.get)
        if tally[leader] * 2 > total:
            return leader
        fewest = min(tally.values())
        if fewest == tally[leader]:
            # Every remaining candidate has the same count: complete tie.
            return None
        # Eliminate ALL candidates tied for last place. The leader always
        # survives (fewest < leader's count), so no ballot ever exhausts.
        eliminated.update(c for c, votes in tally.items() if votes == fewest)
voters = [
['c', 'a', 'b', 'd', 'e'],
['b', 'e', 'd', 'c', 'a'],
['b', 'e', 'c', 'a', 'd'],
['d', 'b', 'c', 'a', 'e'],
['c', 'b', 'd', 'a', 'e']
]
assert(runoff(voters) == "b")
| Python | 0 | |
5d6ef1cf969bac9fb53db0224eebdeb4a1bb6ff0 | Update app/exceptions/__init__.py | app/exceptions/__init__.py | app/exceptions/__init__.py |
class BadConfigurationError(Exception):
    """Application error signalling a bad configuration."""


class ClientUnavailableError(Exception):
    """Application error signalling an unavailable client."""


class ClusterNotConfiguredError(Exception):
    """Application error signalling a cluster that is not configured."""
| Python | 0 | |
0efb59e8d1bef5a1d8e5e3eb7ffddf09f5b8943a | Add tests to LoadCommand | jarbas/core/tests/test_load_command.py | jarbas/core/tests/test_load_command.py | from unittest.mock import Mock, patch
from django.test import TestCase
from jarbas.core.management.commands import LoadCommand
from jarbas.core.models import Activity
from jarbas.core.tests import sample_activity_data
class TestStaticMethods(TestCase):
    """Tests for LoadCommand helpers that need no database state."""

    def setUp(self):
        self.cmd = LoadCommand()

    def test_get_file_name(self):
        # File name is the configured dataset date plus the dataset suffix.
        expected = '1970-01-01-ahoy.xz'
        with self.settings(AMAZON_S3_DATASET_DATE='1970-01-01'):
            self.assertEqual(expected, self.cmd.get_file_name('ahoy'))

    def test_get_model_name(self):
        self.assertEqual('Activity', self.cmd.get_model_name(Activity))
class TestPrintCount(TestCase):
    """Tests for LoadCommand.print_count's progress output."""

    def setUp(self):
        self.cmd = LoadCommand()

    @patch('jarbas.core.management.commands.print')
    def test_print_no_records(self, mock_print):
        # With no rows the count is zero; end='\r' keeps the line in place.
        self.cmd.print_count(Activity)
        arg = 'Current count: 0 Activitys                                    '
        kwargs = {'end': '\r'}
        mock_print.assert_called_with(arg, **kwargs)

    @patch('jarbas.core.management.commands.print')
    def test_print_with_records(self, mock_print):
        Activity.objects.create(**sample_activity_data)
        self.cmd.print_count(Activity)
        arg = 'Current count: 1 Activitys                                    '
        kwargs = {'end': '\r'}
        mock_print.assert_called_with(arg, **kwargs)

    @patch('jarbas.core.management.commands.print')
    def test_print_with_permanent_keyword_arg(self, mock_print):
        # permanent=True prints a normal newline instead of carriage return.
        self.cmd.print_count(Activity, permanent=True)
        arg = 'Current count: 0 Activitys                                    '
        kwargs = {'end': '\n'}
        mock_print.assert_called_with(arg, **kwargs)
class TestDropAll(TestCase):
    """Test that LoadCommand.drop_all deletes every record of a model."""

    def test_drop_all(self):
        self.assertEqual(0, Activity.objects.count())
        Activity.objects.create(**sample_activity_data)
        self.assertEqual(1, Activity.objects.count())
        LoadCommand().drop_all(Activity)
        self.assertEqual(0, Activity.objects.count())
class TestLocalMethods(TestCase):
    """Tests for loading a dataset from a local source directory."""

    def setUp(self):
        self.cmd = LoadCommand()
        self.source = '/whatever/works'
        self.name = 'ahoy'

    def test_get_path(self):
        # Path is <source>/<date>-<name>.xz built from settings.
        expected = '/whatever/works/1970-01-01-ahoy.xz'
        with self.settings(AMAZON_S3_DATASET_DATE='1970-01-01'):
            result = self.cmd.get_path(self.source, self.name)
            self.assertEqual(expected, result)

    @patch('jarbas.core.management.commands.print')
    @patch('jarbas.core.management.commands.os.path.exists')
    def test_load_local_exists(self, mock_exists, mock_print):
        # When the file exists, load_local returns its path (a str).
        mock_exists.return_value = True
        self.assertIsInstance(self.cmd.load_local(self.source, self.name), str)

    @patch('jarbas.core.management.commands.print')
    @patch('jarbas.core.management.commands.os.path.exists')
    def test_load_local_fail(self, mock_exists, mock_print):
        # Missing file yields a falsy result.
        mock_exists.return_value = False
        self.assertFalse(self.cmd.load_local(self.source, self.name))
class TestRemoteMethods(TestCase):
    """Tests for building S3 URLs and downloading remote datasets."""

    def setUp(self):
        self.cmd = LoadCommand()
        self.name = 'ahoy'
        self.url = 'https://south.amazonaws.com/jarbas/1970-01-01-ahoy.xz'
        # S3 settings shared by both tests below.
        self.custom_settings = {
            'AMAZON_S3_DATASET_DATE': '1970-01-01',
            'AMAZON_S3_REGION': 'south',
            'AMAZON_S3_BUCKET': 'jarbas'
        }

    def test_get_url(self):
        with self.settings(**self.custom_settings):
            result = self.cmd.get_url(self.name)
            self.assertEqual(self.url, result)

    @patch('jarbas.core.management.commands.print')
    @patch('jarbas.core.management.commands.urlretrieve')
    def test_load_remote(self, mock_urlretrieve, mock_print):
        # load_remote must fetch the computed URL and return a path string.
        with self.settings(**self.custom_settings):
            result = self.cmd.load_remote(self.name)
            self.assertEqual(self.url, mock_urlretrieve.call_args[0][0])
            self.assertIsInstance(result, str)
class TestAddArguments(TestCase):
    """Test that the command registers its expected CLI arguments."""

    def test_add_arguments(self):
        mock = Mock()
        LoadCommand().add_arguments(mock)
        # Two arguments are expected to be registered on the parser.
        self.assertEqual(2, mock.add_argument.call_count)
8affeda715b1facf12de1dab1d445bbe54616306 | Fix JSON serialisation problem with AJAX basket | oscar/core/ajax.py | oscar/core/ajax.py | import six
from django.contrib import messages
from six.moves import map
class FlashMessages(object):
    """
    Intermediate container for flash messages.

    This is useful as, at the time of creating the message, we don't know
    whether the response is an AJAX response or not.
    """
    def __init__(self):
        # Maps message level (e.g. messages.INFO) -> list of messages.
        self.msgs = {}

    def add_message(self, level, message):
        """Queue a single message under the given level."""
        self.msgs.setdefault(level, []).append(message)

    def add_messages(self, level, messages):
        """Queue several messages under the same level."""
        for msg in messages:
            self.add_message(level, msg)

    def info(self, message):
        self.add_message(messages.INFO, message)

    def warning(self, message):
        self.add_message(messages.WARNING, message)

    def error(self, message):
        self.add_message(messages.ERROR, message)

    def success(self, message):
        self.add_message(messages.SUCCESS, message)

    def to_json(self):
        """Return {tag: [str, ...]} suitable for JSON serialization.

        The inner list must be a real list of strings (not a lazy map
        object) so json encoding works on Python 3.
        """
        payload = {}
        for level, msgs in self.msgs.items():
            tag = messages.DEFAULT_TAGS.get(level, 'info')
            payload[tag] = [six.text_type(msg) for msg in msgs]
        return payload

    def apply_to_request(self, request):
        """Flush all queued messages into Django's messages framework."""
        for level, msgs in self.msgs.items():
            for msg in msgs:
                messages.add_message(request, level, msg)
| import six
from django.contrib import messages
from six.moves import map
class FlashMessages(object):
    """
    Intermediate container for flash messages.

    This is useful as, at the time of creating the message, we don't know
    whether the response is an AJAX response or not.
    """
    def __init__(self):
        # Maps message level (e.g. messages.INFO) -> list of messages.
        self.msgs = {}

    def add_message(self, level, message):
        """Queue a single message under the given level."""
        self.msgs.setdefault(level, []).append(message)

    def add_messages(self, level, messages):
        """Queue several messages under the same level."""
        for msg in messages:
            self.add_message(level, msg)

    def info(self, message):
        self.add_message(messages.INFO, message)

    def warning(self, message):
        self.add_message(messages.WARNING, message)

    def error(self, message):
        self.add_message(messages.ERROR, message)

    def success(self, message):
        self.add_message(messages.SUCCESS, message)

    def to_json(self):
        """Return {tag: [str, ...]} suitable for JSON serialization."""
        payload = {}
        for level, msgs in self.msgs.items():
            tag = messages.DEFAULT_TAGS.get(level, 'info')
            # Materialize a real list: under Python 3, map() returns an
            # iterator, which the json encoder cannot serialize.
            payload[tag] = [six.text_type(msg) for msg in msgs]
        return payload

    def apply_to_request(self, request):
        """Flush all queued messages into Django's messages framework."""
        for level, msgs in self.msgs.items():
            for msg in msgs:
                messages.add_message(request, level, msg)
| Python | 0 |
63d22058d15a11fad7232683630976d472997c33 | Add planetary time recipe | recipes/planetarytime.py | recipes/planetarytime.py | """
Author: João Ventura <flatangleweb@gmail.com>
This recipe shows sample code for handling
planetary times.
"""
from flatlib.datetime import Datetime
from flatlib.geopos import GeoPos
from flatlib.tools import planetarytime
# Build a date (UTC offset +00:00) and a geographic location
date = Datetime('2015/03/13', '17:00', '+00:00')
pos = GeoPos('38n32', '8w54')
# Get the planetary hour table for that date and place
hourTable = planetarytime.getHourTable(date, pos)
print(hourTable.dayRuler())    # Venus
print(hourTable.nightRuler())  # Mars
print(hourTable.hourRuler())   # Saturn
# Use the info dict to inspect the current planetary hour
info = hourTable.currInfo()
print(info['hourNumber'])   # 11
print(info['start'])        # <2015/03/13 16:42:10 00:00:00>
print(info['end'])          # <2015/03/13 17:41:20 00:00:00>
98c863c9d45bdfb328a5e79d0928c8b9694bc753 | Remove redundant import | check_mesos.py | check_mesos.py | #!/usr/bin/env python
import nagiosplugin
import argparse
import logging
import re
INFINITY = float('inf')
HEALTHY = 1
UNHEALTHY = -1
try:
from urllib2 import *
except ImportError:
from urllib.request import *
from urllib.error import HTTPError
try:
import json
except ImportError:
import simplejson as json
class MesosMaster(nagiosplugin.Resource):
    """Nagios resource that probes a Mesos master over its HTTP API."""
    def __init__(self, baseuri, frameworks):
        # baseuri: base URL of the master, eg 'http://host:5050'.
        # frameworks: regexes that must match a registered framework name.
        self.baseuri = baseuri
        self.frameworks = frameworks
    def probe(self):
        """Yield metrics for master health, slaves, leader and frameworks."""
        logging.info('Base URI is %s', self.baseuri)
        try:
            response = urlopen(self.baseuri + '/health')
            logging.debug('Response from %s is %s', response.geturl(), response)
            if response.getcode() in [200, 204]:
                yield nagiosplugin.Metric('master health', HEALTHY)
            else:
                yield nagiosplugin.Metric('master health', UNHEALTHY)
        except HTTPError, e:
            logging.debug('HTTP error %s', e)
            yield nagiosplugin.Metric('master health', UNHEALTHY)
        response = urlopen(self.baseuri + '/master/state.json')
        logging.debug('Response from %s is %s', response.geturl(), response)
        state = json.load(response)
        has_leader = len(state.get('leader', '')) > 0
        yield nagiosplugin.Metric('active slaves', state['activated_slaves'])
        yield nagiosplugin.Metric('active leader', 1 if has_leader else 0)
        for framework_regex in self.frameworks:
            # The last framework whose name matches the regex wins.
            framework = None
            for candidate in state['frameworks']:
                if re.search(framework_regex, candidate['name']) is not None:
                    framework = candidate
            # INFINITY means "not registered"; 0 unregistered_time on an
            # inactive framework is also treated as never registered.
            unregistered_time = INFINITY
            if framework is not None:
                unregistered_time = framework['unregistered_time']
                if not framework['active'] and unregistered_time == 0:
                    unregistered_time = INFINITY
            yield nagiosplugin.Metric('framework ' + framework_regex, unregistered_time, context='framework')
@nagiosplugin.guarded
def main():
    """Parse command-line options and run the Nagios check."""
    argp = argparse.ArgumentParser()
    argp.add_argument('-H', '--host', required=True,
                      help='The hostname of a Mesos master to check')
    argp.add_argument('-P', '--port', default=5050,
                      help='The Mesos master HTTP port - defaults to 5050')
    argp.add_argument('-n', '--slaves', default=1,
                      help='The minimum number of slaves the cluster must be running')
    argp.add_argument('-F', '--framework', default=[], action='append',
                      help='Check that a framework is registered matching the given regex, may be specified multiple times')
    argp.add_argument('-v', '--verbose', action='count', default=0,
                      help='increase output verbosity (use up to 3 times)')
    args = argp.parse_args()
    # Health metric must equal HEALTHY; slave count must be at least -n.
    unhealthy_range = nagiosplugin.Range('%d:%d' % (HEALTHY - 1, HEALTHY + 1))
    slave_range = nagiosplugin.Range('%s:' % (args.slaves,))
    check = nagiosplugin.Check(
        MesosMaster('http://%s:%d' % (args.host, args.port), args.framework),
        nagiosplugin.ScalarContext('master health', unhealthy_range, unhealthy_range),
        nagiosplugin.ScalarContext('active slaves', slave_range, slave_range),
        nagiosplugin.ScalarContext('active leader', '1:1', '1:1'),
        nagiosplugin.ScalarContext('framework', '0:0', '0:0'))
    check.main(verbose=args.verbose)

if __name__ == '__main__':
    main()
| #!/usr/bin/env python
import nagiosplugin
import urllib2
import argparse
import logging
import re
INFINITY = float('inf')
HEALTHY = 1
UNHEALTHY = -1
try:
from urllib2 import *
except ImportError:
from urllib.request import *
from urllib.error import HTTPError
try:
import json
except ImportError:
import simplejson as json
class MesosMaster(nagiosplugin.Resource):
    """Nagios resource that probes a Mesos master over its HTTP API."""
    def __init__(self, baseuri, frameworks):
        # baseuri: base URL of the master, eg 'http://host:5050'.
        # frameworks: regexes that must match a registered framework name.
        self.baseuri = baseuri
        self.frameworks = frameworks
    def probe(self):
        """Yield metrics for master health, slaves, leader and frameworks."""
        logging.info('Base URI is %s', self.baseuri)
        try:
            response = urlopen(self.baseuri + '/health')
            logging.debug('Response from %s is %s', response.geturl(), response)
            if response.getcode() in [200, 204]:
                yield nagiosplugin.Metric('master health', HEALTHY)
            else:
                yield nagiosplugin.Metric('master health', UNHEALTHY)
        except HTTPError, e:
            logging.debug('HTTP error %s', e)
            yield nagiosplugin.Metric('master health', UNHEALTHY)
        response = urlopen(self.baseuri + '/master/state.json')
        logging.debug('Response from %s is %s', response.geturl(), response)
        state = json.load(response)
        has_leader = len(state.get('leader', '')) > 0
        yield nagiosplugin.Metric('active slaves', state['activated_slaves'])
        yield nagiosplugin.Metric('active leader', 1 if has_leader else 0)
        for framework_regex in self.frameworks:
            # The last framework whose name matches the regex wins.
            framework = None
            for candidate in state['frameworks']:
                if re.search(framework_regex, candidate['name']) is not None:
                    framework = candidate
            # INFINITY means "not registered"; 0 unregistered_time on an
            # inactive framework is also treated as never registered.
            unregistered_time = INFINITY
            if framework is not None:
                unregistered_time = framework['unregistered_time']
                if not framework['active'] and unregistered_time == 0:
                    unregistered_time = INFINITY
            yield nagiosplugin.Metric('framework ' + framework_regex, unregistered_time, context='framework')
@nagiosplugin.guarded
def main():
    """Parse command-line options and run the Nagios check."""
    argp = argparse.ArgumentParser()
    argp.add_argument('-H', '--host', required=True,
                      help='The hostname of a Mesos master to check')
    argp.add_argument('-P', '--port', default=5050,
                      help='The Mesos master HTTP port - defaults to 5050')
    argp.add_argument('-n', '--slaves', default=1,
                      help='The minimum number of slaves the cluster must be running')
    argp.add_argument('-F', '--framework', default=[], action='append',
                      help='Check that a framework is registered matching the given regex, may be specified multiple times')
    argp.add_argument('-v', '--verbose', action='count', default=0,
                      help='increase output verbosity (use up to 3 times)')
    args = argp.parse_args()
    # Health metric must equal HEALTHY; slave count must be at least -n.
    unhealthy_range = nagiosplugin.Range('%d:%d' % (HEALTHY - 1, HEALTHY + 1))
    slave_range = nagiosplugin.Range('%s:' % (args.slaves,))
    check = nagiosplugin.Check(
        MesosMaster('http://%s:%d' % (args.host, args.port), args.framework),
        nagiosplugin.ScalarContext('master health', unhealthy_range, unhealthy_range),
        nagiosplugin.ScalarContext('active slaves', slave_range, slave_range),
        nagiosplugin.ScalarContext('active leader', '1:1', '1:1'),
        nagiosplugin.ScalarContext('framework', '0:0', '0:0'))
    check.main(verbose=args.verbose)

if __name__ == '__main__':
    main()
| Python | 0.001503 |
92c8afbb5131374611fb21b4da0b0af1a2f37a45 | add dummy test | tests/dummy.py | tests/dummy.py | import pytest
from pyannote.database import get_databases
def test_dummy():
    """Smoke test: get_databases() returns a list."""
    assert isinstance(get_databases(), list)
| Python | 0.999602 | |
0c8b7fa865df535f5baa33025c184bbf4234b7b1 | Create script to transform shapefile into csv distance matrix | shp_to_csv_distances.py | shp_to_csv_distances.py | """Create a csv matrix of distances between shapefile geometry objects.
Requirements: fiona, shapely
Written by: Taylor Denouden
Date: November 25, 2015
"""
import random
import fiona
from shapely.geometry import shape
from scripts.printer import print_progress
def main():
    """Main script execution.

    Writes a square distance matrix (one header row plus one row per
    feature id) to out.csv.
    """
    outfile = open("out.csv", "w")
    ids = extract_ids("data/high_polys.shp")
    # Write header
    print "Writing Header"
    outfile.write("NODE")
    for i in ids:
        outfile.write("," + i)
    outfile.write("\n")
    # Write rows
    print "Writing Rows"
    for i, j in enumerate(ids):
        # NOTE(review): under Python 2, i/len(ids) is integer division, so
        # the reported fraction stays 0 until the loop ends -- confirm.
        print_progress(i/len(ids))
        outfile.write(j)
        write_row_distances(j, ids, "data/high_polys.shp", outfile)
        outfile.write("\n")
    print_progress(1)
    print
def extract_ids(input_file):
    """Return the feature id of every record in the given shapefile."""
    with fiona.open(input_file, 'r') as source:
        ids = []
        for record in source:
            ids.append(record['id'])
        return ids
def write_row_distances(i, ids, input_file, outfile):
    """Write distances between shape with id i and all other shapes in ids."""
    with fiona.open(input_file, 'r') as source:
        source = list(source)
        # NOTE(review): indexing by int(i) assumes each feature id equals
        # its position in the shapefile -- verify for this dataset.
        i_shp = shape(source[int(i)]['geometry'])
        for j in ids:
            j_shp = shape(source[int(j)]['geometry'])
            if i_shp.is_valid and j_shp.is_valid:
                dist = i_shp.distance(j_shp)
            else:
                # Distance is not computed for invalid geometry; flag with -1.
                dist = -1
            outfile.write("," + str(dist))

if __name__ == "__main__":
    main()
6301b47bdcc4cb507a29583210f648c4e24834d6 | Add a utility script for decoding packet traces | util/decode_packet_trace.py | util/decode_packet_trace.py | #!/usr/bin/env python
# Copyright (c) 2013 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
#
# This script is used to dump protobuf packet traces to ASCII
# format. It assumes that protoc has been executed and already
# generated the Python package for the packet messages. This can
# be done manually using:
# protoc --python_out=. --proto_path=src/proto src/proto/packet.proto
#
# The ASCII trace format uses one line per request on the format cmd,
# addr, size, tick,flags. For example:
# r,128,64,4000,0
# w,232123,64,500000,0
import struct
import sys
import packet_pb2
def DecodeVarint(in_file):
    """
    Read one protobuf varint from *in_file* and return (value, nbytes).

    The algorithm mirrors google.protobuf.internal.decoder so we avoid
    depending on the library's private helpers.  On end of file the pair
    (0, 0) is returned; final values are truncated to 32 bits.
    """
    mask = 0xffffffff
    value = 0
    nbytes = 0
    shift = 0
    while True:
        byte_str = in_file.read(1)
        if not byte_str:
            # Hit end of stream before a terminating byte.
            return (0, 0)
        (byte,) = struct.unpack('<B', byte_str)
        value |= (byte & 0x7f) << shift
        nbytes += 1
        if byte & 0x80 == 0:
            # High bit clear: this was the last byte of the varint.
            if value > 0x7fffffffffffffff:
                # Interpret as a negative 64-bit value, sign-extended.
                value -= 1 << 64
                value |= ~mask
            else:
                value &= mask
            return (value, nbytes)
        shift += 7
        if shift >= 64:
            raise IOError('Too many bytes when decoding varint.')
def decodeMessage(in_file, message):
    """
    Read one length-prefixed protobuf message from *in_file* into
    *message*.  Return True on success and False when no message could
    be read (end of file, zero length, or an I/O error).
    """
    try:
        length, _ = DecodeVarint(in_file)
        if length == 0:
            return False
        message.ParseFromString(in_file.read(length))
    except IOError:
        return False
    return True
def main():
    """Convert a gem5 protobuf packet trace to the ASCII format."""
    if len(sys.argv) != 3:
        print "Usage: ", sys.argv[0], " <protobuf input> <ASCII output>"
        exit(-1)
    try:
        proto_in = open(sys.argv[1], 'rb')
    except IOError:
        print "Failed to open ", sys.argv[1], " for reading"
        exit(-1)
    try:
        ascii_out = open(sys.argv[2], 'w')
    except IOError:
        print "Failed to open ", sys.argv[2], " for writing"
        exit(-1)
    # Read the magic number in 4-byte Little Endian
    magic_number = proto_in.read(4)
    if magic_number != "gem5":
        print "Unrecognized file"
        exit(-1)
    print "Parsing packet header"
    # Add the packet header
    header = packet_pb2.PacketHeader()
    decodeMessage(proto_in, header)
    print "Object id:", header.obj_id
    print "Tick frequency:", header.tick_freq
    print "Parsing packets"
    num_packets = 0
    ignored_flags = False
    packet = packet_pb2.Packet()
    # Decode the packet messages until we hit the end of the file
    while decodeMessage(proto_in, packet):
        num_packets += 1
        # ReadReq is 1 and WriteReq is 4 in src/mem/packet.hh Command enum
        cmd = 'r' if packet.cmd == 1 else ('w' if packet.cmd == 4 else 'u')
        if packet.HasField('flags'):
            # Currently not printing flags
            ignored_flags = True
        ascii_out.write('%s,%s,%s,%s\n' % (cmd, packet.addr, packet.size,
                                           packet.tick))
    print "Parsed packets:", num_packets
    if ignored_flags:
        print "Encountered packet flags that were ignored"
    # We're done
    ascii_out.close()
    proto_in.close()

if __name__ == "__main__":
    main()
| Python | 0 | |
9a4dd1c0c51cf2732b50d5594b2a4bf661b8262f | Add geoip_lookup.py | geoip_lookup.py | geoip_lookup.py | import sys
if len(sys.argv) < 2:
print """geoip_lookup.py ---
"resolve" IP addresses to approximate geo-information
Usage:
python geoip_lookup.py IP [ GEOIP_SERVER ]
where IP is the address to resolve, and
GEOIP_SERVER is an optional GeoIP server to contact.
(The Seattle network testbed provides two GeoIP servers,
http://geoipserver.poly.edu:12679 and http://geoipserver2.poly.edu:12679 )
"""
sys.exit(0)
from repyportability import *
add_dy_support(locals())
geoip_client = dy_import_module("geoip_client.r2py")
try:
geoipserver = sys.argv[2]
geoip_client.geoip_init_client(url=geoipserver)
except IndexError:
geoip_client.geoip_init_client()
ip = sys.argv[1]
print "Address", ip, "is located in", geoip_client.geoip_record_by_addr(ip)
| Python | 0.000027 | |
24d1162740aa9a9948665d97dc082a555a1ccf13 | Rename initial_args to standard argv. | grip/command.py | grip/command.py | """\
grip.command
~~~~~~~~~~~~
Implements the command-line interface for Grip.
Usage:
grip [options] [<path>] [<address>]
grip -h | --help
grip --version
Where:
<path> is a file to render or a directory containing a README.md file
<address> is what to listen on, of the form <host>[:<port>], or just <port>
Options:
--gfm Use GitHub-Flavored Markdown, e.g. comments or issues
--context=<repo> The repository context, only taken into account with --gfm
"""
import sys
from path_and_address import resolve, split_address
from docopt import docopt
from .server import serve
from . import __version__
usage = '\n\n\n'.join(__doc__.split('\n\n\n')[1:])
def main(argv=None):
    """The entry point of the application.

    Parses options/arguments from *argv* (defaults to sys.argv[1:]) and
    starts the preview server.  Returns 0 on success, 1 on ValueError.
    """
    if argv is None:
        argv = sys.argv[1:]
    version = 'Grip ' + __version__
    # Parse options
    args = docopt(usage, argv=argv, version=version)
    # Parse arguments
    path, address = resolve(args['<path>'], args['<address>'])
    host, port = split_address(address)
    # Validate address
    # NOTE(review): an invalid address is only reported, not rejected --
    # execution still falls through to serve().  Confirm this is intended.
    if address and not host and not port:
        print 'Error: Invalid address', repr(address)
    # Run server
    try:
        serve(path, host, port, args['--gfm'], args['--context'])
        return 0
    except ValueError, ex:
        print 'Error:', ex
        return 1
| """\
grip.command
~~~~~~~~~~~~
Implements the command-line interface for Grip.
Usage:
grip [options] [<path>] [<address>]
grip -h | --help
grip --version
Where:
<path> is a file to render or a directory containing a README.md file
<address> is what to listen on, of the form <host>[:<port>], or just <port>
Options:
--gfm Use GitHub-Flavored Markdown, e.g. comments or issues
--context=<repo> The repository context, only taken into account with --gfm
"""
import sys
from path_and_address import resolve, split_address
from docopt import docopt
from .server import serve
from . import __version__
usage = '\n\n\n'.join(__doc__.split('\n\n\n')[1:])
def main(initial_args=None):
    """The entry point of the application.

    Parses options/arguments from *initial_args* (defaults to
    sys.argv[1:]) and starts the preview server.  Returns 0 on success,
    1 on ValueError.
    """
    if initial_args is None:
        initial_args = sys.argv[1:]
    version = 'Grip ' + __version__
    # Parse options
    args = docopt(usage, argv=initial_args, version=version)
    # Parse arguments
    path, address = resolve(args['<path>'], args['<address>'])
    host, port = split_address(address)
    # Validate address
    # NOTE(review): an invalid address is only reported, not rejected --
    # execution still falls through to serve().  Confirm this is intended.
    if address and not host and not port:
        print 'Error: Invalid address', repr(address)
    # Run server
    try:
        serve(path, host, port, args['--gfm'], args['--context'])
        return 0
    except ValueError, ex:
        print 'Error:', ex
        return 1
| Python | 0 |
cf357e46b3d9664325ca69f3b7c0393c89ad44a7 | Add some function tests. | tests/test_func.py | tests/test_func.py | from .utils import assert_eval
def test_simple_func():
    """Defining @a and then calling it evaluates to 1 (the def) then 8."""
    assert_eval('(def @a $a 8) (@a)', 1, 8)
def test_simple_func_args():
assert_eval(
'(def @a $a $a)'
'(@a 1)'
'(@a 2)'
'(@a 5)',
1,
1,
2,
5)
def test_func_args_overwrite_globals():
assert_eval(
'(def @a $a 3)'
'(set $a 10)'
'$a'
'(@a 8)'
'$a',
1,
10,
10,
3,
8,
)
def test_func_args_with_offset():
    """Arguments bind starting at the function's declared variable; the
    final register dump checks which variables were written (see the
    expected values for the layout)."""
    assert_eval(
        '(def @a $d (+ $d $i))'
        '(def @b $i (+ $i $j))'
        '(@a 1 2 3)'
        '(@b 8 9 10)'
        '$a\n$b\n$c\n$d\n$e\n$i\n$j\n$k\n',
        1, 1,
        4,
        17,
        0, 0, 0, 1, 2, 8, 9, 10,
    )
| Python | 0 | |
2628bfa261c9bb76f4d3742bbb36f1179d961c83 | add Pool and OrderedPool tests | tests/test_pool.py | tests/test_pool.py | import unittest
import greenhouse
import greenhouse.poller
from test_base import TESTING_TIMEOUT, StateClearingTestCase
class PoolTestCase(StateClearingTestCase):
    """Exercises greenhouse.Pool: put() work in, get() results out."""
    POOL = greenhouse.Pool
    def test_basic(self):
        """Results for 30 inputs come back (in any order)."""
        def f(x):
            return x ** 2
        pool = self.POOL(f)
        pool.start()
        for x in xrange(30):
            pool.put(x)
        l = []
        for x in xrange(30):
            l.append(pool.get())
        l.sort()
        assert l == [x ** 2 for x in xrange(30)]
        pool.close()
    def test_with_blocking(self):
        """Workers that pause mid-task still deliver all results."""
        def f(x):
            if x % 2:
                greenhouse.pause()
            return x ** 2
        pool = self.POOL(f)
        pool.start()
        for x in xrange(30):
            pool.put(x)
        l = []
        for x in xrange(30):
            l.append(pool.get())
        l.sort()
        assert l == [x ** 2 for x in xrange(30)]
        pool.close()
    def test_shuts_down(self):
        """After close(), queued items accumulate unprocessed."""
        def f(x):
            return x ** 2
        pool = self.POOL(f)
        pool.start()
        for x in xrange(30):
            pool.put(x)
        for x in xrange(30):
            pool.get()
        pool.close()
        for x in xrange(30):
            pool.put(x)
        greenhouse.pause()
        assert len(pool.inq.queue) == 30, len(pool.inq.queue)
    def test_as_context_manager(self):
        """The pool also works via the 'with' statement."""
        def f(x):
            return x ** 2
        with self.POOL(f) as pool:
            for x in xrange(30):
                pool.put(x)
            l = []
            for x in xrange(30):
                l.append(pool.get())
            l.sort()
            assert l == [x ** 2 for x in xrange(30)]
    def test_starting_back_up(self):
        """A closed pool can be start()ed again and keeps working."""
        def f(x):
            return x ** 2
        pool = self.POOL(f)
        pool.start()
        for x in xrange(30):
            pool.put(x)
        for x in xrange(30):
            pool.get()
        pool.close()
        pool.start()
        for x in xrange(30):
            pool.put(x)
        l = []
        for x in xrange(30):
            l.append(pool.get())
        l.sort()
        assert l == [x ** 2 for x in xrange(30)]
class OrderedPoolTestCase(PoolTestCase):
    """Runs every Pool test against OrderedPool, plus ordering checks:
    results must come back in the same order the inputs were put()."""
    POOL = greenhouse.OrderedPool
    def test_ordered_basic(self):
        """Results arrive in submission order (no sort before comparing)."""
        def f(x):
            return x ** 2
        pool = self.POOL(f)
        pool.start()
        for x in xrange(30):
            pool.put(x)
        l = []
        for x in xrange(30):
            l.append(pool.get())
        assert l == [x ** 2 for x in xrange(30)]
        pool.close()
    def test_ordered_with_blocking(self):
        """Order is preserved even when workers pause mid-task."""
        def f(x):
            if x % 2:
                greenhouse.pause()
            return x ** 2
        pool = self.POOL(f)
        pool.start()
        for x in xrange(30):
            pool.put(x)
        l = []
        for x in xrange(30):
            l.append(pool.get())
        assert l == [x ** 2 for x in xrange(30)]
        pool.close()
| Python | 0 | |
79f57f27824caa423ff873fdee3a9b8916ed410b | extract speech information for reps | import/parse/speeches.py | import/parse/speeches.py | """
parse data from govtrack.us
from: data/crawl/govtrack/people.xml
"""
import web
from xml.sax import make_parser, handler
class SpeechesXML(handler.ContentHandler):
    """SAX handler that extracts per-representative speech attributes."""
    def __init__(self,callback):
        # callback is invoked with each completed <representative> record.
        self.callback = callback
        self.current = None
    def startElement(self, name, attrs):
        if name == 'representative':
            # web.storage wraps the XML attributes for attribute access.
            self.current = web.storage(attrs)
            self.current.speech_data = (self.current.id,self.current.Speeches,
                                        self.current.WordsPerSpeech)
    def endElement(self, name):
        if name == 'representative':
            self.callback(self.current)
            self.current = None
def callback(rep):
    """Print the speech tuple, skipping reps with an empty Speeches field."""
    if rep.get('Speeches') != '':
        print rep.speech_data
def main(callback):
    """Parse speeches.xml, invoking *callback* once per representative."""
    parser = make_parser()
    parser.setContentHandler(SpeechesXML(callback))
    parser.parse('speeches.xml')

if __name__ == "__main__": main(callback)
| Python | 0.998676 | |
ec6dff24e3049ddaab392f0bc5b8d8b724e41e20 | Print the trending Python repos on GitHub | trending_python.py | trending_python.py | #!/usr/bin/env python3
import bs4
import requests
# GitHub's trending page filtered to Python repositories.
url = 'https://github.com/trending?l=Python'
soup = bs4.BeautifulSoup(requests.get(url).content, 'lxml')  # or 'html5lib'
# Repository anchors live inside the ordered repo list.
repos = soup.find('ol', class_="repo-list").find_all('a', href=True)
# Keep only links whose text contains '/', squashing whitespace so each
# entry prints as 'owner/name'.
repos = (r.text.strip().replace(' ', '') for r in repos if '/' in r.text)
print('\n'.join(repos))
| Python | 0.998715 | |
37691851b6e21a6a51140f512fd9802e964b0785 | Create beta_pythons_dynamic_classes_3.py | Solutions/beta/beta_pythons_dynamic_classes_3.py | Solutions/beta/beta_pythons_dynamic_classes_3.py | def create_class(class_name, secrets = None):
if not class_name: return None
class NewClass(object):
pass
NewClass.__name__ = class_name
if not secrets: return NewClass
for i in secrets:
if 'function' in str(type(secrets[i])):
setattr(NewClass, i, classmethod(secrets[i]))
else:
setattr(NewClass, i, secrets[i])
return NewClass
| Python | 0.000067 | |
32d9a97336c786660a838dc69cfab2ebe3436343 | update viafReconciliationPeople.py | viafReconciliationPeople.py | viafReconciliationPeople.py | import requests
import csv
from fuzzywuzzy import fuzz
import json
import urllib
baseURL = 'http://viaf.org/viaf/search/viaf?query=local.personalNames+%3D+%22'
# Output CSV: searched name, matched heading, linked identifiers, and four
# fuzzy-match scores plus their average.
f=csv.writer(open('viafPeopleResults.csv', 'wb'))
f.writerow(['search']+['result']+['viaf']+['lc']+['isni']+['ratio']+['partialRatio']+['tokenSort']+['tokenSet']+['avg'])
# people.txt holds one personal name per line.
with open('people.txt') as txt:
    for row in txt:
        print row
        # URL-encode the name for the VIAF SRU query.
        rowEdited = urllib.quote(row.decode('utf-8-sig').encode('utf-8').strip())
        url = baseURL+rowEdited+'%22+and+local.sources+%3D+%22lc%22&sortKeys=holdingscount&maximumRecords=1&httpAccept=application/rdf+json'
        response = requests.get(url).content
        try:
            # Slice the embedded JSON record out of the SRU XML envelope.
            response = response[response.index('<recordData xsi:type="ns1:stringOrXmlFragment">')+47:response.index('</recordData>')].replace('&quot;','"')
            response = json.loads(response)
            label = response['mainHeadings']['data'][0]['text']
            viafid = response['viafID']
        except:
            # No usable match; record empty label/id below.
            label = ''
            viafid = ''
        # Fuzzy-compare the query string against the matched heading.
        ratio = fuzz.ratio(row, label)
        partialRatio = fuzz.partial_ratio(row, label)
        tokenSort = fuzz.token_sort_ratio(row, label)
        tokenSet = fuzz.token_set_ratio(row, label)
        avg = (ratio+partialRatio+tokenSort+tokenSet)/4
        if viafid != '':
            # Fetch LC and ISNI identifiers linked to the VIAF record.
            links = json.loads(requests.get('http://viaf.org/viaf/'+viafid+'/justlinks.json').text)
            viafid = 'http://viaf.org/viaf/'+viafid
            try:
                lc = 'http://id.loc.gov/authorities/names/'+json.dumps(links['LC'][0]).replace('"','')
            except:
                lc = ''
            try:
                isni = 'http://isni.org/isni/'+json.dumps(links['ISNI'][0]).replace('"','')
            except:
                isni = ''
        else:
            lc = ''
            isni = ''
        f=csv.writer(open('viafPeopleResults.csv', 'a'))
        f.writerow([row.strip()]+[label]+[viafid]+[lc]+[isni]+[ratio]+[partialRatio]+[tokenSort]+[tokenSet]+[avg])
| Python | 0 | |
2934f80f294759ec202e0305025da2d7e71d3ae3 | Add plot_throughput.py. | problem/net_file_xfer_tput_174608/plot_throughput.py | problem/net_file_xfer_tput_174608/plot_throughput.py | #! /usr/bin/env python3
# Copyright 2017 John Hanley.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# The software is provided "AS IS", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall
# the authors or copyright holders be liable for any claim, damages or
# other liability, whether in an action of contract, tort or otherwise,
# arising from, out of or in connection with the software or the use or
# other dealings in the software.
from bs4 import BeautifulSoup
import datetime
import matplotlib.pyplot as plt
import os
import re
import requests
'''Summarizes data from codereview.stackexchange.com/questions/174608/.'''
def get_cached_pastebin_text(url):
    """Return the pastebin paste at *url* as a list of lines.

    The raw HTML is cached in the working directory so repeated runs do
    not re-download the page.
    """
    fspec = os.path.basename(url) + '.html'
    if not os.path.exists(fspec):
        r = requests.get(url)
        assert r.ok
        with open(fspec, 'w') as fout:
            fout.write(r.text)
    soup = BeautifulSoup(open(fspec).read(), 'html.parser')
    # The paste body is the element with id 'paste_code'.
    raw = str(soup.find(id='paste_code'))
    return raw.split('\n')
def hms(stamp):
    """Convert an 'HH:MM:SS' string into a datetime on today's date.

    Example: '12:00:00' -> noon today.
    """
    hour, minute, second = map(int, stamp.split(':'))
    now = datetime.date.today()
    return datetime.datetime(now.year, now.month, now.day,
                             hour, minute, second)
def get_progress(chunk_size, url='https://pastebin.com/ehncSeqD'):
    """Yield (timestamp, total_bytes) pairs parsed from the log paste.

    *chunk_size* is the byte size of one chunk; bytes within the current
    chunk are added to the completed-chunk total.
    """
    chunk_re = re.compile(
        r'^(\d{2}:\d{2}:\d{2}) - Chunk (\d+) of (\d+)')
    detail_re = re.compile(
        r'^(\d{2}:\d{2}:\d{2}) - Interconnect. (\d+) of (\d+)')
    cur_chunk = -1
    for line in get_cached_pastebin_text(url):
        m = chunk_re.search(line)
        if m:
            assert cur_chunk < int(m.group(2))  # strictly monotonic
            cur_chunk = int(m.group(2))
        m = detail_re.search(line)
        if m:
            assert chunk_size >= int(m.group(3))
            yield(hms(m.group(1)),
                  cur_chunk * chunk_size + int(m.group(2)))
def plot_tput(chunk_size=2e5, verbose=False):
    """Scatter-plot download progress (KB) against elapsed seconds."""
    prog = {}  # maps elapsed time to download progress (in bytes)
    start = None
    for stamp, bytes in get_progress(int(chunk_size)):
        if start:
            elapsed = int((stamp - start).total_seconds())
            # With limited resolution (1sec) timestamps, last measurement wins.
            prog[elapsed] = bytes
            if verbose:
                print(elapsed, bytes)
        else:
            # The first sample only establishes the time origin.
            start = stamp
    x = [p[0] for p in prog.items()]
    y = [p[1] / 1024.0 for p in prog.items()]  # total KBytes downloaded so far
    plt.scatter(x, y)
    plt.show()

if __name__ == '__main__':
    os.chdir('/tmp')
    plot_tput()
| Python | 0.000001 | |
9d1f8f6bfd59cf2e083276ef095618f8545c5167 | Add test to check packages for Python2.6 compatibility, as well as core. | lib/spack/spack/test/python_version.py | lib/spack/spack/test/python_version.py | ##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://scalability-llnl.github.io/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""
This test ensures that all Spack files are Python version 2.6 or less.
Spack was originally 2.7, but enough systems in 2014 are still using
2.6 on their frontend nodes that we need 2.6 to get adopted.
"""
import unittest
import os
import re
import llnl.util.tty as tty
from external import pyqver2
import spack
spack_max_version = (2,6)
class PythonVersionTest(unittest.TestCase):
    """Checks that all Spack sources stay within spack_max_version."""
    def spack_python_files(self):
        """Yield the spack script plus every .py file under lib/ and var/."""
        # first file is the spack script.
        yield spack.spack_file
        # Next files are all the source files and package files.
        search_paths = [spack.lib_path, spack.var_path]
        # Iterate through the whole spack source tree.
        for path in search_paths:
            for root, dirnames, filenames in os.walk(path):
                for filename in filenames:
                    if re.match(r'^[^.#].*\.py$', filename):
                        yield os.path.join(root, filename)
    def all_package_py_files(self):
        """Yield the package.py file of every known package."""
        for name in spack.db.all_package_names():
            yield spack.db.filename_for_package_name(name)
    def check_python_versions(self, files):
        """Fail if any of *files* requires a Python above the supported max,
        printing each offending file together with the reason."""
        # dict version -> filename -> reasons
        all_issues = {}
        for fn in files:
            with open(fn) as pyfile:
                versions = pyqver2.get_versions(pyfile.read())
            for ver, reasons in versions.items():
                if ver > spack_max_version:
                    if not ver in all_issues:
                        all_issues[ver] = {}
                    all_issues[ver][fn] = reasons
        if all_issues:
            tty.error("Spack must run on Python version %d.%d"
                      % spack_max_version)
        for v in sorted(all_issues.keys(), reverse=True):
            msgs = []
            for fn in sorted(all_issues[v].keys()):
                # Report paths relative to the spack prefix when possible.
                short_fn = fn
                if fn.startswith(spack.prefix):
                    short_fn = fn[len(spack.prefix):]
                reasons = [r for r in set(all_issues[v][fn]) if r]
                for r in reasons:
                    msgs.append(("%s:%s" % ('spack' + short_fn, r[0]), r[1]))
            tty.error("These files require version %d.%d:" % v)
            maxlen = max(len(f) for f, prob in msgs)
            fmt = "%%-%ds%%s" % (maxlen+3)
            print fmt % ('File', 'Reason')
            print fmt % ('-' * (maxlen), '-' * 20)
            for msg in msgs:
                print fmt % msg
        self.assertTrue(len(all_issues) == 0)
    def test_core_module_compatibility(self):
        self.check_python_versions(self.spack_python_files())
    def test_package_module_compatibility(self):
        self.check_python_versions(self.all_package_py_files())
| ##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://scalability-llnl.github.io/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""
This test ensures that all Spack files are Python version 2.6 or less.
Spack was originally 2.7, but enough systems in 2014 are still using
2.6 on their frontend nodes that we need 2.6 to get adopted.
"""
import unittest
import os
import re
import llnl.util.tty as tty
from external import pyqver2
import spack
spack_max_version = (2,6)
class PythonVersionTest(unittest.TestCase):
def spack_python_files(self):
# first file is the spack script.
yield spack.spack_file
# Next files are all the source files and package files.
search_paths = [spack.lib_path, spack.var_path]
# Iterate through the whole spack source tree.
for path in search_paths:
for root, dirnames, filenames in os.walk(path):
for filename in filenames:
if re.match(r'^[^.#].*\.py$', filename):
yield os.path.join(root, filename)
def test_python_versions(self):
# dict version -> filename -> reasons
all_issues = {}
for fn in self.spack_python_files():
with open(fn) as pyfile:
versions = pyqver2.get_versions(pyfile.read())
for ver, reasons in versions.items():
if ver > spack_max_version:
if not ver in all_issues:
all_issues[ver] = {}
all_issues[ver][fn] = reasons
if all_issues:
tty.error("Spack must run on Python version %d.%d"
% spack_max_version)
for v in sorted(all_issues.keys(), reverse=True):
msgs = []
for fn in sorted(all_issues[v].keys()):
short_fn = fn
if fn.startswith(spack.prefix):
short_fn = fn[len(spack.prefix):]
reasons = [r for r in set(all_issues[v][fn]) if r]
for r in reasons:
msgs.append(("%s:%s" % ('spack' + short_fn, r[0]), r[1]))
tty.error("These files require version %d.%d:" % v)
maxlen = max(len(f) for f, prob in msgs)
fmt = "%%-%ds%%s" % (maxlen+3)
print fmt % ('File', 'Reason')
print fmt % ('-' * (maxlen), '-' * 20)
for msg in msgs:
print fmt % msg
self.assertTrue(len(all_issues) == 0)
| Python | 0 |
3ea69c783393b6c62f3428c6ec83a24fe7634b6c | add grader in Python | 8-kyu/grader.py | 8-kyu/grader.py | def grader(score):
if score < 0.6 or score > 1:
return 'F'
elif score < 0.7:
return 'D'
elif score < 0.8:
return 'C'
elif score < 0.9:
return 'B'
else:
return 'A'
| Python | 0.000128 | |
37dda1d235017bebb9bb0f6eff150dd12222762f | remove organisation from db | migrations/versions/0162_remove_org.py | migrations/versions/0162_remove_org.py | """
Revision ID: 0162_remove_org
Revises: 0161_email_branding
Create Date: 2018-02-06 17:08:11.879844
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0162_remove_org'
down_revision = '0161_email_branding'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('services', 'organisation_id')
op.drop_column('services_history', 'organisation_id')
op.drop_table('organisation')
op.alter_column('service_email_branding', 'email_branding_id', nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('services_history', sa.Column('organisation_id', postgresql.UUID(), autoincrement=False, nullable=True)) # noqa
op.add_column('services', sa.Column('organisation_id', postgresql.UUID(), autoincrement=False, nullable=True))
op.create_table(
'organisation',
sa.Column('id', postgresql.UUID(), autoincrement=False, nullable=False),
sa.Column('colour', sa.VARCHAR(length=7), autoincrement=False, nullable=True),
sa.Column('logo', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('name', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id', name='organisation_pkey')
)
op.create_index('ix_services_history_organisation_id', 'services_history', ['organisation_id'], unique=False)
op.create_foreign_key('services_organisation_id_fkey', 'services', 'organisation', ['organisation_id'], ['id'])
op.create_index('ix_services_organisation_id', 'services', ['organisation_id'], unique=False)
op.alter_column('service_email_branding', 'email_branding_id', nullable=True)
| Python | 0 | |
d1e8a8bb6ffc852bf07c40968029c5def7dc0a96 | Correct the dict | nclxd/nova/virt/lxd/host_utils.py | nclxd/nova/virt/lxd/host_utils.py | # Copyright (c) 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
def get_fs_info(path):
"""get free/used/total space info for a filesystem
:param path: Any dirent on the filesystem
:returns: A dict containing
:free: How much space is free (in bytes)
:used: How much space is used (in bytes)
:total: How big the filesytem is (in bytes)
"""
hddinfo = os.statvfs(path)
total = hddinfo.f_frsize * hddinfo.f_blocks
used = (hddinfo.f_blocks - hddinfo.f_bfree) * hddinfo.f_frsize
available = st.f_bavail * st.f_frsize
return {'total': total,
'available': available,
'used': used}
def get_memory_mb_usage():
"""Get the used memory size(MB) of the host.
"returns: the total usage of memory(MB)
"""
with open('/proc/meminfo') as fp:
m = fp.read().split()
idx1 = m.index('MemTotal:')
idx2 = m.index('MemFree:')
idx3 = m.index('Buffers:')
idx4 = m.index('Cached:')
total = int(m[idx1 + 1])
avail = int(m[idx2 + 1]) + int(m[idx3 + 1]) + int(m[idx4 + 1])
return {
'total': total * 1024,
'used': (total - avail) * 1024
}
| # Copyright (c) 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
def get_fs_info(path):
"""get free/used/total space info for a filesystem
:param path: Any dirent on the filesystem
:returns: A dict containing
:free: How much space is free (in bytes)
:used: How much space is used (in bytes)
:total: How big the filesytem is (in bytes)
"""
hddinfo = os.statvfs(path)
total = hddinfo.f_frsize * hddinfo.f_blocks
used = (hddinfo.f_blocks - hddinfo.f_bfree) * hddinfo.f_frsize
available = st.f_bavail * st.f_frsize
return {'total': total,
'available': free,
'used': used}
def get_memory_mb_usage():
"""Get the used memory size(MB) of the host.
"returns: the total usage of memory(MB)
"""
with open('/proc/meminfo') as fp:
m = fp.read().split()
idx1 = m.index('MemTotal:')
idx2 = m.index('MemFree:')
idx3 = m.index('Buffers:')
idx4 = m.index('Cached:')
total = int(m[idx1 + 1])
avail = int(m[idx2 + 1]) + int(m[idx3 + 1]) + int(m[idx4 + 1])
return {
'total': total * 1024,
'used': (total - avail) * 1024
}
| Python | 0.999145 |
85060c7653a04f18e6f5cd016e113327ba3a2878 | Add support for Sercomm IP camera discovery. (#238) | netdisco/discoverables/sercomm.py | netdisco/discoverables/sercomm.py | """
Discover Sercomm network cameras.
These are rebranded as iControl and many others, and are usually
distributed as part of an ADT or Comcast/Xfinity monitoring package.
https://github.com/edent/Sercomm-API
"""
from . import SSDPDiscoverable
class Discoverable(SSDPDiscoverable):
"""Add support for discovering camera services."""
def get_entries(self):
"""Get all Sercomm iControl devices."""
return self.find_by_device_description({'manufacturer': 'iControl'})
| Python | 0 | |
1295f2867eb7348959d86618b8e80c001cc41ff7 | Add 'lib' init module. | akhet/paster_templates/akhet/+package+/lib/__init__.py | akhet/paster_templates/akhet/+package+/lib/__init__.py | """Miscellaneous support packages for {{project}}.
"""
| Python | 0 | |
c491b9379966e772c0ab4649584a8d5a0773c403 | Update repositoryInstaller.py | scripts/devSetup/repositoryInstaller.py | scripts/devSetup/repositoryInstaller.py | from __future__ import print_function
__author__ = u'schmatz'
import configuration
import errors
import subprocess
import os
import sys
from which import which
#git clone https://github.com/nwinter/codecombat.git coco
class RepositoryInstaller():
def __init__(self,config):
self.config = config
assert isinstance(self.config,configuration.Configuration)
if not self.checkIfGitExecutableExists():
if self.config.system.operating_system == "linux":
raise errors.CoCoError("Git is missing. Please install it (try 'sudo apt-get install git')\nIf you are not using Ubuntu then please see your Linux Distribution's documentation for help installing git.")
elif self.config.system.operating_system == "mac":
raise errors.CoCoError("Git is missing. Please install the Xcode command line tools.")
raise errors.CoCoError(u"Git is missing. Please install git.")
#http://stackoverflow.com/questions/9329243/xcode-4-4-and-later-install-command-line-tools
if not self.checkIfCurlExecutableExists():
if self.config.system.operating_system == "linux":
raise errors.CoCoError("Curl is missing. Please install it (try 'sudo apt-get install curl')\nIf you are not using Ubuntu then please see your Linux Distribution's documentation for help installing curl.")
elif self.config.system.operating_system == "mac":
raise errors.CoCoError("Curl is missing. Please install the Xcode command line tools.")
raise errors.CoCoError(u"Git is missing. Please install git.")
def checkIfGitExecutableExists(self):
gitPath = which(u"git")
if gitPath:
return True
else:
return False
#TODO: Refactor this into a more appropriate file
def checkIfCurlExecutableExists(self):
curlPath = which("curl")
if curlPath:
return True
else:
return False
def cloneRepository(self):
print(u"Cloning repository...")
#TODO: CHANGE THIS BEFORE LAUNCH
return_code = True
git_folder = self.config.directory.root_install_directory + os.sep + "coco"
print("Installing into " + git_folder)
return_code = subprocess.call("git clone " + self.config.repository_url +" coco",cwd=self.config.directory.root_install_directory,shell=True)
#TODO: remove this on windos
subprocess.call("chown -R " +git_folder + " 0777",shell=True)
if return_code and self.config.system.operating_system != u"windows":
#raise errors.CoCoError("Failed to clone git repository")
import shutil
#import sys
#sys.stdout.flush()
raw_input(u"Copy it now")
#shutil.copytree(u"/Users/schmatz/coco",self.config.directory.root_install_directory + os.sep + u"coco")
print(u"Copied tree just for you")
#print("FAILED TO CLONE GIT REPOSITORY")
#input("Clone the repository and click any button to continue")
elif self.config.system.operating_system == u"windows":
raise errors.CoCoError(u"Windows doesn't support automated installations of npm at this point.")
else:
print(u"Cloned git repository")
def install_node_packages(self):
print(u"Installing node packages...")
#TODO: "Replace npm with more robust package
#npm_location = self.config.directory.bin_directory + os.sep + "node" + os.sep + "bin" + os.sep + "npm"
npm_location = u"npm"
if sys.version_info[0] == 2:
py_cmd = "python"
else:
py_cmd = subprocess.check_output(['which', 'python2'])
return_code = subprocess.call([npm_location, u"install",
"--python=" + py_cmd],
cwd=self.config.directory.root_dir +
os.sep + u"coco")
if return_code:
raise errors.CoCoError(u"Failed to install node packages")
else:
print(u"Installed node packages!")
| from __future__ import print_function
__author__ = u'schmatz'
import configuration
import errors
import subprocess
import os
import sys
from which import which
#git clone https://github.com/nwinter/codecombat.git coco
class RepositoryInstaller():
def __init__(self,config):
self.config = config
assert isinstance(self.config,configuration.Configuration)
if not self.checkIfGitExecutableExists():
if self.config.system.operating_system == "linux":
raise errors.CoCoError("Git is missing. Please install it (try 'sudo apt-get install git')\nIf you are not using Ubuntu then please see your Linux Distribution's documentation for help installing git.")
elif self.config.system.operating_system == "mac":
raise errors.CoCoError("Git is missing. Please install the Xcode command line tools.")
raise errors.CoCoError(u"Git is missing. Please install git.")
#http://stackoverflow.com/questions/9329243/xcode-4-4-and-later-install-command-line-tools
if not self.checkIfCurlExecutableExists():
if self.config.system.operating_system == "linux":
raise errors.CoCoError("Curl is missing. Please install it(try 'sudo apt-get install curl')\nIf you are not using Ubuntu then please see your Linux Distribution's documentation for help installing curl.")
elif self.config.system.operating_system == "mac":
raise errors.CoCoError("Curl is missing. Please install the Xcode command line tools.")
raise errors.CoCoError(u"Git is missing. Please install git.")
def checkIfGitExecutableExists(self):
gitPath = which(u"git")
if gitPath:
return True
else:
return False
#TODO: Refactor this into a more appropriate file
def checkIfCurlExecutableExists(self):
curlPath = which("curl")
if curlPath:
return True
else:
return False
def cloneRepository(self):
print(u"Cloning repository...")
#TODO: CHANGE THIS BEFORE LAUNCH
return_code = True
git_folder = self.config.directory.root_install_directory + os.sep + "coco"
print("Installing into " + git_folder)
return_code = subprocess.call("git clone " + self.config.repository_url +" coco",cwd=self.config.directory.root_install_directory,shell=True)
#TODO: remove this on windos
subprocess.call("chown -R " +git_folder + " 0777",shell=True)
if return_code and self.config.system.operating_system != u"windows":
#raise errors.CoCoError("Failed to clone git repository")
import shutil
#import sys
#sys.stdout.flush()
raw_input(u"Copy it now")
#shutil.copytree(u"/Users/schmatz/coco",self.config.directory.root_install_directory + os.sep + u"coco")
print(u"Copied tree just for you")
#print("FAILED TO CLONE GIT REPOSITORY")
#input("Clone the repository and click any button to continue")
elif self.config.system.operating_system == u"windows":
raise errors.CoCoError(u"Windows doesn't support automated installations of npm at this point.")
else:
print(u"Cloned git repository")
def install_node_packages(self):
print(u"Installing node packages...")
#TODO: "Replace npm with more robust package
#npm_location = self.config.directory.bin_directory + os.sep + "node" + os.sep + "bin" + os.sep + "npm"
npm_location = u"npm"
if sys.version_info[0] == 2:
py_cmd = "python"
else:
py_cmd = subprocess.check_output(['which', 'python2'])
return_code = subprocess.call([npm_location, u"install",
"--python=" + py_cmd],
cwd=self.config.directory.root_dir +
os.sep + u"coco")
if return_code:
raise errors.CoCoError(u"Failed to install node packages")
else:
print(u"Installed node packages!")
| Python | 0.000001 |
8fe99eedd4e1a1604277c42ed8f2ea0dc2e622de | add simple csv utility module | mediacloud/mediawords/util/csv.py | mediacloud/mediawords/util/csv.py | """Utility functions for dealing with csvs."""
import csv
import io
def get_csv_string_from_dicts(dicts: list) -> str:
"""Given a list of dicts, return a representative csv string."""
if len(dicts) < 1:
return ''
csvio = io.StringIO()
csvwriter = csv.DictWriter(csvio, fieldnames=dicts[0].keys())
csvwriter.writeheader()
[csvwriter.writerow(d) for d in dicts]
return csvio.getvalue()
def get_dicts_from_csv_string(csvstring: str) -> list:
"""Given a csv string, return a list of dicts."""
if len(csvstring) < 1:
return []
csvio = io.StringIO(csvstring)
return list(csv.DictReader(csvio))
| Python | 0 | |
f6c2d5e37685b149cfd447545c58ce1fc4d836b9 | Add function to create view for Span candidate subclasses | snorkel/models/views.py | snorkel/models/views.py |
def create_serialized_candidate_view(session, C, verbose=True):
"""Creates a view in the database for a Candidate sub-class C defined over
Span contexts, which are direct children of a single sentence.
Creates VIEW with schema:
candidate.id, candidate.split, span0.*, ..., spanK.*, sentence.*
NOTE: This limited functionality should be expanded for arbitrary context
trees. Also this should be made more dialect-independent.
"""
selects, froms, joins = [], [], []
for i, arg in enumerate(C.__argnames__):
selects.append("span{0}.*".format(i))
froms.append("span AS span{0}".format(i))
joins.append("{0}.{1}_id = span{2}.id".format(C.__tablename__, arg, i))
sql = """
CREATE VIEW {0}_serialized AS
SELECT
candidate.id,
candidate.split,
{1},
sentence.*
FROM
candidate,
{0},
{2},
sentence
WHERE
candidate.id = {0}.id
AND sentence.id = span0.sentence_id
AND {3}
""".format(
C.__tablename__,
", ".join(selects),
", ".join(froms),
" AND ".join(joins)
)
if verbose:
print("Creating view...")
print(sql)
session.execute(sql) | Python | 0 | |
135324dd3346f7830abbe64cb5eadf82d1ca963c | add - module for generating data sets. | versus/src/data.py | versus/src/data.py | """
Module for loading datasets
"""
import gzip
import theano.tensor as T
import theano
import numpy
import cPickle
import os
def load_MNIST(dataset):
''' Loads the dataset
:type dataset: string
:param dataset: the path to the dataset (here MNIST)
'''
#############
# LOAD DATA #
#############
# Download the MNIST dataset if it is not present
data_dir, data_file = os.path.split(dataset)
if data_dir == "" and not os.path.isfile(dataset):
# Check if dataset is in the data directory.
new_path = os.path.join(os.path.split(__file__)[0], "..", "data", dataset)
if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
dataset = new_path
if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
import urllib
origin = 'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
print 'Downloading data from %s' % origin
urllib.urlretrieve(origin, dataset)
print '... loading data'
# Load the dataset
f = gzip.open(dataset, 'rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
#train_set, valid_set, test_set format: tuple(input, target)
#input is an numpy.ndarray of 2 dimensions (a matrix)
#witch row's correspond to an example. target is a
#numpy.ndarray of 1 dimensions (vector)) that have the same length as
#the number of rows in the input. It should give the target
#target to the example with the same index in the input.
def shared_dataset(data_xy, borrow=True):
""" Function that loads the dataset into shared variables
The reason we store our dataset in shared variables is to allow
Theano to copy it into the GPU memory (when code is run on GPU).
Since copying data into the GPU is slow, copying a minibatch everytime
is needed (the default behaviour if the data is not in a shared
variable) would lead to a large decrease in performance.
"""
data_x, data_y = data_xy
shared_x = theano.shared(numpy.asarray(data_x,
dtype=theano.config.floatX),
borrow=borrow)
shared_y = theano.shared(numpy.asarray(data_y,
dtype=theano.config.floatX),
borrow=borrow)
# When storing data on the GPU it has to be stored as floats
# therefore we will store the labels as ``floatX`` as well
# (``shared_y`` does exactly that). But during our computations
# we need them as ints (we use labels as index, and if they are
# floats it doesn't make sense) therefore instead of returning
# ``shared_y`` we will have to cast it to int. This little hack
# lets ous get around this issue
return shared_x, T.cast(shared_y, 'int32')
test_set_x, test_set_y = shared_dataset(test_set)
valid_set_x, valid_set_y = shared_dataset(valid_set)
train_set_x, train_set_y = shared_dataset(train_set)
rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
return rval | Python | 0 | |
9732c401fb51ae0b757be5108835b71e7c389850 | Add tests | django_comments_xtd/tests/test_get_version.py | django_comments_xtd/tests/test_get_version.py | try:
from unittest.mock import patch
except ImportError:
from mock import patch
from django.test import TestCase
class GetVersionTestCase(TestCase):
@patch('django_comments_xtd.VERSION', (2, 8, 0, 'f', 0))
def test_get_version_when_patch_equal_to_zero(self):
from django_comments_xtd import get_version
self.assertEqual(get_version(), '2.8.0')
@patch('django_comments_xtd.VERSION', (2, 8, 1, 'f', 0))
def test_get_version_when_patch_greater_than_zero(self):
from django_comments_xtd import get_version
self.assertEqual(get_version(), '2.8.1')
| Python | 0.000001 | |
2eb163c5dd675c2e7a9cedb5d6868545833cbf34 | Add lemma rules | spacy/en/lemma_rules.py | spacy/en/lemma_rules.py | # encoding: utf8
from __future__ import unicode_literals
LEMMA_RULES = {
"noun": [
["s", ""],
["ses", "s"],
["ves", "f"],
["xes", "x"],
["zes", "z"],
["ches", "ch"],
["shes", "sh"],
["men", "man"],
["ies", "y"]
],
"verb": [
["s", ""],
["ies", "y"],
["es", "e"],
["es", ""],
["ed", "e"],
["ed", ""],
["ing", "e"],
["ing", ""]
],
"adj": [
["er", ""],
["est", ""],
["er", "e"],
["est", "e"]
],
"punct": [
["“", "\""],
["”", "\""],
["\u2018", "'"],
["\u2019", "'"]
]
}
| Python | 0.000237 | |
45628f2abd6ec66ad48679732d600174a3a7de26 | add a script | jython/surfaceMapToDs.py | jython/surfaceMapToDs.py | #!/bin/env jython
import sys
import java.io
import org.gavrog
def dsymbolFromCyclicAdjacencies(adjs):
vertexToChamber = {}
edgeToChamber = {}
chamberToVertex = {}
size = 0
for v in adjs:
vertexToChamber[v] = size
for w in adjs[v]:
if w == v:
raise RuntimeException("found a loop at vertex %s" % v)
else:
edgeToChamber[v, w] = size
chamberToVertex[size] = v
chamberToVertex[size + 1] = v
size += 2
ds = org.gavrog.joss.dsyms.basic.DynamicDSymbol(2)
elms = ds.grow(size)
for v, w in edgeToChamber:
D = edgeToChamber[v, w]
E = edgeToChamber[w, v]
if E is None:
print ("# WARNING: missing %s in adjacencies for %s" % (v, w))
ds.redefineOp(0, elms[D], elms[E + 1])
for v in adjs:
d = 2 * len(adjs[v])
D = vertexToChamber[v]
for i in range(1, d, 2):
ds.redefineOp(1, elms[D + i], elms[D + (i + 1) % d])
for D in range(0, size, 2):
ds.redefineOp(2, elms[D], elms[D + 1])
for D in range(size):
ds.redefineV(0, 1, elms[D], 1)
ds.redefineV(1, 2, elms[D], 1)
return org.gavrog.joss.dsyms.basic.DSymbol(ds), chamberToVertex
if __name__ == '__main__':
import re
text = sys.stdin.read()
data = [ [ int(s) for s in re.split(r' +', line.strip()) ]
for line in re.split(r'\n+', text.strip()) ]
adjs = dict((a[0], a[1:]) for a in data)
ds, _ = dsymbolFromCyclicAdjacencies(adjs)
print ds
| Python | 0.000003 | |
25e0a9cab06e518add6c2a018258dd4d59ad5611 | Add python example that uses newly added download callbacks. | examples/python/download_packages_with_cbs.py | examples/python/download_packages_with_cbs.py | #!/usr/bin/env python
"""
librepo - Example of download_packages() function with use of end,
failure and mirrorfailure callbacks.
"""
import librepo
if __name__ == "__main__":
# Setup logging
#def debug_function(msg, _):
# print "DEBUG: %s" % msg
#librepo.set_debug_log_handler(debug_function)
# Prepare handle
h = librepo.Handle()
h.urls = ["http://beaker-project.org/yum/client-testing/Fedora19/"]
h.repotype = librepo.YUMREPO
# Callbacks
def endcb(data):
print "EndCb: Download of %s finished" % data
def failurecb(data, msg):
print "FailureCb: Download of %s failed with error: %s" % (data, msg)
def mirrorfailurecb(data, msg):
print "MirrorFailureCb: Download of %s from mirror failed with: %s" % (data, msg)
# Prepare list of targets
packages = []
target = librepo.PackageTarget("beaker-0.14.0-1.fc18.src.rpm",
handle=h,
checksum_type=librepo.SHA256,
checksum="e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
resume=True,
cbdata="beaker-0.14.0-1.fc18.src.rpm",
endcb=endcb,
failurecb=failurecb,
mirrorfailurecb=mirrorfailurecb)
packages.append(target)
target = librepo.PackageTarget("beaker-0.13.2-1.fc17.noarch.rpm",
handle=h,
checksum_type=librepo.SHA256,
checksum="foobar",
cbdata="beaker-0.13.2-1.fc17.noarch.rpm (bad checksum)",
endcb=endcb,
failurecb=failurecb,
mirrorfailurecb=mirrorfailurecb)
packages.append(target)
target = librepo.PackageTarget("beaker-0.13.2-1.fc17.src.rpm",
handle=h,
checksum_type=librepo.SHA256,
checksum="xyz",
cbdata="beaker-0.13.2-1.fc17.src.rpm (bad checksum)",
endcb=endcb,
failurecb=failurecb,
mirrorfailurecb=mirrorfailurecb)
packages.append(target)
target = librepo.PackageTarget("beaker-client-0.14.1-1.fc18.noarch.rpm",
handle=h,
expectedsize=333333333333333,
cbdata="beaker-client-0.14.1-1.fc18.noarch.rpm (bad size)",
endcb=endcb,
failurecb=failurecb,
mirrorfailurecb=mirrorfailurecb)
packages.append(target)
target = librepo.PackageTarget("rhts-4.56-1.fc17.src.rpm_bad_filename",
handle=h,
cbdata="rhts-4.56-1.fc17.src.rpm_bad_filename (bad path)",
endcb=endcb,
failurecb=failurecb,
mirrorfailurecb=mirrorfailurecb)
packages.append(target)
librepo.download_packages(packages, failfast=False)
for target in packages:
print "### %s: %s" % (target.local_path, target.err or "OK")
print "Relative URL: ", target.relative_url
print "Destination: ", target.dest
print "Base URL: ", target.base_url
print "Checksum type: ", target.checksum_type
print "Expected checksum: ", target.checksum
print "Resume: ", bool(target.resume)
print "Local path: ", target.local_path
print "Error: ", target.err
print
| Python | 0 | |
4ca336ee7b29609e5cc87dccf1a66c233038aa94 | Create cpp_header_merger.py | cpp_header_merger.py | cpp_header_merger.py | __author__ = 'Joshua Zhang'
"""A C/C++ header merging tool """
import os
import re
import argparse
# matching c/c++ #include patterns
pattern_include = r"#.*include.+(\.hpp|\.h)+"
pattern_squote = r"<.+>"
pattern_quote = r'".+"'
pattern_pragma = r"#pragma.+once"
regex_include = re.compile(pattern_include, re.IGNORECASE)
regex_squote = re.compile(pattern_squote, re.IGNORECASE)
regex_quote = re.compile(pattern_quote, re.IGNORECASE)
regex_pragma = re.compile(pattern_pragma, re.IGNORECASE)
# blacklist
black_list = set()
def custom_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--include', help='Include path for headers', required=True)
parser.add_argument('-o', '--output', help='Output file path', required=False)
parser.add_argument('-e', '--entry', help='Entry header file to start with', required=True)
return parser
def nonblank_lines(f):
for l in f:
line = l.rstrip()
if line:
yield line
def remove_comments(string):
pattern = r"(\".*?\"|\'.*?\')|(/\*.*?\*/|//[^\r\n]*$)"
# first group captures quoted strings (double or single)
# second group captures comments (//single-line or /* multi-line */)
regex = re.compile(pattern, re.MULTILINE|re.DOTALL)
def _replacer(match):
# if the 2nd group (capturing comments) is not None,
# it means we have captured a non-quoted (real) comment string.
if match.group(2) is not None:
return "" # so we will return empty to remove the comment
else: # otherwise, we will return the 1st group
return match.group(1) # captured quoted-string
return regex.sub(_replacer, string)
def replace_nonsystem_header(line, file):
if re.search(regex_include, line) is not None:
if re.search(regex_squote, line) is not None:
target = line.split('<')[-1].split('>')[0]
if target in black_list:
target = 'blacklist'
else:
target = os.path.abspath(include_path + target)
elif re.search(regex_quote, line) is not None:
target = line.split('"')[1]
target = os.path.dirname(os.path.abspath(file)) + '/' + target
else:
raise Exception("Invalid #include header")
target = os.path.abspath(target)
if target not in history:
history.add(target)
return "/*" + line + "*/" + os.linesep + process_header(target)
else:
return "/*" + line + " skipped */"
return line
def process_header(file):
print("Processing: " + file)
try:
with open(file, "rb") as fnow:
this_buffer = []
require_guard = None
# remove c/c++ comments
lines_wo_comments = remove_comments(fnow.read())
for line in nonblank_lines(lines_wo_comments.splitlines()):
new_line = replace_nonsystem_header(line, file)
if re.search(regex_pragma, new_line) is not None:
new_line = ""
require_guard = 1
tmp = file.lstrip(os.path.abspath(include_path)).upper().replace('/', '_').replace('.', '_')
this_guard_name = "_AUTOMATIC_GUARD_" + tmp + "_"
this_buffer.append("#ifndef " + this_guard_name + os.linesep + '#define ' + this_guard_name)
this_buffer.append(new_line)
if require_guard == 1:
this_buffer.append("#endif /* END " + this_guard_name + " */")
this_string = os.linesep.join(this_buffer)
# print(this_string)
return this_string
except IOError:
skipped_list.add(file.lstrip(os.path.abspath(include_path)))
return ''
def merge_header(entry, output):
with open(output, "wb") as fout:
# open output for write
result = process_header(entry)
fout.write(result)
print("Done.")
if __name__ == '__main__':
parser = custom_parser()
args = vars(parser.parse_args())
entry_file = args['entry']
include_path = args['include']
output_file = args['output'] if args['output'] is not None else entry_file + "_out.hpp"
history = set(['blacklist'])
skipped_list = set()
merge_header(entry_file, output_file)
# print skipped files
print("\nThe following files are skipped, should be system headers, otherwise there must have mistakes.")
for skipped in skipped_list:
print("***Unable to open file: " + skipped + ", skipped")
| Python | 0 | |
e212ad90a8fedb8e29abe3683b99a28d4030b544 | Add process module for Popen compat handling | passpie/process.py | passpie/process.py | from subprocess import Popen, PIPE
from ._compat import *
class Proc(Popen):
def communicate(self, **kwargs):
if kwargs.get('input') and isinstance(kwargs['input'], basestring):
kwargs['input'] = kwargs['input'].encode('utf-8')
return super(Proc, self).communicate(**kwargs)
def __exit__(self, *args, **kwargs):
if hasattr(super(Proc, self), '__exit__'):
super(Proc, self).__exit__(*args, **kwargs)
def __enter__(self, *args, **kwargs):
if hasattr(super(Proc, self), '__enter__'):
return super(Proc, self).__enter__(*args, **kwargs)
return self
def call(*args, **kwargs):
    """Run a command through Proc and return its (stdout, stderr) pair.

    stdin/stdout/stderr default to pipes, shell defaults to False, and an
    optional `input` keyword is forwarded to communicate().
    """
    kwargs.setdefault('stdout', PIPE)
    kwargs.setdefault('stderr', PIPE)
    kwargs.setdefault('stdin', PIPE)
    kwargs.setdefault('shell', False)
    kwargs_input = kwargs.pop('input', None)
    with Proc(*args, **kwargs) as proc:
        output, error = proc.communicate(input=kwargs_input)
    # basestring comes from ._compat; presumably (str, bytes) on Python 3
    # so that captured output is decoded to text -- TODO confirm.
    if isinstance(output, basestring):
        output = output.decode('utf-8')
    return output, error
| Python | 0 | |
860819e2f843fb1c93b95621110ba313cb3b718b | Add basic rtp support. | pcs/packets/rtp.py | pcs/packets/rtp.py | # Copyright (c) 2008, Bruce M. Simpson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of the author nor the names of other
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# File: $Id$
#
# Author: Bruce M. Simpson
#
# Description: Classes which describe RFC 3550 RTP packets.
#
import inspect
import struct
import time
import pcs
from pcs.packets import payload
#import rtp_map
# TODO: Make sender header inherit from rtcp as it needs
# to see the Report Count.
# TODO: RTCP BYE, APP, SDES.
# TODO: SDES: CNAME, NAME, EMAIL, PHONE, LOC, TOOL, NOTE, PRIV TLVs.
# TODO: Sender report blocks.
# TODO: Receiver reports.
class rtp(pcs.Packet):
    """RFC 3550 Real Time Protocol"""

    _layout = pcs.Layout()

    def __init__(self, bytes = None, timestamp = None, **kv):
        """Build the RTP fixed header, optionally decoding from `bytes`."""
        v = pcs.Field("v", 2)                        # version
        p = pcs.Field("p", 1)                        # padded
        x = pcs.Field("x", 1)                        # extended
        cc = pcs.Field("cc", 4)                      # csrc count
        # BUG FIX: the marker bit is 1 bit wide per RFC 3550, not 4; a
        # 4-bit field misaligned every subsequent field by 3 bits.
        m = pcs.Field("m", 1)                        # m-bit
        pt = pcs.Field("pt", 7, discriminator=True)  # payload type
        seq = pcs.Field("seq", 16)                   # sequence
        ssrc = pcs.Field("ssrc", 32)                 # source
        opt = pcs.OptionListField("opt")             # optional fields
        pcs.Packet.__init__(self, [v, p, x, cc, m, pt, seq, ssrc, opt],
                            bytes = bytes, **kv)
        self.description = inspect.getdoc(self)
        if timestamp is None:
            self.timestamp = time.time()
        else:
            self.timestamp = timestamp
        if bytes is not None:
            offset = self.sizeof()
            curr = offset
            remaining = len(bytes) - offset
            # Parse CSRC.
            nc = self.cc
            while nc > 0 and remaining >= 4:
                # BUG FIX: struct.unpack returns a tuple; take element 0.
                value = struct.unpack("!I", bytes[curr:curr+4])[0]
                csrc = pcs.Field("csrc", 32, default=value)
                self.opt._options.append(csrc)
                # BUG FIX: nc was never decremented, so CSRC entries were
                # consumed until the buffer ran out, ignoring the cc field.
                nc -= 1
                curr += 4
                remaining -= 4
            # Parse Header Extension.
            if self.x == 1 and remaining >= 4:
                # BUG FIX: without [0] the unpacked tuple made the following
                # `<<= 2` raise TypeError.
                extlen = struct.unpack("!H", bytes[curr+2:curr+4])[0]
                extlen <<= 2
                extlen = min(extlen, remaining)
                # Copy the entire chunk so we keep the type field.
                # NOTE(review): the slice bytes[curr:extlen+4] and the
                # `curr += extlen` advance look inconsistent with a 4-byte
                # extension header -- confirm against pcs conventions.
                ext = pcs.StringField("ext", extlen * 8,
                                      default=bytes[curr:extlen+4])
                self.opt._options.append(ext)
                curr += extlen
                remaining -= extlen
            # Heed padding byte.
            npad = 0
            if self.p == 1:
                # NOTE(review): on Python 2 bytes[-1] is a one-char string,
                # so the slice arithmetic below would need ord() -- confirm
                # the targeted interpreter version.
                npad = bytes[-1]
            self.data = payload.payload(bytes[curr:remaining-npad],
                                        timestamp = timestamp)
        else:
            self.data = None

    #def next(self, bytes, timestamp):
    #    """Decapsulate RTP payload header according to payload type."""
class rtcp(pcs.Packet):
    """RFC 3550 Real Time Control Protocol header"""

    _layout = pcs.Layout()

    def __init__(self, bytes = None, timestamp = None, **kv):
        """Build the fixed RTCP header, optionally decoding from `bytes`."""
        field_specs = (("v", 2), ("p", 1), ("rc", 5),
                       ("pt", 8), ("length", 16), ("ssrc", 32))
        header_fields = [pcs.Field(name, width) for name, width in field_specs]
        pcs.Packet.__init__(self, header_fields, bytes = bytes, **kv)
        self.description = inspect.getdoc(self)
        self.timestamp = time.time() if timestamp is None else timestamp
        if bytes is None:
            self.data = None
        else:
            hdrlen = self.sizeof()
            # XXX TODO look at pt and decapsulate next.
            # NOTE(review): the upper bound reproduces the original
            # bytes[curr:remaining] arithmetic, which drops the final
            # `hdrlen` bytes of the buffer -- confirm it is intended.
            self.data = payload.payload(bytes[hdrlen:len(bytes) - hdrlen],
                                        timestamp = timestamp)
class sender(pcs.Packet):
    """RFC 3550 Real Time Control Protocol sender message portion"""

    _layout = pcs.Layout()

    def __init__(self, bytes = None, timestamp = None, **kv):
        # Sender-report fixed fields: NTP/RTP timestamps plus the sender's
        # packet and octet counters.
        ntpts = pcs.Field("ntpts", 64)
        rtpts = pcs.Field("rtpts", 32)
        spkts = pcs.Field("spkts", 32)
        sbytes = pcs.Field("sbytes", 32)
        opt = pcs.OptionListField("opt")
        pcs.Packet.__init__(self, [ntpts, rtpts, spkts, sbytes, opt],
                            bytes = bytes, **kv)
        self.description = inspect.getdoc(self)
        if timestamp is None:
            self.timestamp = time.time()
        else:
            self.timestamp = timestamp
        if bytes is not None:
            offset = self.sizeof()
            curr = offset
            remaining = len(bytes) - offset
            # XXX TODO decapsulate all the report counts.
            # to do this, we need to see the parent RC.
            # NOTE(review): bytes[curr:remaining] drops the trailing `offset`
            # bytes whenever curr > 0; bytes[curr:] looks intended -- confirm.
            self.data = payload.payload(bytes[curr:remaining],
                                        timestamp = timestamp)
        else:
            self.data = None
| Python | 0 | |
53038aea2b439acdc265f81b9f031336ea1f27f3 | Add lc480_sliding_window_median.py | lc480_sliding_window_median.py | lc480_sliding_window_median.py | """Leetcode 480. Sliding Window Median
URL: https://leetcode.com/problems/sliding-window-median/
Hard
Median is the middle value in an ordered integer list.
If the size of the list is even, there is no middle value.
So the median is the mean of the two middle value.
Examples:
[2,3,4] , the median is 3
[2,3], the median is (2 + 3) / 2 = 2.5
Given an array nums, there is a sliding window of size k which is moving from
the very left of the array to the very right.
You can only see the k numbers in the window.
Each time the sliding window moves right by one position.
Your job is to output the median array for each window in the original array.
For example,
Given nums = [1,3,-1,-3,5,3,6,7], and k = 3.
Window position Median
--------------- -----
[1 3 -1] -3 5 3 6 7 1
1 [3 -1 -3] 5 3 6 7 -1
1 3 [-1 -3 5] 3 6 7 -1
1 3 -1 [-3 5 3] 6 7 3
1 3 -1 -3 [5 3 6] 7 5
1 3 -1 -3 5 [3 6 7] 6
Therefore, return the median sliding window as [1,-1,-1,3,5,6].
Note:
You may assume k is always valid, ie:
k is always smaller than input array's size for non-empty array.
"""
class Solution(object):
    """LC 480: medians of every length-k sliding window.

    Keeps the current window as a sorted list and slides it with
    bisect-based remove/insert; O(n*k) overall, fine for typical inputs.
    """

    def medianSlidingWindow(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: List[float]
        """
        from bisect import insort, bisect_left

        if not nums or k <= 0:
            return []
        window = sorted(nums[:k])
        half = k // 2
        medians = []
        for idx in range(k, len(nums) + 1):
            # Record the median of the current window.
            if k % 2:
                medians.append(float(window[half]))
            else:
                medians.append((window[half - 1] + window[half]) / 2.0)
            if idx == len(nums):
                break
            # Slide: drop the outgoing element, insert the incoming one.
            window.pop(bisect_left(window, nums[idx - k]))
            insort(window, nums[idx])
        return medians
def main():
    # Placeholder driver; no demonstration wired up yet.
    pass

if __name__ == '__main__':
    main()
| Python | 0.000044 | |
9a6bf30ecfa7b843d8588a8a7b052f87089e44c7 | convert csv to excel | write_excel.py | write_excel.py |
def Excel2CSV(ExcelFile, SheetName, CSVFile):
workbook = xlrd.open_workbook(ExcelFile)
try:
worksheet = workbook.sheet_by_name(SheetName)
except xlrd.biffh.XLRDError:
print "Missing portmap for switch " + str(SheetName)
print "Exiting program. Check spelling of Sheet name"
quit()
csvfile = open(CSVFile, 'wb')
wr = csv.writer(csvfile, quotechar="'", quoting=csv.QUOTE_ALL)
for rownum in xrange(worksheet.nrows):
wr.writerow(
list(x.encode('utf-8') if type(x) == type(u'') else x
for x in worksheet.row_values(rownum)))
csvfile.close() | Python | 0.999999 | |
a5ec49a658de23263802c7ddad02a4e34073a2a4 | add example of a go block returning value through a channel | example/go_block.py | example/go_block.py | import csp
def lazy_echo(x):
    # csp go-block: pause half a second, then terminate yielding x as the
    # value delivered on the block's channel.
    yield csp.wait(0.5)
    print "I'm done"
    yield csp.stop(x)
def main():
    # First take blocks until lazy_echo(1) finishes (~0.5 s).
    chan = csp.go(lazy_echo(1))
    print (yield csp.take(chan))
    # Here the go block completes during the 2 s wait, so the second take
    # returns its value immediately.
    chan = csp.go(lazy_echo(2))
    yield csp.wait(2)
    print (yield csp.take(chan))
| Python | 0 | |
410834d842a2d024f8af24009ee99ef834f91e29 | Add migration for `room_history_entry.active_during` | pycroft/model/alembic/versions/20234ac06668_use_tstzrange_for_room_history_entry.py | pycroft/model/alembic/versions/20234ac06668_use_tstzrange_for_room_history_entry.py | """Use tstzrange for room_history_entry
Revision ID: 20234ac06668
Revises: f138079b24c5
Create Date: 2021-10-24 16:31:51.027020
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
from sqlalchemy.dialects.postgresql import TSTZRANGE
revision = '20234ac06668'
down_revision = 'f138079b24c5'
branch_labels = None
depends_on = None
RHE = 'room_history_entry'
def upgrade():
    # Replace the trigger function so room changes clip/open tstzranges
    # instead of writing begins_at/ends_at.
    # -UPDATE FUNCTION
    op.execute("drop function user_room_change_update_history cascade")
    op.execute("""\
    CREATE OR REPLACE FUNCTION user_room_change_update_history() RETURNS trigger
    LANGUAGE plpgsql STRICT
    AS $$
    BEGIN
        IF old.room_id IS DISTINCT FROM new.room_id THEN
            IF old.room_id IS NOT NULL THEN
                /* User was living in a room before, history entry must be ended */
                /* active_during is expected to be [) */
                UPDATE "room_history_entry"
                    SET active_during = active_during - tstzrange(CURRENT_TIMESTAMP, null, '[)')
                    WHERE room_id = old.room_id AND user_id = new.id
                    AND active_during && tstzrange(CURRENT_TIMESTAMP, null, '[)');
            END IF;

            IF new.room_id IS NOT NULL THEN
                /* User moved to a new room. history entry must be created */
                INSERT INTO "room_history_entry" (user_id, room_id, active_during)
                /* We must add one second so that the user doesn't have two entries
                   for the same timestamp */
                VALUES(new.id, new.room_id, tstzrange(CURRENT_TIMESTAMP, null, '[)'));
            END IF;
        END IF;
        RETURN NULL;
    END;
    $$;""")
    ###
    # Add the range column and backfill it from the old [begins_at, ends_at).
    # +ACTIVE_DURING
    op.add_column(RHE,
                  sa.Column('active_during', TSTZRANGE, nullable=True))
    op.execute("update room_history_entry set active_during = tstzrange(begins_at, ends_at, '[)')")
    # NOTE(review): this alters the *membership* table, although the whole
    # section is about room_history_entry and its active_during was just
    # populated above -- looks like a copy-paste of the wrong table name;
    # confirm against migration f138079b24c5.
    op.alter_column('membership', 'active_during', nullable=False)
    op.create_index('ix_room_history_entry_active_during', RHE, ['active_during'],
                    unique=False, postgresql_using='gist')
    # btree_gist is required for the equality operators in the exclusion
    # constraint below.
    op.execute("create extension if not exists btree_gist")
    op.execute(
        'alter table room_history_entry '
        'add constraint "room_history_entry_room_id_user_id_active_during_excl" '
        'EXCLUDE USING gist (room_id WITH =, user_id WITH =, active_during WITH &&);'
    )
    ###
    # The exclusion constraint supersedes the trigger-based uniqueness check.
    # -UNIQUENESS CHECK
    op.execute(f'drop function room_history_entry_uniqueness cascade')
    # also deletes the trigger
    ###
    op.drop_constraint('room_history_entry_check', table_name=RHE)
    op.drop_column(RHE, 'begins_at')
    op.drop_column(RHE, 'ends_at')
def downgrade():
    # Recreate begins_at/ends_at from the range column, restore the old
    # trigger-based uniqueness check, then drop active_during.
    # +BEGINS_AT
    op.add_column(RHE, sa.Column(
        'begins_at', postgresql.TIMESTAMP(timezone=True),
        server_default=sa.text('CURRENT_TIMESTAMP'),
        autoincrement=False, nullable=False
    ))
    op.execute('update room_history_entry set begins_at = lower(active_during)')
    op.alter_column(RHE, 'begins_at', nullable=False)
    op.create_index('ix_room_history_entry_begins_at', RHE, ['begins_at'],
                    unique=False)
    ###
    # +ENDS_AT (NULL means "still active", i.e. an unbounded range end)
    op.add_column(RHE,
                  sa.Column('ends_at', postgresql.TIMESTAMP(timezone=True), autoincrement=False,
                            nullable=True))
    op.execute('update room_history_entry set ends_at = upper(active_during)')
    op.create_index('ix_room_history_entry_ends_at', RHE, ['ends_at'],
                    unique=False)
    ###
    op.create_check_constraint(
        "room_history_entry_check", RHE,
        "ends_at is null or begins_at <= ends_at",
    )
    # +UNIQUENESS CHECK -- trigger forbidding overlapping entries per user.
    op.execute("""\
    CREATE FUNCTION room_history_entry_uniqueness() RETURNS trigger
    LANGUAGE plpgsql STABLE STRICT
    AS $$
    DECLARE
      rhe_id integer;
      count integer;
    BEGIN
        SELECT COUNT(*), MAX(rhe.id) INTO STRICT count, rhe_id FROM "room_history_entry" rhe
        WHERE
            (tstzrange(NEW.begins_at,
                       COALESCE(new.ends_at, 'infinity'::timestamp),
                       '()')
             &&
             tstzrange(rhe.begins_at,
                       COALESCE(rhe.ends_at, 'infinity'::timestamp),
                       '()')
            )
            AND NEW.user_id = rhe.user_id AND NEW.id != rhe.id;

        IF count > 0 THEN
            RAISE EXCEPTION 'entry overlaps with entry %',
                rhe_id
                USING ERRCODE = 'integrity_constraint_violation';
        END IF;

        RETURN NULL;
    END;
    $$; """)
    op.execute(
        "CREATE TRIGGER room_history_entry_uniqueness_trigger "
        "AFTER INSERT OR UPDATE ON room_history_entry "
        "FOR EACH ROW EXECUTE PROCEDURE room_history_entry_uniqueness();"
    )
    ###
    # -ACTIVE_DURING: drop exclusion constraint, gist index and the column.
    op.drop_constraint(
        constraint_name='room_history_entry_room_id_user_id_active_during_excl',
        table_name=RHE,
    )
    op.drop_index('ix_room_history_entry_active_during', table_name=RHE,
                  postgresql_using='gist')
    op.drop_column(RHE, 'active_during')
    ###
    # +UPDATE_FUNCTION: restore the begins_at/ends_at-based trigger body.
    op.execute("""\
    CREATE OR REPLACE FUNCTION user_room_change_update_history() RETURNS trigger
    LANGUAGE plpgsql STRICT
    AS $$
    BEGIN
        IF old.room_id IS DISTINCT FROM new.room_id THEN
            IF old.room_id IS NOT NULL THEN
                /* User was living in a room before, history entry must be ended */
                UPDATE "room_history_entry" SET ends_at = CURRENT_TIMESTAMP
                WHERE user_id = new.id AND ends_at IS NULL;
            END IF;

            IF new.room_id IS NOT NULL THEN
                /* User moved to a new room. history entry must be created */
                INSERT INTO "room_history_entry" (user_id, room_id, begins_at)
                /* We must add one second so that the user doesn't have two entries
                   for the same timestamp */
                VALUES(new.id, new.room_id, CURRENT_TIMESTAMP + INTERVAL '1' second);
            END IF;
        END IF;
        RETURN NULL;
    END;
    $$;""")
    ###
| Python | 0.000001 | |
5007a2910f54c339c50667993c11fd4586412524 | add letter code | wordonhd/Letter.py | wordonhd/Letter.py | class Letter(object):
_values = {
'ENIOA': 1,
'SDTR': 2,
'MLKPBG': 3,
'ZVUFJH': 4,
'CW': 5,
'XY': 8,
'Q': 10
}
def __init__(self, letter):
self.letter = letter[-1]
self.wordon = letter[0] == '!'
@property
def value(self):
return list(filter(lambda x: self.letter in x[0], self._values.items()))[0][1] | Python | 0.978909 | |
3ef7175814cd76621eeee00a26cff786ea032727 | Add flood it example | examples/floodit.py | examples/floodit.py | from guizero import App, Waffle, Text, PushButton, info
import random
# Set up the game - colours, width and height of board and no of moves allowed
colours = ["red", "blue", "green", "yellow", "fuchsia", "purple"]
b_width = 14
b_height = 14
moves_limit = 25
# Set up the palette
def init_palette():
    """Paint one dot of each playable colour onto the palette strip."""
    # FIX: a list comprehension was used purely for its side effects and
    # colours.index(c) made it quadratic; enumerate gives the same indices.
    for position, colour in enumerate(colours):
        palette.set_pixel(position, 0, colour)
# Fill the board with coloured regions
def fill_board():
    """Randomise every square of the playing board."""
    # FIX: plain nested loops instead of a throwaway list comprehension
    # built only for its side effects; same y-outer/x-inner order.
    for y in range(b_height):
        for x in range(b_width):
            board.set_pixel(x, y, random.choice(colours))
# Find and flood any squares next to this
def begin_flood(x, y):
    # (x, y) is the clicked palette dot; its colour is the new fill colour,
    # applied starting from the board's top-left square.
    replacement = palette.get_pixel(x,y)
    target = board.get_pixel(0,0)
    flood(0, 0, target, replacement)
    win_check()

# Recursively floods adjacent squares
def flood(x, y, target, replacement):
    # Algorithm from https://en.wikipedia.org/wiki/Flood_fill
    if target == replacement:
        return False
    if board.get_pixel(x, y) != target:
        return False
    board.set_pixel(x, y, replacement)
    # Recurse into the four in-bounds neighbours.
    if y+1 <= b_height-1: # South
        flood(x, y+1, target, replacement)
    if y-1 >= 0: # North
        flood(x, y-1, target, replacement)
    if x+1 <= b_width-1: # East
        flood(x+1, y, target, replacement)
    if x-1 >= 0: # West
        flood(x-1, y, target, replacement)
# Check if there are any moves left or if they won
def win_check():
    # Spend one move, then inspect the board.
    moves_left = int(moves_text.value)-1
    moves_text.value = moves_left # Update moves left
    if moves_left > 0:
        squares = board.get_all()
        if all(colour == squares[0] for colour in squares):
            win_text.value = "Winner!"
            reset.visible = True
            palette.disable()
    else:
        # NOTE(review): a flood that completes the board on the final move
        # still lands here and is reported as a loss -- confirm intended.
        win_text.value = "No more moves left!"
        reset.visible = True
        palette.disable()

# Reset the board and remove the win text/reset button
def reset_board():
    reset.visible = False
    win_text.value = ""
    moves_text.value = moves_limit
    init_palette()
    fill_board()
    palette.enable()
# Set up the game board
app = App("Flood it")
# NOTE(review): height is passed b_width -- harmless while the board is
# square, but it should read b_height.
board = Waffle(app, width=b_width, height=b_width, pad=0)
# One-row dotty palette; clicking a dot starts a flood in that colour.
palette = Waffle(app, width=len(colours), height=1, command=begin_flood, dotty=True)
moves_left = Text(app, text="Moves left:")
moves_text = Text(app, text=moves_limit)
# Win text and reset button (initially invisible)
win_text = Text(app)
reset = PushButton(app, text="Start again", command=reset_board)
reset.visible = False
# Initialise the palette and the random board pattern
init_palette()
fill_board()
# Instructions
instructions = PushButton(app, command=info, args=["Instructions", "Click a dot to flood the grid with that colour, beginning from the top left square. You have 25 moves to flood all squares on the grid with the same colour."], text="Instructions")
app.display()
| Python | 0.000005 | |
2e44b753a071aeba95b51bd03c5635a1eb4d7f28 | Create gcd.py | CiO/gcd.py | CiO/gcd.py | from fractions import gcd
def greatest_common_divisor(*args):
    """Return the greatest common divisor of one or more integers.

    Raises ValueError (from unpacking) when called with no arguments.
    """
    # FIX: fractions.gcd was deprecated and removed in Python 3.9; prefer
    # math.gcd (3.5+) and keep the legacy import as a fallback. The local
    # import also shields the function from the module-level one.
    try:
        from math import gcd as _gcd
    except ImportError:  # very old interpreters only
        from fractions import gcd as _gcd
    result, *rest = args
    for n in rest:
        result = _gcd(result, n)
    return result
| Python | 0.000001 | |
239488d33f94b0262e642fbf751878894fb7510e | add test for post form admin in articles | opps/articles/tests/test_forms.py | opps/articles/tests/test_forms.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.contrib.sites.models import Site
from django.contrib.auth import get_user_model
from opps.channels.models import Channel
from opps.core.widgets import OppsEditor
from ..models import Post
from ..forms import PostAdminForm
class PostFormTest(TestCase):
    """Exercises PostAdminForm construction against a minimal Post fixture."""

    def setUp(self):
        # Minimal object graph a Post requires: user, site and channel.
        # NOTE(review): relies on the default 'example.com' Site existing.
        User = get_user_model()
        self.user = User.objects.create(username=u'test', password='test')
        self.site = Site.objects.filter(name=u'example.com').get()
        self.channel = Channel.objects.create(name=u'Home', slug=u'home',
                                              description=u'home page',
                                              site=self.site, user=self.user)

    def test_init(self):
        """
        Test successful init without data
        """
        self.post = Post.objects.create(title=u'test', user=self.user,
                                        site=self.site, channel=self.channel)
        form = PostAdminForm(instance=self.post)
        self.assertTrue(isinstance(form.instance, Post))
        self.assertEqual(form.instance.pk, self.post.pk)

    def test_default_multiupload_link(self):
        """
        Test default value field multiupload link
        """
        self.post = Post.objects.create(title=u'test', user=self.user,
                                        site=self.site, channel=self.channel)
        form = PostAdminForm(instance=self.post)
        self.assertEqual(form.multiupload_link, '/fileupload/image/')

    def test_editor_widgets(self):
        """
        Test auto set field widget Editor
        """
        self.post = Post.objects.create(title=u'test', user=self.user,
                                        site=self.site, channel=self.channel)
        form = PostAdminForm(instance=self.post)
        self.assertTrue(isinstance(form.fields['content'].widget,
                                   OppsEditor))
| Python | 0 | |
94dbda64d07838a7408b94251972d81897536380 | Add listener example file | listeners_example.py | listeners_example.py | import turtle
turtle.penup()
turtle.ht()  # hide the turtle; the window is only used to capture key events

# Key handlers: each simply reports which arrow key was pressed.
def up():
    print("You pressed Up!")

def down():
    print("You pressed Down!")

def left():
    print("You pressed Left!")

def right():
    print("You pressed Right!")

turtle.onkey(up, 'Up')
turtle.onkey(down, 'Down')
turtle.onkey(left, 'Left')
turtle.onkey(right, 'Right')

# NOTE(review): repeat() re-arms itself every 500 ms but is never started,
# so the timer chain never runs unless repeat() is called once somewhere.
def repeat():
    turtle.ontimer(repeat, 500)

turtle.listen() # Remember to put this after your listeners!
| Python | 0 | |
1d0aff329c5adb836e7b055c042990de219debe0 | Add rough first implementation of widgets.py | wtforms/widgets.py | wtforms/widgets.py | """
wtforms.widgets
~~~~~~~~~~~~~~~
The WTForms widget system.
:copyright: 2009 by James Crasta, Thomas Johansson.
:license: MIT, see LICENSE.txt for details.
"""
from cgi import escape
__all__ = (
'ListWidget', 'TextInput', 'PasswordInput', 'HiddenInput', 'CheckboxInput',
'RadioInput', 'Textarea', 'Select'
)
def html_params(**kwargs):
    """
    Generate HTML parameters for keywords.

    Keys are emitted in sorted order; `class_`/`class__` are accepted so
    callers can pass the reserved word `class`. Values are HTML-escaped.
    """
    params = []
    for k in sorted(kwargs):
        # BUG FIX: grab the value *before* normalizing the key; the
        # original looked up kwargs[k] after stripping the trailing
        # underscore, raising KeyError for class_/class__.
        v = kwargs[k]
        if k in ('class_', 'class__'):
            k = k[:-1]
        k = unicode(k)
        v = escape(unicode(v), quote=True)
        params.append(u'%s="%s"' % (k, v))
    return u' '.join(params)
class Widget(object):
    """
    Base class for all WTForms widgets.
    """
    def render(self, field, **kwargs):
        """
        Renders the widget. All widgets must implement this.

        `field`
            The field to render.
        `**kwargs`
            Any parameters used for rendering. Typically used to override or
            pass extra html attributes.
        """
        # Abstract: concrete widgets return a unicode HTML snippet.
        raise NotImplementedError()
class ListWidget(Widget):
    """Renders an iterable of subfields as an <ul> or <ol> of <li> items.

    With prefix_label=True each item reads "label: field"; otherwise the
    field is rendered first, followed by its label (checkbox style).
    """

    def __init__(self, parent_tag='ul', prefix_label=True):
        assert parent_tag in ('ol', 'ul')
        self.parent_tag = parent_tag
        self.prefix_label = prefix_label

    def render(self, field, **kwargs):
        html = [u'<%s %s>' % (self.parent_tag, html_params(**kwargs))]
        for subfield in field:
            if self.prefix_label:
                html.append(u'<li>%s: %s</li>' % (subfield.label, subfield()))
            else:
                # BUG FIX: this branch appended to the undefined name `out`,
                # raising NameError whenever prefix_label was False.
                html.append(u'<li>%s%s</li>' % (subfield(), subfield.label))
        html.append(u'</%s>' % self.parent_tag)
        return ''.join(html)
# Placeholder widget hierarchy -- concrete render() implementations are
# still to be written for each of these.
class Input(Widget):
    pass

class TextInput(Input):
    pass

class PasswordInput(Input):
    pass

class HiddenInput(Input):
    pass

class CheckboxInput(Input):
    pass

class RadioInput(Input):
    pass

class Textarea(Widget):
    pass

class Select(Widget):
    pass
| Python | 0 | |
8062aa6dcb20e3b1294ce62d6d0cce1841fd21e1 | Add cartan_wvecs.py | cartan_wvecs.py | cartan_wvecs.py | # Author: Hersh Singh [hershdeep@gmail.com]
# Date: August 05, 2013
# Description:
# Given the cartan matrix and the dynkin coefficients of the highest weight, return all the weight vectors, their weights
# Todo: dimensionality of each weight space using freudenthal's formula
# Reference: Cahn Chapter 10
from scipy import *
# Cartan Matrix for the given rep
C = array([[2., -1.], [-1., 2.]]) #SU(3)
#C = array([[2., -1., 0.], [-1., 2., -2.], [0., -1., 2.]]) #B3
N = len(C)   # rank of the algebra (number of simple roots)
# Dynkin Coeffs for the hightest weight
d_highest = array([1, 0])
#d_highest = array([1, 1]) #SU(3) Adjoint rep
#d_highest = array([0, 0, 1]) #B3
#m[j] = 2<v,alpha[j]>/<alpha[j],alpha[j]>
#M[k] = list of roots at level k
M = [[d_highest]]        # weight vectors per level, seeded with the highest weight
Mcoord = [[zeros(N)]]    # per-level simple-root subtraction counts for each weight
def get_p(Mcoord, k, i):
    """Return p, where p[n] counts how many levels above level k the i-th
    weight can be reached by repeatedly adding simple root n (i.e. the
    `p` of the m = p - q master formula)."""
    #print "\nin get_p"
    p = zeros(N)
    if k==0:
        return p
    Mc = Mcoord[k][i]
    #print Mc
    # for each dynkin coefficient of the current weight vector
    for n in range(N):
        #print "n=",n
        #for each level above the current level
        #print k-1
        for kk in range(k-1, -1, -1):
            # Coordinate the weight would have had (k-kk) levels higher.
            element = Mc + (k-kk)*identity(N)[n]
            #print 'looking for', element, 'in',Mcoord[kk]
            #print matchinlist(Mcoord[kk],element)
            if matchinlist(Mcoord[kk], element):
                p[n]=p[n]+1
            else:
                break
    return p
# Returns true if element is found in the list
def matchinlist(list, element):
    """True when `element` compares array-equal to some member of `list`."""
    for candidate in list:
        if array_equal(candidate, element):
            return True
    return False
# at level k
k = 0
done_flag = 0
# Descend level by level, subtracting simple roots until no weight admits
# a further subtraction (all m <= 0).
while done_flag == 0:
    print ""
    print "Level:", k
    print "Last row of weight vectors:", M[k]
    print "Last row of weight vectors coords:", Mcoord[k]
    M.append([])
    Mcoord.append([])
    for i, v in enumerate(M[k]):
        print "Weight vector: ",i,v
        # m = p + (Dynkin coefficients): m[n] > 0 means root n can still
        # be subtracted from this weight.
        p = get_p(Mcoord,k,i)
        m = p+v
        print "M,P,V: ", m,p,v
        if (sum(m>0) == 0):
            done_flag = 1
            break
        # One new weight per subtractable root: v - C[n] with coordinate
        # bumped along axis n.
        v_repeat = tile(v, [sum(m > 0), 1])
        Mcoord_repeat = tile(Mcoord[k][i], [sum(m > 0), 1])
        new_wvecs = v_repeat - C[m > 0]
        # using the fact the True,False is typecasted to 1,0 before doing arithmetic with integers
        new_Mcoord = Mcoord_repeat - identity(N)[m > 0]
        # Clean up by removing duplicates
        #print new_wvecs
        for idx,wvec in enumerate(new_wvecs):
            if not matchinlist(M[k+1],wvec):
                M[k+1].append(wvec)
                Mcoord[k+1].append(new_Mcoord[idx])
    k=k+1
| Python | 0.002077 | |
d3973ca556a28c84765ea8fbc19b2e8f66682fc2 | Add server.py module to tst package | tst/server.py | tst/server.py | import sys
import signal
import json
from subprocess import Popen, PIPE, CalledProcessError
import tst
from colors import *
from utils import to_unicode, cprint
class ConnectionFail(Exception): pass  # raised when curl gets no response headers

class Server(object):
    """Singleton HTTP client for the tst server, shelling out to curl."""

    __instance = None

    class Response:
        # Lightweight response record; attributes are set by request().
        def json(self):
            # Lazily parse and cache the body; None when it isn't JSON.
            if '_json' not in dir(self):
                try:
                    self._json = json.loads(self.body)
                except:
                    self._json = None
            return self._json

    def __new__(cls):
        # instantiation -- classic singleton via __new__.
        if Server.__instance is not None:
            return Server.__instance
        Server.__instance = object.__new__(cls)
        self = Server.__instance
        # initialization
        #self.config = Config()
        self.config = tst.get_config()
        self.user = self.config.get('user')
        self.token = self.config.get('access_token')
        return self

    # NOTE(review): `headers={}` is a mutable default that this method
    # mutates (TST-CLI-Release / Authorization leak across calls) -- should
    # be `headers=None`.
    def request(self, method, path, headers={}, payload=None, exit_on_fail=False):
        curl_command = [
            'curl',
            '-q',                 # don't use ~/.curlrc (must be first arg)
            '-X', method.upper(), # http verb
            '-v',                 # be verbose: print report to stderr
            '-s',                 # don't print progress meter
            '-L'                  # follow redirects
        ]
        headers['TST-CLI-Release'] = self.config.get('release', 'unknown')
        if 'Authorization' not in headers:
            headers['Authorization'] = 'Bearer %s' % self.token
        for hname, hvalue in headers.items():
            curl_command.append('-H')
            curl_command.append('%s: %s' % (hname, hvalue))
        url = self.config['url'] + path
        curl_command.append(url)
        if payload is not None:
            curl_command.append('-d')
            data = "%s" % json.dumps(payload)
            curl_command.append(data)
        # NOTE(review): alarm takes seconds, so this is ~5.5 hours, not 20 s.
        signal.alarm(20000) # timeout in seconds
        process = Popen(curl_command, stdout=PIPE, stderr=PIPE)
        try:
            stdout, stderr = map(to_unicode, process.communicate())
            signal.alarm(0) # reset alarm for future use...
            process.wait()
        except: # timeout!!!
            process.terminate()
            raise
        # raw data
        response = self.Response()
        response.stderr = stderr
        response.stdout = stdout
        response.exit_status = process.returncode
        # curl messages (stderr lines prefixed '*')
        lines = [l[2:] for l in stderr.splitlines() if l and l[0] == '*']
        response.curl_messages = "\n".join(lines)
        # request headers (stderr lines prefixed '>')
        lines = [l[2:] for l in stderr.splitlines() if l and l[0] == '>']
        response.request_headers = "\n".join(lines)
        # response headers (stderr lines prefixed '<')
        lines = [l[2:] for l in stderr.splitlines() if l and l[0] == '<']
        response.headers = "\n".join(lines)
        if not response.headers:
            # No headers at all: the connection never succeeded.
            if exit_on_fail:
                msg = "tst: can't connect to server"
                _assert(False, msg)
            raise ConnectionFail("can't connect to tst online")
        # body -- take the status from the *last* HTTP status line seen
        # (curl may have followed redirects).
        response_lines = response.headers.splitlines()
        response.status_code = None
        for i in xrange(len(response_lines)-1, -1, -1):
            if response_lines[i].startswith("HTTP"):
                status_line = response_lines[i]
                response.status_code = int(status_line.split()[1])
                break
        # exit_on_fail: any non-2xx status aborts the CLI with a message.
        if exit_on_fail and not (200 <= response.status_code < 300):
            msg = 'Request to server failed'
            try:
                data = json.loads(response.stdout)
                # NOTE(review): `type(data['messages'] == list)` parenthesizes
                # the comparison, so this condition is effectively always
                # true when 'messages' exists -- likely meant
                # `type(data['messages']) == list`.
                if 'messages' in data and type(data['messages'] == list):
                    msg += "\nServer message: " + str(data['messages'][0])
            except:
                data = {}
                msg += ('\n' + "Couldn't parse server response")
            cprint(LRED, msg)
            if 'messages' in data and data['messages'][0] == 'invalid token':
                print("---")
                print("Use `tst login` to log in to the server")
            sys.exit(1)
        response.body = stdout if response.status_code else None
        return response

    # Thin verb helpers delegating to request().
    def get(self, path, headers={}, exit_on_fail=False):
        return self.request('get', path, headers, exit_on_fail=exit_on_fail)

    def post(self, path, headers={}, payload='', exit_on_fail=False):
        return self.request('post', path, headers=headers, payload=payload, exit_on_fail=exit_on_fail)

    def patch(self, path, payload, headers={}, exit_on_fail=False):
        return self.request('patch', path, headers=headers, payload=payload, exit_on_fail=exit_on_fail)

    def delete(self, path, payload='', headers={}, exit_on_fail=False):
        return self.request('delete', path, headers=headers, payload=payload, exit_on_fail=exit_on_fail)
| Python | 0 | |
be81dbc33e932e870a66ad0663c23e5d05b01ffa | Create Counter.py | Counter.py | Counter.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@_ambonilla 2014
Using cocos & piglet libraries, is a small counter program
where when you push the up key it will add a number to the
displayed value, and the down key will substract one
"""
import cocos
import sys
from cocos.actions import *
import pyglet
from pyglet.window import key
class TempBackground(cocos.layer.Layer):
    """Layer showing a number that Up/Down increments/decrements."""

    # Tell cocos to route keyboard events to this layer.
    is_event_handler = True

    def on_key_press(self, symbol, modifiers):
        if symbol == key.UP:
            self.counter = self.counter + 1
        elif symbol == key.DOWN:
            self.counter = self.counter - 1
        elif symbol == key.ESCAPE:
            # BUG FIX: the original instantiated SystemExit() without
            # raising it, so Escape silently did nothing; sys.exit()
            # actually raises SystemExit and ends the app.
            sys.exit()
        self.update_text()

    def update_text(self):
        # Push the counter value into the on-screen label.
        self.label.element.text = str(self.counter)

    def __init__(self):
        # NOTE(review): Layer.__init__ returns None, so startBackground is
        # always None; kept only for attribute compatibility.
        self.startBackground = super(TempBackground, self).__init__()
        self.counter = 0
        self.label = cocos.text.Label(str(self.counter),
                                      font_name='Arial',
                                      font_size=150,
                                      anchor_x='center',
                                      anchor_y='center')
        self.label.position = 320,240
        self.update_text()
        self.add(self.label)
if __name__ == "__main__":
    # Boot the cocos director and run a scene containing the counter layer.
    cocos.director.director.init(resizable=False, fullscreen=False)
    temp_layer = TempBackground()
    main_scene = cocos.scene.Scene(temp_layer)
    cocos.director.director.run(main_scene)
| Python | 0.000001 | |
c80baf708c956a9814ef81213a66da8d443de12a | add migration | apps/bplan/migrations/0002_auto_20170509_1358.py | apps/bplan/migrations/0002_auto_20170509_1358.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Pure metadata migration: updates verbose_name/max_length attributes
    # on Statement fields; no data is moved.

    dependencies = [
        ('meinberlin_bplan', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='statement',
            name='email',
            field=models.EmailField(verbose_name='Email address', max_length=254, blank=True),
        ),
        migrations.AlterField(
            model_name='statement',
            name='name',
            field=models.CharField(verbose_name='Your Name', max_length=255),
        ),
        migrations.AlterField(
            model_name='statement',
            name='postal_code_city',
            field=models.CharField(verbose_name='Postal code, City', max_length=255),
        ),
        migrations.AlterField(
            model_name='statement',
            name='statement',
            field=models.TextField(verbose_name='Statement', max_length=17500),
        ),
        migrations.AlterField(
            model_name='statement',
            name='street_number',
            field=models.CharField(verbose_name='Street, House number', max_length=255),
        ),
    ]
| Python | 0.000001 | |
2c78290cc569eb70b5b7098d154da3fb7a2247a9 | Add db_mktag.py, command line tag creator. | db_mktag.py | db_mktag.py | #!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
from sys import argv, exit
from dbclient import dbclient
# Usage: db_mktag.py tagname [tagtype]
if len(argv) not in (2, 3):
    print "Usage:", argv[0], "tagname [tagtype]"
    exit(1)

client = dbclient()
# Forward tagname (and the optional tagtype) straight to the server.
client.add_tag(*argv[1:])
| Python | 0 | |
3609c5842b33ca4146ad14b74c76f8954545aaa8 | Add commands for cases and variants | loqusdb/commands/view.py | loqusdb/commands/view.py | # -*- coding: utf-8 -*-
import logging
import click
from . import base_command
logger = logging.getLogger(__name__)
@base_command.command()
@click.option('-c' ,'--case-id',
              help='Search for case'
)
@click.pass_context
def cases(ctx, case_id):
    """Display all cases in the database."""
    adapter = ctx.obj['adapter']
    if case_id:
        # Single-case lookup by id.
        case = adapter.case(case_id)
        if case:
            click.echo(case)
        else:
            logger.info("Case {0} does not exist in database".format(case_id))
    else:
        # Dump every case, remembering whether anything was printed.
        i = 0
        for case in adapter.cases():
            i += 1
            click.echo(case)
        if i == 0:
            logger.info("No cases found in database")
@base_command.command()
@click.option('--variant-id',
              help='Search for a variant'
)
@click.pass_context
def variants(ctx, variant_id):
    """Display variants in the database."""
    adapter = ctx.obj['adapter']
    if variant_id:
        # Single-variant lookup by id.
        found = adapter.get_variant({'_id':variant_id})
        if not found:
            logger.info("Variant {0} does not exist in database".format(variant_id))
        else:
            click.echo(found)
        return
    # No id given: dump everything, remembering whether anything printed.
    total = 0
    for record in adapter.get_variants():
        total += 1
        click.echo(record)
    if total == 0:
        logger.info("No variants found in database")
| Python | 0.000001 | |
dd2f332dd1b7a215d5a6aa81819e3d66d46c1b91 | add python solution for 20 | 01-50/20/20.py | 01-50/20/20.py | import math
# Project Euler 20: sum of the digits of 100!.
# rstrip('L') defensively removes the long-integer suffix Python 2 could
# append to a big-number representation.
print sum(int(c) for c in str(math.factorial(100)).rstrip('L'))
| Python | 0.000077 | |
7f661e24388e82ae2e2872ab11ee6a84d487aac7 | Create py-mysql-select.py | py-mysql-select.py | py-mysql-select.py | #!/usr/bin/env python
# --*-- coding:utf-8 --*--
import MySQLdb #操作mysql,需要加载MySQLdb模块
#创建连接
conn = MySQLdb.connect(host = '127.0.0.1',user = 'root',passwd = '123',db = 'mydb') #使用connect方法对数据库进行连接,相当于一个门
cur = conn.cursor() #使用conn.cursor方法,相当于操作的一双手
#操作数据库
reCount = cur.execute('select * from students') #可以看到主函数的操作是查看students表
table = cur.fetchall() #将操作所得到的数据全部拿出来 #
#关闭连接
cur.close() #结束操作后,将手拿回来
conn.close() #将门关上
print reCount #cur.execute返回的是操作影响的行数
print data
| Python | 0.000008 | |
48b2b234377d8e66ccb274e4845a835486228166 | Create test_utils.py | utils_test.py | utils_test.py | import pytest
from utils import *
def test_struct_initialization():
s = Struct(a=1, b=2)
assert s.a == 1
assert s.b == 2
def test_struct_assignment():
s = Struct(a=1)
s.a = 3
assert s.a == 3
def test_removeall_list():
assert removeall(4, []) == []
assert removeall(4, [1,2,3,4]) == [1,2,3]
def test_removeall_string():
assert removeall('s', '') == ''
assert removeall('s', 'This is a test. Was a test.') == 'Thi i a tet. Wa a tet.'
def test_count_if():
is_odd = lambda x: x % 2
assert count_if(is_odd, []) == 0
assert count_if(is_odd, [1, 2, 3, 4, 5]) == 3
def test_argmax():
assert argmax([-2, 1], lambda x: x**2) == -2
def test_argmin():
assert argmin([-2, 1], lambda x: x**2) == 1
if __name__ == '__main__':
pytest.main()
| Python | 0.000001 | |
7581fbc397915c1ad72714203fee2349a84e14e9 | add notifiaction push script - pushNotif.py | API/ssc/SscData/pushNotif.py | API/ssc/SscData/pushNotif.py | from urllib2 import *
import urllib
import json
import sys
MY_API_KEY="AIzaSyCgSjnjxtYBGMOq7jNgnE_tbhpOJjU5nOo"
messageTitle = sys.argv[1]
messageBody = sys.argv[2]
data={
"to" : "/topics/sscapp",
"notification" : {
"body" : messageBody,
"title" : messageTitle,
"icon" : "notif_icon"
}
}
dataAsJSON = json.dumps(data)
request = Request(
"https://gcm-http.googleapis.com/gcm/send",
dataAsJSON,
{ "Authorization" : "key="+MY_API_KEY,
"Content-type" : "application/json"
}
)
print urlopen(request).read()
| Python | 0 | |
4d883d16ad1f1793ac98cbc8161aa40e88d46b44 | Add unit tests for bitmask module | lib/stsci/tools/tests/test_bitmask.py | lib/stsci/tools/tests/test_bitmask.py | """
A module containing unit tests for the `bitmask` modue.
:Authors: Mihai Cara (contact: help@stsci.edu)
:License: `<http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE>`_
"""
from __future__ import (absolute_import, division, unicode_literals,
print_function)
import warnings
import numpy as np
import pytest
from stsci.tools import bitmask
MAX_INT_TYPE = np.maximum_sctype(np.int)
MAX_UINT_TYPE = np.maximum_sctype(np.uint)
MAX_UINT_FLAG = np.left_shift(
MAX_UINT_TYPE(1),
MAX_UINT_TYPE(np.iinfo(MAX_UINT_TYPE).bits - 1)
)
MAX_INT_FLAG = np.left_shift(
MAX_INT_TYPE(1),
MAX_INT_TYPE(np.iinfo(MAX_INT_TYPE).bits - 2)
)
SUPER_LARGE_FLAG = 1 << np.iinfo(MAX_UINT_TYPE).bits
EXTREME_TEST_DATA = np.array([
0, 1, 1 + 1 << 2, MAX_INT_FLAG, ~0, MAX_INT_TYPE(MAX_UINT_FLAG),
1 + MAX_INT_TYPE(MAX_UINT_FLAG)
], dtype=MAX_INT_TYPE)
@pytest.mark.parametrize('flag', [0, -1])
def test_nonpositive_not_a_bit_flag(flag):
assert not bitmask.is_bit_flag(n=flag)
@pytest.mark.parametrize('flag', [
1, MAX_UINT_FLAG, int(MAX_UINT_FLAG), SUPER_LARGE_FLAG
])
def test_is_bit_flag(flag):
assert bitmask.is_bit_flag(n=flag)
@pytest.mark.parametrize('number', [0, 1, MAX_UINT_FLAG, SUPER_LARGE_FLAG])
def test_is_int(number):
assert bitmask._is_int(number)
@pytest.mark.parametrize('number', ['1', True, 1.0])
def test_nonint_is_not_an_int(number):
assert not bitmask._is_int(number)
@pytest.mark.parametrize('flag,flip,expected', [
(3, None, 3),
(3, True, -4),
(3, False, 3),
([1, 2], False, 3),
([1, 2], True, -4)
])
def test_interpret_valid_int_bit_flags(flag, flip, expected):
assert(
bitmask.interpret_bit_flags(bit_flags=flag, flip_bits=flip) == expected
)
@pytest.mark.parametrize('flag', [None, ' ', 'None', 'Indef'])
def test_interpret_none_bit_flags_as_None(flag):
assert bitmask.interpret_bit_flags(bit_flags=flag) is None
@pytest.mark.parametrize('flag,expected', [
('1', 1),
('~-1', ~(-1)),
('~1', ~1),
('1,2', 3),
('1+2', 3),
('(1,2)', 3),
('(1+2)', 3),
('~1,2', ~3),
('~1+2', ~3),
('~(1,2)', ~3),
('~(1+2)', ~3)
])
def test_interpret_valid_str_bit_flags(flag, expected):
assert(
bitmask.interpret_bit_flags(bit_flags=flag) == expected
)
@pytest.mark.parametrize('flag,flip', [
(None, True),
(' ', True),
('None', True),
('Indef', True),
(None, False),
(' ', False),
('None', False),
('Indef', False),
('1', True),
('1', False)
])
def test_interpret_None_or_str_and_flip_incompatibility(flag, flip):
with pytest.raises(TypeError):
bitmask.interpret_bit_flags(bit_flags=flag, flip_bits=flip)
@pytest.mark.parametrize('flag', [True, 1.0, [1.0], object])
def test_interpret_wrong_flag_type(flag):
with pytest.raises(TypeError):
bitmask.interpret_bit_flags(bit_flags=flag)
@pytest.mark.parametrize('flag', ['SOMETHING', '1.0,2,3'])
def test_interpret_wrong_string_int_format(flag):
with pytest.raises(ValueError):
bitmask.interpret_bit_flags(bit_flags=flag)
def test_interpret_duplicate_flag_warning():
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert bitmask.interpret_bit_flags([2, 4, 4]) == 6
assert len(w)
assert issubclass(w[-1].category, UserWarning)
assert "Duplicate" in str(w[-1].message)
@pytest.mark.parametrize('flag', [[1, 2, 3], '1, 2, 3'])
def test_interpret_non_flag(flag):
with pytest.raises(ValueError):
bitmask.interpret_bit_flags(bit_flags=flag)
def test_interpret_allow_single_value_str_nonflags():
assert bitmask.interpret_bit_flags(bit_flags=str(3)) == 3
@pytest.mark.parametrize('flag', [
'~',
'( )',
'(~1,2)',
'~(1,2',
'1,~2',
'1,(2,4)',
'1,2+4',
'1+4,2'
])
def test_interpret_bad_str_syntax(flag):
with pytest.raises(ValueError):
bitmask.interpret_bit_flags(bit_flags=flag)
def test_bitfield_must_be_integer_check():
with pytest.raises(TypeError):
bitmask.bitfield_to_boolean_mask(1.0, 1)
@pytest.mark.parametrize('data,flags,flip,goodval,dtype,ref', [
(EXTREME_TEST_DATA, None, None, True, np.bool_,
EXTREME_TEST_DATA.size * [1]),
(EXTREME_TEST_DATA, None, None, False, np.bool_,
EXTREME_TEST_DATA.size * [0]),
(EXTREME_TEST_DATA, [1, MAX_UINT_FLAG], False, True, np.bool_,
[1, 1, 0, 0, 0, 1, 1]),
(EXTREME_TEST_DATA, None, None, True, np.bool_,
EXTREME_TEST_DATA.size * [1]),
(EXTREME_TEST_DATA, [1, MAX_UINT_FLAG], False, False, np.bool_,
[0, 0, 1, 1, 1, 0, 0]),
(EXTREME_TEST_DATA, [1, MAX_UINT_FLAG], True, True, np.int8,
[1, 0, 1, 1, 0, 0, 0])
])
def test_bitfield_to_boolean_mask(data, flags, flip, goodval, dtype, ref):
mask = bitmask.bitfield_to_boolean_mask(
bitfield=data,
ignore_flags=flags,
flip_bits=flip,
good_mask_value=goodval,
dtype=dtype
)
assert(mask.dtype == dtype)
assert np.all(mask == ref)
| Python | 0 | |
8d5f3136fb737c8058d8b0bb4d866d1fe5bb3af8 | Add main function for specchio | specchio/main.py | specchio/main.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import time
from watchdog.observers import Observer
from specchio.handlers import SpecchioEventHandler
from specchio.utils import logger
def main():
"""Main function for specchio
Example: specchio test/ user@host:test/
:return: None
"""
if len(sys.argv) == 2:
src_path = sys.argv[0].strip()
dst_ssh, dst_path = sys.argv[1].strip().split(":")
event_handler = SpecchioEventHandler(
src_path=src_path, dst_ssh=dst_path, dst_path=dst_path
)
logger.info("Initialize Specchio")
observer = Observer()
observer.schedule(event_handler, src_path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
else:
print """Specchio is a tool that can help you rsync your file
it use `.gitignore` in git to mark which file is ignored.
Usage: specchio src/ user@host:dst"""
| Python | 0.000003 | |
e487ca21da9e7b62a860b91aadfecdf36df005a2 | add public templates module | pymzn/templates.py | pymzn/templates.py |
from .mzn import templates as _templates
from .mzn.templates import *
__all__ = _templates.__all__
| Python | 0.000001 | |
1019f866fc0e9c16ccbe726b4b21265dbfc1ac68 | Add search_rotated_sorted_array.py | data_structures/sorting/search_rotated_sorted_array.py | data_structures/sorting/search_rotated_sorted_array.py | # Search in a Rotated Sorted Array
# You are given a sorted array which is rotated at some random pivot point.
#
# Example: [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]
#
# You are given a target value to search. If found in the array return its index, otherwise return -1.
#
# You can assume there are no duplicates in the array and your algorithm's runtime complexity
# must be in the order of O(log n).
#
# Example:
#
# Input: nums = [4,5,6,7,0,1,2], target = 0, Output: 4
#
# Here is some boilerplate code and test cases to start with:
def rotated_array_search(input_list, number):
"""
Find the index by searching in a rotated sorted array
Args:
input_list(array), number(int): Input array to search and the target
Returns:
int: Index or -1
"""
left = 0
right = len(input_list) - 1
while left <= right:
mid = (left + right) // 2
if number == input_list[mid]:
return mid
# left sorted portion
if input_list[left] <= input_list[mid]:
if number > input_list[mid] or number < input_list[left]:
left = mid + 1
else:
right = mid - 1
# right sorted portion
else:
if number > input_list[right] or number < input_list[mid]:
right = mid - 1
else:
left = mid + 1
return -1
def linear_search(input_list, number):
for index, element in enumerate(input_list):
if element == number:
return index
return -1
def test_function(test_case):
input_list = test_case[0]
number = test_case[1]
if linear_search(input_list, number) == rotated_array_search(input_list, number):
print("Pass")
else:
print("Fail")
test_function([[6, 7, 8, 9, 10, 1, 2, 3, 4], 6])
test_function([[6, 7, 8, 9, 10, 1, 2, 3, 4], 1])
test_function([[6, 7, 8, 1, 2, 3, 4], 8])
test_function([[6, 7, 8, 1, 2, 3, 4], 1])
test_function([[6, 7, 8, 1, 2, 3, 4], 10])
test_function([[], 0])
test_function([[88], 88])
test_function([[], None]) | Python | 0.00001 | |
ffc1b443f13672d0a4002a38f5273b5f72cdb627 | Solve Even Fibonacci numbers | python/euler002.py | python/euler002.py | #!/bin/python3
# Project Euler #2: Even Fibonacci numbers
def fibonacci_sequence(n):
sequence = [1, 2]
while sequence[-1] + sequence[-2] < n:
sequence.append(sequence[-1] + sequence[-2])
return sequence
def evens(array):
return list(filter(lambda x: x % 2 == 0, array))
test_cases = int(input().strip())
for _ in range(test_cases):
n = int(input().strip())
print(sum(evens(fibonacci_sequence(n))))
| Python | 0.999998 | |
f5f2f87030e48dd751ed95eec08f29ab863a8ed9 | Compute the difference between two images | python/img_diff.py | python/img_diff.py | import requests
import json
# Compute the difference between two images and output the reconstructed image and the diff output.
# Keep in mind that the two images must be of the same size or call 'resize' or 'crop' before to
# fit the images to the same dimension.
# Read more on imgdiff here: https://pixlab.io/#/cmd?id=imgdiff
src = 'https://pixlab.io/images/jdr.jpg' # Source image which is the famous Michael Jordan's crying face.
target = 'https://pixlab.io/images/jdr_draw.jpg' # Target image which is the same Jordan's face but a MEME is drown on top of it.
req = requests.get('https://api.pixlab.io/imgdiff',params={
'src': src,
'target': target,
'key':'My_Key'
})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Diff Output: "+str(reply['diff']))
print ("Reconstructed image link: "+ reply['link'])
| Python | 1 | |
dc5aad16e63ff210aa3770f6eae18f215f78f8ce | Create 03.py | 01/hw/03.py | 01/hw/03.py | # Given the variables s and t defined as:
s = 'udacity'
t = 'bodacious'
# write Python code that prints out udacious
# without using any quote characters in
# your code.
print s[:3] + t[4:]
| Python | 0 | |
c953e4d292de795d25d00f0a2152a2cf1625dd95 | Add custom ucr expression tests | custom/enikshay/tests/test_ucr_expressions.py | custom/enikshay/tests/test_ucr_expressions.py | import uuid
from django.test import TestCase, override_settings
from nose.tools import nottest
from casexml.apps.case.const import CASE_INDEX_CHILD
from casexml.apps.case.mock import CaseIndex
from casexml.apps.case.mock import CaseStructure
from casexml.apps.case.tests.util import delete_all_cases
from corehq.apps.userreports.expressions import ExpressionFactory
from corehq.apps.userreports.specs import EvaluationContext
from custom.enikshay.case_utils import CASE_TYPE_TRAIL
from custom.enikshay.expressions import ReferralExpressionBase
from .utils import ENikshayCaseStructureMixin
class ReferralTestExpression(ReferralExpressionBase):
"""
A version of the ReferralExpressionBase that just returns the referral or trail case for testing purposes
Other subclasses of ReferralExpressionBase would return a particular case property from the case, but for
testing purposes it is sufficient to just confirm that the right case is being returned at this step.
"""
def _handle_referral_case(self, referral):
return referral
def _handle_trail_case(self, context, trail, domain):
return trail
@nottest
def referral_test_expression(spec, context):
"""
Factory function for ReferralTestExpression
"""
wrapped = ReferralTestExpression.wrap(spec)
wrapped.configure(
ExpressionFactory.from_spec(wrapped.person_id_expression, context)
)
return wrapped
@override_settings(TESTS_SHOULD_USE_SQL_BACKEND=True)
class TestReferralExpressions(ENikshayCaseStructureMixin, TestCase):
def setUp(self):
super(TestReferralExpressions, self).setUp()
self.cases = self.create_case_structure()
def tearDown(self):
super(TestReferralExpressions, self).tearDown()
delete_all_cases()
def get_referral_expression_case(self, person_id):
"""
Evaluate the ReferralTestExpression against the given person_id
"""
context = EvaluationContext({"domain": self.domain})
expression = referral_test_expression({
# "domain": self.domain,
'person_id_expression': {
"type": "property_name",
"property_name": "person_id"
}
}, context)
referral_or_trail = expression({"person_id": person_id}, context)
return referral_or_trail
def test_person_with_no_referrals(self):
self.assertIsNone(self.get_referral_expression_case(self.person_id))
def test_person_with_open_referral(self):
referral_case_id = uuid.uuid4().hex
self.create_referral_case(referral_case_id)
self.assertEqual(
self.get_referral_expression_case(self.person_id).case_id,
referral_case_id
)
def accept_referral(self, referral_case):
# Note that the actual app workflow changes additional properties/ownership
person_case_id = referral_case.indices[0].referenced_id
self.factory.update_case(person_case_id, update={"awaiting_claim": "no"})
# Note that actual app workflow reassigns person case as well
self.factory.update_case(referral_case.case_id, close=True)
trail = self.factory.create_or_update_case(
CaseStructure(
case_id=uuid.uuid4().hex,
attrs={
"case_type": CASE_TYPE_TRAIL,
"create": True,
"update": {
"referral_id": referral_case.case_id
}
},
indices=[CaseIndex(
CaseStructure(case_id=person_case_id, attrs={"create": False}),
identifier='host',
relationship=CASE_INDEX_CHILD,
related_type='person',
)],
walk_related=False,
)
)[0]
return trail
def test_person_with_accepted_referral(self):
referral_case_id = uuid.uuid4().hex
referral_case = self.create_referral_case(referral_case_id)[0]
trail = self.accept_referral(referral_case)
self.assertEqual(
self.get_referral_expression_case(self.person_id),
trail
)
def reject_referral(self, referral_case_id):
# Note that the actual app workflow changes additional properties, including the case owner
self.factory.update_case(
referral_case_id,
update={
"referral_status": "rejected",
},
close=True,
)
def test_person_with_rejected_referral(self):
referral_case_id = uuid.uuid4().hex
self.create_referral_case(referral_case_id)
self.reject_referral(referral_case_id)
self.assertEqual(
self.get_referral_expression_case(self.person_id),
None
)
| Python | 0 | |
616e656cb9390321cb36d8f1b067d0bddaff11c2 | Add cli argument parser | frigg/worker/cli.py | frigg/worker/cli.py | # -*- coding: utf8 -*-
from fabric import colors
from frigg.worker.fetcher import fetcher
class Commands(object):
@staticmethod
def start():
print(colors.green("Starting frigg worker"))
fetcher()
@staticmethod
def unknown_command():
print(colors.red("Unknown command"))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Do some work for frigg.')
parser.add_argument('command')
args = parser.parse_args()
getattr(Commands, args.command, Commands.unknown_command)() | Python | 0.000001 | |
80caf160aba107f539d18287a09fc30d6cf3d0a1 | add demo plotting the available 1D demo signals | demo/plot_demo_signals.py | demo/plot_demo_signals.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Plot the set of 1D demo signals available in `pywt.data.demo_signal`."""
import numpy as np
import matplotlib.pyplot as plt
import pywt
# use 'list' to get a list of all available 1d demo signals
signals = pywt.data.demo_signal('list')
subplots_per_fig = 5
signal_length = 1024
i_fig = 0
n_figures = int(np.ceil(len(signals)/subplots_per_fig))
for i_fig in range(n_figures):
# Select a subset of functions for the current plot
func_subset = signals[
i_fig * subplots_per_fig:(i_fig + 1) * subplots_per_fig]
# create a figure to hold this subset of the functions
fig, axes = plt.subplots(subplots_per_fig, 1)
axes = axes.ravel()
for n, signal in enumerate(func_subset):
if signal in ['Gabor', 'sineoneoverx']:
# user cannot specify a length for these two
x = pywt.data.demo_signal(signal)
else:
x = pywt.data.demo_signal(signal, signal_length)
ax = axes[n]
ax.plot(x.real)
if signal == 'Gabor':
# The Gabor signal is complex-valued
ax.plot(x.imag)
ax.legend(['Gabor (Re)', 'Gabor (Im)'], loc='upper left')
else:
ax.legend([signal, ], loc='upper left')
# omit axes for any unused subplots
for n in range(n + 1, len(axes)):
axes[n].set_axis_off()
plt.show()
| Python | 0 | |
944ab744ce4ba3fb30ce94ac2ec581e4b481610f | add img to the dirs that get created. | dj/scripts/mkdirs.py | dj/scripts/mkdirs.py | #!/usr/bin/python
# Makes the dir tree to put files into
import os,sys
from process import process
from main.models import Client, Show, Location, Episode
class mkdirs(process):
def mkdir(self,dir):
""" makes the dir if it doesn't exist """
ret = False
print(dir, end=' ')
if os.path.exists(dir):
print('(exists)')
else:
if self.options.test:
print('(testing, skipped)')
else:
os.makedirs(dir)
ret = True
print()
return ret
def work(self):
"""
find client and show, create the dirs
"""
client = Client.objects.get(slug=self.options.client)
show = Show.objects.get(client=client,slug=self.options.show)
self.set_dirs(show)
dirs = "dv assets tmp titles webm mp4 mlt custom/titles img"
for d in dirs.split():
full_dir = os.path.join(self.show_dir,d)
ret = self.mkdir(full_dir)
# copy the footer image
# not sure where this should happen *shrug*
# It's really just for the default,
# If there is a non default, it will live under show_dir/assets/.
credits_img = client.credits
credits_src = os.path.join(
os.path.split(os.path.abspath(__file__))[0],
"bling",
credits_img)
# copy into show/assetts
credits_pathname = os.path.join(
self.show_dir, "assets", credits_img )
self.run_cmd( ["cp", credits_src, credits_pathname] )
if self.options.raw_slugs:
# get episodes for this show
eps = Episode.objects.filter(show=show)
for ep in eps:
loc = ep.location.slug
dt = ep.start.strftime("%Y-%m-%d")
slug = ep.slug
full_dir = os.path.join(self.show_dir,'dv',loc,dt,slug)
ret = self.mkdir(full_dir)
else:
# get locations of the episodes
for loc in Location.objects.filter(
show=show, active=True):
dir = os.path.join(self.show_dir,'dv',loc.slug)
ret = self.mkdir(dir)
return
def add_more_options(self, parser):
parser.add_option('--raw-slugs', action="store_true",
help="Make a dir for each talk's raw files")
if __name__=='__main__':
p=mkdirs()
p.main()
| #!/usr/bin/python
# Makes the dir tree to put files into
import os,sys
from process import process
from main.models import Client, Show, Location, Episode
class mkdirs(process):
def mkdir(self,dir):
""" makes the dir if it doesn't exist """
ret = False
print(dir, end=' ')
if os.path.exists(dir):
print('(exists)')
else:
if self.options.test:
print('(testing, skipped)')
else:
os.makedirs(dir)
ret = True
print()
return ret
def work(self):
"""
find client and show, create the dirs
"""
client = Client.objects.get(slug=self.options.client)
show = Show.objects.get(client=client,slug=self.options.show)
self.set_dirs(show)
dirs = "dv assets tmp titles webm mp4 mlt custom/titles"
for d in dirs.split():
full_dir = os.path.join(self.show_dir,d)
ret = self.mkdir(full_dir)
# copy the footer image
# not sure where this should happen *shrug*
# It's really just for the default,
# If there is a non default, it will live under show_dir/assets/.
credits_img = client.credits
credits_src = os.path.join(
os.path.split(os.path.abspath(__file__))[0],
"bling",
credits_img)
# copy into show/assetts
credits_pathname = os.path.join(
self.show_dir, "assets", credits_img )
self.run_cmd( ["cp", credits_src, credits_pathname] )
if self.options.raw_slugs:
# get episodes for this show
eps = Episode.objects.filter(show=show)
for ep in eps:
loc = ep.location.slug
dt = ep.start.strftime("%Y-%m-%d")
slug = ep.slug
full_dir = os.path.join(self.show_dir,'dv',loc,dt,slug)
ret = self.mkdir(full_dir)
else:
# get locations of the episodes
for loc in Location.objects.filter(
show=show, active=True):
dir = os.path.join(self.show_dir,'dv',loc.slug)
ret = self.mkdir(dir)
return
def add_more_options(self, parser):
parser.add_option('--raw-slugs', action="store_true",
help="Make a dir for each talk's raw files")
if __name__=='__main__':
p=mkdirs()
p.main()
| Python | 0 |
a20e4154e4b9a1432cae71e8f931486c42ae7493 | Update uflpost.py | ArduinoYun/Python/uflpost.py | ArduinoYun/Python/uflpost.py | """ SendDragonBoardData.py
Copyright 2016 OSIsoft, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/python
from __future__ import print_function
import os
import time
import json
import requests
#Wait 2min on startup to ensure Arduino sketch has started
time.sleep(120)
#Enter your UFL REST endpoint URL here:
url = 'https://{myuflserver}:5460/connectordata/{myuflendpoint}'
s = requests.session()
# In the Session information, one needs to set the username and password
# as specified in the connector configuration page
# You can hard code the credentials in the variables below.
# If not, you will be prompted to enter them at run time.
# If anonymous authentification is used, then use can use emptry strings for both
_username = None
_password = None
def username():
global _username
if _username is None:
_username = getpass.getpass('Username: ')
return _username
def password():
global _password
if _password is None:
_password = getpass.getpass()
return _password
s.auth = (username(), password())
#Disable warnings due to lack of server cert
requests.packages.urllib3.disable_warnings()
#Read file list from local json directory on OpenWRT
path = '/json/'
#while loop begin
try:
while True:
#Get current file list, sort, count files
dirList=os.listdir(path)
dirList.sort()
fileCount = len(dirList)
loopCount = 0
print(fileCount - 1, " files in queue to process.")
#Iterate through each file and post the file contents (JSON) to UFL REST endpoint
for infile in dirList:
#Iterate the counter first to ensure we leave at least one file in the directory
loopCount += 1
#If we've processed all but one file, exit the loop
if loopCount == fileCount:
break
#Open file and read contents, then close
print("File being processed is: " + infile)
f=open(os.path.join(path, infile),'r')
payload=f.read()
f.close()
#Send file json content to UFL endpoint
try:
response = s.put(url, data=data, verify=False)
# To use the Post method instead, replace the line above with the one below.
# response = s.post(args.resturl + '/post', data=data, verify=False)
except:
#If we throw an exception, break and try again on the next loop
print("Error during HTTP POST. Aborting loop.")
break
#If successful, delete the file
if response.status_code == 200:
os.remove(os.path.join(path, infile))
print("Success. File " + infile + " was uploaded and deleted.")
#else:
print(response.status_code, response.reason, file=sys.stderr)
print(" File " + infile + "was NOT uploaded and NOT deleted.")
#Delay 10 seconds before next loop
print("Waiting 10 seconds to run again.")
time.sleep(10)
except KeyboardInterrupt:
pass
| """ SendDragonBoardData.py
Copyright 2016 OSIsoft, LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#!/usr/bin/python
from __future__ import print_function
import os
import time
import json
import requests
#Wait 2min on startup to ensure Arduino sketch has started
time.sleep(120)
#UFL REST endpoint connection information
#Enter your UFL REST endpoint URL here:
url = 'https://{myuflserver}:5460/connectordata/{myuflendpoint}/post'
#Enter your UFL REST endpoint username here:
un = '{myusername}'
#Enter your UFL REST endpoint password here:
pw = '{mypassword}'
#Disable warnings due to lack of server cert
requests.packages.urllib3.disable_warnings()
#Read file list from local json directory on OpenWRT
path = '/json/'
#while loop begin
try:
while True:
#Get current file list, sort, count files
dirList=os.listdir(path)
dirList.sort()
fileCount = len(dirList)
loopCount = 0
print fileCount - 1, " files in queue to process."
#Iterate through each file and post the file contents (JSON) to UFL REST endpoint
for infile in dirList:
#Iterate the counter first to ensure we leave at least one file in the directory
loopCount += 1
#If we've processed all but one file, exit the loop
if loopCount == fileCount:
break
#Open file and read contents, then close
print "File being processed is: " + infile
f=open(os.path.join(path, infile),'r')
payload=f.read()
f.close()
#POST json to UFL endpoint
headers = {'Accept' : 'application/json', 'Content-Type' : 'application/json'}
try:
response = requests.post(url, data=payload, auth=(un,pw), verify=False, headers=headers)
except:
#If we throw an exception, simply break and try again on the next loop
print "Error during HTTP POST. Aborting loop."
break
#If successful, delete the file
if response.status_code == 200:
os.remove(os.path.join(path, infile))
print "Success. File " + infile + " was uploaded and deleted."
#else:
print "Status code:", response.status_code, " File " + infile + "was NOT uploaded and NOT deleted."
#Delay 10 seconds before next loop
print "Waiting 10 seconds to run again."
time.sleep(10)
except KeyboardInterrupt:
pass
| Python | 0 |
4a30d30b82fbdccbb0f15ebb5c094b13ce791f7f | Add a utility class to normalize input | genderator/utils.py | genderator/utils.py | from unidecode import unidecode
class Normalizer:
def normalize(text):
text = Normalizer.remove_extra_whitespaces(text)
text = Normalizer.replace_hyphens(text)
# text = Normalizer.remove_accent_marks(text)
return text.lower()
@staticmethod
def replace_hyphens(text):
return text.replace('-', ' ')
@staticmethod
def remove_extra_whitespaces(text):
return ' '.join(text.strip().split());
@staticmethod
def remove_accent_marks(text):
return unidecode(text) | Python | 0.000002 | |
8b828e9c9daacd8bd6b5719e0ee50fc93f3c612d | add line-invoker, allows pipeline to be changed on the fly | line-invoker.py | line-invoker.py | #!/usr/bin/python
from __future__ import print_function
import sys
import subprocess
# A normal(ish) pipeline looks like the following:
# tailf input | grep -v foo | grep bar | cat >>output
# If we want to change the valu "foo", "bar" or otherwise change the
# pipeline, we have to kill the old pipeline and start a new one.
# This script changes the above to
# tailf input | line-invoker.py mypipeline.sh | cat >>output
# where mypipeline.sh contains:
# grep -v foo | grep bar
# This allows the pipeline to be edited at will, without breaking the
# tailf and potentially having missed lines, or duplicated them on
# restarting tailf
def main():
prog = sys.argv[1]
try:
line = sys.stdin.readline()
while line:
p = subprocess.Popen(prog, stdin=subprocess.PIPE)
p.stdin.write(line)
p.stdin.close()
sys.stdout.flush()
line = sys.stdin.readline()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
| Python | 0 | |
917708a749e2c9519cbb9841004a18eeff788af4 | Concatenate Final | Concatenate.py | Concatenate.py | # Copyright (c) 2017 Rahul V Sharma
# AUTHORS = Rahul Vinod Shaarma
# Website = www.rahul-sharma.com
# Email = sharmaR0810@gmail.com
# Don't Message Me unless Serios Help or you are not a hot girl.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# 720p = 1280 x 720
# 1080p = 1920 x 1080
# 1440p = 2560 x 1440
# 2160p = 3840 x 2160
# 4320p = 7680 x 4320
# Downloadding Master Playlist as Master
import urllib.request
import re
import os
# Creating MasterPlaylist Folder
if not os.path.exists("MasterPlaylist"):
os.makedirs("MasterPlaylist")
# Downloadding Master Playlist as 01_Master
PathM3U = 'https://player.vimeo.com/external/159463108.m3u8?s=d41bea2a0d7223e3bd161fcb549b2c668437f1c9&oauth2_token_id=410160086'
NameM3U = "01_Master.m3u"
# Download the file from `url` and save it locally under `file_name`:
urllib.request.urlretrieve(PathM3U, NameM3U)
# Matching Higher Resolution and selecting its URL
ResolutionMatchRegex = '(.*RESOLUTION=1920x1080.*)[\r\n]+([^\r\n]+)'
RawM3u = open(NameM3U, 'r')
RawM3uText = RawM3u.read()
ResolutionMatch = re.findall(ResolutionMatchRegex, RawM3uText)[0]
# Writing Regex Match to 02_Master.m3u
StringMatchFile = open('02_Master.m3u', 'w')
StringMatchFile.write(ResolutionMatch[1])
StringMatchFile.close()
# Downloadding Chop Playlist as 03_Master.m3u
PathM3U = ResolutionMatch[1]
NameM3U = "03_Master.m3u"
# Download the file from `url` and save it locally under `file_name`:
urllib.request.urlretrieve(PathM3U, NameM3U)
# Matching Filename and extention
ExtensionMatchRegex = '.*\/'
URLFile = open('02_Master.m3u', 'r')
URLText = URLFile.read()
ExtensionMatch = re.findall(ExtensionMatchRegex, URLText)[0]
# Writing Regex Match (without filename and extension)to 04_Master.m3u
URLExtentionFile = open('04_Master.m3u', 'w')
URLExtentionFile.write(ExtensionMatch)
URLExtentionFile.close()
# Opening 04_Master.m3u to take url pattern
URLFile = open('04_Master.m3u', 'r')
URLText = URLFile.read()
# opening 04_Master.m3u Segment File
with open('03_Master.m3u', 'r') as playlist:
ts_filenames = [line.rstrip() for line in playlist
if line.rstrip().endswith('.ts')]
StringMatchFile = open('MasterPlaylist/01_Master.m3u8', 'w')
for line in ts_filenames:
StringMatchFile.write(URLText)
StringMatchFile.write(line)
StringMatchFile.write("\n")
# Deleting 01_Master.m3u, 02_Master.m3u, 03_master.m3u, 04_master.m3u
os.remove('01_Master.m3u')
os.remove('02_Master.m3u')
os.remove('03_Master.m3u')
os.remove('04_Master.m3u')
| Python | 0.999998 | |
80cb11187894870ba9fe40e09834522d7ea2ee10 | Create middleware.py | middleware.py | middleware.py | Python | 0.000007 | ||
aadd5b5d60e1fa2939482790baa893d9624ad33b | Create mnist_lstm.py | mnist_lstm.py | mnist_lstm.py | from tensorflow.models.rnn import rnn_cell, rnn
import tensorflow as tf
import numpy as np
import input_data
sess = tf.Session()
'''
Classify MNIST using LSTM running row by row.
Good:
* No compilation time at all, which is cool.
Bad:
* Problem is that has all dimensions hard coded, which sucks.
Inspired by:
https://github.com/nlintz/TensorFlow-Tutorials
'''
def init_weights(shape):
    """Create a trainable variable of the given shape, N(0, 0.01**2) init."""
    initial_value = tf.random_normal(shape, stddev=0.01)
    return tf.Variable(initial_value)
def get_lstm(num_steps, input_dim, hidden_dim, output_dim, batch_size):
    """Build an LSTM classifier graph that consumes a sequence row by row.

    Parameters:
        num_steps:  number of time steps per sequence (e.g. 28 MNIST rows)
        input_dim:  size of each per-step input vector
        hidden_dim: LSTM hidden-state size
        output_dim: number of output logits
        batch_size: fixed batch size baked into the placeholders

    Returns:
        (input, output, desired) — the input placeholder, the output
        logits tensor, and the target placeholder.
    """
    # Define inputs.
    # NOTE(review): the target placeholder is hard-coded to 10 classes
    # (MNIST) rather than output_dim — presumably output_dim == 10 at the
    # call sites; confirm before reusing this elsewhere.
    input = tf.placeholder("float", [batch_size, num_steps, input_dim])
    desired = tf.placeholder("float", [batch_size, 10])
    # Define parameters: input->hidden projection and hidden->output readout.
    i2h = init_weights([input_dim, hidden_dim])
    h2o = init_weights([hidden_dim, output_dim])
    bi = init_weights([hidden_dim])
    bo = init_weights([output_dim])
    # prepare input
    # input shape: (batch_size, num_steps, input_dim)
    X2 = tf.transpose(input, [1, 0, 2])  # (num_steps, batch_size, input_dim)
    # BUG FIX: this reshape previously referenced an undefined name `dim`
    # (a NameError at graph-construction time); it must be `input_dim`.
    X3 = tf.reshape(X2, [num_steps * batch_size, input_dim])
    # project to hidden state dimension
    X4 = tf.matmul(X3, i2h) + bi  # (num_steps*batch_size, hidden_dim)
    # The rnn for-loop expects a list of (batch_size, hidden_dim) tensors,
    # one per time step; tf.split(axis, num_splits, tensor) produces it.
    Xh = tf.split(0, num_steps, X4)
    initializer = tf.random_uniform_initializer(-.01, .01)
    # INNER LOOP — rnn.rnn unrolls the cell over the step list for us
    # (equivalent to calling lstm_cell(input_, state) per step by hand).
    with tf.variable_scope("RNN", reuse=None, initializer=initializer):
        lstm_cell = rnn_cell.BasicLSTMCell(hidden_dim, forget_bias=1.0)
        initial_state = lstm_cell.zero_state(batch_size, tf.float32)
        lstm_outputs, lstm_states = rnn.rnn(lstm_cell, Xh,
                                            initial_state=initial_state)
        # initializing inside the scope: it didn't work outside for the author
        sess.run(tf.initialize_all_variables())
    # calculate output from the last time step only
    Y = lstm_outputs[-1]
    output = tf.matmul(Y, h2o) + bo
    return input, output, desired
940c4f4238eac31f926e520dba473819abb44033 | Add a moksha.hub module with an initial OrbitedWidget | moksha/hub.py | moksha/hub.py | # This file is part of Moksha.
#
# Moksha is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Moksha is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Moksha. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2008, Red Hat, Inc.
# Authors: Luke Macken <lmacken@redhat.com>
"""
The Moksha Real-time Hub
"""
from tw.api import Widget, JSLink, js_callback, js_function
# @@ Make the orbited url globally configurable
# Base URL of the Orbited comet server that serves the Orbited.js client
# library to the browser.
ORBITED_URL = 'http://localhost:9000'
# JSLink resource for the Orbited javascript client; injected into any page
# that renders a widget listing it in its `javascript` attribute.
orbited_js = JSLink(link=ORBITED_URL + '/static/Orbited.js')
class OrbitedWidget(Widget):
    """ToscaWidgets widget embedding an Orbited TCPSocket connection.

    Renders a <script> block that opens a TCPSocket to the Orbited server
    on page load and wires the supplied javascript callbacks to the
    socket's open/read/close events.
    """
    params = {
        'onopen': 'A javascript callback for when the connection opens',
        'onread': 'A javascript callback for when new data is read',
        'onclose': 'A javascript callback for when the connection closes',
    }
    # Pull in Orbited.js on any page that renders this widget
    javascript = [orbited_js]
    # All three callbacks default to a javascript no-op
    onopen = onread = onclose = js_callback('function(){}')
    # NOTE(review): host/port are hard-coded to localhost:9000 in the
    # template even though ORBITED_URL exists above — presumably both
    # should come from the same configurable setting; confirm.
    template = """
        <script type="text/javascript">
            Orbited.settings.port = 9000
            Orbited.settings.hostname = 'localhost'
            document.domain = document.domain
            TCPSocket = Orbited.TCPSocket
            connect = function() {
                conn = new TCPSocket()
                conn.onread = ${onread}
                conn.onopen = ${onopen}
                conn.onclose = ${onclose}
                conn.open('localhost', 9000)
            }
            $(document).ready(function() {
                connect()
            });
        </script>
    """
| Python | 0 | |
7780c235f0f357ab918f0c031e7dc51f6ca072a9 | Solve problem 20 | problem020.py | problem020.py | #!/usr/bin/env python3
from functools import *
import operator
def factorial(number):
assert number >= 1
return reduce(operator.mul, range(1, number+1))
def digits(number):
yield from (int(digit) for digit in str(number))
print(sum(digits(factorial(100))))
| Python | 0.999999 | |
6fcb3adbcf85aa8039274f59d2b26401b5927fc4 | Create PowerofFour_001.py | kargtom/twodim/PowerofFour/PowerofFour_001.py | kargtom/twodim/PowerofFour/PowerofFour_001.py | def isPowerOfFour(n):
return n > 0 and n & n - 1 is 0 and n & 0x5555555555555555 != 0
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.