commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
2e467774d83e14baf6fb2fec1fa4e0f6c1f8f88d | Disable webrtc benchmark on Android | tools/perf/benchmarks/webrtc.py | tools/perf/benchmarks/webrtc.py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from measurements import webrtc
import page_sets
from telemetry import benchmark
@benchmark.Disabled('android') # crbug.com/390233
class WebRTC(benchmark.Benchmark):
"""Obtains WebRTC metrics for a real-time video tests."""
test = webrtc.WebRTC
page_set = page_sets.WebrtcCasesPageSet
| # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from measurements import webrtc
import page_sets
from telemetry import benchmark
class WebRTC(benchmark.Benchmark):
"""Obtains WebRTC metrics for a real-time video tests."""
test = webrtc.WebRTC
page_set = page_sets.WebrtcCasesPageSet
| Python | 0.000005 |
d171d316eb45bdd6ce9d3a80c2ca91ae8b3cf1b2 | Clarify how long a year is | tapiriik/auth/__init__.py | tapiriik/auth/__init__.py | from .payment import *
from .totp import *
from tapiriik.database import db
from tapiriik.sync import Sync
from datetime import datetime, timedelta
from bson.objectid import ObjectId
class User:
def Get(id):
return db.users.find_one({"_id": ObjectId(id)})
def Ensure(req):
if req.user == None:
req.user = User.Create()
User.Login(req.user, req)
return req.user
def Login(user, req):
req.session["userid"] = str(user["_id"])
req.user = user
def Create():
uid = db.users.insert({"Created": datetime.utcnow()}) # will mongodb insert an almost empty doc, i.e. _id?
return db.users.find_one({"_id": uid})
def GetConnectionRecordsByUser(user):
return db.connections.find({"_id": {"$in": [x["ID"] for x in user["ConnectedServices"]]}})
def AssociatePayment(user, payment):
db.users.update({"_id": {'$ne': ObjectId(user["_id"])}}, {"$pull": {"Payments": payment}}, multi=True) # deassociate payment ids from other accounts that may be using them
db.users.update({"_id": ObjectId(user["_id"])}, {"$addToSet": {"Payments": payment}})
def HasActivePayment(user):
if "Payments" not in user:
return False
for payment in user["Payments"]:
if payment["Timestamp"] > (datetime.utcnow() - timedelta(days=365.25)):
return True
return False
def ConnectService(user, serviceRecord):
existingUser = db.users.find_one({"_id": {'$ne': ObjectId(user["_id"])}, "ConnectedServices.ID": ObjectId(serviceRecord["_id"])})
if "ConnectedServices" not in user:
user["ConnectedServices"] = []
delta = False
if existingUser is not None:
# merge merge merge
user["ConnectedServices"] += existingUser["ConnectedServices"]
user["Payments"] += existingUser["Payments"]
delta = True
db.users.remove({"_id": existingUser["_id"]})
else:
if serviceRecord["_id"] not in [x["ID"] for x in user["ConnectedServices"]]:
user["ConnectedServices"].append({"Service": serviceRecord["Service"], "ID": serviceRecord["_id"]})
delta = True
db.users.update({"_id": user["_id"]}, {"$set": {"ConnectedServices": user["ConnectedServices"]}})
if delta or ("SyncErrors" in serviceRecord and len(serviceRecord["SyncErrors"]) > 0): # also schedule an immediate sync if there is an outstanding error (i.e. user reconnected)
Sync.ScheduleImmediateSync(user, True) # exhaustive, so it'll pick up activities from newly added services / ones lost during an error
def DisconnectService(serviceRecord):
# not that >1 user should have this connection
activeUsers = list(db.users.find({"ConnectedServices.ID": serviceRecord["_id"]}))
if len(activeUsers) == 0:
raise Exception("No users found with service " + serviceRecord["_id"])
db.users.update({}, {"$pull": {"ConnectedServices": {"ID": serviceRecord["_id"]}}}, multi=True)
for user in activeUsers:
if len(user["ConnectedServices"]) - 1 == 0:
# I guess we're done here?
db.users.remove({"_id": user["_id"]})
def AuthByService(serviceRecord):
return db.users.find_one({"ConnectedServices.ID": serviceRecord["_id"]})
class SessionAuth:
def process_request(self, req):
userId = req.session.get("userid")
if userId == None:
req.user = None
else:
req.user = db.users.find_one({"_id": ObjectId(userId)})
| from .payment import *
from .totp import *
from tapiriik.database import db
from tapiriik.sync import Sync
from datetime import datetime, timedelta
from bson.objectid import ObjectId
class User:
def Get(id):
return db.users.find_one({"_id": ObjectId(id)})
def Ensure(req):
if req.user == None:
req.user = User.Create()
User.Login(req.user, req)
return req.user
def Login(user, req):
req.session["userid"] = str(user["_id"])
req.user = user
def Create():
uid = db.users.insert({"Created": datetime.utcnow()}) # will mongodb insert an almost empty doc, i.e. _id?
return db.users.find_one({"_id": uid})
def GetConnectionRecordsByUser(user):
return db.connections.find({"_id": {"$in": [x["ID"] for x in user["ConnectedServices"]]}})
def AssociatePayment(user, payment):
db.users.update({"_id": {'$ne': ObjectId(user["_id"])}}, {"$pull": {"Payments": payment}}, multi=True) # deassociate payment ids from other accounts that may be using them
db.users.update({"_id": ObjectId(user["_id"])}, {"$addToSet": {"Payments": payment}})
def HasActivePayment(user):
if "Payments" not in user:
return False
for payment in user["Payments"]:
if payment["Timestamp"] > (datetime.utcnow() - timedelta(years=1)):
return True
return False
def ConnectService(user, serviceRecord):
existingUser = db.users.find_one({"_id": {'$ne': ObjectId(user["_id"])}, "ConnectedServices.ID": ObjectId(serviceRecord["_id"])})
if "ConnectedServices" not in user:
user["ConnectedServices"] = []
delta = False
if existingUser is not None:
# merge merge merge
user["ConnectedServices"] += existingUser["ConnectedServices"]
user["Payments"] += existingUser["Payments"]
delta = True
db.users.remove({"_id": existingUser["_id"]})
else:
if serviceRecord["_id"] not in [x["ID"] for x in user["ConnectedServices"]]:
user["ConnectedServices"].append({"Service": serviceRecord["Service"], "ID": serviceRecord["_id"]})
delta = True
db.users.update({"_id": user["_id"]}, {"$set": {"ConnectedServices": user["ConnectedServices"]}})
if delta or ("SyncErrors" in serviceRecord and len(serviceRecord["SyncErrors"]) > 0): # also schedule an immediate sync if there is an outstanding error (i.e. user reconnected)
Sync.ScheduleImmediateSync(user, True) # exhaustive, so it'll pick up activities from newly added services / ones lost during an error
def DisconnectService(serviceRecord):
# not that >1 user should have this connection
activeUsers = list(db.users.find({"ConnectedServices.ID": serviceRecord["_id"]}))
if len(activeUsers) == 0:
raise Exception("No users found with service " + serviceRecord["_id"])
db.users.update({}, {"$pull": {"ConnectedServices": {"ID": serviceRecord["_id"]}}}, multi=True)
for user in activeUsers:
if len(user["ConnectedServices"]) - 1 == 0:
# I guess we're done here?
db.users.remove({"_id": user["_id"]})
def AuthByService(serviceRecord):
return db.users.find_one({"ConnectedServices.ID": serviceRecord["_id"]})
class SessionAuth:
def process_request(self, req):
userId = req.session.get("userid")
if userId == None:
req.user = None
else:
req.user = db.users.find_one({"_id": ObjectId(userId)})
| Python | 0.99958 |
d9e11e2c5f14cee0ead87ced9afe85bdd299ab35 | Add python script to extract | extract_text.py | extract_text.py | import json
f=open('raw.json')
g=open('extracted1','a')
i=1
for s in f:
j=json.loads(s)
j=j['text']
h=json.dumps(j)
number=str(i) + ':' + ' '
g.write(h)
g.write('\n\n')
i=i+1
| Python | 0.000002 | |
370d3a122fa0bfc4c6f57a5cd6e518968205611a | add another linechart example showing new features | examples/lineChartXY.py | examples/lineChartXY.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Examples for Python-nvd3 is a Python wrapper for NVD3 graph library.
NVD3 is an attempt to build re-usable charts and chart components
for d3.js without taking away the power that d3.js gives you.
Project location : https://github.com/areski/python-nvd3
"""
from nvd3 import lineChart
import math
from numpy import sin,pi,linspace
output_file = open('test_lineChartXY.html', 'w')
chart = lineChart(name="lineChart", date=False, x_format="f",y_format="f", width=500, height=500, show_legend=False)
#lissajous parameters of a/b
a = [1,3,5,3]
b = [1,5,7,4]
delta = pi/2
t = linspace(-pi,pi,300)
for i in range(0,4):
x = sin(a[i] * t + delta)
y = sin(b[i] * t)
chart.add_serie(y=y, x=x, name='lissajous-n%d' % i, color='red' if i == 0 else 'black')
chart.buildhtml()
output_file.write(chart.htmlcontent)
output_file.close()
| Python | 0 | |
170829465e30c69c13f2c5903e85ea4d79e8ae08 | Add a drawing pad for testing. Need to further adding the message transmission | drawing_pad.py | drawing_pad.py | from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PIL import Image
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
matplotlib.rc('xtick', labelsize=8)
matplotlib.rc('ytick', labelsize=8)
class DrawingPad_Painter(QWidget):
def __init__(self, parent=None):
QWidget.__init__(self, parent=parent)
#prepare data and figure
self.traj_pnts = []
self.curr_traj = None
self.lines = []
self.curr_line = None
self.dpi = 100
self.fig = Figure(figsize=(3.24, 5.0), dpi=self.dpi, facecolor="white")
self.canvas = FigureCanvas(self.fig)
self.ax_painter = self.fig.add_subplot(111, aspect='equal')
self.ax_painter.hold(True)
self.ax_painter.set_xlim([-2, 2])
self.ax_painter.set_ylim([-2, 2])
self.ax_painter.set_aspect('equal')
self.ax_painter.set_xticks([])
self.ax_painter.set_yticks([])
self.ax_painter.axis('off')
self.hbox_layout = QHBoxLayout()
self.hbox_layout.addWidget(self.canvas, 5)
self.line_width = 12.0
# self.ctrl_pnl_layout = QVBoxLayout()
# #a button to clear the figure
# self.clean_btn = QPushButton('Clear')
# self.ctrl_pnl_layout.addWidget(self.clean_btn)
#
# self.hbox_layout.addLayout(self.ctrl_pnl_layout, 1)
self.setLayout(self.hbox_layout)
self.drawing = False
self.create_event_handler()
return
def create_event_handler(self):
self.canvas_button_clicked_cid = self.canvas.mpl_connect('button_press_event', self.on_canvas_mouse_clicked)
self.canvas_button_released_cid = self.canvas.mpl_connect('button_release_event', self.on_canvas_mouse_released)
self.canvas_motion_notify_cid = self.canvas.mpl_connect('motion_notify_event', self.on_canvas_mouse_move)
return
def on_canvas_mouse_clicked(self, event):
# print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(
# event.button, event.x, event.y, event.xdata, event.ydata)
self.drawing = True
# create a new line if we are drawing within the area
if event.xdata is not None and event.ydata is not None and self.curr_line is None and self.curr_traj is None:
self.curr_line, = self.ax_painter.plot([event.xdata], [event.ydata], '-k', linewidth=self.line_width)
self.curr_traj = [np.array([event.xdata, event.ydata])]
self.canvas.draw()
return
def on_canvas_mouse_released(self, event):
# print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(
# event.button, event.x, event.y, event.xdata, event.ydata)
self.drawing = False
# store finished line and trajectory
# print self.curr_traj
self.lines.append(self.curr_line)
self.traj_pnts.append(self.curr_traj)
self.curr_traj = None
self.curr_line = None
return
def on_clean(self, event):
print 'clean the canvas...'
#clear everything
for line in self.lines:
self.ax_painter.lines.remove(line)
self.lines = []
if self.curr_line is not None:
self.ax_painter.lines.remove(self.curr_line)
self.curr_line = None
self.canvas.draw()
self.traj_pnts = []
self.curr_traj = None
self.drawing = False
return
def on_canvas_mouse_move(self, event):
if self.drawing:
# print 'In movement: x=',event.x ,', y=', event.y,', xdata=',event.xdata,', ydata=', event.ydata
if event.xdata is not None and event.ydata is not None and self.curr_line is not None and self.curr_traj is not None:
#append new data and update drawing
self.curr_traj.append(np.array([event.xdata, event.ydata]))
tmp_curr_data = np.array(self.curr_traj)
self.curr_line.set_xdata(tmp_curr_data[:, 0])
self.curr_line.set_ydata(tmp_curr_data[:, 1])
self.canvas.draw()
return
def plot_trajs_helper(self, trajs):
tmp_lines = []
for traj in trajs:
tmp_line, = self.ax_painter.plot(traj[:, 0], traj[:, 1], '-.g', linewidth=self.line_width)
tmp_lines.append(tmp_line)
self.canvas.draw()
#add these tmp_lines to lines record
self.lines = self.lines + tmp_lines
return
def get_traj_data(self):
return self.traj_pnts
def get_image_data(self):
"""
Get the deposited image
"""
w,h = self.canvas.get_width_height()
buf = np.fromstring ( self.canvas.tostring_argb(), dtype=np.uint8 )
buf.shape = ( w, h, 4 )
# canvas.tostring_argb give pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode
buf = np.roll ( buf, 3, axis = 2 )
return buf
class DrawingPad(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.resize(540, 360)
self.move(400, 200)
#create painter
self.main_frame = QWidget()
self.main_hbox = QHBoxLayout()
self.painter = DrawingPad_Painter()
self.main_hbox.addWidget(self.painter)
self.main_frame.setLayout(self.main_hbox)
self.setCentralWidget(self.main_frame)
self.setWindowTitle('DrawingPad')
self.ctrl_pnl_layout = QVBoxLayout()
#clean button
#a button to clear the figure
self.clean_btn = QPushButton('Clear')
self.ctrl_pnl_layout.addWidget(self.clean_btn)
self.clean_btn.clicked.connect(self.painter.on_clean)
#send button
self.send_btn = QPushButton('Send')
self.ctrl_pnl_layout.addWidget(self.send_btn)
self.send_btn.clicked.connect(self.on_send_button_clicked)
self.main_hbox.addLayout(self.ctrl_pnl_layout, 3)
return
def on_send_button_clicked(self, event):
img_data = self.painter.get_image_data()
#prepare an image
w, h, d = img_data.shape
img = Image.fromstring( "RGBA", ( w ,h ), img_data.tostring() )
img.convert('LA')
thumbnail_size = (28, 28)
img.thumbnail(thumbnail_size)
img.show()
return
import sys
def main():
app = QApplication(sys.argv)
gui = DrawingPad()
gui.show()
app.exec_()
return
if __name__ == '__main__':
main()
| Python | 0 | |
aa4a3011775c12c19d690bbca91a07df4e033b1f | add urls for admin, zinnia, and serving static files | src/phyton/urls.py | src/phyton/urls.py | from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
admin.autodiscover()
urlpatterns = patterns('',
# Admin URLs
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
# Zinnia's URLs
url(r'^weblog/', include('zinnia.urls')),
url(r'^comments/', include('django.contrib.comments.urls')),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| Python | 0 | |
b31def01a04a6ddb90e780985e43e8ad8e57e457 | Create uber.py | modules/uber.py | modules/uber.py | def uber(self):
self.send_chan("Moi")
| Python | 0.000036 | |
27ab8b0436d784f44220512a91e699006b735d82 | test mpi | tests/test_mpi.py | tests/test_mpi.py | # coding: utf-8
ierr = mpi_init()
comm = mpi_comm_world
print("mpi_comm = ", comm)
size, ierr = mpi_comm_size(comm)
print("mpi_size = ", size)
rank, ierr = mpi_comm_rank(comm)
print("mpi_rank = ", rank)
#abort, ierr = mpi_abort(comm)
#print("mpi_abort = ", abort)
ierr = mpi_finalize()
| Python | 0.000001 | |
2ad40dc0e7f61e37ab768bedd53572959a088bb0 | Make app package | app/__init__.py | app/__init__.py | from flask import Flask
def create_app(config_name):
pass
| Python | 0 | |
4981a1fa0d94020e20a8e7714af62a075f7d7874 | delete customers | delete_customers.py | delete_customers.py | @app.route('/customers/<int:id>', methods=['DELETE'])
def delete_customers(id):
index = [i for i, customer in enumerate(customers) if customer['id'] == id]
if len(index) > 0:
del customers[index[0]]
return make_response('', HTTP_204_NO_CONTENT) | Python | 0 | |
bc34aa231a3838ad7686541ed4bce58374a40b19 | Create __init__.py | physt/__init__.py | physt/__init__.py | Python | 0.000429 | ||
2b6c13f883a8e914a3f719447b508430c2d51e5a | Add Tower Label module (#21485) | lib/ansible/modules/web_infrastructure/ansible_tower/tower_label.py | lib/ansible/modules/web_infrastructure/ansible_tower/tower_label.py | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: tower_label
version_added: "2.3"
short_description: create, update, or destroy Ansible Tower label.
description:
- Create, update, or destroy Ansible Tower labels. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- Name to use for the label.
required: True
default: null
organization:
description:
- Organization the label should be applied to.
required: True
default: null
state:
description:
- Desired state of the resource.
required: False
default: "present"
choices: ["present", "absent"]
tower_host:
description:
- URL to your Tower instance.
required: False
default: null
tower_username:
description:
- Username for your Tower instance.
required: False
default: null
tower_password:
description:
- Password for your Tower instance.
required: False
default: null
tower_verify_ssl:
description:
- Dis/allow insecure connections to Tower. If C(no), SSL certificates will not be validated.
This should only be used on personally controlled sites using self-signed certificates.
required: False
default: True
tower_config_file:
description:
- Path to the Tower config file. See notes.
required: False
default: null
requirements:
- "python >= 2.6"
- "ansible-tower-cli >= 3.0.3"
notes:
- If no I(config_file) is provided we will attempt to use the tower-cli library
defaults to find your Tower host information.
- I(config_file) should contain Tower configuration in the following format
host=hostname
username=username
password=password
'''
EXAMPLES = '''
- name: Add label to tower organization
tower_label
name: Custom Label
organization: My Organization
state: present
tower_config_file: "~/tower_cli.cfg"
'''
try:
import tower_cli
import tower_cli.utils.exceptions as exc
from tower_cli.conf import settings
from ansible.module_utils.ansible_tower import tower_auth_config, tower_check_mode
HAS_TOWER_CLI = True
except ImportError:
HAS_TOWER_CLI = False
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
organization = dict(required=True),
tower_host = dict(),
tower_username = dict(),
tower_password = dict(no_log=True),
tower_verify_ssl = dict(type='bool', default=True),
tower_config_file = dict(type='path'),
state = dict(choices=['present', 'absent'], default='present'),
),
supports_check_mode=True
)
if not HAS_TOWER_CLI:
module.fail_json(msg='ansible-tower-cli required for this module')
name = module.params.get('name')
organization = module.params.get('organization')
state = module.params.get('state')
json_output = {'label': name, 'state': state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
label = tower_cli.get_resource('label')
try:
org_res = tower_cli.get_resource('organization')
org = org_res.get(name=organization)
if state == 'present':
result = label.modify(name=name, organization=org['id'], create_on_missing=True)
json_output['id'] = result['id']
elif state == 'absent':
result = label.delete(name=name, organization=org['id'])
except (exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update label, organization not found: {0}'.format(excinfo), changed=False)
except (exc.ConnectionError, exc.BadRequest, exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update label: {0}'.format(excinfo), changed=False)
json_output['changed'] = result['changed']
module.exit_json(**json_output)
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| Python | 0 | |
e93bbc1b5091f9b6d583437aea05aa59c8233d2d | add audiotsmcli | examples/audiotsmcli.py | examples/audiotsmcli.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
audiotsmcli
~~~~~~~~~~~
Change the speed of an audio file without changing its pitch.
"""
import argparse
import os
from audiotsm.ola import ola
from audiotsm.io.wav import WavReader, WavWriter
def main():
"""Change the speed of an audio file without changing its pitch."""
# Parse command line arguments
parser = argparse.ArgumentParser(description=(
"Change the speed of an audio file without changing its pitch."))
parser.add_argument('-s', '--speed', metavar="S", type=float, default=1.,
help=("Set the speed ratio (e.g 0.5 to play at half "
"speed)"))
parser.add_argument('-l', '--frame-length', metavar='N', type=int,
default=None, help=("Set the frame length to N."))
parser.add_argument('-a', '--analysis-hop', metavar='N', type=int,
default=None, help=("Set the analysis hop to N."))
parser.add_argument('--synthesis-hop', metavar='N', type=int, default=None,
help=("Set the synthesis hop to N."))
parser.add_argument('input_filename', metavar='INPUT_FILENAME', type=str,
help=("The audio input file"))
parser.add_argument('output_filename', metavar='OUTPUT_FILENAME', type=str,
help=("The audio output file"))
args = parser.parse_args()
if not os.path.isfile(args.input_filename):
parser.error(
'The input file "{}" does not exist.'.format(args.input_filename))
# Get TSM method parameters
parameters = {}
if args.speed is not None:
parameters['speed'] = args.speed
if args.frame_length is not None:
parameters['frame_length'] = args.frame_length
if args.analysis_hop is not None:
parameters['analysis_hop'] = args.analysis_hop
if args.speed is not None:
parameters['speed'] = args.speed
# Get input and output files
input_filename = args.input_filename
output_filename = args.output_filename
# Run the TSM procedure
with WavReader(input_filename) as reader:
channels = reader.channels
with WavWriter(output_filename, channels, reader.samplerate) as writer:
tsm = ola(channels, **parameters)
finished = False
while not (finished and reader.empty):
tsm.read_from(reader)
_, finished = tsm.write_to(writer)
finished = False
while not finished:
_, finished = tsm.flush_to(writer)
if __name__ == "__main__":
main()
| Python | 0.000001 | |
2b1dc56193f3a81e5aba237f3ad59aa9113dcb6e | Create ui.py | ui.py | ui.py | from visual import *
from visual.controls import *
import wx
import scatterplot
import ui_functions
debug = True
L = 320
Hgraph = 400
# Create a window. Note that w.win is the wxPython "Frame" (the window).
# window.dwidth and window.dheight are the extra width and height of the window
# compared to the display region inside the window. If there is a menu bar,
# there is an additional height taken up, of amount window.menuheight.
# The default style is wx.DEFAULT_FRAME_STYLE; the style specified here
# does not enable resizing, minimizing, or full-sreening of the window.
w = window(width=2*(L+window.dwidth), height=L+window.dheight+window.menuheight+Hgraph,
menus=False, title='Widgets',
style=wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX)
# Place a 3D display widget in the left half of the window.
d = 20
disp = display(window=w, x=d, y=d, width=L*d, height=L*d, background=color.black, center=vector(.5,.5,.5), range = 1)
# Place buttons, radio buttons, a scrolling text object, and a slider
# in the right half of the window. Positions and sizes are given in
# terms of pixels, and pos(0,0) is the upper left corner of the window.
p = w.panel # Refers to the full region of the window in which to place widgets
m = wx.MenuBar()
file_menu = wx.Menu()
view_menu = wx.Menu()
options_menu = wx.Menu()
edit_menu = wx.Menu()
item = file_menu.Append(-1, "Load Data\tCtrl-L", "Load")
w.win.Bind(wx.EVT_MENU, ui_functions.open_file, item)
file_menu.AppendSeparator()
item = file_menu.Append(-1, "Open Plot\tCtrl-O", "Open")
item = file_menu.Append(-1, "Save Plot\tCtrl-S", "Save")
file_menu.AppendSeparator()
item = file_menu.Append(-1, "Export\tCtrl-E", "Export")
file_menu.AppendSeparator()
item = file_menu.Append(-1, "Quit\tCtrl-Q", "Exit")
w.win.Bind(wx.EVT_MENU, w._OnExitApp, item)
item = edit_menu.Append(-1, 'Undo\tCtrl-Z', 'Make box cyan')
item = edit_menu.Append(-1, 'Redo\tCtrl-Y', 'Make box cyan')
edit_menu.AppendSeparator()
item = edit_menu.Append(-1, 'Cut\tCtrl-X', 'Make box cyan')
item = edit_menu.Append(-1, 'Copy\tCtrl-C', 'Make box cyan')
item = edit_menu.Append(-1, 'Paste\tCtrl-V', 'Make box cyan')
edit_menu.AppendSeparator()
item = edit_menu.Append(-1, 'Select All\tCtrl-A', 'Make box cyan')
item = view_menu.Append(-1, 'Frame limits', 'Make box cyan')
item = view_menu.Append(-1, 'Equalise Axes', 'Make box cyan')
item = view_menu.Append(-1, 'Restore Default View', 'Make box cyan')
item = view_menu.Append(-1, 'Show Toolbar', 'Make box cyan')
item = options_menu.Append(-1, 'Titles & Labels', 'Make box cyan')
item = options_menu.Append(-1, 'Axes', 'Make box cyan')
w.win.Bind(wx.EVT_MENU, ui_functions.toggle_axes, item)
item = options_menu.Append(-1, 'Grid', 'Make box cyan')
w.win.Bind(wx.EVT_MENU, ui_functions.toggle_grid, item)
item = options_menu.Append(-1, 'Data Points', 'Make box cyan')
item = options_menu.Append(-1, 'Export Formats', 'Make box cyan')
m.Append(file_menu, '&File')
m.Append(edit_menu, '&Edit')
m.Append(view_menu, '&View')
m.Append(options_menu, '&Options')
if debug:
scatterplot.import_datafile('points.csv')
w.win.SetMenuBar(m)
while True:
rate(100)
| Python | 0.000001 | |
6dd52499de049d76a1bea5914f47dc5b6aae23d7 | Add gspread example | xl.py | xl.py | #!/usr/bin/python
#csv upload to gsheet
import logging
import json
import gspread
import time
import re
from oauth2client.client import SignedJwtAssertionCredentials
from Naked.toolshed.shell import muterun_rb
logging.basicConfig(filename='/var/log/gspread.log',format='%(asctime)s %(levelname)s:%(message)s',level=logging.INFO)
filename = '<google sheet name>'
#OAuth login
json_key = json.load(open('oauth.json'))
"""
JSON in the form:
{
"private_key_id": "",
"private_key": "",
"client_email": "",
"client_id": "",
"type": "service_account"
}
"""
scope = ['https://spreadsheets.google.com/feeds']
credentials = SignedJwtAssertionCredentials(json_key['client_email'], json_key['private_key'], scope)
gc = gspread.authorize(credentials)
if gc:
logging.info('OAuth succeeded')
else:
logging.warn('Oauth failed')
now = time.strftime("%c")
# get data from ruby script
response = muterun_rb('script')
if response:
logging.info('Data collected')
else:
logging.warn('Could not collect data')
csv = response.stdout
csv = re.sub('/|"|,[0-9][0-9][0-9]Z|Z', '', csv)
csv_lines = csv.split('\n')
#get columns and rows for cell list
column = len(csv_lines[0].split(","))
row = 1
for line in csv_lines:
row += 1
#create cell range
columnletter = chr((column - 1) + ord('A'))
cell_range = 'A1:%s%s' % (columnletter, row)
#open the worksheet and create a new sheet
wks = gc.open(filename)
if wks:
logging.info('%s file opened for writing', filename)
else:
logging.warn('%s file could not be opened', filename)
sheet = wks.add_worksheet(title=now, rows=(row + 2), cols=(column + 2))
cell_list = sheet.range(cell_range)
#create values list
csv = re.split("\n|,", csv)
for item, cell in zip(csv, cell_list):
cell.value = item
# Update in batch
if sheet.update_cells(cell_list):
logging.info('upload to %s sheet in %s file done', now, filename)
else:
logging.warn('upload to %s sheet in %s file failed', now, filename)
| Python | 0.000001 | |
3e10a9fcb15e06699aa90016917b4ec5ec857faa | Solve task #506 | 506.py | 506.py | class Solution(object):
def findRelativeRanks(self, nums):
"""
:type nums: List[int]
:rtype: List[str]
"""
def reverse_numeric(x, y):
return y - x
kek = sorted(nums, cmp=reverse_numeric)
l = len(nums)
if l > 0:
nums[nums.index(kek[0])] = "Gold Medal"
if l > 1:
nums[nums.index(kek[1])] = "Silver Medal"
if l > 2:
nums[nums.index(kek[2])] = "Bronze Medal"
if l > 3:
for i in range(3, l):
nums[nums.index(kek[i])] = str(i + 1)
return nums
| Python | 0.999999 | |
0040a9e9417ab8becac64701b3c4fc6d94410a21 | Create Mc3.py | Mc3.py | Mc3.py | # -*- coding: utf-8 -*-
"""Mc3 module."""
import cmd2
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage,
SourceUser, SourceGroup, SourceRoom,
TemplateSendMessage, ConfirmTemplate, MessageTemplateAction,
ButtonsTemplate, URITemplateAction, PostbackTemplateAction,
CarouselTemplate, CarouselColumn, PostbackEvent,
StickerMessage, StickerSendMessage, LocationMessage, LocationSendMessage,
ImageMessage, VideoMessage, AudioMessage,
UnfollowEvent, FollowEvent, JoinEvent, LeaveEvent, BeaconEvent, ImageSendMessage,VideoSendMessage
)
from aq import (aq, map, boss)
from arena import arena
from aw import aw
from mcoccalendars import calendar
#from prestigetest import inputchamp
class Mc3(cmd2.Cmd):
trigger = 'Mc3 '
def __init__(self, line_bot_api, event):
self.line_bot_api = line_bot_api
self.event = event
super(Mc3, self).__init__()
def default(self, line):
#self.line_bot_api.reply_message(self.event.reply_token,
# TextSendMessage(text="Oops! You've entered an invalid command please type " +
# self.trigger + "list to view available commands"))
return False
def do_list(self, line):
self.line_bot_api.reply_message(
self.event.reply_token,
TextSendMessage(text="Make sure all commands start with the 'Mc3' trigger and there are no spaces after the last letter in the command!"+'\n'+
"Example:'Mc3 aq'"+'\n'+
'\n'+
"Command List:"+'\n'+
"abilities"+'\n'+
"aq"+'\n'+
"aw"+'\n'+
"arena"+'\n'+
"calendars"+'\n'+
"duels"+'\n'+
"masteries"+'\n'+
"prestige"+'\n'+
"prestige tools"+'\n'+
"special quests"+'\n'+
"synergies"+'\n'+
"support"+'\n'+
"Inputting champs has changed, please see new syntax in 'Mc3 prestige tools'"))
return True;
def do_aq(self, line):
_aq = aq(self.line_bot_api, self.event)
return _aq.process(line.parsed.args)
def do_map(self, line):
_map = map(self.line_bot_api, self.event)
return _map.process(line.parsed.args)
def do_boss(self, line):
_boss = boss(self.line_bot_api, self.event)
return _boss.process(line.parsed.args)
def do_arena(self, line):
_arena = arena(self.line_bot_api, self.event)
return _arena.process(line.parsed.args)
def do_aw(self, line):
_aw = aw(self.line_bot_api, self.event)
return _aw.process(line.parsed.args)
def do_calendar(self, line):
_calendar = calendar(self.line_bot_api, self.event)
return _calendar.process(line.parsed.args)
#def do_inputchamp(self, line):
# _inputchamp=inputchamp(self.line_bot_api, self.event,self.events, self.user)
#return _inputchamp.splitname(line)
def do_help(self, line):
return self.do_list(line);
def do_EOF(self, line):
return True
def process(self, line):
return self.onecmd(line);
| Python | 0.000001 | |
6c3867275693d4a771b9ff8df55aab18818344cd | add first app | app.py | app.py | import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
def make_app():
    """Build the tornado Application with its routing table."""
    return tornado.web.Application([
        (r"/", MainHandler),
    ])
if __name__ == "__main__":
    # Listen on port 8888 and block forever on the IO loop.
    app = make_app()
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()
8080153c65f4aa1d875a495caeee290fb1945081 | Add migration | media_management_api/media_auth/migrations/0005_delete_token.py | media_management_api/media_auth/migrations/0005_delete_token.py | # Generated by Django 3.2.7 on 2021-09-15 20:00
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the media_auth Token model (its table is removed from the schema)."""

    dependencies = [
        ('media_auth', '0004_auto_20160209_1902'),
    ]

    operations = [
        migrations.DeleteModel(
            name='Token',
        ),
    ]
| Python | 0.000002 | |
40b0f1cd33a053be5ab528b4a50bda404f0756dc | Add managment command Add_images_to_sections | gem/management/commands/Add_images_to_sections.py | gem/management/commands/Add_images_to_sections.py | from __future__ import absolute_import, unicode_literals
import csv
from babel import Locale
from django.core.management.base import BaseCommand
from wagtail.wagtailimages.tests.utils import Image
from molo.core.models import Languages, SectionPage, Main, SectionIndexPage
class Command(BaseCommand):
    """Attach images listed in a CSV to SectionPages of every site whose main
    language matches the given locale, and clear images set directly on
    translated sections.
    """

    def add_arguments(self, parser):
        # CSV row format: first column = section slug, remaining columns =
        # image titles (without extension — see note in handle()).
        parser.add_argument('csv_name', type=str)
        parser.add_argument('locale', type=str)

    def handle(self, *args, **options):
        """Read the CSV once into a slug -> [image titles] map, then walk
        every Main site and update its sections."""
        csv_name = options.get('csv_name', None)
        locale_code = options.get('locale', None)
        mains = Main.objects.all()
        sections = {}
        with open(csv_name) as sections_images:
            reader = csv.reader(sections_images)
            if mains:
                for row in reader:
                    key = row[0]
                    sections[key] = row[1:]
        for main in mains:
            section_index = SectionIndexPage.objects.child_of(main).first()
            main_lang = Languages.for_site(main.get_site()).languages.filter(
                is_active=True, is_main_language=True).first()
            # NOTE(review): if section_index is None this descendant_of()
            # call is still executed before the guard below — confirm the
            # queryset tolerates a None root.
            translated_sections = SectionPage.objects.descendant_of(
                section_index).filter(
                languages__language__is_main_language=False).live()
            # Translated sections should inherit imagery from the main
            # language page, so any directly-set image is cleared first.
            for translated_section in translated_sections:
                translated_section.image = None
                translated_section.save_revision().publish()
            if section_index and main_lang:
                if main_lang.locale == locale_code:
                    for section_slug in sections:
                        section = SectionPage.objects.descendant_of(
                            section_index).filter(slug=section_slug).first()
                        if section:
                            for image_title in sections.get(section_slug):
                                # Titles in the CSV omit the '.jpg' suffix;
                                # it is appended here for the lookup.
                                image = Image.objects.filter(
                                    title=image_title + ".jpg").first()
                                if image:
                                    section.image = image
                                    section.extra_style_hints = section.slug
                                    section.save_revision().publish()
                                else:
                                    self.stdout.write(self.style.NOTICE(
                                        'Image "%s" does not exist in "%s"'
                                        % (image_title, main)))
                        else:
                            self.stdout.write(self.style.ERROR(
                                'section "%s" does not exist in "%s"'
                                % (section_slug, main.get_site())))
                else:
                    self.stdout.write(self.style.NOTICE(
                        'Main language of "%s" is not "%s".'
                        ' The main language is "%s"'
                        % (main.get_site(), locale_code, main_lang)))
            else:
                if not section_index:
                    self.stdout.write(self.style.NOTICE(
                        'Section Index Page does not exist in "%s"' % main))
                if not main_lang:
                    self.stdout.write(self.style.NOTICE(
                        'Main language does not exist in "%s"' % main))
| Python | 0.000003 | |
69b900580e614ce494b9d1be0bee61464470cef7 | Create 6kyu_digital_root.py | Solutions/6kyu_digital_root.py | Solutions/6kyu_digital_root.py | from functools import reduce
def digital_root(n):
    """Return the digital root of a non-negative integer *n*.

    The digital root is obtained by repeatedly summing the decimal digits
    until a single digit remains (e.g. 942 -> 15 -> 6).

    Fixed: the loop condition was ``n > 10``, so two-digit ``10`` was
    returned unchanged instead of being reduced to ``1``. Any number with
    more than one digit (i.e. >= 10) must be reduced.
    """
    while n >= 10:
        # sum() over the decimal digits replaces the reduce(lambda ...) form.
        n = sum(int(d) for d in str(n))
    return n
| Python | 0.000013 | |
8a06b469642f02f7cf7a946e971f594bac1f02cd | Add testing initial gui file. | python2.7libs/CacheManager/temp_gui.py | python2.7libs/CacheManager/temp_gui.py | # -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
## Description
"""
This file is not for releasing, just for testing.
"""
#-------------------------------------------------------------------------------
import hou
from PySide import QtCore
from PySide import QtGui
# from . import hqt
from . import define as Define
# from . import core
# reload(hqt)
reload(Define)
# reload(core)
class CacheManager(QtGui.QWidget):
    """Main widget: a menu bar plus a table of cache nodes in the HIP file."""
    def __init__(self):
        QtGui.QWidget.__init__(self)

        # Initialise the list of Current Cache Nodes in each HIP.
        self.current_cache_nodes = []

        self.initUI()

    def initUI(self):
        """Assemble menu bar, table view and the surrounding layouts."""
        ## Create MenuBar and MenuItems
        self._createMenubar()
        self._createLoadMenu()

        ## Layout: table_layout
        self._createTableView()
        table_layout = QtGui.QVBoxLayout()
        table_layout.addWidget(self.table_view)

        ## Layout: body_layout (Main)
        body_layout = QtGui.QVBoxLayout()
        body_layout.addLayout(table_layout)
        body_layout.setSpacing(10)
        body_layout.setContentsMargins(0, 0, 5, 0)
        self.setLayout(body_layout)

    def _createMenubar(self):
        self.menuBar = QtGui.QMenuBar()
        self.menuBar.setFixedHeight(25)
        # NOTE(review): cornerWidget() is a getter in Qt; this call has no
        # visible effect — setCornerWidget() was probably intended. Confirm.
        self.menuBar.cornerWidget(QtCore.Qt.TopRightCorner)

    def _createLoadMenu(self):
        """Build the File menu with a Reload submenu."""
        file_menu = QtGui.QMenu(self)
        reload_menu = QtGui.QMenu("Reload", self)
        file_menu.addMenu(reload_menu)
        file_action = self.menuBar.addAction("File")
        file_action.setMenu(file_menu)

    def _createTableView(self):
        self.table_view = CacheTableView(self)
class CacheTableView(QtGui.QTableView):
    """Table view listing every cache node found under the current node."""
    def __init__(self, parent):
        super(CacheTableView, self).__init__(parent)
        self._parent = parent
        self.initSettings()

    def initSettings(self):
        """Populate the model from the scene and configure view behaviour."""
        cache_list = self.getCacheList()
        table_model = CacheTableModel(data = cache_list)
        self.setModel(table_model)

        self.verticalHeader().setVisible(False)
        self.setSortingEnabled(True)
        self.setAlternatingRowColors(True)
        self.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)

    def setColumnWidthWithKey(self, width, **kargs):
        """Resize the column matching a header key (if found)."""
        col = self.store().getHeaderSectionByKey(**kargs)
        if col > -1:
            self.setColumnWidth(col, width)

    def getCacheList(self):
        """Collect one metadata dict per cache node under hou.pwd()."""
        current_cache_nodes = []
        nodes = hou.pwd().allSubChildren()
        for node in nodes:
            if node.type().name().lower() in Define._CACHE_NODES:
                eachNode_dict = {}
                nodeName = node.name()
                nodePath = node.path()
                cachePath = self.unexpStrPath(nodePath)
                envName = self.env_Analysis(cachePath)
                cacheExpandedPath = node.evalParm("file")
                # NOTE(review): nodeTypeName is computed but never stored.
                nodeTypeName = node.type().name().lower()
                nodeColor = node.color().rgb()

                eachNode_dict["Name"] = nodeName
                eachNode_dict["Node Path"] = nodePath
                eachNode_dict["Cache Path"] = cachePath
                eachNode_dict["Env"] = envName
                eachNode_dict["Expanded Path"] = cacheExpandedPath
                eachNode_dict["Colour"] = nodeColor

                current_cache_nodes.append(eachNode_dict)
        return current_cache_nodes

    def unexpStrPath(self, path):
        """Return the unexpanded value of the node's 'file' parameter."""
        cachePath = path + "/file"
        unExpPath = hou.parm(cachePath).unexpandedString()
        return unExpPath

    def env_Analysis(self, path):
        """Return the leading environment-variable token of a cache path.

        NOTE(review): ``path[0]`` takes only the FIRST CHARACTER of the
        string before splitting, and ``pathParts[0] == None`` can never be
        true for a str — ``path.split('/')[0]`` was probably intended.
        This file is marked "just for testing"; confirm before release.
        """
        pathParts = path[0].split('/')
        if pathParts[0] == None:
            return "-"
        else:
            return pathParts[0]
class CacheTableModel(QtCore.QAbstractTableModel):
    """Model exposing the cache-node dicts to the table view.

    NOTE(review): this class is unfinished (the file header says it is
    testing-only): data() has a dangling ``if hType ==`` and references an
    undefined ``sValue``; setColumnVisibe() is syntactically broken.
    """
    def __init__(self, parent = None, data = []):
        super(CacheTableModel, self).__init__(parent)
        self._items = data

    def rowCount(self, parent = QtCore.QModelIndex()):
        return len(self._items)

    def columnCount(self, parent = QtCore.QModelIndex()):
        # NOTE(review): raises IndexError when self._items is empty.
        return len(self._items[0])

    def data(self, index, role = QtCore.QModelIndex()):
        if not index.isValid():
            return None
        if not 0 <= index.row() < len(self._items):
            return None
        if index.column() == len(Define._HEADER_ITEMS):
            return None

        hkey = Define._HEADER_ITEMS[index.column()]["key"]
        hType = Define._HEADER_ITEMS[index.column()]["type"]
        if role == QtCore.Qt.DisplayRole:
            if hType in ["string", "int"]:
                return sValue
            if hType ==

    def headerData(self, section, orientation, role = QtCore.Qt.DisplayRole):
        """Return the column title for horizontal headers."""
        if role != QtCore.Qt.DisplayRole:
            return None

        if orientation == QtCore.Qt.Horizontal:
            if section < len(Define._HEADER_ITEMS):
                return Define._HEADER_ITEMS[section]["title"]
            else:
                return None
        return None

    def setColumnVisibe(self):
        if visible:
            self.showColumn()
        else:
            self.hideColum.ItemIsSelectable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsEditable
# def getHeaderTitleByKey(arg):
# for i in
| Python | 0 | |
6c08f3d3441cf660de910b0f3c49c3385f4469f4 | Add "Secret" channel/emoji example | examples/secret.py | examples/secret.py | import typing
import discord
from discord.ext import commands
bot = commands.Bot(command_prefix=commands.when_mentioned, description="Nothing to see here!")
# the `hidden` keyword argument hides it from the help command.
@bot.group(hidden=True)
async def secret(ctx: commands.Context):
    """What is this "secret" you speak of?"""
    if ctx.invoked_subcommand is None:
        # No subcommand given: short reply that self-deletes after 5 s.
        await ctx.send('Shh!', delete_after=5)
def create_overwrites(ctx, *objects):
    """This is just a helper function that creates the overwrites for the
    voice/text channels.

    A `discord.PermissionOverwrite` allows you to determine the permissions
    of an object, whether it be a `discord.Role` or a `discord.Member`.

    In this case, the `view_channel` permission is being used to hide the channel
    from being viewed by whoever does not meet the criteria, thus creating a
    secret channel.

    Returns a dict suitable for the ``overwrites=`` argument of the
    ``create_*_channel`` methods.
    """

    # a dict comprehension is being utilised here to set the same permission overwrites
    # for each `discord.Role` or `discord.Member`.
    overwrites = {
        obj: discord.PermissionOverwrite(view_channel=True)
        for obj in objects
    }

    # prevents the default role (@everyone) from viewing the channel
    # if it isn't already allowed to view the channel.
    overwrites.setdefault(ctx.guild.default_role, discord.PermissionOverwrite(view_channel=False))

    # makes sure the client is always allowed to view the channel.
    overwrites[ctx.guild.me] = discord.PermissionOverwrite(view_channel=True)

    return overwrites
# since these commands rely on guild related features,
# it is best to lock it to be guild-only.
@secret.command()
@commands.guild_only()
async def text(ctx: commands.Context, name: str, *objects: typing.Union[discord.Role, discord.Member]):
    """This makes a text channel with a specified name
    that is only visible to roles or members that are specified.
    """

    # Hidden from everyone except the listed roles/members (and the bot).
    overwrites = create_overwrites(ctx, *objects)

    await ctx.guild.create_text_channel(
        name,
        overwrites=overwrites,
        topic='Top secret text channel. Any leakage of this channel may result in serious trouble.',
        reason='Very secret business.',
    )
@secret.command()
@commands.guild_only()
async def voice(ctx: commands.Context, name: str, *objects: typing.Union[discord.Role, discord.Member]):
    """This does the same thing as the `text` subcommand
    but instead creates a voice channel.
    """

    overwrites = create_overwrites(ctx, *objects)

    await ctx.guild.create_voice_channel(
        name,
        overwrites=overwrites,
        reason='Very secret business.'
    )
@secret.command()
@commands.guild_only()
async def emoji(ctx: commands.Context, emoji: discord.PartialEmoji, *roles: discord.Role):
    """This clones a specified emoji that only specified roles
    are allowed to use.
    """

    # fetch the emoji asset and read it as bytes.
    emoji_bytes = await emoji.url.read()

    # the key parameter here is `roles`, which controls
    # what roles are able to use the emoji.
    await ctx.guild.create_custom_emoji(
        name=emoji.name,
        image=emoji_bytes,
        roles=roles,
        reason='Very secret business.'
    )
bot.run('token')
| Python | 0.000003 | |
e0338d39f611b2ca3f202151c49fc6a4b35bd580 | Add WallBuilder | exercise/WallBuilder.py | exercise/WallBuilder.py | #!/usr/bin/env python3
class Block(object):
    """A rectangle identified by its width and height.

    Extra keyword arguments are accepted for forward compatibility but
    are currently ignored. Two blocks compare equal when both
    dimensions match.
    """

    def __init__(self, width, height, **attr):
        self.__width = width
        self.__height = height

    def __eq__(self, another):
        # Compare the two private dimensions of each block.
        same_width = self.__width == another.__width
        same_height = self.__height == another.__height
        return same_width and same_height
class Brick(Block):
    """A single brick; inherits dimensions and equality from Block."""
    pass
class Layer(Block):
    def build(self, brick, **more):
        """Placeholder: lay bricks into this layer (not implemented yet)."""
        pass
class Wall(Block):
    """A wall; currently behaves exactly like a plain Block."""
    pass
class WallBuilder(object):
    """Collects distinct brick types and enumerates brick-count combinations."""

    def __init__(self, brick, *more):
        # Keep bricks in insertion order, dropping duplicates
        # (duplicate detection uses Block.__eq__ via the `in` test).
        self.__bricks = [brick]
        for extra in more:
            if extra not in self.__bricks:
                self.__bricks.append(extra)

    @staticmethod
    def get(x, y, z):
        """Return every (i, j) pair of non-negative ints with x*i + y*j == z.

        Fixed: this was defined as an instance method without ``self``, so
        calling it on a WallBuilder instance raised a TypeError. It never
        uses instance state, so it is now a proper @staticmethod (the old
        class-level call ``WallBuilder.get(...)`` still works unchanged).
        """
        m1 = (z // x)
        m2 = (z // y)
        return [(i, j) for i in range(0, m1 + 1) for j in range(0, m2 + 1)
                if (x * i + y * j) == z]
# Ad-hoc smoke test: construct two brick sizes and a wall builder.
b1 = Brick(2, 1)
b2 = Brick(3, 1)
c = WallBuilder(b1, b2)
pass
| Python | 0.000001 | |
998b0b77b8d2ba247f92df74d24a7efdf5077e89 | add expect_column_values_to_be_valid_ky_zip (#4730) | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_kentucky_zip.py | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_kentucky_zip.py | import json
from typing import Optional
import zipcodes
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.exceptions import InvalidExpectationConfigurationError
from great_expectations.execution_engine import (
PandasExecutionEngine,
SparkDFExecutionEngine,
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.expectation import ColumnMapExpectation
from great_expectations.expectations.metrics import (
ColumnMapMetricProvider,
column_condition_partial,
)
def is_valid_kentucky_zip(zip: str):
    """Return True if *zip* is a string holding a valid Kentucky ZIP code.

    Fixed: the original evaluated ``len(zip)`` BEFORE the type check, so
    unsized non-string input (e.g. an int) raised TypeError instead of
    returning False. The type guard now comes first.

    Note: the parameter name shadows the builtin ``zip``; it is kept for
    backward compatibility with existing callers.
    """
    if type(zip) != str:
        return False
    if len(zip) > 10:
        return False
    # Build a set of Kentucky ZIP strings for O(1) membership testing.
    kentucky_zips = {d["zip_code"] for d in zipcodes.filter_by(state="KY")}
    return zip in kentucky_zips
# This class defines a Metric to support your Expectation.
# For most ColumnMapExpectations, the main business logic for calculation will live in this class.
class ColumnValuesToBeValidKentuckyZip(ColumnMapMetricProvider):
    """Map-level metric: flags each column value as a valid/invalid KY ZIP."""

    # This is the id string that will be used to reference your metric.
    condition_metric_name = "column_values.valid_kentucky_zip"

    # This method implements the core logic for the PandasExecutionEngine
    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        return column.apply(lambda x: is_valid_kentucky_zip(x))

    # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
    # @column_condition_partial(engine=SqlAlchemyExecutionEngine)
    # def _sqlalchemy(cls, column, _dialect, **kwargs):
    #     raise NotImplementedError

    # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
    # @column_condition_partial(engine=SparkDFExecutionEngine)
    # def _spark(cls, column, **kwargs):
    #     raise NotImplementedError
# This class defines the Expectation itself
class ExpectColumnValuesToBeValidKentuckyZip(ColumnMapExpectation):
    """Expect values in this column to be valid Kentucky zipcodes.

    See https://pypi.org/project/zipcodes/ for more information.
    """

    # These examples will be shown in the public gallery.
    # They will also be executed as unit tests for your Expectation.
    examples = [
        {
            "data": {
                "valid_kentucky_zip": ["42326", "42518", "41766", "41619"],
                "invalid_kentucky_zip": ["-10000", "1234", "99999", "25487"],
            },
            "tests": [
                {
                    "title": "basic_positive_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "valid_kentucky_zip"},
                    "out": {"success": True},
                },
                {
                    "title": "basic_negative_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "invalid_kentucky_zip"},
                    "out": {"success": False},
                },
            ],
        }
    ]

    # This is the id string of the Metric used by this Expectation.
    # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
    map_metric = "column_values.valid_kentucky_zip"

    # This is a list of parameter names that can affect whether the Expectation evaluates to True or False
    success_keys = ("mostly",)

    # This dictionary contains default values for any parameters that should have default values
    default_kwarg_values = {}

    def validate_configuration(
        self, configuration: Optional[ExpectationConfiguration]
    ) -> None:
        """
        Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
        necessary configuration arguments have been provided for the validation of the expectation.

        Args:
            configuration (OPTIONAL[ExpectationConfiguration]): \
                An optional Expectation Configuration entry that will be used to configure the expectation
        Returns:
            None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
        """

        super().validate_configuration(configuration)
        if configuration is None:
            # NOTE(review): this local rebinding is currently unused below;
            # it only matters once the commented-out checks are enabled.
            configuration = self.configuration

        # # Check other things in configuration.kwargs and raise Exceptions if needed
        # try:
        #     assert (
        #         ...
        #     ), "message"
        #     assert (
        #         ...
        #     ), "message"
        # except AssertionError as e:
        #     raise InvalidExpectationConfigurationError(str(e))

    # This object contains metadata for display in the public Gallery
    library_metadata = {
        "maturity": "experimental",  # "experimental", "beta", or "production"
        "tags": [
            "hackathon",
            "typed-entities",
        ],  # Tags for this Expectation in the Gallery
        "contributors": [  # Github handles for all contributors to this Expectation.
            "@luismdiaz01",
            "@derekma73",  # Don't forget to add your github handle here!
        ],
        "requirements": ["zipcodes"],
    }
# Running this file directly prints the Expectation's diagnostic checklist.
if __name__ == "__main__":
    ExpectColumnValuesToBeValidKentuckyZip().print_diagnostic_checklist()
| Python | 0 | |
8a30bcc511647ed0c994cb2103dd5bed8d4671a8 | Create B_Temperature.py | Cas_4/Temperature/B_Temperature.py | Cas_4/Temperature/B_Temperature.py | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import cartopy.crs as ccrs
from xmitgcm import open_mdsdataset
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER

plt.ion()

# Model run to plot: case 4, surface temperature snapshots.
dir0 = '/homedata/bderembl/runmit/test_southatlgyre3'

ds0 = open_mdsdataset(dir0,prefix=['T'])

nt = 0  # time index (frames 1..150)
nz = 0  # vertical level (0 = surface)

while (nt < 150) :
  nt = nt+1
  print(nt)
  plt.figure(1)
  ax = plt.subplot(projection=ccrs.PlateCarree());
  ds0['T'][nt,nz,:,:].plot.pcolormesh('XC', 'YC',ax=ax,vmin=-10,vmax=35,cmap='ocean')
  plt.title('Case 4 : Temperature ')
  plt.text(5,5,nt,ha='center',wrap=True)
  ax.coastlines()
  gl = ax.gridlines(draw_labels=True, alpha = 0.5, linestyle='--');
  gl.xlabels_top = False
  gl.ylabels_right = False
  gl.xformatter = LONGITUDE_FORMATTER
  gl.yformatter = LATITUDE_FORMATTER
  # Zero-pad the frame number to three digits so files sort correctly.
  # (Replaces the duplicated if/elif/else savefig branches; filenames
  # produced are identical to before.)
  plt.savefig('Temperature_cas4-{:03d}.png'.format(nt))
  plt.clf()
| Python | 0.998596 | |
a70490e52bde05d2afc6ea59416a50e11119d060 | Add migration for Comment schema upgrade. ... | raggregate/rg_migrations/versions/002_Add_metadata_to_Comment_to_allow_it_to_masquerade_as_epistle.py | raggregate/rg_migrations/versions/002_Add_metadata_to_Comment_to_allow_it_to_masquerade_as_epistle.py | from sqlalchemy import *
from migrate import *
from raggregate.guid_recipe import GUID
def upgrade(migrate_engine):
    """Add `unread` and `in_reply_to` columns to comments (epistle support)."""
    meta = MetaData(bind=migrate_engine)
    comments = Table('comments', meta, autoload=True)
    unreadc = Column('unread', Boolean, default=True)
    in_reply_toc = Column('in_reply_to', GUID, nullable=True)
    unreadc.create(comments)
    in_reply_toc.create(comments)
def downgrade(migrate_engine):
    """Reverse upgrade(): drop the `unread` and `in_reply_to` columns."""
    # Operations to reverse the above upgrade go here.
    meta = MetaData(bind=migrate_engine)
    comments = Table('comments', meta, autoload=True)
    comments.c.unread.drop()
    comments.c.in_reply_to.drop()
| Python | 0 | |
88d480f2c97bf7779afea34798c6c082f127f3a6 | Add missing client.py (#703) | client/client.py | client/client.py | import webapp2
import re
class RedirectResource(webapp2.RequestHandler):
    """Permanently (301) redirects any path to the same path under /community/."""

    def get(self, path):
        # Strip a single trailing slash before rebuilding the target URL.
        path = re.sub(r'/$', '', path)
        self.redirect('/community/%s' % path, permanent=True)
# pylint: disable=invalid-name
# Catch-all route: every request goes through RedirectResource.
app = webapp2.WSGIApplication([
    webapp2.Route(r'/<:.*>', handler=RedirectResource),
], debug=True)
| Python | 0 | |
38cf2a9f0c964c69df084d80ded6cf161ba7eb16 | Add elf read elf file. | elf.py | elf.py | import sys
from elftools.elf.elffile import ELFFile
from elftools.common.exceptions import ELFError
from elftools.elf.segments import NoteSegment
class ReadELF(object):
    """Thin wrapper around pyelftools' ELFFile for inspecting NOTE segments."""

    def __init__(self, file):
        self.elffile = ELFFile(file)

    def get_build(self):
        """Print every note found in the file's NOTE segments."""
        for segment in self.elffile.iter_segments():
            if isinstance(segment, NoteSegment):
                for note in segment.iter_notes():
                    # Python 2 print statement — this module is py2-only.
                    print note
def main():
    """CLI entry point: print notes from the ELF file named in argv[1]."""
    if(len(sys.argv) < 2):
        print "Missing argument"
        sys.exit(1)

    with open(sys.argv[1], 'rb') as file:
        try:
            readelf = ReadELF(file)
            readelf.get_build()
        except ELFError as err:
            # Malformed/unsupported ELF: report on stderr and fail.
            sys.stderr.write('ELF error: %s\n' % err)
            sys.exit(1)
# Script entry point.
if __name__ == '__main__':
    main()
| Python | 0 | |
d2f13fb17d3f9998af1a175dfd4e2bea4544fb3d | add example to just serialize matrix | examples/undocumented/python_modular/serialization_matrix_modular.py | examples/undocumented/python_modular/serialization_matrix_modular.py | from modshogun import *
from numpy import array
parameter_list=[[[[1.0,2,3],[4,5,6]]]]
def serialization_matrix_modular(m):
    """Serialize a feature matrix and a label vector to ascii files,
    then delete the files again.

    Parameters:
        m: nested list of floats, converted to a shogun RealFeatures matrix.
    """
    # Fixed: `os` was used below but never imported anywhere in this
    # module, so the unlink calls raised NameError. Imported locally to
    # leave the module-level import block untouched.
    import os

    feats=RealFeatures(array(m))
    #feats.io.set_loglevel(0)
    fstream = SerializableAsciiFile("foo.asc", "w")
    feats.save_serializable(fstream)

    l=Labels(array([1.0,2,3]))
    fstream = SerializableAsciiFile("foo2.asc", "w")
    l.save_serializable(fstream)

    os.unlink("foo.asc")
    os.unlink("foo2.asc")
# Python 2 script entry point (note the print statement below).
if __name__=='__main__':
    print 'Serialization Matrix Modular'
    serialization_matrix_modular(*parameter_list[0])
| Python | 0 | |
e7f1439cae37facaedce9c33244b58584e219869 | Initialize P01_sendingEmail | books/AutomateTheBoringStuffWithPython/Chapter16/P01_sendingEmail.py | books/AutomateTheBoringStuffWithPython/Chapter16/P01_sendingEmail.py | # This program uses the smtplib module to send emails
# Connecting to an SMTP Server
import smtplib

# Credentials/config live outside the repo in a plain-text 'smtp_info' file.
with open('smtp_info') as config:
    # smtp_cfg = [email, password, smtp server, port]
    smtp_cfg = config.read().splitlines()

# NOTE(review): smtp_cfg[3] (the port) is passed as a string; an int would
# be more conventional for smtplib — confirm it resolves correctly.
smtp_obj = smtplib.SMTP_SSL(smtp_cfg[2], smtp_cfg[3])
print(type(smtp_obj))
| Python | 0.000286 | |
e86047546693290556494bf00b493aa4ae770482 | add binding.gyp for node-gyp | binding.gyp | binding.gyp | {
"targets": [
{
"target_name": "rawhash",
"sources": [
"src/rawhash.cpp",
"src/MurmurHash3.h",
"src/MurmurHash3.cpp"
],
'cflags': [ '<!@(pkg-config --cflags libsparsehash)' ],
'conditions': [
[ 'OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
'cflags_cc!': ['-fno-rtti', '-fno-exceptions'],
'cflags_cc+': ['-frtti', '-fexceptions'],
}],
['OS=="mac"', {
'xcode_settings': {
'GCC_ENABLE_CPP_RTTI': 'YES',
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES'
}
}]
]
}
]
} | Python | 0.000001 | |
888b27db4d91ebba91eb935532f961943453b7c8 | add update command to new certificate data model | paralapraca/management/commands/update_certificate_templates.py | paralapraca/management/commands/update_certificate_templates.py | # -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from django.core.files import File
from core.models import CertificateTemplate
from timtec.settings import STATIC_ROOT
from paralapraca.models import CertificateData, Contract
import os
class Command(BaseCommand):
    help = 'Create certificate and receipt templates data for CertificateTemplate existent'

    # Runtime HTML bodies (Portuguese, user-facing) — placeholders {CPF}
    # and {MODULO} are filled in when the document is rendered.
    receipt_text = '<p>inscrita no cadastro de pessoa física sob o número {CPF} </p>\
    <p>participou do <em>{MODULO}</em></p>\
    <p>no Ambiente Virtual de Aprendizagem do Programa Paralapracá.</p>'

    certificate_text = '<p style="text-align: center;">inscrita no cadastro de pessoa física sob o número {CPF}</p>\
    <p style="text-align: center;">concluiu o <strong>{MODULO}</strong>,</p>\
    <p style="text-align: center;">com carga horária total de 40 horas, no </p>\
    <p style="text-align: center;">Ambiente Virtual de Aprendizagem do Programa Paralapracá.</p>'

    def handle(self, *files, **options):
        """For every existing CertificateTemplate, create a receipt
        CertificateData and (on a duplicated template) a certificate one."""
        types = CertificateData.TYPES
        cts = CertificateTemplate.objects.all()
        plpc = Contract.objects.first()
        plpc_path = os.path.join(STATIC_ROOT, 'img/site-logo-orange.svg')
        avante_path = os.path.join(STATIC_ROOT, 'img/logo-avante.png')
        plpc_logo = File(open(plpc_path, 'r'))
        avante_logo = File(open(avante_path, 'r'))
        for ct in cts:
            ct.base_logo = avante_logo
            ct.save()
            cdr = CertificateData(contract=plpc, type=types[0][0],
                                  certificate_template=ct,
                                  site_logo=plpc_logo,
                                  text=self.receipt_text)
            cdr.save()
            # Setting pk to None and saving clones the template, so the
            # certificate data gets its own template row.
            ct.pk = None
            ct.save()
            cdc = CertificateData(contract=plpc, type=types[1][0],
                                  certificate_template=ct,
                                  site_logo=plpc_logo,
                                  text=self.certificate_text)
            cdc.save()
| Python | 0.000001 | |
0ac4b8f55703a1cde7474c6ad8db9c3b6005f0f0 | convert markdown tables into rst before building (#4125) | docs/sphinx_util.py | docs/sphinx_util.py | # -*- coding: utf-8 -*-
"""Helper utilty function for customization."""
import sys
import os
import docutils
import subprocess
def run_build_mxnet(folder):
    """Run the doxygen make command in the designated folder."""
    try:
        # NOTE(review): shell=True with an interpolated path — fine for a
        # trusted docs build, unsafe for untrusted input.
        subprocess.call('cd %s; cp make/readthedocs.mk config.mk' % folder, shell = True)
        subprocess.call('cd %s; rm -rf build' % folder, shell = True)
        retcode = subprocess.call("cd %s; make -j$(nproc)" % folder, shell = True)
        if retcode < 0:
            sys.stderr.write("build terminated by signal %s" % (-retcode))
    except OSError as e:
        sys.stderr.write("build execution failed: %s" % e)
def build_r_docs(root_path):
    """Build the R reference-manual PDF and move it into the html output dir."""
    r_root = os.path.join(root_path, 'R-package')
    pdf_path = os.path.join(root_path, 'docs', 'api', 'r', 'mxnet-r-reference-manual.pdf')
    subprocess.call('cd ' + r_root +'; R CMD Rd2pdf . --no-preview -o ' + pdf_path, shell = True)
    dest_path = os.path.join(root_path, 'docs', '_build', 'html', 'api', 'r')
    subprocess.call('mkdir -p ' + dest_path, shell = True)
    subprocess.call('mv ' + pdf_path + ' ' + dest_path, shell = True)
def build_scala_docs(root_path):
    """Run scaladoc over the scala package and move its output into html."""
    scala_path = os.path.join(root_path, 'scala-package', 'core', 'src', 'main', 'scala', 'ml', 'dmlc', 'mxnet')
    subprocess.call('cd ' + scala_path + '; scaladoc `find . | grep .*scala`', shell = True)
    dest_path = os.path.join(root_path, 'docs', '_build', 'html', 'api', 'scala', 'docs')
    subprocess.call('mkdir -p ' + dest_path, shell = True)
    # scaladoc writes these fixed names into the source dir; relocate them.
    scaladocs = ['index', 'index.html', 'ml', 'lib', 'index.js', 'package.html']
    for doc_file in scaladocs:
        subprocess.call('cd ' + scala_path + ';mv ' + doc_file + ' ' + dest_path, shell = True)
def build_table(table):
    """Convert a GitHub-markdown table (list of '|'-delimited lines) into a
    reStructuredText ``list-table`` wrapped in an ``eval_rst`` fence.

    Returns '' when the input is not a well-formed table: fewer than three
    rows, inconsistent column counts, or a malformed separator row.
    """
    if len(table) < 3:
        return ''
    out = '```eval_rst\n.. list-table::\n   :header-rows: 1\n\n'
    ncol = 0
    for i, l in enumerate(table):
        # '| a | b |' -> ['', ' a ', ' b ', ''] -> [' a ', ' b ']
        cols = l.split('|')[1:-1]
        if i == 0:
            ncol = len(cols)
        elif len(cols) != ncol:
            return ''
        if i == 1:
            # Separator row: every cell must be empty or contain '---'.
            # Fixed: the original tested ``len(c) is not 0`` — an identity
            # comparison against an int literal that only works via
            # CPython's small-int cache and is a SyntaxWarning on 3.8+.
            for c in cols:
                if c and '---' not in c:
                    return ''
        else:
            # Header (i == 0) and data rows are emitted identically;
            # :header-rows: 1 above marks the first as the header.
            for j, c in enumerate(cols):
                if j == 0:
                    out += '   * - '
                else:
                    out += '     - '
                out += c + '\n'
    out += '```\n'
    return out
def convert_md_table(root_path):
    """Rewrite markdown tables as rst list-tables, in place, in every *.md
    file up to four directory levels below root_path (UTF-8)."""
    import glob
    import codecs
    files = []
    for i in range(5):
        files += glob.glob(os.path.join(root_path, *(['*']*i+['*.md'])))
    for f in files:
        started = False
        num_table = 0
        table = []
        output = ''
        with codecs.open(f, 'r', 'utf-8') as i:
            data = i.readlines()
        for l in data:
            r = l.strip()
            if r.startswith('|'):
                # Accumulate consecutive table rows until a non-'|' line.
                table += [r,]
                started = True
            else:
                if started is True:
                    tab = build_table(table)
                    # NOTE(review): ``tab is not ''`` is an identity test
                    # against a str literal (implementation-dependent);
                    # ``tab != ''`` was almost certainly intended.
                    if tab is not '':
                        num_table += 1
                        output += tab
                    started = False
                    table = []
                output += l
        if num_table != 0:
            # Python 2 print statements — this script is py2-only as written.
            print 'converted %d tables in %s' % (num_table, f)
            with codecs.open(f, 'w', 'utf-8') as i:
                i.write(output)
    print len(files)
# Top-level build pipeline, executed at import time by sphinx.
subprocess.call('./build-notebooks.sh')

curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
root_path = os.path.join(curr_path, '..')
convert_md_table(curr_path)
run_build_mxnet(root_path)
build_r_docs(root_path)
build_scala_docs(root_path)

# Fetch or refresh the recommonmark checkout used as the markdown parser.
if not os.path.exists('../recommonmark'):
    subprocess.call('cd ..; rm -rf recommonmark;' +
                    'git clone https://github.com/tqchen/recommonmark', shell = True)
else:
    subprocess.call('cd ../recommonmark/; git pull', shell=True)

sys.path.insert(0, os.path.abspath('../recommonmark/'))
from recommonmark import parser, transform
MarkdownParser = parser.CommonMarkParser
AutoStructify = transform.AutoStructify
| # -*- coding: utf-8 -*-
"""Helper utilty function for customization."""
import sys
import os
import docutils
import subprocess
def run_build_mxnet(folder):
    """Run the doxygen make command in the designated folder."""
    try:
        # Copies the readthedocs config, wipes the build dir, then builds.
        subprocess.call('cd %s; cp make/readthedocs.mk config.mk' % folder, shell = True)
        subprocess.call('cd %s; rm -rf build' % folder, shell = True)
        retcode = subprocess.call("cd %s; make -j$(nproc)" % folder, shell = True)
        if retcode < 0:
            sys.stderr.write("build terminated by signal %s" % (-retcode))
    except OSError as e:
        sys.stderr.write("build execution failed: %s" % e)
def build_r_docs(root_path):
    """Build the R reference-manual PDF and move it into the html output dir."""
    r_root = os.path.join(root_path, 'R-package')
    pdf_path = os.path.join(root_path, 'docs', 'api', 'r', 'mxnet-r-reference-manual.pdf')
    subprocess.call('cd ' + r_root +'; R CMD Rd2pdf . --no-preview -o ' + pdf_path, shell = True)
    dest_path = os.path.join(root_path, 'docs', '_build', 'html', 'api', 'r')
    subprocess.call('mkdir -p ' + dest_path, shell = True)
    subprocess.call('mv ' + pdf_path + ' ' + dest_path, shell = True)
def build_scala_docs(root_path):
    """Run scaladoc over the scala package and move its output into html."""
    scala_path = os.path.join(root_path, 'scala-package', 'core', 'src', 'main', 'scala', 'ml', 'dmlc', 'mxnet')
    subprocess.call('cd ' + scala_path + '; scaladoc `find . | grep .*scala`', shell = True)
    dest_path = os.path.join(root_path, 'docs', '_build', 'html', 'api', 'scala', 'docs')
    subprocess.call('mkdir -p ' + dest_path, shell = True)
    # scaladoc writes these fixed names into the source dir; relocate them.
    scaladocs = ['index', 'index.html', 'ml', 'lib', 'index.js', 'package.html']
    for doc_file in scaladocs:
        subprocess.call('cd ' + scala_path + ';mv ' + doc_file + ' ' + dest_path, shell = True)
# Fetch or refresh the recommonmark checkout used as the markdown parser.
if not os.path.exists('../recommonmark'):
    subprocess.call('cd ..; rm -rf recommonmark;' +
                    'git clone https://github.com/tqchen/recommonmark', shell = True)
else:
    subprocess.call('cd ../recommonmark/; git pull', shell=True)

# Top-level build pipeline, executed at import time by sphinx.
subprocess.call('./build-notebooks.sh')
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
root_path = os.path.join(curr_path, '..')
run_build_mxnet(root_path)
build_r_docs(root_path)
build_scala_docs(root_path)

sys.path.insert(0, os.path.abspath('../recommonmark/'))
from recommonmark import parser, transform
MarkdownParser = parser.CommonMarkParser
AutoStructify = transform.AutoStructify
| Python | 0 |
a0d3ae80a2f4f9ae76aaa4d672be460ce3a657d4 | add command to populate change messages | corehq/apps/users/management/commands/add_location_change_message.py | corehq/apps/users/management/commands/add_location_change_message.py | from django.core.management.base import BaseCommand
from django.db.models import Q
from corehq.apps.users.audit.change_messages import UserChangeMessage
from corehq.apps.users.models import UserHistory
class Command(BaseCommand):
help = "Add locations removed change messages on commcare user's User History records " \
"for https://github.com/dimagi/commcare-hq/pull/30253/commits/76996b5a129be4e95f5c5bedd0aba74c50088d15"
def add_arguments(self, parser):
parser.add_argument(
'--save',
action='store_true',
dest='save',
default=False,
help="actually update records else just log",
)
def handle(self, *args, **options):
save = options['save']
# since we need locations removed, filter for update logs
records = UserHistory.objects.filter(
Q(changes__has_key='location_id') | Q(changes__has_key='assigned_location_ids'),
user_type='CommCareUser',
action=UserHistory.UPDATE,
)
with open("add_location_change_message.csv", "w") as _file:
for record in records:
updated = False
if 'location_id' in record.changes and record.changes['location_id'] is None:
if 'location' not in record.change_messages:
record.change_messages.update(UserChangeMessage.primary_location_removed())
updated = True
if record.changes.get('assigned_location_ids') == []:
if 'assigned_locations' not in record.change_messages:
record.change_messages.update(UserChangeMessage.assigned_locations_info([]))
updated = True
if updated:
_file.write(
f"{record.pk},{record.user_id},{record.changes},{record.change_messages}\n"
)
if save:
record.save()
| Python | 0.000001 | |
b42596ff4175d12504272323e3a9ac0b8b4b7e0b | Add get_package.py for new packaging system | get_package.py | get_package.py | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright (c) 2018 The ungoogled-chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Simple package script generator.
"""
import argparse
import configparser
import re
import shutil
import string
import subprocess
from pathlib import Path
from buildkit.common import (ENCODING, BuildkitAbort, get_logger, get_chromium_version,
get_release_revision)
from buildkit.third_party import schema
# Constants
_PACKAGING_ROOT = Path(__file__).resolve().parent / 'packaging'
_PKGMETA = _PACKAGING_ROOT / 'pkgmeta.ini'
_PKGMETA_SCHEMA = schema.Schema({
schema.Optional(schema.And(str, len)): {
schema.Optional('depends'): schema.And(str, len),
schema.Optional('buildkit_copy'): schema.And(str, len),
}
})
# Classes
class _BuildFileStringTemplate(string.Template):
    """
    Custom string substitution class

    Matches "$ungoog_name" and "$ungoog{name}" placeholders (plus the escaped
    form "$ungoog$ungoog"); string.Template compiles this with re.VERBOSE.

    Inspired by
    http://stackoverflow.com/questions/12768107/string-substitutions-using-templates-in-python
    """
    pattern = r"""
    {delim}(?:
      (?P<escaped>{delim}) |
      _(?P<named>{id}) |
      {{(?P<braced>{id})}} |
      (?P<invalid>{delim}((?!_)|(?!{{)))
    )
    """.format(
        delim=re.escape("$ungoog"), id=string.Template.idpattern)
# Methods
def _process_templates(root_dir, build_file_subs):
    """
    Recursively substitute '$ungoog' strings in '.ungoogin' template files and
    remove the suffix
    """
    # Materialize the glob results first: the loop renames files while the
    # directory tree is being scanned, and mutating directories during a live
    # rglob iteration has unspecified results.
    for old_path in list(root_dir.rglob('*.ungoogin')):
        # foo.txt.ungoogin -> foo.txt
        new_path = old_path.with_name(old_path.stem)
        old_path.replace(new_path)
        with new_path.open('r+', encoding=ENCODING) as new_file:
            content = _BuildFileStringTemplate(new_file.read()).substitute(**build_file_subs)
            new_file.seek(0)
            new_file.write(content)
            new_file.truncate()
new_file.truncate()
def _get_current_commit():
    """
    Returns a string of the current commit hash.
    It assumes "git" is in PATH, and that buildkit is run within a git repository.
    Raises BuildkitAbort if invoking git fails.
    """
    # Run git from this script's own directory so the result is independent of
    # the caller's working directory.
    result = subprocess.run(
        ['git', 'rev-parse', '--verify', 'HEAD'],
        stdout=subprocess.PIPE,
        universal_newlines=True,
        cwd=str(Path(__file__).resolve().parent))
    if result.returncode:
        get_logger().error('Unexpected return code %s', result.returncode)
        get_logger().error('Command output: %s', result.stdout)
        raise BuildkitAbort()
    # Drop the trailing newline git prints after the hash.
    return result.stdout.strip('\n')
def _ini_section_generator(ini_parser):
"""
Yields tuples of a section name and its corresponding dictionary of keys and values
"""
for section in ini_parser:
if section == configparser.DEFAULTSECT:
continue
yield section, dict(ini_parser.items(section))
def _validate_and_get_pkgmeta():
    """
    Validates and returns the parsed pkgmeta

    Raises schema.SchemaError (after logging) when pkgmeta.ini does not
    conform to _PKGMETA_SCHEMA.
    """
    pkgmeta = configparser.ConfigParser()
    with _PKGMETA.open(encoding=ENCODING) as pkgmeta_file: #pylint: disable=no-member
        # Pass source= so parse errors reference the real file path.
        pkgmeta.read_file(pkgmeta_file, source=str(_PKGMETA))
    try:
        _PKGMETA_SCHEMA.validate(dict(_ini_section_generator(pkgmeta)))
    except schema.SchemaError as exc:
        get_logger().error('pkgmeta.ini failed schema validation at: %s', _PKGMETA)
        raise exc
    return pkgmeta
def _get_package_dir_list(package, pkgmeta):
    """
    Return a list of pathlib.Path packaging directories to be copied,
    ordered so that dependencies come first.

    Raises FileNotFoundError if a package directory cannot be found.
    """
    ordered_dirs = []
    name = package
    while name:
        candidate = _PACKAGING_ROOT / name
        if not candidate.exists():
            raise FileNotFoundError(candidate)
        ordered_dirs.append(candidate)
        # Follow the (linear) dependency chain declared in pkgmeta.ini.
        has_parent = name in pkgmeta and 'depends' in pkgmeta[name]
        name = pkgmeta[name]['depends'] if has_parent else None
    # The chain was collected leaf-first; flip so base packages come first.
    ordered_dirs.reverse()
    return ordered_dirs
def _get_package_files(package_dir_list):
"""Yields tuples of relative and full package file paths"""
resolved_files = dict()
for package_dir in package_dir_list:
for file_path in package_dir.rglob('*'):
relative_path = file_path.relative_to(package_dir)
resolved_files[relative_path] = file_path
yield from sorted(resolved_files.items())
def main():
    """CLI Entrypoint: copy a packaging type's files and expand templates."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('name', help='Name of packaging to generate')
    parser.add_argument('destination', type=Path, help='Directory to store packaging files')
    args = parser.parse_args()
    # Validate inputs up front; parser.error() exits with a usage message.
    if not args.destination.parent.exists():
        parser.error('Destination parent directory "{}" does not exist'.format(
            args.destination.parent))
    packaging_dir = _PACKAGING_ROOT / args.name
    if not packaging_dir.exists():
        parser.error('Packaging "{}" does not exist'.format(args.name))
    if not _PKGMETA.exists(): #pylint: disable=no-member
        parser.error('Cannot find pkgmeta.ini in packaging directory')
    if not args.destination.exists():
        args.destination.mkdir()
    pkgmeta = _validate_and_get_pkgmeta()
    # Copy the merged package tree (dependencies first, so dependents can
    # overwrite files from their dependencies).
    for relative_path, actual_path in _get_package_files(_get_package_dir_list(args.name, pkgmeta)):
        if actual_path.is_dir():
            if not (args.destination / relative_path).exists():
                (args.destination / relative_path).mkdir()
            shutil.copymode(str(actual_path), str(args.destination / relative_path))
        else:
            shutil.copy(str(actual_path), str(args.destination / relative_path))
    # Values substituted for $ungoog placeholders in *.ungoogin templates.
    packaging_subs = dict(
        chromium_version=get_chromium_version(),
        release_revision=get_release_revision(),
        current_commit=_get_current_commit(),
    )
    _process_templates(args.destination, packaging_subs)
_process_templates(args.destination, packaging_subs)
if __name__ == '__main__':
main()
| Python | 0.000001 | |
fb133e260722fd02cb6f14ede15dbdb1fdf91af7 | Add gtk dependencies tests | test/test_dependencies.py | test/test_dependencies.py | """
Copyright (c) 2017, Michael Sonntag (sonntag@bio.lmu.de)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted under the terms of the BSD License. See
LICENSE file in the root of the project.
"""
import unittest
class DependencyTest(unittest.TestCase):
    """
    This class checks for non python gtk3 dependencies.
    This Class will be removed, it is testing how travis and conda
    can play nice with gtk3.
    """
    def _gtk_import_fails(self, enable_compat=False, extra_modules=()):
        """Return True iff the requested gtk import chain raises.

        Factored out of the four tests below, which previously repeated the
        same try/import/flag boilerplate.

        :param enable_compat: also activate pygtkcompat's gtk 3.0 shims.
        :param extra_modules: legacy module names ('gtk', 'gobject') to
            import after the compat layer is enabled.
        """
        import importlib
        try:
            import gi  # noqa: F401 -- presence check only
            if enable_compat:
                import pygtkcompat
                pygtkcompat.enable()
                pygtkcompat.enable_gtk(version='3.0')
            for module_name in extra_modules:
                importlib.import_module(module_name)
        except (ImportError, ValueError):
            return True
        return False
    def test_gi_dependency(self):
        # PyGObject (gi) must be importable.
        self.assertFalse(self._gtk_import_fails())
    def test_pygtkcompat(self):
        # pygtkcompat must activate its gtk 3.0 compatibility layer.
        self.assertFalse(self._gtk_import_fails(enable_compat=True))
    def test_gtk(self):
        # The legacy 'gtk' name must resolve through pygtkcompat.
        self.assertFalse(self._gtk_import_fails(enable_compat=True,
                                                extra_modules=('gtk',)))
    def test_gobject(self):
        # The legacy 'gobject' name must resolve through pygtkcompat.
        self.assertFalse(self._gtk_import_fails(enable_compat=True,
                                                extra_modules=('gtk', 'gobject')))
| Python | 0 | |
c09356487360ec373c98ec50800a450a3966a60f | define prompt function | lib.py | lib.py | # weather-app
# lib.py
# Classes and functions for weather-app.
# Function definitions.
# ---------------------
# Prompt for user input. Accepts a prompt message we want to show.
def prompt(msg):
    """Display *msg* and return the user's typed reply.

    NOTE(review): under Python 2 the built-in input() *evaluates* the reply;
    if this app targets Python 2, raw_input() is wanted -- confirm.
    """
    return input(msg)
| Python | 0.000196 | |
e51185a5538ab20250d94c4fe5e71bcfcfed0e1e | trying to get the hash 1dbd981fe6985776b644b173a4d0385ddc1aa2a829688d1e0000000000000000 | btcrelay.py | btcrelay.py | # Stored variables:
#
# Last known block
# 10: version
# 11: hashPrevBlock
# 12: hashMerkleRoot
# 13: time
# 14: bits
# 15: nonce
# 16: blockHash / lastKnownBlock
# 17: score
#
# NOTE(review): this block is Ethereum Serpent, not Python -- shared()/init()/
# code() and self.storage are Serpent contract idioms; the dataset's language
# hint is wrong for this record.
def shared():
    # Named as a power of two; assumes Serpent's '^' is exponentiation
    # (in Python it would be bitwise XOR) -- TODO confirm.
    TWO_POW_24 = 2 ^ 24
def init():
    # Seed storage slot 16 (lastKnownBlock) with a hard-coded block hash.
    self.storage[16] = 0x00000000000000000cfdd50d917943949fa708829ab70108c98cdb9f7d62339d
def code():
    ret = self.slt(2,4)
    return(ret)
def storeBlockHeader(version, hashPrevBlock, hashMerkleRoot, time, bits, nonce):
    # Expand Bitcoin's compact difficulty: bits = exponent<<24 | mantissa,
    # target = mantissa << (8 * (exponent - 3)).
    exp = bits / TWO_POW_24
    mant = bits & 0xffffff
    target = mant * slt(1, (8*(exp - 3)))
def flipBytes(n):
    # Reverse the byte order of a 32-byte word (Bitcoin hashes are
    # serialized little-endian).
    numByte = 32
    mask = 0xff
    result = 0
    i = 0
    while i < numByte:
        b = n & mask
        b /= 2^(i*8)
        b *= 2^((numByte-i-1)*8)
        mask *= 256
        result = result | b
        i += 1
    return(result)
# shift left
def slt(n, x):
    return(n * 2^x)
def test():
    # Double SHA-256 of an 80-byte block header supplied as three words
    # (16 + 32 + 32 bytes).
    b1 = 0x0100000081cd02ab7e569e8bcd9317e2
    b2 = 0xfe99f2de44d49ab2b8851ba4a308000000000000e320b6c2fffc8d750423db8b
    b3 = 0x1eb942ae710e951ed797f7affc8892b0f1fc122bc7f5d74df2b9441a42a14695
    hash1 = sha256([b1,b2,b3], 3)
    hash2 = sha256([hash1], 1)
    return(hash2)
| # Stored variables:
#
# Last known block
# 10: version
# 11: hashPrevBlock
# 12: hashMerkleRoot
# 13: time
# 14: bits
# 15: nonce
# 16: blockHash / lastKnownBlock
# 17: score
#
def shared():
TWO_POW_24 = 2 ^ 24
def init():
self.storage[16] = 0x00000000000000000cfdd50d917943949fa708829ab70108c98cdb9f7d62339d
def code():
return(-1)
def storeBlockHeader(version, hashPrevBlock, hashMerkleRoot, time, bits, nonce):
exp = bits / TWO_POW_24
mant = bits & 0xffffff
target = mant * slt(1, (8*(exp - 3)))
def flipBytes(n):
numByte = 32
mask = 0xff
result = 0
i = 0
while i < numByte:
b = n & mask
b /= 2^(i*8)
b *= 2^((numByte-i-1)*8)
mask *= 256
result = result | b
i += 1
return(result)
# shift left
def slt(n, x):
return(n * 2^x)
| Python | 0.999988 |
e6bd5bbb3a46413b1ad164e0ef6ab66e89d9c95f | Add buildbot.py | buildbot.py | buildbot.py | #!/usr/bin/env python
# encoding: utf-8
project_name = 'stub'
def configure(options):
pass
def build(options):
pass
def run_tests(options):
pass
def coverage_settings(options):
options['required_line_coverage'] = 80.0
| Python | 0.000001 | |
97ea4f9019dcd5d4c01b4d5715297f25bc6aaf91 | Create bytesize.py | bytesize.py | bytesize.py | # -*- coding: utf-8 -*-
"""
@author: stk
"""
import sys
import itertools
from collections import defaultdict
_ver = sys.version_info
#: Python 2.x?
_is_py2 = (_ver[0] == 2)
#: Python 3.x?
_is_py3 = (_ver[0] == 3)
# Aliases that let the rest of the module be written once for both Python 2
# and Python 3 (str always means text, bytes always means binary data).
if _is_py2:
    builtin_str = str
    bytes = str
    str = unicode
    basestring = basestring
    numeric_types = (int, long, float)
    integer_types = (int, long)
elif _is_py3:
    builtin_str = str
    str = str
    bytes = bytes
    basestring = (str, bytes)
    numeric_types = (int, float)
    integer_types = (int,)
class _CaseInsensitiveDict(dict):
    """CaseInsensitiveDict with aliasing.

    String keys are normalized to upper case, and any key may additionally be
    reached through aliases registered with set_alias().
    """

    def __init__(self, *args, **kwargs):
        super(_CaseInsensitiveDict, self).__init__(*args, **kwargs)
        self._pseudonyms = defaultdict(list)
        self._convert_keys()

    def set_alias(self, key, aliaslist):
        """Register every name in *aliaslist* as a spelling of *key*."""
        self._pseudonyms[key] = set([key] + aliaslist)

    def get_alias(self, key):
        """Return the canonical key that *key* is an alias of, else None."""
        for canonical, spellings in self._pseudonyms.items():
            if key in spellings:
                return canonical
        return None

    def _k(self, key):
        """Return the normalized (upper-cased, de-aliased) form of *key*."""
        upper = key.upper() if isinstance(key, basestring) else key
        return self.get_alias(upper) or upper

    def __getitem__(self, key):
        return super(_CaseInsensitiveDict, self).__getitem__(self._k(key))

    def __setitem__(self, key, value):
        super(_CaseInsensitiveDict, self).__setitem__(self._k(key), value)

    def _convert_keys(self):
        """Re-insert keys set by the dict constructor to normalize them."""
        for existing in list(self.keys()):
            value = super(_CaseInsensitiveDict, self).pop(existing)
            self[existing] = value
# (multiplier, suffix) pairs for every supported binary unit, smallest first.
B = SZ_B,SUFFIX_B = 1<<0, 'B'
KB = SZ_KB,SUFFIX_KB = 1<<10, 'KB'
MB = SZ_MB,SUFFIX_MB = 1<<20, 'MB'
GB = SZ_GB,SUFFIX_GB = 1<<30, 'GB'
TB = SZ_TB,SUFFIX_TB = 1<<40, 'TB'
PB = SZ_PB,SUFFIX_PB = 1<<50, 'PB'
EB = SZ_EB,SUFFIX_EB = 1<<60, 'EB'
ZB = SZ_ZB,SUFFIX_ZB = 1<<70, 'ZB'
YB = SZ_YB,SUFFIX_YB = 1<<80, 'YB'
_size_list = [B, KB, MB, GB, TB, PB, EB, ZB, YB]
_size_names = [suffix for _,suffix in _size_list]
_size_multipliers = [mult for mult,_ in _size_list]
# Case-insensitive suffix -> multiplier lookup, with long-form aliases.
size_map = _CaseInsensitiveDict(zip(_size_names, _size_multipliers))
size_map.set_alias(SUFFIX_B, ['BYTE', 'BYTES', ''])
size_map.set_alias(SUFFIX_KB, ['KILO', 'KILOS', 'KILOBYTE', 'KILOBYTES', 'K'])
size_map.set_alias(SUFFIX_MB, ['MEGA', 'MEGAS', 'MEGABYTE', 'MEGABYTES', 'M'])
size_map.set_alias(SUFFIX_GB, ['GIGA', 'GIGAS', 'GIGABYTE', 'GIGABYTES', 'G'])
size_map.set_alias(SUFFIX_TB, ['TERA', 'TERAS', 'TERABYTE', 'TERABYTES', 'T'])
size_map.set_alias(SUFFIX_PB, ['PETA', 'PETAS', 'PETABYTE', 'PETABYTES', 'P'])
# 'EXABS' kept for backward compatibility; 'EXAS' added to match the
# KILOS/MEGAS/... naming pattern.
size_map.set_alias(SUFFIX_EB, ['EXA', 'EXAS', 'EXABS', 'EXABYTE', 'EXABYTES', 'E'])
# Fixed: the ZB aliases were a copy-paste of the EB ones ('EXABYTE',
# 'EXABYTES', 'E'), which both collided with EB (get_alias returns the
# first match, EB) and left 'Z'/'ZETTABYTE'/'ZETTABYTES' unrecognized.
size_map.set_alias(SUFFIX_ZB, ['ZETA', 'ZETTA', 'ZETTABYTE', 'ZETTABYTES', 'Z'])
size_map.set_alias(SUFFIX_YB, ['YOTA', 'YOTTA', 'YOTTABYTE', 'YOTTABYTES', 'Y'])
class ByteSize(int):
    """An int subclass whose value is a byte count parsed from inputs like
    '10MB', '512 kilobytes' or a plain non-negative integer (bytes).
    """
    def as_unit(self, unit_suffix):
        # Express this byte count in the unit named by *unit_suffix*.
        dividend, divisor = self, size_map[unit_suffix]
        # k forces float division for any unit larger than a byte.
        # NOTE(review): under Python 2, the 'B' case (divisor == 1) truncates
        # to an int -- confirm that asymmetry is intended.
        k = 1.0 if divisor>1 else 1
        return dividend/(k*divisor)
    def __new__(cls, value):
        # Split the input into alternating digit / non-digit runs, e.g.
        # '10MB' -> ['10', 'MB'];  1024 -> ['1024'].  Negative numbers and
        # multi-part strings therefore raise ValueError below.
        kv = [''.join(x) for _, x in itertools.groupby(str(value), key=str.isdigit)]
        if not kv or len(kv) > 2:
            raise ValueError('invalid ByteSize Val: %s' % str(value))
        # Default to plain bytes when no suffix was given.
        multiplicand, multiplier = (kv+['B'])[:2]
        multiplicand = int(multiplicand)
        multiplier = size_map[str(multiplier).strip()]
        bytesize = multiplicand*multiplier
        return int.__new__(cls, bytesize)
| Python | 0.00001 | |
399cd799ae993412a6ad2455b8e11f4019aa9509 | Add models admin | td_biblio/admin.py | td_biblio/admin.py | # -*- coding: utf-8 -*-
from django.contrib import admin
from .models import Author, Editor, Journal, Publisher, Entry, Collection
# Shared admin config for person-like models (Author, Editor).
class AbstractHumanAdmin(admin.ModelAdmin):
    list_display = ('last_name', 'first_name')
class AuthorAdmin(AbstractHumanAdmin):
    pass
class EditorAdmin(AbstractHumanAdmin):
    pass
class JournalAdmin(admin.ModelAdmin):
    pass
class PublisherAdmin(admin.ModelAdmin):
    pass
class EntryAdmin(admin.ModelAdmin):
    # Bibliography entries get list filtering and date drill-down.
    list_display = ('title', 'type', 'publication_date', 'journal')
    list_filter = ('publication_date', 'journal', 'authors')
    date_hierarchy = 'publication_date'
class CollectionAdmin(admin.ModelAdmin):
    pass
admin.site.register(Author, AuthorAdmin)
admin.site.register(Editor, EditorAdmin)
admin.site.register(Journal, JournalAdmin)
admin.site.register(Publisher, PublisherAdmin)
admin.site.register(Entry, EntryAdmin)
admin.site.register(Collection, CollectionAdmin)
| Python | 0 | |
6d3f6951d846c50fcc1ff011f9129a4e1e3f7de1 | Add unit tests for BMI version of storm | testing/test_storm_bmi.py | testing/test_storm_bmi.py | #! /usr/bin/env python
#
# Tests for the BMI version of `storm`.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import shutil
from subprocess import call
# Global variables
start_dir = os.getcwd()
data_dir = os.path.join(start_dir, 'testing', 'data')
input_file1 = os.path.join(data_dir, 'test1.in')
input_file2 = os.path.join(data_dir, 'test2.in')
build_dir = os.path.join(start_dir, 'build')
exe = './bmi/storm'
# Fixtures -------------------------------------------------------------
def setup_module():
    '''
    Called before any tests are performed.

    Creates an out-of-source build/ directory and makes it the cwd so the
    cmake/make/run tests below operate inside it.
    '''
    print('*** BMI tests')
    os.mkdir(build_dir)
    os.chdir(build_dir)
def teardown_module():
'''
Called after all tests have completed.
'''
os.chdir(start_dir)
shutil.rmtree(build_dir)
# Tests ----------------------------------------------------------------
def test_configure():
    '''
    Test whether CMake executes successfully
    '''
    # Assert on the exit status; a bare call() would let a failing configure
    # step pass silently (consistent with the run tests in this module).
    r = call(['cmake', '..'])
    assert_equal(r, 0)
def test_compile():
'''
Test whether `storm` compiles
'''
call(['make'])
def test_without_input_file():
'''
Check that storm runs without an input file
'''
r = call([exe])
assert_equal(r, 0)
def test_with_singlestep_input_file():
'''
Check that storm runs with a one-step input file
'''
r = call([exe, input_file1])
assert_equal(r, 0)
def test_with_multistep_input_file():
'''
Check that storm runs with a multi-step input file
'''
r = call([exe, input_file2])
assert_equal(r, 0)
| Python | 0 | |
9ef0e5e6dc50af7d5ccc27cc4d41abce72b51456 | Create runcount.py | bin/runcount.py | bin/runcount.py | #!/usr/bin/python
| Python | 0.000002 | |
671a5abc1f04930c749745c2ec0a59000d6e69a8 | Add profile_rec script. (transplanted from ab55d87908f16cf7e2fac0a1938b280204a612bf) | tests/test_functional/profile_rec.py | tests/test_functional/profile_rec.py | import profile
import pstats
import tempfile
import os
import time
from routes import Mapper
def bench_rec(n):
    # Build a representative Routes mapper (mirrors a typical blog app).
    m = Mapper()
    m.connect('', controller='articles', action='index')
    m.connect('admin', controller='admin/general', action='index')
    m.connect('admin/comments/article/:article_id/:action/:id',
              controller = 'admin/comments', action = None, id=None)
    m.connect('admin/trackback/article/:article_id/:action/:id',
              controller='admin/trackback', action=None, id=None)
    m.connect('admin/content/:action/:id', controller='admin/content')
    m.connect('xml/:action/feed.xml', controller='xml')
    m.connect('xml/articlerss/:id/feed.xml', controller='xml',
              action='articlerss')
    m.connect('index.rdf', controller='xml', action='rss')
    m.connect('articles', controller='articles', action='index')
    m.connect('articles/page/:page', controller='articles',
              action='index', requirements = {'page':'\d+'})
    m.connect(
        'articles/:year/:month/:day/page/:page',
        controller='articles', action='find_by_date', month = None,
        day = None,
        requirements = {'year':'\d{4}', 'month':'\d{1,2}','day':'\d{1,2}'})
    m.connect('articles/category/:id', controller='articles', action='category')
    m.connect('pages/*name', controller='articles', action='view_page')
    m.create_regs(['content','admin/why', 'admin/user'])
    # Time n-1 iterations of 10 recognition attempts each (mix of hits
    # and deliberate misses).
    start = time.time()
    for x in range(1,n):
        a = m.match('/content')
        a = m.match('/content/list')
        a = m.match('/content/show/10')
        a = m.match('/admin/user')
        a = m.match('/admin/user/list')
        a = m.match('/admin/user/show/bbangert')
        a = m.match('/admin/user/show/bbangert/dude')
        a = m.match('/admin/why/show/bbangert')
        a = m.match('/content/show/10/20')
        a = m.match('/food')
    end = time.time()
    # Time an empty loop of the same length to subtract loop overhead.
    ts = time.time()
    for x in range(1,n):
        pass
    en = time.time()
    total = end-start-(en-ts)
    # 10 urls matched per iteration.
    per_url = total / (n*10)
    print "Recognition\n"
    print "%s ms/url" % (per_url*1000)
    print "%s urls/s\n" % (1.00/per_url)
def do_profile(cmd, globals, locals, sort_order, callers):
    """Profile *cmd* (a statement string) and print sorted stats to stdout.

    :param sort_order: tuple of pstats sort keys, or None for the default
        ('cumulative', 'calls', 'time').
    :param callers: when truthy, print callers instead of plain stats.
    """
    fd, fn = tempfile.mkstemp()
    try:
        if hasattr(profile, 'runctx'):
            profile.runctx(cmd, globals, locals, fn)
        else:
            raise NotImplementedError(
                'No profiling support under Python 2.3')
        stats = pstats.Stats(fn)
        stats.strip_dirs()
        # calls,time,cumulative and cumulative,calls,time are useful
        stats.sort_stats(*sort_order or ('cumulative', 'calls', 'time'))
        if callers:
            stats.print_callers(.3)
        else:
            stats.print_stats(.3)
    finally:
        # mkstemp returns an *open* OS-level fd; close it so the descriptor
        # is not leaked along with removing the file itself.
        os.close(fd)
        os.remove(fn)
def main(n=300):
    # Profile n iterations of the recognition benchmark, hottest-first.
    do_profile('bench_rec(%s)' % n, globals(), locals(),
               ('time', 'cumulative', 'calls'), None)
if __name__ == '__main__':
main()
| Python | 0.000001 | |
5c5bf274c72ef67a3a2a2e5d6713df910026dcdb | Add hash plugin | plugins/hash.py | plugins/hash.py | import hashlib
import sys
class Plugin:
    """Bot plugin that prints the hex digest of its arguments (or stdin).

    The first command-line argument names the hashlib algorithm; any further
    arguments form the message, falling back to stripped stdin when absent.
    """

    def on_command(self, bot, msg):
        if len(sys.argv) < 2:
            print(self.on_help(bot))
            return
        algorithm = sys.argv[1]
        # Prefer the inline arguments; read stdin only when none were given.
        contents = " ".join(sys.argv[2:]) or sys.stdin.read().strip()
        digest = hashlib.new(algorithm)
        digest.update(contents.encode("utf-8"))
        print(digest.hexdigest())

    def on_help(self, bot):
        return "Usage: hash <algorithm> <contents>"
| Python | 0.000001 | |
086371f56748da9fb68acc4aaa10094b6cf24fcb | Revert "Remove pgjsonb returner unit tests" | tests/unit/returners/test_pgjsonb.py | tests/unit/returners/test_pgjsonb.py | # -*- coding: utf-8 -*-
'''
tests.unit.returners.pgjsonb_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Unit tests for the PGJsonb returner (pgjsonb).
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON,
patch
)
# Import Salt libs
import salt.returners.pgjsonb as pgjsonb
log = logging.getLogger(__name__)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PGJsonbCleanOldJobsTestCase(TestCase, LoaderModuleMockMixin):
'''
Tests for the local_cache.clean_old_jobs function.
'''
def setup_loader_modules(self):
return {pgjsonb: {'__opts__': {'keep_jobs': 1, 'archive_jobs': 0}}}
def test_clean_old_jobs_purge(self):
'''
Tests that the function returns None when no jid_root is found.
'''
connect_mock = MagicMock()
with patch.object(pgjsonb, '_get_serv', connect_mock):
with patch.dict(pgjsonb.__salt__, {'config.option': MagicMock()}):
self.assertEqual(pgjsonb.clean_old_jobs(), None)
def test_clean_old_jobs_archive(self):
'''
Tests that the function returns None when no jid_root is found.
'''
connect_mock = MagicMock()
with patch.object(pgjsonb, '_get_serv', connect_mock):
with patch.dict(pgjsonb.__salt__, {'config.option': MagicMock()}):
with patch.dict(pgjsonb.__opts__, {'archive_jobs': 1}):
self.assertEqual(pgjsonb.clean_old_jobs(), None)
| Python | 0 | |
077607b1b7fe705992c9f59f7dc94f2386aef4bb | add memcached | testutils/servers/memcache_server.py | testutils/servers/memcache_server.py | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import signal
from testutils import sock_utils
from testutils import subprocutil
class MemcacheServer(object):
    """Manage a throwaway memcached subprocess for tests."""
    def __init__(self, port=None):
        # Pick a free port unless the caller supplied one.
        self.port = port if port else sock_utils.findPorts(num=1)[0]
        self.server = subprocutil.GenericSubprocess(
            args=['memcached',
                '-p', str(self.port),
                ],
            )
    def start(self):
        self.server.start()
        # Block until the daemon accepts connections (or the process dies).
        sock_utils.tryConnect('::', self.port, abortFunc=self.server.check)
    def check(self):
        return self.server.check()
    def stop(self):
        # SIGQUIT asks memcached to shut down; escalate after the timeout.
        self.server.kill(signum=signal.SIGQUIT, timeout=3)
    def reset(self):
        # Nothing to clear between tests.
        pass
    def getHostPort(self):
        return '127.0.0.1:%d' % self.port
| Python | 0.000001 | |
d95f24d43f3925a91176429cca1aaac30a0c55aa | Create java module main | genes/java/main.py | genes/java/main.py | from genes import apt, debconf
import platform
class Config:
    # Host facts captured once at import time.
    OS = platform.system()
    # NOTE(review): platform.linux_distribution() was removed in Python 3.8,
    # and returns empty strings on non-Linux hosts -- confirm targets.
    (DIST, _, CODE) = platform.linux_distribution()
    REPO = DIST.lower() + '-' + CODE
def main():
    """Install Oracle Java 8 using the host platform's package tooling."""
    if Config.OS == 'Linux':
        if Config.DIST == 'Ubuntu' or Config.DIST == 'Debian':
            #FIXME: debian needs ppa software
            apt.add_repo('ppa:webupd8team/java')
            apt.update()
            # Pre-accept the Oracle license so the install is non-interactive.
            debconf.set_selections('oracle-java8-installer',
                                   'shared/accepted-oracle-license-v1-1',
                                   'select', 'true')
            apt.install('oracle-java8-installer')
        else:
            #FIXME: print failure case
            pass
    elif Config.OS == 'Darwin':
        #brew_cask.install('java8')
        pass
    else:
        #FIXME: print failure, handle windows
        pass
| Python | 0.000001 | |
6d87badb68f2e20a3907f670b9190956ebd127e8 | Create AddBinaryNumbers.py | math/AddBinaryNumbers/Python/AddBinaryNumbers.py | math/AddBinaryNumbers/Python/AddBinaryNumbers.py | number1 = input("Enter the first number: ")
number2 = input("Enter the second number: ")
result = (int(number1, 2) + int(number2, 2))
result = bin(result)
print(result[2:])
| Python | 0.000004 | |
c71a43dae259299952cec082d33f003ecaeb9eab | Add marky test. | tests/py/test_markdown.py | tests/py/test_markdown.py | from gratipay.testing import Harness
from gratipay.utils import markdown
from HTMLParser import HTMLParser
class TestMarkdown(Harness):
    def test_marky_works(self):
        """Bold markdown renders to a <strong> element."""
        md = "**Hello World!**"
        # marky's output is run through unescape before comparing --
        # presumably it returns HTML-escaped markup; confirm against marky.
        actual = HTMLParser().unescape(markdown.marky(md)).strip()
        expected = '<p><strong>Hello World!</strong></p>'
        assert actual == expected
| Python | 0 | |
e26e2ed264175d86fb22c651486e1c97dc1f3a1a | Create break_xhill3.py | break_xhill3.py | break_xhill3.py | from itertools import product
from ngram_score import ngram_score
L2I = dict(zip("ABCDEFGHIJKLMNOPQRSTUVWXYZ",range(26)))
I2L = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
import sys
ctext = 'hwduyfsfqdxnx nx ymj fwy tk gwjfpnsl htijx fsi hnumjwx. bmjs fyyjruynsl yt hwfhp f mnqq hnumjw, kwjvzjshd fsfqdxnx bnqq gj uwfhynhfqqd zxjqjxx'
def hill3decipher(ctext, key, key2):
    """Decrypt *ctext* with a 3x3 Hill cipher plus an additive shift.

    *key* is the 9-element (row-major) matrix, *key2* the 3-element shift
    added to each output row before the mod-26 reduction.  The input is
    padded with 'X' up to a multiple of three.
    """
    pad = (-len(ctext)) % 3
    ctext = ctext + 'X' * pad
    out = []
    for i in range(0, len(ctext), 3):
        block = [L2I[c] for c in ctext[i:i + 3]]
        for row in range(3):
            total = key2[row]
            for col in range(3):
                total += key[3 * row + col] * block[col]
            out.append(I2L[total % 26])
    return ''.join(out)
# keep a list of the N best things we have seen, discard anything else
# the list may be greater than N, and unsorted. Call finalise() before accessing
# to guarantee correct length and sorted order.
class nbest(object):
    """Bounded tracker of the N highest-sorting items seen so far."""
    def __init__(self,N=1000):
        self.store = []
        self.N = N
    def add(self,item):
        self.store.append(item)
        # Compact once the buffer doubles.  Fixed: this compared against the
        # bare name N (accidentally a module-level global) instead of self.N.
        if len(self.store)>2*self.N: self.finalise()
    def finalise(self):
        self.store.sort(reverse=True)
        self.store = self.store[:self.N]
    def __getitem__(self,k):
        return self.store[k]
    def __len__(self):
        return len(self.store)
import re
# Staged hill-climb attack on a 3x3 Hill cipher with additive shift:
# stage 1 scores candidate first rows with monogram statistics, stage 2
# extends the best survivors with bigram-scored second rows, and stage 3
# completes invertible keys scored with quadgrams.
#ctext ='XUKEXWSLZJUAXUNKIGWFSOZRAWURORKXAOSLHROBXBTKCMUWDVPTFBLMKEFVWMUXTVTWUIDDJVZKBRMCWOIWYDXMLUFPVSHAGSVWUFWORCWUIDUJCNVTTBERTUNOJUZHVTWKORSVRZSVVFSQXOCMUWPYTRLGBMCYPOJCLRIYTVFCCMUWUFPOXCNMCIWMSKPXEDLYIQKDJWIWCJUMVRCJUMVRKXWURKPSEEIWZVXULEIOETOOFWKBIUXPXUGOWLFPWUSCH'
ctext = re.sub('[^A-Z]','',ctext.upper())
mono = ngram_score('monograms.txt')
bi = ngram_score('bigrams.txt')
quad = ngram_score('quadgrams.txt')
N = 100
rec = nbest(N)
# Stage 1: search the first key row plus its shift; the even/13 filters
# skip rows that could never belong to a matrix invertible mod 26.
for seq in product(range(26),repeat=4):
    if seq[0]%2 == 0 and seq[1]%2 == 0 and seq[2]%2 == 0:
        continue
    if seq[0]%13 == 0 and seq[1]%13 == 0 and seq[2]%13 == 0:
        continue
    seq2 = (seq[0],seq[1],seq[2],1,1,1,1,1,1)
    txt = hill3decipher(ctext,seq2,(seq[3],0,0))
    score = 0
    # Only every third output letter depends on the row under test.
    for i in range(0,len(txt),3):
        score += mono.score(txt[i])
    rec.add((score,seq2,(seq[3],0,0)))
rec.finalise()
print 'stage 1 complete...'
rec2 = nbest(N)
# Stage 2: for each stage-1 survivor, search the second row and shift.
for j in range(N):
    print j,
    sys.stdout.flush()
    for seq in product(range(26),repeat=4):
        if seq[0]%2 == 0 and seq[1]%2 == 0 and seq[2]%2 == 0:
            continue
        if seq[0]%13 == 0 and seq[1]%13 == 0 and seq[2]%13 == 0:
            continue
        seq2 = (rec[j][1][0],rec[j][1][1],rec[j][1][2],seq[0],seq[1],seq[2],1,1,1)
        txt = hill3decipher(ctext,seq2,(rec[j][2][0],seq[3],0))
        score = 0
        for i in range(0,len(txt),3):
            score += bi.score(txt[i:i+2])
        rec2.add((score,seq2,(rec[j][2][0],seq[3],0)))
print 'stage 2 complete.'
rec2.finalise()
rec3 = nbest(N)
# Stage 3: complete the third row; keep only keys whose determinant is
# coprime with 26 (odd and not divisible by 13), i.e. invertible matrices.
for j in range(N):
    print j,
    sys.stdout.flush()
    for seq in product(range(26),repeat=4):
        seq2 = (rec2[j][1][0],rec2[j][1][1],rec2[j][1][2],rec2[j][1][3],rec2[j][1][4],rec2[j][1][5],seq[0],seq[1],seq[2])
        da = (seq2[0]*seq2[4]*seq2[8] + seq2[1]*seq2[5]*seq2[6] + seq2[2]*seq2[3]*seq2[7]) - (seq2[2]*seq2[4]*seq2[6] + seq2[1]*seq2[3]*seq2[8] + seq2[0]*seq2[5]*seq2[7])
        if da % 2 != 0 and da % 13 !=0:
            txt = hill3decipher(ctext,seq2,(rec2[j][2][0],rec2[j][2][1],seq[3]))
            score = quad.score(txt)
            rec3.add((score,seq2,(rec2[j][2][0],rec2[j][2][1],seq[3])))
        # also try other permutation
        seq2 = (seq[0],seq[1],seq[2],rec2[j][1][0],rec2[j][1][1],rec2[j][1][2],rec2[j][1][3],rec2[j][1][4],rec2[j][1][5])
        da = (seq2[0]*seq2[4]*seq2[8] + seq2[1]*seq2[5]*seq2[6] + seq2[2]*seq2[3]*seq2[7]) - (seq2[2]*seq2[4]*seq2[6] + seq2[1]*seq2[3]*seq2[8] + seq2[0]*seq2[5]*seq2[7])
        if da % 2 != 0 and da % 13 !=0:
            txt = hill3decipher(ctext,seq2,(seq[3],rec2[j][2][0],rec2[j][2][1]))
            score = quad.score(txt)
            rec3.add((score,seq2,(seq[3],rec2[j][2][0],rec2[j][2][1])))
rec3.finalise()
print 'stage 3 complete.'
for j in range(10):
    print rec3[j], hill3decipher(ctext,rec3[j][1],rec3[j][2])
| Python | 0.00015 | |
722b1d55c771e628ba82bbd5b8f8f5de047112af | Add a hex dump utility class. | tests/hexdumper.py | tests/hexdumper.py | # This hack by: Raymond Hettinger
class hexdumper:
"""Given a byte array, turn it into a string. hex bytes to stdout."""
def __init__(self):
self.FILTER=''.join([(len(repr(chr(x)))==3) and chr(x) or '.' \
for x in range(256)])
def dump(self, src, length=8):
result=[]
for i in xrange(0, len(src), length):
s = src[i:i+length]
hexa = ' '.join(["%02X"%ord(x) for x in s])
printable = s.translate(self.FILTER)
result.append("%04X %-*s %s\n" % \
(i, length*3, hexa, printable))
return ''.join(result)
| Python | 0 | |
f7a8c0b6e361ce3e5f0980b539b843b33fea258d | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/4464fe2aad5cccfd7935b0f1767901eb08e99784. | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "4464fe2aad5cccfd7935b0f1767901eb08e99784"
TFRT_SHA256 = "cc3b5b95a2da47710ade8b2d3c0046cd05750f94db5f3feb58a224ae7163db2f"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "191a16a25cc901e12535893b94aca169916d378c"
TFRT_SHA256 = "11b5d8d41bc4a6c1c6c7f9c6958c834aef832162ca75806f799bb51c9119b93d"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| Python | 0 |
c5b0c56f53dee5577641a668019f40f9468017ea | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/83d3045fb5476bed115ae438871a228c1c682af1. | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "83d3045fb5476bed115ae438871a228c1c682af1"
TFRT_SHA256 = "bdde8691c6a17c803de04423271b3534a421fd323627dc607b1fddf2f454e52c"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "6ca793b5d862ef6c50f242d77a811f06cce9b60a"
TFRT_SHA256 = "720b059a6b1d5757a76e56cf4a3a791b58e5d020858f6b67b077839963bffe8c"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| Python | 0 |
a3a9d4d6538b025d0c6c821a72076e084a5b597b | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/9dac1ed1ebc2350ada97b16093174a1a0bbd56d0. | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "9dac1ed1ebc2350ada97b16093174a1a0bbd56d0"
TFRT_SHA256 = "89eea9ff0c9dfca61037c4da051a6ddf4d4598614f7ca08a240355d1635f8786"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "553df8c12e9ba5930b9b8065f1d012ea07c6044c"
TFRT_SHA256 = "477d0374b044c60cd018fdb17e7c6054e190e59e36e1a442eb5d1628efb2341d"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| Python | 0.000001 |
2e53ae34ec03485302d5d7e6e5dd05707bbd1cf6 | Add camera tests | tests/test_camera.py | tests/test_camera.py | import os
import pygame
from ..sappho import Camera
from .common import compare_pygame_surfaces
class TestCamera(object):
    """Tests for sappho.Camera scrolling and scaling."""

    def test_scroll(self):
        """Scrolling the camera changes which source pixel is rendered."""

        # Create surface to render to
        output_surface = pygame.surface.Surface((1, 1))

        # Create fixtures. Fix: the "blue" fixture was filled with
        # (0, 255, 0), which is green; use an actual blue RGB triple.
        red_surface = pygame.surface.Surface((1, 1))
        blue_surface = pygame.surface.Surface((1, 1))
        red_surface.fill((255, 0, 0))
        blue_surface.fill((0, 0, 255))

        # Create the camera and blit colors to it: red at x=0 and blue
        # at x=1 on the 2x1 source surface.
        camera = Camera((2, 1), (1, 1), (1, 1))
        camera.blit(red_surface, (0, 0))
        camera.blit(blue_surface, (1, 0))

        # We should be at (0, 0) so blitting should get us a red pixel
        output_surface.blit(camera, (0, 0))
        assert(compare_pygame_surfaces(red_surface, output_surface))

        # Scroll one pixel along x, and we should get the blue pixel
        # when blitting
        camera.scroll(1, 0)
        output_surface.blit(camera, (0, 0))
        assert(compare_pygame_surfaces(blue_surface, output_surface))

    def test_scale(self):
        """A camera with a larger output size scales its source up."""

        # Create surface to render to
        output_surface = pygame.surface.Surface((10, 10))

        # Create fixtures
        red_small = pygame.surface.Surface((1, 1))
        red_large = pygame.surface.Surface((10, 10))
        red_small.fill((255, 0, 0))
        red_large.fill((255, 0, 0))

        # Create the camera with scaling enabled and blit our red pixel to it
        camera = Camera((1, 1), (10, 10), (1, 1))
        camera.blit(red_small, (0, 0))

        # Blit and compare
        output_surface.blit(camera, (0, 0))
        assert(compare_pygame_surfaces(output_surface, red_large))
349918610081c8c02dc75fdafd47f647814dd63c | add converter of string to format maya understands for changing setting of fps | mindbender/maya/pythonpath/mayafpsconverter.py | mindbender/maya/pythonpath/mayafpsconverter.py | def mayafpsconverter(Sfps):
condition = 0
if Sfps == "":
condition = 1
return Sfps
if Sfps == "15":
condition = 1
return "game"
if Sfps == "24":
condition = 1
return "film"
if Sfps == "25":
condition = 1
return "pal"
if Sfps == "30":
condition = 1
return "ntsc"
if Sfps == "48":
condition = 1
return "show"
if Sfps == "50":
condition = 1
return "palf"
if Sfps == "60":
condition = 1
return "ntscf"
ERRORSTRING = "MINDBENDER_FPS has bad value in the bat file"
if str(Sfps).isdigit() is False:
cmds.confirmDialog(
title="Enviroment variable error",
message=ERRORSTRING,
button="",
defaultButton="",
cancelButton="",
dismissString="")
return ""
if condition == 0:
Sfps = str(Sfps) + "fps"
return Sfps | Python | 0 | |
3eafac9d71f7f885f66a63218557194291c649f7 | add config test | tests/test_config.py | tests/test_config.py | import pytest
from pytest_girder.assertions import assertStatusOk, assertStatus
from slicer_cli_web.config import PluginSettings
@pytest.mark.plugin('slicer_cli_web')
def test_default_task_folder(server, admin, folder):
    """The task-folder setting rejects garbage and accepts a folder id."""
    # Test the setting: a value that is not a folder id must be rejected
    # with HTTP 400.
    resp = server.request('/system/setting', method='PUT', params={
        'key': PluginSettings.SLICER_CLI_WEB_TASK_FOLDER,
        'value': 'bad value'
    }, user=admin)
    assertStatus(resp, 400)
    # A real folder id is accepted...
    resp = server.request('/system/setting', method='PUT', params={
        'key': PluginSettings.SLICER_CLI_WEB_TASK_FOLDER,
        'value': folder['_id']
    }, user=admin)
    assertStatusOk(resp)
    # ...and is then reported back by the PluginSettings helpers.
    assert PluginSettings.has_task_folder()
    assert PluginSettings.get_task_folder()['_id'] == folder['_id']
cfc89a542ebb9b1745bb8a7ce30f79dad12a16b7 | add mslib tool to build static C libraries. | yaku/tools/mslib.py | yaku/tools/mslib.py | import yaku.utils
import yaku.task
def setup(ctx):
    """Configure the environment to build static libraries with MSVC's
    lib.exe, and patch task execution so the "/OUT:" flag is fused with
    the argument that follows it (lib.exe expects "/OUT:target" as a
    single token).
    """
    ctx.env["STLINK"] = ["lib.exe"]
    ctx.env["STLINK_TGT_F"] = ["/OUT:"]
    ctx.env["STLINK_SRC_F"] = []
    ctx.env["STLINKFLAGS"] = ["/nologo"]
    ctx.env["STATICLIB_FMT"] = "%s.lib"

    # XXX: hack -- globally wrap Task.exec_command so that a bare
    # "/OUT:" element is merged with the next command-line element
    # before the command runs.
    # NOTE(review): each call to setup() wraps exec_command again;
    # presumably setup() runs once per build -- confirm.
    saved = yaku.task.Task.exec_command
    def msvc_exec_command(self, cmd, cwd):
        new_cmd = []
        carry = ""
        for c in cmd:
            if c in ["/OUT:"]:
                # Hold the flag and prepend it to the next element.
                carry = c
            else:
                c = carry + c
                carry = ""
            new_cmd.append(c)
        saved(self, new_cmd, cwd)
    yaku.task.Task.exec_command = msvc_exec_command
def detect(ctx):
    """Return True if MSVC's lib.exe can be found on the PATH."""
    return yaku.utils.find_program("lib.exe") is not None
| Python | 0 | |
367a7cdcb02d2d8c15e9a2375c5304b2ad9c89ac | Add the basic tools as functions to facilitate basic operations | ytranslate/tools.py | ytranslate/tools.py | # Copyright (c) 2015, LE GOFF Vincent
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of ytranslate nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module containing different tools as functions.
They can be called to interact with the created object of the
'ytranslate' library, including the catalogs and loader.
"""
from ytranslate.fsloader import FSLoader
def init(root_dir):
"""Load the catalogs at a specified location.
The 'root_dir', the parent directory, is sent to the FSLoader
class which is to create a hierarchy of catalogs. The parent
catalogs bear the name of the namespace (that is their
directory or their filename without the '.yml' extension).
For instance:
init("path/to/translations")
Use the 'select' function to then select a catalog.
"""
fsloader = FSLoader(root_dir)
FSLoader.current_loader = fsloader
fsloader.load()
def select(catalog):
"""Select the catalog from the loader.
The catalog's name must be specified. If the loader is a
FSLoader (the default), then the 'root_dir' directory contains
the parent catalogs. You should use one of its contained
directoriess' names, or that of a MYL file without the '.yml'
extension. For instance:
select("en")
"""
if FSLoader.current_loader:
FSLoader.current_loader.select(catalog)
else:
raise ValueError("the current loader hasn't been selected")
def t(address, count=None, **kwargs):
    """Retrieve the translated message from the selected catalog.

    You can use this function to obtain the translated message,
    corresponding to the address, which must represent the list of
    namespaces separated by '.'.  For instance:
        t("ui.title")

    The hierarchy of messages is defined by the catalog's structure
    (directories and files, if it has been selected by a FSLoader,
    which is the default choice).

    You can also use placeholders as named parameters:
        t("welcome.name", user="John")

    Additionally, you can vary the message according to a number.
    For instance:
        t("notification.emails", 3)

    See the user documentation for a detailed explanation about
    the syntax and corresponding catalogs.

    Raises ValueError if no catalog has been selected yet (see
    the 'init' and 'select' functions).
    """
    if FSLoader.current_catalog:
        return FSLoader.current_catalog.retrieve(address, count, **kwargs)
    raise ValueError("no catalog has been selected")
| Python | 0.000071 | |
8660c7fda8cc7290fadeed7a39f06218087d9401 | Add draft test module for linter | tests/test_linter.py | tests/test_linter.py | import logging
import pytest
from mappyfile.validator import Validator
def validate(d):
    """Validate ``d`` with a freshly constructed mappyfile Validator."""
    return Validator().validate(d)
def get_from_dict(d, keys):
    """Walk a nested structure following ``keys``.

    String keys index into dicts; an integer key selects element 0 of
    the current list.
    NOTE(review): an int key always picks index 0, not index ``k`` --
    presumably intentional for these fixtures; confirm.
    """
    node = d
    for key in keys:
        node = node[0] if isinstance(key, int) else node[key]
    return node
def run_tests():
    # Convenience entry point: run this module's tests through py.test.
    pytest.main(["tests/test_linter.py"])
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# run_tests()
print("Done!")
| Python | 0 | |
f73eaa3d1ba8c6f21fe64a4793aea7ba6b6835ca | Create tensorBoard-example.py | rocksetta-examples/tensorBoard-example.py | rocksetta-examples/tensorBoard-example.py | '''
Loss Visualization with TensorFlow.
This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
import tensorflow as tf
import numpy
# Import MINST data
import input_data
mnist = input_data.read_data_sets("/home/ubuntu/workspace/tmp5/data/", one_hot=True)
# Use Logistic Regression from our previous example
# Parameters
learning_rate = 0.01
training_epochs = 10
batch_size = 100
display_step = 1
# tf Graph Input
x = tf.placeholder("float", [None, 784], name='x') # mnist data image of shape 28*28=784
y = tf.placeholder("float", [None, 10], name='y') # 0-9 digits recognition => 10 classes
# Create model
# Set model weights
W = tf.Variable(tf.zeros([784, 10]), name="weights")
b = tf.Variable(tf.zeros([10]), name="bias")
# Construct model
activation = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
# Minimize error using cross entropy
cost = -tf.reduce_sum(y*tf.log(activation)) # Cross entropy
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) # Gradient Descent
# Initializing the variables
init = tf.initialize_all_variables()
# Create a summary to monitor cost function
tf.scalar_summary("loss", cost)
# Merge all summaries to a single operator
merged_summary_op = tf.merge_all_summaries()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Set logs writer into folder /home/ubuntu/workspace/tmp5/tensorflow_logs
summary_writer = tf.train.SummaryWriter('/home/ubuntu/workspace/tmp5/tensorflow_logs', graph_def=sess.graph_def)
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(mnist.train.num_examples/batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# Fit training using batch data
sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
# Compute average loss
avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
# Write logs at every iteration
summary_str = sess.run(merged_summary_op, feed_dict={x: batch_xs, y: batch_ys})
summary_writer.add_summary(summary_str, epoch*total_batch + i)
# Display logs per epoch step
if epoch % display_step == 0:
print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
print "Optimization Finished!"
# Test model
correct_prediction = tf.equal(tf.argmax(activation, 1), tf.argmax(y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print "Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})
'''
Run the command line: tensorboard --logdir=/home/ubuntu/workspace/tmp5/tensorflow_logs
Open http://localhost:6006/ into your web browser
'''
| Python | 0 | |
ad7f9f785f9a4a4494127a9b2196e1fc64c9f3de | Add basic first tests for new report driven by "events" | tests/test_report.py | tests/test_report.py | from django.test import TestCase
from deep_collector.core import RelatedObjectsCollector
from .factories import BaseModelFactory
class TestLogReportGeneration(TestCase):
    """Tests for RelatedObjectsCollector.get_report() with and without
    DEBUG mode enabled."""

    def test_report_with_no_debug_mode(self):
        # Without DEBUG the report carries no internal log, only a hint
        # on how to enable it.
        obj = BaseModelFactory.create()
        collector = RelatedObjectsCollector()
        collector.collect(obj)
        report = collector.get_report()
        self.assertDictEqual(report, {
            'excluded_fields': [],
            'log': 'Set DEBUG to True if you what collector internal logs'
        })

    def test_report_with_debug_mode(self):
        # With DEBUG enabled the collector records internal events in
        # the 'log' entry.
        self.maxDiff = None
        obj = BaseModelFactory.create()
        collector = RelatedObjectsCollector()
        collector.DEBUG = True
        collector.collect(obj)
        report = collector.get_report()
        self.assertEqual(report['excluded_fields'], [])
        # For now, just checking that the log report is not empty.
        # Some work has to be done to test it more.
        self.assertNotEqual(report['log'], [])
| Python | 0 | |
272371f28369cca514d90f355e7771c133d11dcf | Create __openerp__.py | project_surgery/__openerp__.py | project_surgery/__openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Gideoni Silva (Omnes)
# Copyright 2013-2014 Omnes Tecnologia
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Participantes da Cirurgia',
'description': 'Este módulo adiciona os campos adicionais com os participantes da cirurgia.',
'category': 'Generic Modules/Projects & Services',
'license': 'AGPL-3',
'author': 'Omnes',
'website': 'www.omnes.net.br',
'version': '0.1',
'depends': [
'base',
'project',
],
'data': [
'project_view.xml'
],
'demo': [],
'installable': True,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Python | 0.005291 | |
a08452c4ed3338cf43bf2647bcc17a7d66ba4d23 | call restore config directly | corehq/apps/ota/tasks.py | corehq/apps/ota/tasks.py | from celery.task import task
from couchdbkit.exceptions import ResourceNotFound
from casexml.apps.case.xml import V1
from casexml.apps.phone.restore import RestoreConfig
from corehq.apps.users.models import CommCareUser
from soil import DownloadBase
@task
def prime_restore(usernames_or_ids, version=V1, cache_timeout=None, overwrite_cache=False):
    """Celery task that pre-generates ("primes") the OTA restore cache
    for each given user, reporting progress through soil's DownloadBase.

    :param usernames_or_ids: list of CommCareUser doc ids or usernames
    :param version: restore payload version (defaults to V1)
    :param cache_timeout: optional TTL for the cached restore payload
    :param overwrite_cache: when True, regenerate even if already cached
    :returns: dict with a 'messages' list, one entry per processed user
    """
    total = len(usernames_or_ids)
    DownloadBase.set_progress(prime_restore, 0, total)
    ret = {'messages': []}
    for i, username_or_id in enumerate(usernames_or_ids):
        couch_user = get_user(username_or_id)
        if not couch_user:
            ret['messages'].append('User not found: {}'.format(username_or_id))
            continue
        try:
            # Stock settings only exist on commtrack-enabled projects;
            # pass None otherwise.
            project = couch_user.project
            commtrack_settings = project.commtrack_settings
            stock_settings = commtrack_settings.get_ota_restore_settings() if commtrack_settings else None
            # force_cache=True makes get_payload() store the generated
            # restore so later restore requests can hit the cache.
            restore_config = RestoreConfig(
                couch_user.to_casexml_user(), None, version, None,
                items=True,
                stock_settings=stock_settings,
                domain=project,
                force_cache=True,
                cache_timeout=cache_timeout,
                overwrite_cache=overwrite_cache
            )
            restore_config.get_payload()
            ret['messages'].append('Restore generated successfully for user: {}'.format(
                couch_user.human_friendly_name,
            ))
        except Exception as e:
            # Record the failure for this user and keep going with the rest.
            ret['messages'].append('Error processing user: {}'.format(str(e)))
        DownloadBase.set_progress(prime_restore, i + 1, total)
    return ret
def get_user(username_or_id):
    """Look up a CommCareUser by document id, falling back to username.

    Returns None when neither lookup finds the user.
    """
    for lookup in (CommCareUser.get, CommCareUser.get_by_username):
        try:
            return lookup(username_or_id)
        except ResourceNotFound:
            pass
    return None
| from celery.task import task
from couchdbkit.exceptions import ResourceNotFound
from casexml.apps.case.xml import V1
from corehq.apps.users.models import CommCareUser
from soil import DownloadBase
@task
def prime_restore(usernames_or_ids, version=V1, cache_timeout=None, overwrite_cache=False):
from corehq.apps.ota.views import get_restore_response
total = len(usernames_or_ids)
DownloadBase.set_progress(prime_restore, 0, total)
ret = {'messages': []}
for i, username_or_id in enumerate(usernames_or_ids):
couch_user = get_user(username_or_id)
if not couch_user:
ret['messages'].append('User not found: {}'.format(username_or_id))
continue
try:
get_restore_response(
couch_user.domain,
couch_user,
since=None,
version=version,
force_cache=True,
cache_timeout=cache_timeout,
overwrite_cache=overwrite_cache,
items=True
)
except Exception as e:
ret['messages'].append('Error processing user: {}'.format(str(e)))
DownloadBase.set_progress(prime_restore, i + 1, total)
return ret
def get_user(username_or_id):
try:
couch_user = CommCareUser.get(username_or_id)
except ResourceNotFound:
try:
couch_user = CommCareUser.get_by_username(username_or_id)
except ResourceNotFound:
return None
return couch_user
| Python | 0 |
e1a0029488d4cbf0581c21ceb1bd5db3c19bf3eb | add readme | algorithms/CommonFun.py | algorithms/CommonFun.py | #!user/bin/env python
# coding:utf-8
import sys
import random
reload(sys)
sys.setdefaultencoding('utf-8')
def QuickSort(left, right, array):
    """Sort array[left..right] (inclusive bounds) in place, ascending.

    Fix: the original recursed without any base case, so degenerate
    sub-ranges (left >= right) recursed forever and raised
    RecursionError on every call.  Rewritten with Lomuto partitioning
    using array[right] as the pivot; the signature is unchanged.
    """
    if left >= right:
        return
    pivot = array[right]
    # ``store`` is the boundary of the <= pivot region.
    store = left
    for j in range(left, right):
        if array[j] <= pivot:
            array[store], array[j] = array[j], array[store]
            store = store + 1
    # Put the pivot into its final position.
    array[store], array[right] = array[right], array[store]
    QuickSort(left, store - 1, array)
    QuickSort(store + 1, right, array)
# array must be sorted in ascending order
def BinarySearch(left, right, array, target):
    """Return the index of ``target`` in sorted array[left..right], or -1.

    Fixes: the bound check used ``left < right``, which missed the case
    where the range narrows to a single element (left == right) and so
    returned -1 for a present target; ``/`` produced a float midpoint
    under Python 3, while ``//`` keeps it an int on both 2 and 3.
    """
    if left <= right:
        mid = (left + right) // 2
        if array[mid] > target:
            return BinarySearch(left, mid - 1, array, target)
        elif array[mid] < target:
            return BinarySearch(mid + 1, right, array, target)
        else:
            return mid
    return -1
if __name__ == '__main__':
    # Demo: sort 10 random ints, then search for 15 (prints its index,
    # or -1 when 15 was not among the generated values).
    array = []
    for i in range(10):
        it = random.randint(1, 100)
        array.append(it)
    QuickSort(0, len(array)-1, array)
    print BinarySearch(0, len(array)-1, array, 15)
e0a7824253ae412cf7cc27348ee98c919d382cf2 | verify stderr for a failing clone into a non-empty dir | test/test_clone.py | test/test_clone.py | # -*- coding: utf-8 -*-
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from pathlib import Path
import re
import git
from .lib import (
TestBase,
with_rw_directory,
)
class TestClone(TestBase):
    """Tests for git clone failure behavior."""

    @with_rw_directory
    def test_checkout_in_non_empty_dir(self, rw_dir):
        # Cloning into a directory that already contains files must fail
        # and explain why on stderr.
        non_empty_dir = Path(rw_dir)
        garbage_file = non_empty_dir / 'not-empty'
        garbage_file.write_text('Garbage!')
        # Verify that cloning into the non-empty dir fails while complaining about the target directory not being empty/non-existent
        try:
            self.rorepo.clone(non_empty_dir)
        except git.GitCommandError as exc:
            self.assertTrue(exc.stderr, "GitCommandError's 'stderr' is unexpectedly empty")
            # Case-insensitive match for git's "fatal: destination path
            # ... exists and is not an empty directory" message.
            expr = re.compile(r'(?is).*\bfatal:\s+destination\s+path\b.*\bexists\b.*\bnot\b.*\bempty\s+directory\b')
            self.assertTrue(expr.search(exc.stderr), '"%s" does not match "%s"' % (expr.pattern, exc.stderr))
        else:
            self.fail("GitCommandError not raised")
| Python | 0.000001 | |
b20f694b57813397bf0c4a1537a2d404ef5adb24 | Add tests for client_kwargs_from_config | tests/test_util.py | tests/test_util.py | import docker
import pytest
from dockci.util import client_kwargs_from_config
class TestClientKwargsFromConfig(object):
""" Tests for ``dockci.util.client_kwargs_from_config`` """
@pytest.mark.parametrize('host_str,expected,expected_tls_dict', (
('https://localhost', {'base_url': 'https://localhost'}, {}),
(
'https://localhost assert_hostname=no',
{'base_url': 'https://localhost'},
{
'assert_fingerprint': None,
'assert_hostname': False,
'ssl_version': 5,
},
),
(
'https://localhost ssl_version=TLSv1',
{'base_url': 'https://localhost'},
{
'assert_fingerprint': None,
'assert_hostname': None,
'ssl_version': 3,
},
),
(
'https://localhost verify=no',
{'base_url': 'https://localhost'},
{
'assert_fingerprint': None,
'assert_hostname': None,
'ssl_version': 5,
'verify': False,
},
),
(
'https://localhost assert_hostname=no ssl_version=TLSv1',
{'base_url': 'https://localhost'},
{
'assert_fingerprint': None,
'assert_hostname': False,
'ssl_version': 3,
},
),
))
def test_parse_host_str(self,
host_str,
expected,
expected_tls_dict,
):
""" Test basic ``host_str`` parsing; no surprises """
out = client_kwargs_from_config(host_str)
out_tls = out.pop('tls', {})
try:
out_tls = out_tls.__dict__
except AttributeError:
pass
assert out == expected
assert out_tls == expected_tls_dict
def test_parse_host_str_certs(self, tmpdir):
""" Test setting all certificates """
tmpdir.join('cert.pem').ensure()
tmpdir.join('key.pem').ensure()
tmpdir.join('ca.pem').ensure()
out = client_kwargs_from_config(
'http://l cert_path=%s' % tmpdir.strpath
)
assert out['tls'].cert == (
tmpdir.join('cert.pem').strpath,
tmpdir.join('key.pem').strpath,
)
assert out['tls'].verify == tmpdir.join('ca.pem').strpath
@pytest.mark.parametrize('host_str_fs', (
'http://l verify=no cert_path={cert_path}',
'http://l cert_path={cert_path} verify=no',
))
def test_no_verify_no_ca(self, host_str_fs, tmpdir):
""" Test that ``verify=no`` overrides ``cert_path`` """
tmpdir.join('cert.pem').ensure()
tmpdir.join('key.pem').ensure()
tmpdir.join('ca.pem').ensure()
out = client_kwargs_from_config(
host_str_fs.format(cert_path=tmpdir.strpath),
)
assert out['tls'].cert == (
tmpdir.join('cert.pem').strpath,
tmpdir.join('key.pem').strpath,
)
assert out['tls'].verify == False
def test_certs_error(self, tmpdir):
""" Test raising ``TLSParameterError`` when certs don't exist """
with pytest.raises(docker.errors.TLSParameterError):
client_kwargs_from_config(
'http://l cert_path=%s' % tmpdir.strpath
)
def test_no_ca_no_error(self, tmpdir):
"""
Ensure that when client cert/key exists, but the CA doesn't, cert
params are set without verify
"""
tmpdir.join('cert.pem').ensure()
tmpdir.join('key.pem').ensure()
out = client_kwargs_from_config(
'http://l cert_path=%s' % tmpdir.strpath
)
assert out['tls'].cert == (
tmpdir.join('cert.pem').strpath,
tmpdir.join('key.pem').strpath,
)
assert out['tls'].verify == None
| Python | 0.000001 | |
4683fc67d5171d8bb0391ac45f587fbc3e3c97fc | Add dependency installer for linux and mac osx | install_dependencies.py | install_dependencies.py | import platform
import subprocess
"""
This is a standalone script that installs the required dependencies to run. It
*should* be platform independent, and should work regardless of what platform
you are running it on.
To install dependencies, download the DevAssist source and run this script by
running "python install_dependencies.py"
"""
# Identifying host platform
host_platform = platform.system()
def install_dependencies():
    """
    Installs dependencies for DevAssist.

    Uses Homebrew on Mac OSX (Darwin) and apt-get on Linux to install
    portaudio, then installs pyaudio with pip.  Commands are passed as
    argument lists with the default shell=False, resolving the @TODO
    notes that asked for the shell=True invocations to be removed.
    """
    # Darwin = Mac OSX
    if host_platform == "Darwin":
        # Installing portaudio
        print("Installing portaudio...\n")
        portaudio = subprocess.Popen(["brew", "install", "portaudio"])
        portaudio.communicate()
        print("\nportaudio has been installed...")

        # Installing pyaudio
        print("Installing pyaudio...\n")
        pyaudio = subprocess.Popen(["pip", "install", "pyaudio"])
        pyaudio.communicate()
        print("\npyaudio has been installed...")
    elif host_platform == "Linux":
        # Installing dependencies for portaudio
        print("Installing portaudio & dependencies...\n")
        portaudio = subprocess.Popen(
            ["apt-get", "install", "portaudio19-dev", "python-all-dev",
             "python3-all-dev"])
        portaudio.communicate()
        print("\nportaudio & dependencies have been installed...")

        # Installing pyaudio, pointing build_ext at the include/lib
        # paths portaudio was installed under.
        print("Installing pyaudio...\n")
        pyaudio = subprocess.Popen(
            ["pip", "install",
             "--global-option=build_ext",
             "--global-option=-I/usr/local/include",
             "--global-option=-L/usr/local/lib",
             "pyaudio"])
        pyaudio.communicate()
        print("\npyaudio has been installed...")
if __name__ == "__main__":
install_dependencies()
| Python | 0 | |
0fd7cdee45b54551bcfc901cece2e5cc9dec4555 | Add new test setup required for py.test/django test setup | test/test_setup.py | test/test_setup.py | import os
import django
os.environ['DJANGO_SETTINGS_MODULE'] = 'testsettings'
# run django setup if we are on a version of django that has it
if hasattr(django, 'setup'):
# setup doesn't like being run more than once
try:
django.setup()
except RuntimeError:
pass | Python | 0 | |
2014a7e3e785c9826575846a38b4703ef19946f4 | fix path stuff | test/test_tiles.py | test/test_tiles.py | # This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import math
import pyglet
from pyglet.window import key
import cocos
from cocos import tiles
class CarSprite(cocos.actions.ActionSprite):
    # Arrow-key driven car sprite; update() is scheduled with
    # pyglet.clock in the __main__ block and reads the module-level
    # ``keyboard`` state and ``manager`` scroller.
    speed = 0
    def update(self, dt):
        # handle input and move the car
        # Steering: left/right arrows rotate at 150 degrees per second.
        self.rotation += (keyboard[key.RIGHT] - keyboard[key.LEFT]) * 150 * dt
        # Throttle: up/down arrows change speed, clamped to [-100, 200].
        # NOTE(review): the +/-50 is applied per update call, not scaled
        # by dt -- confirm this frame-rate dependence is intended.
        speed = self.speed
        speed += (keyboard[key.UP] - keyboard[key.DOWN]) * 50
        if speed > 200: speed = 200
        if speed < -100: speed = -100
        self.speed = speed
        # Advance along the current heading and keep the scrolling view
        # focused on the car.
        r = math.radians(self.rotation)
        s = dt * speed
        self.x += math.sin(r) * s
        self.y += math.cos(r) * s
        manager.set_focus(self.x, self.y)
if __name__ == "__main__":
from cocos.director import director
#director.init(width=400, height=300)
director.init(width=600, height=300)
car_layer = tiles.ScrollableLayer()
car = pyglet.image.load('car.png')
car.anchor_x = car.width//2
car.anchor_y = car.height//2
car = CarSprite(car)
pyglet.clock.schedule(car.update)
car_layer.add(car)
manager = tiles.ScrollingManager(director.window)
test_layer = tiles.load('road-map.xml')['map0']
manager.append(test_layer)
manager.append(car_layer)
main_scene = cocos.scene.Scene(test_layer, car_layer)
keyboard = key.KeyStateHandler()
director.window.push_handlers(keyboard)
@director.window.event
def on_close():
pyglet.app.exit()
director.run(main_scene)
| import math
import pyglet
from pyglet.window import key
import cocos
from cocos import tiles
class CarSprite(cocos.actions.ActionSprite):
speed = 0
def update(self, dt):
# handle input and move the car
self.rotation += (keyboard[key.RIGHT] - keyboard[key.LEFT]) * 150 * dt
speed = self.speed
speed += (keyboard[key.UP] - keyboard[key.DOWN]) * 50
if speed > 200: speed = 200
if speed < -100: speed = -100
self.speed = speed
r = math.radians(self.rotation)
s = dt * speed
self.x += math.sin(r) * s
self.y += math.cos(r) * s
manager.set_focus(self.x, self.y)
if __name__ == "__main__":
from cocos.director import director
#director.init(width=400, height=300)
director.init(width=600, height=300)
car_layer = tiles.ScrollableLayer()
car = pyglet.image.load('car.png')
car.anchor_x = car.width//2
car.anchor_y = car.height//2
car = CarSprite(car)
pyglet.clock.schedule(car.update)
car_layer.add(car)
manager = tiles.ScrollingManager(director.window)
test_layer = tiles.load('road-map.xml')['map0']
manager.append(test_layer)
manager.append(car_layer)
main_scene = cocos.scene.Scene(test_layer, car_layer)
keyboard = key.KeyStateHandler()
director.window.push_handlers(keyboard)
@director.window.event
def on_close():
pyglet.app.exit()
director.run(main_scene)
| Python | 0.000001 |
dbfa14401c0b50eb1a3cac413652cb975ee9d41f | Add valid directory cleaner helper test | ocw-ui/backend/tests/test_directory_helpers.py | ocw-ui/backend/tests/test_directory_helpers.py | import os
import unittest
from webtest import TestApp
from ..run_webservices import app
from ..directory_helpers import _get_clean_directory_path
test_app = TestApp(app)
class TestDirectoryPathCleaner(unittest.TestCase):
    """Tests for directory_helpers._get_clean_directory_path."""
    # Fixture directories, created at class-definition (import) time.
    PATH_LEADER = '/tmp/foo'
    VALID_CLEAN_DIR = '/tmp/foo/bar'
    if not os.path.exists(PATH_LEADER): os.mkdir(PATH_LEADER)
    if not os.path.exists(VALID_CLEAN_DIR): os.mkdir(VALID_CLEAN_DIR)
    def test_valid_directory_path(self):
        # Joining '/bar' under the leader yields the cleaned absolute path.
        clean_path = _get_clean_directory_path(self.PATH_LEADER, '/bar')
        self.assertEquals(clean_path, self.VALID_CLEAN_DIR)
| Python | 0.000003 | |
53eaf6de12b282ff359d4d4b9518b10ebeeee6ba | Add imdb parser | imdb-parser.py | imdb-parser.py | #!/usr/bin/python
import threading, urllib2, urllib
import sys
import time
import codecs
import re
import os
import MySQLdb
from pprint import pprint
from bs4 import BeautifulSoup, NavigableString
from optparse import OptionParser
use = "Usage: %prog -u url -l limit\n This script parses justeat site and outputs csv file of the following format :\n "
parser = OptionParser(usage = use)
parser.add_option("-g", "--genre", action='store', type="string", dest="genre", default=None, help="Just provide genre like action/thriller etc.")
parser.add_option("-v", "--verbose", action='store_true', dest="verbose", default=False, help="show progress")
parser.add_option("-s", "--start", action='store', type="int", dest="start", default=1, help="start from")
# Fix: "-l/--last" previously reused dest="start", silently overwriting
# the value parsed for "-s/--start"; it now stores into its own dest.
parser.add_option("-l", "--last", action='store', type="int", dest="last", default=1, help="last number")
parser.add_option("-t", "--threads", action='store', type="int", dest="thread", default=1, help="Threads to run")
(opts, args) = parser.parse_args()
_db= None
def connectDB():
    # Open the module-level MySQL connection to the imdb_data database.
    # NOTE(review): ``global _cursor`` is declared but never assigned in
    # this function; only ``_db`` is set.  Credentials are hard-coded.
    global _cursor
    global _db
    _db = MySQLdb.connect(host="localhost", # your host, usually localhost
                         user="root", # your username
                         passwd="ubuntu", # your password
                         db="imdb_data") # name of the data base
    if _db is not None:
        print "connected"
    else:
        print "unable to connect"
def stripslashes(s):
    # Normalize a value into text safe for CSV output: unescape
    # backslash sequences and drop single quotes for str input, or
    # coerce anything else to text with non-ASCII chars replaced by
    # spaces; finally strip any remaining U+00EB characters.
    # NOTE(review): 'string_escape' and the ``unicode`` builtin are
    # Python 2 only -- this function breaks under Python 3; confirm the
    # target interpreter.
    if type(s) is str:
        s=codecs.decode( s.encode('utf-8'), 'string_escape')
        s=s.decode('utf-8').replace("'","")
    else:
        s= unicode(s)
        s= ''.join([i if ord(i) < 128 else ' ' for i in s])
    return s.replace(u'\xeb',"")
def urlopen_with_retry(request,attr):
    # Fetch ``request`` with urllib2, retrying up to 5 times on
    # urllib2.URLError.  ``attr`` is only used to label error messages.
    # Returns the response body, or None implicitly when every attempt
    # fails (control falls off the end of the loop).
    retries= t_retries=5
    while retries>0:
        try:
            if retries< t_retries:
                print "Retrying... \n"
            doc= urllib2.urlopen(request).read()
            return doc;
        except urllib2.URLError,e:
            print "Error reading page "+attr+" ."
            retries= retries-1
class Unbuffered:
    """Wrap a stream so that every write is flushed immediately."""

    def __init__(self, stream):
        self.stream = stream

    def write(self, data):
        # Forward the write, then force it out of the buffer right away.
        target = self.stream
        target.write(data)
        target.flush()

    def __getattr__(self, attr):
        # Any attribute we don't define is delegated to the wrapped stream.
        return getattr(self.stream, attr)
def writeToFile(arr,filename):
    # Append ``arr`` as one double-quoted, semicolon-separated CSV row
    # to ``filename``, creating missing intermediate directories first.
    # NOTE(review): the handle is kept in the module-level ``_file`` and
    # never closed here -- presumably relies on interpreter exit; confirm.
    global _file
    # Walk from the grandparent of ``filename`` down to its parent,
    # creating each missing directory component along the way.
    # NOTE(review): assumes the grandparent directory already exists --
    # confirm against how callers build ``filename``.
    _dir= os.path.dirname(os.path.dirname(filename))
    dir_= filename[len(_dir)+1:].split("/");
    dir_= [x for x in dir_ if x != ""]
    for part in dir_[:-1]:
        _dir= _dir+"/"+part
        try:
            os.stat(_dir)
        except:  # bare except also hides real errors (e.g. permissions)
            os.mkdir(_dir)
    _file= open(filename, 'a+')
    _file.write('"'+'";"'.join(arr)+'"\n')
def parse_genrepage(genre,count):
    """Scrape one IMDB search-result page (100 titles) for *genre*,
    starting at offset *count*, and INSERT each row into the `movies` table.

    Returns count+100 (the offset of the next page).
    """
    global _db
    _cursor= _db.cursor()
    print("Parsing genere: "+genre+"("+str(count)+") ....\n")
    Url= "http://www.imdb.com/search/title?genres="+genre.lower()+"&start="+str(count)+"&count=100"
    headers = {'User-Agent' : 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.66 Safari/537.36'}
    request = urllib2.Request(Url, None, headers)
    html_doc = urlopen_with_retry(request,Url)
    if html_doc is not None:
        soup = BeautifulSoup(html_doc)
        childs= soup.find("table",attrs={"class": "results"}).find_all("tr")
        for tr in childs:
            if tr.find("td") is not None:  # skip the header row
                try:
                    titleEl= tr.find("td",attrs={"class":"title"})
                    title= titleEl.a.string
                    url= "http://www.imdb.com"+titleEl.a["href"];
                    # Year and "TV" flag both come from the "(1999 TV Series)" span.
                    year_= re.findall(r'[0-9]{4}', titleEl.find("span",attrs={"class":"year_type"}).string.strip("(").strip(")") )
                    if len(year_)>0:
                        year= year_[0]
                    else:
                        year=0000
                    tv= re.findall(r'TV', titleEl.find("span",attrs={"class":"year_type"}).string.strip("(").strip(")") )
                    if len(tv)>0:
                        type_= "TV Series"
                    else:
                        type_= "Movie"
                    # The rating tooltip looks like "Users rated this 7.5/10 (1,234 votes)";
                    # the digits are pulled out and recombined below.
                    rating = titleEl.find("div",attrs={"class":"rating"})
                    if rating is not None and rating["title"] is not None:
                        rating=rating["title"].replace(",","").replace(".","")
                        rating= map(int, re.findall(r'[0-9]+', rating));
                        if len(rating)!=0:
                            rating_count= rating[2];
                            rating= float(float(rating[0])/float(rating[1]));
                        else:
                            rating=0
                            rating_count=0
                    else:
                        rating=0
                        rating_count=0
                    if titleEl.find("span",attrs={"class":"runtime"}) is not None:
                        runtime= titleEl.find("span",attrs={"class":"runtime"}).string
                    else:
                        runtime=""
                    if titleEl.find("span",attrs={"class":"outline"}) is not None:
                        outline= ("".join(titleEl.find("span",attrs={"class":"outline"}).findAll(text=True))).replace("\"","'")
                    else:
                        outline=""
                    if titleEl.find("span",attrs={"class":"credit"}) is not None:
                        credit= ("".join(titleEl.find("span",attrs={"class":"credit"}).findAll(text=True))).replace("\"","'")
                    else:
                        credit=""
                    if titleEl.find("span",attrs={"class":"genre"}) is not None:
                        genre_= "".join(titleEl.find("span",attrs={"class":"genre"}).findAll(text=True))
                    else:
                        genre_= ""
                    _cursor.execute("INSERT INTO `movies`(`url`, `title`, `rating`, `rating_count`, `credits`,`outline`, `genre`, `type`, `year`, `runtime`, `timestamp`) VALUES (\""+"\",\"".join([stripslashes(x) for x in [url,title,rating,rating_count,credit,outline,genre_,type_,year,runtime] ])+"\",CURRENT_TIMESTAMP)")
                    _db.commit()
                    print title;
                except KeyError:
                    print "KeyError"
                except MySQLdb.IntegrityError:
                    # Duplicate row (url is presumably unique) — ignored.
                    flag=1
                except MySQLdb.InterfaceError:
                    # Connection object went away: reconnect and move on.
                    connectDB()
                    _cursor= _db.cursor()
                    _db.commit()
                except MySQLdb.OperationalError:
                    # Connection dropped mid-insert: reconnect and retry the
                    # same INSERT once.
                    connectDB()
                    _cursor= _db.cursor()
                    _cursor.execute("INSERT INTO `movies`(`url`, `title`, `rating`, `rating_count`, `credits`,`outline`, `genre`, `type`, `year`, `runtime`, `timestamp`) VALUES (\""+"\",\"".join([stripslashes(x) for x in [url,title,rating,rating_count,credit,outline,genre_,type_,year,runtime] ])+"\",CURRENT_TIMESTAMP)")
                    _db.commit()
                    print title;
    else:
        print "Error parsing"
    return count+100
def main():
connectDB()
if opts.genre is None:
print "Please specify Zomato URL\n"
parser.print_help()
exit(-1)
else:
if opts.start:
count=opts.start
while True:
threads=[]
for i in range(0,opts.thread):
threads.append(threading.Thread(target=parse_genrepage, args =[opts.genre,count]))
count= count+100
for t in threads:
t.start()
for t in threads:
t.join()
if __name__ == "__main__":
main() | Python | 0.000008 | |
ce924c72795d342605bb4409d5217fe99c807ace | Add test cases for moments functions | skimage/measure/tests/test_moments.py | skimage/measure/tests/test_moments.py | from numpy.testing import assert_equal, assert_almost_equal
import numpy as np
from skimage.measure import (moments, moments_central, moments_normalized,
moments_hu)
def test_moments():
    # A 2x2 weighted patch centred on (14.5, 14.5).
    img = np.zeros((20, 20), dtype=np.double)
    img[14:16, 14:16] = [[1, 0.5], [0.5, 1]]
    m = moments(img)
    # Total mass and centroid coordinates.
    assert_equal(m[0, 0], 3)
    assert_almost_equal(m[0, 1] / m[0, 0], 14.5)
    assert_almost_equal(m[1, 0] / m[0, 0], 14.5)
def test_moments_central():
    patch = [[1, 0.5], [0.5, 1]]
    image = np.zeros((20, 20), dtype=np.double)
    image[14:16, 14:16] = patch
    mu = moments_central(image, 14.5, 14.5)

    # Same pattern translated by (+2, +2); central moments must be
    # translation invariant.
    image2 = np.zeros((20, 20), dtype=np.double)
    image2[16:18, 16:18] = patch
    mu2 = moments_central(image2, 16.5, 16.5)

    assert_equal(mu, mu2)
def test_moments_normalized():
    image = np.zeros((20, 20), dtype=np.double)
    image[13:17, 13:17] = 1
    nu = moments_normalized(moments_central(image, 14.5, 14.5))

    # Same square shifted by (-3, -3) and scaled by 0.5; normalized central
    # moments must be translation and scale invariant.
    image2 = np.zeros((20, 20), dtype=np.double)
    image2[11:13, 11:13] = 1
    nu2 = moments_normalized(moments_central(image2, 11.5, 11.5))

    assert_almost_equal(nu, nu2, decimal=1)
def test_moments_hu():
    image = np.zeros((20, 20), dtype=np.double)
    image[13:15, 13:17] = 1
    hu = moments_hu(moments_normalized(moments_central(image, 13.5, 14.5)))

    # Translated, scaled by 0.5 and rotated by 90 degrees; Hu moments must
    # be invariant under all three transforms.
    image2 = np.zeros((20, 20), dtype=np.double)
    image2[11, 11:13] = 1
    image2 = image2.T
    hu2 = moments_hu(moments_normalized(moments_central(image2, 11.5, 11)))

    assert_almost_equal(hu, hu2, decimal=1)
if __name__ == "__main__":
    # Allow running this test module directly, outside a test runner.
    from numpy.testing import run_module_suite
    run_module_suite()
| Python | 0 | |
5ee4f6fd50da0a7115f8ca0ab29c4388eaef13a6 | add probabitity function decomposition | src/probability.py | src/probability.py | from __future__ import division
from math import log
from scipy.special import binom
import numpy as np
def C(p, p0):
    """Evaluate -p0*log2(p0) + p0*p*log2(p0*p) - (1-p0+p0*p)*log2(1-p0+p0*p)."""
    p1 = 1 - p0
    entropy_term = -p0 * log(p0, 2)
    joint_term = p0 * p * log(p0 * p, 2)
    mix = p1 + p0 * p
    return entropy_term + joint_term - mix * log(mix, 2)
def P(c, p0, eps=0.00001):
    """Invert C(., p0) by bisection: return p such that C(p, p0) is close to c.

    Assumes C is monotone decreasing in p on [0, 1]; *eps* bounds the width
    of the final bracket and the lower endpoint is returned.
    """
    lo, hi = 0, 1
    while hi - lo > eps:
        mid = (lo + hi) / 2
        if C(mid, p0) > c:
            lo = mid
        else:
            hi = mid
    return lo
def coef(i, p, n=None):
    """Binomial probability mass: C(n, i) * p**i * (1-p)**(n-i).

    *n* defaults to the module-level N (the original implementation read the
    global directly, which made the function untestable in isolation); passing
    n explicitly is backward-compatible and preferred.
    """
    if n is None:
        n = N
    return binom(n, i) * p**i*(1-p)**(n-i)
def A(c, N, M=100):
    """Least-squares fit of N binomial coefficients to P(c, .) sampled at M points.

    Solves  mat @ a = b  where mat[j, i] = coef(i, points[j]) and
    b[j] = P(c, points[j]), for M interior points j/(M+1).

    M now defaults to 100 so the existing __main__ call ``A(c, N)`` (which
    omitted the third argument) works.  ``xrange`` was replaced with ``range``
    so the function also runs on Python 3.
    """
    points = (np.arange(M) + 1) / (M + 1)
    mat = np.array([[coef(i, p) for i in range(N)] for p in points])
    b = np.array([P(c, p) for p in points])
    a, _, _, _ = np.linalg.lstsq(mat, b)
    return a
if __name__ == '__main__':
    N = 10 # Buffer size
    M = 100 # Num of points
    c = 0.15
    # NOTE(review): A() is declared with parameters (c, N, M); make sure M has
    # a default value, or pass it here, otherwise this call raises TypeError.
    a = A(c, N)
    p0 = 0.7
    # NOTE(review): xrange is Python 2 only — NameError under Python 3.
    x = np.array([coef(i, p0) for i in xrange(N)])
    print(np.dot(a, x))   # approximation of P(c, p0) via the fitted coefficients
    print(P(c, p0))       # exact (bisection) value for comparison
    print(a)
| Python | 0.003014 | |
ad5b1459bf514f7be5b39b90d0fdf627edf65f62 | Add helper table module to generate common test tables [skip ci] | astropy/table/table_helpers.py | astropy/table/table_helpers.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Helper functions for table development, mostly creating useful
tables for testing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from itertools import cycle
import string
import numpy as np
from .table import Table, Column
from ..extern.six.moves import zip
class TimingTables(object):
    """
    Object which contains two tables and various other attributes that
    are useful for timing and other API tests.
    """
    # NOTE: the order of the np.random.* calls below is significant — it
    # determines the generated values for the fixed seed, so do not reorder.
    def __init__(self, size=1000, masked=False):
        self.masked = masked

        # Initialize table
        self.table = Table(masked=self.masked)

        # Create column with mixed types
        np.random.seed(12345)
        self.table['i'] = np.arange(size)
        self.table['a'] = np.random.random(size)  # float
        self.table['b'] = np.random.random(size) > 0.5  # bool
        self.table['c'] = np.random.random((size,10))  # 2d column
        self.table['d'] = np.random.choice(np.array(list(string.ascii_letters)),size)

        # Fixtures for row/column insertion benchmarks.
        self.extra_row = {'a':1.2, 'b':True, 'c':np.repeat(1, 10), 'd':'Z'}
        self.extra_column = np.random.randint(0, 100, size)
        self.row_indices = np.where(self.table['a'] > 0.9)[0]
        self.table_grouped = self.table.group_by('d')

        # Another table for testing joining
        self.other_table = Table(masked=self.masked)
        self.other_table['i'] = np.arange(1,size,3)
        self.other_table['f'] = np.random.random()
        self.other_table.sort('f')

        # Another table for testing hstack
        self.other_table_2 = Table(masked=self.masked)
        self.other_table_2['g'] = np.random.random(size)
        self.other_table_2['h'] = np.random.random((size, 10))

        self.bool_mask = self.table['a'] > 0.6
def simple_table(size=3, cols=None, kinds='ifS', masked=False):
    """
    Return a simple table for testing.

    Example
    --------
    ::

      >>> from astropy.table.table_helpers import simple_table
      >>> print(simple_table(3, 6, masked=True, kinds='ifOS'))
       a   b      c     d   e   f
      --- --- -------- --- --- ---
       --  1.0 {'c': 2}  --   5 5.0
        2  2.0       --   e   6  --
        3   -- {'e': 4}   f  --  7.0

    Parameters
    ----------
    size : int
        Number of table rows
    cols : int, default=number of kinds
        Number of table columns
    kinds : str
        String consisting of the column dtype.kinds.  This string
        will be cycled through to generate the column dtype.
        The allowed values are 'i', 'f', 'S', 'O'.

    Returns
    -------
    out : `Table`
        New table with appropriate characteristics
    """
    if cols is None:
        cols = len(kinds)
    if cols > 26:
        raise ValueError("Max 26 columns in SimpleTable")

    columns = []
    # BUG FIX: these two loops used the bare ``xrange`` builtin, which is a
    # NameError on Python 3; ``range`` behaves identically for this use.
    names = [chr(ord('a') + ii) for ii in range(cols)]
    letters = np.array(list(string.ascii_letters))
    for jj, kind in zip(range(cols), cycle(kinds)):
        if kind == 'i':
            data = np.arange(1, size + 1, dtype=int) + jj
        elif kind == 'f':
            data = np.arange(size, dtype=float) + jj
        elif kind == 'S':
            indices = (np.arange(size) + jj) % len(letters)
            data = letters[indices]
        elif kind == 'O':
            indices = (np.arange(size) + jj) % len(letters)
            vals = letters[indices]
            data = [{val: index} for val, index in zip(vals, indices)]
        else:
            raise ValueError('Unknown data kind')
        columns.append(Column(data, dtype=kind))

    table = Table(columns, names=names, masked=masked)
    if masked:
        # Mask two of every three values, staggered per column.
        for ii, col in enumerate(table.columns.values()):
            mask = np.array((np.arange(size) + ii) % 3, dtype=bool)
            col.mask = ~mask

    return table
def complex_table():
    """
    Return a masked table from the io.votable test set that has a wide variety
    of stressing types.
    """
    # Imports are deferred so importing this helpers module stays cheap.
    from ..utils.data import get_pkg_data_filename
    from ..io.votable.table import parse
    import warnings

    # The regression VOTable triggers a pile of benign parser warnings.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        votable = parse(get_pkg_data_filename('../io/votable/tests/data/regression.xml'),
                        pedantic=False)

    return votable.get_first_table().to_table()
| Python | 0 | |
572508fb78014e9e1b228d738b6ebb89c9bdcb9e | Create rpc.py | rpc.py | rpc.py | ### Reverse Polish Calculator
import math
### CLASSES ###
class Stack:
    """LIFO stack of numeric strings.

    Access the stack only through these methods; items are kept as the
    original strings (eval() downstream needs the textual form).
    """

    def __init__(self):
        """Start out empty."""
        self.clear_stack()

    def clear_stack(self):
        """Drop every stored item."""
        self.stack = []
        print("\nStack cleared")

    def push_to(self, item):
        """Push *item* if it parses as a float; return True on invalid input."""
        try:
            float(item)  # validation only — the string itself is stored
        except Exception:
            return True
        self.stack.append(item)
        return False

    def pop_from(self):
        """Pop and return the top item, or None (with a message) if empty."""
        if self.stack:
            return self.stack.pop()
        print("Error: empty stack")
        return None
class Variable:
    """Named storage for calculator variables.

    Access the storage only through these methods; values are the same
    strings that live on the stack.
    """

    def __init__(self):
        """Start with no variables defined."""
        self.clear_variables()

    def clear_variables(self):
        """Destroy all variables."""
        self.variable = {}
        print("\nVariables cleared")

    def check_variable(self, name):
        """Return True if *name* is already defined, False otherwise.

        The original implementation's inline comments had the two outcomes
        swapped, and its ``get(...) is None`` test would have mistaken a
        variable holding None for an undefined one; a plain membership test
        is both correct and clearer.
        """
        return name in self.variable

    def add_variable(self, name, value):
        """Create (or overwrite) variable *name* with *value*."""
        self.variable[name] = value
        print(value, "popped from stack into", name)

    def get_variable(self, name):
        """Return the value stored under *name* (which must exist)."""
        print(self.variable[name], "from", name, "pushed to stack")
        return self.variable[name]
class RPC:
    """Application for performing calculations expressed in postfix notation, aka
    reversed polish calculation.
    Usage:
    self.evaluate(expression),
    where expression is a string containing postfix notated formula,
    for example: 2 3 4 5 + - * = -12
    Known operators: * / + - ** %
    Known functions: all of Python's math module are available for use
                     trigonometrics: please specify degrees, not radians
    Known commands:
    =:  display last item in stack (without it, calculation is just performed)
    ==: remove&display last item in stack (pop from stack)
    c:  clear stack (stack isn't cleared after errors by itself)
    v:  declaring a variable
        - not existing: vANY pops stack, and stores value in ANY
        - existing: vANY pushes its value to stack (leaving value in ANY)
        Variable name must follow v immediately!
    d:  destroys all variables
    Typing errors handled gracefully, ignoring everything that's not a number,
    operator, function or command.
    Commands are not, but variable names are case sensitive.
    (c) 2015 Weisz Roland <weisz.roland@wevik.hu> license: ISC"""
    # NOTE: this docstring doubles as the runtime usage text (printed from
    # __init__ via self.__doc__), so edits here change program output.

    def __init__(self):
        """ Init operators, functions & stack """
        # Maps every accepted operator spelling to the Python operator used
        # in the eval()'d formula.
        self.operators = { "*": "*", "x": "*", "X": "*", "×": "*", ".": "*",
                           "/": "/", "÷": "/", ":": "/", "%": "%",
                           "+": "+", "-": "-", "^": "**", "**": "**" }
        # NOTE(review): slicing dir(math) to skip dunder names relies on
        # dir()'s ordering and on math exporting exactly 5 such names — this
        # is fragile across Python versions; confirm on the target version.
        self.functions = dir(math)[5:] #without language-spec functions
        self.trigonometric = ('acos', 'asin', 'atan', 'cos', 'sin', 'tan') #handling radians
        print(self.__doc__) #display usage
        self.stack, self.var = Stack(), Variable()

    def evaluate(self, expression):
        """ Evaluates expression in postfix notation """
        for op in expression.split(): #split expression at spaces into list
            # push_to returns True when op is NOT a number, so the command /
            # operator handling below runs only for non-numeric tokens.
            if self.stack.push_to(op): #number pushed, otherwise proceed
                if op in self.operators.keys(): #handling common operators
                    if self.performCalculation(self.operators[op]):
                        break
                elif op.lower() in self.functions: #handling functions
                    if self.performCalculation(op):
                        break
                elif op == "=" or op == "==": #handling displaying
                    self.displayStackTop(op)
                elif op.lower() == "c": #clear stack
                    self.stack.clear_stack()
                elif op.lower() == "d": #destroy variables
                    self.var.clear_variables()
                elif op[0].lower() == "v": #handling variables
                    var = op[1:]
                    if var:
                        if self.var.check_variable(var): #variable name exists
                            self.stack.push_to(self.var.get_variable(var))
                        else: #encountered new variable name
                            value = self.stack.pop_from()
                            if value is not None:
                                self.var.add_variable(var, value)
                            else:
                                break
                    else:
                        print("Error: variable name must follow v immediately")
                        break
                else:
                    print(op, "ignored")

    def displayStackTop(self, how):
        """ Displays stack's top item, where
        how can be = or == (latter removes item from stack too) """
        top_item = self.stack.pop_from()
        if top_item is not None:
            print(top_item)
            if how == "=": #only read
                self.stack.push_to(top_item) #push back item

    def performCalculation(self, operator):
        """ Performs calculation based on operators type """
        # Returns True on failure (caller aborts evaluation), False on success.
        if operator in self.functions: #function with one operand
            operand = self.stack.pop_from()
            if operand is not None:
                formula = "math." + operator + "(" + operand + ")" #like: math.sqrt(2)
                if operator in self.trigonometric:
                    # NOTE(review): acos/asin/atan take a ratio, not an angle,
                    # yet their argument is wrapped in math.radians() here just
                    # like sin/cos/tan; converting the *result* to degrees
                    # looks like the intended behaviour — confirm.
                    formula = "math." + operator + "(math.radians(" + operand + "))" #like: math.sin(math.radians(60))
            else:
                return True #error already displayed
        else: #otherwise operator with two operands
            operand1, operand2 = self.stack.pop_from(), self.stack.pop_from()
            if (operand1 is not None) and (operand2 is not None):
                formula = operand2 + operator + operand1
            else:
                return True #error already displayed
        try:
            # Operands were validated as floats by push_to, so the eval'd
            # string can only contain numbers and the whitelisted operators.
            self.stack.push_to(str(eval(formula))) #validated (no injections)
            return False
        except ZeroDivisionError:
            print("Error: divide by zero")
            return True
        except:
            print("Error: calculation impossible")
            return True
        return False  # unreachable: every path above already returned
def main():
    """Read expressions from stdin and evaluate them until the user types 'done'."""
    calculator = RPC()
    while True:
        line = input("\nPlease enter a postfix-notated expression: ")
        if line.lower() == "done":
            break
        calculator.evaluate(line)


if __name__ == "__main__":
    main()
| Python | 0.000001 | |
8e8a1a33d8bedcb597020f9723c03d0f6af57522 | Add python script | send.py | send.py | import sys
import os
try:
sys.path.append(os.path.join(os.environ['ANDROID_VIEW_CLIENT_HOME'], 'src'))
except:
pass
from com.dtmilano.android.viewclient import ViewClient
# NOTE(review): argv[2]/argv[3] skip argv[1] — presumably the wrapper that
# launches this script inserts an extra argument; confirm the expected CLI.
number = sys.argv[2]
text = sys.argv[3]
print("Sending WhatsApp...")
print("Number: " + number)
print("Text: " + text)
# Open the WhatsApp click-to-chat URL in Chrome on the connected device.
package = 'com.android.chrome'
activity = 'com.google.android.apps.chrome.Main'
component = package + "/" + activity
uri = 'https://api.whatsapp.com/send?phone=' + number
device, serialno = ViewClient.connectToDeviceOrExit()
vc = ViewClient(device=device, serialno=serialno)
device.startActivity(component=component, uri=uri)
vc.sleep(3)  # give the page / app handoff time to load
device.type(text)
# Re-dump the view hierarchy, then tap WhatsApp's send button.
vc = ViewClient(device=device, serialno=serialno)
send = vc.findViewByIdOrRaise('com.whatsapp:id/send')
send.touch()
| Python | 0.000302 | |
cec1ec8367c83e540b9a9cfbfeac2a576cdf357b | add send.py | send.py | send.py | """
Example:
switch type A:
sudo python send.py -c off -t A -s 11111,11111 -p 0
switch type B:
sudo python send.py -c off -t B -s 1,3 -p 0
switch type C:
sudo python send.py -c off -t C -s a,1,1 -p 0
switch type D:
sudo python send.py -c off -t D -s A,1 -p 0
"""
import argparse
import sys
try:
import pi_switch
except ImportError:
print "pi_switch import error!"
#sys.exit()
def create_switch(type, settings, pin):
    """Create a switch.

    Args:
        type: (str): type of the switch [A,B,C,D]
        settings (str): a comma separated list
        pin (int): wiringPi pin

    Returns:
        switch

    Exits the process if *type* is not one of A/B/C/D.
    """
    # NOTE(review): the parameter name ``type`` shadows the builtin.
    switch = None
    if type == "A":
        # A: 5-bit group code + 5-bit device code, both kept as strings.
        group, device = settings.split(",")
        switch = pi_switch.RCSwitchA(group, device)
    elif type == "B":
        addr, channel = settings.split(",")
        addr = int(addr)
        channel = int(channel)
        switch = pi_switch.RCSwitchB(addr, channel)
    elif type == "C":
        # C: family letter stays a string; group/device are numeric.
        family, group, device = settings.split(",")
        group = int(group)
        device = int(device)
        switch = pi_switch.RCSwitchC(family, group, device)
    elif type == "D":
        group, device = settings.split(",")
        device = int(device)
        switch = pi_switch.RCSwitchD(group, device)
    else:
        print "Type %s is not supported!" % type
        sys.exit()

    switch.enableTransmit(pin)
    return switch
def toggle(switch, command):
    """Toggles a switch on or off.

    Args:
        switch (switch): a switch
        command (str): "on" or "off" (anything else is ignored)
    """
    # Direct equality replaces the original one-element list membership
    # tests (`command in ["on"]`); `elif` makes the mutual exclusion explicit.
    if command == "on":
        switch.switchOn()
    elif command == "off":
        switch.switchOff()
def main():
    """Parse the command line, build the requested switch and toggle it."""
    parser = argparse.ArgumentParser(description="Send off / on commands to a remote power socket.")
    parser.add_argument("-c", dest = "command", metavar = "command", nargs = "?",
                        help="can be on or off")
    parser.add_argument("-t", dest = "type", metavar = "type", nargs = "?",
                        help="type of the switch: A, B, C or D")
    # Typo fix: "value1,value2,value2" -> "value1,value2,value3".
    parser.add_argument("-s", dest = "settings", metavar = "settings", nargs = "?",
                        help="settings as a comma separated list: value1,value2,value3")
    # Typo fix: "wriningPi" -> "wiringPi".
    parser.add_argument("-p", dest = "pin", metavar = "pin", type = int, nargs = "?",
                        help="wiringPi pin")
    args = parser.parse_args()

    switch = create_switch(args.type, args.settings, args.pin)
    toggle(switch, args.command)

if __name__ == "__main__":
    main()
| Python | 0.000001 | |
61f806ffc68c41dfbb926ea6825292eabed46966 | Add sorting code | sort.py | sort.py | #!/usr/bin/env python
import re
import sys
# Sort stdin lines by the exported-function name when a line matches the
# TBX_API prototype pattern, otherwise by the raw line itself.
# NOTE(review): the name ``sort`` shadows no builtin but ``sorted`` exists;
# a duplicate key (same function name twice) silently keeps only the last line.
sort = {}
regex = re.compile(r'TBX_API \w* \*?(\w*)\(.*')
for line in sys.stdin.readlines():
    result = regex.match(line)
    if not result:
        sort[line] = line          # non-prototype lines sort by full text
    else:
        sort[result.group(1)] = line  # prototypes sort by function name
for k in sorted(sort.keys()):
    sys.stdout.write(sort[k])
| Python | 0.000007 | |
3e885137d23e7618b78f207ecd6b2f6118a4a0dc | add a test file | test.py | test.py | #!/usr/bin/python
import cgi
cgi.test()  # emit a CGI diagnostics page (environment, form data) to stdout
| Python | 0.000001 | |
b6a55999cd0f6ff6a7d69b7eb59e859d415b275f | Add test.py with old-formatting test | test.py | test.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2014 Martine Lenders <mail@martine-lenders.eu>
#
# Distributed under terms of the MIT license.
# Deliberate old-style %-formatting expressions: this file exists as a
# fixture for a tool that detects/rewrites them, so do not "modernise" these.
"%s" % "test"
"%d" % 2
"%.4f" % 2.0
| Python | 0.000003 | |
f4d26567afc9185e0f9370eda43d30084437ade5 | Solve Code Fights make array consecutive 2 problem | CodeFights/makeArrayConsecutive2.py | CodeFights/makeArrayConsecutive2.py | #!/usr/local/bin/python
# Code Fights Make Array Consecutive 2 Problem
def makeArrayConsecutive2(statues):
    """Return how many statues must be added so the heights are consecutive.

    That is the size of the full range [min, max] minus the statues already
    present.  Computed arithmetically instead of materialising a throwaway
    ``range`` object just to take its length.
    """
    return max(statues) - min(statues) + 1 - len(statues)
def main():
    """Run makeArrayConsecutive2 against the fixed cases and report results."""
    cases = [
        ([6, 2, 3, 8], 3),
        ([0, 3], 2),
        ([5, 4, 6], 0),
        ([6, 3], 2),
        ([1], 0),
    ]
    for statues, expected in cases:
        actual = makeArrayConsecutive2(statues)
        if actual == expected:
            print("PASSED: makeArrayConsecutive2({}) returned {}"
                  .format(statues, actual))
        else:
            print("FAILED: makeArrayConsecutive2({}) returned {}, answer: {}"
                  .format(statues, actual, expected))


if __name__ == '__main__':
    main()
| Python | 0.998889 | |
06d8f4290cf433a538cef4851acefd6e42c8341d | Add simple example | examples/client.py | examples/client.py | #!/usr/bin/env python
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from accepton import Client
# SECURITY NOTE(review): this looks like a live secret key committed to the
# repository; it should be revoked and loaded from the environment instead.
API_KEY = 'skey_be064297e7b2db4b6ce5928e8dcad582'

# Create a $10.99 CAD token (with a $0.99 application fee) against the
# development environment and print it.
accepton = Client(api_key=API_KEY, environment='development')
token = accepton.create_token(amount=1099, application_fee=99, currency='cad',
                              description='Test charge')
print(token)
| Python | 0.000375 | |
96dd9b2968039be3fa87a30e8a16ed1c77be10bb | solve 94 | 94_BinaryTreeInorderTraversal.py | 94_BinaryTreeInorderTraversal.py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # @param {TreeNode} root
    # @return {integer[]}
    def inorderTraversal(self, root):
        """Iterative inorder traversal: descend left, visit, then go right."""
        result = []
        pending = []
        node = root
        while pending or node is not None:
            # Walk as far left as possible, stacking ancestors.
            while node is not None:
                pending.append(node)
                node = node.left
            # Visit the deepest unvisited node, then switch to its right side.
            node = pending.pop()
            result.append(node.val)
            node = node.right
        return result
| Python | 0.999999 | |
e8c0b17bb28f1212b302959144086d72c205bf4c | store toc list in optional file to make merging easier | publisher/conf.py | publisher/conf.py | import glob
import os
# All paths are derived from this module's location so the build works from
# any current working directory.
work_dir = os.path.dirname(__file__)
papers_dir = os.path.join(work_dir,'../papers')
output_dir = os.path.join(work_dir,'../output')
template_dir = os.path.join(work_dir,'_templates')
static_dir = os.path.join(work_dir,'_static')
css_file = os.path.join(static_dir,'scipy-proc.css')
toc_list = os.path.join(static_dir,'toc.txt')
build_dir = os.path.join(work_dir,'_build')
pdf_dir = os.path.join(build_dir, 'pdfs')
html_dir = os.path.join(build_dir, 'html')
bib_dir = os.path.join(html_dir, 'bib')
toc_conf = os.path.join(build_dir, 'toc.json')
proc_conf = os.path.join(work_dir,'../scipy_proc.json')

# Paper ordering: an explicit _static/toc.txt (one directory name per line)
# takes precedence; otherwise fall back to the sorted list of directories
# found under papers/.
if os.path.isfile(toc_list):
    with open(toc_list) as f:
        dirs = f.read().splitlines()
else:
    dirs = sorted([os.path.basename(d)
                   for d in glob.glob('%s/*' % papers_dir)
                   if os.path.isdir(d)])
| import glob
import os
work_dir = os.path.dirname(__file__)
papers_dir = os.path.join(work_dir,'../papers')
output_dir = os.path.join(work_dir,'../output')
template_dir = os.path.join(work_dir,'_templates')
static_dir = os.path.join(work_dir,'_static')
css_file = os.path.join(static_dir,'scipy-proc.css')
build_dir = os.path.join(work_dir,'_build')
pdf_dir = os.path.join(build_dir, 'pdfs')
html_dir = os.path.join(build_dir, 'html')
bib_dir = os.path.join(html_dir, 'bib')
toc_conf = os.path.join(build_dir, 'toc.json')
proc_conf = os.path.join(work_dir,'../scipy_proc.json')
dirs = sorted([os.path.basename(d)
for d in glob.glob('%s/*' % papers_dir)
if os.path.isdir(d)])
| Python | 0 |
f046bd8982f08a31448bb5e4e10ded2a14ea95b0 | Create __init__.py | iotqatools/__init__.py | iotqatools/__init__.py | Python | 0.000429 | ||
e4a33badd98c4c927c4128e22fd839f54711cfd6 | Create PedidoCadastrar.py | backend/Models/Predio/PedidoCadastrar.py | backend/Models/Predio/PedidoCadastrar.py | from Framework.Pedido import Pedido
from Framework.ErroNoHTTP import ErroNoHTTP
class PedidoCadastrar(Pedido):
    """Request object for the 'register building' (cadastrar predio) endpoint.

    Reads ``id`` and ``nome`` from the parsed request body and answers
    HTTP 400 when either field is missing or the body is malformed.
    """

    def __init__(self, variaveis_do_ambiente):
        super(PedidoCadastrar, self).__init__(variaveis_do_ambiente)
        try:
            self.id = self.corpo['id']
            self.nome = self.corpo['nome']
        except (KeyError, TypeError):
            # Narrowed from a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt and hid unrelated bugs.
            # KeyError: a field is missing; TypeError: body is not a mapping.
            raise ErroNoHTTP(400)

    def getId(self):
        """Return the building id taken from the request body."""
        return self.id

    def setNome(self, nome):
        """Replace the building name."""
        self.nome = nome

    def getNome(self):
        """Return the building name."""
        return self.nome
| Python | 0 | |
8a911b877c5ae196ce6e4cc7e6c284b742645bc8 | Update headers strategy to mimic internal bytes representation. | test/test_invalid_headers.py | test/test_invalid_headers.py | # -*- coding: utf-8 -*-
"""
test_invalid_headers.py
~~~~~~~~~~~~~~~~~~~~~~~
This module contains tests that use invalid header blocks, and validates that
they fail appropriately.
"""
import pytest
import h2.connection
import h2.errors
import h2.events
import h2.exceptions
import h2.utilities
from hypothesis import given
from hypothesis.strategies import binary, lists, tuples
# Arbitrary (name, value) byte-string pairs for Hypothesis-driven fuzzing,
# mirroring h2's internal bytes representation of header blocks.
HEADERS_STRATEGY = lists(tuples(binary(), binary()))
class TestInvalidFrameSequences(object):
    """
    Invalid header sequences cause ProtocolErrors to be thrown when received.
    """
    # A minimal, fully valid GET request; each invalid block below corrupts
    # it in exactly one way.
    base_request_headers = [
        (':authority', 'example.com'),
        (':path', '/'),
        (':scheme', 'https'),
        (':method', 'GET'),
        ('user-agent', 'someua/0.0.1'),
    ]
    invalid_header_blocks = [
        base_request_headers + [('Uppercase', 'name')],
        base_request_headers + [(':late', 'pseudo-header')],
        [(':path', 'duplicate-pseudo-header')] + base_request_headers,
        base_request_headers + [('connection', 'close')],
        base_request_headers + [('proxy-connection', 'close')],
        base_request_headers + [('keep-alive', 'close')],
        base_request_headers + [('transfer-encoding', 'gzip')],
        base_request_headers + [('upgrade', 'super-protocol/1.1')],
        base_request_headers + [('te', 'chunked')],
    ]

    @pytest.mark.parametrize('headers', invalid_header_blocks)
    def test_headers_event(self, frame_factory, headers):
        """
        Test invalid headers are rejected with PROTOCOL_ERROR.
        """
        # Server-side connection; frame_factory is a test fixture that builds
        # raw HTTP/2 frames.
        c = h2.connection.H2Connection(client_side=False)
        c.receive_data(frame_factory.preamble())
        c.clear_outbound_data_buffer()

        f = frame_factory.build_headers_frame(headers)
        data = f.serialize()

        with pytest.raises(h2.exceptions.ProtocolError):
            c.receive_data(data)

        # The connection must also have queued a GOAWAY with PROTOCOL_ERROR.
        expected_frame = frame_factory.build_goaway_frame(
            last_stream_id=0, error_code=h2.errors.PROTOCOL_ERROR
        )
        assert c.data_to_send() == expected_frame.serialize()

    def test_transfer_encoding_trailers_is_valid(self, frame_factory):
        """
        Transfer-Encoding trailers is allowed by the filter.
        """
        # 'te: trailers' is the one te value RFC 7540 permits.
        headers = (
            self.base_request_headers + [('te', 'trailers')]
        )

        c = h2.connection.H2Connection(client_side=False)
        c.receive_data(frame_factory.preamble())

        f = frame_factory.build_headers_frame(headers)
        data = f.serialize()

        events = c.receive_data(data)
        assert len(events) == 1
        request_event = events[0]
        assert request_event.headers == headers
class TestFilter(object):
    """
    Test the filter function directly.

    These tests exists to confirm the behaviour of the filter function in a
    wide range of scenarios. Many of these scenarios may not be legal for
    HTTP/2 and so may never hit the function, but it's worth validating that it
    behaves as expected anyway.
    """
    @given(HEADERS_STRATEGY)
    def test_range_of_acceptable_outputs(self, headers):
        """
        validate_headers either returns the data unchanged or throws a
        ProtocolError.
        """
        try:
            validated = h2.utilities.validate_headers(headers)
        except h2.exceptions.ProtocolError:
            # Rejection is an acceptable outcome for arbitrary input.
            pass
        else:
            assert validated == headers
| # -*- coding: utf-8 -*-
"""
test_invalid_headers.py
~~~~~~~~~~~~~~~~~~~~~~~
This module contains tests that use invalid header blocks, and validates that
they fail appropriately.
"""
import pytest
import h2.connection
import h2.errors
import h2.events
import h2.exceptions
import h2.utilities
from hypothesis import given
from hypothesis.strategies import text, lists, tuples
HEADERS_STRATEGY = lists(tuples(text(), text()))
class TestInvalidFrameSequences(object):
"""
Invalid header sequences cause ProtocolErrors to be thrown when received.
"""
base_request_headers = [
(':authority', 'example.com'),
(':path', '/'),
(':scheme', 'https'),
(':method', 'GET'),
('user-agent', 'someua/0.0.1'),
]
invalid_header_blocks = [
base_request_headers + [('Uppercase', 'name')],
base_request_headers + [(':late', 'pseudo-header')],
[(':path', 'duplicate-pseudo-header')] + base_request_headers,
base_request_headers + [('connection', 'close')],
base_request_headers + [('proxy-connection', 'close')],
base_request_headers + [('keep-alive', 'close')],
base_request_headers + [('transfer-encoding', 'gzip')],
base_request_headers + [('upgrade', 'super-protocol/1.1')],
base_request_headers + [('te', 'chunked')],
]
@pytest.mark.parametrize('headers', invalid_header_blocks)
def test_headers_event(self, frame_factory, headers):
"""
Test invalid headers are rejected with PROTOCOL_ERROR.
"""
c = h2.connection.H2Connection(client_side=False)
c.receive_data(frame_factory.preamble())
c.clear_outbound_data_buffer()
f = frame_factory.build_headers_frame(headers)
data = f.serialize()
with pytest.raises(h2.exceptions.ProtocolError):
c.receive_data(data)
expected_frame = frame_factory.build_goaway_frame(
last_stream_id=0, error_code=h2.errors.PROTOCOL_ERROR
)
assert c.data_to_send() == expected_frame.serialize()
def test_transfer_encoding_trailers_is_valid(self, frame_factory):
"""
Transfer-Encoding trailers is allowed by the filter.
"""
headers = (
self.base_request_headers + [('te', 'trailers')]
)
c = h2.connection.H2Connection(client_side=False)
c.receive_data(frame_factory.preamble())
f = frame_factory.build_headers_frame(headers)
data = f.serialize()
events = c.receive_data(data)
assert len(events) == 1
request_event = events[0]
assert request_event.headers == headers
class TestFilter(object):
"""
Test the filter function directly.
These tests exists to confirm the behaviour of the filter function in a
wide range of scenarios. Many of these scenarios may not be legal for
HTTP/2 and so may never hit the function, but it's worth validating that it
behaves as expected anyway.
"""
@given(HEADERS_STRATEGY)
def test_range_of_acceptable_outputs(self, headers):
"""
validate_headers either returns the data unchanged or throws a
ProtocolError.
"""
try:
assert headers == h2.utilities.validate_headers(headers)
except h2.exceptions.ProtocolError:
assert True
| Python | 0 |
82152af00c54ea94a4e8cd90d3cd5f45ef28ee86 | add missing unit test file | test/test_utils.py | test/test_utils.py | # coding=utf-8
from __future__ import unicode_literals
import os
import codecs
from nose.tools import eq_
from pyecharts.utils import (
freeze_js,
write_utf8_html_file,
get_resource_dir
)
def test_get_resource_dir():
    path = get_resource_dir('templates')
    # NOTE(review): the expectation is built from os.getcwd(), so this test
    # only passes when run from a specific directory — consider deriving the
    # expected path from the pyecharts package location instead.
    expected = os.path.join(os.getcwd(), '..', 'pyecharts', 'templates')
    eq_(path, os.path.abspath(expected))
def test_freeze_js():
    # Minimal HTML fragment containing the build/endbuild script markers that
    # freeze_js() is expected to replace with inlined JS.
    html_content = """
</style>
<!-- build -->
<script src="js/echarts/echarts.min.js"></script>
<script src="js/echarts/echarts-wordcloud.min.js"></script>
<!-- endbuild -->
</head><body>"""
    html_content = freeze_js(html_content)
    # Both referenced libraries should now be embedded in the output.
    assert 'exports.echarts' in html_content
    assert 'echarts-wordcloud' in html_content
def test_write_utf8_html_file():
    """Round-trip a non-ASCII string through write_utf8_html_file."""
    content = "柱状图数据堆叠示例"
    file_name = 'test.html'
    try:
        write_utf8_html_file(file_name, content)
        # 'with' closes the handle; the original leaked it.
        with codecs.open(file_name, 'r', 'utf-8') as f:
            actual_content = f.read()
        eq_(content, actual_content)
    finally:
        # The original left test.html behind in the working directory.
        if os.path.exists(file_name):
            os.remove(file_name)
| Python | 0 | |
9c52dae7f5de64865fff51a24680c43e041376ea | Add random_subtree script | random_subtree.py | random_subtree.py | #!/usr/bin/env python2
# Use either ete2 or ete3
try:
import ete3 as ete
except ImportError:
import ete2 as ete
import numpy as np
CLI = """
USAGE:
random_subtree <tree> <n>
Subsamples <n> taxa from the Newick tree in <tree>, preserving the branch
lengths of subsampled taxa.
"""
def main(treefile, n):
    """Print a random n-leaf subtree of the Newick tree in *treefile*,
    preserving branch lengths of the retained taxa.
    """
    n = int(n)
    tree = ete.Tree(treefile)
    leaves = tree.get_leaf_names()
    # BUG FIX: the arguments to np.random.choice were swapped — the original
    # drew len(tree) indices (with replacement) from range(n), instead of
    # drawing n distinct leaf indices as the docstring promises.
    chosen = np.random.choice(len(leaves), size=n, replace=False)
    subsample = [leaves[i] for i in chosen]
    tree.prune(subsample, preserve_branch_length=True)
    print(tree.write())
if __name__ == "__main__":
    import docopt  # imported lazily so main() is usable without docopt installed
    opts = docopt.docopt(CLI)
    main(opts['<tree>'], int(opts['<n>']))
| Python | 0.000001 | |
3c1e61b4b47ec244e4cadd4bf34e0a21cf1ff7e1 | Create w3_1.py | w3_1.py | w3_1.py | print("第三週")
| Python | 0.000482 | |
8bb9d6cbe161654126bb3aa3adecdb99ee0d9987 | Create sct4.py | sct4.py | sct4.py | from mpi4py import MPI
# MPI demo (Python 2): every rank reports its rank, the world size, and a
# rank-dependent power of nine.
comm = MPI.COMM_WORLD  # default communicator spanning all launched processes
rank=comm.rank  # this process's index within the communicator
size=comm.size  # total number of processes in the job
print 'Rank:',rank
print 'Node Count:',size
# Each rank prints a distinct value: 9**(rank+3).
print 9**(rank+3)
| Python | 0.000003 | |
aafd823069176075b4810496ee98cea3203b5652 | Make a command to make subsets. Subsets are useful for testing during development. | build_time/src/make_subset.py | build_time/src/make_subset.py | """
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
import sys
from fontTools.subset import Options, load_font, Subsetter, save_font
def main(args):
    """Subset a font (useful for making small test fonts).

    The subset keeps the union of the glyphs reachable from --text,
    --unicodes and --glyphs (plus .notdef), and is written next to the
    input file with a '_subset' suffix.

    Arguments:
      fontfile        input font file
      --text TEXT     text whose glyphs to include
      --unicodes LIST comma separated hex codepoints; eg "e7,0xe8,U+00e9"
      --glyphs LIST   comma separated decimal glyph IDs; eg "1,27"
      --hinting       keep hinting (dropped by default)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('fontfile', help='Input font file')
    parser.add_argument('--text', default='',
                        help='Text to include in the subset')
    parser.add_argument('--unicodes', default='',
                        help='Comma separated list of Unicode codepoints (hex) '
                        'to include in the subset; eg, "e7,0xe8,U+00e9"')
    parser.add_argument('--glyphs', default='',
                        help='Comma separated list of glyph IDs (decimal) to '
                        'include in the subset; eg, "1,27"')
    parser.add_argument('--hinting', default=False, action='store_true',
                        help='Enable hinting if specified, no hinting if not '
                        'present')
    cmd_args = parser.parse_args(args)

    options = Options()
    # Definitely want the .notdef glyph and outlines.
    options.notdef_glyph = True
    options.notdef_outline = True
    options.hinting = cmd_args.hinting  # False => drop hinting instructions

    # Gather the items to keep in the subset.
    text = cmd_args.text
    unicodes_str = cmd_args.unicodes.lower().replace('0x', '').replace('u+', '')
    unicodes = [int(c, 16) for c in unicodes_str.split(',') if c]
    glyphs = [int(c) for c in cmd_args.glyphs.split(',') if c]

    # Write <name>_subset<ext> alongside the input font.  Fixes two defects:
    # 'dir' shadowed the builtin, and the manual "dir + '/' + filename" join
    # produced an absolute path ('/name_subset.ext') whenever the input had
    # no directory component, because os.path.dirname('f.ttf') == ''.
    fontfile = cmd_args.fontfile
    out_dir = os.path.dirname(fontfile)
    filename, extension = os.path.splitext(os.path.basename(fontfile))
    output_file = os.path.join(out_dir, filename + '_subset' + extension)

    font = load_font(fontfile, options, lazy=False)
    subsetter = Subsetter(options)
    subsetter.populate(text=text, unicodes=unicodes, glyphs=glyphs)
    subsetter.subset(font)
    save_font(font, output_file, options)
if __name__ == '__main__':
main(sys.argv[1:])
| Python | 0 | |
01f4aedac1df6f2e55c76d60c52d1e0c5ccfd9f2 | Revert "Delete test file" | tests/mock_vws/test_query.py | tests/mock_vws/test_query.py | """
Tests for the mock of the query endpoint.
https://library.vuforia.com/articles/Solution/How-To-Perform-an-Image-Recognition-Query.
"""
import io
from typing import Any, Dict
from urllib.parse import urljoin
import pytest
import requests
from requests import codes
from requests_mock import POST
from tests.utils import VuforiaDatabaseKeys
from vws._request_utils import authorization_header, rfc_1123_date
@pytest.mark.usefixtures('verify_mock_vuforia')
class TestQuery:
    """
    Tests for the query endpoint.
    """
    def test_no_results(
        self,
        vuforia_database_keys: VuforiaDatabaseKeys,
        high_quality_image: io.BytesIO,
    ) -> None:
        """
        A query with an image that matches no targets returns a successful,
        empty result set (plus a query_id).
        """
        image_content = high_quality_image.read()
        content_type = 'multipart/form-data'
        query: Dict[str, Any] = {}
        date = rfc_1123_date()
        request_path = '/v1/query'
        url = urljoin('https://cloudreco.vuforia.com', request_path)
        files = {'image': ('image.jpeg', image_content, 'image/jpeg')}
        # Build the request without an Authorization header first: the
        # signature must be computed over the *prepared* multipart body,
        # so the body has to exist before we can sign it.
        request = requests.Request(
            method=POST,
            url=url,
            headers={},
            data=query,
            files=files,
        )
        prepared_request = request.prepare()  # type: ignore
        # Sign with the *client* key pair (queries use client keys, not the
        # server keys used by the target-management API).  Note the bare
        # media type is signed, not the full multipart header with boundary.
        authorization_string = authorization_header(
            access_key=vuforia_database_keys.client_access_key,
            secret_key=vuforia_database_keys.client_secret_key,
            method=POST,
            content=prepared_request.body,
            content_type=content_type,
            date=date,
            request_path=request_path,
        )
        # Merge the auth/date headers into the already-prepared request;
        # re-preparing only the headers leaves the signed body untouched.
        headers = {
            **prepared_request.headers,
            'Authorization': authorization_string,
            'Date': date,
        }
        prepared_request.prepare_headers(headers=headers)
        session = requests.Session()
        response = session.send(request=prepared_request)  # type: ignore
        assert response.status_code == codes.OK
        assert response.json()['result_code'] == 'Success'
        assert response.json()['results'] == []
        assert 'query_id' in response.json()
| Python | 0 | |
bf577257f4d34ac15642cb92efba00d750e9cb66 | test added | tests/heisenbug.py | tests/heisenbug.py |
import os
import sys
import time
import logging
import uuid
import saga
import pilot
import traceback
#------------------------------------------------------------------------------
# Redis password and eThread secret key details aquired from the environment
COORD = os.environ.get('COORDINATION_URL')
ACCESS_KEY_ID = os.environ.get('ETHREAD_ACCESS_KEY_ID')
SECRET_ACCESS_KEY= os.environ.get('ETHREAD_SECRET_ACCESS_KEY')
#------------------------------------------------------------------------------
# The coordination server
#COORD = "redis://ILikeBigJob_wITH-REdIS@gw68.quarry.iu.teragrid.org:6379"
# The host (+username) to run BigJob on
EC2url = "aws.amazon.com"
S3url = ""
# The queue on the remote system
#QUEUE = "normal"
# The working directory on the remote cluster / machine
#WORKDIR = "/home1/02554/sagatut/XSEDETutorial/%s/example1" % USER_NAME
WORKDIR = "/home/ubuntu/NY/"
SSHKEYFILE = "/home/merzky/.ssh/id_rsa_futuregrid"
#WORKDIR2 = "/home/anjani/bigjob_test/test_saga-bigjob/agent/SE_2"
# The number of jobs you want to run
NUMBER_JOBS = 1
AMI = "ami-d63572bf"
VM = "t1.micro"
KEY = "fg_andre"
USER = "ubuntu"
REGION = "us-east-1a"
#------------------------------------------------------------------------------
#
def main():
pilotjob1 = None
pilot_compute_service1 = None
pilotjob2 = None
pilot_compute_service2 = None
try:
# this describes the parameters and requirements for our pilot job1
pilot_compute_description_amazon_west1 = pilot.PilotComputeDescription()
pilot_compute_description_amazon_west1 = {
"service_url": 'ec2+ssh://%s' % EC2url,
"number_of_processes": 2,
"vm_id":AMI,
"vm_ssh_username": USER,
"vm_ssh_keyname": KEY,
"vm_ssh_keyfile": SSHKEYFILE,
"vm_type":VM,
"region" :REGION,
"access_key_id":ACCESS_KEY_ID,
"secret_access_key":SECRET_ACCESS_KEY,
# "affinity_machine_label": ""
}
# create a new pilot job1
pilot_compute_service1 = pilot.PilotComputeService(COORD)
pilotjob1 = pilot_compute_service1.create_pilot(pilot_compute_description_amazon_west1)
# this describes the parameters and requirements for our pilot job2
pilot_compute_description_amazon_west2 = pilot.PilotComputeDescription()
pilot_compute_description_amazon_west2 = {
"service_url": 'ec2+ssh://%s' % EC2url,
"number_of_processes": 2,
"vm_id": AMI,
"vm_ssh_username": USER,
"vm_ssh_keyname": KEY,
"vm_ssh_keyfile": SSHKEYFILE,
"vm_type": VM,
"region" : REGION,
"access_key_id":ACCESS_KEY_ID,
"secret_access_key":SECRET_ACCESS_KEY,
# "affinity_machine_label": ""
}
# create a new pilot job2
pilot_compute_service2 = pilot.PilotComputeService(COORD)
pilotjob2 = pilot_compute_service2.create_pilot(pilot_compute_description_amazon_west2)
# submit tasks1 to pilot job1
tasks1 = list()
for i in range(NUMBER_JOBS):
task_desc1 = pilot.ComputeUnitDescription()
task_desc1.working_directory = WORKDIR
task_desc1.executable = '/bin/echo'
task_desc1.arguments = ['I am task number $TASK_NO from pj1', ]
task_desc1.environment = {'TASK_NO': i}
task_desc1.number_of_processes = 1
task_desc1.output = 'stdout1.txt'
task_desc1.error = 'stderr1.txt'
task1 = pilotjob1.submit_compute_unit(task_desc1)
print "* Submitted task '%s' with id '%s' to %s" % (i, task1.get_id(), EC2url)
tasks1.append(task1)
print "Waiting for tasks to finish..."
pilotjob1.wait()
# submit tasks2 to pilot job2
tasks2 = list()
for i in range(NUMBER_JOBS):
task_desc2 = pilot.ComputeUnitDescription()
task_desc2.working_directory = WORKDIR
task_desc2.executable = '/bin/echo'
task_desc2.arguments = ['I am task number $TASK_NO from pj2', ]
task_desc2.environment = {'TASK_NO': i}
task_desc2.number_of_processes = 1
task_desc2.output = 'stdout2.txt'
task_desc2.error = 'stderr2.txt'
task2 = pilotjob2.submit_compute_unit(task_desc2)
print "* Submitted task '%s' with id '%s' to %s" % (i, task2.get_id(), EC2url)
tasks2.append(task2)
print "Waiting for tasks to finish..."
pilotjob2.wait()
return(0)
except Exception, ex:
print "AN ERROR OCCURED: %s" % ((str(ex)))
# print a stack trace in case of an exception -
# this can be helpful for debugging the problem
traceback.print_exc()
return(-1)
finally:
# alway try to shut down pilots, otherwise jobs might end up
# lingering in the queue
print ("Terminating BigJob...")
if pilotjob1 :
pilotjob1.cancel()
if pilot_compute_service1 :
pilot_compute_service1.cancel()
if pilotjob2 :
pilotjob2.cancel()
if pilot_compute_service2 :
pilot_compute_service2.cancel()
if __name__ == "__main__":
sys.exit(main())
| Python | 0 | |
793344ae359f028db950a364d48578ae97cb7028 | Add tests for jenkins_job_linter.test_jjb_subcommand | tests/test_jjb_subcommand.py | tests/test_jjb_subcommand.py | from jenkins_job_linter.jjb_subcommand import LintSubCommand
class TestParseArgs(object):
    """Tests for LintSubCommand.parse_args."""

    def test_parser_named_lint(self, mocker):
        # parse_args must register exactly one sub-parser, named 'lint'.
        subparsers = mocker.Mock()
        subcommand = LintSubCommand()
        subcommand.parse_args(subparsers)
        assert subparsers.add_parser.call_count == 1
        assert subparsers.add_parser.call_args == mocker.call('lint')

    def test_args_added_to_parser(self, mocker):
        # Every inherited parse_* helper must be invoked once, with the
        # newly created sub-parser as its argument.
        helper_names = [
            'parse_arg_names', 'parse_arg_path',
            'parse_option_recursive_exclude']
        subcommand = LintSubCommand()
        patched = []
        for helper_name in helper_names:
            helper_mock = mocker.Mock()
            setattr(subcommand, helper_name, helper_mock)
            patched.append(helper_mock)
        subparsers = mocker.Mock()
        subcommand.parse_args(subparsers)
        lint_parser = subparsers.add_parser.return_value
        for helper_mock in patched:
            assert helper_mock.call_count == 1
            assert helper_mock.call_args == mocker.call(lint_parser)
class TestExecute(object):
    """Tests for LintSubCommand.execute."""
    def test_arguments_passed_through(self, mocker):
        # execute() must delegate to the parent test.TestSubCommand.execute
        # with the same options/jjb_config it received.
        super_execute_mock = mocker.patch(
            'jenkins_job_linter.jjb_subcommand.test.TestSubCommand.execute')
        options, jjb_config = mocker.Mock(), mocker.Mock()
        subcommand = LintSubCommand()
        subcommand.execute(options, jjb_config)
        assert 1 == super_execute_mock.call_count
        assert mocker.call(options, jjb_config) == super_execute_mock.call_args
    def test_config_xml_set_to_false(self, mocker):
        # Before delegating, execute() must force options.config_xml off.
        super_execute_mock = mocker.patch(
            'jenkins_job_linter.jjb_subcommand.test.TestSubCommand.execute')
        options = mocker.Mock()
        subcommand = LintSubCommand()
        subcommand.execute(options, mocker.Mock())
        assert super_execute_mock.call_args[0][0].config_xml is False
    def _get_tmpdir_mock(self, mocker):
        # Helper: patch tempfile.TemporaryDirectory and return the mock for
        # the directory path yielded by its context manager.
        temporary_directory_mock = mocker.patch(
            'jenkins_job_linter.jjb_subcommand.tempfile.TemporaryDirectory')
        return temporary_directory_mock.return_value.__enter__.return_value
    def test_tmpdir_used_as_output_dir(self, mocker):
        # The generated jobs must be written into the temporary directory.
        mocker.patch(
            'jenkins_job_linter.jjb_subcommand.lint_jobs_from_directory')
        super_execute_mock = mocker.patch(
            'jenkins_job_linter.jjb_subcommand.test.TestSubCommand.execute')
        tmpdir_mock = self._get_tmpdir_mock(mocker)
        options = mocker.Mock()
        subcommand = LintSubCommand()
        subcommand.execute(options, mocker.Mock())
        assert super_execute_mock.call_args[0][0].output_dir == tmpdir_mock
    def test_lint_jobs_from_directory_called_with_tmpdir(self, mocker):
        # The linter must be pointed at the same temporary directory.
        lint_jobs_mock = mocker.patch(
            'jenkins_job_linter.jjb_subcommand.lint_jobs_from_directory')
        mocker.patch(
            'jenkins_job_linter.jjb_subcommand.test.TestSubCommand.execute')
        tmpdir_mock = self._get_tmpdir_mock(mocker)
        subcommand = LintSubCommand()
        # NOTE(review): mocker.Mock (the class, no parens) is passed as
        # options here; it works because execute never reads it before the
        # patched super call, but an instance was probably intended.
        subcommand.execute(mocker.Mock, mocker.Mock())
        assert 1 == lint_jobs_mock.call_count
        assert lint_jobs_mock.call_args[0][0] == tmpdir_mock
    def test_lint_jobs_from_directory_called_with_jjb_config_config_parser(
            self, mocker):
        # The linter must receive the config parser from the jjb_config.
        lint_jobs_mock = mocker.patch(
            'jenkins_job_linter.jjb_subcommand.lint_jobs_from_directory')
        mocker.patch(
            'jenkins_job_linter.jjb_subcommand.test.TestSubCommand.execute')
        jjb_config = mocker.Mock()
        subcommand = LintSubCommand()
        # NOTE(review): same mocker.Mock-without-parens pattern as above.
        subcommand.execute(mocker.Mock, jjb_config)
        assert 1 == lint_jobs_mock.call_count
        assert lint_jobs_mock.call_args[0][1] == jjb_config.config_parser
| Python | 0.000001 | |
5a546b2b4c4c8ccf7f44a75bd07c32888fc5bdf5 | add perftests script | tests/perftests.py | tests/perftests.py | #!/usr/bin/env python
#
# Author: Vincenzo Maffione <v.maffione@gmail.com>
#
import multiprocessing
import subprocess
import argparse
import re
import os
import pickle
def stats_init(x):
    """Reset every measurement series in the stats dict *x* to an empty list."""
    for series in ('kpps', 'mbps', 'packets', 'transactions', 'latency'):
        x[series] = []
description = "Python script to perform automated tests based on rinaperf"
epilog = "2017 Vincenzo Maffione <v.maffione@gmail.com>"
argparser = argparse.ArgumentParser(description = description,
epilog = epilog)
argparser.add_argument('--size-min', type = int, default = 2,
help = "Minimum size for the test")
argparser.add_argument('--size-max', type = int, default = 1400,
help = "Maximum size for the test")
argparser.add_argument('--size-step', type = int, default = 10,
help = "Packet size increment")
argparser.add_argument('--trials', type = int, default = 3,
help = "Number of trials for each combination of parameters")
argparser.add_argument('--count', type = int, default = 100000,
help = "Packet/transaction count for each test")
argparser.add_argument('-f', '--flow-control', action='store_true',
help = "Enable flow control")
argparser.add_argument('-g', '--max-sdu-gap', type = int, default = -1,
help = "Max SDU gap")
argparser.add_argument('-t', '--test-type', type = str, default = "perf",
help = "Test type", choices = ["perf", "rr"])
argparser.add_argument('--load', type = str, help = "Dump file to recover")
args = argparser.parse_args()
if args.load:
    # Resume a previous run: the dump file holds two pickled dicts,
    # sender stats first then receiver stats (same order as the writer
    # at the bottom of this script).
    fin = open(args.load, 'rb')
    sndstats = pickle.load(fin)
    rcvstats = pickle.load(fin)
    fin.close()
    print("Restarting from")
    print(sndstats)
    print(rcvstats)
else:
    # Fresh run: start every measurement series empty.
    sndstats = dict()
    rcvstats = dict()
    stats_init(sndstats)
    stats_init(rcvstats)
# build QoS
# Translate the QoS-related CLI options into extra rinaperf flags.
qos = ""
if args.flow_control:
    qos += " -f"
if args.max_sdu_gap >= 0:
    qos += " -g %s" % args.max_sdu_gap
# Sweep the SDU size range; each size is measured args.trials times by
# shelling out to rinaperf and scraping its tabular report.  Ctrl-C stops
# the sweep early but still dumps whatever was collected.
try:
    for sz in range(args.size_min, args.size_max, args.size_step):
        cmd = ("rinaperf -s %s -t %s -c %s %s"
               % (sz, args.test_type, args.count, qos))
        print("Running: %s" % cmd)
        for t in range(args.trials):
            out = subprocess.check_output(cmd.split())
            out = out.decode('ascii')
            outl = out.split('\n')
            if args.test_type == 'perf':
                # assumes rinaperf 'perf' output has the Sender row on line 3
                # and the Receiver row on line 4 -- TODO confirm format
                if len(outl) < 4:
                    print(out)
                    continue
                # Sender row: <packets> <Kpps> <Mbps>
                m = re.match(r'^Sender\s+(\d+)\s+(\d+\.?\d*)\s+(\d+\.?\d*)', outl[2])
                if m is None:
                    print(out)
                    continue
                packets = int(m.group(1))
                kpps = float(m.group(2))
                mbps = float(m.group(3))
                sndstats['packets'].append(packets)
                sndstats['kpps'].append(kpps)
                sndstats['mbps'].append(mbps)
                # Receiver row: same three columns.
                m = re.match(r'^Receiver\s+(\d+)\s+(\d+\.?\d*)\s+(\d+\.?\d*)', outl[3])
                if m is None:
                    print(out)
                    continue
                packets = int(m.group(1))
                kpps = float(m.group(2))
                mbps = float(m.group(3))
                rcvstats['packets'].append(packets)
                rcvstats['kpps'].append(kpps)
                rcvstats['mbps'].append(mbps)
                print("%d/%d pkts %.3f/%.3f Kpps %.3f/%.3f Mbps" %
                      (sndstats['packets'][-1], rcvstats['packets'][-1],
                       sndstats['kpps'][-1], rcvstats['kpps'][-1],
                       sndstats['mbps'][-1], rcvstats['mbps'][-1]))
            elif args.test_type == 'rr':
                # 'rr' output: only the Sender row, with a trailing latency
                # column in nanoseconds.
                if len(outl) < 3:
                    print(out)
                    continue
                m = re.match(r'^Sender\s+(\d+)\s+(\d+\.?\d*)\s+(\d+\.?\d*)\s+(\d+)', outl[2])
                if m is None:
                    print(out)
                    continue
                transactions = int(m.group(1))
                kpps = float(m.group(2))
                mbps = float(m.group(3))
                latency = int(m.group(4))
                sndstats['transactions'].append(transactions)
                sndstats['kpps'].append(kpps)
                sndstats['mbps'].append(mbps)
                sndstats['latency'].append(latency)
                print("%d transactions %.3f Kpps %.3f Mbps %d ns" %
                      (sndstats['transactions'][-1], sndstats['kpps'][-1],
                       sndstats['mbps'][-1], sndstats['latency'][-1]))
            else:
                # argparse 'choices' should make this unreachable.
                assert(False)
except KeyboardInterrupt:
    pass
# dump results
# Pickle order (snd then rcv) must match the loader near the top of the file.
fout = open('perftests.dump', 'wb')
pickle.dump(sndstats, fout)
pickle.dump(rcvstats, fout)
fout.close()
| Python | 0.000002 | |
2e330d5cd2ad033c675d5888a2f43e0f846a4df1 | Add CodeDeploy | troposphere/codedeploy.py | troposphere/codedeploy.py | # Copyright (c) 2015, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty
from .validators import positive_integer
KEY_ONLY = "KEY_ONLY"
VALUE_ONLY = "VALUE_ONLY"
KEY_AND_VALUE = "KEY_AND_VALUE"
class GitHubLocation(AWSProperty):
    # Revision source stored in a GitHub repository, pinned to a commit.
    props = {
        'CommitId': (basestring, True),
        'Repository': (basestring, True),
    }
class S3Location(AWSProperty):
    # Revision bundle stored in S3.  Per the CloudFormation CodeDeploy
    # resource reference only Bucket, BundleType and Key are required;
    # ETag and Version merely pin a specific object revision, so Version
    # is relaxed from required to optional here (backward compatible).
    props = {
        'Bucket': (basestring, True),
        'BundleType': (basestring, True),
        'ETag': (basestring, False),
        'Key': (basestring, True),
        'Version': (basestring, False),
    }
class Revision(AWSProperty):
    # Application revision: exactly one of the location properties is used,
    # selected by RevisionType ('GitHub' or 'S3').
    props = {
        'GitHubLocation': (GitHubLocation, False),
        'RevisionType': (basestring, False),
        'S3Location': (S3Location, False),
    }
class Deployment(AWSProperty):
    # Deployment launched when the deployment group is created.
    props = {
        'Description': (basestring, False),
        'IgnoreApplicationStopFailures': (bool, False),
        'Revision': (Revision, True),
    }
class Ec2TagFilters(AWSProperty):
    # EC2 instance tag filter; Type is one of the KEY_ONLY / VALUE_ONLY /
    # KEY_AND_VALUE constants defined at the top of this module.
    props = {
        'Key': (basestring, False),
        'Type': (basestring, False),
        'Value': (basestring, False),
    }
class OnPremisesInstanceTagFilters(AWSProperty):
    # On-premises instance tag filter; same Type constants as Ec2TagFilters.
    props = {
        'Key': (basestring, False),
        'Type': (basestring, False),
        'Value': (basestring, False),
    }
class MinimumHealthyHosts(AWSProperty):
    # Minimum healthy hosts constraint for a deployment config; Value is an
    # absolute host count or a percentage depending on Type.
    props = {
        'Type': (basestring, False),
        'Value': (positive_integer, False),
    }
class Application(AWSObject):
    # AWS::CodeDeploy::Application takes no properties; the application name
    # is derived from the logical resource name.
    resource_type = "AWS::CodeDeploy::Application"
    props = {
    }
class DeploymentConfig(AWSObject):
    # Custom deployment configuration describing how many hosts must stay
    # healthy during a deployment.
    resource_type = "AWS::CodeDeploy::DeploymentConfig"
    props = {
        'MinimumHealthyHosts': (MinimumHealthyHosts, False),
    }
class DeploymentGroup(AWSObject):
    # Fix: resource_type previously read "AWS::DirectoryService::DeploymentGroup"
    # (copy/paste from another module); this is a CodeDeploy resource and the
    # CloudFormation type name is AWS::CodeDeploy::DeploymentGroup.
    resource_type = "AWS::CodeDeploy::DeploymentGroup"

    props = {
        'ApplicationName': (basestring, True),
        'AutoScalingGroups': ([basestring], False),
        'Deployment': (Deployment, False),
        'DeploymentConfigName': (basestring, False),
        'Ec2TagFilters': (Ec2TagFilters, False),
        'OnPremisesInstanceTagFilters': (OnPremisesInstanceTagFilters, False),
        'ServiceRoleArn': (basestring, True),
    }
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.