text stringlengths 0 1.05M | meta dict |
|---|---|
"""A script to plot a circular array geometry with 20 telescopes, and output the array elements"""
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import scipy.signal as sig
import scipy.ndimage as nd
import pdb
#Parameters here
outfile = "circ_array.txt"  # output file for the telescope (x, y) positions
ntel = 12                   # number of telescopes in the ring
short_bl = 550              # shortest baseline (m) between neighbouring stations
fov_bl = 30 #To define the field of view.
jitter = -1 ## 400 ##-1 for redundant arrays...
extra_jitter = 5            # Gaussian scatter (m) added to every station position
r_circ = ntel*short_bl/2.0/np.pi  # ring radius that yields `short_bl` spacing
dphi = 2.0*np.pi/ntel             # nominal angular spacing between stations
wave=11e-6 #Wavelength in m
xc = np.zeros(ntel)         # telescope x coordinates (m), filled below
yc = np.zeros(ntel)         # telescope y coordinates (m), filled below
nrot = 59                   # number of sky-rotation steps accumulated into the UV plane
rotdeg = 1                  # rotation step per iteration (degrees)
zooms = [1.02,1.04,1.06,1.08,1.10]  # radial zoom factors (e.g. wavelength scaling)
sz = 1024                   # pixel size of the square UV / image grids
ring_rad = 0.15/140.        # ring radius used for the synthetic circumstellar ring
ring_flux = 2.5 #Ratio of ring to star
#----- Automatic from here ------
fov = np.degrees(wave/fov_bl)*3600.  # field of view (arcsec) set by fov_bl at `wave`
#Create the telescope positions xc and yc.
for i in range(ntel):
    #f(r) = 2r
    #int[f(r)] = r^2
    if (jitter > 0):
        # Random mode: offset each station by a radius drawn uniform-in-area
        # (hence the sqrt) up to `jitter` metres, at a random azimuth.
        rand_dr = np.sqrt(np.random.random())*jitter
        rand_dphi = np.random.random()*2.0*np.pi
        xc[i] = np.cos(i*dphi)*r_circ + np.cos(rand_dphi)*rand_dr
        yc[i] = np.sin(i*dphi)*r_circ + np.sin(rand_dphi)*rand_dr
    elif (jitter < 0):
        # Deterministic mode: perturb every 4-station group by fixed fractions
        # of dphi to break up the redundancy of a perfectly regular ring.
        if (i % 4 == 1):
            dphi_offset = -1 + 4*4/13
        elif (i % 4 == 2):
            dphi_offset = -2 + 5*4/13
        elif (i % 4 == 3):
            dphi_offset = -3 + 7*4/13
        else:
            dphi_offset = 0.0
        rand_dphi = dphi*dphi_offset
        xc[i] = np.cos(i*dphi + rand_dphi)*r_circ
        yc[i] = np.sin(i*dphi + rand_dphi)*r_circ
    else:
        # jitter == 0: a perfectly regular (redundant) ring.
        xc[i] = np.cos(i*dphi)*r_circ
        yc[i] = np.sin(i*dphi)*r_circ
    # Small Gaussian scatter applied in every mode.
    xc[i] += np.random.normal()*extra_jitter
    yc[i] += np.random.normal()*extra_jitter
#Add these telescopes to a UV plane.
uv_tel = np.zeros((sz,sz))
uv_tel[(xc/fov_bl).astype(int) + sz//2,(yc/fov_bl).astype(int) + sz//2]=1
#Create a uv plane from this
# |FFT|^2 of the aperture map gives the autocorrelation, i.e. the UV coverage.
uv = np.fft.fftshift(np.abs(np.fft.ifft2(np.abs(np.fft.fft2(uv_tel))**2)))
# Smooth the UV coverage slightly with a small 3x3 kernel.
a = sig.convolve(uv,[[0.5,1,0.5],[1,1,1],[0.5,1,0.5]], mode='same')
#Now create a uv plane with sky rotation.
a0 = a.copy()
for i in range(1,nrot):
    a += nd.interpolation.rotate(a0, rotdeg*i, reshape=False, order=1)
a1 = a.copy()
# Accumulate radially-zoomed copies (simulating e.g. wavelength scaling).
# NOTE(review): this loop starts at index 1, so zooms[0] (1.02) is never
# applied -- confirm whether that is intentional.
for i in range(1,len(zooms)):
    az = nd.interpolation.zoom(a1, zooms[i], order=1)
    sz0 = az.shape[0]
    # Crop the zoomed (larger) array back to the central sz x sz region.
    a += az[sz0//2-512:sz0//2+512,sz0//2-512:sz0//2+512]
# Figure 1: log-stretched UV coverage.
f1 = plt.figure(1)
f1.clf()
ax1 = f1.add_subplot(111, aspect='equal')
plt.imshow(np.log(np.maximum(a,.5)), cmap=cm.gray, interpolation='nearest')
ax1.set_xticks([])
ax1.set_yticks([])
# Figure 3: synthetic image of star + companion + ring convolved with the PSF.
f3 = plt.figure(3)
f3.clf()
ax = f3.add_subplot(111, aspect='equal')
# The PSF is the power spectrum of the accumulated UV coverage, normalised.
im_psf = np.fft.fft2(a)
im_psf = np.abs(im_psf)**2
im_psf /= np.max(im_psf)
# Paint an (elliptical: 0.8 axis ratio in x) ring of nth point sources.
nth=100
thetas = np.arange(nth)/nth*2*np.pi
imring = np.zeros(im_psf.shape)
for theta in thetas:
    xd = int(sz//2 + 0.8*ring_rad/(fov/sz)*np.cos(theta))
    yd = int(sz//2 + ring_rad/(fov/sz)*np.sin(theta))
    imring[xd,yd] += ring_flux/nth
#Convolve the ring image with the PSF.
imring = np.fft.irfft2(np.fft.rfft2(imring)*np.fft.rfft2(np.fft.fftshift(im_psf)))
#For a planet of circumstellar disk magnitude 11.5 and star of magnitude 8.5, we
#just add a copy of the PSF and add a copy of the ring.
im = im_psf.copy()
# Companion: PSF copy shifted 0.035 arcsec along the diagonal, 3 mag fainter.
im += np.roll(np.roll(im_psf,int(0.035/fov*sz/np.sqrt(2)), axis=1),int(0.035/fov*sz/np.sqrt(2)), axis=0)*10**(-0.4*3.0)
im += imring
iml = np.log10(np.maximum(im,1e-4))
plt.imshow(np.fft.fftshift(iml), interpolation='nearest', cmap=cm.gist_heat, extent=[-fov/2,fov/2,-fov/2,fov/2])
#plt.axis([512-200,512+200,512-200,512+200])
plt.colorbar()
ax.set_xlabel('Delta RA (arcsec)')
ax.set_ylabel('Delta Dec (arcsec)')
# Figure 2: the array layout itself.
f2 = plt.figure(2)
f2.clf()
ax = f2.add_subplot(111, aspect='equal')
# NOTE(review): y is plotted on the horizontal axis and x on the vertical,
# while the labels say the opposite -- confirm which orientation is intended.
ax.plot(yc,xc,'o')
ax.set_xlabel('X position (m)')
ax.set_ylabel('Y position (m)')
# Baseline lengths between 1st-, 2nd- and 10th-nearest neighbours in index order.
dists1 = np.sqrt( (xc[1:]-xc[:-1])**2 + (yc[1:]-yc[:-1])**2)
dists2 = np.sqrt( (xc[2:]-xc[:-2])**2 + (yc[2:]-yc[:-2])**2)
dists10 = np.sqrt( (xc[10:]-xc[:-10])**2 + (yc[10:]-yc[:-10])**2)
print(np.min(dists1))
print(np.max(dists2))
# Write the station positions, one "x y" pair per line.
f = open(outfile, "w")
for i in range(ntel):
    f.write("{0:6.1f} {1:6.1f}\n".format(xc[i],yc[i]))
f.close()
plt.show()
#Save the circular array.
np.savetxt('circ_array.txt',np.array( [xc,yc] ).T,fmt="%.1f") | {
"repo_name": "mikeireland/pfi",
"path": "pfi/plot_circ_array.py",
"copies": "1",
"size": "4241",
"license": "mit",
"hash": 5696149668094220000,
"line_mean": 28.2551724138,
"line_max": 119,
"alpha_frac": 0.6154208913,
"autogenerated": false,
"ratio": 2.3124318429661943,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.34278527342661946,
"avg_score": null,
"num_lines": null
} |
# a script to process folio text and create files for each play
import Shax_Folio as sxf
import re
#create full text object to get all titles
fol_obj = sxf.Play()
# use titles to name documents and to access texts
all_titles = fol_obj.titles
# the string to write the html file, if contains acts
def act_grammar(title, act1, act2, act3, act4, act5):
    """Return the full HTML page for a play that has five act divisions.

    The page links each act anchor, includes the shared stylesheet/script,
    and embeds the (already <br>-converted) act texts.
    """
    template = '''<!DOCTYPE html>
<html>
<head>
<link href="play_style.css" rel="stylesheet">
<script src="folio.js"></script>
</head>
<body>
<h1 id="title">{0}</h1>
<div id="actions"></div>
<ul>
<li>
<a href="#act1">Act 1</a>
<a href="#act2">Act 2</a>
<a href="#act3">Act 3</a>
<a href="#act4">Act 4</a>
<a href="#act5">Act 5</a>
</li>
<li>
<button id="previous">Previous</button>
<button id="next">Next</button>
<button id="reset">Reset</button>
<button id="home">Home</button>
</li>
</ul>
<form name="concordance" method="get">
<label id="lab">Show Concordance
<input type="text" name="word" placeholder="Enter a Word">
</form>
</div>
<p id="act1" class="text">{1}</p>
<p id="act2" class="text">{2}</p>
<p id="act3" class="text">{3}</p>
<p id="act4" class="text">{4}</p>
<p id="act5" class="text">{5}</p>
</body>
</html>
'''
    return template.format(title, act1, act2, act3, act4, act5)
def no_act_grammar(title, text):
    """Return the full HTML page for a text without act divisions.

    Same chrome as act_grammar(), but the whole (already <br>-converted)
    text goes into one paragraph.
    """
    template = '''<!DOCTYPE html>
<html>
<head>
<link href="play_style.css" rel="stylesheet">
<script src="folio.js"></script>
</head>
<body>
<h1 id="title">{0}</h1>
<div id="actions"></div>
<ul>
<li>
<button id="previous">Previous</button>
<button id="next">Next</button>
<button id="reset">Reset</button>
<button id="home">Home</button>
</li>
</ul>
<form name="concordance" method="get">
<label id="lab">Show Concordance
<input type="text" name="word" placeholder="Enter a Word">
</form>
</div>
<p id="fulltext" class="text">{1}</p>
</body>
</html>
'''
    return template.format(title, text)
#the function to process the texts
def process():
    """Write one <title>.html file per play, using act sections when available.

    Reads every title in the module-level `all_titles`, loads the play text
    via Shax_Folio, converts newlines to <br> tags, and writes the rendered
    HTML page to <title>.html in the current directory.
    """
    for t in all_titles:
        play = sxf.Play(title=t)
        if play.act1:
            # Convert newlines to <br> for HTML display.
            act1 = play.act1.replace('\n', '<br>')
            act2 = play.act2.replace('\n', '<br>')
            act3 = play.act3.replace('\n', '<br>')
            act4 = play.act4.replace('\n', '<br>')
            act5 = play.act5.replace('\n', '<br>')
            # BUG FIX: the original called `file.close` without parentheses,
            # which never closed the file; a with-block guarantees close.
            with open(t + '.html', 'w', encoding='utf-8') as out:
                out.write(act_grammar(t, act1, act2, act3, act4, act5))
        else:  # in case the play does not have act breaks
            text = play.text.replace('\n', '<br>')
            with open(t + '.html', 'w', encoding='utf-8') as out:
                out.write(no_act_grammar(t, text))
def preface():
    """Extract the prefatory matter ("To the Reader" ... "FINIS") from the
    full folio text and write it to Preface.html."""
    play = sxf.Play()
    pattern = re.compile('To the Reader.*?FINIS', re.S)
    text = ' '.join(pattern.findall(play.text))
    text = text.replace('\n', '<br>')
    # BUG FIX: the original called `file.close` without parentheses, which
    # never closed the file; a with-block guarantees close.
    with open('Preface.html', 'w', encoding='utf-8') as out:
        out.write(no_act_grammar('Prefatory Matter', text))
if __name__ == '__main__':
    # BUG FIX: the original called `peface()`, a typo that raised NameError
    # at runtime; the function is named `preface`.
    preface()
    process()
| {
"repo_name": "jsw3/shax-apps",
"path": "Folio_site/ff_processor.py",
"copies": "1",
"size": "3846",
"license": "mit",
"hash": 4787657753802926000,
"line_mean": 31.8717948718,
"line_max": 81,
"alpha_frac": 0.4810192408,
"autogenerated": false,
"ratio": 3.521978021978022,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9484514231284361,
"avg_score": 0.00369660629873216,
"num_lines": 117
} |
"""A script to produce the coefficients in mel_sparse_coeffs.ts.
Rather than recompute the Mel2Linear coefficients in javascript each time, we
save them as a large array in mel_sparse_coeffs.ts. When you run this script it
outputs to stdout a list of lists, with each list containing sparse information
[x_idx, y_idx, value].
Example usage:
$ python gansynth_make_mel_sparse_coeffs.py >> mel_sparse_coeffs.ts'
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
MEL_BREAK_FREQUENCY_HERTZ = 700.0
MEL_HIGH_FREQUENCY_Q = 1127.0
NUM_MEL_BINS = 1024
NUM_SPECTROGRAM_BINS = 1024
SAMPLE_RATE = 16000
NYQUIST_HERTZ = SAMPLE_RATE / 2.0
LOWER_EDGE_HERTZ = 0.0
UPPER_EDGE_HERTZ = NYQUIST_HERTZ
def mel_to_hertz(mel_values):
  """Converts frequencies in `mel_values` from the mel scale to linear scale."""
  expanded = np.exp(mel_values / MEL_HIGH_FREQUENCY_Q) - 1.0
  return MEL_BREAK_FREQUENCY_HERTZ * expanded
def hertz_to_mel(frequencies_hertz):
  """Converts frequencies in `frequencies_hertz` in Hertz to the mel scale."""
  ratio = frequencies_hertz / MEL_BREAK_FREQUENCY_HERTZ
  return MEL_HIGH_FREQUENCY_Q * np.log(1.0 + ratio)
def main():
  """Compute the sparse mel-to-linear weight matrix and print its nonzero
  entries to stdout as [row, col, value] triples.

  The output is intended to be pasted into mel_sparse_coeffs.ts.
  """
  # HTK excludes the spectrogram DC bin.
  bands_to_zero = 1
  linear_frequencies = np.linspace(
      0.0, NYQUIST_HERTZ, NUM_SPECTROGRAM_BINS)[bands_to_zero:, np.newaxis]
  # Compute NUM_MEL_BINS triples of (lower_edge, center, upper_edge). The
  # center of each band is the lower and upper edge of the adjacent bands.
  # Accordingly, we divide [LOWER_EDGE_HERTZ, UPPER_EDGE_HERTZ] into
  # NUM_MEL_BINS + 2 pieces.
  band_edges_mel = np.linspace(
      hertz_to_mel(LOWER_EDGE_HERTZ), hertz_to_mel(UPPER_EDGE_HERTZ),
      NUM_MEL_BINS + 2)
  lower_edge_mel = band_edges_mel[0:-2]
  center_mel = band_edges_mel[1:-1]
  upper_edge_mel = band_edges_mel[2:]
  # Widen any band narrower than 1.5 linear-frequency bins so every mel band
  # overlaps at least one spectrogram bin.
  freq_res = NYQUIST_HERTZ / float(NUM_SPECTROGRAM_BINS)
  freq_th = 1.5 * freq_res
  # BUG FIX (dead code): the original assigned `center_hz` twice before this
  # loop; both values were immediately overwritten and never read.
  for i in range(NUM_MEL_BINS):
    center_hz = mel_to_hertz(center_mel[i])
    lower_hz = mel_to_hertz(lower_edge_mel[i])
    upper_hz = mel_to_hertz(upper_edge_mel[i])
    if upper_hz - lower_hz < freq_th:
      rhs = 0.5 * freq_th / (center_hz + MEL_BREAK_FREQUENCY_HERTZ)
      dm = MEL_HIGH_FREQUENCY_Q * np.log(rhs + np.sqrt(1.0 + rhs**2))
      lower_edge_mel[i] = center_mel[i] - dm
      upper_edge_mel[i] = center_mel[i] + dm
  lower_edge_hz = mel_to_hertz(lower_edge_mel)[np.newaxis, :]
  center_hz = mel_to_hertz(center_mel)[np.newaxis, :]
  upper_edge_hz = mel_to_hertz(upper_edge_mel)[np.newaxis, :]
  # Calculate lower and upper slopes for every spectrogram bin.
  # Line segments are linear in the mel domain, not Hertz.
  lower_slopes = (linear_frequencies - lower_edge_hz) / (
      center_hz - lower_edge_hz)
  upper_slopes = (upper_edge_hz - linear_frequencies) / (
      upper_edge_hz - center_hz)
  # Intersect the line segments with each other and zero.
  mel_weights_matrix = np.maximum(0.0, np.minimum(lower_slopes, upper_slopes))
  # Re-add the zeroed lower bins we sliced out above.
  # [freq, mel]
  mel_weights_matrix = np.pad(mel_weights_matrix, [[bands_to_zero, 0], [0, 0]],
                              'constant')
  # Build the mel->linear mapping: normalise the transpose by the column sums
  # of linear2mel * transpose so the round trip is approximately identity.
  w_linear2mel = mel_weights_matrix
  m_t = np.transpose(w_linear2mel)
  p = np.dot(w_linear2mel, m_t)
  d = [1.0 / x if np.abs(x) > 1.0e-8 else x for x in np.sum(p, axis=0)]
  w_mel2linear = np.dot(m_t, np.diag(d))
  # Emit only the nonzero entries as [row, col, value] triples.
  w = w_mel2linear
  row, col = np.where(w != 0.0)
  w_sparse = [[row[i], col[i], w[row[i], col[i]]] for i in range(len(row))]
  np.set_printoptions(threshold=np.inf)
  for l in w_sparse:
    print(l, ',')
if __name__ == "__main__":
  # Print the sparse coefficients to stdout (redirect into the .ts file).
  main()
| {
"repo_name": "magenta/magenta-js",
"path": "music/src/gansynth/gansynth_make_mel_sparse_coeffs.py",
"copies": "1",
"size": "3777",
"license": "apache-2.0",
"hash": 2785661474897438000,
"line_mean": 32.7232142857,
"line_max": 80,
"alpha_frac": 0.6706380725,
"autogenerated": false,
"ratio": 2.7114142139267767,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38820522864267765,
"avg_score": null,
"num_lines": null
} |
#a script to read out our json data
#and write them to a file
#readJson.py url interval filebase
#will write to the file:
#timestamp jsonData
#filenames: filebase-xxx.tst
#xxx - gps,ais,trk
import os
import sys
import time
import urllib2
# Map request type -> (query string, polling interval in loop iterations).
types={
    'gps':('request=gps',1),
    'ais':('request=ais',10),
    'trk':('request=track',10)}
files={}  # cache of open output file handles, keyed by request type
namebase="json"  # base name for output files; can be overridden by argv[3]
count=0  # global loop counter used to honour the per-type intervals
def log(txt):
    # Minimal stdout logger (Python 2 print statement).
    print "log:%s" %(txt,)
def appendDataToFile(data,file):
    # Append one "timestamp data" line to the output file for this request
    # type, opening (and caching in `files`) the handle on first use.
    fh=files.get(file)
    if fh is None:
        fname=namebase+"-"+file+".tst"
        fh=open(fname,"w")
        files[file]=fh
        log("opened file "+fname)
    ts=int(round(time.time()))  # whole-second unix timestamp
    line="%d %s\n"%(ts,data)
    fh.write(line)
    fh.flush()  # flush so data survives a hard kill of this long-running script
def getJson(url):
    # Fetch and return the raw response body for `url` (Python 2 urllib2).
    h=urllib2.urlopen(url)
    data=h.read()
    return data
def getAll(urlbase):
    # Poll every configured request type whose interval divides the current
    # iteration count, appending each response to that type's file.
    global count
    count=count+1
    for type in types.keys():
        ext=types[type][0]  # query string for this request type
        iv=types[type][1]   # poll every iv-th iteration
        if ( count % iv) == 0:
            url=urlbase+"?"+ext
            data=getJson(url)
            appendDataToFile(data, type)
def run(url,interval):
    """Poll `url` forever, sleeping `interval` seconds between rounds.

    A zero interval is clamped to 1 second so the loop never busy-spins.
    """
    # BUG FIX: the original used `interval is 0` -- an identity comparison
    # that only works for CPython's small-int cache; use == for correctness.
    if interval == 0:
        interval=1
    while True:
        getAll(url)
        time.sleep(interval)
if __name__ == "__main__":
    # Usage: readJson.py url interval [filebase]
    if len(sys.argv) < 3:
        print "usage: %s url interval" % (sys.argv[0],)
        sys.exit(1)
    # Optional third argument overrides the output file base name.
    if len(sys.argv) > 3 and sys.argv[3] is not None:
        namebase=sys.argv[3]
    run(sys.argv[1],int(sys.argv[2] or 0))
| {
"repo_name": "wellenvogel/avnav",
"path": "test/readJson.py",
"copies": "1",
"size": "1363",
"license": "mit",
"hash": -2678496133787112000,
"line_mean": 18.4857142857,
"line_max": 51,
"alpha_frac": 0.6177549523,
"autogenerated": false,
"ratio": 2.8045267489711936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8596899387688062,
"avg_score": 0.06507646271662629,
"num_lines": 70
} |
"""A script to run inference on a set of image files.
NOTE #1: The Attention OCR model was trained only using FSNS train dataset and
it will work only for images which look more or less similar to french street
names. In order to apply it to images from a different distribution you need
to retrain (or at least fine-tune) it using images from that distribution.
NOTE #2: This script exists for demo purposes only. It is highly recommended
to use tools and mechanisms provided by the TensorFlow Serving system to run
inference on TensorFlow models in production:
https://www.tensorflow.org/serving/serving_basic
Usage:
python demo_inference.py --batch_size=32 \
--checkpoint=model.ckpt-399731\
--image_path_pattern=./datasets/data/fsns/temp/fsns_train_%02d.png
"""
import numpy as np
import PIL.Image
import tensorflow as tf
from tensorflow.compat.v1 import flags
from tensorflow.python.training import monitored_session
import common_flags
import datasets
import data_provider
FLAGS = flags.FLAGS
common_flags.define()
# e.g. ./datasets/data/fsns/temp/fsns_train_%02d.png
flags.DEFINE_string('image_path_pattern', '',
'A file pattern with a placeholder for the image index.')
def get_dataset_image_size(dataset_name):
  """Return (width, height) of images for the dataset module `dataset_name`."""
  # Ideally this info should be exposed through the dataset interface itself,
  # but currently it is only available via the module's default config.
  image_shape = getattr(datasets, dataset_name).DEFAULT_CONFIG['image_shape']
  height, width = image_shape[0], image_shape[1]
  return width, height
def load_images(file_pattern, batch_size, dataset_name):
  """Read `batch_size` images matching `file_pattern` (indexed 0..batch_size-1)
  into a uint8 ndarray of shape (batch_size, height, width, 3)."""
  width, height = get_dataset_image_size(dataset_name)
  images_actual_data = np.ndarray(shape=(batch_size, height, width, 3),
                                  dtype='uint8')
  for i in range(batch_size):
    path = file_pattern % i
    print("Reading %s" % path)
    pil_image = PIL.Image.open(tf.io.gfile.GFile(path, 'rb'))
    images_actual_data[i, ...] = np.asarray(pil_image)
  return images_actual_data
def create_model(batch_size, dataset_name):
  """Build the Attention OCR graph.

  Returns (raw_images, endpoints): a uint8 image placeholder of shape
  [batch_size, height, width, 3] and the model endpoints built from the
  preprocessed images.
  """
  width, height = get_dataset_image_size(dataset_name)
  dataset = common_flags.create_dataset(split_name=FLAGS.split_name)
  model = common_flags.create_model(
      num_char_classes=dataset.num_char_classes,
      seq_length=dataset.max_sequence_length,
      num_views=dataset.num_of_views,
      null_code=dataset.null_code,
      charset=dataset.charset)
  raw_images = tf.compat.v1.placeholder(
      tf.uint8, shape=[batch_size, height, width, 3])
  # Preprocess each image to float32 before feeding the model.
  images = tf.map_fn(data_provider.preprocess_image, raw_images,
                     dtype=tf.float32)
  endpoints = model.create_base(images, labels_one_hot=None)
  return raw_images, endpoints
def run(checkpoint, batch_size, dataset_name, image_path_pattern):
  """Restore `checkpoint` and return the predicted strings (list of str)
  for the batch of images matching `image_path_pattern`."""
  images_placeholder, endpoints = create_model(batch_size,
                                               dataset_name)
  images_data = load_images(image_path_pattern, batch_size,
                            dataset_name)
  # ChiefSessionCreator restores the checkpoint when the session starts.
  session_creator = monitored_session.ChiefSessionCreator(
      checkpoint_filename_with_path=checkpoint)
  with monitored_session.MonitoredSession(
      session_creator=session_creator) as sess:
    predictions = sess.run(endpoints.predicted_text,
                           feed_dict={images_placeholder: images_data})
  return [pr_bytes.decode('utf-8') for pr_bytes in predictions.tolist()]
def main(_):
  """Entry point: run inference with the flag-configured settings and print
  one predicted string per line."""
  print("Predicted strings:")
  predictions = run(FLAGS.checkpoint, FLAGS.batch_size, FLAGS.dataset_name,
                    FLAGS.image_path_pattern)
  for line in predictions:
    print(line)
if __name__ == '__main__':
  # Parses the command-line flags, then dispatches to main().
  tf.compat.v1.app.run()
| {
"repo_name": "tensorflow/models",
"path": "research/attention_ocr/python/demo_inference.py",
"copies": "1",
"size": "3635",
"license": "apache-2.0",
"hash": -336090938136295360,
"line_mean": 36.4742268041,
"line_max": 78,
"alpha_frac": 0.7020632737,
"autogenerated": false,
"ratio": 3.6532663316582914,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4855329605358291,
"avg_score": null,
"num_lines": null
} |
"""A script to run inference on a set of image files.
NOTE #1: The Attention OCR model was trained only using FSNS train dataset and
it will work only for images which look more or less similar to french street
names. In order to apply it to images from a different distribution you need
to retrain (or at least fine-tune) it using images from that distribution.
NOTE #2: This script exists for demo purposes only. It is highly recommended
to use tools and mechanisms provided by the TensorFlow Serving system to run
inference on TensorFlow models in production:
https://www.tensorflow.org/serving/serving_basic
Usage:
python demo_inference.py --batch_size=32 \
--image_path_pattern=./datasets/data/fsns/temp/fsns_train_%02d.png
"""
import numpy as np
import PIL.Image
import tensorflow as tf
from tensorflow.python.platform import flags
import common_flags
import datasets
import model as attention_ocr
FLAGS = flags.FLAGS
common_flags.define()
# e.g. ./datasets/data/fsns/temp/fsns_train_%02d.png
flags.DEFINE_string('image_path_pattern', '',
'A file pattern with a placeholder for the image index.')
def get_dataset_image_size(dataset_name):
  """Return (width, height) of images for the dataset module `dataset_name`."""
  # Ideally this info should be exposed through the dataset interface itself.
  # But currently it is not available by other means.
  ds_module = getattr(datasets, dataset_name)
  height, width, _ = ds_module.DEFAULT_CONFIG['image_shape']
  return width, height
def load_images(file_pattern, batch_size, dataset_name):
  """Read `batch_size` images matching `file_pattern` (indexed 0..batch_size-1)
  into a float32 ndarray of shape (batch_size, height, width, 3)."""
  width, height = get_dataset_image_size(dataset_name)
  images_actual_data = np.ndarray(shape=(batch_size, height, width, 3),
                                  dtype='float32')
  for i in range(batch_size):
    path = file_pattern % i
    print("Reading %s" % path)
    pil_image = PIL.Image.open(tf.gfile.GFile(path))
    images_actual_data[i, ...] = np.asarray(pil_image)
  return images_actual_data
def load_model(checkpoint, batch_size, dataset_name):
  """Build the Attention OCR graph.

  Returns (images_placeholder, endpoints, init_fn): a float32 image
  placeholder, the model endpoints, and a function that restores the
  model weights from `checkpoint` into a session.
  """
  width, height = get_dataset_image_size(dataset_name)
  dataset = common_flags.create_dataset(split_name=FLAGS.split_name)
  model = common_flags.create_model(
      num_char_classes=dataset.num_char_classes,
      seq_length=dataset.max_sequence_length,
      num_views=dataset.num_of_views,
      null_code=dataset.null_code,
      charset=dataset.charset)
  images_placeholder = tf.placeholder(tf.float32,
                                      shape=[batch_size, height, width, 3])
  endpoints = model.create_base(images_placeholder, labels_one_hot=None)
  init_fn = model.create_init_fn_to_restore(checkpoint)
  return images_placeholder, endpoints, init_fn
def main(_):
  """Entry point: load the model, run inference on the flag-configured image
  batch, and print one predicted string per line."""
  images_placeholder, endpoints, init_fn = load_model(FLAGS.checkpoint,
                                                      FLAGS.batch_size,
                                                      FLAGS.dataset_name)
  images_data = load_images(FLAGS.image_path_pattern, FLAGS.batch_size,
                            FLAGS.dataset_name)
  with tf.Session() as sess:
    tf.tables_initializer().run()  # required by the CharsetMapper
    init_fn(sess)
    predictions = sess.run(endpoints.predicted_text,
                           feed_dict={images_placeholder: images_data})
  print("Predicted strings:")
  for line in predictions:
    print(line)
if __name__ == '__main__':
  # Parses the command-line flags, then dispatches to main().
  tf.app.run()
| {
"repo_name": "jmhsi/justin_tinker",
"path": "data_science/courses/learning_dl_packages/models/research/attention_ocr/python/demo_inference.py",
"copies": "2",
"size": "3332",
"license": "apache-2.0",
"hash": -2757237602720901000,
"line_mean": 36.8636363636,
"line_max": 79,
"alpha_frac": 0.6830732293,
"autogenerated": false,
"ratio": 3.743820224719101,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00677946710664901,
"num_lines": 88
} |
#a script to run several replicates of several treatments locally
#RUN SIMPLE_REPEAT.PY FROM WITHIN THE FOLDER WHERE THE DATA SHOULD GO
#EX: INSIDE OF SymbulationEmp/Data, RUN python3 ../stats_scripts/simple_repeat.py
import sys
directory = "PartPop/"  # where output data should land (see commented FILE_PATH usage below)
#start_mois = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 30, 40, 60, 80, 100]
start_mois = [1]  # START_MOI values to sweep
slrs = [15]       # SYM_LYSIS_RES values to sweep
#verts = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
#sym_ints = [-1, -0.9, -0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, 0]
import subprocess
def cmd(command):
    '''Run `command` through the shell and block until it exits.

    Returns the command's exit status. The .wait() makes all executions
    run in series; for parallelization, remove it and instead delay any
    dependent processing until all necessary data has been created.
    '''
    proc = subprocess.Popen(command, shell=True)
    return proc.wait()
def silent_cmd(command):
    '''Run `command` through the shell with stdout captured (silenced).

    Returns the command's exit status. The .wait() makes all executions
    run in series; for parallelization, remove it and instead delay any
    dependent processing until all necessary data has been created.
    '''
    proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
    return proc.wait()
# Default seed range; may be overridden on the command line:
#   python3 simple_repeat.py [start_seed end_seed]
start_range = 10
end_range = 21
if(len(sys.argv) > 1):
    start_range = int(sys.argv[1])
    end_range = int(sys.argv[2])
seeds = range(start_range, end_range)
print("Copying SymSettings.cfg and executable to "+directory)
cmd("cp ../SymSettings.cfg .")
cmd("cp ../symbulation .")
print("Using seeds", start_range, "up to", end_range)
# Run one replicate per (seed, START_MOI, SYM_LYSIS_RES) combination,
# redirecting each run's stdout to its own settings/output file.
for a in seeds:
    for b in start_mois:
        for c in slrs:
            command_str = './symbulation -SEED '+str(a)+' -START_MOI '+str(b)+' -FILE_NAME SM'+str(b)+'_Seed'+str(a)+'_SLR'+str(c)+' -SYM_LYSIS_RES '+str(c)
            # Alternate parameter sweeps, kept for reference:
            # command_str = './symbulation -SEED '+str(a)+' -START_MOI '+str(b)+' -FILE_PATH '+directory+' -FILE_NAME SM'+str(b)+'_Seed'+str(a)+'_VT'+str(c)+' -VERTICAL_TRANSMISSION '+str(c)
            # command_str = './symbulation -SEED '+str(a)+' -START_MOI '+str(b)+' -FILE_PATH '+directory+' -FILE_NAME SM'+str(b)+'_Seed'+str(a)+'_SINT'+str(c)+' -SYM_INT '+str(c)
            # command_str = './symbulation -SEED '+str(a)+' -VERTICAL_TRANSMISSION '+str(b)+' -FILE_NAME _VT'+str(b)+'_Seed'+str(a) + " -FILE_PATH "+directory
            settings_filename = "Output_SM"+str(b)+"_Seed"+str(a)+"_SLR"+str(c)+".data"
            print(command_str)
            cmd(command_str+" > "+settings_filename)
| {
"repo_name": "anyaevostinar/SymbulationEmp",
"path": "stats_scripts/simple_repeat.py",
"copies": "1",
"size": "2413",
"license": "mit",
"hash": 2982961040807141400,
"line_mean": 47.26,
"line_max": 190,
"alpha_frac": 0.6009117281,
"autogenerated": false,
"ratio": 2.8863636363636362,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3987275364463636,
"avg_score": null,
"num_lines": null
} |
# A script to scrape /r/India
import time
import praw
from collections import deque
import traceback
import datetime
import pickle
DEBUG = False
DAY_IN_SECONDS = 60 * 60 * 24
class subStats(object):
    """Scrapes a subreddit's submissions over a fixed time window (via
    cloudsearch timestamp queries) and pickles them to an open file."""
    def __init__(self, user, passwd, subreddit, file):
        USER_AGENT = 'Script by /u/kashre001'
        self.reddit = praw.Reddit(user_agent=USER_AGENT)
        self.reddit.login(user, passwd, disable_warning=True)
        self.subreddit = self.reddit.get_subreddit(subreddit)
        self.file = file            # open binary file handle used by pickle.dump
        self.submissions = []       # collected praw submission objects
        self.authors = []
        self.comments = []
        self.offset = -(5.5 * 60 * 60)  # IST (UTC+5:30) offset in seconds
        # Window end: fixed epoch minus ~6 years and one day.
        self.max_date = 1420050600 - (DAY_IN_SECONDS * 6 * 365) - (1 * DAY_IN_SECONDS)
        # Window start: one year before the end (may be overridden in fetch_submissions).
        self.min_date = self.max_date -(DAY_IN_SECONDS * 365) #1420050600 #self.subreddit.created_utc - self.offset
        print ("Current time :" + str(self.max_date) + ' ' + self.easyTime(time.time() - self.offset))
        print ("Creation time :" + str(self.max_date)+ ' ' + self.easyTime(self.max_date))
        print ("Upper time :" + str(self.min_date)+ ' ' + self.easyTime(self.min_date))
        print ('Total existence time: ' + str(int((self.max_date - self.subreddit.created_utc)/DAY_IN_SECONDS/365)) + \
            ' years & ' + str((int(self.max_date - self.subreddit.created_utc)/DAY_IN_SECONDS)%365) + ' days\n')
    def easyTime(self,timestamp):
        # Format a unix timestamp as e.g. "Jan 01 2015 00:00:00" (UTC).
        time = datetime.datetime.utcfromtimestamp(timestamp)
        time = datetime.datetime.strftime(time, "%b %d %Y %H:%M:%S")
        return time
    def fetch_submissions(self, max_duration=5):
        # Walk backwards from max_date in search pages of up to 1000
        # submissions, narrowing the upper bound to the oldest one seen so
        # far, until min_date is reached or a page comes back empty.
        if max_duration:
            self.min_date = self.max_date - (DAY_IN_SECONDS * max_duration)
        Done = False
        upperTime = self.max_date
        lowerTime = self.min_date
        while not Done:
            try:
                if DEBUG:
                    print ('------------------------------------------------------\n')
                    print (str(lowerTime) + ' ' + self.easyTime(lowerTime) + '\n')
                    print (str(upperTime) + ' ' + self.easyTime(upperTime) + '\n')
                    print ('\nreached')
                query = 'timestamp:%d..%d' % (lowerTime, (upperTime + 8 * 60 * 60))
                submissions = list(self.reddit.search(query, subreddit=self.subreddit, sort='new', limit=1000, syntax='cloudsearch'))
                if (len(submissions) == 0):
                    break
                for submission in submissions:
                    if submission.created_utc > self.max_date:
                        continue
                    if submission.created_utc <= self.min_date:
                        Done = True
                        break
                    if submission.created_utc <= upperTime:
                        self.submissions.append(submission)
                        if DEBUG:
                            print(submission.author)
                            print (str(submission.created_utc) + ' ' + self.easyTime(submission.created_utc) + '\n')
                # Move the window's upper bound just below the oldest
                # submission collected so far.
                self.submissions.sort(key=lambda x: x.created_utc)
                upperTime = self.submissions[0].created_utc - 0.001
                if DEBUG:
                    print (self.easyTime(upperTime) + '\n')
                time.sleep(2);  # be polite to the API between pages
            except KeyboardInterrupt:
                print ("\nExiting loop...\n")
                break
            except Exception as e:
                # Transient API errors: back off, re-derive the bound, retry.
                print ('Going to sleep for 30 seconds...\n')
                time.sleep(30)
                self.submissions.sort(key=lambda x: x.created_utc)
                upperTime = self.submissions[0].created_utc - 0.001
                continue
        self.submissions.sort(key=lambda x: x.created_utc)
        print(len(self.submissions))
        pickle.dump(self.submissions, self.file, protocol=-1)
        return True
def main():
    # NOTE(review): reddit credentials are hard-coded here -- move them to a
    # config file or environment variables before sharing this script.
    outFile = open('submissionsactual2008.p', 'wb')
    stats = subStats('MsAbroadBot','imnoidiot5','india',outFile)
    if (stats.fetch_submissions(365)):
        print ('Success')
    else:
        print ('Faaaaaail')
    outFile.close()
    del outFile
#call main function
# NOTE(review): this runs on import as well; consider guarding with
# `if __name__ == "__main__":` -- confirm no other module imports this file.
main()
| {
"repo_name": "KaushikR/SubredditStats",
"path": "RandiaScrap.py",
"copies": "1",
"size": "5533",
"license": "bsd-3-clause",
"hash": -5335268507680527000,
"line_mean": 47.1130434783,
"line_max": 149,
"alpha_frac": 0.407554672,
"autogenerated": false,
"ratio": 5.1136783733826245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6021233045382625,
"avg_score": null,
"num_lines": null
} |
"""A script to strip out an ATS checkpoint file and write a "logically
structured" h5 version for use as ICs by PFLOTRAN
"""
import sys,os
sys.path.append(os.path.join(os.environ['ATS_SRC_DIR'],'tools','utils'))
import numpy as np
import h5py
import mesh
import parse_ats
def ats_to_pflotran_ic_h5(filename, directory=".", output_filename="pflotran_ic.h5"):
    """Read pressure and temperature from an ATS checkpoint file and write
    them, reordered to structured (x, y, z) cell ordering, to an HDF5 file
    usable as PFLOTRAN initial conditions."""
    ixyz = mesh.meshCentroidsStructuredOrdering_3D(directory=directory)
    with h5py.File(os.path.join(directory, filename),'r') as fin:
        # Reorder checkpoint cell data into the structured ordering.
        ic_pressure = fin['pressure.cell.0'][:][ixyz['id']]
        ic_temperature = fin['temperature.cell.0'][:][ixyz['id']]
    with h5py.File(os.path.join(directory, output_filename),'w') as fout:
        fout.create_dataset("pressure", data=ic_pressure)
        fout.create_dataset("temperature", data=ic_temperature)
        fout.create_dataset("x", data=ixyz['x'])
        fout.create_dataset("y", data=ixyz['y'])
        fout.create_dataset("z", data=ixyz['z'])
def ats_to_pflotran_bcs_h5(directory=".", output_filename="pflotran_bcs.h5"):
    """Read surface temperature and surface-subsurface flux time series from
    ATS surface visualization output and write them, in structured surface
    ordering, to an HDF5 file usable as PFLOTRAN boundary conditions."""
    ixy = mesh.meshCentroidsStructuredOrdering_3D(order=["x",], filename="visdump_surface_mesh.h5",
                                                  directory=directory)
    keys, times, dat = parse_ats.readATS(directory, "visdump_surface_data.h5", timeunits='s')
    with h5py.File(os.path.join(directory, output_filename),'w') as fout:
        fout.create_dataset("time [s]", data=np.array(times))
        # One dataset per timestep, named by its index.
        T = fout.create_group("surface temperature [K]")
        for i,k in enumerate(keys):
            T.create_dataset("%d"%i, data=dat['surface-temperature.cell.0'][k][:][ixy['id']])
        flx = fout.create_group("outward molar flux [mol m^-2 s^-1]")
        # need the face area: convert extensive flux to a per-area flux.
        face_areas = mesh.meshElemVolume(filename="visdump_surface_mesh.h5", directory=directory)
        for i,k in enumerate(keys):
            flux_dat = dat['surface_subsurface_flux.cell.0'][k][:]
            flux_dat = flux_dat / face_areas
            flx.create_dataset("%d"%i, data=flux_dat[ixy['id']])
if __name__ == "__main__":
    # The last command-line argument must be a checkpointXXXXX.h5 file
    # (Python 2 script: note the print statements).
    checkp = sys.argv.pop(-1)
    if not (checkp.startswith("checkpoint") and checkp.endswith(".h5")):
        print "Usage: python ats-vis-to-structured-h5.py checkpointXXXXX.h5"
        sys.exit(-1)
    ats_to_pflotran_ic_h5(checkp)
    ats_to_pflotran_bcs_h5()
    sys.exit(0)
| {
"repo_name": "amanzi/ats-dev",
"path": "tools/utils/ats-vis-to-structured-h5.py",
"copies": "2",
"size": "2408",
"license": "bsd-3-clause",
"hash": -2077121318747921000,
"line_mean": 39.1333333333,
"line_max": 99,
"alpha_frac": 0.6241694352,
"autogenerated": false,
"ratio": 3.1725955204216074,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9702104933160824,
"avg_score": 0.018932004492156335,
"num_lines": 60
} |
# a script to test PyEEG on real EEG data
# just define the data folders
# original script
# for paper PyEEG:A Open Source Python Module for EEG/MEG Feature Extraction
# 2010/10/18 Xin Lin
# Graphical environment and improvements by Petrousov Giannis
# student University of Western Macedonia ICTE
import sys,string
from pyeeg import *
from pylab import *
from numpy import *
from Tkinter import *
""" This is a script for testing PyEEG on data sets signals.
Here list the feature abbreviations
'spect' ======> spectral entropy
'bp' ======> bin_power
'hurst' ======> hurst
'dfa' ======> dfa
'hjorth' ======> hjorth
'pfd' ======> pfd
'apen' ======> ap_en
'hfd' ======> hfd
'fi' ======> fisher_information
'svd' ======> svd_entropy
"""
### GE functions ###
def shutdown(): #destroy when exit pressed
    # Close both Tk windows (main_win/signal_win are module-level globals,
    # presumably created elsewhere in this file -- not visible here).
    main_win.destroy()
    signal_win.destroy()
def shutdown_signal():
    # Close only the signal-selection window.
    signal_win.destroy()
def plus_func():
    # Add one more signal-path entry widget to the signal window.
    SIG_PATHS.append(Entry(signal_win))
    SIG_PATHS[-1].pack()
def minus_func():
    # Hide and drop the most recently added signal-path entry widget.
    SIG_PATHS[-1].pack_forget()
    del SIG_PATHS[-1]
####### ends #########
#### definition of feature plot function ###########
def feature_plot(IDX):
    """Read back the saved per-set feature results and plot them.

    IDX is an 11-element flag array (index 0 unused): IDX[i] == 1 selects
    the feature whose number is i in NAME_MAP.  For each selected feature,
    the five result files <set>.<NAME>.<letter>.txt produced by play()
    (sets Z, O, N, F, S -> letters A..E) are loaded, summarised with
    mean/variance, plotted, and saved as <NAME>.png.
    """
    # Per-feature axis limits; row i-1 is used for feature number i.
    AXIS = array([[-0.5,4.5,0.12,0.31],
        [-0.5,4.5,0.32,0.98],
        [-0.5,4.5,0.02,0.12],
        [-0.5,4.5,0.11,0.25],
        [-0.5,4.5,0, 1],
        [-0.5,4.5,0.52,0.69],
        [-0.5,4.5,0.574,0.609],
        [-0.5,4.5,0, 1],
        [-0.5,4.5,0.67,0.82],
        [-0.5,4.5,1.92,2.26]])
    N = 5  # number of data sets (A..E)
    NAME_MAP = {'1':'AP', '2':'DFA', '3':'FI', '4':'HFD', '5':'Hjorth', '6':'HURST', '7':'PFD', '8':'PSI', '9':'SPECT', '10':'SVD'}
    for i in xrange(1, len(IDX)):
        if IDX[i] == 1:
            NAME = NAME_MAP[str(i)] # feature name
            # Load the five per-set result files (one row of values per segment).
            f1=open('Z.'+NAME+'.A.txt','r')
            #f1=open('Z/'+NAME+'.A.txt','r')
            A=[]
            for line in f1.readlines():
                A.append(map(double, line.split()))
            f2=open('O.'+NAME+'.B.txt','r')
            #f2=open('O/'+NAME+'.B.txt','r')
            B=[]
            for line in f2.readlines():
                B.append(map(double, line.split()))
            f3=open('N.'+NAME+'.C.txt','r')
            #f3=open('N/'+NAME+'.C.txt','r')
            C=[]
            for line in f3.readlines():
                C.append(map(double, line.split()))
            f4=open('F.'+NAME+'.D.txt','r')
            #f4=open('F/'+NAME+'.D.txt','r')
            D=[]
            for line in f4.readlines():
                D.append(map(double, line.split()))
            f5=open('S.'+NAME+'.E.txt','r')
            #f5=open('S/'+NAME+'.E.txt','r')
            E=[]
            for line in f5.readlines():
                E.append(map(double, line.split()))
            A = array(A)
            B = array(B)
            C = array(C)
            D = array(D)
            E = array(E)
            x = range(0,len(A))
            if NAME == 'Hjorth':
                # Hjorth gives two values per segment: mobility (col 0) and
                # complexity (col 1); they get side-by-side subplots.
                Mobmean = [mean(A[:,0]),mean(B[:,0]),mean(C[:,0]),mean(D[:,0]),mean(E[:,0])]
                Mobstd = [var(A[:,0]),var(B[:,0]),var(C[:,0]),var(D[:,0]),var(E[:,0])]
                # Normalise complexity to [0, 1] across all five sets.
                maxcom=max(max(A[:,1]),max(B[:,1]),max(C[:,1]),max(D[:,1]),max(E[:,1]))
                mincom=min(min(A[:,1]),min(B[:,1]),min(C[:,1]),min(D[:,1]),min(E[:,1]))
                A[:,1]=(A[:,1]-mincom)/(maxcom-mincom)
                B[:,1]=(B[:,1]-mincom)/(maxcom-mincom)
                C[:,1]=(C[:,1]-mincom)/(maxcom-mincom)
                D[:,1]=(D[:,1]-mincom)/(maxcom-mincom)
                E[:,1]=(E[:,1]-mincom)/(maxcom-mincom)
                Commean = [mean(A[:,1]),mean(B[:,1]),mean(C[:,1]),mean(D[:,1]),mean(E[:,1])]
                Comstd = [var(A[:,1]),var(B[:,1]),var(C[:,1]),var(D[:,1]),var(E[:,1])]
                figure(i, figsize=(12,6))
                subplot(121)
                ind = arange(N) # the x locations for the groups
                width = 0.5 # the width of the bars
                p2 = errorbar(ind, Mobmean, Mobstd,marker='s',linestyle='-',linewidth=0.5,markersize=0.1)
                axis([-0.5,4.5,0.0025,0.0060])
                xticks(ind, ('A', 'B', 'C', 'D', 'E'),size=20 )
                title('(a) Hjorth Mobility',size=20)
                subplot(122)
                p2 = errorbar(ind, Commean, Comstd,marker='s',linestyle='-',linewidth=0.5,markersize=0.1)
                xticks(ind, ('A', 'B', 'C', 'D', 'E'),size=20)
                xlim(-width,len(ind))
                title('(b) Hjorth Complexity',size=20)
                savefig('Hjorth.png')
            elif NAME == 'PSI': ### bin power case
                # bin_power returns one value per frequency band; plot the
                # mean spectrum of each set in its own panel.
                MA = mean(A, 0)
                MB = mean(B, 0)
                MC = mean(C, 0)
                MD = mean(D, 0)
                ME = mean(E, 0)
                figure(i, figsize=(30,6))
                subplot(151)
                plot(MA, 'b')
                #import code;code.interact(local=locals())
                axis([-1,43,0,350000])
                grid(True)
                title('A', size=20)
                subplot(152)
                plot(MB, 'c')
                axis([-1,43,0,500000])
                grid(True)
                title('B', size=20)
                subplot(153)
                plot(MC, 'm')
                axis([-1,43,0,600000])
                grid(True)
                title('C', size=20)
                subplot(154)
                plot(MD, 'r')
                axis([-1,43,0,750000])
                grid(True)
                title('D', size=20)
                subplot(155)
                plot(ME, 'y')
                axis([-1,43,0,2500000])
                grid(True)
                title('E', size=20)
                savefig('PSI.png')
            else: ## other cases
                # Scalar features: one errorbar plot of mean +/- variance.
                Mean = [mean(A),mean(B),mean(C),mean(D),mean(E)]
                Std = [var(A),var(B),var(C),var(D),var(E)]
                figure(i, figsize=(5,5))
                ind = arange(N) # the x locations for the groups
                width = 0.5 # the width of the bars
                p2 = errorbar(ind, Mean, Std,marker='s',linestyle='-',linewidth=0.5,markersize=2)
                axis(AXIS[i-1][:])
                xticks(ind, ('A', 'B', 'C', 'D', 'E'),size=20)
                title(NAME,size=20)
                savefig(NAME + '.png')
    show()
####### Begin the main part #####################
####### Define the parameters ##############
### Note if you change these default parameter values, you may need to adjust the axis interval when plotting
# Which features play() should extract; the checkbox handlers flip these to 1.
FEA_IDX = {'spect':0, 'bp':0, 'hurst':0, 'dfa':0, 'hjorth':0, 'pfd':0, 'apen':0, 'hfd':0, 'fi':0, 'svd':0}
# Tkinter Entry widgets holding one signal-directory path each (see plus_func/minus_func).
SIG_PATHS=[]
def play(spect_var, bp_var, hurst_var, dfa_var, hjorth_var, pfd_var, apen_var, hfd_var, fi_var, svd_va):
DIM = 10
TAU = 4
Kmax = 5
Fs = 173
Band = [2*i+1 for i in xrange(0, 43)] ## 0.5~85 Hz
SET_MAP = {'A':'Z', 'B':'O', 'C':'N', 'D':'F','E':'S'} #de3ia einai o fakelos, aristera einai to string gia to teliko onoma
FEA_MAP = {'apen':'1', 'dfa':'2', 'fi':'3', 'hfd':'4', 'hjorth':'5', 'hurst':'6', 'pfd':'7', 'bp':'8', 'spect':'9', 'svd':'10'}
ALL = 0
FLAG = zeros(11)
if (spect_var.get() == 1):
FEA_IDX['spect']=1
FLAG[9]=1
if (bp_var.get()==1):
FEA_IDX['bp']=1
FLAG[8]=1
if (hurst_var.get()==1):
FEA_IDX['hurst']=1
FLAG[6]=1
if (dfa_var.get()==1):
FEA_IDX['dfa']=1
FLAG[2]=1
if (hjorth_var.get()==1):
FEA_IDX['hjorth']=1
FLAG[5]=1
if (pfd_var.get()==1):
FEA_IDX['pfd']=1
FLAG[7]=1
if (apen_var.get()==1):
FEA_IDX['apen']=1
FLAG[1]=1
if (hfd_var.get()==1):
FEA_IDX['hfd']=1
FLAG[4]=1
if (fi_var.get()==1):
FEA_IDX['fi']=1
FLAG[3]=1
if (svd_var.get()==1):
FEA_IDX['svd']=1
FLAG[10]=1
##### Feature Extraction part ################
for i in range(0,len(SIG_PATHS)): #loop gia ta paths twn simatwn
SET_NAME=SIG_PATHS[i].get() # mporeis na valeis to apolyto path H to sxetiko
print 'Begin with %c' % SET_NAME[-1] #kratame to teleytaio gramma toy path
for j in xrange(1, 101):
print 'Begin the %dth SEGMENT of SET %d' % (j,i+1)
if (j == 100): #otan ftasei stin teleytaia grammi 8a pei pou apo8ikeyse to apotelesma
print '<--RESULT SAVED IN "%s" ' % RESULT_DIR
#ka8orizoume to onoma toy txt
if j < 10: #mexri 009.txt
FILE_NAME = '00' + str(j) #leei pio *.txt 8a anoi3ei
elif j < 100: #apo 010.txt ews 099.txt
FILE_NAME = '0' + str(j)
else:
FILE_NAME = str(j) #100.txt
#ka8orizei onoma path + txt
FILE_NAME = SET_NAME + '/' + SET_NAME[-1] + FILE_NAME + '.txt' + '.cut.txt'
FILE_DIR = FILE_NAME
fid = open(FILE_DIR, 'r') #anoigei to *.txt, reading
print FILE_DIR
tmp = fid.readlines() #epistrefei oles tis grammes tou arxeioy sto tmp
DATA = [float(k) for k in tmp] #DATA=stoixeio (data loop)
####### Methods ######## stelnei to ka8e stoixeio (grammi) sti synartisi
if (FEA_IDX['spect']): #an FEA_IDX=1
print 'spectral entropy...\n',
RESULT_DIR = SET_NAME[-1] + '.SPECT' + '.txt' #apo8ikeyetai sto path toy programmatos
fod = open(RESULT_DIR, 'a')
result = spectral_entropy(DATA, Band, Fs) #periexei to apotelesma
fod.write('%f\n' % float(result)) #grafei to apotelesma sto arxeio
if (FEA_IDX['bp']):
print 'PSI ...\n',
RESULT_DIR = SET_NAME[-1] + '.PSI' + '.txt'
fod = open(RESULT_DIR, 'a')
result = bin_power(DATA, Band, Fs)
for k in result[0]:
fod.write('%f\t' % float(k))
fod.write('\n')
RESULT_DIR = SET_NAME[-1] + '.RIR' + '.txt'
fod = open(RESULT_DIR, 'a')
for k in result[1]:
fod.write('%f\t' % float(k))
fod.write('\n')
if (FEA_IDX['hurst']):
print 'Hurst Exponent...\n',
RESULT_DIR = SET_NAME[-1] + '.HURST' + '.txt'
fod = open(RESULT_DIR, 'a')
result = hurst(DATA)
if not isnan(result):
fod.write('%f\n' % float(result))
print '<--result line saved in "%s" ' % RESULT_DIR
if (FEA_IDX['dfa']):
print 'DFA...\n',
RESULT_DIR = SET_NAME[-1] + '.DFA' + '.txt'
fod = open(RESULT_DIR, 'a')
#import code;code.interact(local=locals())
result = dfa(DATA)
fod.write('%f\n' % float(result))
if (FEA_IDX['hjorth']):
print 'Hjorth...\n',
RESULT_DIR = SET_NAME[-1] + '.Hjorth' + '.txt'
fod = open(RESULT_DIR, 'a')
result = hjorth(DATA)
fod.write('%f\t%f\n' % (float(result[0]),float(result[1])))
if (FEA_IDX['pfd']):
print 'PFD...\n',
RESULT_DIR = SET_NAME[-1] + '.PFD' + '.txt'
fod = open(RESULT_DIR, 'a')
result = pfd(DATA)
fod.write('%f\n' % float(result))
if (FEA_IDX['apen']):
print 'approximate entropy...\n',
R = std(DATA) * 0.3
RESULT_DIR = SET_NAME[-1] + '.AP' + '.txt'
fod = open(RESULT_DIR, 'a')
result = ap_entropy(DATA, DIM, R)
fod.write('%f\n' % float(result))
if (FEA_IDX['hfd']):
print 'HFD...\n',
RESULT_DIR = SET_NAME[-1] + '.HFD' + '.txt'
fod = open(RESULT_DIR, 'a')
result = hfd(DATA, Kmax)
fod.write('%f\n' % float(result))
if (FEA_IDX['fi']):
print 'fisher information...\n',
M = embed_seq(DATA, TAU, DIM)
W = svd(M, compute_uv=0)
W /= sum(W)
RESULT_DIR = SET_NAME[-1] + '.FI' + '.txt'
fod = open(RESULT_DIR, 'a')
result = fisher_info(DATA, TAU, DIM, W)
fod.write('%f\n' % float(result))
if (FEA_IDX['svd']):
print 'SVD entropy...\n'
M = embed_seq(DATA, TAU, DIM)
W = svd(M, compute_uv=0)
W /= sum(W)
RESULT_DIR = SET_NAME[-1] + '.SVD' + '.txt'
fod = open(RESULT_DIR, 'a')
result = svd_entropy(DATA, TAU, DIM, W)
fod.write('%f\n' % float(result))
###### Plot part #######
#feature_plot(FLAG)
if __name__=="__main__":
#main window is fixed first
main_win=Tk() #main windows shows all functions
main_win.title("pyeeg") #main window title
main_win.geometry("300x650+300+300")
main_win.protocol('WM_DELETE_WINDOW', shutdown)
message=Label(main_win, text="Choose functions to execute", height=3)
message.pack()
#signal path window is fixed second
signal_win=Tk()
signal_win.title("signals")
signal_win.geometry("300x500+300+300")
signal_win.protocol("WM_DELETE_WINDOW", shutdown_signal)
message_signal=Label(signal_win, text="choose signal paths", height=3)
message_signal.pack()
plus_button_var=IntVar()
plus_button=Button(signal_win, text="add signal", height=1, width=5, command=plus_func)
plus_button.pack(anchor=N)
minus_button_var=IntVar()
minus_button=Button(signal_win, text="remove signal", height=1, width=8, command=minus_func)
minus_button.pack(anchor=N)
#paths for signals
SIG_PATHS.append(Entry(signal_win))
SIG_PATHS[0].pack()
############## ends ###############
spect_var = IntVar() #default value 0
spect_button = Checkbutton(main_win, text="Spectral entropy", variable=spect_var, height=2)
spect_button.pack()
bp_var=IntVar()
bp_button = Checkbutton(main_win, text="binary power", variable=bp_var, height=2, onvalue=1, offvalue=0)
bp_button.pack()
hurst_var=IntVar()
hurst_button=Checkbutton(main_win, text="hurst", variable=hurst_var, height=2, onvalue=1, offvalue=0)
hurst_button.pack()
dfa_var=IntVar()
dfa_button=Checkbutton(main_win, text="dfa", variable=dfa_var, height=2, onvalue=1, offvalue=0)
dfa_button.pack()
hjorth_var=IntVar()
hjorth_button=Checkbutton(main_win, text="hjorth", variable=hjorth_var, height=2, onvalue=1, offvalue=0)
hjorth_button.pack()
pfd_var=IntVar()
pfd_button=Checkbutton(main_win, text="pfd", variable=pfd_var, height=2, onvalue=1, offvalue=0)
pfd_button.pack()
apen_var=IntVar()
apen_button=Checkbutton(main_win, text="ap en", variable=apen_var, height=2, onvalue=1, offvalue=0)
apen_button.pack()
hfd_var=IntVar()
hfd_button=Checkbutton(main_win, text="hfd", variable=hfd_var, height=2, onvalue=1, offvalue=0)
hfd_button.pack()
fi_var=IntVar()
fi_button=Checkbutton(main_win, text="fisher information", variable=fi_var, height=2, onvalue=1, offvalue=0)
fi_button.pack()
svd_var=IntVar()
svd_button=Checkbutton(main_win, text="svd entropy", variable=svd_var, height=2, onvalue=1, offvalue=0)
svd_button.pack()
play_var=IntVar()
play_button=Button(main_win, text="Play", height=2, width=10, command=lambda: play(spect_var, bp_var, hurst_var, dfa_var, hjorth_var, pfd_var, apen_var, hfd_var, fi_var, svd_var))
play_button.pack()
exit_var=IntVar()
exit_button=Button(main_win, text="Exit", height=2, width=10, command=shutdown)
exit_button.pack()
main_win.mainloop()
| {
"repo_name": "gpetrousov/peeg",
"path": "peeg.py",
"copies": "1",
"size": "16435",
"license": "mit",
"hash": 6236527516263162000,
"line_mean": 34.1927194861,
"line_max": 180,
"alpha_frac": 0.4675387892,
"autogenerated": false,
"ratio": 3.0194745544736357,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8347441935301341,
"avg_score": 0.12791428167445884,
"num_lines": 467
} |
"""A script to test the extraction of a bunch of RHEA2 spectra.
The functions within this module should go in the Extractor if they
are general. Spectrograph specific functions should go in the RHEA module
********************************************************************************
NOTE:
----
The functions in this file will be removed shortly once they are verified to
work post-refactor. New functions should be defined in one of the pre-existing
modules/classes as appropriate, rather than in test scripts.
********************************************************************************
TODO:
0) Make sure that the Th/Ar reference is created from the same epoch that the wavelength
scale solution is made at. i.e. add a new wavelength solution script, e.g. with
creation of new data/orderXXX.txt files from an averaged Th/Ar for each night. This
would be an extraction then a fitting of Gaussians to each line.
1) Output the reference spectrum separately, so it can be imported. This is
*super* important because one test we want to do is to input the sun as a reference
for Tau Ceti (part of ardata.fits.gz)
2) Put extraction in a script where tramlines are tweaked using fit_x_to_image.
3) Add flat field creation scripts to this.
4) Correct for Telluric lines... (in data/ardata.fits.gz).
For Telluric lines, the wavelength scale has to be corrected epoch to epoch.
5) Find and correct for common bad pixels.
6) The GHOST in orders 28 to 30 should be marked as high variance.
7) Actually use a (neatened version of) this script for the gamma Crucis and
sun data.
"""
from __future__ import division, print_function
import pymfe
try:
import pyfits
except:
import astropy.io.fits as pyfits
import numpy as np
import matplotlib.pyplot as plt
import glob
import opticstools as ot
import pdb
import scipy.optimize as op
import scipy.interpolate as interp
import time
from astropy.time import Time
from astropy.coordinates import SkyCoord
from astropy import units as u
import PyAstronomy.pyasl as pyasl
from astropy import constants as const
plt.ion()  # interactive plotting so figures update as the script runs
# ---- Manual configuration block: later assignments deliberately overwrite
# ---- earlier ones; edit / comment sections to pick the data set to process.
dir = "/Users/mireland/data/rhea2/20150601/"  # NOTE(review): shadows builtin dir()
#First few thar frames...
star = "thar"
files = glob.glob(dir + "*" + star + "*00[1234].fit")
#thar frames separated by 10
star = "thar"
files = glob.glob(dir + "*" + star + "*0[012]1.fit")
#Gamma cru
star = "gacrux"
star = "thar"
files = glob.glob(dir + "*" + star + "*00[1234].fit")
files = glob.glob("/Users/mireland/data/rhea2/2015060*/*" + star + "*00[1234].fit")
#dark = pyfits.getdata(dir + "Masterdark_target.fit")
#This is "Gamma Cru"
coord = SkyCoord('12 31 09.9596 -57 06 47.568',unit=(u.hourangle, u.deg))
save_file = "gacrux06.fit"
save_file = "thar06.fit"
#nu Oph, "Sinistra". Has bad pixels.
#star = "sinistra"
#files = glob.glob(dir + "*" + star + "*00[12345678].fit")
#save_file = "sinistra0601.fit"
#ref_file = "" #A reference spectrum file should be possible.
#This is "Sinistra"
#coord = SkyCoord('17 59 01.59191 -09 46 25.07',unit=(u.hourangle, u.deg))
#Select a dark here
# Active configuration: the tau Ceti November data set (overrides everything above).
dir = "/Users/mireland/data/rhea2/tauCeti/"
star = "thar"
save_file_prefix = "tauCeti_thar1114"
star_dark = pyfits.getdata(dir + "Masterdark_thar.fit")
star = "tauCeti"
save_file_prefix = "tauCeti1114"
star_dark = pyfits.getdata(dir + "Masterdark_target.fit")
files = glob.glob("/Users/mireland/data/rhea2/tauCeti/201511*/*" + star + "*.fit")
coord = SkyCoord('01 44 04.08338 -15 56 14.9262',unit=(u.hourangle, u.deg))
flat_dark = pyfits.getdata(dir + "Masterdark_flat.fit")
# Spectrograph model: order positions (xx), wavelength scale and blaze.
rhea2_format = pymfe.rhea.Format()
rhea2_extract = pymfe.Extractor(rhea2_format, transpose_data=False)
xx, wave, blaze = rhea2_format.spectral_format()
#Things to change each time if you want. Below for star
do_we_extract=False
do_bcor=True
med_cut=0.6 #0 for Th/Ar
#Here for Th/Ar
#do_we_extract=True
#do_bcor=False
#med_cut=0
save_file = save_file_prefix + ".fits"
rv_file = save_file_prefix + "_rv.csv"
rv_sig_file = save_file_prefix + "_rv_sig.csv"
# Each observation night gets its own master flat, matched by directory date.
file_dirs = [f[f.rfind('/')-8:f.rfind('/')] for f in files]
flat_files = ["/Users/mireland/data/rhea2/tauCeti/" + f + "/Masterflat.fit" for f in file_dirs]
#-----------------------------------------
def rv_shift_resid(params, wave, spect, spect_sdev, spline_ref, return_spect=False):
    """Residuals of fitting a Doppler-shifted, normalised reference spectrum
    to an observed spectrum.

    The model for parameters p[0]..p[3] is:
        y(x) = Ref[ wave(x) * (1 - p[0]/c) ] * exp(p[1]*x^2 + p[2]*x + p[3])
    where Ref is the callable spline_ref(wavelength).

    Parameters
    ----------
    params: 4-element array
        [velocity (m/s), quadratic, linear and constant log-normalisation terms]
    wave: float array
        Wavelengths for the observed spectrum.
    spect: float array
        The observed spectrum.
    spect_sdev: float array
        Per-pixel standard deviation of the observed spectrum.
    spline_ref: callable
        Interpolating function for the reference spectrum.
    return_spect: boolean
        If True, return the fitted model spectrum instead of residuals.

    Returns
    -------
    resid: float array
        (model - observed)/sdev, or the model itself if return_spect.
    """
    npix = len(spect)
    pix = np.arange(npix) - npix//2
    # Smooth multiplicative normalisation: exp of a quadratic in pixel.
    scaling = np.exp(params[1]*pix**2 + params[2]*pix + params[3])
    # Sign convention: a redshift (positive velocity) maps a reference
    # wavelength to a longer target wavelength, so the target is sampled
    # at *shorter* reference wavelengths.
    shifted_wave = wave * (1.0 - params[0]/const.c.si.value)
    model = spline_ref(shifted_wave) * scaling
    if return_spect:
        return model
    return (model - spect) / spect_sdev
def rv_shift_jac(params, wave, spect, spect_sdev, spline_ref):
    """Hand-written Jacobian for rv_shift_resid.

    Crude (the velocity column is a 1 m/s finite difference) but avoids the
    numerical-derivative instability seen with automatic differencing.

    Parameters
    ----------
    params: 4-element array
        Same as for rv_shift_resid.
    wave, spect, spect_sdev: float arrays
        Observed wavelengths, fluxes and standard deviations.
    spline_ref: callable
        Interpolating function for the reference spectrum.

    Returns
    -------
    jac: (n_pixels, 4) array
        d(residual)/d(param) for each pixel and parameter.
    """
    npix = len(spect)
    pix = np.arange(npix) - npix//2
    scaling = np.exp(params[1]*pix**2 + params[2]*pix + params[3])
    model = spline_ref(wave*(1.0 - params[0]/const.c.si.value)) * scaling
    jac = np.empty((npix, 4))
    # Normalisation terms are analytic: d(model)/d(p_k) = model * x^k.
    jac[:,3] = model / spect_sdev
    jac[:,2] = model * pix / spect_sdev
    jac[:,1] = model * pix**2 / spect_sdev
    # Velocity term by forward finite difference with a 1 m/s step.
    bumped = spline_ref(wave*(1.0 - (params[0] + 1.0)/const.c.si.value)) * scaling
    jac[:,0] = (bumped - model) / spect_sdev
    return jac
def create_ref_spect(wave, fluxes, vars, bcors, rebin_fact=2, gauss_sdev = 1.0, med_cut=0.6,gauss_hw=7):
    """Create a reference spectrum from a series of target spectra, after
    correcting the spectra barycentrically.

    Parameters
    ----------
    wave: (n_orders, ny) float array
        Wavelength scale of the input spectra.
    fluxes: (n_files, n_orders, ny) float array
        Extracted fluxes for each observation.
    vars: (n_files, n_orders, ny) float array
        Variances matching fluxes (currently unused here).
    bcors: (n_files,) float array
        Barycentric corrections in m/s for each observation.
    rebin_fact: int
        Subsampling factor for the reference wavelength grid.
    gauss_sdev: float
        Standard deviation (in subsampled pixels) of the smoothing Gaussian.
    med_cut: float
        Only files with median flux above med_cut * overall median are used;
        0 or negative keeps every file.
    gauss_hw: int
        Half-width of the Gaussian smoothing kernel in pixels.

    Returns
    -------
    wave_ref: (n_orders, rebin_fact*ny + 2) float array
        Subsampled wavelength grid, padded by +/-100 km/s at the ends.
    ref_spect: (n_orders, rebin_fact*ny + 2) float array
        Smoothed median reference spectrum on that grid.
    """
    nm = fluxes.shape[1]
    ny = fluxes.shape[2]
    nf = fluxes.shape[0]
    #Create arrays for our outputs.
    wave_ref = np.empty( (nm,rebin_fact*ny + 2) )
    ref_spect = np.empty( (nm,rebin_fact*ny + 2) )
    #First, rebin everything.
    new_shape = (fluxes.shape[1],rebin_fact*fluxes.shape[2])
    fluxes_rebin = np.empty( (fluxes.shape[0],fluxes.shape[1],rebin_fact*fluxes.shape[2]) )
    for i in range(nf):
        fluxes_rebin[i] = ot.utils.regrid_fft(fluxes[i],new_shape)
    #Create the final wavelength grid.
    for j in range(nm):
        wave_ref[j,1:-1] = np.interp(np.arange(rebin_fact*ny)/rebin_fact,np.arange(ny),wave[j,:])
        #Fill in the end wavelengths, including +/-100 km/s from the ends.
        wave_ref[j,-2] = wave_ref[j,-3] + (wave_ref[j,-3]-wave_ref[j,-4])
        wave_ref[j,0] = wave_ref[j,1] * (const.c.si.value + 1e5)/const.c.si.value
        wave_ref[j,-1] = wave_ref[j,-2] * (const.c.si.value - 1e5)/const.c.si.value
    #Barycentric correct
    for i in range(nf):
        for j in range(nm):
            #Awkwardly, we've extended the wavelength scale by 2 elements, but haven't yet extended
            #the fluxes...
            ww = wave_ref[j,1:-1]
            fluxes_rebin[i,j] = np.interp(ww*(1 - bcors[i]/const.c.si.value),ww[::-1],fluxes_rebin[i,j,::-1])
    #Subsample a reference spectrum using opticstools.utils.regrid_fft
    #and interpolate to fit.
    flux_meds = np.median(fluxes_rebin,axis=2)
    flux_files = np.median(flux_meds,axis=1)
    if med_cut > 0:
        good_files = np.where(flux_files > med_cut*np.median(flux_files))[0]
    else:
        # dtype=int (was np.int, an alias removed in NumPy 1.20/1.24).
        good_files = np.arange(len(flux_files),dtype=int)
    flux_orders = np.median(flux_meds[good_files],axis=0)
    flux_norm = fluxes_rebin.copy()
    for g in good_files:
        for j in range(nm):
            flux_norm[g,j,:] /= flux_meds[g,j]
    #Create a median over files
    flux_ref = np.median(flux_norm[good_files],axis=0)
    #Multiply this by the median for each order
    for j in range(nm):
        flux_ref[j] *= flux_orders[j]
    #Create a Gaussian smoothing function for the reference spectrum. This is needed to
    #prevent a bias to zero radial velocity, especially in the case of few data points.
    gg = np.exp(-(np.arange(2*gauss_hw+1)-gauss_hw)**2/2.0/gauss_sdev**2)
    gg /= np.sum(gg)
    one_order = np.empty(flux_ref.shape[1] + 2*gauss_hw)
    for j in range(nm):
        # Edge-pad each order before convolving so 'same' mode has no wrap.
        one_order[gauss_hw:-gauss_hw] = flux_ref[j,:]
        one_order[:gauss_hw] = one_order[gauss_hw]
        one_order[-gauss_hw:] = one_order[-gauss_hw-1]
        ref_spect[j,:] = np.convolve(one_order, gg, mode='same')[gauss_hw-1:1-gauss_hw]
    return wave_ref, ref_spect
def extract_spectra(files, star_dark, flat_files, flat_dark, location=('151.2094','-33.865',100.0), coord=None, outfile=None, do_bcor=True):
    """Extract the spectrum from a file, given a dark file, a flat file and
    a dark for the flat.

    Uses the module-level `rhea2_extract` extractor and echoes back the
    module-level `wave` scale.

    Parameters
    ----------
    files: list of strings
        One string for each file. CAn be on separate nights - a full pathname should be given.
    star_dark:
        2D master dark frame subtracted from each science frame.
    flat_files: list of strings.
        One string for each star file. CAn be on separate nights - a full pathname should be given.
    flat_dark:
        2D master dark frame subtracted from each flat frame.
    location: (lattitude:string, longitude:string, elevation:string)
        The location on Earth where the data were taken.
    coord:
        astropy SkyCoord of the target; if None, built from the FITS header.
    outfile:
        If not None, filename to save fluxes/vars/wave/bcors/mjds into.
    do_bcor: boolean
        Whether to compute barycentric corrections (otherwise stored as 0.0).

    Returns
    -------
    fluxes: (n_files, n_orders, ny) array
    vars: matching variances
    wave: wavelength scale (module-level global, returned unchanged)
    bcors: barycentric corrections (m/s)
    mjds: observation epochs (MJD)
    """
    # Initialise list of return values
    # Each index represents a single observation
    fluxes = []
    vars = []
    dates = []
    bcors = []
    #!!! This is dodgy, as files and flat_files should go together in a dict. !!!
    for ix,file in enumerate(files):
        # Dark correct the science and flat frames
        data = pyfits.getdata(file) - star_dark
        flat = pyfits.getdata(flat_files[ix]) - flat_dark
        header = pyfits.getheader(file)
        date = Time(header['DATE-OBS'], location=location)
        dates.append(date)
        # Determine the barycentric correction
        if do_bcor:
            if not coord:
                coord=SkyCoord( ra=float(header['RA']) , dec=float(header['DEC']) , unit='deg')
            if not location:
                location=( float(header['LONG']), float(header['LAT']), float(header['HEIGHT']))
            #(obs_long, obs_lat, obs_alt, ra2000, dec2000, jd, debug=False)
            # helcorr returns km/s; scale by 1e3 to store m/s.
            bcors.append( 1e3*pyasl.helcorr(float(location[0]),float(location[1]),location[2],coord.ra.deg, coord.dec.deg,date.jd)[0] )
        else:
            bcors.append(0.0)
        # Extract the fluxes and variance for the science and flat frames
        flux, var = rhea2_extract.one_d_extract(data=data, rnoise=20.0)
        flat_flux, fvar = rhea2_extract.one_d_extract(data=flat, rnoise=20.0)
        # Normalise each flat order to its median so flat-fielding preserves counts.
        for j in range(flat_flux.shape[0]):
            medf = np.median(flat_flux[j])
            flat_flux[j] /= medf
            fvar[j] /= medf**2
        #Calculate the variance after dividing by the flat
        var = var/flat_flux**2 + fvar * flux**2/flat_flux**4
        #Now normalise the flux.
        flux /= flat_flux
        #pdb.set_trace()
        fluxes.append(flux[:,:,0])
        vars.append(var[:,:,0])
    fluxes = np.array(fluxes)
    vars = np.array(vars)
    bcors = np.array(bcors)
    mjds = np.array([d.mjd for d in dates])
    # Output and save the results
    # NOTE(review): `header` here is the header of the *last* file in the loop,
    # and this raises NameError if `files` is empty.
    if not outfile is None:
        hl = pyfits.HDUList()
        hl.append(pyfits.ImageHDU(fluxes,header))
        hl.append(pyfits.ImageHDU(vars))
        hl.append(pyfits.ImageHDU(wave))
        col1 = pyfits.Column(name='bcor', format='D', array=bcors)
        col2 = pyfits.Column(name='mjd', format='D', array=mjds)
        cols = pyfits.ColDefs([col1, col2])
        # NOTE(review): new_table/clobber are removed in modern astropy.io.fits
        # (use BinTableHDU.from_columns and overwrite=True) -- confirm pyfits version.
        hl.append(pyfits.new_table(cols))
        hl.writeto(outfile, clobber=True)
    return fluxes,vars,wave,bcors,mjds
#------ Standard analysis --------
# Extract (or reload) all spectra, build a reference spectrum, then fit a
# radial velocity per order per epoch, and save/plot the results.
#Extract all data.
if do_we_extract:
    fluxes,vars,wave,bcors,mjds = extract_spectra(files, star_dark, flat_files, flat_dark, coord=coord,outfile=save_file, do_bcor=do_bcor)
if save_file is not None:
    # Reload from disk so re-runs can skip the (slow) extraction step.
    hl = pyfits.open(save_file)
    fluxes = hl[0].data
    vars = hl[1].data
    wave = hl[2].data
    bcors = hl[3].data['bcor']
    mjds = hl[3].data['mjd']
#Create a reference spectrum if not given
wave_ref,ref_spect = create_ref_spect(wave,fluxes,vars,bcors,med_cut=med_cut)
nm = fluxes.shape[1]
ny = fluxes.shape[2]
nf = fluxes.shape[0]
rvs = np.zeros( (nf,nm) )
rv_sigs = np.zeros( (nf,nm) )
initp = np.zeros(4)
initp[0]=0.0
spect_sdev = np.sqrt(vars)
fitted_spects = np.empty(fluxes.shape)
for i in range(nf):
    # !!! Uncomment here !!!
    initp[0] = -bcors[i] #Start with an initial guess that there is no intrinsic RV for the target.
    for j in range(nm):
        #This is the *only* non-linear interpolation function that doesn't take forever
        spline_ref = interp.InterpolatedUnivariateSpline(wave_ref[j,::-1], ref_spect[j,::-1])
        args = (wave[j,:],fluxes[i,j,:],spect_sdev[i,j,:],spline_ref)
        #Remove edge effects in a slightly dodgy way. 20 pixels is about 30km/s.
        args[2][:20] = np.inf
        args[2][-20:] = np.inf
        the_fit = op.leastsq(rv_shift_resid,initp,args=args, diag=[1e3,1e-6,1e-3,1],Dfun=rv_shift_jac,full_output=True)
        #Remove bad points (residuals beyond 7 sigma) and re-fit.
        resid = rv_shift_resid( the_fit[0], *args)
        wbad = np.where( np.abs(resid) > 7)[0]
        args[2][wbad] = np.inf
        the_fit = op.leastsq(rv_shift_resid,initp,args=args, diag=[1e3,1e-7,1e-3,1],Dfun=rv_shift_jac, full_output=True)
        #Some outputs for testing
        fitted_spects[i,j] = rv_shift_resid(the_fit[0],*args,return_spect=True)
        #Save the fit and the uncertainty.
        rvs[i,j] = the_fit[0][0]
        try:
            rv_sigs[i,j] = np.sqrt(the_fit[1][0,0])
        except Exception:
            # leastsq returns cov=None when the fit fails; mark as unusable.
            rv_sigs[i,j] = np.nan
    print("Done file {0:d}".format(i))
#Plot the Barycentric corrected RVs. Note that a median over all orders is
#only a first step - a weighted mean is needed.
plt.clf()
rvs += bcors.repeat(nm).reshape( (nf,nm) )
rv_mn,wt_sum = np.average(rvs,axis=1,weights=1.0/rv_sigs**2,returned=True)
rv_mn_sig = 1.0/np.sqrt(wt_sum)
rv_med1 = np.median(rvs,1)
rv_med2 = np.median(rvs[:,3:20],1)
#plt.plot_date([dates[i].plot_date for i in range(len(dates))], rv_mn)
#plt.errorbar(mjds, rv_mn, yerr=rv_mn_sig,fmt='o')
plt.errorbar(mjds, rv_med2, yerr=rv_mn_sig,fmt='o')
plt.xlabel('Date (MJD)')
plt.ylabel('Barycentric RV (m/s)')
plt.title(star)
#Write a csv file for the RVs and the RV_sigs
np.savetxt(rv_file, np.append(mjds.reshape(nf,1),rvs,axis=1), fmt="%10.4f" + nm*", %6.1f",header="Radial velocities in m/s for each order, for each MJD epoch")
np.savetxt(rv_sig_file, np.append(mjds.reshape(nf,1),rv_sigs,axis=1), fmt="%10.4f" + nm*", %6.1f",header="Radial velocity uncertainties in m/s for each order, for each MJD epoch")
#A line for checking the image...
#dd = pyfits.getdata (BLAH)(
#plt.imshow(np.arcsinh(dd/100), aspect='auto', interpolation='nearest', cmap=cm.cubehelix)
#plt.plot(1375/2 + xx.T,np.repeat(np.arange(2200),34).reshape(2200,34))
| {
"repo_name": "mikeireland/pymfe",
"path": "test_rhea2_extract.py",
"copies": "1",
"size": "16632",
"license": "mit",
"hash": 1001853231297649400,
"line_mean": 32.6,
"line_max": 179,
"alpha_frac": 0.6218133718,
"autogenerated": false,
"ratio": 3.0489459211732357,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9052310707772023,
"avg_score": 0.023689717040242482,
"num_lines": 495
} |
# A script to test the memory scaling properties of the algorithms. This script
# creates Erdos-Renyi graphs with weights randomly selected from the set {-1, 1}.
# One set of graphs is dense and the other is sparse. It tests all the heuristics
# on graphs of specified sizes, outputting the memory usage.
import csv
import os.path
import platform
import random
import subprocess
import sys
# Create a complete graph with random edge weights selected from {-1, 1}
# Create a complete graph with random edge weights selected from {-1, 1}
def createCompleteGraph(size):
    """Write a complete graph on `size` nodes with +/-1 weights to complete.txt.

    Output format: a "<nodes> <edges>" header line, then one "<u> <v> <w>"
    line per edge with 1-indexed node ids.  random.seed(144) makes the
    weights reproducible across runs.
    """
    random.seed(144)
    with open('complete.txt', 'w') as fp:
        # A complete graph has size*(size-1)//2 edges.  Integer division is
        # required: Python 3's / would write a float (e.g. "45.0") into the
        # header, which graph parsers expecting an int cannot read.
        fp.write(str(size) + " " + str(size*(size-1)//2) + "\n")
        for x in range(size-1):
            for y in range(x+1, size):
                fp.write(str(x+1) + " " + str(y+1) + " " + str(2*random.randint(0, 1)-1) + "\n")
# Create an Erdos-Renyi graph with random edge weights selected from {-1, 1}
# and edge probabilities that yield an expected degree of 5 for each node.
def createERGraph(size):
    """Write an Erdos-Renyi graph with +/-1 edge weights to ER.txt.

    The edge probability is chosen so every node has expected degree 5.
    random.seed(144) makes the output reproducible for a given size.
    """
    random.seed(144)
    p = 5.0 / (size-1)
    # Sample the edge set first (same pair order as nested loops), so the
    # weight draws below consume the RNG in a separate, second pass.
    edges = [(u, v)
             for u in range(size-1)
             for v in range(u+1, size)
             if random.random() < p]
    with open('ER.txt', 'w') as fp:
        fp.write(str(size) + " " + str(len(edges)) + "\n")
        for u, v in edges:
            weight = 2*random.randint(0, 1) - 1
            fp.write(str(u+1) + " " + str(v+1) + " " + str(weight) + "\n")
# Determine the runtime limit for the indicated graph
def getRuntimeLimit(graphName):
    """Run the BASELINE heuristic on graphName and return its runtime field.

    Invokes the MQLib binary under a 10000-second timeout; the binary's
    CSV output has the baseline runtime in its 5th comma-separated field,
    which callers use as the time limit for the other heuristics.
    """
    cmd = ["timeout", "10000", "../bin/MQLib", "-fM",
           graphName, "-h", "BASELINE", "-r", "1500", "-s", "144"]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    output = proc.stdout.read().decode("utf-8")
    return output.split(",")[4]
##############################
##############################
# Graph-size bounds: ER graphs are only generated at or above the minimum,
# complete graphs only up to the maximum (they grow quadratically).
minERGraphSize = 1000
maxCompleteGraphSize = 3000
if len(sys.argv) < 3 or not all([x.isdigit() for x in sys.argv[2:]]):
    print('Usage: python scaling.py heuristics.txt n1 n2 n3 ...')
    exit(0)
if not os.path.exists("../bin"):
    print("scaling.py must be run from the scripts folder")
    exit(0)
if not os.path.exists("../bin/MQLib"):
    print("You need to run `make` in the main folder before running scaling.py")
    exit(0)
# Load the heuristics to evaluate
with open(sys.argv[1], 'r') as fp:
    heuristics = [x.strip() for x in fp]
# Results go to stdout as CSV, one row per (heuristic, graph, size).
writer = csv.writer(sys.stdout)
writer.writerow(['heuristic', 'graph', 'size', 'runtime', 'memusg'])
for size in [int(x) for x in sys.argv[2:]]:
    # Build the test graphs for this size and get each one's runtime limit.
    runs = []
    if size <= maxCompleteGraphSize:
        createCompleteGraph(size)
        runs.append(('complete.txt', getRuntimeLimit('complete.txt')))
    if size >= minERGraphSize:
        createERGraph(size)
        runs.append(('ER.txt', getRuntimeLimit('ER.txt')))
    for graphName, runtime in runs:
        for heuristic in heuristics:
            if platform.system() == 'Darwin':
                # macOS/BSD time: -l flag, peak RSS reported in *bytes*.
                torun = ['/usr/bin/time', '-l', '../bin/MQLib', '-fM',
                         graphName, '-h', heuristic, '-r', runtime, '-s', '144',
                         '-nv']
                p = subprocess.Popen(torun, stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT)
                baseline_output = p.stdout.read().decode("utf-8")
                for q in baseline_output.split("\n"):
                    if q.find("maximum resident set size") >= 0:
                        writer.writerow([heuristic, graphName, size, runtime,
                                         q.strip().split()[0]])
            else:
                # GNU time: -v flag, peak RSS reported in *kbytes*.
                torun = ['/usr/bin/time', '-v', '../bin/MQLib', '-fM',
                         graphName, '-h', heuristic, '-r', runtime, '-s', '144',
                         '-nv']
                p = subprocess.Popen(torun, stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT)
                baseline_output = p.stdout.read().decode("utf-8")
                for q in baseline_output.split("\n"):
                    if q.find("Maximum resident set size (kbytes):") >= 0:
                        writer.writerow([heuristic, graphName, size, runtime,
                                         q.strip().split()[5]])
| {
"repo_name": "MQLib/MQLib",
"path": "scripts/scaling.py",
"copies": "1",
"size": "4256",
"license": "mit",
"hash": -1198514721816574200,
"line_mean": 41.56,
"line_max": 96,
"alpha_frac": 0.5498120301,
"autogenerated": false,
"ratio": 3.6037256562235394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.964423782072463,
"avg_score": 0.0018599731197816358,
"num_lines": 100
} |
# A script to test tracking on the Raspberry Pi driving servos with the AdaFruit16ServoDriver service
# as at mrl development build version 2489
# a mashup of code taken from Mats:
# https://github.com/MyRobotLab/pyrobotlab/blob/master/home/Mats/Tracking.py
# and also from Grog:
# http://myrobotlab.org/content/tracking-results
#
import time
from org.myrobotlab.opencv import OpenCVFilterPyramidDown
# Servo channels on the Adafruit 16-channel driver and the camera to use.
xPin = 0;
yPin = 1;
arduinoPort = "/dev/ttyAMA0";
cameraIndex = 0;
#start an AdaFruit16C I2C servo driver instance
adaFruit16c3 = Runtime.createAndStart("AdaFruit16C3","Adafruit16CServoDriver")
#start a Raspberry Pi instance
raspi = Runtime.createAndStart("RasPi","RasPi")
#attach the AdaFruit16C I2C servo driver to the Raspberry Pi
# NOTE(review): "1" is the I2C bus and "0x42" the board address - confirm for your wiring.
adaFruit16c3.setController("RasPi","1","0x42")
#set the frequency for the AdaFruit16C I2C servo driver to 50 Hz
adaFruit16c3.setPWMFreq(0,50)
#start and connect a Virtual Arduino instance
virtual = Runtime.start("virtual", "VirtualArduino");
virtual.connect(arduinoPort);
#start a tracker instance
tracker = Runtime.start("tracker", "Tracking");
x = tracker.getX();
# invert if necessary
# x.setInverted(True);
y = tracker.getY();
# invert if necessary
# y.setInverted(True);
tracker.connect(arduinoPort, xPin, yPin, cameraIndex);
#small delay here to resolve servo/attach glitch
sleep(5)
#detach x and y servos from Virtual Arduino
tracker.x.detach()
tracker.y.detach()
#attach x and y servos to AdaFruit servo driver
# Arguments after the pin are the rest position and velocity for each axis.
tracker.x.attach(adaFruit16c3,xPin,70,20);
tracker.y.attach(adaFruit16c3,yPin,60,20);
# Limit travel and speed of the pan (x) servo.
tracker.x.setVelocity(20);
tracker.x.setMinMax(60,90);
#x.setInverted(True);
tracker.x.setRest(70);
tracker.x.rest();
# Tilt (y) servo is mounted reversed, hence the inversion.
tracker.y.setVelocity(20);
tracker.y.setInverted(True);
tracker.y.setMinMax(50,75);
tracker.y.setRest(60);
tracker.y.rest();
#adjust PID values to suit
pid = Runtime.start("tracker.pid","Pid")
#tracker.pid.setPID("tracker.x", 5.0, 1.0, 0.1);
#tracker.pid.setPID("tracker.y", 20.0, 1.0, 0.1);
#pid.setPID("x", 10.0, 1.0, 0.1);
#pid.setPID("y", 50.0, 1.0, 0.1);
#opencv = tracker.getOpenCV();
#opencv.broadcastState();
opencv = Runtime.start("tracker.opencv","OpenCV")
# P, I, D gains for each axis.
pid.setPID("x", 5.0, 1.0, 0.1);
pid.setPID("y", 5.0, 1.0, 0.1);
sleep(1);
tracker.y.setInverted(True);
sleep(1);
# additional PyramidDown filter for improved framerate on the Pi (~15 fps)
PreFilterPyramidDown = OpenCVFilterPyramidDown("PreFilterPyramidDown")
tracker.preFilters.add(PreFilterPyramidDown)
tracker.opencv.setDisplayFilter("PreFilterPyramidDown")
#opencv.capture();
# do lk optical point tracking
# tracker.startLKTracking();
# do face tracking
#tracker.faceDetect();
| {
"repo_name": "MyRobotLab/pyrobotlab",
"path": "home/CheekyMonkey/tracking-i2c.py",
"copies": "1",
"size": "2639",
"license": "apache-2.0",
"hash": 3155660489897487400,
"line_mean": 24.1333333333,
"line_max": 101,
"alpha_frac": 0.7483895415,
"autogenerated": false,
"ratio": 2.6873727087576373,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3935762250257637,
"avg_score": null,
"num_lines": null
} |
"""A script to train the DNC on implemented tasks.
You can start training the DNC model on any implemented task by executing:
> python -m src.tasks.train --task=<task_name>
TO SUPPORT NEW TASKS:
1) Import necessary code for task (follow necessary requirements listed below).
2) Create new section in flags and define any valid flags for the task.
3) In the "get_task" method, append a command-line name for the task to the end
of the list "valid_tasks".
4) Append a lambda function to the end of the "instantiate_task" list that
returns an instantiated object of the task using all FLAGS defined in step 2.
REQUIREMENTS FOR ALL TASKS:
* The task's class must be a sub-class of snt.AbstractModule implementing
methods `_build(self)`, `cost(output, task_state)`,
`to_string(output, task_state)`, and `process_output(output, task_state)`.
* The `_build(self)` method must return a collections.namedtuple,
`task_state`, containing at least fields 'input'. Other fields are
allowed to be used internally in the other methods. For example, the
'target' field would likely be needed for supervised learning tasks to
calculate the cost.
* The `cost(output, task_state)` method must return the losses for the
model to be used in `tf.gradients(losses, trainable_variables)`.
* The `to_string(output, task_state, model_state)` method must return a
string. This string will be logged to the console every time a report
comes up during training time. Preferably, this string provides an
example input/output to show what the DNC model is doing.
* The `process_output(output, task_state, model_state)` method returns
the output back if no processing is needed. This method processes the
output passed to `to_string(output, task_state)`, but not to
`cost(output, task_state)`. If the output needs to be processed in
`cost(output, task_output)`, then that method needs to call it itself.
This provides ability to transform the data before
`to_string(output, task_state)` converts it to a human readable
representation. For example, if the model outputs logits, but you need
probabilities (repeat copy task), then do that here.
* The task's class has public property `output_size`. This property must be
an integer representing the size of the output expected from the DNC model
for each iteration of this task.
"""
from .. dnc.dnc import DNC
from . dna_sequencing.dna_sequencing import DNASequencing
from . repeat_copy.repeat_copy import RepeatCopy
import sonnet as snt
import tensorflow as tf
# Command-line flags (TensorFlow 1.x tf.flags API).
FLAGS = tf.flags.FLAGS

# DNC parameters
tf.flags.DEFINE_integer("memory_size", 16, "The number of memory slots.")
tf.flags.DEFINE_integer("word_size", 16, "The width of each memory slot.")
tf.flags.DEFINE_integer("num_read_heads", 1,
                        "The number of memory read heads.")
tf.flags.DEFINE_integer("hidden_size", 64,
                        "The size of LSTM hidden layer in the controller.")
tf.flags.DEFINE_string("controller", "lstm", "The type of controller to use "
                       "(options: [lstm, ff]).")

# Task parameters
tf.flags.DEFINE_integer("batch_size", 16, "The batch size used in training.")
tf.flags.DEFINE_string("task", "repeat_copy", "The task to train the DNC on.")

# RepeatCopy task parameters (used only if using the RepeatCopy task)
tf.flags.DEFINE_integer("num_bits", 4,
                        "Dimensionality of each vector to copy.")
tf.flags.DEFINE_integer("min_length", 1,
                        "Lower limit on number of vectors in the observation "
                        "pattern to copy.")
tf.flags.DEFINE_integer("max_length", 2,
                        "Upper limit on number of vectors in the observation "
                        "pattern to copy.")
tf.flags.DEFINE_integer("min_repeats", 1,
                        "Lower limit on number of copy repeats.")
tf.flags.DEFINE_integer("max_repeats", 2,
                        "Upper limit on number of copy repeats.")

# Training parameters
tf.flags.DEFINE_integer("num_training_iterations", 1000,
                        "Number of iterations to train for.")
tf.flags.DEFINE_integer("report_interval", 100,
                        "Iterations between reports (samples, valid loss).")
tf.flags.DEFINE_string("checkpoint_dir", "~/tmp/dnc", "Checkpoint directory.")
tf.flags.DEFINE_string("checkpoint_basename", "model.ckpt",
                       "Base name for the checkpoint files")
tf.flags.DEFINE_integer("checkpoint_interval", -1,
                        "Checkpointing step interval (-1 means never).")
tf.flags.DEFINE_float("gpu_usage", 0.2,
                      "The percent of gpu memory to use for each process.")
tf.flags.DEFINE_boolean("test", False,
                        "Whether this is testing the model or not.")

# Optimizer parameters
tf.flags.DEFINE_float("max_grad_norm", 50, "Gradient clipping norm limit.")
tf.flags.DEFINE_float("learning_rate", 1e-4, "Optimizer learning rate.")
tf.flags.DEFINE_float("optimizer_epsilon", 1e-10,
                      "Epsilon used for RMSProp optimizer.")
def get_task(task_name):
    """Instantiate the task named *task_name*, configured from FLAGS.

    Raises ValueError (via list.index) for an unknown task name, exactly
    like the original name-list lookup.
    """
    registry = [
        ("repeat_copy", lambda: RepeatCopy(
            num_bits=FLAGS.num_bits,
            batch_size=FLAGS.batch_size,
            min_length=FLAGS.min_length,
            max_length=FLAGS.max_length,
            min_repeats=FLAGS.min_repeats,
            max_repeats=FLAGS.max_repeats)),
        ("dna_sequencing", lambda: DNASequencing(
            batch_size=FLAGS.batch_size)),
    ]
    names = [name for name, _ in registry]
    factory = registry[names.index(task_name)][1]
    return factory()
def run_model(input, output_size):
    """Run the model on the given input and returns size output_size.

    In test mode on the repeat-copy task the RNN is unrolled one timestep
    at a time so the memory-head weights and gates can be traced; otherwise
    the whole time-major sequence is run in a single dynamic_rnn call.

    NOTE(review): the parameter name `input` shadows the builtin; kept
    unchanged because callers may pass it by keyword.
    """
    dnc_cell = DNC(output_size,
                   memory_size=FLAGS.memory_size,
                   word_size=FLAGS.word_size,
                   num_read_heads=FLAGS.num_read_heads,
                   hidden_size=FLAGS.hidden_size)
    # Test mode uses batch size 1 so individual examples can be inspected.
    if FLAGS.test and FLAGS.task == "repeat_copy":
        prev_state = dnc_cell.initial_state(1, dtype=input.dtype)
    else:
        prev_state = dnc_cell.initial_state(FLAGS.batch_size,
                                            dtype=input.dtype)
    if FLAGS.test and FLAGS.task == "repeat_copy":
        # Start the traces from the initial head weights/gates.
        model_state = {
            'rw': prev_state.tape_head.read_weights,
            'ww': prev_state.tape_head.write_weights,
            'fg': prev_state.tape_head.free_gate,
            'ag': prev_state.tape_head.alloc_gate,
        }
        output = None
        model_state_t = prev_state
        # NOTE(review): 13 appears to be the fixed test-mode sequence
        # length -- confirm against the repeat-copy task setup.
        for time_index in range(13):
            output_t, model_state_t = tf.nn.dynamic_rnn(
                cell=dnc_cell,
                inputs=tf.expand_dims(input[time_index, :, :], 0),
                time_major=True,
                initial_state=model_state_t)
            if output is None:
                output = output_t
            else:
                output = tf.concat([output, output_t], 0)
            # Append this step's head weights/gates to the running traces.
            model_state['rw'] = tf.concat(
                [model_state['rw'], model_state_t.tape_head.read_weights], 0)
            model_state['ww'] = tf.concat(
                [model_state['ww'], model_state_t.tape_head.write_weights], 0)
            model_state['fg'] = tf.concat(
                [model_state['fg'], model_state_t.tape_head.free_gate], 0)
            model_state['ag'] = tf.concat(
                [model_state['ag'], model_state_t.tape_head.alloc_gate], 0)
    else:
        # Normal path: one dynamic_rnn call over the whole sequence.
        output, model_state = tf.nn.dynamic_rnn(
            cell=dnc_cell,
            inputs=input,
            time_major=True,
            initial_state=prev_state)
    return output, model_state
def run_lstm_baseline(input, output_size):
    """Run a plain LSTM baseline of width *output_size* over *input*.

    Returns the (output, final_state) pair produced by tf.nn.dynamic_rnn
    on the time-major input.
    """
    lstm_cell = snt.LSTM(hidden_size=output_size)
    zero_state = lstm_cell.initial_state(FLAGS.batch_size, dtype=input.dtype)
    return tf.nn.dynamic_rnn(
        cell=lstm_cell,
        inputs=input,
        time_major=True,
        initial_state=zero_state)
def get_config():
    """Return a tf.ConfigProto capping per-process GPU memory at FLAGS.gpu_usage.

    Bug fix: the original built the ConfigProto but fell off the end of the
    function, returning None -- so SingularMonitoredSession received
    config=None and the GPU memory cap was silently never applied.
    """
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_usage
    return config
def train():
    """Train the DNC and periodically report the loss.

    Builds the task graph, the DNC model, a gradient-clipped RMSProp
    training op and optional checkpoint hooks, then runs the training
    loop inside a SingularMonitoredSession.  Returns the task object.
    """
    task = get_task(FLAGS.task)
    task_state = task()
    output, model_state = run_model(task_state.input, task.output_size)
    output_processed = task.process_output(output, task_state, model_state)
    # responsibility of task.cost to process output if desired
    train_loss = task.cost(output, task_state)

    # Optimizer: RMSProp with global-norm gradient clipping.
    trainable_variables = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(
        tf.gradients(train_loss, trainable_variables), FLAGS.max_grad_norm)
    global_step = tf.Variable(0, trainable=False, name='global_step')
    optimizer = tf.train.RMSPropOptimizer(
        FLAGS.learning_rate, epsilon=FLAGS.optimizer_epsilon)
    train_step = optimizer.apply_gradients(
        zip(grads, trainable_variables), global_step=global_step)

    # Periodic checkpointing (disabled when checkpoint_interval <= 0).
    saver = tf.train.Saver()
    if FLAGS.checkpoint_interval > 0:
        hooks = [
            tf.train.CheckpointSaverHook(
                checkpoint_dir=FLAGS.checkpoint_dir,
                checkpoint_basename=FLAGS.checkpoint_basename,
                save_steps=FLAGS.checkpoint_interval,
                saver=saver)
        ]
    else:
        hooks = []

    # Training time
    with tf.train.SingularMonitoredSession(
        hooks=hooks, config=get_config(), checkpoint_dir=FLAGS.checkpoint_dir,
    ) as sess:
        # Resume from the restored global step, if a checkpoint was loaded.
        start_iteration = sess.run(global_step)
        total_loss = 0
        for train_iteration in range(start_iteration,
                                     FLAGS.num_training_iterations):
            if FLAGS.test:
                # Test mode: evaluate the loss only, never update weights.
                loss = sess.run(train_loss)
            else:
                _, loss = sess.run([train_step, train_loss])
            total_loss += loss
            # report periodically
            if (train_iteration + 1) % FLAGS.report_interval == 0:
                task_state_eval, output_eval, model_state_eval = sess.run(
                    [task_state, output_processed, model_state])
                report_string = task.to_string(
                    output_eval, task_state_eval, model_state_eval,
                    verbose=FLAGS.test)
                if not FLAGS.test:
                    tf.logging.info(
                        "Train Iteration %d: Avg training loss: %f.\n",
                        train_iteration, total_loss / FLAGS.report_interval)
                    # reset total_loss to report the interval's loss only
                    total_loss = 0
                if report_string != "":
                    tf.logging.info(report_string)
    return task
def main(unused):
    """Main method for this app: enable INFO logging, then train."""
    tf.logging.set_verbosity(3)  # Print INFO log messages.
    train()


if __name__ == "__main__":
    # tf.app.run parses the command-line flags and then calls main().
    tf.app.run()
| {
"repo_name": "derrowap/DNC-TensorFlow",
"path": "src/tasks/train.py",
"copies": "1",
"size": "11555",
"license": "mit",
"hash": 4157049069585212400,
"line_mean": 40.7962962963,
"line_max": 79,
"alpha_frac": 0.6046733016,
"autogenerated": false,
"ratio": 3.955837042108867,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5060510343708866,
"avg_score": null,
"num_lines": null
} |
# A script to turn a motifset object into a beta object
import os
import numpy as np
import sys
import jsonpickle

# Django must be configured before the model imports below will resolve.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ms2ldaviz.settings")
import django
django.setup()

from decomposition.models import MotifSet,FeatureSet,GlobalFeature,GlobalMotifsToSets,Beta,FeatureMap
from basicviz.models import Alpha,Mass2MotifInstance

if __name__ == '__main__':
    # The motifset to convert is named on the command line.
    motifset_name = sys.argv[1]
    ms = MotifSet.objects.get(name = motifset_name)
    print("Loaded {}".format(ms))
    fs = FeatureSet.objects.get(motifset = ms)
    print("Extracted {}".format(fs))
    motif_links = GlobalMotifsToSets.objects.filter(motifset = ms)
    global_motifs = [m.motif for m in motif_links]
    global_features = GlobalFeature.objects.filter(featureset = fs)
    print("Extracted {} global features".format(len(global_features)))
    fmap = FeatureMap.objects.filter(globalfeature__in = global_features)
    print("Extracted {} feature map objects".format(len(fmap)))

    # Map local features and original motifs to their global counterparts.
    feature_map_dict = {}
    for feature in fmap:
        feature_map_dict[feature.localfeature] = feature.globalfeature
    motif_map_dict = {}
    for globalmotif in global_motifs:
        motif_map_dict[globalmotif.originalmotif] = globalmotif

    # Assign a dense position index to every motif and every feature.
    n_motifs = len(global_motifs)
    n_global_features = len(global_features)
    motif_index = {}
    for i in range(n_motifs):
        motif_index[global_motifs[i]] = i
    feature_index = {}
    for i in range(n_global_features):
        feature_index[global_features[i]] = i

    # Build the sparse beta matrix as (motif_pos, feature_pos, probability).
    betalist = []
    originalmotifs = [m.originalmotif for m in global_motifs]
    fm2ms = Mass2MotifInstance.objects.filter(mass2motif__in = originalmotifs)
    print("Found {} instances".format(len(fm2ms)))
    n_done = 0
    for fm2m in fm2ms:
        n_done += 1
        if fm2m.feature in feature_map_dict:
            global_feature = feature_map_dict[fm2m.feature]
            fpos = feature_index[global_feature]
            mpos = motif_index[motif_map_dict[fm2m.mass2motif]]
            betalist.append((mpos,fpos,fm2m.probability))
            # beta[mpos][fpos] = fm2m.probability
        # progress indicator
        if n_done % 100 == 0:
            print(n_done,len(fm2ms))

    # Invert the index maps into position -> database id lists.
    feature_id_list = [None for f in range(n_global_features)]
    motif_id_list = [None for m in range(n_motifs)]
    for motif,pos in motif_index.items():
        motif_id_list[pos] = motif.id
    for feature,pos in feature_index.items():
        feature_id_list[pos] = feature.id

    # Get the alphas
    alpha_list = [0.0 for m in range(n_motifs)]
    for motif,pos in motif_index.items():
        originalmotif = motif.originalmotif
        alpha = Alpha.objects.get(mass2motif = originalmotif)
        alpha_list[pos] = alpha.value

    # Persist everything on the (possibly newly created) Beta row as JSON.
    # NOTE(review): `status` is get_or_create's "created" boolean; unused.
    b,status = Beta.objects.get_or_create(motifset = ms)
    b.beta = jsonpickle.encode(betalist)
    b.motif_id_list = jsonpickle.encode(motif_id_list)
    b.feature_id_list = jsonpickle.encode(feature_id_list)
    b.alpha_list = jsonpickle.encode(alpha_list)
    b.motifset = ms
    b.save()
| {
"repo_name": "sdrogers/ms2ldaviz",
"path": "ms2ldaviz/decomp_make_beta_from_motifset.py",
"copies": "1",
"size": "3008",
"license": "mit",
"hash": 8972011531042131000,
"line_mean": 32.0549450549,
"line_max": 101,
"alpha_frac": 0.6748670213,
"autogenerated": false,
"ratio": 3.1333333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43082003546333336,
"avg_score": null,
"num_lines": null
} |
# a script to turn a normal sentence into a sequence of left-to-right and right-to-left strings, with the same rendering
import sys
import random
import codecs

# Unicode bidirectional control characters:
#   U+202E RIGHT-TO-LEFT OVERRIDE  -- starts a reversed-rendering run
#   U+202D LEFT-TO-RIGHT OVERRIDE  -- switches rendering back to normal
START = u"\u202e"
END = u"\u202d"
def _(s):
    """Return *s* reversed and wrapped in RLO/LRO marks so it renders unchanged."""
    flipped = s[::-1]
    return START + u"".join(flipped) + END
def splitstr(s, c):
    """Split *s* into *c* chunks of (nearly) equal length.

    The first c-1 chunks get len(s) // c characters each; the remainder
    (len(s) % c characters) is appended to the last chunk.  For c < 2 the
    string is returned whole as a one-element list.

    Fix: use floor division and range() so the function also works under
    Python 3 -- the original "len(s) / c" true-divides to a float there
    (making the slice bounds invalid) and xrange does not exist.  Behaviour
    under Python 2 is unchanged.
    """
    if c < 2:
        return [s]
    size = len(s) // c
    off = 0
    words = []
    for _i in range(c):
        words.append(s[off:off + size])
        off += size
    # whatever is left over goes onto the last chunk
    words[-1] = words[-1] + s[off:]
    return words
# NOTE(review): this script is Python 2 only (print statement, xrange, and
# mixing the byte string read in "rb" mode with the unicode override marks).
if len(sys.argv) == 1:
    print "Usage: %s <input_file>\n\t will create <input_file.rev>" % sys.argv[0]
    sys.exit()

fn = sys.argv[1]
with open(fn, "rb") as f:
    source = f.read()
l = len(source)
#let's split the string in 2
middle = random.randint(1, l - 1)
start, end = source[:middle], source[middle:]
# and split each side in the same amount of words
splitcounter = random.randint(1, min(middle, l - middle))
starts, ends = splitstr(start, splitcounter), splitstr(end, splitcounter)
assert len(starts) == len(ends)
i = len(starts)
# Interleave: each plain chunk from the front half is followed by a
# reversed-and-overridden chunk from the back half (taken in reverse order),
# so the whole thing still renders as the original text.
final = []
for k in xrange(i):
    final.append(starts[k])
    final.append(_(ends[i - 1 - k]))
target = "".join(final)
with codecs.open(fn + ".rev", "w+", encoding='utf-16') as f:
    f.write(target)
sys.exit()
| {
"repo_name": "angea/corkami",
"path": "misc/python/reversestr.py",
"copies": "1",
"size": "1298",
"license": "bsd-2-clause",
"hash": -4223012030739992600,
"line_mean": 22.037037037,
"line_max": 120,
"alpha_frac": 0.593990755,
"autogenerated": false,
"ratio": 2.9168539325842695,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8965033150204391,
"avg_score": 0.009162307475975552,
"num_lines": 54
} |
# A script to upload a bunch of images to see how your server
# handles the load.
#
# Based on http://atlee.ca/software/poster/
# https://bitbucket.org/chrisatlee/poster
# NOTE(review): Python 2 script; `poster` is a third-party dependency.
import os
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers
import urllib2

# Local directory of sample images and the endpoint to hammer.
UPLOADS_PATH = "/home/ben/Projects/WaznexServer/waznexserver/tests/hammeruploads/uploads"
UPLOAD_URL = "http://waznex-dev.clusterbleep.net/upload/"
def upload_images():
# Get list of files from IMAGE_FOLDER and sort by "date" (filename)
image_list = os.listdir(UPLOADS_PATH)
for image in image_list:
try:
print "Uploading: " + image
register_openers()
datagen, headers = multipart_encode({"file": open(UPLOADS_PATH + '/' + image, "rb")})
request = urllib2.Request(UPLOAD_URL, datagen, headers)
print urllib2.urlopen(request).read()
except:
pass # Skip files that don't work
# Run the load test when invoked as a script.
if __name__ == '__main__':
    upload_images()
| {
"repo_name": "brousch/WaznexServer",
"path": "waznexserver/tests/hammeruploads/hammerit.py",
"copies": "3",
"size": "1024",
"license": "mit",
"hash": 7892549302219769000,
"line_mean": 34.3448275862,
"line_max": 97,
"alpha_frac": 0.662109375,
"autogenerated": false,
"ratio": 3.618374558303887,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5780483933303886,
"avg_score": null,
"num_lines": null
} |
# A Script using the downhill simplex algorithm to optimize the seven parameters in the EMT calculator
# Written by: Rasmus E. Christiansen
# NOTE(review): Python 2 script (print statement at the bottom); Optimize and
# ErrorFunction are project-local modules.
import Optimize as Optim
from ErrorFunction import ErrorFunction as EF
import numpy

# Previously fitted values for the seven EMT parameters per element:
parameters = {
    #      E0      s0     V0      eta2    kappa   lambda  n0
    #      eV      bohr   eV      bohr^-1 bohr^-1 bohr^-1 bohr^-3
    'H':  (-2.21,  0.71,  2.132,  1.652,  2.790,  1.892,  0.00547),
    'Al': (-3.28,  3.00,  1.493,  1.240,  2.000,  1.169,  0.00700),
    'Cu': (-3.51,  2.67,  2.476,  1.652,  2.740,  1.906,  0.00910),
    'Ag': (-2.96,  3.01,  2.132,  1.652,  2.790,  1.892,  0.00547),
    'Au': (-3.80,  3.00,  2.321,  1.674,  2.873,  2.182,  0.00703),
    'Ni': (-4.44,  2.60,  3.673,  1.669,  2.757,  1.948,  0.01030),
    'Pd': (-3.90,  2.87,  2.773,  1.818,  3.107,  2.155,  0.00688),
    'Pt': (-5.85,  2.90,  4.067,  1.812,  3.145,  2.192,  0.00802),
    'C':  (-1.97,  1.18,  0.132,  3.652,  5.790,  2.892,  0.01322),
    'N':  (-4.97,  1.18,  0.132,  2.652,  3.790,  2.892,  0.01222),
    'O':  (-2.97,  1.25,  2.132,  3.652,  5.790,  4.892,  0.00850)}

# The parameters which are to be optimized are specified. This is done by initializing the dictionary
# VariableParameters indexed by the elements whose parameters are being optimized including len()=7 arrays
# of the form [0.,0.,1.,1.,0.,1.,0.], where 0 means the parameter is to be kept constant while 1 means it
# is to be optimized. The initial values of the parameters to be optimized are then set in the list ParamInit.
VariableParameters = {'Cu': numpy.array([1.,1.,1.,1.,1.,1.,1.]),
                      'Au': numpy.array([1.,1.,1.,1.,1.,1.,1.])}
Elements = VariableParameters.keys()

# The alloys desired are given in ElementsAlloys
#ElementsAlloys = ['AuCu3']
ElementsAlloys = []

# Experimental values picked from tabel: collect the starting value of every
# parameter flagged with a 1 above, in element order.
ParamInit = []
for i in Elements:
    for j in range(7):
        if VariableParameters[i][j] == 1:
            ParamInit.append(parameters[i][j])

# The ErrorFunction object is created using the chosen elements and variable parameters
ErrorFunc = EF(Elements,ElementsAlloys,VariableParameters)

# The minimization simplex algorithm is run using the ErrorFunction.
print Optim.fmin(ErrorFunc.ErFu,ParamInit,xtol=0.001,ftol=0.001)
| {
"repo_name": "auag92/n2dm",
"path": "Asap-3.8.4/Python/asap3/Tools/ParameterOptimization/Rasmus2011/MinAlgorithm.py",
"copies": "1",
"size": "2299",
"license": "mit",
"hash": 8259573070075966000,
"line_mean": 41.5740740741,
"line_max": 110,
"alpha_frac": 0.6294040887,
"autogenerated": false,
"ratio": 2.6547344110854505,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8649018173331674,
"avg_score": 0.02702406529075525,
"num_lines": 54
} |
# A ScrolledList widget feels like a list widget but also has a
# vertical scroll bar on its right. (Later, options may be added to
# add a horizontal bar as well, to make the bars disappear
# automatically when not needed, to move them to the other side of the
# window, etc.)
#
# Configuration options are passed to the List widget.
# A Frame widget is inserted between the master and the list, to hold
# the Scrollbar widget.
# Most methods calls are inherited from the List widget; Pack methods
# are redirected to the Frame widget however.
from Tkinter import *
from Tkinter import _cnfmerge
class ScrolledListbox(Listbox):
    """A Listbox packed next to a vertical Scrollbar inside a Frame.

    Pack geometry calls are forwarded to the enclosing frame so the
    composite behaves like a single widget.  (Legacy Python 2 / Tkinter
    old-style cnf-dict API.)
    """

    def __init__(self, master=None, cnf={}):
        # _cnfmerge returns a fresh dict, so the mutable default is safe.
        cnf = _cnfmerge(cnf)
        fcnf = {}
        vcnf = {'name': 'vbar',
                Pack: {'side': 'right', 'fill': 'y'},}
        # Options keyed by a class (widget-class defaults) or 'name' belong
        # to the Frame; everything else configures the Listbox.
        # NOTE(review): ClassType is not imported in this file -- it
        # presumably arrives via `from Tkinter import *` on old Python 2;
        # verify before reusing this module.
        for k in cnf.keys():
            if type(k) == ClassType or k == 'name':
                fcnf[k] = cnf[k]
                del cnf[k]
        self.frame = Frame(master, fcnf)
        self.vbar = Scrollbar(self.frame, vcnf)
        cnf[Pack] = {'side': 'left', 'fill': 'both', 'expand': 'yes'}
        cnf['name'] = 'list'
        Listbox.__init__(self, self.frame, cnf)
        # Wire the scrollbar and the listbox view together.
        self['yscrollcommand'] = (self.vbar, 'set')
        self.vbar['command'] = (self, 'yview')
        # Copy Pack methods of self.frame -- hack!
        for m in Pack.__dict__.keys():
            if m[0] != '_' and m != 'config':
                setattr(self, m, getattr(self.frame, m))
| {
"repo_name": "vlinhd11/vlinhd11-android-scripting",
"path": "python/src/Tools/modulator/ScrolledListbox.py",
"copies": "37",
"size": "1477",
"license": "apache-2.0",
"hash": -4334048460384505300,
"line_mean": 38.9189189189,
"line_max": 70,
"alpha_frac": 0.5951252539,
"autogenerated": false,
"ratio": 3.59367396593674,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A ScrolledText widget feels like a text widget but also has a
vertical scroll bar on its right. (Later, options may be added to
add a horizontal bar as well, to make the bars disappear
automatically when not needed, to move them to the other side of the
window, etc.)
Configuration options are passed to the Text widget.
A Frame widget is inserted between the master and the text, to hold
the Scrollbar widget.
Most methods calls are inherited from the Text widget; Pack, Grid and
Place methods are redirected to the Frame widget however.
"""
__all__ = ['ScrolledText']
from Tkinter import Frame, Text, Scrollbar, Pack, Grid, Place
from Tkconstants import RIGHT, LEFT, Y, BOTH
class ScrolledText(Text):
    """A Text widget with a vertical Scrollbar, packed inside a Frame.

    Geometry-management calls (pack/grid/place) are forwarded to the frame,
    so the composite can be placed like a single widget.
    """

    def __init__(self, master=None, **kw):
        self.frame = Frame(master)
        self.vbar = Scrollbar(self.frame)
        self.vbar.pack(side=RIGHT, fill=Y)

        # Wire the scrollbar and the text view together.
        kw.update({'yscrollcommand': self.vbar.set})
        Text.__init__(self, self.frame, **kw)
        self.pack(side=LEFT, fill=BOTH, expand=True)
        self.vbar['command'] = self.yview

        # Copy geometry methods of self.frame without overriding Text
        # methods -- hack!
        text_meths = vars(Text).keys()
        methods = vars(Pack).keys() + vars(Grid).keys() + vars(Place).keys()
        methods = set(methods).difference(text_meths)

        for m in methods:
            if m[0] != '_' and m != 'config' and m != 'configure':
                setattr(self, m, getattr(self.frame, m))

    def __str__(self):
        # Tk path name of the composite is the frame's, so geometry managers
        # operate on the whole assembly.
        return str(self.frame)
def example():
    """Small demo: display this module's docstring in a ScrolledText."""
    import __main__
    from Tkconstants import END
    stext = ScrolledText(bg='white', height=10)
    stext.insert(END, __main__.__doc__)
    stext.pack(fill=BOTH, side=LEFT, expand=True)
    stext.focus_set()
    stext.mainloop()


if __name__ == "__main__":
    example()
| {
"repo_name": "phalax4/CarnotKE",
"path": "jyhton/lib-python/2.7/lib-tk/ScrolledText.py",
"copies": "133",
"size": "1836",
"license": "apache-2.0",
"hash": 1151451606693594800,
"line_mean": 32.3818181818,
"line_max": 76,
"alpha_frac": 0.6497821351,
"autogenerated": false,
"ratio": 3.6284584980237153,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013727013727013726,
"num_lines": 55
} |
# A ScrolledText widget feels like a text widget but also has a
# vertical scroll bar on its right. (Later, options may be added to
# add a horizontal bar as well, to make the bars disappear
# automatically when not needed, to move them to the other side of the
# window, etc.)
#
# Configuration options are passed to the Text widget.
# A Frame widget is inserted between the master and the text, to hold
# the Scrollbar widget.
# Most methods calls are inherited from the Text widget; Pack methods
# are redirected to the Frame widget however.
from tkinter import *
from tkinter import _cnfmerge
class ScrolledText(Text):
    """A Text widget with a vertical Scrollbar, packed inside a Frame.

    Geometry-management calls (pack/grid/place) are forwarded to the frame,
    so the composite can be placed like a single widget.  (Old-style
    cnf-dict API carried over from the Python 2 version.)
    """

    def __init__(self, master=None, cnf=None, **kw):
        if cnf is None:
            cnf = {}
        if kw:
            cnf = _cnfmerge((cnf, kw))
        # Options keyed by a class (widget-class defaults) or 'name' belong
        # to the enclosing Frame; everything else configures the Text widget.
        fcnf = {k: v for k, v in cnf.items() if isinstance(k, type) or k == 'name'}
        for k in fcnf.keys():
            del cnf[k]
        self.frame = Frame(master, **fcnf)
        self.vbar = Scrollbar(self.frame, name='vbar')
        self.vbar.pack(side=RIGHT, fill=Y)
        cnf['name'] = 'text'
        Text.__init__(self, self.frame, **cnf)
        self.pack(side=LEFT, fill=BOTH, expand=1)
        # Wire the scrollbar and the text view together.
        self['yscrollcommand'] = self.vbar.set
        self.vbar['command'] = self.yview
        # Copy geometry methods of self.frame -- hack!
        # Bug fix: in Python 3, dict views do not support "+", so the
        # original "Pack.__dict__.keys() + Grid.__dict__.keys() + ..."
        # raised TypeError on every construction.  Materialize each view
        # as a list before concatenating.
        methods = (list(Pack.__dict__) + list(Grid.__dict__)
                   + list(Place.__dict__))
        for m in methods:
            if m[0] != '_' and m != 'config' and m != 'configure':
                setattr(self, m, getattr(self.frame, m))
| {
"repo_name": "MalloyPower/parsing-python",
"path": "front-end/testsuite-python-lib/Python-3.0/Lib/tkinter/scrolledtext.py",
"copies": "1",
"size": "1612",
"license": "mit",
"hash": -3439669908508929000,
"line_mean": 37.380952381,
"line_max": 78,
"alpha_frac": 0.6135235732,
"autogenerated": false,
"ratio": 3.6553287981859413,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4768852371385941,
"avg_score": null,
"num_lines": null
} |
"""A ScrolledText widget feels like a text widget but also has a
vertical scroll bar on its right. (Later, options may be added to
add a horizontal bar as well, to make the bars disappear
automatically when not needed, to move them to the other side of the
window, etc.)
Configuration options are passed to the Text widget.
A Frame widget is inserted between the master and the text, to hold
the Scrollbar widget.
Most methods calls are inherited from the Text widget; Pack, Grid and
Place methods are redirected to the Frame widget however.
"""
__all__ = ['ScrolledText']
from Tkinter import Frame, Text, Scrollbar, Pack, Grid, Place
from Tkconstants import RIGHT, LEFT, Y, BOTH
class ScrolledText(Text):
    """Text widget plus vertical Scrollbar, wrapped in a Frame.

    Pack/Grid/Place calls are redirected to the frame (see the loop at the
    bottom of __init__), so the composite behaves like one widget.
    """

    def __init__(self, master=None, **kw):
        self.frame = Frame(master)
        self.vbar = Scrollbar(self.frame)
        self.vbar.pack(side=RIGHT, fill=Y)

        # Connect the scrollbar to the text view in both directions.
        kw.update({'yscrollcommand': self.vbar.set})
        Text.__init__(self, self.frame, **kw)
        self.pack(side=LEFT, fill=BOTH, expand=True)
        self.vbar['command'] = self.yview

        # Copy geometry methods of self.frame -- hack!
        # (Python 2 only: dict.keys() returns lists here, so "+" works.)
        methods = vars(Pack).keys() + vars(Grid).keys() + vars(Place).keys()
        for m in methods:
            if m[0] != '_' and m != 'config' and m != 'configure':
                setattr(self, m, getattr(self.frame, m))

    def __str__(self):
        # Report the frame's Tk path so geometry managers place the whole
        # assembly, not just the inner Text.
        return str(self.frame)
def example():
    """Demo: show this module's docstring inside a ScrolledText window."""
    import __main__
    from Tkconstants import END

    stext = ScrolledText(bg='white', height=10)
    stext.insert(END, __main__.__doc__)
    stext.pack(fill=BOTH, side=LEFT, expand=True)
    stext.focus_set()
    stext.mainloop()
# Run the demo when executed directly.
if __name__ == "__main__":
    example()
| {
"repo_name": "babyliynfg/cross",
"path": "tools/project-creator/Python2.6.6/Lib/lib-tk/ScrolledText.py",
"copies": "2",
"size": "1753",
"license": "mit",
"hash": 9090140477410211000,
"line_mean": 31.7115384615,
"line_max": 76,
"alpha_frac": 0.6320593269,
"autogenerated": false,
"ratio": 3.621900826446281,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001399063899063899,
"num_lines": 52
} |
# A ScrolledText widget feels like a text widget but also has a
# vertical scroll bar on its right. (Later, options may be added to
# add a horizontal bar as well, to make the bars disappear
# automatically when not needed, to move them to the other side of the
# window, etc.)
#
# Configuration options are passed to the Text widget.
# A Frame widget is inserted between the master and the text, to hold
# the Scrollbar widget.
# Most methods calls are inherited from the Text widget; Pack methods
# are redirected to the Frame widget however.
from Tkinter import *
from Tkinter import _cnfmerge
class ScrolledText(Text):
    """A Text widget with a right-hand vertical Scrollbar inside a Frame.

    Geometry-management methods are copied from the frame onto the
    instance, so callers pack/grid/place the composite as one widget.
    (Legacy Python 2 / Tkinter old-style cnf-dict API.)
    """

    def __init__(self, master=None, cnf=None, **kw):
        if cnf is None:
            cnf = {}
        if kw:
            cnf = _cnfmerge((cnf, kw))
        # Options keyed by a class (widget-class defaults) or 'name' belong
        # to the Frame; everything else configures the Text widget.
        # NOTE(review): ClassType is not imported in this file -- it
        # presumably arrives via `from Tkinter import *` on old Python 2;
        # verify before reusing this module.
        fcnf = {}
        for k in cnf.keys():   # Py2: keys() is a list copy, safe to delete from cnf
            if type(k) == ClassType or k == 'name':
                fcnf[k] = cnf[k]
                del cnf[k]
        self.frame = Frame(master, **fcnf)
        self.vbar = Scrollbar(self.frame, name='vbar')
        self.vbar.pack(side=RIGHT, fill=Y)
        cnf['name'] = 'text'
        Text.__init__(self, self.frame, **cnf)
        self.pack(side=LEFT, fill=BOTH, expand=1)
        # Wire the scrollbar and the text view together.
        self['yscrollcommand'] = self.vbar.set
        self.vbar['command'] = self.yview
        # Copy geometry methods of self.frame -- hack!
        # (Python 2 only: dict.keys() returns lists here, so "+" works.)
        methods = Pack.__dict__.keys()
        methods = methods + Grid.__dict__.keys()
        methods = methods + Place.__dict__.keys()
        for m in methods:
            if m[0] != '_' and m != 'config' and m != 'configure':
                setattr(self, m, getattr(self.frame, m))
| {
"repo_name": "ericlink/adms-server",
"path": "playframework-dist/play-1.1/python/Lib/lib-tk/ScrolledText.py",
"copies": "2",
"size": "1681",
"license": "mit",
"hash": 1929850751923649800,
"line_mean": 37.0930232558,
"line_max": 70,
"alpha_frac": 0.5823914337,
"autogenerated": false,
"ratio": 3.7108167770419427,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5293208210741943,
"avg_score": null,
"num_lines": null
} |
"""asdl2html - generate HTML file from asdl
This was coded only to be able to convert python's ASDL.
I have not studied ASDL specification so the parsing is probably incomplete!
It uses a quick & dirty (inefficient and naive) parsing, but it did the job :)
Python code is represented as a `tree`.
Each `node` of the tree is an instance of a node `Type`.
Each type belongs to a Category.
Each Type define `attributes` and `fields`.
Attributes describe which properties a node of that type has.
Fields describe the quantity and the category of child nodes.
"""
import json
import argparse
import jinja2
class Field:
    """One field of a node Type.

    The qualifier encodes child-node multiplicity:
      '*'  zero or more
      '?'  zero or one
      ''   exactly one
    """

    def __init__(self, name, cat_name):
        # the field name, whitespace-trimmed
        self.name = name.strip()
        trimmed = cat_name.strip()
        # A trailing '*' or '?' on the category name is the qualifier.
        if trimmed[-1] in "*?":
            self.qualifier = trimmed[-1]
            self.cat_name = trimmed[:-1]
        else:
            self.qualifier = ''
            self.cat_name = trimmed
class Type:
    """A node type in an AST.

    Parses each raw "<category> <name>" field string into a Field object.
    """

    def __init__(self, name, cat_name, fields, attributes):
        """:param fields: iterable of unparsed "<category> <name>" strings"""
        self.name = name
        self.cat_name = cat_name
        # attributes kept as the raw, unparsed value; currently not used
        self.attributes = attributes
        parsed = []
        for raw in fields:
            tokens = raw.strip().split(' ')
            parsed.append(Field(tokens[1], tokens[0]))
        self.fields = parsed
class Category:
    """An abstract node category grouping one or more concrete Types."""

    def __init__(self, cat_name, types, builtin=False):
        # builtin marks categories taken from the ASDL file's first line
        self.builtin = builtin
        self.cat_name = cat_name
        # names (strings) of the member types
        self.types = types
class ASDL:
    """Parse an ASDL file into `self.cats` (Category by name) and
    `self.types` (Type by name).

    Quick & dirty string-based parsing -- see the module docstring.
    """

    def __init__(self, file_name):
        self.cats = {}    # category name -> Category
        self.types = {}   # type name -> Type
        # 0 - read the file
        with open(file_name, 'r') as asdl_file:
            asdl_lines = asdl_file.readlines()
        # first line contains list of built-in types; each becomes a
        # builtin Category containing a single Type
        for name in asdl_lines[0].split(','):
            type_name = name.split(' ')[-1]
            self.cats[name] = Category(name, [type_name], builtin=True)
            self.types[type_name] = Type(type_name, name, '', '')
        # split content into a list of definitions
        definitions = self.get_asdl_definitions(asdl_lines)
        # parse each definition, filling self.cats and self.types
        for definition in definitions:
            self.parse_definition(definition)

    @staticmethod
    def get_asdl_definitions(asdl_lines):
        """Read an ASDL file and returns a list
        with definitions (just the unprocessed strings)
        """
        # 1 - remove comments ("--" to end of line)
        asdl_no_comments = [line.split('--')[0] for line in asdl_lines]
        # 2 - get content part. Everything between {}. just handle one {}
        left_brace = "".join(asdl_no_comments).split('{')
        assert len(left_brace) == 2
        right_brace = left_brace[1].split('}')
        assert len(right_brace) == 2
        content = right_brace[0]
        # 3 - break content into definitions
        # a definition is something like
        #
        #   xxx = yyy
        #       | zzz
        #
        # a definition is over when another line with a `=` is found
        definitions = []
        current = None
        for line in content.splitlines(1):
            if not line.strip():  # ignore blank lines
                continue
            if "=" in line:
                # a new definition starts: flush the previous one
                if current is not None:
                    definitions.append(current)
                current = ''
            current += line
        definitions.append(current)
        return definitions

    def parse_definition(self, defi):
        """parse a definition. A definition contains one Category and its types.
        """
        # break left(cat_name) and right(constructors) side of a definition
        # extract definition cat_name
        # NOTE(review): unpacking assumes exactly one '=' per definition;
        # a second '=' in the body would raise ValueError here.
        _left, _right = defi.split('=')
        cat_name = _left.strip()
        right_parts = _right.split('attributes')
        # NOTE(review): also assumes the literal word 'attributes' never
        # appears inside a constructor -- verify for other ASDL inputs.
        assert len(right_parts) < 3
        # get attributes - not all definitions contain attributes
        if len(right_parts) == 2:
            attrs = self.get_braces_content(right_parts[1])
        else:
            attrs = []
        # read lines, extract constructors & attributes
        types = [c.strip() for c in right_parts[0].split('|')]
        # if just one type in definition
        if len(types) == 1:
            # in the ASDL types from categories that have only one type
            # might be named or not.
            # If not named use cat_name as type_name.
            type_name = types[0].split('(')[0].strip() or cat_name
            type_names = [type_name]
            field_list = self.get_braces_content(types[0])
            self.types[type_name] = Type(type_name, cat_name, field_list, attrs)
        # iterate over constructors
        else:
            type_names = []
            for cons in types:  # for each constructor
                name = cons.split('(')[0].strip()
                type_names.append(name)
                field_list = self.get_braces_content(cons)
                self.types[name] = Type(name, cat_name, field_list, attrs)
        # create category
        self.cats[cat_name] = Category(cat_name, type_names)

    @staticmethod
    def get_braces_content(data):
        """extract data from within comma separated, round-braces string
        return list of strings

        >>> ASDL.get_braces_content('(a,b,c)')
        ['a', 'b', 'c']
        """
        left_brace = data.split('(')
        if len(left_brace) > 1:
            return left_brace[1].split(')')[0].split(',')
        return []
################################################################
class ASDL2JSON(ASDL):
    """Serialize the parsed ASDL type table as pretty-printed JSON."""
    def render(self):
        """Return a JSON document mapping each type name to its description."""
        rendered = {asdl_type.name: self.type_dict(asdl_type)
                    for asdl_type in self.types.values()}
        return json.dumps(rendered, sort_keys=True, indent=4)

    @staticmethod
    def type_dict(asdl_type):
        """Build a plain-dict description of one Type (category, field order,
        and per-field category/qualifier)."""
        order = [field.name for field in asdl_type.fields]
        fields = {
            field.name: {'cat': field.cat_name, 'q': field.qualifier}
            for field in asdl_type.fields
        }
        return {
            'category': asdl_type.cat_name,
            'order': order,
            'fields': fields,
        }
class ASDL2HTML(ASDL):
    """extend ASDL with methods to generate a HTML page"""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.css = {} # map category to a CSS color
        self.jinja_env = jinja2.Environment(
            loader=jinja2.PackageLoader('pyreg', 'templates'),
            undefined=jinja2.StrictUndefined,
            trim_blocks=True)
        # divide categories into groups
        self.builtin_types = []
        self.product_types = [] # the name "product" comes from ASDL spec
        self.sum_cats = []
        for cat in self.cats.values():
            # mod/stmt/expr are displayed in their own column (see render)
            if cat.cat_name in ('mod', 'stmt', 'expr'):
                continue
            if cat.builtin:
                self.builtin_types.append(cat.types[0])
            elif len(cat.types) == 1:
                self.product_types.append(cat.types[0])
            else:
                self.sum_cats.append(cat.cat_name)
        self.product_types.sort()
        self.builtin_types.sort()
        self.sum_cats.sort()
        palette_soft = ['#CFD0D2', '#E9D4A7', '#C1D18A', '#B296C7',
                        '#55BEED', '#F384AE', '#F1753F']
        palette_strong = ['#FFE617', '#E8272F', '#E5185D',
                          '#5F3577', '#238ACC', '#143B86', '#799155',
                          '#09811C', '#C05C20', '#474D4D',
                          '#003F2E', '#FDB717', '#EF4638']
        palette_all = palette_soft + palette_strong
        # set color for builtins (each pop consumes one soft color)
        for cat_name in self.builtin_types:
            rules = '{{background-color:{}; border: 2px solid black;}}'
            self.css[cat_name] = rules.format(palette_soft.pop())
        # all categories that have a single type but are not built-ins,
        # plus the sum categories: set their colors
        # NOTE: palette_all shares list objects popped above only via the
        # concatenated copy, so soft colors used for builtins may repeat here.
        for cat_name in self.product_types + self.sum_cats:
            rules = '{{background-color:{};}}'
            self.css[cat_name] = rules.format(palette_all.pop())

    def get_group(self, name):
        """get a group of types to be displayed together in the HTML"""
        if name == 'builtin':
            return '', self.builtin_types
        if name == 'product_types':
            return '', self.product_types
        cat = self.cats[name]
        return cat.cat_name, cat.types

    def render(self):
        """Render the asdl.html template with two columns of categories."""
        template = self.jinja_env.get_template("asdl.html")
        cols = {1: ["mod", "stmt", "expr"],
                2: self.sum_cats + ["product_types", "builtin"]}
        return template.render(asdl=self, category_colors=self.css,
                               columns=cols)
def asdl_view(args=None):
    """Command line program to convert ASDL into HTML or JSON."""
    parser = argparse.ArgumentParser(description='ASDL viewer')
    parser.add_argument(
        '-f', '--format', dest='format', metavar='FORMAT',
        choices=('html', 'json'), default='html',
        help='output format one of [%(choices)s], default=%(default)s')
    parser.add_argument(
        'asdl_file', metavar='ASDL', nargs='?',
        help='ASDL file')
    parsed = parser.parse_args(args)
    # dispatch on the requested output format (argparse restricts the
    # value to these two choices)
    renderers = {'html': ASDL2HTML, 'json': ASDL2JSON}
    renderer_cls = renderers.get(parsed.format)
    if renderer_cls is not None:
        print(renderer_cls(parsed.asdl_file).render())
# Entry point when this module is executed directly as a script.
if __name__ == "__main__": # pragma: no cover
    asdl_view()
| {
"repo_name": "schettino72/pyRegurgitator",
"path": "pyreg/asdlview.py",
"copies": "1",
"size": "9917",
"license": "mit",
"hash": -5710108721729461000,
"line_mean": 32.5033783784,
"line_max": 80,
"alpha_frac": 0.5566199455,
"autogenerated": false,
"ratio": 3.9166666666666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9962023207900804,
"avg_score": 0.0022526808531724177,
"num_lines": 296
} |
"""asdoc2dash
Convert from ASDoc to Dash Docset.
Usage:
asdoc2dash --name <name> --docdir <path> (--outdir <path>|--add-to-dash) [--icon <path>] [--force] [--log]
asdoc2dash (-h | --help)
asdoc2dash (-v | --version)
Options:
-h --help show help.
-v --version show version.
-n --name <name> docset name
-d --docdir <path> asdoc directory
-o --outdir <path> output directory
-a --add-to-dash add to dash
-f --force if exists directory, force overwrite
-i --icon <path> docset icon (png format only)
-l --log show debug information
"""
import os
import sys
import errno
import shutil
import logging
from . import __author__, __version__, __license__
from . import __doc__ as __description__
from docopt import docopt
import dash
from dash import DocsetGenerator
from asdocparser import ASDocParser
log = logging.getLogger(__name__)
def main():
    """Parse command line arguments (via docopt on the module docstring)
    and convert an ASDoc directory into a Dash docset.

    Exits with an errno-style status code on invalid input.
    """
    arguments = docopt(__doc__, version=__version__)
    docset_name = arguments["--name"]
    asdocdir = arguments["--docdir"]
    outdir = arguments["--outdir"]
    icon = arguments["--icon"]
    add_dash_flag = arguments["--add-to-dash"]
    if add_dash_flag:
        # install straight into Dash's default docset location
        outdir = os.path.join(dash.DEFAULT_DOCSET_PATH, docset_name)
    docset_path = os.path.join(outdir, docset_name + ".docset")
    # log settings
    if arguments["--log"]:
        logging.basicConfig(format="%(asctime)s: %(levelname)s\n%(message)s", level=logging.DEBUG)
    else:
        logging.basicConfig(format="%(message)s", level=logging.INFO)
    # error check
    if not os.path.exists(asdocdir):
        log.error("No such directory " + asdocdir)
        sys.exit(errno.ENOENT)
    if not os.path.isdir(asdocdir):
        log.error("Not a directory " + asdocdir)
        sys.exit(errno.ENOTDIR)
    if os.path.exists(docset_path):
        if arguments["--force"]:
            shutil.rmtree(docset_path)
        else:
            log.error("Already exists " + docset_path + "\nIf you want to overwrite, use option --force.")
            sys.exit(errno.EEXIST)
    if icon:
        if not os.path.exists(icon):
            log.error("No such file " + icon)
            sys.exit(errno.ENOENT)
        # only the extension matters here; the basename is ignored
        ext = os.path.splitext(icon)[1].lower()
        if ext != ".png":
            log.error("Icon is supported only png")
            sys.exit(1)
    # main conversion
    log.info("convert from %s to %s", asdocdir, docset_path)
    generator = DocsetGenerator(docset_name, docset_path, asdocdir, icon)
    asdocParser = ASDocParser(generator)
    generator.create_project()
    asdocParser.parse()
    generator.finish()
    log.info("done.")
    # add to dash
    if add_dash_flag:
        log.info("add to dash")
        # docset_path is built from user-supplied --name; it is passed to a
        # shell here, quoted only with single quotes
        os.system("open -a dash '%s'" % docset_path)
# Entry point when this module is executed directly as a script.
if __name__ == '__main__':
    main()
| {
"repo_name": "ton1517/asdoc2dash",
"path": "asdoc2dash/asdoc2dash.py",
"copies": "1",
"size": "2838",
"license": "mit",
"hash": 9128397879950394000,
"line_mean": 25.523364486,
"line_max": 108,
"alpha_frac": 0.6060606061,
"autogenerated": false,
"ratio": 3.44,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45460606061,
"avg_score": null,
"num_lines": null
} |
# as due to their complexity multi-gpu tests could impact other tests, and to aid debug we have those in a separate module.
import os
import sys
from pathlib import Path
import torch
from transformers.testing_utils import TestCasePlus, execute_subprocess_async, require_torch_multi_gpu
from utils import load_json
# Whether a CUDA device is available on this machine (used for `gpus` below).
CUDA_AVAILABLE = torch.cuda.is_available()
# Tiny toy corpus written into the temporary data dir by make_test_data_dir.
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
# Minimal-cost argument set for the distillation CLI; individual tests
# override entries via `updates` dicts before converting to CLI flags.
CHEAP_ARGS = {
    "max_tokens_per_batch": None,
    "supervise_forward": True,
    "normalize_hidden": True,
    "label_smoothing": 0.2,
    "eval_max_gen_length": None,
    "eval_beams": 1,
    "val_metric": "loss",
    "save_top_k": 1,
    "adafactor": True,
    "early_stopping_patience": 2,
    "logger_name": "default",
    "length_penalty": 0.5,
    "cache_dir": "",
    "task": "summarization",
    "num_workers": 2,
    "alpha_hid": 0,
    "freeze_embeds": True,
    "enc_only": False,
    "tgt_suffix": "",
    "resume_from_checkpoint": None,
    "sortish_sampler": True,
    "student_decoder_layers": 1,
    "val_check_interval": 1.0,
    "output_dir": "",
    "fp16": False,  # TODO(SS): set this to CUDA_AVAILABLE if ci installs apex or start using native amp
    "no_teacher": False,
    "fp16_opt_level": "O1",
    "gpus": 1 if CUDA_AVAILABLE else 0,
    "n_tpu_cores": 0,
    "max_grad_norm": 1.0,
    "do_train": True,
    "do_predict": True,
    "accumulate_grad_batches": 1,
    "server_ip": "",
    "server_port": "",
    "seed": 42,
    "model_name_or_path": "sshleifer/bart-tiny-random",
    "config_name": "",
    "tokenizer_name": "facebook/bart-large",
    "do_lower_case": False,
    "learning_rate": 0.3,
    "lr_scheduler": "linear",
    "weight_decay": 0.0,
    "adam_epsilon": 1e-08,
    "warmup_steps": 0,
    "max_epochs": 1,
    "train_batch_size": 2,
    "eval_batch_size": 2,
    "max_source_length": 12,
    "max_target_length": 12,
    "val_max_target_length": 12,
    "test_max_target_length": 12,
    "fast_dev_run": False,
    "no_cache": False,
    "n_train": -1,
    "n_val": -1,
    "n_test": -1,
    "student_encoder_layers": 1,
    "freeze_encoder": False,
    "auto_scale_batch_size": False,
    "overwrite_output_dir": False,
    "student": None,
}
def _dump_articles(path: Path, articles: list):
content = "\n".join(articles)
Path(path).open("w").writelines(content)
def make_test_data_dir(tmp_dir):
    """Populate *tmp_dir* with tiny source/target files for every split
    and return the directory path."""
    for split_name in ("train", "val", "test"):
        base = os.path.join(tmp_dir, split_name)
        _dump_articles(base + ".source", ARTICLES)
        _dump_articles(base + ".target", SUMMARIES)
    return tmp_dir
class TestSummarizationDistillerMultiGPU(TestCasePlus):
    """Multi-GPU smoke test that runs distillation.py in a subprocess."""
    @classmethod
    def setUpClass(cls):
        # No shared fixtures needed; the return value is ignored by unittest.
        return cls

    @require_torch_multi_gpu
    def test_multi_gpu(self):
        updates = dict(
            no_teacher=True,
            freeze_encoder=True,
            gpus=2,
            overwrite_output_dir=True,
            sortish_sampler=True,
        )
        self._test_distiller_cli_fork(updates, check_contents=False)

    def _test_distiller_cli_fork(self, updates, check_contents=True):
        """Build CLI args from CHEAP_ARGS + overrides, run distillation.py
        as a subprocess, then assert on the produced output directory."""
        default_updates = dict(
            label_smoothing=0.0,
            early_stopping_patience=-1,
            train_batch_size=1,
            eval_batch_size=2,
            max_epochs=2,
            alpha_mlm=0.2,
            alpha_ce=0.8,
            do_predict=True,
            model_name_or_path="sshleifer/tinier_bart",
            teacher=CHEAP_ARGS["model_name_or_path"],
            val_check_interval=0.5,
        )
        default_updates.update(updates)
        args_d: dict = CHEAP_ARGS.copy()
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        output_dir = self.get_auto_remove_tmp_dir()
        args_d.update(data_dir=tmp_dir, output_dir=output_dir, **default_updates)

        def convert(k, v):
            # Turn one (key, value) pair into a CLI token; "" entries are
            # filtered out below.
            if k in ["tgt_suffix", "server_ip", "server_port", "out", "n_tpu_cores"]:
                return ""
            if v is False or v is None:
                return ""
            if v is True:  # or len(str(v))==0:
                return f"--{k}"
            return f"--{k}={v}"

        cli_args = [x for x in (convert(k, v) for k, v in args_d.items()) if len(x)]
        cmd = [sys.executable, f"{self.test_file_dir}/distillation.py"] + cli_args
        execute_subprocess_async(cmd, env=self.get_env())

        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        ckpt_files = [p for p in contents if p.endswith("ckpt")]
        assert len(ckpt_files) > 0
        self.assertIn("test_generations.txt", contents)
        self.assertIn("test_results.txt", contents)
        # get the following from the module, (we don't have access to `model` here)
        metrics_save_path = os.path.join(output_dir, "metrics.json")
        val_metric = "rouge2"
        metrics = load_json(metrics_save_path)
        # {'test': [{'test_avg_loss': 10.63731575012207, 'test_avg_rouge1': 0.0, 'test_avg_rouge2': 0.0, 'test_avg_rougeL': 0.0, 'test_avg_gen_time': 0.1822289228439331, 'test_avg_gen_len': 142.0, 'step_count': 1}]}
        print(metrics)
        last_step_stats = metrics["val"][-1]
        self.assertGreaterEqual(last_step_stats["val_avg_gen_time"], 0.01)
        self.assertIsInstance(last_step_stats[f"val_avg_{val_metric}"], float)
        self.assertEqual(len(metrics["test"]), 1)
        desired_n_evals = int(args_d["max_epochs"] * (1 / args_d["val_check_interval"]) / 2 + 1)
        self.assertEqual(len(metrics["val"]), desired_n_evals)
| {
"repo_name": "huggingface/pytorch-transformers",
"path": "examples/research_projects/seq2seq-distillation/_test_seq2seq_examples_multi_gpu.py",
"copies": "2",
"size": "5672",
"license": "apache-2.0",
"hash": 5934075902722955000,
"line_mean": 33.5853658537,
"line_max": 215,
"alpha_frac": 0.5946755994,
"autogenerated": false,
"ratio": 3.107945205479452,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9699224705928275,
"avg_score": 0.0006792197902354394,
"num_lines": 164
} |
"""A sed-style sentence breaker, with SQuAD/Wiki-specific tweaks.
This code is derived from a broad-coverage proof-of-concept sed script,
with some simplification/adaptation:
- Several substitution rules are simplified here to focus on the cases that
arise in the SQuAD sentences.
- Other substitution rules are added to deal with specifics of the SQuAD
corpus.
This code aims for high accuracy/F1, but does not try to reach full 100%.
Some of the long tail is left alone, to avoid proliferating substitution rules
for diminishing returns.
When run in squad_sentence_break_test.py, sb_sed achieves an F1 of .9961.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
def infer_sentence_breaks(uni_text):
  """Generates (start, end) pairs demarking sentences in the text.

  Args:
    uni_text: A (multi-sentence) passage of text, in Unicode.

  Yields:
    (start, end) tuples that demarcate sentences in the input text. Normal
    Python slicing applies: the start index points at the first character of
    the sentence, and the end index is one past the last character of the
    sentence.
  """
  # Treat the text as a single line that starts out with no internal newline
  # characters and, after regexp-governed substitutions, contains internal
  # newlines representing cuts between sentences.
  uni_text = re.sub(r'\n', r' ', uni_text)  # Remove pre-existing newlines.
  text_with_breaks = _sed_do_sentence_breaks(uni_text)
  # Sentence starts: position after the leading whitespace of each line.
  starts = [m.end() for m in re.finditer(r'^\s*', text_with_breaks, re.M)]
  sentences = [s.strip() for s in text_with_breaks.split('\n')]
  assert len(starts) == len(sentences)
  # zip replaces the original index loop; pairs line up one-to-one.
  for start, sentence in zip(starts, sentences):
    yield start, start + len(sentence)
def _sed_do_sentence_breaks(uni_text):
  """Uses regexp substitution rules to insert newlines as sentence breaks.

  Args:
    uni_text: A (multi-sentence) passage of text, in Unicode.

  Returns:
    A Unicode string with internal newlines representing the inferred sentence
    breaks.
  """
  # The main split, looks for sequence of:
  #   - sentence-ending punctuation: [.?!]
  #   - optional quotes, parens, spaces: [)'" \u201D]*
  #   - whitespace: \s
  #   - optional whitespace: \s*
  #   - optional opening quotes, bracket, paren: [['"(\u201C]?
  #   - upper case letter or digit
  txt = re.sub(
      r'''([.?!][)'" %s]*)\s(\s*[['"(%s]?[A-Z0-9])''' % ('\u201D', '\u201C'),
      r'\1\n\2', uni_text)

  # Wiki-specific split, for sentence-final editorial scraps (which can stack):
  #   - ".[citation needed]", ".[note 1] ", ".[c] ", ".[n 8] "
  txt = re.sub(r'''([.?!]['"]?)((\[[a-zA-Z0-9 ?]+\])+)\s(\s*['"(]?[A-Z0-9])''',
               r'\1\2\n\4', txt)

  # Wiki-specific split, for ellipses in multi-sentence quotes:
  #   "need such things [...] But"
  txt = re.sub(r'(\[\.\.\.\]\s*)\s(\[?[A-Z])', r'\1\n\2', txt)

  # Rejoin (undo splits) for:
  #   - social, military, religious, and professional titles
  #   - common literary abbreviations
  #   - month name abbreviations
  #   - geographical abbreviations
  txt = re.sub(r'\b(Mrs?|Ms|Dr|Prof|Fr|Rev|Msgr|Sta?)\.\n', r'\1. ', txt)
  txt = re.sub(r'\b(Lt|Gen|Col|Maj|Adm|Capt|Sgt|Rep|Gov|Sen|Pres)\.\n',
               r'\1. ',
               txt)
  txt = re.sub(r'\b(e\.g|i\.?e|vs?|pp?|cf|a\.k\.a|approx|app|es[pt]|tr)\.\n',
               r'\1. ',
               txt)
  txt = re.sub(r'\b(Jan|Aug|Oct|Nov|Dec)\.\n', r'\1. ', txt)
  txt = re.sub(r'\b(Mt|Ft)\.\n', r'\1. ', txt)
  txt = re.sub(r'\b([ap]\.m)\.\n(Eastern|EST)\b', r'\1. \2', txt)

  # Rejoin for personal names with 3,2, or 1 initials preceding the last name.
  txt = re.sub(r'\b([A-Z]\.)[ \n]([A-Z]\.)[ \n]([A-Z]\.)[ \n]("?[A-Z][a-z])',
               r'\1 \2 \3 \4',
               txt)
  txt = re.sub(r'\b([A-Z]\.)[ \n]([A-Z]\.)[ \n]("?[A-Z][a-z])',
               r'\1 \2 \3',
               txt)
  txt = re.sub(r'\b([A-Z]\.[A-Z]\.)\n("?[A-Z][a-z])', r'\1 \2', txt)
  txt = re.sub(r'\b([A-Z]\.)\n("?[A-Z][a-z])', r'\1 \2', txt)

  # Resplit for common sentence starts:
  #   - The, This, That, ...
  #   - Meanwhile, However,
  #   - In, On, By, During, After, ...
  txt = re.sub(r'([.!?][\'")]*) (The|This|That|These|It) ', r'\1\n\2 ', txt)
  txt = re.sub(r'(\.) (Meanwhile|However)', r'\1\n\2', txt)
  txt = re.sub(r'(\.) (In|On|By|During|After|Under|Although|Yet|As |Several'
               r'|According to) ',
               r'\1\n\2 ',
               txt)

  # Rejoin for:
  #   - numbered parts of documents.
  #   - born, died, ruled, circa, flourished ...
  #   - et al (2005), ...
  #   - H.R. 2000
  txt = re.sub(r'\b([Aa]rt|[Nn]o|Opp?|ch|Sec|cl|Rec|Ecl|Cor|Lk|Jn|Vol)\.\n'
               r'([0-9IVX]+)\b',
               r'\1. \2',
               txt)
  txt = re.sub(r'\b([bdrc]|ca|fl)\.\n([A-Z0-9])', r'\1. \2', txt)
  txt = re.sub(r'\b(et al)\.\n(\(?[0-9]{4}\b)', r'\1. \2', txt)
  txt = re.sub(r'\b(H\.R\.)\n([0-9])', r'\1 \2', txt)

  # SQuAD-specific joins (corpus-specific titles and abbreviations).
  txt = re.sub(r'(I Am\.\.\.)\n(Sasha Fierce|World Tour)', r'\1 \2', txt)
  txt = re.sub(r'(Warner Bros\.)\n(Records|Entertainment)', r'\1 \2', txt)
  txt = re.sub(r'(U\.S\.)\n(\(?\d\d+)', r'\1 \2', txt)
  txt = re.sub(r'\b(Rs\.)\n(\d)', r'\1 \2', txt)

  # SQuAD-specific splits.
  txt = re.sub(r'\b(Jay Z\.) ([A-Z])', r'\1\n\2', txt)
  txt = re.sub(r'\b(Washington, D\.C\.) ([A-Z])', r'\1\n\2', txt)
  txt = re.sub(r'\b(for 4\.\)) ([A-Z])', r'\1\n\2', txt)
  txt = re.sub(r'\b(Wii U\.) ([A-Z])', r'\1\n\2', txt)
  txt = re.sub(r'\. (iPod|iTunes)', r'.\n\1', txt)
  txt = re.sub(r' (\[\.\.\.\]\n)', r'\n\1', txt)
  txt = re.sub(r'(\.Sc\.)\n', r'\1 ', txt)
  txt = re.sub(r' (%s [A-Z])' % '\u2022', r'\n\1', txt)
return txt | {
"repo_name": "google-research/xtreme",
"path": "third_party/processors/sb_sed.py",
"copies": "1",
"size": "5780",
"license": "apache-2.0",
"hash": -7439283027439730000,
"line_mean": 37.7986577181,
"line_max": 79,
"alpha_frac": 0.5626297578,
"autogenerated": false,
"ratio": 2.657471264367816,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8652825624316824,
"avg_score": 0.013455079570198354,
"num_lines": 149
} |
""" A Seed File to seed the Mongo DB with Test Data """
# Import pymongo module to allow operations on mongo
from pymongo import MongoClient
def up():
    """ Seed the database with test data """
    # Mandatory for every mongodb call
    client = MongoClient()
    # Select which database to use
    db = client.test_database
    # Select which collection to use, each collection stores data of one machine
    collection = db.machine_states
    # Remove the old collection
    collection.remove({})
    # The two seeded machines differ only in hostname, per-core usage and pid;
    # everything else is shared via the _build_machine_state helper.
    sample_machine_states = [
        _build_machine_state('localhost', ('20%', '30%'), '123456'),
        _build_machine_state('sample-domain', ('40%', '50%'), '123457'),
    ]
    # Actual insertion of data
    collection.insert_many(sample_machine_states)


def _usage_history():
    """Return a fresh hourly usage history (a new list each call so the
    inserted documents do not share mutable state)."""
    values = ['15%', '14%', '23%', '91%', '55%', '83%', '19%']
    return [{'label': '0%d:00' % (i + 1), 'value': value}
            for i, value in enumerate(values)]


def _build_machine_state(hostname, core_usages, pid):
    """Build one seeded machine-state document.

    hostname    -- value for the 'hostname' field
    core_usages -- per-core usage strings, one entry per core
    pid         -- pid string of the sample process
    """
    cores = [{
        'name': 'Core %d' % (i + 1),
        'usage': usage,
        'frequency': '2000MHz',
    } for i, usage in enumerate(core_usages)]
    return {
        'hostname': hostname,
        'status': True,
        'data': {
            'cpu': '20%',
            'cpuData': {
                'histRec': _usage_history(),
                'cores': cores,
            },
            'storage': '80%',
            'storageData': {
                'storagePartitions': [{
                    'name': 'Partition',
                    'filesystem': '/dev/sda1',
                    'mountPt': '/',
                    'storage': '600GB/1000GB'
                }]
            },
            'ram': '65%',
            'ramData': {
                'totalMemory': '3.90GB/16.00GB',
                'buffers': '9GB',
                'swapUsage': '1GB/16GB',
                'histRec': _usage_history()
            },
            'process': True,
            'processData': {
                'processes': [{
                    'name': 'Process A',
                    'status': True,
                    'pid': pid,
                    'uid': '0(root)/1(daemon)',
                    'guid': '3(user)/3(sys)',
                    'cpuOccupied': '15%',
                    'ramOccupied': '900MB/7%'
                }]
            }
        },
        'seeded': True
    }
def down():
    """ Remove the seeded test data """
    # Mandatory for every mongodb call
    client = MongoClient()
    # Select which database to use
    db = client.test_database
    # Use the same collection that up() seeds. The previous code targeted
    # db.localhost_states, so the documents seeded into machine_states were
    # never removed.
    collection = db.machine_states
    # Remove the data having attribute seeded = True
    collection.delete_many({'seeded': True})
| {
"repo_name": "hpsuenaa/servmon",
"path": "migrations/machine_data_seed.py",
"copies": "1",
"size": "7640",
"license": "mit",
"hash": -3470603413503654400,
"line_mean": 29.0787401575,
"line_max": 80,
"alpha_frac": 0.2672774869,
"autogenerated": false,
"ratio": 5.165652467883705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5932929954783706,
"avg_score": null,
"num_lines": null
} |
import numpy as np
import math
import logging
import datetime
from pycqed.utilities.timer import Timer
log = logging.getLogger(__name__)
from copy import deepcopy
import pycqed.measurement.waveform_control.pulse as bpl
import pycqed.measurement.waveform_control.pulse_library as pl
import pycqed.measurement.waveform_control.pulsar as ps
import pycqed.measurement.waveform_control.fluxpulse_predistortion as flux_dist
from collections import OrderedDict as odict
class Segment:
    """
    Consists of a list of UnresolvedPulses, each of which contains information
    about in which element the pulse is played and when it is played
    (reference point + delay) as well as an instance of class Pulse.

    Property distortion_dicts: a key of the form {AWG}_{channel} specifies
    that the respective val should be used as distortion dict instead of
    self.pulsar.{AWG}_{channel}_distortion_dict.
    """
    # Defaults for automatically generated trigger pulses (seconds / volts);
    # copied into self.trigger_pars in __init__.
    trigger_pulse_length = 20e-9
    trigger_pulse_amplitude = 0.5
    trigger_pulse_start_buffer = 25e-9
def __init__(self, name, pulse_pars_list=[]):
self.name = name
self.pulsar = ps.Pulsar.get_instance()
self.unresolved_pulses = []
self.resolved_pulses = []
self.previous_pulse = None
self.elements = odict()
self.element_start_end = {}
self.elements_on_awg = {}
self.distortion_dicts = {}
self.trigger_pars = {
'pulse_length': self.trigger_pulse_length,
'amplitude': self.trigger_pulse_amplitude,
'buffer_length_start': self.trigger_pulse_start_buffer,
}
self.trigger_pars['length'] = self.trigger_pars['pulse_length'] + \
self.trigger_pars['buffer_length_start']
self._pulse_names = set()
self.acquisition_elements = set()
self.timer = Timer(self.name)
for pulse_pars in pulse_pars_list:
self.add(pulse_pars)
def add(self, pulse_pars):
"""
Checks if all entries of the passed pulse_pars dictionary are valid
and sets default values where necessary. After that an UnresolvedPulse
is instantiated.
"""
pars_copy = deepcopy(pulse_pars)
# Makes sure that pulse name is unique
if pars_copy.get('name') in self._pulse_names:
raise ValueError(f'Name of added pulse already exists: '
f'{pars_copy.get("name")}')
if pars_copy.get('name', None) is None:
pars_copy['name'] = pulse_pars['pulse_type'] + '_' + str(
len(self.unresolved_pulses))
self._pulse_names.add(pars_copy['name'])
# Makes sure that element name is unique within sequence of
# segments by appending the segment name to the element name
# and that RO pulses have their own elements if no element_name
# was provided
i = len(self.acquisition_elements) + 1
if pars_copy.get('element_name', None) == None:
if pars_copy.get('operation_type', None) == 'RO':
pars_copy['element_name'] = \
'RO_element_{}_{}'.format(i, self.name)
else:
pars_copy['element_name'] = 'default_{}'.format(self.name)
else:
pars_copy['element_name'] += '_' + self.name
# add element to set of acquisition elements
if pars_copy.get('operation_type', None) == 'RO':
if pars_copy['element_name'] not in self.acquisition_elements:
self.acquisition_elements.add(pars_copy['element_name'])
new_pulse = UnresolvedPulse(pars_copy)
if new_pulse.ref_pulse == 'previous_pulse':
if self.previous_pulse != None:
new_pulse.ref_pulse = self.previous_pulse.pulse_obj.name
# if the first pulse added to the segment has no ref_pulse
# it is reference to segment_start by default
elif self.previous_pulse == None and \
len(self.unresolved_pulses) == 0:
new_pulse.ref_pulse = 'segment_start'
else:
raise ValueError('No previous pulse has been added!')
self.unresolved_pulses.append(new_pulse)
self.previous_pulse = new_pulse
# if self.elements is odict(), the resolve_timing function has to be
# called prior to generating the waveforms
self.elements = odict()
self.resolved_pulses = []
def extend(self, pulses):
"""
Adds sequentially all pulses to the segment
:param pulses: list of pulses to add
:return:
"""
for p in pulses:
self.add(p)
    @Timer()
    def resolve_segment(self, store_segment_length_timer=True):
        """
        Top layer method of Segment class. After having added all pulses,
        * pulse elements are updated to enforce single element per segment
          for the AWGs configured this way
        * the timing is resolved
        * the virtual Z gates are resolved
        * the trigger pulses are generated
        * the charge compensation pulses are added

        :param store_segment_length_timer: if True, record the segment
            length as a timer checkpoint (best-effort; failures are only
            logged as warnings).
        """
        self.enforce_single_element()
        self.resolve_timing()
        self.resolve_Z_gates()
        self.add_flux_crosstalk_cancellation_channels()
        self.gen_trigger_el()
        self.add_charge_compensation()
        if store_segment_length_timer:
            try:
                # FIXME: we currently store 1e3*length because datetime
                # does not support nanoseconds. Find a cleaner solution.
                self.timer.checkpoint(
                    'length.dt', log_init=False, values=[
                        datetime.datetime.utcfromtimestamp(0)
                        + datetime.timedelta(microseconds=1e9*np.diff(
                            self.get_segment_start_end())[0])])
            except Exception as e:
                # storing segment length is not crucial for the measurement
                log.warning(f"Could not store segment length timer: {e}")
    def enforce_single_element(self):
        """Populate self.resolved_pulses from self.unresolved_pulses,
        rerouting pulses to the segment's default element for channels whose
        AWG has {AWG}_enforce_single_element set.

        Three cases per pulse:
        * all channels enforced -> whole pulse moved to the default element
        * some channels enforced -> pulse split into two copies; the enforced
          channels play a '_ese'-suffixed copy in the default element,
          time-locked to the original
        * none enforced -> pulse copied unchanged
        """
        self.resolved_pulses = []
        for p in self.unresolved_pulses:
            # per-channel flag: True if that channel's AWG enforces a
            # single element
            ch_mask = []
            for ch in p.pulse_obj.channels:
                ch_awg = self.pulsar.get(f'{ch}_awg')
                ch_mask.append(
                    self.pulsar.get(f'{ch_awg}_enforce_single_element'))
            if all(ch_mask) and len(ch_mask) != 0:
                p = deepcopy(p)
                p.pulse_obj.element_name = f'default_{self.name}'
                self.resolved_pulses.append(p)
            elif any(ch_mask):
                # p0 keeps the non-enforced channels in the original element
                p0 = deepcopy(p)
                p0.pulse_obj.channel_mask = [not x for x in ch_mask]
                self.resolved_pulses.append(p0)
                # p1 plays the enforced channels in the default element,
                # referenced to the original pulse with zero delay so both
                # halves stay aligned
                p1 = deepcopy(p)
                p1.pulse_obj.element_name = f'default_{self.name}'
                p1.pulse_obj.channel_mask = ch_mask
                p1.ref_pulse = p.pulse_obj.name
                p1.ref_point = 0
                p1.ref_point_new = 0
                p1.basis_rotation = {}
                p1.delay = 0
                p1.pulse_obj.name += '_ese'
                self.resolved_pulses.append(p1)
            else:
                p = deepcopy(p)
                self.resolved_pulses.append(p)
    def resolve_timing(self, resolve_block_align=True):
        """
        For each pulse in the resolved_pulses list, this method:
            * updates the _t0 of the pulse by using the timing description of
              the UnresolvedPulse
            * saves the resolved pulse in the elements ordered dictionary by
              ascending element start time and the pulses in each element by
              ascending _t0
            * orders the resolved_pulses list by ascending pulse middle

        :param resolve_block_align: (bool) whether to resolve alignment of
            simultaneous blocks (default True)
        """
        self.elements = odict()
        if self.resolved_pulses == []:
            self.enforce_single_element()

        visited_pulses = []
        ref_pulses_dict = {}
        i = 0

        # maps each referenced pulse name -> list of pulses referencing it
        pulses = self.gen_refpoint_dict()

        # add pulses that refer to segment start
        for pulse in pulses['segment_start']:
            if pulse.pulse_obj.name in pulses:
                ref_pulses_dict.update({pulse.pulse_obj.name: pulse})
            t0 = pulse.delay - pulse.ref_point_new * pulse.pulse_obj.length
            pulse.pulse_obj.algorithm_time(t0)
            visited_pulses.append((t0, i, pulse))
            i += 1

        if len(visited_pulses) == 0:
            raise ValueError('No pulse references to the segment start!')

        ref_pulses_dict_all = deepcopy(ref_pulses_dict)
        # add remaining pulses: breadth-first over the reference graph,
        # one "generation" of newly placeable pulses per while-iteration
        while len(ref_pulses_dict) > 0:
            ref_pulses_dict_new = {}
            for name, pulse in ref_pulses_dict.items():
                for p in pulses[name]:
                    if isinstance(p.ref_pulse, list):
                        # multi-reference pulse: only place it once all of
                        # its reference pulses have been placed, and only
                        # once overall
                        if p.pulse_obj.name in [vp[2].pulse_obj.name for vp
                                                in visited_pulses]:
                            continue
                        if not all([ref_pulse in ref_pulses_dict_all for
                                    ref_pulse in p.ref_pulse]):
                            continue
                        t0_list = []
                        # delay/ref_point may be scalars (broadcast) or
                        # per-reference lists
                        delay_list = [p.delay] * len(p.ref_pulse) if not isinstance(p.delay, list) else p.delay
                        ref_point_list = [p.ref_point] * len(p.ref_pulse) if not isinstance(p.ref_point, list) \
                            else p.ref_point
                        for (ref_pulse, delay, ref_point) in zip(p.ref_pulse, delay_list, ref_point_list):
                            t0_list.append(ref_pulses_dict_all[ref_pulse].pulse_obj.algorithm_time() + delay -
                                           p.ref_point_new * p.pulse_obj.length +
                                           ref_point * ref_pulses_dict_all[ref_pulse].pulse_obj.length)
                        # combine the candidate start times per ref_function
                        if p.ref_function == 'max':
                            t0 = max(t0_list)
                        elif p.ref_function == 'min':
                            t0 = min(t0_list)
                        elif p.ref_function == 'mean':
                            t0 = np.mean(t0_list)
                        else:
                            raise ValueError('Passed invalid value for ' +
                                'ref_function. Allowed values are: max, min, mean.' +
                                ' Default value: max')
                    else:
                        # single reference: start time relative to the
                        # already-placed reference pulse
                        t0 = pulse.pulse_obj.algorithm_time() + p.delay - \
                             p.ref_point_new * p.pulse_obj.length + \
                             p.ref_point * pulse.pulse_obj.length
                    p.pulse_obj.algorithm_time(t0)
                    # add p.name to reference list if it is used as a key
                    # in pulses
                    if p.pulse_obj.name in pulses:
                        ref_pulses_dict_new.update({p.pulse_obj.name: p})
                    visited_pulses.append((t0, i, p))
                    i += 1
            ref_pulses_dict = ref_pulses_dict_new
            ref_pulses_dict_all.update(ref_pulses_dict_new)

        if len(visited_pulses) != len(self.resolved_pulses):
            log.error(f"{len(visited_pulses), len(self.resolved_pulses)}")
            for unpulse in visited_pulses:
                if unpulse not in self.resolved_pulses:
                    log.error(unpulse)
            raise Exception(f'Not all pulses have been resolved: '
                            f'{self.resolved_pulses}')

        if resolve_block_align:
            # shift block-end-aligned pulses by a fraction (block_align) of
            # the gap to the simultaneous end pulse, then re-resolve once
            re_resolve = False
            for i in range(len(visited_pulses)):
                p = visited_pulses[i][2]
                if p.block_align is not None:
                    n = p.pulse_obj.name
                    end_pulse = ref_pulses_dict_all[n[:-len('start')] + 'end']
                    simultaneous_end_pulse = ref_pulses_dict_all[
                        n[:n[:-len('-|-start')].rfind('-|-') + 3] +
                        'simultaneous_end_pulse']
                    Delta_t = p.block_align * (
                        simultaneous_end_pulse.pulse_obj.algorithm_time() -
                        end_pulse.pulse_obj.algorithm_time())
                    if abs(Delta_t) > 1e-14:
                        p.delay += Delta_t
                        re_resolve = True
                    p.block_align = None
            if re_resolve:
                self.resolve_timing(resolve_block_align=False)
                return

        # adds the resolved pulses to the elements OrderedDictionary
        for (t0, i, p) in sorted(visited_pulses):
            if p.pulse_obj.element_name not in self.elements:
                self.elements[p.pulse_obj.element_name] = [p.pulse_obj]
            elif p.pulse_obj.element_name in self.elements:
                self.elements[p.pulse_obj.element_name].append(p.pulse_obj)

        # sort resolved_pulses by ascending pulse middle. Used for Z_gate
        # resolution
        for i in range(len(visited_pulses)):
            t0 = visited_pulses[i][0]
            p = visited_pulses[i][2]
            visited_pulses[i] = (t0 + p.pulse_obj.length / 2,
                                 visited_pulses[i][1], p)
        ordered_unres_pulses = []
        for (t0, i, p) in sorted(visited_pulses):
            ordered_unres_pulses.append(p)
        self.resolved_pulses = ordered_unres_pulses
def add_flux_crosstalk_cancellation_channels(self):
if self.pulsar.flux_crosstalk_cancellation():
for p in self.resolved_pulses:
if any([ch in self.pulsar.flux_channels() for ch in
p.pulse_obj.channels]):
p.pulse_obj.crosstalk_cancellation_channels = \
self.pulsar.flux_channels()
p.pulse_obj.crosstalk_cancellation_mtx = \
self.pulsar.flux_crosstalk_cancellation_mtx()
p.pulse_obj.crosstalk_cancellation_shift_mtx = \
self.pulsar.flux_crosstalk_cancellation_shift_mtx()
    def add_charge_compensation(self):
        """
        Adds charge compensation pulse to channels with pulsar parameter
        charge_buildup_compensation.

        For every analog channel that requests compensation, the integrated
        pulse area over the whole segment is computed, and a square pulse of
        opposite-sign area is appended after the last pulse of the segment.
        Affected element lengths in self.element_start_end are updated.
        """
        t_end = -float('inf')
        # pulse_area: channel -> [summed pulse area, name of last element
        # seen on that channel]
        pulse_area = {}
        compensation_chan = set()
        # Find channels where charge compensation should be applied
        for c in self.pulsar.channels:
            if self.pulsar.get('{}_type'.format(c)) != 'analog':
                continue
            if self.pulsar.get('{}_charge_buildup_compensation'.format(c)):
                compensation_chan.add(c)
        # * generate the pulse_area dictionary containing for each channel
        #   that has to be compensated the sum of all pulse areas on that
        #   channel + the name of the last element
        # * and find the end time of the last pulse of the segment
        for element in self.element_start_end.keys():
            # finds the channels of AWGs with that element
            awg_channels = set()
            for awg in self.element_start_end[element]:
                chan = set(self.pulsar.find_awg_channels(awg))
                awg_channels = awg_channels.union(chan)
            # Calculate the tvals dictionary for the element (only for the
            # channels that actually need compensation)
            tvals = self.tvals(compensation_chan & awg_channels, element)
            for pulse in self.elements[element]:
                # Find the end of the last pulse of the segment
                t_end = max(t_end, pulse.algorithm_time() + pulse.length)
                for c in pulse.masked_channels():
                    if c not in compensation_chan:
                        continue
                    awg = self.pulsar.get('{}_awg'.format(c))
                    element_start_time = self.get_element_start(element, awg)
                    # sample indices of the pulse within the element
                    pulse_start = self.time2sample(
                        pulse.element_time(element_start_time), channel=c)
                    pulse_end = self.time2sample(
                        pulse.element_time(element_start_time) + pulse.length,
                        channel=c)
                    if c in pulse_area:
                        pulse_area[c][0] += pulse.pulse_area(
                            c, tvals[c][pulse_start:pulse_end])
                        # Overwrite this entry for all elements. The last
                        # element on that channel will be the one that
                        # is saved.
                        pulse_area[c][1] = element
                    else:
                        pulse_area[c] = [
                            pulse.pulse_area(
                                c, tvals[c][pulse_start:pulse_end]), element
                        ]
        # Add all compensation pulses to the last element after the last pulse
        # of the segment and for each element with a compensation pulse save
        # the pulse with the greatest length to determine the new length
        # of the element
        i = 1        # running index for compensation pulse names
        comp_i = 1   # running index for dedicated compensation elements
        comp_dict = {}       # RO AWG -> dedicated compensation element name
        longest_pulse = {}   # (element, awg) -> longest compensation pulse
        for c in pulse_area:
            comp_delay = self.pulsar.get(
                '{}_compensation_pulse_delay'.format(c))
            amp = self.pulsar.get('{}_amp'.format(c))
            amp *= self.pulsar.get('{}_compensation_pulse_scale'.format(c))
            # If pulse length was smaller than min_length, the amplitude will
            # be reduced
            length = abs(pulse_area[c][0] / amp)
            awg = self.pulsar.get('{}_awg'.format(c))
            min_length = self.pulsar.get(
                '{}_compensation_pulse_min_length'.format(awg))
            if length < min_length:
                length = min_length
                amp = abs(pulse_area[c][0] / length)
            # sign of the compensation amplitude is opposite to the
            # accumulated area
            if pulse_area[c][0] > 0:
                amp = -amp
            last_element = pulse_area[c][1]
            # for RO elements create a separate element for compensation pulses
            if last_element in self.acquisition_elements:
                RO_awg = self.pulsar.get('{}_awg'.format(c))
                if RO_awg not in comp_dict:
                    last_element = 'compensation_el{}_{}'.format(
                        comp_i, self.name)
                    comp_dict[RO_awg] = last_element
                    self.elements[last_element] = []
                    self.element_start_end[last_element] = {RO_awg: [t_end, 0]}
                    self.elements_on_awg[RO_awg].append(last_element)
                    comp_i += 1
                else:
                    last_element = comp_dict[RO_awg]
            kw = {
                'amplitude': amp,
                'buffer_length_start': comp_delay,
                'buffer_length_end': comp_delay,
                'pulse_length': length,
                'gaussian_filter_sigma': self.pulsar.get(
                    '{}_compensation_pulse_gaussian_filter_sigma'.format(c))
            }
            pulse = pl.BufferedSquarePulse(
                last_element, c, name='compensation_pulse_{}'.format(i), **kw)
            i += 1
            # Set the pulse to start after the last pulse of the sequence
            pulse.algorithm_time(t_end)
            # Save the length of the longer pulse in longest_pulse dictionary
            total_length = 2 * comp_delay + length
            longest_pulse[(last_element,awg)] = \
                max(longest_pulse.get((last_element,awg),0), total_length)
            self.elements[last_element].append(pulse)
        # Extend each element that received a compensation pulse so that it
        # contains its longest compensation pulse
        for (el, awg) in longest_pulse:
            length_comp = longest_pulse[(el, awg)]
            el_start = self.get_element_start(el, awg)
            new_end = t_end + length_comp
            new_samples = self.time2sample(new_end - el_start, awg=awg)
            # make sure that element length is multiple of
            # sample granularity
            gran = self.pulsar.get('{}_granularity'.format(awg))
            if new_samples % gran != 0:
                new_samples += gran - new_samples % gran
            self.element_start_end[el][awg][1] = new_samples
def gen_refpoint_dict(self):
"""
Returns a dictionary of UnresolvedPulses with their reference_points as
keys.
"""
pulses = {}
for pulse in self.resolved_pulses:
ref_pulse_list = pulse.ref_pulse
if not isinstance(ref_pulse_list, list):
ref_pulse_list = [ref_pulse_list]
for p in ref_pulse_list:
if p not in pulses:
pulses[p] = [pulse]
else:
pulses[p].append(pulse)
return pulses
def gen_elements_on_awg(self):
"""
Updates the self.elements_on_AWG dictionary
"""
if self.elements == odict():
self.resolve_timing()
self.elements_on_awg = {}
for element in self.elements:
for pulse in self.elements[element]:
for channel in pulse.masked_channels():
awg = self.pulsar.get(channel + '_awg')
if awg in self.elements_on_awg and \
element not in self.elements_on_awg[awg]:
self.elements_on_awg[awg].append(element)
elif awg not in self.elements_on_awg:
self.elements_on_awg[awg] = [element]
def find_awg_hierarchy(self):
masters = {awg for awg in self.pulsar.awgs
if len(self.pulsar.get('{}_trigger_channels'.format(awg))) == 0}
# generate dictionary triggering_awgs (keys are trigger AWGs and
# values triggered AWGs) and tirggered_awgs (keys are triggered AWGs
# and values triggering AWGs)
triggering_awgs = {}
triggered_awgs = {}
awgs = set(self.pulsar.awgs) - masters
for awg in awgs:
for channel in self.pulsar.get('{}_trigger_channels'.format(awg)):
trigger_awg = self.pulsar.get('{}_awg'.format(channel))
if trigger_awg in triggering_awgs:
triggering_awgs[trigger_awg].append(awg)
else:
triggering_awgs[trigger_awg] = [awg]
if awg in triggered_awgs:
triggered_awgs[awg].append(trigger_awg)
else:
triggered_awgs[awg] = [trigger_awg]
# impletment Kahn's algorithm to sort the AWG by hierarchy
trigger_awgs = masters
awg_hierarchy = []
while len(trigger_awgs) != 0:
awg = trigger_awgs.pop()
awg_hierarchy.append(awg)
if awg not in triggering_awgs:
continue
for triggered_awg in triggering_awgs[awg]:
triggered_awgs[triggered_awg].remove(awg)
if len(triggered_awgs[triggered_awg]) == 0:
trigger_awgs.add(triggered_awg)
awg_hierarchy.reverse()
return awg_hierarchy
    def gen_trigger_el(self):
        """
        For each element:
        For each AWG the element is played on, this method:
            * adds the element to the elements_on_AWG dictionary
            * instantiates a trigger pulse on the triggering channel of the
                AWG, placed in a suitable element on the triggering AWG,
                taking AWG delay into account.
            * adds the trigger pulse to the elements list

        Afterwards it re-derives start/length of all trigger elements and
        runs the overlap and master-AWG sanity checks.
        """
        # Generate the dictionary elements_on_awg, that for each AWG contains
        # a list of the elements on that AWG
        self.gen_elements_on_awg()
        # Find the AWG hierarchy. Needed to add the trigger pulses first to
        # the AWG that do not trigger any other AWGs, then the AWGs that
        # trigger these AWGs and so on.
        awg_hierarchy = self.find_awg_hierarchy()
        i = 1  # running index for unique trigger pulse names
        for awg in awg_hierarchy:
            if awg not in self.elements_on_awg:
                continue
            # for master AWG no trigger_pulse has to be added
            if len(self.pulsar.get('{}_trigger_channels'.format(awg))) == 0:
                continue
            # used for updating the length of the trigger elements after adding
            # the trigger pulses
            trigger_el_set = set()
            for element in self.elements_on_awg[awg]:
                # Calculate the trigger pulse time
                # NOTE(review): the two consecutive minus signs make this
                # el_start + delay - buffer_length_start, i.e. the AWG delay
                # is ADDED here — confirm this sign is intended.
                [el_start, _] = self.element_start_length(element, awg)
                trigger_pulse_time = el_start - \
                                     - self.pulsar.get('{}_delay'.format(awg))\
                                     - self.trigger_pars['buffer_length_start']
                # Find the trigger_AWGs that trigger the AWG
                trigger_awgs = set()
                for channel in self.pulsar.get(
                        '{}_trigger_channels'.format(awg)):
                    trigger_awgs.add(self.pulsar.get('{}_awg'.format(channel)))
                # For each trigger_AWG, find the element to play the trigger
                # pulse in
                trigger_elements = {}
                for trigger_awg in trigger_awgs:
                    # if there is no element on that AWG create a new element
                    if self.elements_on_awg.get(trigger_awg, None) is None:
                        trigger_elements[
                            trigger_awg] = 'trigger_element_{}'.format(
                                self.name)
                    # else find the element that is closest to the
                    # trigger pulse
                    else:
                        trigger_elements[
                            trigger_awg] = self.find_trigger_element(
                                trigger_awg, trigger_pulse_time)
                # Add the trigger pulse to all triggering channels
                for channel in self.pulsar.get(
                        '{}_trigger_channels'.format(awg)):
                    trigger_awg = self.pulsar.get('{}_awg'.format(channel))
                    trig_pulse = pl.BufferedSquarePulse(
                        trigger_elements[trigger_awg],
                        channel=channel,
                        name='trigger_pulse_{}'.format(i),
                        **self.trigger_pars)
                    i += 1
                    # shift by a quarter sample to avoid rounding issues at
                    # sample boundaries
                    trig_pulse.algorithm_time(trigger_pulse_time -
                                              0.25/self.pulsar.clock(channel))
                    # Add trigger element and pulse to seg.elements
                    if trig_pulse.element_name in self.elements:
                        self.elements[trig_pulse.element_name].append(
                            trig_pulse)
                    else:
                        self.elements[trig_pulse.element_name] = [trig_pulse]
                    # Add the trigger_element to elements_on_awg[trigger_awg]
                    if trigger_awg not in self.elements_on_awg:
                        self.elements_on_awg[trigger_awg] = [
                            trigger_elements[trigger_awg]
                        ]
                    elif trigger_elements[
                            trigger_awg] not in self.elements_on_awg[
                                trigger_awg]:
                        self.elements_on_awg[trigger_awg].append(
                            trigger_elements[trigger_awg])
                # collect (awg, element) pairs for the length update below
                trigger_el_set = trigger_el_set | set(
                    trigger_elements.items())
            # For all trigger elements update the start and length
            # after having added the trigger pulses
            for (awg, el) in trigger_el_set:
                self.element_start_length(el, awg)
        # checks if elements on AWGs overlap
        self._test_overlap()
        # checks if there is only one element on the master AWG
        self._test_trigger_awg()
def find_trigger_element(self, trigger_awg, trigger_pulse_time):
"""
For a trigger_AWG that is used for generating triggers as well as
normal pulses, this method returns the name of the element to which the
trigger pulse is closest.
"""
time_distance = []
for element in self.elements_on_awg[trigger_awg]:
[el_start, samples] = self.element_start_length(
element, trigger_awg)
el_end = el_start + self.sample2time(samples, awg=trigger_awg)
distance_start_end = [
[
abs(trigger_pulse_time + self.trigger_pars['length'] / 2 -
el_start), element
],
[
abs(trigger_pulse_time + self.trigger_pars['length'] / 2 -
el_end), element
]
]
time_distance += distance_start_end
trigger_element = min(time_distance)[1]
return trigger_element
def get_element_end(self, element, awg):
"""
This method returns the end of an element on an AWG in algorithm_time
"""
samples = self.element_start_end[element][awg][1]
length = self.sample2time(samples, awg=awg)
return self.element_start_end[element][awg][0] + length
def get_element_start(self, element, awg):
"""
This method returns the start of an element on an AWG in algorithm_time
"""
return self.element_start_end[element][awg][0]
def get_segment_start_end(self):
"""
Returns the start and end of the segment in algorithm_time
"""
for i in range(2):
start_end_times = np.array(
[[self.get_element_start(el, awg),
self.get_element_end(el, awg)]
for awg, v in self.elements_on_awg.items() for el in v])
if len(start_end_times) > 0:
# the segment has been resolved before
break
# Resolve the segment and retry. We set store_segment_length_timer
# to False to avoid that resolve_segment calls
# get_segment_start_end, which might cause an infinite loop in
# some pathological cases.
self.resolve_segment(store_segment_length_timer=False)
return np.min(start_end_times[:, 0]), np.max(start_end_times[:, 1])
def _test_overlap(self):
"""
Tests for all AWGs if any of their elements overlap.
"""
for awg in self.elements_on_awg:
el_list = []
i = 0
for el in self.elements_on_awg[awg]:
if el not in self.element_start_end:
self.element_start_length(el, awg)
el_list.append([self.element_start_end[el][awg][0], i, el])
i += 1
el_list.sort()
for i in range(len(el_list) - 1):
prev_el = el_list[i][2]
el_prev_start = self.get_element_start(prev_el, awg)
el_prev_end = self.get_element_end(prev_el, awg)
el_length = el_prev_end - el_prev_start
# If element length is shorter than min length, 0s will be
# appended by pulsar. Test for elements with at least
# min_el_len if they overlap.
min_el_len = self.pulsar.get('{}_min_length'.format(awg))
if el_length < min_el_len:
el_prev_end = el_prev_start + min_el_len
el_new_start = el_list[i + 1][0]
if el_prev_end > el_new_start:
raise ValueError('{} and {} overlap on {}'.format(
prev_el, el_list[i + 1][2], awg))
def _test_trigger_awg(self):
"""
Checks if there is more than one element on the AWGs that are not
triggered by another AWG.
"""
self.gen_elements_on_awg()
for awg in self.elements_on_awg:
if len(self.pulsar.get('{}_trigger_channels'.format(awg))) != 0:
continue
if len(self.elements_on_awg[awg]) > 1:
raise ValueError(
'There is more than one element on {}'.format(awg))
def resolve_Z_gates(self):
"""
The phase of a basis rotation is acquired by an basis pulse, if the
middle of the basis rotation pulse happens before the middle of the
basis pulse. Using that self.resolved_pulses was sorted by
self.resolve_timing() the acquired phases can be calculated.
"""
basis_phases = {}
for pulse in self.resolved_pulses:
for basis, rotation in pulse.basis_rotation.items():
basis_phases[basis] = basis_phases.get(basis, 0) + rotation
if pulse.basis is not None:
pulse.pulse_obj.phase = pulse.original_phase - \
basis_phases.get(pulse.basis, 0)
def element_start_length(self, element, awg):
"""
Finds and saves the start and length of an element on AWG awg
in self.element_start_end.
"""
if element not in self.element_start_end:
self.element_start_end[element] = {}
# find element start, end and length
t_start = float('inf')
t_end = -float('inf')
for pulse in self.elements[element]:
for ch in pulse.masked_channels():
if self.pulsar.get(f'{ch}_awg') == awg:
break
else:
continue
t_start = min(pulse.algorithm_time(), t_start)
t_end = max(pulse.algorithm_time() + pulse.length, t_end)
# make sure that element start is a multiple of element
# start granularity
# we allow rounding up of the start time by half a sample, otherwise
# we round the start time down
start_gran = self.pulsar.get(
'{}_element_start_granularity'.format(awg))
sample_time = 1/self.pulsar.clock(awg=awg)
if start_gran is not None:
t_start = math.floor((t_start + 0.5*sample_time) / start_gran) \
* start_gran
# make sure that element length is multiple of
# sample granularity
gran = self.pulsar.get('{}_granularity'.format(awg))
samples = self.time2sample(t_end - t_start, awg=awg)
if samples % gran != 0:
samples += gran - samples % gran
self.element_start_end[element][awg] = [t_start, samples]
return [t_start, samples]
def waveforms(self, awgs=None, elements=None, channels=None,
codewords=None):
"""
After all the pulses have been added, the timing resolved and the
trigger pulses added, the waveforms of the segment can be compiled.
This method returns a dictionary:
AWG_wfs =
= {AWG_name:
{(position_of_element, element_name):
{codeword:
{channel_id: channel_waveforms}
...
}
...
}
...
}
"""
if awgs is None:
awgs = set(self.elements_on_awg)
if channels is None:
channels = set(self.pulsar.channels)
if elements is None:
elements = set(self.elements)
awg_wfs = {}
for awg in awgs:
# only procede for AWGs with waveforms
if awg not in self.elements_on_awg:
continue
awg_wfs[awg] = {}
channel_list = set(self.pulsar.find_awg_channels(awg)) & channels
if channel_list == set():
continue
channel_list = list(channel_list)
for i, element in enumerate(self.elements_on_awg[awg]):
if element not in elements:
continue
awg_wfs[awg][(i, element)] = {}
tvals = self.tvals(channel_list, element)
wfs = {}
element_start_time = self.get_element_start(element, awg)
for pulse in self.elements[element]:
# checks whether pulse is played on AWG
pulse_channels = set(pulse.masked_channels()) & set(channel_list)
if pulse_channels == set():
continue
if codewords is not None and \
pulse.codeword not in codewords:
continue
# fills wfs with zeros for used channels
if pulse.codeword not in wfs:
wfs[pulse.codeword] = {}
for channel in pulse_channels:
wfs[pulse.codeword][channel] = np.zeros(
len(tvals[channel]))
else:
for channel in pulse_channels:
if channel not in wfs[pulse.codeword]:
wfs[pulse.codeword][channel] = np.zeros(
len(tvals[channel]))
# calculate the pulse tvals
chan_tvals = {}
pulse_start = self.time2sample(
pulse.element_time(element_start_time), awg=awg)
pulse_end = self.time2sample(
pulse.element_time(element_start_time) + pulse.length,
awg=awg)
for channel in pulse_channels:
chan_tvals[channel] = tvals[channel].copy(
)[pulse_start:pulse_end]
# calculate pulse waveforms
pulse_wfs = pulse.waveforms(chan_tvals)
# insert the waveforms at the correct position in wfs
for channel in pulse_channels:
wfs[pulse.codeword][channel][
pulse_start:pulse_end] += pulse_wfs[channel]
# for codewords: add the pulses that do not have a codeword to
# all codewords
if 'no_codeword' in wfs:
for codeword in wfs:
if codeword is not 'no_codeword':
for channel in wfs['no_codeword']:
if channel in wfs[codeword]:
wfs[codeword][channel] += wfs[
'no_codeword'][channel]
else:
wfs[codeword][channel] = wfs[
'no_codeword'][channel]
# do predistortion
for codeword in wfs:
for c in wfs[codeword]:
if not self.pulsar.get(
'{}_type'.format(c)) == 'analog':
continue
if not self.pulsar.get(
'{}_distortion'.format(c)) == 'precalculate':
continue
wf = wfs[codeword][c]
distortion_dict = self.distortion_dicts.get(c, None)
if distortion_dict is None:
distortion_dict = self.pulsar.get(
'{}_distortion_dict'.format(c))
else:
distortion_dict = \
flux_dist.process_filter_coeffs_dict(
distortion_dict,
default_dt=1 / self.pulsar.clock(
channel=c))
fir_kernels = distortion_dict.get('FIR', None)
if fir_kernels is not None:
if hasattr(fir_kernels, '__iter__') and not \
hasattr(fir_kernels[0], '__iter__'): # 1 kernel
wf = flux_dist.filter_fir(fir_kernels, wf)
else:
for kernel in fir_kernels:
wf = flux_dist.filter_fir(kernel, wf)
iir_filters = distortion_dict.get('IIR', None)
if iir_filters is not None:
wf = flux_dist.filter_iir(iir_filters[0],
iir_filters[1], wf)
wfs[codeword][c] = wf
# truncation and normalization
for codeword in wfs:
for c in wfs[codeword]:
# truncate all values that are out of bounds and
# normalize the waveforms
amp = self.pulsar.get('{}_amp'.format(c))
if self.pulsar.get('{}_type'.format(c)) == 'analog':
if np.max(wfs[codeword][c]) > amp:
logging.warning(
'Clipping waveform {} > {}'.format(
np.max(wfs[codeword][c]), amp))
if np.min(wfs[codeword][c]) < -amp:
logging.warning(
'Clipping waveform {} < {}'.format(
np.min(wfs[codeword][c]), -amp))
np.clip(
wfs[codeword][c],
-amp,
amp,
out=wfs[codeword][c])
# normalize wfs
wfs[codeword][c] = wfs[codeword][c] / amp
# marker channels have to be 1 or 0
elif self.pulsar.get('{}_type'.format(c)) == 'marker':
wfs[codeword][c] = (wfs[codeword][c] > 0)\
.astype(np.int)
# save the waveforms in the dictionary
for codeword in wfs:
awg_wfs[awg][(i, element)][codeword] = {}
for channel in wfs[codeword]:
awg_wfs[awg][(i, element)][codeword][self.pulsar.get(
'{}_id'.format(channel))] = (
wfs[codeword][channel])
return awg_wfs
def get_element_codewords(self, element, awg=None):
codewords = set()
if awg is not None:
channels = set(self.pulsar.find_awg_channels(awg))
for pulse in self.elements[element]:
if awg is not None and len(set(pulse.masked_channels()) & channels) == 0:
continue
codewords.add(pulse.codeword)
return codewords
def get_element_channels(self, element, awg=None):
channels = set()
if awg is not None:
awg_channels = set(self.pulsar.find_awg_channels(awg))
for pulse in self.elements[element]:
if awg is not None:
channels |= set(pulse.masked_channels()) & awg_channels
else:
channels |= set(pulse.masked_channels())
return channels
def calculate_hash(self, elname, codeword, channel):
if not self.pulsar.reuse_waveforms():
return (self.name, elname, codeword, channel)
awg = self.pulsar.get(f'{channel}_awg')
tstart, length = self.element_start_end[elname][awg]
hashlist = []
hashlist.append(length) # element length in samples
if self.pulsar.get(f'{channel}_type') == 'analog' and \
self.pulsar.get(f'{channel}_distortion') == 'precalculate':
# don't compare the kernels, just assume that all channels'
# distortion kernels are different
hashlist.append(channel)
else:
hashlist.append(self.pulsar.clock(channel=channel)) # clock rate
for par in ['type', 'amp', 'internal_modulation']:
try:
hashlist.append(self.pulsar.get(f'{channel}_{par}'))
except KeyError:
hashlist.append(False)
for pulse in self.elements[elname]:
if pulse.codeword in {'no_codeword', codeword}:
hashlist += self.hashables(pulse, tstart, channel)
return tuple(hashlist)
@staticmethod
def hashables(pulse, tstart, channel):
"""
Wrapper for Pulse.hashables making sure to deal correctly with
crosstalk cancellation channels.
The hashables of a cancellation pulse has to include the hashables
of all pulses that it cancels. This is needed to ensure that the
cancellation pulse gets re-uploaded when any of the cancelled pulses
changes. In addition it has to include the parameters of
cancellation calibration, i.e., the relevant entries of the
crosstalk cancellation matrix and of the shift matrix.
:param pulse: a Pulse object
:param tstart: (float) start time of the element
:param channel: (str) channel name
"""
if channel in pulse.crosstalk_cancellation_channels:
hashables = []
idx_c = pulse.crosstalk_cancellation_channels.index(channel)
for c in pulse.channels:
if c in pulse.crosstalk_cancellation_channels:
idx_c2 = pulse.crosstalk_cancellation_channels.index(c)
factor = pulse.crosstalk_cancellation_mtx[idx_c, idx_c2]
shift = pulse.crosstalk_cancellation_shift_mtx[
idx_c, idx_c2] \
if pulse.crosstalk_cancellation_shift_mtx is not \
None else 0
if factor != 0:
hashables += pulse.hashables(tstart, c)
hashables += [factor, shift]
return hashables
else:
return pulse.hashables(tstart, channel)
def tvals(self, channel_list, element):
"""
Returns a dictionary with channel names of the used channels in the
element as keys and the tvals array for the channel as values.
"""
tvals = {}
for channel in channel_list:
samples = self.get_element_samples(element, channel)
awg = self.pulsar.get('{}_awg'.format(channel))
tvals[channel] = np.arange(samples) / self.pulsar.clock(
channel=channel) + self.get_element_start(element, awg)
return tvals
def get_element_samples(self, element, instrument_ref):
"""
Returns the number of samples the element occupies for the channel or
AWG.
"""
if instrument_ref in self.pulsar.channels:
awg = self.pulsar.get('{}_awg'.format(instrument_ref))
elif instrument_ref in self.pulsar.awgs:
awg = instrument_ref
else:
raise Exception('instrument_ref has to be channel or AWG name!')
return self.element_start_end[element][awg][1]
def time2sample(self, t, **kw):
"""
Converts time to a number of samples for a channel or AWG.
"""
return int(t * self.pulsar.clock(**kw) + 0.5)
def sample2time(self, samples, **kw):
"""
Converts nubmer of samples to time for a channel or AWG.
"""
return samples / self.pulsar.clock(**kw)
    def plot(self, instruments=None, channels=None, legend=True,
             delays=None, savefig=False, prop_cycle=None, frameon=True,
             channel_map=None, plot_kwargs=None, axes=None, demodulate=False,
             show_and_close=True, col_ind=0, normalized_amplitudes=True):
        """
        Plots a segment. Can only be done if the segment can be resolved.
        :param instruments (list): instruments for which pulses have to be
            plotted. Defaults to all.
        :param channels (dict): maps instrument names to lists of channel
            ids to plot. Defaults to all channels of all instruments.
        :param legend (bool): whether to add a legend to each subplot
            (default: True)
        :param delays (dict): keys are instruments, values are additional
            delays. If passed, the delay is substracted to the time values of
            this instrument, such that the pulses are plotted at timing when
            they physically occur. A key 'default' can be used to specify a
            delay for all instruments that are not explicitly given as keys.
        :param savefig: save the plot
        :param channel_map (dict): indicates which instrument channels
            correspond to which qubits. Keys = qb names, values = list of
            channels. eg. dict(qb2=['AWG8_ch3', "UHF_ch1"]). If provided,
            will plot each qubit on individual subplots.
        :param prop_cycle (dict): passed to matplotlib set_prop_cycle
        :param frameon (dict, bool): which subplot frame spines to show
        :param plot_kwargs (dict): extra keyword arguments forwarded to
            every plot call (linewidth defaults to 0.7)
        :param axes (array or axis): 2D array of matplotlib axes. if single
            axes, will be converted internally to array.
        :param demodulate (bool): plot only envelope of pulses by temporarily
            setting modulation and phase to 0. Need to recompile the sequence
            NOTE(review): the mod_frequency/phase changes are applied to the
            pulse objects in place and are not restored afterwards — confirm
            this is acceptable for callers.
        :param show_and_close: (bool) show and close the plot (default: True)
        :param col_ind: (int) when passed together with axes, this specifies
            in which column of subfigures the plots should be added
            (default: 0)
        :param normalized_amplitudes: (bool) whether amplitudes
            should be normalized to the voltage range of the channel
            (default: True)
        :return: The figure and axes objects if show_and_close is False,
            otherwise no return value.
        """
        import matplotlib.pyplot as plt
        if delays is None:
            delays = dict()
        if plot_kwargs is None:
            plot_kwargs = dict()
            plot_kwargs['linewidth'] = 0.7
        try:
            # resolve segment and populate elements/waveforms
            self.resolve_segment()
            if demodulate:
                # zero out modulation and phase so only envelopes are plotted
                for el in self.elements.values():
                    for pulse in el:
                        if hasattr(pulse, "mod_frequency"):
                            pulse.mod_frequency = 0
                        if hasattr(pulse, "phase"):
                            pulse.phase = 0
            wfs = self.waveforms(awgs=instruments, channels=None)
            n_instruments = len(wfs) if channel_map is None else \
                len(channel_map)
            if axes is not None:
                # accept a bare axis and wrap it into a 2D structure
                if np.ndim(axes) == 0:
                    axes = [[axes]]
                fig = axes[0,0].get_figure()
                ax = axes
            else:
                fig, ax = plt.subplots(nrows=n_instruments, sharex=True,
                                       squeeze=False,
                                       figsize=(16, n_instruments * 3))
            if prop_cycle is not None:
                for a in ax[:,col_ind]:
                    a.set_prop_cycle(**prop_cycle)
            sorted_keys = sorted(wfs.keys()) if instruments is None \
                else [i for i in instruments if i in wfs]
            for i, instr in enumerate(sorted_keys):
                if instr not in delays and 'default' in delays:
                    delays[instr] = delays['default']
                # plotting
                for elem_name, v in wfs[instr].items():
                    for k, wf_per_ch in v.items():
                        sorted_chans = sorted(wf_per_ch.keys())
                        for n_wf, ch in enumerate(sorted_chans):
                            wf = wf_per_ch[ch]
                            if not normalized_amplitudes:
                                # convert normalized waveform back to volts
                                wf = wf * self.pulsar.get(f'{instr}_{ch}_amp')
                            if channels is None or \
                                    ch in channels.get(instr, []):
                                tvals = \
                                    self.tvals([f"{instr}_{ch}"], elem_name[1])[
                                        f"{instr}_{ch}"] - delays.get(instr, 0)
                                if channel_map is None:
                                    # plot per device
                                    ax[i, col_ind].set_title(instr)
                                    ax[i, col_ind].plot(
                                        tvals * 1e6, wf,
                                        label=f"{elem_name[1]}_{k}_{ch}",
                                        **plot_kwargs)
                                else:
                                    # plot on each qubit subplot which includes
                                    # this channel in the channel map
                                    match = {i: qb_name
                                             for i, (qb_name, qb_chs) in
                                             enumerate(channel_map.items())
                                             if f"{instr}_{ch}" in qb_chs}
                                    for qbi, qb_name in match.items():
                                        ax[qbi, col_ind].set_title(qb_name)
                                        ax[qbi, col_ind].plot(
                                            tvals * 1e6, wf,
                                            label=f"{elem_name[1]}"
                                                  f"_{k}_{instr}_{ch}",
                                            **plot_kwargs)
                                        if demodulate:  # filling
                                            ax[qbi, col_ind].fill_between(
                                                tvals * 1e6, wf,
                                                label=f"{elem_name[1]}_"
                                                      f"{k}_{instr}_{ch}",
                                                alpha=0.05,
                                                **plot_kwargs)
            # formatting
            for a in ax[:, col_ind]:
                if isinstance(frameon, bool):
                    frameon = {k: frameon for k in ['top', 'bottom',
                                                    "right", "left"]}
                a.spines["top"].set_visible(frameon.get("top", True))
                a.spines["right"].set_visible(frameon.get("right", True))
                a.spines["bottom"].set_visible(frameon.get("bottom", True))
                a.spines["left"].set_visible(frameon.get("left", True))
                if legend:
                    a.legend(loc=[1.02, 0], prop={'size': 8})
                if normalized_amplitudes:
                    a.set_ylabel('Amplitude (norm.)')
                else:
                    a.set_ylabel('Voltage (V)')
            ax[-1, col_ind].set_xlabel('time ($\mu$s)')
            fig.suptitle(f'{self.name}', y=1.01)
            plt.tight_layout()
            if savefig:
                plt.savefig(f'{self.name}.png')
            if show_and_close:
                plt.show()
                plt.close(fig)
                return
            else:
                return fig, ax
        except Exception as e:
            log.error(f"Could not plot: {self.name}")
            raise e
def __repr__(self):
string_repr = f"---- {self.name} ----\n"
for i, p in enumerate(self.unresolved_pulses):
string_repr += f"{i}: " + repr(p) + "\n"
return string_repr
    def export_tikz(self, qb_names, tscale=1e-6):
        """
        Exports the segment as a standalone TikZ (LaTeX) circuit picture.

        :param qb_names: list of qubit names; a pulse's target qubit is
            parsed from the last characters of its op_code
        :param tscale: time unit for the horizontal axis (default: 1e-6,
            i.e. microseconds)
        :return: (str) the LaTeX document source
        """
        # last_z[qb] = (time, stack height) of the most recent virtual gate,
        # used to stack labels of nearby virtual gates vertically
        last_z = [(-np.inf, 0)] * len(qb_names)
        output = ''
        z_output = ''
        start_output = '\\documentclass{standalone}\n\\usepackage{tikz}\n\\begin{document}\n\\scalebox{2}{'
        start_output += '\\begin{tikzpicture}[x=10cm,y=2cm]\n'
        start_output += '\\tikzstyle{CZdot} = [shape=circle, thick,draw,inner sep=0,minimum size=.5mm, fill=black]\n'
        start_output += '\\tikzstyle{gate} = [draw,fill=white,minimum width=1cm, rotate=90]\n'
        start_output += '\\tikzstyle{zgate} = [rotate=0]\n'
        tmin = np.inf
        tmax = -np.inf
        num_single_qb = 0
        num_two_qb = 0
        num_virtual = 0
        self.resolve_segment()
        for p in self.resolved_pulses:
            # skip pulses without op_code and readout pulses
            if p.op_code != '' and p.op_code[:2] != 'RO':
                l = p.pulse_obj.length
                t = p.pulse_obj._t0 + l / 2  # middle of the pulse
                tmin = min(tmin, p.pulse_obj._t0)
                tmax = max(tmax, p.pulse_obj._t0 + p.pulse_obj.length)
                # target qubit is encoded in the last 3 chars of the op_code
                qb = qb_names.index(p.op_code[-3:])
                op_code = p.op_code[:-4]
                qbt = 0
                # two-qubit op_codes carry a second qubit
                if op_code[-3:-1] == 'qb':
                    qbt = qb_names.index(op_code[-3:])
                    op_code = op_code[:-4]
                if op_code[-1:] == 's':
                    op_code = op_code[:-1]
                if op_code[:2] == 'CZ' or op_code[:4] == 'upCZ':
                    num_two_qb += 1
                    if len(op_code) > 4:
                        val = -float(op_code[4:])
                        # NOTE(review): gate_type and factor are only
                        # assigned in the single-qubit branch below; if a
                        # parametrized CZ occurs before any single-qubit
                        # gate this raises NameError, otherwise stale values
                        # are reused — confirm the intended label format.
                        gate_formatted = f'{gate_type}{(factor * val):.1f}'.replace(
                            '.0', '')
                        output += f'\\draw({t / tscale:.4f},-{qb}) node[CZdot] {{}} -- ({t / tscale:.4f},-{qbt}) node[gate, minimum height={l / tscale * 100:.4f}mm] {{\\tiny {gate_formatted}}};\n'
                    else:
                        output += f'\\draw({t / tscale:.4f},-{qb}) node[CZdot] {{}} -- ({t / tscale:.4f},-{qbt}) node[CZdot] {{}};\n'
                elif op_code[0] == 'I':
                    continue
                else:
                    # single-qubit rotation, e.g. 'X90', 'mY45'
                    if op_code[0] == 'm':
                        factor = -1
                        op_code = op_code[1:]
                    else:
                        factor = 1
                    gate_type = 'R' + op_code[:1]
                    val = float(op_code[1:])
                    if val == 180:
                        gate_formatted = op_code[:1]
                    else:
                        gate_formatted = f'{gate_type}{(factor * val):.1f}'.replace(
                            '.0', '')
                    if l == 0:
                        # zero-length pulse: a virtual gate, drawn as a
                        # stacked label above the qubit line
                        if t - last_z[qb][0] > 1e-9:
                            z_height = 0 if (
                                t - last_z[qb][0] > 100e-9 or last_z[qb][
                                    1] >= 3) else last_z[qb][1] + 1
                            z_output += f'\\draw[dashed,thick,shift={{(0,.03)}}] ({t / tscale:.4f},-{qb})--++(0,{0.3 + z_height * 0.1});\n'
                        else:
                            z_height = last_z[qb][1] + 1
                        z_output += f'\\draw({t / tscale:.4f},-{qb}) node[zgate,shift={{({(0, .35 + z_height * .1)})}}] {{\\tiny {gate_formatted}}};\n'
                        last_z[qb] = (t, z_height)
                        num_virtual += 1
                    else:
                        output += f'\\draw({t / tscale:.4f},-{qb}) node[gate, minimum height={l / tscale * 100:.4f}mm] {{\\tiny {gate_formatted}}};\n'
                        num_single_qb += 1
        # horizontal qubit lines with labels
        qb_output = ''
        for qb, qb_name in enumerate(qb_names):
            qb_output += f'\draw ({tmin / tscale:.4f},-{qb}) node[left] {{{qb_name}}} -- ({tmax / tscale:.4f},-{qb});\n'
        output = start_output + qb_output + output + z_output
        # time axis with tick labels
        axis_ycoord = -len(qb_names) + .4
        output += f'\\foreach\\x in {{{tmin / tscale},{tmin / tscale + .2},...,{tmax / tscale}}} \\pgfmathprintnumberto[fixed]{{\\x}}{{\\tmp}} \draw (\\x,{axis_ycoord})--++(0,-.1) node[below] {{\\tmp}} ;\n'
        output += f'\\draw[->] ({tmin / tscale},{axis_ycoord}) -- ({tmax / tscale},{axis_ycoord}) node[right] {{$t/\\mathrm{{\\mu s}}$}};\n'
        output += '\\end{tikzpicture}}\end{document}'
        output += f'\n% {num_single_qb} single-qubit gates, {num_two_qb} two-qubit gates, {num_virtual} virtual gates'
        return output
def rename(self, new_name):
"""
Renames a segment with the given new name. Hunts down element names in
unresolved pulses and acquisition elements that might have made use of
the old segment_name and renames them too.
Note: this function relies on the convention that the element_name ends with
"_segmentname".
Args:
new_name:
Returns:
"""
old_name = self.name
# rename element names in unresolved_pulses and resolved_pulses making
# use of the old name
for p in self.unresolved_pulses + self.resolved_pulses:
if hasattr(p.pulse_obj, "element_name") \
and p.pulse_obj.element_name.endswith(f"_{old_name}"):
p.pulse_obj.element_name = \
p.pulse_obj.element_name[:-(len(old_name) + 1)] + '_' \
+ new_name
# rebuild acquisition elements that used the old segment name
new_acq_elements = set()
for el in self.acquisition_elements:
if el.endswith(f"_{old_name}"):
new_acq_elements.add(el[:-(len(old_name) + 1)] + '_' \
+ new_name)
else:
new_acq_elements.add(el)
log.warning(f'Acquisition element name: {el} not ending'
f' with "_segmentname": {old_name}. Keeping '
f'current element name when renaming '
f'the segment.')
self.acquisition_elements = new_acq_elements
# rename segment name
self.name = new_name
# rename timer
self.timer.name = new_name
def __deepcopy__(self, memo):
cls = self.__class__
new_seg = cls.__new__(cls)
memo[id(self)] = new_seg
for k, v in self.__dict__.items():
if k == "pulsar": # the reference to pulsar cannot be deepcopied
setattr(new_seg, k, v)
else:
setattr(new_seg, k, deepcopy(v, memo))
return new_seg
class UnresolvedPulse:
    """
    Wraps a pulse object together with the timing information needed to
    place it inside a segment.

    pulse_pars: dictionary containing pulse parameters
    ref_pulse: 'segment_start', 'previous_pulse', pulse.name, or a list of
        multiple pulse.name.
    ref_point: 'start', 'end', 'middle', reference point of the reference pulse
    ref_point_new: 'start', 'end', 'middle', reference point of the new pulse
    ref_function: 'max', 'min', 'mean', specifies how timing is chosen if
        multiple pulse names are listed in ref_pulse (default: 'max')

    Note: 'basis_rotation' is *popped* from pulse_pars (the caller's dict is
    mutated); all other keys are read non-destructively.
    """

    def __init__(self, pulse_pars):
        self.ref_pulse = pulse_pars.get('ref_pulse', 'previous_pulse')
        # Fractional position within a pulse for each alignment keyword.
        alignments = {'start': 0, 'middle': 0.5, 'center': 0.5, 'end': 1}
        ref_point = pulse_pars.get('ref_point', 'end')
        if ref_point == 'end':
            self.ref_point = 1
        elif ref_point == 'middle':
            self.ref_point = 0.5
        elif ref_point == 'start':
            self.ref_point = 0
        else:
            raise ValueError('Passed invalid value for ref_point. Allowed '
                'values are: start, end, middle. Default value: end')

        ref_point_new = pulse_pars.get('ref_point_new', 'start')
        if ref_point_new == 'start':
            self.ref_point_new = 0
        elif ref_point_new == 'middle':
            self.ref_point_new = 0.5
        elif ref_point_new == 'end':
            self.ref_point_new = 1
        else:
            raise ValueError('Passed invalid value for ref_point_new. Allowed '
                'values are: start, end, middle. Default value: start')

        self.ref_function = pulse_pars.get('ref_function', 'max')
        # block_align may be an alignment keyword or a fraction;
        # keywords are translated, fractions pass through unchanged.
        self.block_align = pulse_pars.get('block_align', None)
        if self.block_align is not None:
            self.block_align = alignments.get(self.block_align,
                                              self.block_align)
        self.delay = pulse_pars.get('pulse_delay', 0)
        self.original_phase = pulse_pars.get('phase', 0)
        self.basis = pulse_pars.get('basis', None)
        self.operation_type = pulse_pars.get('operation_type', None)
        self.basis_rotation = pulse_pars.pop('basis_rotation', {})
        self.op_code = pulse_pars.get('op_code', '')

        # Look up the pulse class in the registered pulse libraries; the
        # last library providing the attribute wins.
        pulse_func = None
        for module in bpl.pulse_libraries:
            try:
                pulse_func = getattr(module, pulse_pars['pulse_type'])
            except AttributeError:
                pass
        if pulse_func is None:
            raise KeyError('pulse_type {} not recognized'.format(
                pulse_pars['pulse_type']))
        self.pulse_obj = pulse_func(**pulse_pars)

        # allow a pulse to modify its op_code (e.g., for C-ARB gates)
        self.op_code = getattr(self.pulse_obj, 'op_code', self.op_code)

        if self.pulse_obj.codeword != 'no_codeword' and \
                self.basis_rotation != {}:
            raise Exception(
                'Codeword pulse {} does not support basis_rotation!'.format(
                    self.pulse_obj.name))

    def __repr__(self):
        """Compact multi-line summary; only non-default fields are shown."""
        string_repr = self.pulse_obj.name
        # Identity comparison with None (was the non-idiomatic "!= None").
        if self.operation_type is not None:
            string_repr += f"\n operation_type: {self.operation_type}"
        string_repr += f"\n ref_pulse: {self.ref_pulse}"
        if self.ref_point != 1:
            string_repr += f"\n ref_point: {self.ref_point}"
        if self.delay != 0:
            string_repr += f"\n delay: {self.delay}"
        if self.original_phase != 0:
            string_repr += f"\n phase: {self.original_phase}"
        return string_repr
| {
"repo_name": "QudevETH/PycQED_py3",
"path": "pycqed/measurement/waveform_control/segment.py",
"copies": "1",
"size": "66143",
"license": "mit",
"hash": 2053430329313236000,
"line_mean": 43.2132352941,
"line_max": 206,
"alpha_frac": 0.5027894108,
"autogenerated": false,
"ratio": 4.129549853280889,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.513233926408089,
"avg_score": null,
"num_lines": null
} |
"""A selection of useful functions for optics, especially Fourier optics. The
documentation is designed to be used with sphinx (still lots to do)
Authors:
Dr Michael Ireland
Adam Rains
"""
from __future__ import print_function, division
import pdb
import numpy as np
import matplotlib.pyplot as plt
from scipy import special
from scipy import optimize
from .utils import *
# Optional: use pyfftw for faster FFTs when available. nthreads doubles as a
# flag: 0 means "pyfftw unavailable, fall back to numpy's FFT routines".
try:
    import pyfftw
    pyfftw.interfaces.cache.enable()
    pyfftw.interfaces.cache.set_keepalive_time(1.0)
    nthreads=6
# Bare except is deliberate best-effort: any failure during pyfftw import or
# cache setup silently falls back to numpy.
except:
    nthreads=0
# On load, build a lookup of the first MAX_ZERNIKE Zernike polynomials in
# OSA/ANSI single-index order: for each term, its radial order n, signed
# azimuthal order m, and RMS normalisation constant.
MAX_ZERNIKE = 105
ZERNIKE_N = np.empty(MAX_ZERNIKE, dtype=int)
ZERNIKE_M = np.empty(MAX_ZERNIKE, dtype=int)
ZERNIKE_NORM = np.ones(MAX_ZERNIKE)
n = 0
m = 0
for z_ix in range(MAX_ZERNIKE):
    ZERNIKE_N[z_ix], ZERNIKE_M[z_ix] = n, m
    # RMS norm is sqrt(n+1) for m == 0 and sqrt(2(n+1)) otherwise.
    ZERNIKE_NORM[z_ix] = np.sqrt((1 if m == 0 else 2)*(n + 1))
    # Advance to the next (n, m) pair: m runs -n, -n+2, ..., n.
    if m == n:
        n, m = n + 1, -(n + 1)
    else:
        m += 2
def azimuthalAverage(image, center=None, stddev=False, returnradii=False, return_nr=False,
    binsize=0.5, weights=None, steps=False, interpnan=False, left=None, right=None, return_max=False):
    """
    Calculate the azimuthally averaged radial profile.
    NB: This was found online and should be properly credited! Modified by MJI

    image - The 2D image
    center - The [x,y] pixel coordinates used as the center. The default is
             None, which then uses the center of the image (including
             fractional pixels).
    stddev - if specified, return the azimuthal standard deviation instead of the average
    returnradii - if specified, return (radii_array,radial_profile)
    return_nr - if specified, return number of pixels per radius *and* radius
    binsize - size of the averaging bin. Can lead to strange results if
        non-binsize factors are used to specify the center and the binsize is
        too large
    weights - can do a weighted average instead of a simple average if this keyword parameter
        is set. weights.shape must = image.shape. weighted stddev is undefined, so don't
        set weights and stddev.
    steps - if specified, will return a double-length bin array and radial
        profile so you can plot a step-form radial profile (which more accurately
        represents what's going on)
    interpnan - Interpolate over NAN values, i.e. bins where there is no data?
    left,right - passed to interpnan; they set the extrapolated values
    return_max - (MJI) Return the maximum index.

    If a bin contains NO DATA, it will have a NAN value because of the
    divide-by-sum-of-weights component. I think this is a useful way to denote
    lack of data, but users let me know if an alternative is prefered...
    """
    # Calculate the indices from the image
    y, x = np.indices(image.shape)
    if center is None:
        center = np.array([(x.max()-x.min())/2.0, (y.max()-y.min())/2.0])
    r = np.hypot(x - center[0], y - center[1])
    if weights is None:
        weights = np.ones(image.shape)
    elif stddev:
        raise ValueError("Weighted standard deviation is not defined.")
    # the 'bins' as initially defined are lower/upper bounds for each bin
    # so that values will be in [lower,upper)
    nbins = int(np.round(r.max() / binsize)+1)
    maxbin = nbins * binsize
    bins = np.linspace(0,maxbin,nbins+1)
    # but we're probably more interested in the bin centers than their left or right sides...
    bin_centers = (bins[1:]+bins[:-1])/2.0
    # Find out which radial bin each point in the map belongs to
    whichbin = np.digitize(r.flat,bins)
    # how many per bin (i.e., histogram)?
    # there are never any in bin 0, because the lowest index returned by digitize is 1
    nr = np.bincount(whichbin)[1:]
    # recall that bins run from 1 to nbins
    if stddev:
        radial_prof = np.array([image.flat[whichbin==b].std() for b in range(1,nbins+1)])
    elif return_max:
        # -inf sentinel keeps empty bins from raising on .max()
        radial_prof = np.array([np.append((image*weights).flat[whichbin==b],-np.inf).max() for b in range(1,nbins+1)])
    else:
        radial_prof = np.array([(image*weights).flat[whichbin==b].sum() / weights.flat[whichbin==b].sum() for b in range(1,nbins+1)])
    if interpnan:
        # Replace NaN (empty) bins by interpolating between populated bins.
        radial_prof = np.interp(bin_centers,bin_centers[radial_prof==radial_prof],radial_prof[radial_prof==radial_prof],left=left,right=right)
    if steps:
        # BUGFIX: zip() is an iterator on Python 3; it must be materialised
        # with list() before np.array (the old code produced a 0-d object
        # array and the subsequent ravel() was useless).
        xarr = np.array(list(zip(bins[:-1],bins[1:]))).ravel()
        yarr = np.array(list(zip(radial_prof,radial_prof))).ravel()
        return xarr,yarr
    elif returnradii:
        return bin_centers,radial_prof
    elif return_nr:
        return nr,bin_centers,radial_prof
    else:
        return radial_prof
def propagate_by_fresnel(wf, m_per_pix, d, wave):
"""Propagate a wave by Fresnel diffraction
Parameters
----------
wf: float array
Wavefront, i.e. a complex electric field in the scalar approximation.
m_per_pix: float
Scale of the pixels in the input wavefront in metres.
d: float
Distance to propagate the wavefront.
wave: float
Wavelength in metres.
Returns
-------
wf_new: float array
Wavefront after propagating.
"""
#Notation on Mike's board
sz = wf.shape[0]
if (wf.shape[0] != wf.shape[1]):
print("ERROR: Input wavefront must be square")
raise UserWarning
#The code below came from the board, i.e. via Huygen's principle.
#We got all mixed up when converting to Fourier transform co-ordinates.
#Co-ordinate axis of the wavefront. Not that 0 must be in the corner.
#x = (((np.arange(sz)+sz/2) % sz) - sz/2)*m_per_pix
#xy = np.meshgrid(x,x)
#rr =np.sqrt(xy[0]**2 + xy[1]**2)
#h_func = np.exp(1j*np.pi*rr**2/wave/d)
#h_ft = np.fft.fft2(h_func)
#Co-ordinate axis of the wavefront Fourier transform. Not that 0 must be in the corner.
#x is in cycles per wavefront dimension.
x = (((np.arange(sz)+sz/2) % sz) - sz/2)/m_per_pix/sz
xy = np.meshgrid(x,x)
uu =np.sqrt(xy[0]**2 + xy[1]**2)
h_ft = np.exp(1j*np.pi*uu**2*wave*d)
g_ft = np.fft.fft2(np.fft.fftshift(wf))*h_ft
wf_new = np.fft.ifft2(g_ft)
return np.fft.fftshift(wf_new)
def airy(x_in, obstruction_sz=0):
    """Airy disk electric field versus angle in units of lambda/D, optionally
    with a central circular obstruction that is a fraction of the pupil size.

    The total intensity is proportional to the area, so the peak intensity is
    proportional to the square of the area, and the peak electric field
    proportional to the area.

    Parameters
    ----------
    x_in: scalar, list or array-like
        Angular position in units of lambda/D.
    obstruction_sz: float
        Fractional size of the obstruction for a circular aperture.

    Returns
    -------
    Same container type as the input (scalar, list, or array of the input
    shape), holding the normalised field amplitude.
    """
    scalar_in = type(x_in) in (int, float)
    # Shallow-copy the input into a flat numpy array.
    if scalar_in:
        x = np.array([x_in])
    else:
        try:
            x = np.array(x_in).flatten()
        except:
            print("ERROR: x must be castable to an array")
            raise UserWarning
    positive = np.where(x > 0)[0]
    # Field is 1 at (and below) zero angle by the sinc-like limit.
    field = np.ones(x.shape)
    xp = np.pi*x[positive]
    field[positive] = 2*special.jn(1, xp)/xp
    if obstruction_sz > 0:
        # Subtract the (scaled) field of the obstruction and renormalise.
        inner = np.ones(x.shape)
        inner[positive] = 2*special.jn(1, xp*obstruction_sz)/(xp*obstruction_sz)
        field -= obstruction_sz**2 * inner
        field /= (1 - obstruction_sz**2)
    # Return the same data type as the input (within reason).
    if scalar_in:
        return field[0]
    if type(x_in) == list:
        return list(field)
    return field.reshape(np.array(x_in).shape)
def curved_wf(sz,m_per_pix,f_length=np.inf,wave=633e-9, tilt=[0.0,0.0], power=None,diam=None,defocus=None):
    """A curved (and/or tilted) wavefront centered on the *middle*
    of the python array.

    BUGFIX: the default focal length was ``np.infty``, an alias removed in
    NumPy 2.0; ``np.inf`` is the canonical spelling and behaves identically.

    Parameters
    ----------
    sz: int
        Size of the wavefront in pixels
    m_per_pix: float
        Meters per pixel
    f_length: float
        Focal length in metres; the default np.inf gives a flat wavefront.
    wave: float
        Wavelength in m
    tilt: float (optional)
        Tilt of the wavefront in radians in the x and y directions.
    power: float (optional)
        Optical power 1/f_length; overrides f_length when truthy.
    diam: float (optional)
        Beam diameter in metres, used to normalise tilt and defocus.
    defocus: float (optional)
        Defocus in wavelengths at the beam edge; overrides the power term.

    Returns
    -------
    (sz, sz) complex array exp(2*pi*i*phase).
    """
    x = np.arange(sz) - sz//2
    xy = np.meshgrid(x,x)
    rr = np.sqrt(xy[0]**2 + xy[1]**2)
    # NB "if not power" means an explicit power=0.0 also falls back to
    # 1/f_length -- kept for backwards compatibility.
    if not power:
        power = 1.0/f_length
    if not diam:
        diam = sz*m_per_pix
    # The following lines compute phase in *wavelengths*
    if defocus:
        phase = defocus*(rr*m_per_pix/diam*2)**2
    else:
        phase = 0.5*m_per_pix**2/wave*power*rr**2
    phase += tilt[0]*xy[0]*diam/sz/wave
    phase += tilt[1]*xy[1]*diam/sz/wave
    return np.exp(2j*np.pi*phase)
def zernike(sz, coeffs=[0.,0.,0.], diam=None, rms_norm=False):
    """Zernike phase map (OSA/ANSI index order) centered on the *middle*
    of the python array.

    Parameters
    ----------
    sz: int
        Size of the wavefront in pixels
    coeffs: float array
        Zernike coefficients, starting with piston.
    diam: float
        Diameter for normalisation in pixels.
    rms_norm: bool
        If True, multiply each term by its RMS normalisation constant.

    Returns
    -------
    phase: (sz, sz) float array
    """
    # BUGFIX: use math.factorial with *integer* arguments. The old code used
    # the np.math alias (removed in NumPy 2.0) and passed floats such as
    # (n+m)/2-k, which math.factorial rejects on Python >= 3.10. n+m and
    # n-m are always even here, so integer division is exact.
    from math import factorial
    x = np.arange(sz) - sz//2
    xy = np.meshgrid(x,x)
    if not diam:
        diam=sz
    # Radial coordinate normalised to the pupil radius, and azimuth.
    rr = np.sqrt(xy[0]**2 + xy[1]**2)/(diam/2)
    phi = np.arctan2(xy[0], xy[1])
    n_coeff = len(coeffs)
    phase = np.zeros((sz,sz))
    # Loop over each zernike term.
    for coeff,n,m_signed,norm in zip(coeffs,ZERNIKE_N[:n_coeff], ZERNIKE_M[:n_coeff], ZERNIKE_NORM[:n_coeff]):
        m = np.abs(m_signed)
        # Radial polynomial; the "+1" makes the range inclusive.
        term = np.zeros((sz,sz))
        for k in range(0,(n-m)//2+1):
            term += (-1)**k * factorial(n-k) / factorial(k) \
                / factorial((n+m)//2-k) / factorial((n-m)//2-k) \
                * rr**(n-2*k)
        # Azimuthal dependence: sin for negative m, cos for positive m.
        if m_signed < 0:
            term *= np.sin(m*phi)
        if m_signed > 0:
            term *= np.cos(m*phi)
        # Add to the phase
        if rms_norm:
            phase += term*coeff*norm
        else:
            phase += term*coeff
    return phase
def zernike_wf(sz, coeffs=[0.,0.,0.], diam=None, rms_norm=False):
    """Unit-amplitude wavefront exp(i*phase) built from Zernike coefficients
    (in radians), centered on the *middle* of the python array.

    Parameters
    ----------
    sz: int
        Size of the wavefront in pixels
    coeffs: float array
        Zernike coefficients, starting with piston.
    diam: float
        Diameter for normalisation in pixels.
    """
    phase = zernike(sz, coeffs, diam, rms_norm)
    return np.exp(1j*phase)
def zernike_amp(sz, coeffs=[0.,0.,0.], diam=None, rms_norm=False):
    """Amplitude map exp(phase) built from Zernike coefficients, centered on
    the *middle* of the python array.

    Parameters
    ----------
    sz: int
        Size of the wavefront in pixels
    coeffs: float array
        Zernike coefficients, starting with piston.
    diam: float
        Diameter for normalisation in pixels.
    """
    log_amp = zernike(sz, coeffs, diam, rms_norm)
    return np.exp(log_amp)
def pd_images(foc_offsets=[0,0], xt_offsets = [0,0], yt_offsets = [0,0],
    phase_zernikes=[0,0,0,0], amp_zernikes = [0], outer_diam=200, inner_diam=0, \
    stage_pos=[0,-10,10], radians_per_um=None, NA=0.58, wavelength=0.633, sz=512, \
    fresnel_focal_length=None, um_per_pix=6.0):
    """
    Create a set of simulated phase diversity images.
    Note that dimensions here are in microns.
    Parameters
    ----------
    foc_offsets: (n_images-1) numpy array
        Focus offset in radians for the second and subsequent images
    xt_offsets: (n_images-1) numpy array
        X tilt offset
    yt_offsets: (n_images-1) numpy array
        Y tilt offset
    phase_zernikes: numpy array
        Zernike terms for phase, excluding piston.
    amp_zernikes: numpy array
        Zernike terms for amplitude, including overall normalisation.
    outer_diam, inner_diam: float
        Outer and inner diameter of annular pupil in pixels. Note that a better
        model would have a (slightly) variable pupil size as the focus changes.
    radians_per_um: float
        Radians in focus term per micron of stage movement. This is
        approximately 2*np.pi * NA^2 / wavelength.
    stage_pos: (n_images) numpy array
        Nominal stage position in microns.
    fresnel_focal_length: float
        Focal length in microns if we are in the Fresnel regime. If this is None,
        a Fraunhofer calculation will be made.
    um_per_pix: float
        If we are in the Fresnel regime, we need to define the pixel scale of the
        input pupil.
    Returns
    -------
    ims: (n_images, sz, sz) numpy array of focal-plane intensity images.
    """
    #Firstly, sort out focus, and tilt offsets. This focus offset is a little of a
    #guess...
    if radians_per_um is None:
        radians_per_um = np.pi*NA**2/wavelength
    total_focus = np.array(stage_pos) * radians_per_um
    total_focus[1:] += np.array(foc_offsets)
    #Add a zero (for ref image) to the tilt offsets
    xt = np.concatenate([[0],xt_offsets])
    yt = np.concatenate([[0],yt_offsets])
    #Create the amplitude zernike array. Normalise so that the
    #image sum is zero for a evenly illuminated pupil (amplitude zernikes
    #all 0).
    # NOTE(review): `circle` comes from .utils; assumed to return a filled
    # circular mask of the given diameter with soft edges -- confirm.
    pup_even = circle(sz, outer_diam, interp_edge=True) - \
        circle(sz, inner_diam, interp_edge=True)
    pup_even /= np.sqrt(np.sum(pup_even**2))*sz
    pup = pup_even*zernike_amp(sz, amp_zernikes, diam=outer_diam)
    #Needed for the Fresnel calculation
    flux_norm = np.sum(pup**2)/np.sum(pup_even**2)
    #Prepare for fresnel propagation if needed.
    # NOTE(review): FocusingLens is expected from .utils -- confirm it is
    # exported by the wildcard import at the top of this module.
    if fresnel_focal_length is not None:
        lens = FocusingLens(sz, um_per_pix, um_per_pix, fresnel_focal_length, wavelength)
        print("Using Fresnel propagation...")
    #Now iterate through the images at different foci.
    n_ims = len(total_focus)
    ims = np.zeros( (n_ims, sz, sz) )
    for i in range(n_ims):
        #Phase zernikes for this image: piston prepended, then per-image
        #tilt (OSA indices 1, 2) and focus (OSA index 4) added.
        im_phase_zernikes = np.concatenate([[0.], phase_zernikes])
        im_phase_zernikes[1] += xt[i]
        im_phase_zernikes[2] += yt[i]
        im_phase_zernikes[4] += total_focus[i]
        wf = pup*zernike_wf(sz, im_phase_zernikes, diam=outer_diam)
        if fresnel_focal_length is None:
            #Fraunhofer regime: focal-plane intensity is |FFT|^2.
            ims[i] = np.fft.fftshift(np.abs(np.fft.fft2(wf))**2)
        else:
            #For a Fresnel propagation, we need to normalise separately,
            #because the lens class was written with inbuilt normalisation.
            ims[i] = lens.focus(wf) * flux_norm
    return ims
def fourier_wf(sz,xcyc_aperture,ycyc_aperture,amp,phase):
    """Phase aberration from a single Fourier mode, centered on the
    middle of a python array.

    Parameters
    ----------
    sz: int
        Size of the 2D array
    xcyc_aperture: float
        cycles per aperture in the x direction.
    ycyc_aperture: float
        cycles per aperture in the y direction.
    amp: float
        amplitude of the aberration in radians
    phase: float
        phase of the aberration in radians.

    Returns
    -------
    aberration: complex array (sz,sz)
        Unit-modulus field exp(i * amp * cos(zz - phase)).
    """
    coords = np.arange(sz) - sz//2
    xx, yy = np.meshgrid(coords, coords)
    zz = 2*np.pi*(xx*xcyc_aperture/sz + yy*ycyc_aperture/sz)
    # cos(phase)cos(zz) + sin(phase)sin(zz) == cos(zz - phase)
    modulation = np.cos(phase)*np.cos(zz) + np.sin(phase)*np.sin(zz)
    return np.exp(1j * amp * modulation)
def gmt(dim,widths=None,pistons=[0,0,0,0,0,0],m_pix=None):
    """This function creates a GMT pupil.
    http://www.gmto.org/Resources/GMT-ID-01467-Chapter_6_Optics.pdf
    Parameters
    ----------
    dim: int
        Size of the 2D array
    widths: float or list of floats (optional)
        diameter(s) of the primary mirror in pixels (scaled to 25.448m).
        Ignored when m_pix is given.
    pistons: list of 6 floats
        Piston phases (radians) applied to the six outer segments.
    m_pix: float (optional)
        Metres per pixel; if set, the pupil width is derived as 25.448/m_pix.
    Returns
    -------
    pupils: complex array (n_widths,dim,dim)
        One GMT pupil mask per requested width.
    """
    #The geometry is complex... with eliptical segments due to their tilt.
    #We'll just approximate by segments of approximately the right size.
    pupils=[]
    if m_pix:
        widths = 25.448/m_pix
    elif not widths:
        print("ERROR: Must set widths or m_pix")
        raise UserWarning
    #Allow a scalar width: wrap it in a list if it is not indexable.
    try:
        awidth = widths[0]
    except:
        widths = [widths]
    for width in widths:
        #Segment sizes scaled from the real GMT dimensions (metres -> pixels).
        segment_dim = width*8.27/25.448
        segment_sep = width*(8.27 + 0.3)/25.448
        obstruct = width*3.2/25.448
        #Horizontal (column) offset of the four off-axis segments.
        rollx = int(np.round(np.sqrt(3)/2.0*segment_sep))
        one_seg = circle(dim, segment_dim)
        #Central segment with its obstruction; +0j makes the array complex.
        pupil = one_seg - circle(dim, obstruct) + 0j
        # NOTE(review): the mapping of pistons[0..5] onto the six roll
        # positions below is assumed intentional -- confirm against the
        # GMT segment-numbering convention in the linked document.
        pupil += np.exp(1j*pistons[0])*np.roll(np.roll(one_seg, int(np.round(0.5*segment_sep)), axis=0),rollx, axis=1)
        pupil += np.exp(1j*pistons[1])*np.roll(np.roll(one_seg, -int(np.round(0.5*segment_sep)), axis=0),rollx, axis=1)
        pupil += np.exp(1j*pistons[4])*np.roll(np.roll(one_seg, int(np.round(0.5*segment_sep)), axis=0),-rollx, axis=1)
        pupil += np.exp(1j*pistons[3])*np.roll(np.roll(one_seg, -int(np.round(0.5*segment_sep)), axis=0),-rollx, axis=1)
        pupil += np.exp(1j*pistons[5])*np.roll(one_seg, int(segment_sep), axis=0)
        pupil += np.exp(1j*pistons[2])*np.roll(one_seg, -int(segment_sep), axis=0)
        pupils.append(pupil)
    return np.array(pupils)
#--- Start masks ---
def mask2s(dim):
    """ Returns 4 pupil masks that split the pupil into halves.

    BUGFIX: with true division (active via the __future__ import at the top
    of this module) ``dim/2`` is a float and is not a valid array index; use
    integer division instead.

    Parameters
    ----------
    dim: int
        Side length of each square mask.

    Returns
    -------
    masks: (4, dim, dim) float array
        Top, bottom, left and right half-plane masks.
    """
    half = dim//2
    masks = np.zeros( (4,dim,dim) )
    masks[0,0:half,:]=1   # top half
    masks[1,half:,:]=1    # bottom half
    masks[2,:,0:half]=1   # left half
    masks[3,:,half:]=1    # right half
    return masks
def mask6s(dim):
    """ Returns 4 pupil masks that split the pupil into halves, with a
    six-way (twelve-sector) symmetry.

    Parameters
    ----------
    dim: int
        Side length of each square mask.

    Returns
    -------
    masks: (4, dim, dim) float array
        Two complementary pairs of alternating-sector masks.
    """
    coords = np.arange(dim) - dim//2
    grid = np.meshgrid(coords, coords)
    angle = np.arctan2(grid[0], grid[1])
    # Index each pixel into one of twelve equal angular sectors.
    twelfths = ( (angle + np.pi)/2/np.pi*12).astype(int)
    masks = np.zeros( (4,dim,dim) )
    masks[0] = (twelfths//2) % 2
    masks[1] = (twelfths//2 + 1) % 2
    masks[2] = ((twelfths+1)//2) % 2
    masks[3] = ((twelfths+1)//2 + 1) % 2
    return masks
def angel_mask(sz,m_per_pix,diam=25.5):
    """Create a mask like Roger Angel et al's original GMT tilt and piston sensor.

    BUGFIX: the local result of mask6s() was previously assigned to a
    variable *named* mask6s, which shadowed the function and raised
    UnboundLocalError on every call.

    Parameters
    ----------
    sz: int
        Size of each square mask in pixels.
    m_per_pix: float
        Metres per pixel.
    diam: float
        Pupil diameter in metres.

    Returns
    -------
    masks: (3, sz, sz) array
        Inner circle plus each of two complementary outer-annulus sector
        masks, and the outer annulus itself.
    """
    diam_in_pix = diam/m_per_pix
    inner_circ = circle(sz,int(round(diam_in_pix/3)))
    outer_an = circle(sz,int(round(diam_in_pix))) - inner_circ
    sector_masks = mask6s(sz)
    masks = np.array([inner_circ + outer_an*sector_masks[2,:,:],
                      inner_circ + outer_an*sector_masks[3,:,:],
                      outer_an])
    return masks
def angel_mask_mod(sz,wave,diam=25.5):
    """Create a mask like Roger Angel et al's original GMT tilt and piston sensor, except
    we 50/50 split the inner segment.

    BUGFIXES: the body referenced an undefined name ``m_per_pix`` (raising
    NameError on every call) -- the second parameter is treated as the
    metres-per-pixel plate scale here, keeping the (misnamed) signature for
    backward compatibility -- and the local mask variable shadowed the
    mask6s() function.
    """
    # NOTE(review): `wave` appears misnamed; it is used as a plate scale
    # (m/pix) exactly like angel_mask's m_per_pix -- confirm with callers.
    m_per_pix = wave
    diam_in_pix = diam/m_per_pix
    inner_circ = circle(sz,int(round(diam_in_pix/3)))
    outer_an = circle(sz,int(round(diam_in_pix))) - inner_circ
    sector_masks = mask6s(sz)
    masks = np.array([0.5*inner_circ + outer_an*sector_masks[2,:,:],
                      0.5*inner_circ + outer_an*sector_masks[3,:,:]])
    return masks
def diversity_mask(sz,m_per_pix,defocus=2.0):
    """Create a traditional phase diversity mask: a +/- defocus pair of
    curved wavefronts.
    """
    return np.array([curved_wf(sz, m_per_pix, defocus=sign*defocus)
                     for sign in (1.0, -1.0)])
#--- End Masks ---
def km1d(sz, r_0_pix=None):
    """One-dimensional cut through a Kolmogorov phase screen.

    Algorithm (midpoint displacement, for reference):
    y(midpoint) = ( y(x1) + y(x2) )/2 + 0.4542*Z, where
    0.4542 = sqrt( 1 - 2^(5/3) / 2 )
    """
    # Size must be a power of two for the underlying FFT screen.
    if 2**int(np.log2(sz)) != sz:
        raise UserWarning("Size must be within a factor of 2")
    # Temporary code: take the first row of a full 2D screen.
    return kmf(sz, r_0_pix=r_0_pix)[0]
def kmf(sz, L_0=np.inf, r_0_pix=None):
    """Periodic wavefront produced by Kolmogorov turbulence.

    It SHOULD be normalised so that the variance at a distance of 1 pixel
    is 1 radian^2. To scale this to an r_0 of r_0_pix, multiply by
    sqrt(6.88*r_0_pix**(-5/3)). The value of 1/15.81 is (I think) a
    numerical approximation for the value in e.g. Conan00 of
    np.sqrt(0.0229/2/np.pi).

    Parameters
    ----------
    sz: int
        Size of the 2D array
    L_0: (optional) float
        The von-Karmann outer scale. If not set, the structure function
        behaves with an outer scale of approximately half (CHECK THIS!)
        pixels.
    r_0_pix: (optional) float
        The Fried r_0 parameter in units of pixels.

    Returns
    -------
    wavefront: float array (sz,sz)
        2D array wavefront, in units of radians.
    """
    # Half-plane frequency grid for the real FFT; zero frequency in the corner.
    uv = np.meshgrid(np.arange(sz/2 + 1)/float(sz),
                     (((np.arange(sz) + sz/2) % sz) - sz/2)/float(sz))
    freq2 = np.maximum(uv[1]**2 + uv[0]**2, 1e-12)
    # Random phases with the Kolmogorov -11/6 power-law amplitude (in |u|^2).
    random_phasor = np.exp(2j * np.pi * np.random.random((sz, sz//2 + 1)))
    ft_wf = random_phasor * freq2**(-11.0/12.0) * sz/15.81
    ft_wf[0, 0] = 0
    screen = np.fft.irfft2(ft_wf)
    if r_0_pix is None:
        return screen
    return screen * np.sqrt(6.88*r_0_pix**(-5/3.))
def von_karman_structure(B, r_0=1.0, L_0=1e6):
    """The Von Karman structure function, from Conan et al 2000"""
    arg = 2*np.pi*B/L_0
    return 0.1717*(r_0/L_0)**(-5/3.)*(1.005635 - arg**(5/6.)*special.kv(5/6., arg))
def test_kmf(sz,ntests):
    """Test the kmf normalisation empirically. The variance at sz/4 is down
    by a factor of 0.35 over the Kolmogorov function."""
    vars_1pix = np.zeros(ntests)
    vars_quarter = np.zeros(ntests)
    for trial in range(ntests):
        wf = kmf(sz)
        # Structure function at a 1-pixel separation (x and y averaged).
        dx2 = np.mean((wf[1:, :] - wf[:-1, :])**2)
        dy2 = np.mean((wf[:, 1:] - wf[:, :-1])**2)
        vars_1pix[trial] = 0.5*(dx2 + dy2)
        # Structure function at a quarter-array separation.
        rx2 = np.mean((np.roll(wf, sz//4, axis=0) - wf)**2)
        ry2 = np.mean((np.roll(wf, sz//4, axis=1) - wf)**2)
        vars_quarter[trial] = 0.5*(rx2 + ry2)
    print("Mean var: {0:7.3e} Sdev var: {1:7.3e}".format(np.mean(vars_1pix), np.std(vars_1pix)))
    print("Variance at sz//4 decreased by: {0:7.3f}".format(
        np.mean(vars_quarter)/np.mean(vars_1pix)/(sz/4)**(5./3.)))
def moffat(theta, hw, beta=4.0):
    """Moffat function for simulating seeing, with the same shape as theta.

    Total flux is normalised to 1 -- this only applies if the sampling of
    theta is 1 per unit area (e.g. arange(100)). From Racine (1996), beta=4
    is a good approximation for seeing.

    Parameters
    ----------
    theta: float or float array
        Angle at which to calculate the moffat profile (same units as hw)
    hw: float
        Half-width of the profile
    beta: float
        beta parameter
    """
    shape_term = (1 + (2**(1.0/beta) - 1)*(theta/hw)**2)**beta
    peak = (2.0**(1.0/beta) - 1)*(beta - 1)/np.pi/hw**2
    return peak/shape_term
def moffat2d(sz,hw, beta=4.0):
    """A 2D version of a moffat function, centred at sz/2.
    """
    coords = np.arange(sz) - sz/2.0
    xg, yg = np.meshgrid(coords, coords)
    radius = np.sqrt(xg**2 + yg**2)
    return moffat(radius, hw, beta=beta)
def snell(u, f, n_i, n_f):
    """Snell's law at an interface between two dielectrics.

    Parameters
    ----------
    u: float array(3)
        Input unit vector
    f: float array(3)
        surface normal unit vector
    n_i: float
        initial refractive index
    n_f: float
        final refractive index.

    Returns
    -------
    v: float array(3)
        Refracted unit vector.
    """
    cos_i = np.sum(u*f)
    # Unit vector tangential to the surface, in the plane of incidence.
    tangent = u - cos_i*f
    tangent /= np.sqrt(np.sum(tangent**2))
    theta_i = np.arccos(cos_i)
    theta_f = np.arcsin(n_i*np.sin(theta_i)/n_f)
    return tangent*np.sin(theta_f) + f*np.cos(theta_f)
def grating_sim(u, l, s, ml_d, refract=False):
    """Output unit vector for a ray hitting a diffraction grating.

    Math: v . l = u . l (reflection)
          v . s = u . s + ml_d
    The blaze wavelength is when m*lambda = 2 d sin(theta), i.e.
    ml_d = 2 sin(theta).

    Parameters
    ----------
    u: float array(3)
        initial unit vector
    l: float array(3)
        unit vector along grating lines
    s: float array(3)
        unit vector along grating surface, perpendicular to lines
    ml_d: float
        order * lambda/d
    refract: bool
        Is the grating a refractive grating?

    Returns
    -------
    v: float array(3)
        Diffracted unit vector.
    """
    if (np.abs(np.sum(l*s)) > 1e-3):
        print('Error: input l and s must be orthogonal!')
        raise UserWarning
    normal = np.cross(s,l)
    # A refractive grating sends the ray through the surface.
    if refract:
        normal = -normal
    comp_l = np.sum(u*l)
    comp_s = np.sum(u*s) + ml_d
    # Remaining component along the normal keeps |v| == 1.
    comp_n = np.sqrt(1 - comp_l**2 - comp_s**2)
    return comp_l*l + comp_s*s + comp_n*normal
def join_bessel(U,V,j):
    """Continuity residual for joining Bessel J (core) and Bessel K
    (cladding) solutions of the Laplace equation in cylindrical coordinates:
    both the electric field and its derivative must be continuous at the
    fibre core edge. The U value that zeroes this residual gives a bound
    mode's effective index."""
    W = np.sqrt(V**2 - U**2)
    core_term = U*special.jn(j+1,U)*special.kn(j,W)
    clad_term = W*special.kn(j+1,W)*special.jn(j,U)
    return core_term - clad_term
def neff(V, accurate_roots=True):
    """For a cylindrical fiber, find the U values (hence effective indices)
    of all bound modes for a given fiber V number.

    Parameters
    ----------
    V: float
        The fiber V-number.
    accurate_roots: bool (optional)
        Do we polish the roots with a bracketing solver, or keep the
        first-order linear estimate of each zero crossing?

    Returns
    -------
    all_roots: float array
        U value of every bound mode, grouped by azimuthal order j.
    n_per_j: int array
        Number of roots found for each azimuthal order.
    """
    delu = 0.04
    numu = int(V/delu)
    U = np.linspace(delu/2, V - 1e-6, numu)
    W = np.sqrt(V**2 - U**2)
    all_roots = np.array([])
    n_per_j = np.array([], dtype=int)
    n_modes = 0
    for j in range(int(V + 1)):
        # Continuity residual on the U grid; sign changes bracket roots.
        resid = U*special.jn(j+1, U)*special.kn(j, W) \
            - W*special.kn(j+1, W)*special.jn(j, U)
        brackets = np.where(resid[0:-1]*resid[1:] < 0)[0]
        # First-order (linear) estimate of each zero crossing.
        roots = U[brackets] - resid[brackets]*(U[brackets+1] - U[brackets]) \
            /(resid[brackets+1] - resid[brackets])
        if accurate_roots:
            # Polish each root with a bracketing solver on join_bessel.
            for ix in range(len(brackets)):
                roots[ix] = optimize.brenth(join_bessel, U[brackets[ix]],
                                            U[brackets[ix]+1], args=(V, j))
        if (j == 0):
            n_modes = n_modes + len(roots)
        else:
            # Azimuthal modes come in sin/cos pairs, hence the factor 2
            # in the mode count (n_per_j keeps the raw root count).
            n_modes = n_modes + 2*len(roots)
        n_per_j = np.append(n_per_j, len(roots))
        all_roots = np.append(all_roots, roots)
    return all_roots, n_per_j
def mode_2d(V, r, j=0, n=0, sampling=0.3, sz=1024):
    """Create a 2D mode profile.

    Parameters
    ----------
    V: Fiber V number
    r: core radius in microns
    sampling: microns per pixel
    n: radial order of the mode (0 is fundumental)
    j: azimuthal order of the mode (0 is pure radial modes)
    TODO: Nonradial modes."""
    # First, find the U values of all bound modes.
    u_all, n_per_j = neff(V)
    azi = np.abs(j)
    # Negative j selects the sin-like orientation via a 90-degree offset.
    th_offset = (j < 0) * np.pi/2
    if n >= n_per_j[azi]:
        print("ERROR: this mode is not bound!")
        raise UserWarning
    # Convert from float to be able to index
    sz = int(sz)
    U0 = u_all[np.sum(n_per_j[0:azi]) + n]
    W0 = np.sqrt(V**2 - U0**2)
    # Radial coordinate in units of the core radius.
    x = (np.arange(sz) - sz/2)*sampling/r
    xg, yg = np.meshgrid(x, x)
    rho = np.sqrt(xg**2 + yg**2)
    th = np.arctan2(xg, yg) + th_offset
    inside = rho < 1
    the_mode = np.zeros( (sz,sz) )
    # Bessel J in the core, matched Bessel K in the cladding.
    the_mode[inside] = special.jn(azi, rho[inside]*U0)
    scale = special.jn(azi, U0)/special.kn(azi, W0)
    the_mode[~inside] = scale * special.kn(azi, rho[~inside]*W0)
    # Normalise power to 1 and apply the azimuthal phase winding.
    return the_mode/np.sqrt(np.sum(the_mode**2))*np.exp(1j*azi*th)
def compute_v_number(wavelength_in_mm, core_radius, numerical_aperture):
    """Computes the V number (a kind of normalized optical frequency) of an
    optical fibre: V = (2*pi/lambda) * a * NA.

    Parameters
    ----------
    wavelength_in_mm: float
        The wavelength of light in mm
    core_radius: float
        The core radius of the fibre in mm
    numerical_aperture: float
        The numerical aperture of the optical fibre, defined by the
        refractive indices of the core and cladding

    Returns
    -------
    v: float
        The v number of the fibre
    """
    wavenumber = 2 * np.pi / wavelength_in_mm
    return wavenumber * core_radius * numerical_aperture
def shift_and_ft(im):
    """Sub-pixel shift an image's peak to the origin, then Fourier-transform.

    The sub-pixel accuracy comes from 3x Fourier oversampling before the
    integer roll, followed by rebinning back to the original size.

    Parameters
    ----------
    im: (ny,nx) float array

    Returns
    -------
    ftim: (ny,nx/2+1) complex array
    """
    ny, nx = im.shape[0], im.shape[1]
    oversampled = regrid_fft(im, (3*ny, 3*nx))
    peak = np.unravel_index(oversampled.argmax(), oversampled.shape)
    centred = np.roll(np.roll(oversampled, -peak[0] + 1, axis=0),
                      -peak[1] + 1, axis=1)
    return np.fft.rfft2(rebin(centred, (ny, nx)))
def rebin(a, shape):
    """Re-bins an image to a new (smaller) image by summing blocks.

    Originally from:
    http://stackoverflow.com/questions/8090229/resize-with-averaging-or-rebin-a-numpy-2d-array

    Parameters
    ----------
    a: array
        Input image
    shape: (xshape,yshape)
        New shape
    """
    by, bx = a.shape[0]//shape[0], a.shape[1]//shape[1]
    # Fold each output pixel's block into its own pair of axes, then sum.
    return a.reshape(shape[0], by, shape[1], bx).sum(axis=(1, 3))
def correct_tip_tilt(turbulent_wf, pupil, size):
    """Remove the pupil-weighted tip and tilt (horizontal and vertical
    slopes) from a turbulent wavefront.

    TODO: Only compute turbulence over the square immediately surrounding
    the pupil to save on unnecessary computation.

    Parameters
    ----------
    turbulent_wf: np.array([[...]...])
        2D square of numbers representing a turbulent patch of atmosphere
    pupil: np.array([[...]...])
        The pupil of the telescope receiving the light passing through the
        turbulence.
    size: int
        Size of input_wf per side, preferentially a power of two (npix=2**n)

    Return
    ------
    corrected_wf: np.array([[...]...])
        Tip/Tilt corrected turbulent_wf
    """
    coords = np.arange(size) - size/2
    xg, yg = np.meshgrid(coords, coords)
    # Pupil-weighted slope basis functions.
    xbasis = xg*pupil
    ybasis = yg*pupil
    # Least-squares projections of the wavefront onto each basis.
    xcoeff = np.sum(xbasis*turbulent_wf)/np.sum(xbasis**2)
    ycoeff = np.sum(ybasis*turbulent_wf)/np.sum(ybasis**2)
    return turbulent_wf*pupil - ybasis*ycoeff - xbasis*xcoeff
def apply_and_scale_turbulent_ef(turbulence, npix, wavelength, dx, seeing):
    """ Scale a Kolmogorov phase screen by the seeing strength and convert
    it to an electric field.

    Parameters
    ----------
    turbulence: np.array([[...]...])
        Pre-generated Kolmogorov turbulence screen (radians at unit scale).
    npix: integer
        The size of the square of Kolmogorov turbulence generated
    wavelength: float
        The wavelength in mm. Amount of atmospheric distortion depends on
        the wavelength.
    dx: float
        Resolution in mm/pixel
    seeing: float
        Seeing in arcseconds before magnification

    Returns
    -------
    turbulent_ef or 1.0: np.array([[...]...]) or 1.0
        Array of phase shifts in imperfect seeing, otherwise 1.0,
        indicating no change to the incident wave.
    """
    if seeing <= 0.0:
        # Perfect seeing: do not apply phase distortions --> multiply by unity.
        return 1.0
    # Convert seeing to radians
    seeing_in_radians = np.radians(seeing/3600.)
    # Calculate r0 (Fried's parameter), a measure of the seeing strength.
    r0 = 0.98 * wavelength / seeing_in_radians
    # Scale the screen and convert the wavefront to an electric field.
    wf_in_radians = turbulence * np.sqrt(6.88*(dx/r0)**(5.0/3.0))
    return np.exp(1.0j * wf_in_radians)
def calculate_fibre_mode(wavelength_in_mm, fibre_core_radius, numerical_aperture, npix, dx):
    """Computes the fundamental mode of the optical fibre.

    Parameters
    ----------
    wavelength_in_mm: float
        The wavelength in mm
    fibre_core_radius: float
        The radius of the fibre core in mm
    numerical_aperture: float
        The numerical aperture of the fibre
    npix: int
        Size of input_wf per side, preferentially a power of two (npix=2**n)
    dx: float
        Resolution of the wave in mm/pixel

    Returns
    -------
    fibre_mode: np.array([[...]...])
        The mode of the optical fibre
    """
    # The V number (normalised frequency) determines which modes are guided.
    v_number = compute_v_number(wavelength_in_mm, fibre_core_radius, numerical_aperture)
    # Sample the corresponding fundamental mode on an npix x npix grid.
    return mode_2d(v_number, fibre_core_radius, sampling=dx, sz=npix)
def compute_coupling(npix, dx, electric_field, lens_width, fibre_mode, x_offset, y_offset):
    """Computes the coupling between the electric field and the optical fibre
    using an overlap integral.

    Parameters
    ----------
    npix: int
        Size of input_wf per side, preferentially a power of two (npix=2**n)
    dx: float
        Resolution of the wave in mm/pixel
    electric_field: np.array([[...]...])
        The electric field at the fibre plane
    lens_width: float
        The width of a single microlens (used for minimising unnecessary calculations)
    fibre_mode: np.array([[...]...])
        The mode of the optical fibre
    x_offset: int
        x offset of the focal point at the fibre plane relative to the centre of the microlens.
    y_offset: int
        y offset of the focal point at the fibre plane relative to the centre of the microlens.

    Returns
    -------
    coupling: float
        The coupling between the fibre mode and the electric_field (Max 1)
    """
    npix = int(npix)
    # Half-width of the microlens footprint in pixels.
    half_lens_pix = int(lens_width / dx / 2)
    lo = npix//2 - half_lens_pix
    hi = npix//2 + half_lens_pix
    # Crop the fibre mode to the microlens footprint, shifted by the
    # focal-spot offsets.
    mode_crop = fibre_mode[(lo + x_offset):(hi + x_offset), (lo + y_offset):(hi + y_offset)]
    # Overlap integral: |<mode|field>|^2 / (<mode|mode> <field|field>).
    denominator = np.sum(np.abs(mode_crop)**2) * np.sum(np.abs(electric_field)**2)
    numerator = np.abs(np.sum(mode_crop * np.conj(electric_field)))**2
    return numerator / denominator
def nglass(l, glass='sio2'):
    """Refractive index of fused silica and other glasses.

    Uses Sellmeier coefficients for most glasses (note that C is in
    microns^{-2}); 'noa61' uses a direct polynomial fit instead.

    Parameters
    ----------
    l: float or float array
        wavelength in microns
    glass: string
        Glass key, e.g. 'sio2', 'bk7', 'si', 'znse', 'noa61', ...

    Returns
    -------
    n: float array
        Refractive index at each wavelength.

    Raises
    ------
    UserWarning
        If the glass type is unknown.
    """
    try:
        nl = len(l)
    # BUG FIX: only catch TypeError (a scalar has no len()) instead of a bare
    # except that would silently swallow unrelated errors.
    except TypeError:
        l = [l]
        nl = 1
    l = np.array(l)
    if (glass == 'sio2'):
        B = np.array([0.696166300, 0.407942600, 0.897479400])
        C = np.array([4.67914826e-3,1.35120631e-2,97.9340025])
    elif (glass == 'bk7'):
        B = np.array([1.03961212,0.231792344,1.01046945])
        C = np.array([6.00069867e-3,2.00179144e-2,1.03560653e2])
    elif (glass == 'nf2'):
        B = np.array( [1.39757037,1.59201403e-1,1.26865430])
        C = np.array( [9.95906143e-3,5.46931752e-2,1.19248346e2])
    elif (glass == 'nsf11'):
        B = np.array([1.73759695E+00, 3.13747346E-01, 1.89878101E+00])
        C = np.array([1.31887070E-02, 6.23068142E-02, 1.55236290E+02])
    elif (glass == 'ncaf2'):
        B = np.array([0.5675888, 0.4710914, 3.8484723])
        C = np.array([0.050263605, 0.1003909, 34.649040])**2
    elif (glass == 'mgf2'):
        B = np.array([0.48755108,0.39875031,2.3120353])
        C = np.array([0.04338408,0.09461442,23.793604])**2
    elif (glass == 'npk52a'):
        B = np.array([1.02960700E+00,1.88050600E-01,7.36488165E-01])
        C = np.array([5.16800155E-03,1.66658798E-02,1.38964129E+02])
    elif (glass == 'psf67'):
        B = np.array([1.97464225E+00,4.67095921E-01,2.43154209E+00])
        C = np.array([1.45772324E-02,6.69790359E-02,1.57444895E+02])
    elif (glass == 'npk51'):
        B = np.array([1.15610775E+00,1.53229344E-01,7.85618966E-01])
        C = np.array([5.85597402E-03,1.94072416E-02,1.40537046E+02])
    elif (glass == 'nfk51a'):
        B = np.array([9.71247817E-01,2.16901417E-01,9.04651666E-01])
        C = np.array([4.72301995E-03,1.53575612E-02,1.68681330E+02])
    elif (glass == 'si'): #https://refractiveindex.info/?shelf=main&book=Si&page=Salzberg
        B = np.array([10.6684293,0.0030434748,1.54133408])
        C = np.array([0.301516485,1.13475115,1104])**2
    #elif (glass == 'zns'): #https://refractiveindex.info/?shelf=main&book=ZnS&page=Debenham
    #    B = np.array([7.393, 0.14383, 4430.99])
    #    C = np.array([0, 0.2421, 36.71])**2
    elif (glass == 'znse'): #https://refractiveindex.info/?shelf=main&book=ZnSe&page=Connolly
        B = np.array([4.45813734,0.467216334,2.89566290])
        C = np.array([0.200859853,0.391371166,47.1362108])**2
    elif (glass == 'noa61'):
        # Direct (non-Sellmeier) dispersion formula; l converted to nm.
        n = 1.5375 + 8290.45/(l*1000)**2 - 2.11046/(l*1000)**4
        return n
    else:
        print("ERROR: Unknown glass {0:s}".format(glass))
        raise UserWarning
    # Sellmeier equation: n^2 = 1 + sum_i B_i l^2 / (l^2 - C_i)
    n = np.ones(nl)
    for i in range(len(B)):
        n += B[i]*l**2/(l**2 - C[i])
    return np.sqrt(n)
#The following is directly from refractiveindex.info, and copied here because of
#UTF-8 encoding that doesn't seem to work with my python 2.7 installation.
#Author: Mikhail Polyanskiy
#(Ciddor 1996, https://doi.org/10.1364/AO.35.001566)
def Z(T,p,xw):
    """Compressibility factor of moist air (Ciddor 1996, Eq. 12).

    Parameters
    ----------
    T: temperature in K
    p: pressure in Pa
    xw: molar fraction of water vapour
    """
    t = T - 273.15  # temperature in deg C
    # Empirical coefficients from Ciddor (1996).
    a0 = 1.58123e-6   #K.Pa^-1
    a1 = -2.9331e-8   #Pa^-1
    a2 = 1.1043e-10   #K^-1.Pa^-1
    b0 = 5.707e-6     #K.Pa^-1
    b1 = -2.051e-8    #Pa^-1
    c0 = 1.9898e-4    #K.Pa^-1
    c1 = -2.376e-6    #Pa^-1
    d = 1.83e-11      #K^2.Pa^-2
    e = -0.765e-8     #K^2.Pa^-2
    # First-order polynomial in t and xw, then the quadratic pressure term.
    poly = a0 + a1*t + a2*t**2 + (b0 + b1*t)*xw + (c0 + c1*t)*xw**2
    return 1 - (p/T)*poly + (p/T)**2*(d + e*xw**2)
def nm1_air(wave,t,p,h,xc):
    """Refractivity (n - 1) of moist air, following Ciddor (1996).

    Parameters
    ----------
    wave: wavelength, 0.3 to 1.69 mu m
    t: temperature, -40 to +100 deg C
    p: pressure, 80000 to 120000 Pa
    h: fractional humidity, 0 to 1
    xc: CO2 concentration, 0 to 2000 ppm
    """
    sigma = 1/wave #mu m^-1 (wavenumber)
    T= t + 273.15 #Temperature deg C -> K
    R = 8.314510 #gas constant, J/(mol.K)
    # Dispersion coefficients for standard dry air (Ciddor Eq. 1).
    k0 = 238.0185 #mu m^-2
    k1 = 5792105 #mu m^-2
    k2 = 57.362 #mu m^-2
    k3 = 167917 #mu m^-2
    # Dispersion coefficients for water vapour (Ciddor Eq. 3).
    w0 = 295.235 #mu m^-2
    w1 = 2.6422 #mu m^-2
    w2 = -0.032380 #mu m^-4
    w3 = 0.004028 #mu m^-6
    # Saturation-vapour-pressure polynomial coefficients.
    A = 1.2378847e-5 #K^-2
    B = -1.9121316e-2 #K^-1
    C = 33.93711047
    D = -6.3431645e3 #K
    # Enhancement-factor coefficients.
    alpha = 1.00062
    beta = 3.14e-8 #Pa^-1,
    gamma = 5.6e-7 #deg C^-2
    #saturation vapor pressure of water vapor in air at temperature T
    if(t>=0):
        svp = np.exp(A*T**2 + B*T + C + D/T) #Pa
    else:
        # Over ice, for sub-zero temperatures.
        svp = 10**(-2663.5/T+12.537)
    #enhancement factor of water vapor in air
    f = alpha + beta*p + gamma*t**2
    #molar fraction of water vapor in moist air
    xw = f*h*svp/p
    #refractive index of standard air at 15 deg C, 101325 Pa, 0% humidity, 450 ppm CO2
    nas = 1 + (k1/(k0-sigma**2)+k3/(k2-sigma**2))*1e-8
    #refractive index of standard air at 15 deg C, 101325 Pa, 0% humidity, xc ppm CO2
    naxs = 1 + (nas-1) * (1+0.534e-6*(xc-450))
    #refractive index of water vapor at standard conditions (20 deg C, 1333 Pa)
    nws = 1 + 1.022*(w0+w1*sigma**2+w2*sigma**4+w3*sigma**6)*1e-8
    Ma = 1e-3*(28.9635 + 12.011e-6*(xc-400)) #molar mass of dry air, kg/mol
    Mw = 0.018015 #molar mass of water vapor, kg/mol
    Za = Z(288.15, 101325, 0) #compressibility of dry air
    Zw = Z(293.15, 1333, 1) #compressibility of pure water vapor
    #Eq.4 with (T,P,xw) = (288.15, 101325, 0)
    rhoaxs = 101325*Ma/(Za*R*288.15) #density of standard air
    #Eq 4 with (T,P,xw) = (293.15, 1333, 1)
    rhows = 1333*Mw/(Zw*R*293.15) #density of standard water vapor
    # two parts of Eq.4: rho=rhoa+rhow
    rhoa = p*Ma/(Z(T,p,xw)*R*T)*(1-xw) #density of the dry component of the moist air
    rhow = p*Mw/(Z(T,p,xw)*R*T)*xw #density of the water vapor component
    # Combine the dry-air and water-vapour contributions (Ciddor Eq. 5).
    nprop = (rhoa/rhoaxs)*(naxs-1) + (rhow/rhows)*(nws-1)
    return nprop
class FresnelPropagator(object):
    """Propagate a wave by Fresnel diffraction.

    The transfer function for a fixed (size, scale, distance, wavelength) is
    precomputed at construction, so repeated propagations only cost two FFTs.
    """
    def __init__(self,sz,m_per_pix, d, wave,nthreads=nthreads):
        """Initiate this fresnel_propagator for a particular wavelength,
        distance etc.

        Parameters
        ----------
        sz: int
            Wavefront side length in pixels.
        m_per_pix: float
            Scale of the pixels in the input wavefront in metres.
        d: float
            Distance to propagate the wavefront.
        wave: float
            Wavelength in metres.
        nthreads: int
            Number of threads (>0 uses pyfftw; otherwise numpy FFTs).
        """
        self.sz = sz
        self.nthreads = nthreads
        # Co-ordinate axis of the wavefront Fourier transform. Note that 0
        # must be in the corner. x is in cycles per wavefront dimension.
        x = (((np.arange(sz)+sz/2) % sz) - sz/2)/m_per_pix/sz
        xy = np.meshgrid(x,x)
        uu = np.sqrt(xy[0]**2 + xy[1]**2)
        # Fresnel transfer function in the Fourier domain for distance d.
        self.h_ft = np.exp(1j*np.pi*uu**2*wave*d)

    def propagate(self,wf):
        """Propagate a wavefront, according to the parameters established on
        the __init__. No error checking for speed.

        Parameters
        ----------
        wf: complex array
            Wavefront, i.e. a complex electric field in the scalar approximation.

        Returns
        -------
        wf_new: complex array
            Wavefront after propagating.
        """
        # BUG FIX: the original test was "wf.shape[0] != self.sz | wf.shape[1]
        # != self.sz" -- "|" binds tighter than "!=", so it evaluated as a
        # chained comparison against (self.sz | wf.shape[1]) rather than
        # checking both dimensions.
        if (wf.shape[0] != self.sz) or (wf.shape[1] != self.sz):
            print("ERROR: Input wavefront must match the size!")
            raise UserWarning
        if (self.nthreads>0):
            g_ft = pyfftw.interfaces.numpy_fft.fft2(wf,threads=self.nthreads)*self.h_ft
            wf_new = pyfftw.interfaces.numpy_fft.ifft2(g_ft,threads=self.nthreads)
        else:
            g_ft = np.fft.fft2(wf)*self.h_ft
            wf_new = np.fft.ifft2(g_ft)
        return wf_new
class FocusingLens(FresnelPropagator):
    """A Fresnel propagator combined with a thin lens, bringing a pupil to focus."""
    def __init__(self,sz,m_per_pix_pup, m_per_pix_im, f, wave,nthreads=nthreads):
        """Use Fresnel Diffraction to come to focus.

        We do this by creating an equivalent lens of focal length mag * f,
        where mag is the magnification between pupil and image plane, so the
        image-plane sampling matches m_per_pix_im.
        """
        # Thin-lens focal length adjusted for pupil-to-image magnification.
        f_equiv = f * m_per_pix_pup/m_per_pix_im
        # Initialise the parent propagator over the equivalent distance.
        super(FocusingLens, self).__init__(sz, m_per_pix_pup, f_equiv, wave, nthreads=nthreads)
        # Quadratic phase screen representing the lens itself.
        self.lens = curved_wf(sz, m_per_pix_pup, f_length=f_equiv, wave=wave)
        self.sz = sz

    def focus(self, wf):
        """Return a normalised image"""
        if (wf.shape[0] != self.sz) or (wf.shape[1] != self.sz):
            raise UserWarning("Incorrect Wavefront Shape!")
        # Apply the lens phase, propagate, and normalise the intensity.
        image = np.abs(self.propagate(wf*self.lens))**2
        return image/np.sum(image)
def focusing_propagator(sz, m_per_pix_pup, m_per_pix_im, f, wave):
    """Create a propagator that propagates to focus, adjusting
    the focal length for the new image scale using the thin lens formula.
    The new lens has a focal length of mag * f, where mag is the magnification.

    FIXME: Remove this if FocusingLens works.

    Returns
    -------
    lens: (sz,sz) numpy complex array
        Multiply the final pupil by this prior to applying the propagator
        to create the image
    to_focus: FresnelPropagator
        use np.abs(to_focus.propagate(wf*lens))**2 to create the image
    """
    #Create a new focal length that is longer according to the magnification.
    f_new = f * m_per_pix_pup/m_per_pix_im
    #Create the curved wavefront.
    lens = curved_wf(sz, m_per_pix_pup, f_length=f_new, wave=wave)
    # BUG FIX: the original passed the undefined name "m_per_pix" here,
    # raising a NameError; the pupil-plane scale is m_per_pix_pup.
    to_focus = FresnelPropagator(sz, m_per_pix_pup, f_new, wave)
    return lens, to_focus
class Base(object):
    # NOTE(review): Base/ChildA/ChildB appear to be leftover demonstration
    # scaffolding (comparing explicit vs super() initialisation) -- confirm
    # they are unused before removing.
    def __init__(self):
        print("Base created")
class ChildA(Base):
    # Demo subclass: initialises the parent by calling it explicitly by name.
    def __init__(self):
        Base.__init__(self)
class ChildB(Base):
    # Demo subclass: initialises the parent via super() (Python-2 style).
    def __init__(self):
        super(ChildB, self).__init__()
def fresnel_reflection(n1, n2, theta=0):
    """Fresnel power reflection coefficients at a dielectric interface.

    Parameters
    ----------
    n1: float
        refractive index of the incident medium
    n2: float
        refractive index of the transmitting medium
    theta: float
        incidence angle in degrees

    Returns
    -------
    Rs: float
        s (perpendicular) polarisation power reflection
    Rp: float
        p (parallel) polarisation power reflection
    """
    theta_rad = np.radians(theta)
    cos_i = np.cos(theta_rad)
    # cos(theta_t) of the transmitted ray, from Snell's law.
    cos_t = np.sqrt(1 - (n1/n2*np.sin(theta_rad))**2)
    Rs = ((n1*cos_i - n2*cos_t)/(n1*cos_i + n2*cos_t))**2
    Rp = ((n1*cos_t - n2*cos_i)/(n1*cos_t + n2*cos_i))**2
    return Rs, Rp
| {
"repo_name": "mikeireland/opticstools",
"path": "opticstools/opticstools.py",
"copies": "1",
"size": "45771",
"license": "mit",
"hash": 103231941495800540,
"line_mean": 34.4539116964,
"line_max": 142,
"alpha_frac": 0.5978239497,
"autogenerated": false,
"ratio": 2.9794948574404376,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40773188071404376,
"avg_score": null,
"num_lines": null
} |
"""A selection of useful functions for optics, especially Fourier optics. The
documentation is designed to be used with sphinx (still lots to do)
Note that this comes directly from a preliminary version of the astro-optics
repository. TODO: Replace this with either a release version of astro-optics
or an appropriate link.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from scipy import special
from scipy import optimize
def azimuthalAverage(image, center=None, stddev=False, returnradii=False, return_nr=False,
    binsize=0.5, weights=None, steps=False, interpnan=False, left=None, right=None, return_max=False):
    """
    Calculate the azimuthally averaged radial profile.
    NB: This was found online and should be properly credited! Modified by MJI

    image - The 2D image
    center - The [x,y] pixel coordinates used as the center. The default is
             None, which then uses the center of the image (including
             fractional pixels).
    stddev - if specified, return the azimuthal standard deviation instead of the average
    returnradii - if specified, return (radii_array,radial_profile)
    return_nr   - if specified, return number of pixels per radius *and* radius
    binsize - size of the averaging bin.  Can lead to strange results if
        non-binsize factors are used to specify the center and the binsize is
        too large
    weights - can do a weighted average instead of a simple average if this keyword parameter
        is set.  weights.shape must = image.shape.  weighted stddev is undefined, so don't
        set weights and stddev.
    steps - if specified, will return a double-length bin array and radial
        profile so you can plot a step-form radial profile (which more accurately
        represents what's going on)
    interpnan - Interpolate over NAN values, i.e. bins where there is no data?
    left,right - passed to interpnan; they set the extrapolated values
    return_max - (MJI) Return the maximum index.

    If a bin contains NO DATA, it will have a NAN value because of the
    divide-by-sum-of-weights component.  I think this is a useful way to denote
    lack of data, but users let me know if an alternative is preferred...
    """
    # Calculate the indices from the image
    y, x = np.indices(image.shape)
    if center is None:
        center = np.array([(x.max()-x.min())/2.0, (y.max()-y.min())/2.0])
    r = np.hypot(x - center[0], y - center[1])
    if weights is None:
        weights = np.ones(image.shape)
    elif stddev:
        raise ValueError("Weighted standard deviation is not defined.")
    # the 'bins' as initially defined are lower/upper bounds for each bin
    # so that values will be in [lower,upper)
    nbins = int(np.round(r.max() / binsize)+1)
    maxbin = nbins * binsize
    bins = np.linspace(0,maxbin,nbins+1)
    # but we're probably more interested in the bin centers than their left or right sides...
    bin_centers = (bins[1:]+bins[:-1])/2.0
    # Find out which radial bin each point in the map belongs to
    whichbin = np.digitize(r.flat,bins)
    # how many per bin (i.e., histogram)?
    # there are never any in bin 0, because the lowest index returned by digitize is 1
    nr = np.bincount(whichbin)[1:]
    # recall that bins are from 1 to nbins.
    # BUG FIX: xrange does not exist under Python 3; range behaves identically
    # here (and also works under Python 2).
    if stddev:
        radial_prof = np.array([image.flat[whichbin==b].std() for b in range(1,nbins+1)])
    elif return_max:
        radial_prof = np.array([np.append((image*weights).flat[whichbin==b],-np.inf).max() for b in range(1,nbins+1)])
    else:
        radial_prof = np.array([(image*weights).flat[whichbin==b].sum() / weights.flat[whichbin==b].sum() for b in range(1,nbins+1)])
    if interpnan:
        # NaN != NaN, so "radial_prof==radial_prof" selects the non-NaN bins.
        radial_prof = np.interp(bin_centers,bin_centers[radial_prof==radial_prof],radial_prof[radial_prof==radial_prof],left=left,right=right)
    if steps:
        # BUG FIX: under Python 3, zip() returns an iterator; materialise it
        # with list() so np.array() builds a proper 2D array.
        xarr = np.array(list(zip(bins[:-1],bins[1:]))).ravel()
        yarr = np.array(list(zip(radial_prof,radial_prof))).ravel()
        return xarr,yarr
    elif returnradii:
        return bin_centers,radial_prof
    elif return_nr:
        return nr,bin_centers,radial_prof
    else:
        return radial_prof
def fresnel(wf, m_per_pix, d, wave):
    """Propagate a wave by Fresnel diffraction

    Parameters
    ----------
    wf: float array
        Wavefront, i.e. a complex electric field in the scalar approximation.
    m_per_pix: float
        Scale of the pixels in the input wavefront in metres.
    d: float
        Distance to propagate the wavefront.
    wave: float
        Wavelength in metres.

    Returns
    -------
    wf_new: float array
        Wavefront after propagating.
    """
    sz = wf.shape[0]
    if wf.shape[0] != wf.shape[1]:
        print("ERROR: Input wavefront must be square")
        raise UserWarning
    # Spatial-frequency axis with 0 in the corner (FFT ordering), in cycles
    # per metre.
    freq = (((np.arange(sz)+sz/2) % sz) - sz/2)/m_per_pix/sz
    fgrid = np.meshgrid(freq, freq)
    # Radial spatial frequency.
    rho = np.sqrt(fgrid[0]**2 + fgrid[1]**2)
    # Fresnel transfer function applied in the Fourier domain (equivalent to
    # convolving with the Huygens quadratic-phase kernel).
    transfer = np.exp(1j*np.pi*rho**2*wave*d)
    propagated = np.fft.ifft2(np.fft.fft2(np.fft.fftshift(wf))*transfer)
    return np.fft.fftshift(propagated)
def curved_wf(sz,m_per_pix,f_length,wave):
    """A quadratic-phase (lens-like) wavefront centered on the *middle*
    of the python array.

    The phase is phi = alpha*n**2 with alpha = 0.5*m_per_pix**2/wave/f_length,
    i.e. the paraxial phase of a lens of focal length f_length.
    """
    coords = np.arange(sz) - sz/2
    grid = np.meshgrid(coords, coords)
    # Radial distance from the array centre, in pixels.
    radius = np.sqrt(grid[0]**2 + grid[1]**2)
    # Quadratic phase in units of waves.
    phase_waves = 0.5*m_per_pix**2/wave/f_length*radius**2
    return np.exp(2j*np.pi*phase_waves)
def kmf(sz):
    """This function creates a periodic wavefront produced by Kolmogorov turbulence.
    It SHOULD be normalised so that the variance at a distance of 1 pixel is
    1 radian^2, but this is totally wrong now. The correct normalisation comes
    from an empirical calculation, scaled like in the IDL code.

    Parameters
    ----------
    sz: int
        Size of the 2D array

    Returns
    -------
    wavefront: float array (sz,sz)
        2D array wavefront.
    """
    # Spatial-frequency grid for a real FFT: sz//2+1 columns, sz rows wrapped
    # so that zero frequency sits in the corner.
    # BUG FIX: sz/2 is a float under Python 3; np.random.random requires an
    # integer shape, so floor division is used throughout (identical to the
    # original behaviour under Python 2 integer division).
    xy = np.meshgrid(np.arange(sz//2 + 1)/float(sz), (((np.arange(sz) + sz//2) % sz)-sz//2)/float(sz))
    dist2 = np.maximum( xy[1]**2 + xy[0]**2, 1e-12)
    # Random phases with a Kolmogorov power-law amplitude (-11/6 power
    # spectrum, i.e. -11/12 on the amplitude), empirically scaled by sz/15.81.
    ft_wf = np.exp(2j * np.pi * np.random.random((sz,sz//2+1)))*dist2**(-11.0/12.0)*sz/15.81
    # Zero the DC term so the returned wavefront has zero mean.
    ft_wf[0,0]=0
    return np.fft.irfft2(ft_wf)
def test_kmf(sz,ntests):
    """Empirically check the kmf() normalisation by printing the mean and
    standard deviation (over ntests realisations) of the one-pixel-separation
    structure-function variance.

    Parameters
    ----------
    sz: int
        Size of each test wavefront.
    ntests: int
        Number of random realisations to average over.
    """
    # Renamed from 'vars', which shadowed the builtin vars().
    variances = np.zeros(ntests)
    for i in range(ntests):
        wf = kmf(sz)
        # Mean-square difference between pixels one apart, averaged over both axes.
        variances[i] = 0.5* ( np.mean((wf[1:,:] - wf[:-1,:])**2) + \
            np.mean((wf[:,1:] - wf[:,:-1])**2) )
    print("Mean var: {0:7.3e} Sdev var: {1:7.3e}".format(np.mean(variances),np.std(variances)))
def moffat(theta, hw, beta=4.0):
    """This creates a moffat function for simulating seeing.
    The output is an array with the same dimensions as theta.
    Total flux is set to 1 - this only applies if sampling
    of theta is 1 per unit area (e.g. arange(100)).

    From Racine (1996), beta=4 is a good approximation for seeing

    Parameters
    ----------
    theta: float or float array
        Angle at which to calculate the moffat profile (same units as hw)
    hw: float
        Half-width of the profile
    beta: float
        beta parameter
    """
    scale = 2.0**(1.0/beta) - 1.0
    # Peak value, normalised so the profile integrates to unity.
    peak = scale*(beta - 1.0)/np.pi/hw**2
    return peak/(1.0 + scale*(theta/hw)**2)**beta
def moffat2d(sz,hw, beta=4.0):
    """A 2D version of a moffat function, evaluated on an (sz, sz) grid
    centred on the array middle.
    """
    coords = np.arange(sz) - sz/2.0
    grid = np.meshgrid(coords, coords)
    # Radial distance from the grid centre.
    return moffat(np.sqrt(grid[0]**2 + grid[1]**2), hw, beta=beta)
def circle(dim,width):
    """This function creates a circular pupil mask.

    Parameters
    ----------
    dim: int
        Size of the 2D array
    width: int
        diameter of the circle

    Returns
    -------
    pupil: float array (dim,dim)
        2D array circular pupil mask
    """
    coords = np.arange(dim) - dim/2.0
    grid = np.meshgrid(coords, coords)
    # 1.0 strictly inside the circle of the given diameter, 0.0 elsewhere.
    return ((grid[1]**2 + grid[0]**2) < (width/2.0)**2).astype(float)
def square(dim, width):
    """This function creates a square pupil mask.

    Parameters
    ----------
    dim: int
        Size of the 2D array
    width: int
        width of the square

    Returns
    -------
    pupil: float array (dim,dim)
        2D array square pupil mask
    """
    coords = np.arange(dim) - dim/2.0
    grid = np.meshgrid(coords, coords)
    # 1.0 strictly inside the centred square, 0.0 elsewhere.
    inside = (np.abs(grid[0]) < width/2) & (np.abs(grid[1]) < width/2)
    return inside.astype(float)
def hexagon(dim, width):
    """This function creates a hexagonal pupil mask.

    Parameters
    ----------
    dim: int
        Size of the 2D array
    width: int
        flat-to-flat width of the hexagon

    Returns
    -------
    pupil: float array (dim,dim)
        2D array hexagonal pupil mask
    """
    x = np.arange(dim)-dim/2.0
    xy = np.meshgrid(x,x)
    xx = xy[1]
    yy = xy[0]
    # Intersect the horizontal band |yy| < width/2 with the four slanted
    # half-planes bounding the hexagon's angled edges.
    w = np.where( (yy < width/2) * (yy > (-width/2)) * \
        (yy < (width-np.sqrt(3)*xx)) * (yy > (-width+np.sqrt(3)*xx)) * \
        (yy < (width+np.sqrt(3)*xx)) * (yy > (-width-np.sqrt(3)*xx)))
    # Renamed from 'hex', which shadowed the builtin hex().
    hexmask = np.zeros((dim,dim))
    hexmask[w] = 1.0
    return hexmask
def snell(u, f, n_i, n_f):
    """Snell's law at an interface between two dielectrics

    Parameters
    ----------
    u: float array(3)
        Input unit vector
    f: float array(3)
        surface normal unit vector
    n_i: float
        initial refractive index
    n_f: float
        final refractive index.

    Returns
    -------
    v: float array(3)
        Refracted unit vector.
    """
    # Component of u perpendicular to the normal, normalised to unit length.
    tangent = u - np.sum(u*f)*f
    tangent /= np.sqrt(np.sum(tangent**2))
    # Angles of incidence and refraction.
    theta_i = np.arccos(np.sum(u*f))
    theta_f = np.arcsin(n_i*np.sin(theta_i)/n_f)
    # Recompose the refracted direction from tangential and normal parts.
    return tangent*np.sin(theta_f) + f*np.cos(theta_f)
def grating_sim(u, l, s, ml_d, refract=False):
    """This function computes an output unit vector based on an input unit
    vector and grating properties.

    Math: v . l = u . l (conserved along the grating lines)
          v . s = u . s + ml_d (grating equation across the lines)
    The blaze wavelength is when m lambda = 2 d sin(theta), i.e.
    ml_d = 2 sin(theta).

    x : to the right
    y : out of page
    z : down the page

    Parameters
    ----------
    u: float array(3)
        initial unit vector
    l: float array(3)
        unit vector along grating lines
    s: float array(3)
        unit vector along grating surface, perpendicular to lines
    ml_d: float
        order * lambda/d
    refract: bool
        Is the grating a refractive grating?
    """
    if np.abs(np.sum(l*s)) > 1e-3:
        print('Error: input l and s must be orthogonal!')
        raise UserWarning
    # Grating normal; flipped for a transmissive (refractive) grating.
    normal = np.cross(s,l)
    if refract:
        normal = -normal
    # The l-component is conserved; the s-component picks up the diffraction
    # term m*lambda/d.
    comp_l = np.sum(u*l)
    comp_s = np.sum(u*s) + ml_d
    # Remaining component along the normal keeps |v| = 1.
    comp_n = np.sqrt(1 - comp_l**2 - comp_s**2)
    return comp_l*l + comp_s*s + comp_n*normal
def rotate_xz(u, theta_deg):
    """Rotate a vector u in the x-z plane, clockwise where x is up and
    z is right.

    Parameters
    ----------
    u: float array(3)
        Vector to rotate.
    theta_deg: float
        Rotation angle in degrees.
    """
    angle = np.radians(theta_deg)
    c, s = np.cos(angle), np.sin(angle)
    rot = np.array([[ c, 0, s],
                    [ 0, 1, 0],
                    [-s, 0, c]])
    return rot.dot(u)
def nglass(l, glass='sio2'):
    """Refractive index of fused silica and other glasses via the Sellmeier
    equation. Note that C is in microns^{-2}.

    Parameters
    ----------
    l: float or float array
        wavelength in microns
    glass: string
        One of 'sio2', 'bk7' or 'nf2'.

    Returns
    -------
    n: float array
        Refractive index at each wavelength.

    Raises
    ------
    UserWarning
        If the glass type is unknown.
    """
    try:
        nl = len(l)
    # BUG FIX: only catch TypeError (a scalar has no len()) instead of a bare
    # except that would silently swallow unrelated errors.
    except TypeError:
        l = [l]
        nl = 1
    l = np.array(l)
    if (glass == 'sio2'):
        B = np.array([0.696166300, 0.407942600, 0.897479400])
        C = np.array([4.67914826e-3,1.35120631e-2,97.9340025])
    elif (glass == 'bk7'):
        B = np.array([1.03961212,0.231792344,1.01046945])
        C = np.array([6.00069867e-3,2.00179144e-2,1.03560653e2])
    elif (glass == 'nf2'):
        B = np.array( [1.39757037,1.59201403e-1,1.26865430])
        C = np.array( [9.95906143e-3,5.46931752e-2,1.19248346e2])
    else:
        print("ERROR: Unknown glass {0:s}".format(glass))
        raise UserWarning
    # Sellmeier equation: n^2 = 1 + sum_i B_i l^2 / (l^2 - C_i)
    n = np.ones(nl)
    for i in range(len(B)):
        n += B[i]*l**2/(l**2 - C[i])
    return np.sqrt(n)
def join_bessel(U,V,j):
    """Continuity condition for joining the Bessel J (core) and Bessel K
    (cladding) field solutions at the edge of a fiber.

    In order to solve the Laplace equation in cylindrical co-ordinates, both
    the electric field and its derivative must be continuous at the core
    boundary; the roots (in U) of this function are the n_eff values that
    satisfy that continuity relationship.
    """
    # Cladding decay parameter.
    w = np.sqrt(V**2 - U**2)
    core_term = U*special.jn(j+1,U)*special.kn(j,w)
    clad_term = w*special.kn(j+1,w)*special.jn(j,U)
    return core_term - clad_term
def neff(V, accurate_roots=True):
    """Find the U values (radial propagation constants) of all guided modes
    for a given fiber V number.

    Roots of the J/K continuity condition are bracketed on a coarse U grid,
    estimated by linear interpolation, and (optionally) polished with
    Newton's method on join_bessel.

    Parameters
    ----------
    V: float
        Fiber V number.
    accurate_roots: bool
        If True, refine each bracketed root with scipy.optimize.newton.

    Returns
    -------
    all_roots: float array
        U values of all modes, concatenated over azimuthal order j.
    n_per_j: int array
        Number of roots found for each azimuthal order j = 0, 1, ...
    """
    delu = 0.04
    # Coarse grid of candidate U values in (0, V).
    U = np.arange(delu/2,V,delu)
    W = np.sqrt(V**2 - U**2)
    all_roots=np.array([])
    n_per_j=np.array([],dtype=int)
    n_modes=0
    for j in range(int(V+1)):
        # Continuity condition; sign changes bracket the mode roots.
        f = U*special.jn(j+1,U)*special.kn(j,W) - W*special.kn(j+1,W)*special.jn(j,U)
        crossings = np.where(f[0:-1]*f[1:] < 0)[0]
        # Linear interpolation between the bracketing grid points.
        roots = U[crossings] - f[crossings]*( U[crossings+1] - U[crossings] )/( f[crossings+1] - f[crossings] )
        if accurate_roots:
            for i,root in enumerate(roots):
                roots[i] = optimize.newton(join_bessel, root, args=(V,j))
        if (j == 0):
            # j = 0 modes are singly degenerate...
            n_modes = n_modes + len(roots)
            n_per_j = np.append(n_per_j, len(roots))
        else:
            # ...while j > 0 modes have sin and cos orientations (counted x2
            # in n_modes but not in n_per_j).
            n_modes = n_modes + 2*len(roots)
            n_per_j = np.append(n_per_j, len(roots)) #could be 2*length(roots) to account for sin and cos.
        all_roots = np.append(all_roots,roots)
    return all_roots, n_per_j
def mode_2d(V, r, j=0, n=0, sampling=0.3, sz=1024):
    """Create a 2D fiber mode profile.

    Parameters
    ----------
    V: float
        Fiber V number
    r: float
        core radius in microns
    j: int
        azimuthal order of the mode (0 is pure radial modes)
    n: int
        radial order of the mode (0 is fundamental)
    sampling: float
        microns per pixel
    sz: int
        grid size per side

    Returns
    -------
    the_mode: (sz, sz) float array
        Mode profile, normalised to unit L2 norm.

    TODO: Nonradial modes."""
    #First, find the U values of all guided modes...
    u_all,n_per_j = neff(V)
    # Index of the requested (j, n) mode within the flattened root list.
    ix = np.sum(n_per_j[0:j]) + n
    U0 = u_all[ix]
    W0 = np.sqrt(V**2 - U0**2)
    x = (np.arange(sz)-sz/2)*sampling/r
    xy = np.meshgrid(x,x)
    # Radial coordinate in units of the core radius. Renamed from 'r', which
    # shadowed (and clobbered) the core-radius parameter above.
    rnorm = np.sqrt(xy[0]**2 + xy[1]**2)
    win = np.where(rnorm < 1)
    wout = np.where(rnorm >= 1)
    the_mode = np.zeros( (sz,sz) )
    # Bessel J inside the core...
    the_mode[win] = special.jn(j,rnorm[win]*U0)
    # ...matched in amplitude to a decaying Bessel K outside.
    scale = special.jn(j,U0)/special.kn(j,W0)
    the_mode[wout] = scale * special.kn(j,rnorm[wout]*W0)
    return the_mode/np.sqrt(np.sum(the_mode**2))
def compute_v_number(wavelength_in_mm, core_radius, numerical_aperture):
    """Computes the V number (can be interpreted as a kind of normalized
    optical frequency) for an optical fibre.

    Parameters
    ----------
    wavelength_in_mm: float
        The wavelength of light in mm
    core_radius: float
        The core radius of the fibre in mm
    numerical_aperture: float
        The numerical aperture of the optical fibre, defined by the refractive
        indices of the core and cladding

    Returns
    -------
    v: float
        The v number of the fibre
    """
    # V = (2*pi/lambda) * a * NA
    wavenumber = 2 * np.pi / wavelength_in_mm
    return wavenumber * core_radius * numerical_aperture
def shift_and_ft(im):
    """Sub-pixel shift an image to the origin and Fourier-transform it.

    The image is upsampled 3x via FFT interpolation, rolled so its peak sits
    (almost) at the origin, rebinned back to the original size, and then
    real-Fourier-transformed.

    Parameters
    ----------
    im: (ny,nx) float array

    Returns
    -------
    ftim: (ny,nx/2+1) complex array
    """
    ny, nx = im.shape[0], im.shape[1]
    # Upsample 3x so the peak can be centred to sub-(original-)pixel accuracy.
    oversampled = regrid_fft(im, (3*ny, 3*nx))
    peak = np.unravel_index(oversampled.argmax(), oversampled.shape)
    # Roll the peak towards the origin.
    oversampled = np.roll(np.roll(oversampled, 1-peak[0], axis=0), 1-peak[1], axis=1)
    # Back to the original sampling, then real FFT.
    return np.fft.rfft2(rebin(oversampled, (ny, nx)))
def rebin(a, shape):
    """Re-bins an image to a new (smaller) image, summing the contents of
    each block of original pixels.

    Originally from:
    http://stackoverflow.com/questions/8090229/resize-with-averaging-or-rebin-a-numpy-2d-array

    Parameters
    ----------
    a: array
        Input image
    shape: (xshape,yshape)
        New shape; each dimension must evenly divide the input dimension.
    """
    ny, nx = shape
    by, bx = a.shape[0]//ny, a.shape[1]//nx
    # Reshape into (ny, by, nx, bx) blocks, then sum within each block.
    return a.reshape(ny, by, nx, bx).sum(-1).sum(1)
def regrid_fft(im,new_shape):
    """Regrid onto a larger number of pixels using an fft. This is optimal
    for Nyquist sampled data.

    Note that the output is not amplitude-rescaled: the mean of the output is
    mean(im) * im.size / (new_shape[0]*new_shape[1]).

    Parameters
    ----------
    im: array
        The input image.
    new_shape: (new_y,new_x)
        The new shape

    Notes
    ------
    TODO: This should work with an arbitrary number of dimensions
    """
    ftim = np.fft.rfft2(im)
    # BUG FIX: "/" yields floats under Python 3, which are invalid as array
    # shapes and slice indices; floor division "//" restores the Python 2
    # integer behaviour.
    new_ftim = np.zeros((new_shape[0], new_shape[1]//2 + 1),dtype='complex')
    # Copy positive-frequency rows to the top and negative-frequency rows to
    # the bottom, leaving the new (higher) frequencies zero-padded in between.
    new_ftim[0:ftim.shape[0]//2,0:ftim.shape[1]] = \
        ftim[0:ftim.shape[0]//2,0:ftim.shape[1]]
    new_ftim[new_shape[0]-ftim.shape[0]//2:,0:ftim.shape[1]] = \
        ftim[ftim.shape[0]//2:,0:ftim.shape[1]]
    return np.fft.irfft2(new_ftim)
| {
"repo_name": "mikeireland/pymfe",
"path": "pymfe/optics.py",
"copies": "2",
"size": "17674",
"license": "mit",
"hash": 6504584304853360000,
"line_mean": 31.8513011152,
"line_max": 142,
"alpha_frac": 0.6008260722,
"autogenerated": false,
"ratio": 3.027406646111682,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.949201513585507,
"avg_score": 0.027243516491322412,
"num_lines": 538
} |
# A self-dividing number is a number that is divisible by every digit it contains.
# For example, 128 is a self-dividing number because 128 % 1 == 0, 128 % 2 == 0, and 128 % 8 == 0.
# Also, a self-dividing number is not allowed to contain the digit zero.
# Given a lower and upper number bound, output a list of every possible self dividing number, including the bounds if possible.
# Example 1:
# Input:
# left = 1, right = 22
# Output: [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 15, 22]
# Note:
# The boundaries of each input argument are 1 <= left <= right <= 10000.
# Solution notes: given lower and upper bounds, return the self-dividing
# numbers in that range. A self-dividing number is divisible by every digit
# it contains: for example, 128 is self-dividing because 128 % 1 == 0,
# 128 % 2 == 0 and 128 % 8 == 0. Numbers containing the digit zero are not
# allowed. Output the list of all such numbers, including the bounds where
# possible.
class Solution(object):
    def selfDividingNumbers(self, left, right):
        """Return all self-dividing numbers in [left, right].

        A self-dividing number is divisible by every digit it contains and
        may not contain the digit zero (e.g. 128: 128%1 == 128%2 == 128%8 == 0).

        :type left: int
        :type right: int
        :rtype: List[int]
        """
        result = []
        for num in range(left, right+1):
            digits = str(num)
            # Zero digits are disallowed (and would divide-by-zero below).
            if '0' in digits:
                continue
            # Keep num only if every digit divides it exactly; all() also
            # short-circuits on the first failing digit, unlike the original
            # sum(...) == 0 formulation.
            if all(num % int(d) == 0 for d in digits):
                result.append(num)
        # BUG FIX: the original final line was garbled ("return result | {")
        # by a botched merge with trailing metadata.
        return result
"repo_name": "lanpong/LeetCode",
"path": "Python/self_dividing_numbers_728.py",
"copies": "1",
"size": "1385",
"license": "mit",
"hash": -3853217035903772700,
"line_mean": 31.8,
"line_max": 127,
"alpha_frac": 0.6111595466,
"autogenerated": false,
"ratio": 2.3895833333333334,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8475374201291741,
"avg_score": 0.005073735728318344,
"num_lines": 35
} |
"""A semantic pond for content delivery.
See:
https://github.com/alexmilowski/duckpond
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
long_description = """
A [semantic data lake](http://cacm.acm.org/news/200095-the-data-lake-concept-is-maturing/fulltext) can
be thought of a set of resources annotated with semantics (i.e., triples or quads). In the context of big data,
a "data lake" is a massive repository of data, structured or unstructured, of which we gain knowledge of
its contents by examining the semantic graphs derived from their contents.
That's a big idea. This project is about a *semantic data lake* at a much smaller
scale: a pond. I also like ducks and so it is a *duck pond*.
We can paddle around in our content, just like a duck, and harvest a bit knowledge
to derive local value. In this case, we use the semantic pond to understand the
content, its inter-relations, ordering, and other such content relations. From
that, we can derive a useful presentation on the Web.
"""
import re
# Single-source the package version: read __version__ out of
# duckpond/__init__.py with a regex instead of importing the package
# (importing would require its dependencies at build time).
vdir = __file__[0:__file__.rfind('/')]+'/' if __file__.rfind('/')>=0 else ''
version_file = vdir + 'duckpond/__init__.py'
with open(version_file, 'rt') as vfile:
    verstrline = vfile.read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
    version_info = mo.group(1)
else:
    # BUG FIX: the original raised with the undefined name VERSIONFILE,
    # which turned a clear "version string missing" error into a NameError.
    raise RuntimeError("Unable to find version string in %s." % (version_file,))
# Package definition; ``version_info`` and ``long_description`` are the
# module-level values computed above in this file.
setup(
    name='duckpond',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version=version_info,
    description='A semantic pond for content delivery',
    long_description=long_description,
    # The project's main homepage.
    url='https://github.com/alexmilowski/duckpond',
    # Author details
    author='Alex Miłowski',
    author_email='alex@milowski.com',
    # Choose your license
    license='Apache 2.0',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 4 - Beta',
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: Apache Software License',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    # What does your project relate to?
    keywords='sparql semantics cms',
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    # Alternatively, if you want to distribute just a my_module.py, uncomment
    # this:
    #   py_modules=["my_module"],
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    # NOTE: the Flask stack serves the web UI; boto3/botocore suggest S3-backed
    # storage — confirm against duckpond's runtime modules.
    install_requires=['commonmark', 'requests', 'Flask', 'Flask-Session', 'Flask-WTF', 'WTForms', 'pytz', 'boto3', 'botocore'],
    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,test]
    extras_require={
    },
    include_package_data=True,
    # If there are data files included in your packages that need to be
    # installed, specify them here. If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    package_data={
    },
    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages. See:
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    data_files=[],
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
    },
)
| {
"repo_name": "alexmilowski/duckpond",
"path": "setup.py",
"copies": "1",
"size": "4780",
"license": "apache-2.0",
"hash": -7440236367435528000,
"line_mean": 37.5403225806,
"line_max": 127,
"alpha_frac": 0.6779661017,
"autogenerated": false,
"ratio": 3.866504854368932,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004276239383319885,
"num_lines": 124
} |
""" A semi automated solution for creating - installing new SSL certificates
1. Generate and install a Letsencrypt SSL certificate
2. Replace the HTTP host with an HTTPS
3. Register a cron job for certificate renewals
"""
import os
import subprocess
import config_loader
import template
import nginx
import wordpress
from teminal_colors import echo
def create_certificate():
    """Initiate the SSL creation procedure.

    Reads virtual_https_host.json, validates the site web root, obtains a
    certificate via certbot, then wires it into nginx and schedules renewal.
    Many actions must be done manually (certbot may prompt interactively).
    """
    data = config_loader.parse_config('virtual_https_host.json')
    if data['domain_name'] != '' and data['top_level_domain'] != '' \
            and data['web_root'] != '':
        if os.path.isdir(os.path.join(data['web_root'], data['domain_name'])):
            _generate_well_known_dir(data)
            # typo fix: local was previously named _cert_ontained
            cert_obtained = _certbot_certonly_webroot(data)
            if cert_obtained:
                _diffie_helman()
                _configuration_snippet(data)
                nginx.new_host(True)
                _auto_renew()
        else:
            echo('public html (site web root) does not exist', 'e')
    else:
        # BUG FIX: error message pointed at "virtual_https_host.json.json";
        # the actual config file read above is virtual_https_host.json.
        echo('please provide a valid domain name and top '
             'level domain in virtual_https_host.json', 'e')
def _generate_well_known_dir(data):
"""Create a hidden well-known directory"""
_target_dir = os.path.join(
data['web_root'], data['domain_name'], '.well-known', 'acme-challenge')
if not os.path.exists(_target_dir):
os.makedirs(_target_dir, exist_ok=True)
wordpress.fix_permissions(data)
def _certbot_certonly_webroot(data):
    """Run certbot in webroot mode for the domain (plus its www. alias).

    Returns True only when the letsencrypt live directory exists and holds
    all four expected PEM files, False otherwise.
    """
    echo('starting SSL creation process, '
         'pay attention because you may '
         'be asked to input your email '
         'and accept terms and conditions',
         'i', 'n')
    fqdn = '{0}.{1}'.format(data['domain_name'], data['top_level_domain'])
    subprocess.call([
        'certbot',
        'certonly',
        '--webroot',
        '--agree-tos',
        '--email',
        data['admin_email'],
        '--webroot-path={0}/{1}'.format(data['web_root'], data['domain_name']),
        '-d',
        fqdn,
        '-d',
        'www.' + fqdn,
    ])
    # Success check: certbot should have populated the live directory.
    live_dir = '/etc/letsencrypt/live/{0}'.format(fqdn)
    if not os.path.isdir(live_dir):
        return False
    expected = ('cert.pem', 'chain.pem', 'fullchain.pem', 'privkey.pem')
    return all(os.path.isfile(os.path.join(live_dir, name)) for name in expected)
def _diffie_helman():
    """Generate a 2048-bit Diffie-Hellman parameter file for nginx."""
    echo('generating Diffie-Helman group, '
         'this will take some time, please wait')
    command = [
        'openssl', 'dhparam',
        '-out', '/etc/ssl/certs/dhparam.pem',
        '2048',
    ]
    subprocess.call(command)
def _configuration_snippet(data):
    """Write the per-domain nginx SSL snippet, plus shared SSL params once.

    Produces /etc/nginx/snippets/ssl-<fqdn>.conf pointing at the letsencrypt
    live certificate/key, and creates ssl-params.conf from the template the
    first time only.
    """
    echo('configuring snippets', 'i')
    fqdn = '{0}.{1}'.format(data['domain_name'], data['top_level_domain'])
    cert_directive = \
        'ssl_certificate /etc/letsencrypt/live/{0}/fullchain.pem;'.format(fqdn)
    key_directive = \
        'ssl_certificate_key /etc/letsencrypt/live/{0}/privkey.pem;'.format(fqdn)
    snippet_path = '/etc/nginx/snippets/ssl-{0}.conf'.format(fqdn)
    with open(snippet_path, 'w') as snippet:
        snippet.writelines([cert_directive, '\n', key_directive])
    # Shared hardening settings are global: only generate them if missing.
    if not os.path.isfile('/etc/nginx/snippets/ssl-params.conf'):
        tpl = template.generate_template('templates/nginx-ssl-settings', data)
        with open('/etc/nginx/snippets/ssl-params.conf', 'w') as params:
            params.write(tpl)
def _auto_renew():
    """Register a daily cron job that renews SSL certificates.

    Writes /etc/cron.d/renewssl once; the renew-hook reloads nginx so the
    renewed certificate is picked up.
    """
    # BUG FIX: the hook was "'/bin/ systemctl reload nginx'" — the space after
    # /bin/ made the shell try to execute the directory "/bin/" and the
    # reload never ran.
    job = ("@daily root /usr/bin/certbot renew --quiet "
           "--renew-hook '/bin/systemctl reload nginx'\n")
    if not os.path.isfile('/etc/cron.d/renewssl'):
        with open('/etc/cron.d/renewssl', 'w') as cron:
            cron.write(job)
| {
"repo_name": "stef-k/starter",
"path": "letsencrypt.py",
"copies": "1",
"size": "4473",
"license": "mit",
"hash": -4939739002800965000,
"line_mean": 32.8863636364,
"line_max": 79,
"alpha_frac": 0.5873015873,
"autogenerated": false,
"ratio": 3.6365853658536587,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9723886953153659,
"avg_score": 0,
"num_lines": 132
} |
"""A semi-secure external process keyring.
This allows storage (but not retrieval) of signing keys, by name, within an external process. Data may then be sent
for signing against a specific key. Theoretically, should the host application be compromised, your keys will remain
safe.
"""
from atexit import register, unregister
from binascii import hexlify, unhexlify
from hmac import compare_digest
from multiprocessing import Process, Pipe
from numbers import Number
from marrow.package.loader import load
def ringleader(queue, keys):
    """Run the key-holding loop of the external signing process.

    Messages are tuples ``(sequence, operation, *args)`` received over the
    pipe. Supported operations: 'register' (store a key), 'deregister' /
    'unregister' (forget a key), 'sign' (reply with a digest), 'verify'
    (reply with a boolean), and 'quit' (exit the loop). Malformed messages
    are silently ignored.
    """
    keystore = dict(keys)
    while True:
        message = queue.recv()
        if not isinstance(message, tuple):
            continue
        if len(message) < 2:
            continue
        sequence, operation = message[:2]
        message = message[2:]
        # BUG FIX: 'deregister'/'unregister' were missing from this allowlist,
        # so key-removal requests (Keyring.deregister sends 'deregister')
        # were dropped here and the removal branch below was unreachable.
        if operation not in ('quit', 'register', 'deregister', 'unregister', 'sign', 'verify'):
            continue
        if operation == 'quit':
            break
        if operation == 'register':
            name, value = message
            keystore[name] = value
            continue
        if operation in ('deregister', 'unregister'):
            # Accept both spellings; removal of an unknown name is a no-op.
            keystore.pop(message[0], None)
            continue
        if operation == 'sign':
            name, algorithm, value = message
        elif operation == 'verify':
            name, algorithm, value, signature = message
        # Unknown names >= 32 bytes are treated as literal key material.
        key = keystore.get(name, name if len(name) >= 32 else None)
        algorithm = load(algorithm, 'pep-247')  # Load the PEP 247-compatible keyed hashing algorithm.
        # Will usually be hmac:HMAC, can also be something like web.security.fastecdsa:P256.
        signer = algorithm(key, value)
        result = signer.digest()
        if operation == 'sign':
            queue.send((sequence, result))
            continue
        # Prefer the algorithm's own verify() when it provides one.
        if hasattr(signer, 'verify'):
            result = signer.verify(value, signature)
            queue.send((sequence, result))
            continue
        # Otherwise compare digests in constant time.
        result = compare_digest(signature, result)
        queue.send((sequence, result))
    queue.close()
class Keyring:
    """Client-side handle for the external key-holding process.

    Keys can be registered (but never read back); data is sent to the child
    process for signing/verification against a named key.
    """

    def __init__(self, keys=None):
        # self.queue is our end of the pipe; the other end goes to the child.
        self.queue, queue = Pipe()
        self.ringleader = Process(target=ringleader, args=(queue, keys if keys else ()))
        self.sequence = 0

    def start(self):
        """Launch the external process and arrange for shutdown at exit."""
        self.ringleader.start()
        register(self.stop)

    def stop(self):
        """Shut down the external process; safe to call more than once."""
        # BUG FIX: a second call (e.g. explicit stop() followed by __del__)
        # used to crash on self.ringleader.join() with ringleader == None.
        if self.ringleader is None:
            return
        unregister(self.stop)
        self.queue.send((0, 'quit'))
        self.ringleader.join()
        self.ringleader = None

    def register(self, name, key):
        """Store *key* under *name* in the external process."""
        sequence = self.sequence = self.sequence + 1
        self.queue.send((sequence, 'register', name, key))

    def deregister(self, name):
        """Forget the key stored under *name*."""
        sequence = self.sequence = self.sequence + 1
        self.queue.send((sequence, 'deregister', name))

    def sign(self, key, value, signer='hmac:HMAC'):
        """Return the digest of *value* signed with the named *key*."""
        if hasattr(value, 'encode'):
            value = value.encode('utf-8')
        sequence = self.sequence = self.sequence + 1
        self.queue.send((sequence, 'sign', key, signer, value))
        seq, signature = self.queue.recv()
        assert sequence == seq
        return signature

    def verify(self, key, value, signature, signer='hmac:HMAC'):
        """Return truthy when *signature* matches *value* under *key*."""
        if hasattr(value, 'encode'):
            value = value.encode('utf-8')
        sequence = self.sequence = self.sequence + 1
        self.queue.send((sequence, 'verify', key, signer, value, signature))
        seq, result = self.queue.recv()
        assert sequence == seq
        return result

    def token(self, *parts):
        """Join *parts* into a colon-separated bytes token.

        Numbers are rendered as lowercase hex text, text is UTF-8 encoded,
        and binary parts are hex-encoded.
        """
        def process():
            for part in parts:
                if isinstance(part, Number):
                    yield ("%x" % part).encode('ascii')
                elif isinstance(part, str):
                    # BUG FIX: str and bytes handling were swapped —
                    # hexlify(str) raises TypeError on Python 3, and bytes
                    # objects have no .encode().
                    yield part.encode('utf-8')
                else:
                    yield hexlify(part)
        return b':'.join(process())

    def __del__(self):
        # BUG FIX: ``super().__del__()`` raised AttributeError (object defines
        # no __del__). Best-effort stop; ignore errors during interpreter
        # teardown or on never-started instances.
        try:
            self.stop()
        except Exception:
            pass
| {
"repo_name": "marrow/web.security",
"path": "web/security/keyring.py",
"copies": "1",
"size": "3410",
"license": "mit",
"hash": -5968710111896781000,
"line_mean": 23.3571428571,
"line_max": 116,
"alpha_frac": 0.673313783,
"autogenerated": false,
"ratio": 3.434038267875126,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4607352050875126,
"avg_score": null,
"num_lines": null
} |
"""A semi-synchronous Client for IPython parallel"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import os
import json
from threading import Thread, Event, current_thread
import time
import types
import warnings
from datetime import datetime
from getpass import getpass
from pprint import pprint
pjoin = os.path.join
import zmq
from zmq.eventloop.ioloop import IOLoop
from zmq.eventloop.zmqstream import ZMQStream
from tornado.concurrent import Future
from tornado.gen import multi_future
from traitlets.config.configurable import MultipleInstanceError
from IPython.core.application import BaseIPythonApplication
from IPython.core.profiledir import ProfileDir, ProfileDirError
from IPython.utils.capture import RichOutput
from IPython.utils.coloransi import TermColors
from jupyter_client.jsonutil import extract_dates, parse_date
from jupyter_client.localinterfaces import localhost, is_local_ip
from IPython.paths import get_ipython_dir
from IPython.utils.path import compress_user
from ipython_genutils.py3compat import cast_bytes, string_types, xrange, iteritems
from traitlets import (
HasTraits, Instance, Unicode,
Dict, List, Bool, Set, Any
)
from decorator import decorator
from ipyparallel import Reference
from ipyparallel import error
from ipyparallel import util
from jupyter_client.session import Session
from ipyparallel import serialize
from .asyncresult import AsyncResult, AsyncHubResult
from .futures import MessageFuture
from .view import DirectView, LoadBalancedView
#--------------------------------------------------------------------------
# Decorators for Client methods
#--------------------------------------------------------------------------
@decorator
def unpack_message(f, self, msg_parts):
    """Unpack a message before calling the decorated method.

    Strips the zmq routing identities, deserializes the rest, and calls
    ``f(self, msg)`` only when deserialization succeeded; bad messages are
    logged and dropped.
    """
    idents, msg = self.session.feed_identities(msg_parts, copy=False)
    try:
        msg = self.session.deserialize(msg, content=True, copy=False)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are not swallowed; malformed messages are still only logged.
        self.log.error("Invalid Message", exc_info=True)
    else:
        if self.debug:
            pprint(msg)
        return f(self, msg)
#--------------------------------------------------------------------------
# Classes
#--------------------------------------------------------------------------
# Error text for when no controller connection file can be located.
_no_connection_file_msg = """
Failed to connect because no Controller could be found.
Please double-check your profile and ensure that a cluster is running.
"""
class ExecuteReply(RichOutput):
    """wrapper for finished Execute results"""
    def __init__(self, msg_id, content, metadata):
        # msg_id: id of the execute request this reply answers
        # content: reply content dict; must carry 'execution_count'
        # metadata: per-message metadata; ['execute_result'] holds the
        #           display payload (may be None/falsy when there was none)
        self.msg_id = msg_id
        self._content = content
        self.execution_count = content['execution_count']
        self.metadata = metadata
    # RichOutput overrides
    @property
    def source(self):
        """Source of the execute_result, or None when there was no result."""
        execute_result = self.metadata['execute_result']
        if execute_result:
            return execute_result.get('source', '')
    @property
    def data(self):
        """Mimebundle dict of the execute_result (None when no result)."""
        execute_result = self.metadata['execute_result']
        if execute_result:
            return execute_result.get('data', {})
    @property
    def _metadata(self):
        """Display metadata of the execute_result (None when no result)."""
        execute_result = self.metadata['execute_result']
        if execute_result:
            return execute_result.get('metadata', {})
    def display(self):
        """Publish this reply's mimebundle via IPython's display machinery."""
        from IPython.display import publish_display_data
        publish_display_data(self.data, self.metadata)
    def _repr_mime_(self, mime):
        """Return (data, metadata) or data for *mime*, or None if absent."""
        if mime not in self.data:
            return
        data = self.data[mime]
        if mime in self._metadata:
            return data, self._metadata[mime]
        else:
            return data
    def __getitem__(self, key):
        # item access proxies to the metadata dict
        return self.metadata[key]
    def __getattr__(self, key):
        # attribute access falls back to metadata keys
        if key not in self.metadata:
            raise AttributeError(key)
        return self.metadata[key]
    def __repr__(self):
        # short one-line summary; plain-text output truncated to 32 chars
        execute_result = self.metadata['execute_result'] or {'data':{}}
        text_out = execute_result['data'].get('text/plain', '')
        if len(text_out) > 32:
            text_out = text_out[:29] + '...'
        return "<ExecuteReply[%i]: %s>" % (self.execution_count, text_out)
    def _repr_pretty_(self, p, cycle):
        """IPython pretty-printer hook: render like an Out[engine:count] prompt."""
        execute_result = self.metadata['execute_result'] or {'data':{}}
        text_out = execute_result['data'].get('text/plain', '')
        if not text_out:
            return
        try:
            ip = get_ipython()
        except NameError:
            # not running inside IPython: no coloring
            colors = "NoColor"
        else:
            colors = ip.colors
        if colors == "NoColor":
            out = normal = ""
        else:
            out = TermColors.Red
            normal = TermColors.Normal
        if '\n' in text_out and not text_out.startswith('\n'):
            # add newline for multiline reprs
            text_out = '\n' + text_out
        p.text(
            out + u'Out[%i:%i]: ' % (
                self.metadata['engine_id'], self.execution_count
            ) + normal + text_out
        )
class Metadata(dict):
    """Dict subclass holding per-message metadata with a fixed key set.

    Keys double as attributes for both reads and writes. Assigning to an
    unknown key raises KeyError (item access) or AttributeError (attribute
    access). Note that ``dict.update`` is not overridden, so the constructor
    can seed arbitrary extra keys.
    """

    def __init__(self, *args, **kwargs):
        dict.__init__(self)
        # default value for every recognised key (None unless noted below)
        defaults = dict.fromkeys((
            'msg_id', 'submitted', 'started', 'completed', 'received',
            'engine_uuid', 'engine_id', 'follow', 'after', 'status',
            'execute_input', 'execute_result', 'error',
        ))
        # stream/display keys start out empty rather than None; the mutable
        # defaults are freshly created per instance
        defaults.update(stdout='', stderr='', outputs=[], data={})
        self.update(defaults)
        self.update(dict(*args, **kwargs))

    def __getattr__(self, key):
        """Alias attribute reads to item reads."""
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)

    def __setattr__(self, key, value):
        """Alias attribute writes to item writes, restricted to known keys."""
        if key not in self:
            raise AttributeError(key)
        self[key] = value

    def __setitem__(self, key, value):
        """Allow assignment only to keys that already exist."""
        if key not in self:
            raise KeyError(key)
        dict.__setitem__(self, key, value)
class Client(HasTraits):
"""A semi-synchronous client to an IPython parallel cluster
Parameters
----------
url_file : str
The path to ipcontroller-client.json.
This JSON file should contain all the information needed to connect to a cluster,
and is likely the only argument needed.
Connection information for the Hub's registration. If a json connector
file is given, then likely no further configuration is necessary.
[Default: use profile]
profile : bytes
The name of the Cluster profile to be used to find connector information.
If run from an IPython application, the default profile will be the same
as the running application, otherwise it will be 'default'.
cluster_id : str
String id to added to runtime files, to prevent name collisions when using
multiple clusters with a single profile simultaneously.
When set, will look for files named like: 'ipcontroller-<cluster_id>-client.json'
Since this is text inserted into filenames, typical recommendations apply:
Simple character strings are ideal, and spaces are not recommended (but
should generally work)
context : zmq.Context
Pass an existing zmq.Context instance, otherwise the client will create its own.
debug : bool
flag for lots of message printing for debug purposes
timeout : float
time (in seconds) to wait for connection replies from the Hub
[Default: 10]
Other Parameters
----------------
sshserver : str
A string of the form passed to ssh, i.e. 'server.tld' or 'user@server.tld:port'
If keyfile or password is specified, and this is not, it will default to
the ip given in addr.
sshkey : str; path to ssh private key file
This specifies a key to be used in ssh login, default None.
Regular default ssh keys will be used without specifying this argument.
password : str
Your ssh password to sshserver. Note that if this is left None,
you will be prompted for it if passwordless key based login is unavailable.
paramiko : bool
flag for whether to use paramiko instead of shell ssh for tunneling.
[default: True on win32, False else]
Attributes
----------
ids : list of int engine IDs
requesting the ids attribute always synchronizes
the registration state. To request ids without synchronization,
use semi-private _ids attributes.
history : list of msg_ids
a list of msg_ids, keeping track of all the execution
messages you have submitted in order.
outstanding : set of msg_ids
a set of msg_ids that have been submitted, but whose
results have not yet been received.
results : dict
a dict of all our results, keyed by msg_id
block : bool
determines default behavior when block not specified
in execution methods
"""
    # --- public state -----------------------------------------------------
    block = Bool(False)        # default blocking behavior for execution methods
    outstanding = Set()        # msg_ids submitted but not yet resolved
    results = Instance('collections.defaultdict', (dict,))       # msg_id -> result
    metadata = Instance('collections.defaultdict', (Metadata,))  # msg_id -> Metadata
    history = List()           # all msg_ids submitted, in order
    debug = Bool(False)        # when True, received messages are pprinted
    # --- futures / io loop bookkeeping ------------------------------------
    _futures = Dict()          # msg_id -> request future
    _output_futures = Dict()   # msg_id -> output future
    _io_loop = Any()
    _io_thread = Any()
    profile=Unicode()
    def _profile_default(self):
        """Default the profile to the running IPython app's, else 'default'."""
        if BaseIPythonApplication.initialized():
            # an IPython app *might* be running, try to get its profile
            try:
                return BaseIPythonApplication.instance().profile
            except (AttributeError, MultipleInstanceError):
                # could be a *different* subclass of config.Application,
                # which would raise one of these two errors.
                return u'default'
        else:
            return u'default'
    # --- private connection state -----------------------------------------
    _outstanding_dict = Instance('collections.defaultdict', (set,))  # engine uuid -> outstanding msg_ids
    _ids = List()              # registered engine ids (kept sorted)
    _connected=Bool(False)
    _ssh=Bool(False)
    _context = Instance('zmq.Context', allow_none=True)
    _config = Dict()           # parsed connection-file contents
    _engines=Instance(util.ReverseDict, (), {})  # engine id <-> uuid
    # zmq sockets, created during _connect()
    _query_socket=Instance('zmq.Socket', allow_none=True)
    _control_socket=Instance('zmq.Socket', allow_none=True)
    _iopub_socket=Instance('zmq.Socket', allow_none=True)
    _notification_socket=Instance('zmq.Socket', allow_none=True)
    _mux_socket=Instance('zmq.Socket', allow_none=True)
    _task_socket=Instance('zmq.Socket', allow_none=True)
    _task_scheme=Unicode()     # scheduler scheme from the connection file
    _closed = False
    def __new__(self, *args, **kw):
        """Drop positional args so HasTraits.__new__ does not reject them."""
        # don't raise on positional args
        return HasTraits.__new__(self, **kw)
def __init__(self, url_file=None, profile=None, profile_dir=None, ipython_dir=None,
context=None, debug=False,
sshserver=None, sshkey=None, password=None, paramiko=None,
timeout=10, cluster_id=None, **extra_args
):
if profile:
super(Client, self).__init__(debug=debug, profile=profile)
else:
super(Client, self).__init__(debug=debug)
if context is None:
context = zmq.Context.instance()
self._context = context
if 'url_or_file' in extra_args:
url_file = extra_args['url_or_file']
warnings.warn("url_or_file arg no longer supported, use url_file", DeprecationWarning)
if url_file and util.is_url(url_file):
raise ValueError("single urls cannot be specified, url-files must be used.")
self._setup_profile_dir(self.profile, profile_dir, ipython_dir)
no_file_msg = '\n'.join([
"You have attempted to connect to an IPython Cluster but no Controller could be found.",
"Please double-check your configuration and ensure that a cluster is running.",
])
if self._cd is not None:
if url_file is None:
if not cluster_id:
client_json = 'ipcontroller-client.json'
else:
client_json = 'ipcontroller-%s-client.json' % cluster_id
url_file = pjoin(self._cd.security_dir, client_json)
short = compress_user(url_file)
if not os.path.exists(url_file):
print("Waiting for connection file: %s" % short)
for i in range(30):
time.sleep(1)
if os.path.exists(url_file):
break
if not os.path.exists(url_file):
msg = '\n'.join([
"Connection file %r not found." % short,
no_file_msg,
])
raise IOError(msg)
if url_file is None:
raise IOError(no_file_msg)
if not os.path.exists(url_file):
# Connection file explicitly specified, but not found
raise IOError("Connection file %r not found. Is a controller running?" % \
compress_user(url_file)
)
with open(url_file) as f:
cfg = json.load(f)
self._task_scheme = cfg['task_scheme']
# sync defaults from args, json:
if sshserver:
cfg['ssh'] = sshserver
location = cfg.setdefault('location', None)
proto,addr = cfg['interface'].split('://')
addr = util.disambiguate_ip_address(addr, location)
cfg['interface'] = "%s://%s" % (proto, addr)
# turn interface,port into full urls:
for key in ('control', 'task', 'mux', 'iopub', 'notification', 'registration'):
cfg[key] = cfg['interface'] + ':%i' % cfg[key]
url = cfg['registration']
if location is not None and addr == localhost():
# location specified, and connection is expected to be local
if not is_local_ip(location) and not sshserver:
# load ssh from JSON *only* if the controller is not on
# this machine
sshserver=cfg['ssh']
if not is_local_ip(location) and not sshserver:
# warn if no ssh specified, but SSH is probably needed
# This is only a warning, because the most likely cause
# is a local Controller on a laptop whose IP is dynamic
warnings.warn("""
Controller appears to be listening on localhost, but not on this machine.
If this is true, you should specify Client(...,sshserver='you@%s')
or instruct your controller to listen on an external IP."""%location,
RuntimeWarning)
elif not sshserver:
# otherwise sync with cfg
sshserver = cfg['ssh']
self._config = cfg
self._ssh = bool(sshserver or sshkey or password)
if self._ssh and sshserver is None:
# default to ssh via localhost
sshserver = addr
if self._ssh and password is None:
from zmq.ssh import tunnel
if tunnel.try_passwordless_ssh(sshserver, sshkey, paramiko):
password=False
else:
password = getpass("SSH Password for %s: "%sshserver)
ssh_kwargs = dict(keyfile=sshkey, password=password, paramiko=paramiko)
# configure and construct the session
try:
extra_args['packer'] = cfg['pack']
extra_args['unpacker'] = cfg['unpack']
extra_args['key'] = cast_bytes(cfg['key'])
extra_args['signature_scheme'] = cfg['signature_scheme']
except KeyError as exc:
msg = '\n'.join([
"Connection file is invalid (missing '{}'), possibly from an old version of IPython.",
"If you are reusing connection files, remove them and start ipcontroller again."
])
raise ValueError(msg.format(exc.message))
self.session = Session(**extra_args)
self._query_socket = self._context.socket(zmq.DEALER)
if self._ssh:
from zmq.ssh import tunnel
tunnel.tunnel_connection(self._query_socket, cfg['registration'], sshserver, **ssh_kwargs)
else:
self._query_socket.connect(cfg['registration'])
self.session.debug = self.debug
self._notification_handlers = {'registration_notification' : self._register_engine,
'unregistration_notification' : self._unregister_engine,
'shutdown_notification' : lambda msg: self.close(),
}
self._queue_handlers = {'execute_reply' : self._handle_execute_reply,
'apply_reply' : self._handle_apply_reply}
try:
self._connect(sshserver, ssh_kwargs, timeout)
except:
self.close(linger=0)
raise
# last step: setup magics, if we are in IPython:
try:
ip = get_ipython()
except NameError:
return
else:
if 'px' not in ip.magics_manager.magics:
# in IPython but we are the first Client.
# activate a default view for parallel magics.
self.activate()
    def __del__(self):
        """cleanup sockets, but _not_ context."""
        # NOTE(review): assumes close() tolerates being called repeatedly and
        # during interpreter teardown — confirm close() guards for that.
        self.close()
def _setup_profile_dir(self, profile, profile_dir, ipython_dir):
if ipython_dir is None:
ipython_dir = get_ipython_dir()
if profile_dir is not None:
try:
self._cd = ProfileDir.find_profile_dir(profile_dir)
return
except ProfileDirError:
pass
elif profile is not None:
try:
self._cd = ProfileDir.find_profile_dir_by_name(
ipython_dir, profile)
return
except ProfileDirError:
pass
self._cd = None
    def _update_engines(self, engines):
        """Update our engines dict and _ids from a dict of the form: {id:uuid}."""
        # ids may arrive as strings (JSON keys); normalize to int
        for k,v in iteritems(engines):
            eid = int(k)
            if eid not in self._engines:
                self._ids.append(eid)
            self._engines[eid] = v
        # keep the id list sorted after additions
        self._ids = sorted(self._ids)
        # the pure ZMQ scheduler requires a contiguous id range 0..n-1;
        # if ids are non-contiguous, task scheduling must be disabled
        if sorted(self._engines.keys()) != list(range(len(self._engines))) and \
            self._task_scheme == 'pure' and self._task_socket:
            self._stop_scheduling_tasks()
def _stop_scheduling_tasks(self):
"""Stop scheduling tasks because an engine has been unregistered
from a pure ZMQ scheduler.
"""
self._task_socket.close()
self._task_socket = None
msg = "An engine has been unregistered, and we are using pure " +\
"ZMQ task scheduling. Task farming will be disabled."
if self.outstanding:
msg += " If you were running tasks when this happened, " +\
"some `outstanding` msg_ids may never resolve."
warnings.warn(msg, RuntimeWarning)
    def _build_targets(self, targets):
        """Turn valid target IDs or 'all' into two lists:
        (int_ids, uuids).

        Accepts None (all engines), 'all', a single int (negative indexes
        from the end), a slice, or a collection of ints.
        """
        if not self._ids:
            # flush notification socket if no engines yet, just in case
            # (accessing self.ids synchronizes registration state)
            if not self.ids:
                raise error.NoEnginesRegistered("Can't build targets without any engines")
        if targets is None:
            targets = self._ids
        elif isinstance(targets, string_types):
            if targets.lower() == 'all':
                targets = self._ids
            else:
                raise TypeError("%r not valid str target, must be 'all'"%(targets))
        elif isinstance(targets, int):
            if targets < 0:
                # negative ints index from the end of the id list
                targets = self.ids[targets]
            if targets not in self._ids:
                raise IndexError("No such engine: %i"%targets)
            targets = [targets]
        if isinstance(targets, slice):
            # slice over positions in the sorted id list, not raw ids
            indices = list(range(len(self._ids))[targets])
            ids = self.ids
            targets = [ ids[i] for i in indices ]
        if not isinstance(targets, (tuple, list, xrange)):
            raise TypeError("targets by int/slice/collection of ints only, not %s"%(type(targets)))
        # first list: engine uuids as bytes (zmq identities); second: int ids
        return [cast_bytes(self._engines[t]) for t in targets], list(targets)
    def _connect(self, sshserver, ssh_kwargs, timeout):
        """setup all our socket connections to the cluster. This is called from
        __init__.

        Sends a connection_request over the query socket, waits up to
        *timeout* seconds for the reply, then connects the mux/task/
        notification/control/iopub sockets described in the reply.
        """
        # Maybe allow reconnecting?
        if self._connected:
            return
        self._connected=True
        def connect_socket(s, url):
            # route each connection through the ssh tunnel when one is in use
            if self._ssh:
                from zmq.ssh import tunnel
                return tunnel.tunnel_connection(s, url, sshserver, **ssh_kwargs)
            else:
                return s.connect(url)
        self.session.send(self._query_socket, 'connection_request')
        # use Poller because zmq.select has wrong units in pyzmq 2.1.7
        poller = zmq.Poller()
        poller.register(self._query_socket, zmq.POLLIN)
        # poll expects milliseconds, timeout is seconds
        evts = poller.poll(timeout*1000)
        if not evts:
            raise error.TimeoutError("Hub connection request timed out")
        idents, msg = self.session.recv(self._query_socket, mode=0)
        if self.debug:
            pprint(msg)
        content = msg['content']
        # self._config['registration'] = dict(content)
        cfg = self._config
        if content['status'] == 'ok':
            # DEALER sockets for direct (mux) and load-balanced (task) work
            self._mux_socket = self._context.socket(zmq.DEALER)
            connect_socket(self._mux_socket, cfg['mux'])
            self._task_socket = self._context.socket(zmq.DEALER)
            connect_socket(self._task_socket, cfg['task'])
            # SUB sockets subscribe to everything (empty prefix)
            self._notification_socket = self._context.socket(zmq.SUB)
            self._notification_socket.setsockopt(zmq.SUBSCRIBE, b'')
            connect_socket(self._notification_socket, cfg['notification'])
            self._control_socket = self._context.socket(zmq.DEALER)
            connect_socket(self._control_socket, cfg['control'])
            self._iopub_socket = self._context.socket(zmq.SUB)
            self._iopub_socket.setsockopt(zmq.SUBSCRIBE, b'')
            connect_socket(self._iopub_socket, cfg['iopub'])
            # seed the engine registry with the hub's current engine set
            self._update_engines(dict(content['engines']))
        else:
            self._connected = False
            raise Exception("Failed to connect!")
        self._start_io_thread()
#--------------------------------------------------------------------------
# handlers and callbacks for incoming messages
#--------------------------------------------------------------------------
def _unwrap_exception(self, content):
"""unwrap exception, and remap engine_id to int."""
e = error.unwrap_exception(content)
# print e.traceback
if e.engine_info:
e_uuid = e.engine_info['engine_uuid']
eid = self._engines[e_uuid]
e.engine_info['engine_id'] = eid
return e
def _extract_metadata(self, msg):
header = msg['header']
parent = msg['parent_header']
msg_meta = msg['metadata']
content = msg['content']
md = {'msg_id' : parent['msg_id'],
'received' : datetime.now(),
'engine_uuid' : msg_meta.get('engine', None),
'follow' : msg_meta.get('follow', []),
'after' : msg_meta.get('after', []),
'status' : content['status'],
}
if md['engine_uuid'] is not None:
md['engine_id'] = self._engines.get(md['engine_uuid'], None)
if 'date' in parent:
md['submitted'] = parent['date']
if 'started' in msg_meta:
md['started'] = parse_date(msg_meta['started'])
if 'date' in header:
md['completed'] = header['date']
return md
def _register_engine(self, msg):
"""Register a new engine, and update our connection info."""
content = msg['content']
eid = content['id']
d = {eid : content['uuid']}
self._update_engines(d)
def _unregister_engine(self, msg):
"""Unregister an engine that has died."""
content = msg['content']
eid = int(content['id'])
if eid in self._ids:
self._ids.remove(eid)
uuid = self._engines.pop(eid)
self._handle_stranded_msgs(eid, uuid)
if self._task_socket and self._task_scheme == 'pure':
self._stop_scheduling_tasks()
    def _handle_stranded_msgs(self, eid, uuid):
        """Handle messages known to be on an engine when the engine unregisters.
        It is possible that this will fire prematurely - that is, an engine will
        go down after completing a result, and the client will be notified
        of the unregistration and later receive the successful result.
        """
        outstanding = self._outstanding_dict[uuid]
        for msg_id in list(outstanding):
            if msg_id in self.results:
                # we already have this result; nothing to do
                continue
            # synthesize an EngineError result for the stranded message
            try:
                raise error.EngineError("Engine %r died while running task %r"%(eid, msg_id))
            except:
                content = error.wrap_exception()
            # build a fake message:
            msg = self.session.msg('apply_reply', content=content)
            msg['parent_header']['msg_id'] = msg_id
            msg['metadata']['engine'] = uuid
            # route through the normal reply handler so bookkeeping matches
            self._handle_apply_reply(msg)
def _handle_execute_reply(self, msg):
    """Save the reply to an execute_request into our results.

    execute messages are never actually used. apply is used instead.
    """
    parent = msg['parent_header']
    msg_id = parent['msg_id']
    future = self._futures.get(msg_id, None)
    if msg_id not in self.outstanding:
        if msg_id in self.history:
            print("got stale result: %s"%msg_id)
        else:
            print("got unknown result: %s"%msg_id)
    else:
        self.outstanding.remove(msg_id)
    content = msg['content']
    header = msg['header']

    # construct metadata:
    md = self.metadata[msg_id]
    md.update(self._extract_metadata(msg))

    # drop from the per-engine outstanding set used for engine-death cleanup
    e_outstanding = self._outstanding_dict[md['engine_uuid']]
    if msg_id in e_outstanding:
        e_outstanding.remove(msg_id)

    # construct result:
    if content['status'] == 'ok':
        self.results[msg_id] = ExecuteReply(msg_id, content, md)
    elif content['status'] == 'aborted':
        self.results[msg_id] = error.TaskAborted(msg_id)
        # aborted tasks will not get output
        out_future = self._output_futures.get(msg_id)
        if out_future and not out_future.done():
            out_future.set_result(None)
    elif content['status'] == 'resubmitted':
        # TODO: handle resubmission
        pass
    else:
        self.results[msg_id] = self._unwrap_exception(content)
    if content['status'] != 'ok' and not content.get('engine_info'):
        # not an engine failure, don't expect output
        out_future = self._output_futures.get(msg_id)
        if out_future and not out_future.done():
            out_future.set_result(None)
    if future:
        future.set_result(self.results[msg_id])
def _handle_apply_reply(self, msg):
    """Save the reply to an apply_request into our results."""
    parent = msg['parent_header']
    msg_id = parent['msg_id']
    future = self._futures.get(msg_id, None)
    if msg_id not in self.outstanding:
        if msg_id in self.history:
            print("got stale result: %s"%msg_id)
            print(self.results[msg_id])
            print(msg)
        else:
            print("got unknown result: %s"%msg_id)
    else:
        self.outstanding.remove(msg_id)
    content = msg['content']
    header = msg['header']

    # construct metadata:
    md = self.metadata[msg_id]
    md.update(self._extract_metadata(msg))

    # drop from the per-engine outstanding set used for engine-death cleanup
    e_outstanding = self._outstanding_dict[md['engine_uuid']]
    if msg_id in e_outstanding:
        e_outstanding.remove(msg_id)

    # construct result:
    if content['status'] == 'ok':
        # the return value travels in the message buffers, not the content
        self.results[msg_id] = serialize.deserialize_object(msg['buffers'])[0]
    elif content['status'] == 'aborted':
        self.results[msg_id] = error.TaskAborted(msg_id)
        # aborted tasks will not get output
        out_future = self._output_futures.get(msg_id)
        if out_future and not out_future.done():
            out_future.set_result(None)
    elif content['status'] == 'resubmitted':
        # TODO: handle resubmission
        pass
    else:
        self.results[msg_id] = self._unwrap_exception(content)
    if content['status'] != 'ok' and not content.get('engine_info'):
        # not an engine failure, don't expect output
        out_future = self._output_futures.get(msg_id)
        if out_future and not out_future.done():
            out_future.set_result(None)
    if future:
        future.set_result(self.results[msg_id])
def _make_io_loop(self):
    """Create the IOLoop for my background IO thread.

    Subclasses may override (e.g. returning ``IOLoop.current()``) to reuse
    an existing loop instead of creating a private one.
    """
    return IOLoop()
def _stop_io_thread(self):
    """Ask the IO loop to stop, then join the background IO thread."""
    loop = self._io_loop
    if loop:
        # schedule stop on the loop's own thread
        loop.add_callback(loop.stop)
    thread = self._io_thread
    # never join ourselves (close may be called from the IO thread)
    if thread and thread is not current_thread():
        thread.join()
def _start_io_thread(self):
    """Start IOLoop in a background thread."""
    self._io_loop = self._make_io_loop()
    # wrap each socket in a ZMQStream on the new loop and wire up its handler
    wiring = (
        ('query', self._dispatch_single_reply),
        ('control', self._dispatch_single_reply),
        ('mux', self._dispatch_reply),
        ('task', self._dispatch_reply),
        ('iopub', self._dispatch_iopub),
        ('notification', self._dispatch_notification),
    )
    for channel, handler in wiring:
        stream = ZMQStream(getattr(self, '_%s_socket' % channel), self._io_loop)
        stream.on_recv(handler, copy=False)
        setattr(self, '_%s_stream' % channel, stream)
    self._io_thread = Thread(target=self._io_main)
    self._io_thread.daemon = True
    self._io_thread.start()
def _io_main(self):
    """main loop for background IO thread"""
    # blocks until _stop_io_thread schedules loop.stop()
    self._io_loop.start()
    # release the loop's file descriptors once it has stopped
    self._io_loop.close()
@unpack_message
def _dispatch_single_reply(self, msg):
    """Dispatch single (non-execution) replies"""
    parent_msg_id = msg['parent_header'].get('msg_id', None)
    pending = self._futures.get(parent_msg_id)
    if pending is None:
        # nobody waiting on this reply
        return
    pending.set_result(msg)
@unpack_message
def _dispatch_notification(self, msg):
    """Dispatch notification messages"""
    msg_type = msg['header']['msg_type']
    handler = self._notification_handlers.get(msg_type, None)
    if handler is not None:
        handler(msg)
    else:
        raise KeyError("Unhandled notification message type: %s" % msg_type)
@unpack_message
def _dispatch_reply(self, msg):
    """handle execution replies waiting in ZMQ queue."""
    msg_type = msg['header']['msg_type']
    handler = self._queue_handlers.get(msg_type, None)
    if handler is not None:
        handler(msg)
    else:
        raise KeyError("Unhandled reply message type: %s" % msg_type)
@unpack_message
def _dispatch_iopub(self, msg):
    """handler for IOPub messages"""
    parent = msg['parent_header']
    if not parent or parent['session'] != self.session.session:
        # ignore IOPub messages not from here
        return
    msg_id = parent['msg_id']
    content = msg['content']
    header = msg['header']
    msg_type = msg['header']['msg_type']

    if msg_type == 'status' and msg_id not in self.metadata:
        # ignore status messages if they aren't mine
        return

    # init metadata:
    md = self.metadata[msg_id]

    if msg_type == 'stream':
        # append new stdout/stderr text to any earlier output on that stream
        name = content['name']
        s = md[name] or ''
        md[name] = s + content['text']
    elif msg_type == 'error':
        md.update({'error' : self._unwrap_exception(content)})
    elif msg_type == 'execute_input':
        md.update({'execute_input' : content['code']})
    elif msg_type == 'display_data':
        md['outputs'].append(content)
    elif msg_type == 'execute_result':
        md['execute_result'] = content
    elif msg_type == 'data_message':
        data, remainder = serialize.deserialize_object(msg['buffers'])
        md['data'].update(data)
    elif msg_type == 'status':
        # idle message comes after all outputs
        if content['execution_state'] == 'idle':
            # resolve the output future so waiters see the final output state
            future = self._output_futures.get(msg_id)
            if future and not future.done():
                # TODO: should probably store actual outputs on the Future
                future.set_result(None)
    else:
        # unhandled msg_type (status, etc.)
        pass
def _send(self, socket, msg_type, content=None, parent=None, ident=None,
        buffers=None, track=False, header=None, metadata=None):
    """Send a message in the IO thread

    returns msg object"""
    if self._closed:
        raise IOError("Connections have been closed.")
    msg = self.session.msg(msg_type, content=content, parent=parent,
                           header=header, metadata=metadata)
    msg_id = msg['header']['msg_id']

    # execution requests additionally get a future for their displayed output
    asyncresult = False
    if msg_type in {'execute_request', 'apply_request'}:
        asyncresult = True
        # add future for output
        self._output_futures[msg_id] = output = MessageFuture(msg_id)
        # hook up metadata
        output.metadata = self.metadata[msg_id]

    self._futures[msg_id] = future = MessageFuture(msg_id, track=track)
    futures = [future]
    if asyncresult:
        future.output = output
        futures.append(output)
        output.metadata['submitted'] = datetime.now()

    def cleanup(f):
        """Purge caches on Future resolution"""
        self.results.pop(msg_id, None)
        self._futures.pop(msg_id, None)
        self._output_futures.pop(msg_id, None)
        self.metadata.pop(msg_id, None)

    # once both reply and output futures resolve, drop our cached references
    multi_future(futures).add_done_callback(cleanup)

    def _really_send():
        sent = self.session.send(socket, msg, track=track, buffers=buffers, ident=ident)
        if track:
            # expose the zmq MessageTracker to the caller via the future
            future.tracker.set_result(sent['tracker'])

    # hand off actual send to IO thread
    self._io_loop.add_callback(_really_send)
    return future
def _send_recv(self, *args, **kwargs):
    """Send a message in the IO thread and return its reply"""
    reply_future = self._send(*args, **kwargs)
    reply_future.wait()
    return reply_future.result()
#--------------------------------------------------------------------------
# len, getitem
#--------------------------------------------------------------------------
def __len__(self):
    """``len(client)`` is the number of connected engines."""
    engine_ids = self.ids
    return len(engine_ids)
def __getitem__(self, key):
    """index access returns DirectView multiplexer objects

    Must be int, slice, or list/tuple/xrange of ints"""
    if isinstance(key, (int, slice, tuple, list, xrange)):
        return self.direct_view(key)
    raise TypeError("key by int/slice/iterable of ints only, not %s"%(type(key)))
def __iter__(self):
    """Since we define getitem, Client is iterable

    but unless we also define __iter__, it won't work correctly unless engine IDs
    start at zero and are continuous.
    """
    # yield one single-engine DirectView per registered engine
    for engine_id in self.ids:
        yield self.direct_view(engine_id)
#--------------------------------------------------------------------------
# Begin public methods
#--------------------------------------------------------------------------
@property
def ids(self):
    """Always up-to-date ids property."""
    # always copy, so callers cannot mutate our registration state:
    return list(self._ids)
def activate(self, targets='all', suffix=''):
    """Create a DirectView and register it with IPython magics

    Defines the magics `%px, %autopx, %pxresult, %%px`

    Parameters
    ----------
    targets: int, list of ints, or 'all'
        The engines on which the view's magics will run
    suffix: str [default: '']
        The suffix, if any, for the magics.  This allows multiple views to be
        bound to parallel magics at the same time.  For example,
        ``rc.activate(targets=0, suffix='0')`` registers ``%px0``,
        ``%pxresult0``, etc. for running magics on engine 0 only.

    Returns
    -------
    The activated (blocking) DirectView.
    """
    dview = self.direct_view(targets)
    dview.block = True
    dview.activate(suffix)
    return dview
def close(self, linger=None):
    """Close my zmq Sockets

    If `linger`, set the zmq LINGER socket option,
    which allows discarding of messages.
    """
    if self._closed:
        return
    self._stop_io_thread()
    # every trait whose name ends in "socket" holds a zmq socket (or None)
    for trait in self.trait_names():
        if not trait.endswith("socket"):
            continue
        sock = getattr(self, trait)
        if sock is None or sock.closed:
            continue
        if linger is not None:
            sock.close(linger=linger)
        else:
            sock.close()
    self._closed = True
def spin_thread(self, interval=1):
    """DEPRECATED, DOES NOTHING"""
    # kept only for backward compatibility; IO now always runs in a thread
    warnings.warn(
        "Client.spin_thread is deprecated now that IO is always in a thread",
        DeprecationWarning,
    )
def stop_spin_thread(self):
    """DEPRECATED, DOES NOTHING"""
    # fix: the message previously named Client.spin_thread; name this method
    warnings.warn(
        "Client.stop_spin_thread is deprecated now that IO is always in a thread",
        DeprecationWarning,
    )
def spin(self):
    """DEPRECATED, DOES NOTHING"""
    # kept only for backward compatibility; IO now always runs in a thread
    warnings.warn(
        "Client.spin is deprecated now that IO is in a thread",
        DeprecationWarning,
    )
def _await_futures(self, futures, timeout):
    """Block until every future in *futures* resolves or *timeout* expires.

    Returns True when all resolved, False on timeout.  A negative timeout
    means wait forever; an empty collection is trivially done.
    """
    if not futures:
        return True
    done = Event()
    multi_future(futures).add_done_callback(lambda _: done.set())
    if timeout and timeout < 0:
        # negative timeout means no timeout
        timeout = None
    return done.wait(timeout)
def _futures_for_msgs(self, msg_ids):
    """Turn msg_ids into Futures

    msg_ids not in futures dict are presumed done.
    """
    lookups = (self._futures.get(mid) for mid in msg_ids)
    return [f for f in lookups if f]
def wait(self, jobs=None, timeout=-1):
    """waits on one or more `jobs`, for up to `timeout` seconds.

    Parameters
    ----------
    jobs : int, str, or list of ints and/or strs, or one or more AsyncResult objects
        ints are indices to self.history
        strs are msg_ids
        default: wait on all outstanding messages
    timeout : float
        a time in seconds, after which to give up.
        default is -1, which means no timeout

    Returns
    -------
    True : when all msg_ids are done
    False : timeout reached, some msg_ids still outstanding
    """
    if jobs is None:
        pending = self.outstanding
    else:
        # normalize a single job into a list
        if isinstance(jobs, string_types + (int, AsyncResult)):
            jobs = [jobs]
        pending = set()
        for job in jobs:
            if isinstance(job, AsyncResult):
                pending.update(job.msg_ids)
            else:
                if isinstance(job, int):
                    # index access into submission history
                    job = self.history[job]
                pending.add(job)
    if not pending.intersection(self.outstanding):
        # everything already finished
        return True
    return self._await_futures(self._futures_for_msgs(pending), timeout)
def wait_interactive(self, jobs=None, interval=1., timeout=-1.):
    """Wait interactively for jobs

    If no job is specified, will wait for all outstanding jobs to complete.
    """
    if jobs is None:
        # wait on every execution future we still track (those have .output)
        pending = [f for f in self._futures.values() if hasattr(f, 'output')]
        ar = AsyncResult(self, pending, owner=False)
    else:
        ar = self._asyncresult_from_jobs(jobs, owner=False)
    return ar.wait_interactive(interval=interval, timeout=timeout)
#--------------------------------------------------------------------------
# Control methods
#--------------------------------------------------------------------------
def clear(self, targets=None, block=None):
    """Clear the namespace in target(s)."""
    if block is None:
        block = self.block
    idents = self._build_targets(targets)[0]
    pending = [
        self._send(self._control_socket, 'clear_request', content={}, ident=ident)
        for ident in idents
    ]
    if not block:
        return multi_future(pending)
    for f in pending:
        f.wait()
        reply = f.result()
        if reply['content']['status'] != 'ok':
            raise self._unwrap_exception(reply['content'])
def abort(self, jobs=None, targets=None, block=None):
    """Abort specific jobs from the execution queues of target(s).

    This is a mechanism to prevent jobs that have already been submitted
    from executing.

    Parameters
    ----------
    jobs : msg_id, list of msg_ids, or AsyncResult
        The jobs to be aborted

        If unspecified/None: abort all outstanding jobs.
    """
    if block is None:
        block = self.block
    if jobs is None:
        # default: abort everything still outstanding
        jobs = list(self.outstanding)
    idents = self._build_targets(targets)[0]
    msg_ids = []
    if isinstance(jobs, string_types + (AsyncResult,)):
        jobs = [jobs]
    bad = [obj for obj in jobs if not isinstance(obj, string_types + (AsyncResult,))]
    if bad:
        raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad[0])
    for j in jobs:
        if isinstance(j, AsyncResult):
            msg_ids.extend(j.msg_ids)
        else:
            msg_ids.append(j)
    content = dict(msg_ids=msg_ids)
    pending = [
        self._send(self._control_socket, 'abort_request',
                   content=content, ident=ident)
        for ident in idents
    ]
    if not block:
        return multi_future(pending)
    for f in pending:
        f.wait()
        reply = f.result()
        if reply['content']['status'] != 'ok':
            raise self._unwrap_exception(reply['content'])
def shutdown(self, targets='all', restart=False, hub=False, block=None):
    """Terminates one or more engine processes, optionally including the hub.

    Parameters
    ----------
    targets: list of ints or 'all' [default: all]
        Which engines to shutdown.
    hub: bool [default: False]
        Whether to include the Hub.  hub=True implies targets='all'.
    block: bool [default: self.block]
        Whether to wait for clean shutdown replies or not.
    restart: bool [default: False]
        NOT IMPLEMENTED
        whether to restart engines after shutting them down.
    """
    # local import to avoid a circular import at module load time
    from ipyparallel.error import NoEnginesRegistered
    if restart:
        raise NotImplementedError("Engine restart is not yet implemented")

    block = self.block if block is None else block
    if hub:
        targets = 'all'
    try:
        targets = self._build_targets(targets)[0]
    except NoEnginesRegistered:
        # nothing left to shut down except (possibly) the hub itself
        targets = []
    futures = []
    for t in targets:
        futures.append(self._send(self._control_socket, 'shutdown_request',
                    content={'restart':restart},ident=t))
    # accumulate the last failure instead of raising immediately, so the
    # hub shutdown below still runs
    error = False
    if block or hub:
        for f in futures:
            f.wait()
            msg = f.result()
            if msg['content']['status'] != 'ok':
                error = self._unwrap_exception(msg['content'])

    if hub:
        # don't trigger close on shutdown notification, which will prevent us from receiving the reply
        self._notification_handlers['shutdown_notification'] = lambda msg: None
        msg = self._send_recv(self._query_socket, 'shutdown_request')
        if msg['content']['status'] != 'ok':
            error = self._unwrap_exception(msg['content'])
        if not error:
            self.close()

    if error:
        raise error
#--------------------------------------------------------------------------
# Execution related methods
#--------------------------------------------------------------------------
def _maybe_raise(self, result):
    """wrapper for maybe raising an exception if apply failed."""
    if not isinstance(result, error.RemoteError):
        return result
    raise result
def send_apply_request(self, socket, f, args=None, kwargs=None, metadata=None, track=False,
                        ident=None):
    """construct and send an apply message via a socket.

    This is the principal method with which all engine execution is performed by views.

    Parameters
    ----------
    socket : zmq socket
        the (mux or task) socket on which to send the request
    f : callable or Reference
        the function to execute remotely
    args : list [default: []]
        positional arguments for f
    kwargs : dict [default: {}]
        keyword arguments for f
    metadata : dict [default: {}]
        extra metadata to attach to the request
    track : bool
        whether to attach a zmq message tracker to the send
    ident : bytes or list of bytes [optional]
        routing identity when targeting a specific engine

    Returns
    -------
    MessageFuture for the apply reply
    """
    if self._closed:
        raise RuntimeError("Client cannot be used after its sockets have been closed")

    # defaults:
    args = args if args is not None else []
    kwargs = kwargs if kwargs is not None else {}
    metadata = metadata if metadata is not None else {}

    # validate arguments
    if not callable(f) and not isinstance(f, Reference):
        raise TypeError("f must be callable, not %s"%type(f))
    if not isinstance(args, (tuple, list)):
        raise TypeError("args must be tuple or list, not %s"%type(args))
    if not isinstance(kwargs, dict):
        raise TypeError("kwargs must be dict, not %s"%type(kwargs))
    if not isinstance(metadata, dict):
        raise TypeError("metadata must be dict, not %s"%type(metadata))

    # serialize f/args/kwargs into the message buffers
    bufs = serialize.pack_apply_message(f, args, kwargs,
        buffer_threshold=self.session.buffer_threshold,
        item_threshold=self.session.item_threshold,
    )

    future = self._send(socket, "apply_request", buffers=bufs, ident=ident,
                        metadata=metadata, track=track)

    msg_id = future.msg_id
    self.outstanding.add(msg_id)
    if ident:
        # possibly routed to a specific engine
        if isinstance(ident, list):
            ident = ident[-1]
        if ident in self._engines.values():
            # save for later, in case of engine death
            self._outstanding_dict[ident].add(msg_id)
    self.history.append(msg_id)

    return future
def send_execute_request(self, socket, code, silent=True, metadata=None, ident=None):
    """construct and send an execute request via a socket.

    Parameters
    ----------
    socket : zmq socket
        the socket on which to send the request
    code : str
        the code to execute remotely
    silent : bool [default: True]
        whether the execution should suppress display output
    metadata : dict [default: {}]
        extra metadata to attach to the request
    ident : bytes or list of bytes [optional]
        routing identity when targeting a specific engine

    Returns
    -------
    MessageFuture for the execute reply
    """
    if self._closed:
        raise RuntimeError("Client cannot be used after its sockets have been closed")

    # defaults:
    metadata = metadata if metadata is not None else {}

    # validate arguments
    if not isinstance(code, string_types):
        raise TypeError("code must be text, not %s" % type(code))
    if not isinstance(metadata, dict):
        raise TypeError("metadata must be dict, not %s" % type(metadata))

    content = dict(code=code, silent=bool(silent), user_expressions={})

    future = self._send(socket, "execute_request", content=content, ident=ident,
                        metadata=metadata)

    msg_id = future.msg_id
    self.outstanding.add(msg_id)
    if ident:
        # possibly routed to a specific engine
        if isinstance(ident, list):
            ident = ident[-1]
        if ident in self._engines.values():
            # save for later, in case of engine death
            self._outstanding_dict[ident].add(msg_id)
    self.history.append(msg_id)
    self.metadata[msg_id]['submitted'] = datetime.now()

    return future
#--------------------------------------------------------------------------
# construct a View object
#--------------------------------------------------------------------------
def load_balanced_view(self, targets=None):
    """construct a LoadBalancedView object.

    If no arguments are specified, create a LoadBalancedView
    using all engines.

    Parameters
    ----------
    targets: list,slice,int,etc. [default: use all engines]
        The subset of engines across which to load-balance execution
    """
    if targets == 'all':
        targets = None
    # resolve to concrete engine ids unless load-balancing over everything
    engine_ids = None if targets is None else self._build_targets(targets)[1]
    return LoadBalancedView(client=self, socket=self._task_socket, targets=engine_ids)
def executor(self, targets=None):
    """Construct a PEP-3148 Executor with a LoadBalancedView

    Parameters
    ----------
    targets: list,slice,int,etc. [default: use all engines]
        The subset of engines across which to load-balance execution

    Returns
    -------
    executor: Executor
        The Executor object
    """
    view = self.load_balanced_view(targets)
    return view.executor
def direct_view(self, targets='all'):
    """construct a DirectView object.

    If no targets are specified, create a DirectView using all engines.

    rc.direct_view('all') is distinguished from rc[:] in that 'all' will
    evaluate the target engines at each execution, whereas rc[:] will connect to
    all *current* engines, and that list will not change.

    That is, 'all' will always use all engines, whereas rc[:] will not use
    engines added after the DirectView is constructed.

    Parameters
    ----------
    targets: list,slice,int,etc. [default: use all engines]
        The engines to use for the View
    """
    want_single = isinstance(targets, int)
    # allow 'all' to be lazily evaluated at each execution
    if targets != 'all':
        targets = self._build_targets(targets)[1]
        if want_single:
            # an int target resolves to exactly one engine id
            targets = targets[0]
    return DirectView(client=self, socket=self._mux_socket, targets=targets)
#--------------------------------------------------------------------------
# Query methods
#--------------------------------------------------------------------------
def get_result(self, indices_or_msg_ids=None, block=None, owner=True):
    """Retrieve a result by msg_id or history index, wrapped in an AsyncResult object.

    If the client already has the results, no request to the Hub will be made.

    This is a convenient way to construct AsyncResult objects, which are wrappers
    that include metadata about execution, and allow for awaiting results that
    were not submitted by this Client.  It is also a convenient way to retrieve
    the metadata associated with blocking execution.

    Parameters
    ----------
    indices_or_msg_ids : integer history index, str msg_id, AsyncResult,
        or a list of same.  Defaults to the most recent submission.
    block : bool
        Whether to wait for the result to be done
    owner : bool [default: True]
        Whether this AsyncResult should own the result.
        If so, calling `ar.get()` will remove data from the
        client's result and metadata cache.
        There should only be one owner of any given msg_id.

    Returns
    -------
    AsyncResult (or AsyncHubResult when results must be fetched from the Hub).
    """
    if block is None:
        block = self.block
    if indices_or_msg_ids is None:
        # default to the most recent submission
        indices_or_msg_ids = -1
    ar = self._asyncresult_from_jobs(indices_or_msg_ids, owner=owner)
    if block:
        ar.wait()
    return ar
def resubmit(self, indices_or_msg_ids=None, metadata=None, block=None):
    """Resubmit one or more tasks.

    in-flight tasks may not be resubmitted.

    Parameters
    ----------
    indices_or_msg_ids : integer history index, str msg_id, or list of either
        The indices or msg_ids of indices to be retrieved
    metadata : dict [optional]
        extra metadata for the resubmission (currently unused here)
    block : bool
        Whether to wait for the result to be done

    Returns
    -------
    AsyncHubResult
        A subclass of AsyncResult that retrieves results from the Hub
    """
    block = self.block if block is None else block
    if indices_or_msg_ids is None:
        # default to the most recent submission
        indices_or_msg_ids = -1

    theids = self._msg_ids_from_jobs(indices_or_msg_ids)
    content = dict(msg_ids = theids)

    reply = self._send_recv(self._query_socket, 'resubmit_request', content)
    content = reply['content']
    if content['status'] != 'ok':
        raise self._unwrap_exception(content)
    # the hub assigns fresh msg_ids on resubmission; follow the new ones
    mapping = content['resubmitted']
    new_ids = [ mapping[msg_id] for msg_id in theids ]

    ar = AsyncHubResult(self, new_ids)

    if block:
        ar.wait()

    return ar
def result_status(self, msg_ids, status_only=True):
    """Check on the status of the result(s) of the apply request with `msg_ids`.

    If status_only is False, then the actual results will be retrieved, else
    only the status of the results will be checked.

    Parameters
    ----------
    msg_ids : list of msg_ids
        if int:
            Passed as index to self.history for convenience.
    status_only : bool (default: True)
        if False:
            Retrieve the actual results of completed tasks.

    Returns
    -------
    results : dict
        There will always be the keys 'pending' and 'completed', which will
        be lists of msg_ids that are incomplete or complete. If `status_only`
        is False, then completed results will be keyed by their `msg_id`.
    """
    theids = self._msg_ids_from_jobs(msg_ids)

    completed = []
    local_results = {}

    # comment this block out to temporarily disable local shortcut:
    # NOTE: iterate over a snapshot — removing from `theids` while iterating
    # it directly would skip the element after each removal, leaking
    # locally-cached ids into the Hub request below.
    for msg_id in list(theids):
        if msg_id in self.results:
            completed.append(msg_id)
            local_results[msg_id] = self.results[msg_id]
            theids.remove(msg_id)

    if theids: # some not locally cached
        content = dict(msg_ids=theids, status_only=status_only)
        reply = self._send_recv(self._query_socket, "result_request", content=content)
        content = reply['content']
        if content['status'] != 'ok':
            raise self._unwrap_exception(content)
        buffers = reply['buffers']
    else:
        content = dict(completed=[],pending=[])
    content['completed'].extend(completed)

    if status_only:
        return content

    failures = []
    # load cached results into result:
    content.update(local_results)

    # update cache with results:
    for msg_id in sorted(theids):
        if msg_id in content['completed']:
            rec = content[msg_id]
            parent = extract_dates(rec['header'])
            header = extract_dates(rec['result_header'])
            rcontent = rec['result_content']
            iodict = rec['io']
            if isinstance(rcontent, str):
                rcontent = self.session.unpack(rcontent)

            md = self.metadata[msg_id]
            md_msg = dict(
                content=rcontent,
                parent_header=parent,
                header=header,
                metadata=rec['result_metadata'],
            )
            md.update(self._extract_metadata(md_msg))
            if rec.get('received'):
                md['received'] = parse_date(rec['received'])
            md.update(iodict)

            if rcontent['status'] == 'ok':
                if header['msg_type'] == 'apply_reply':
                    # apply results ride in the buffers; consume our slice
                    res,buffers = serialize.deserialize_object(buffers)
                elif header['msg_type'] == 'execute_reply':
                    res = ExecuteReply(msg_id, rcontent, md)
                else:
                    raise KeyError("unhandled msg type: %r" % header['msg_type'])
            else:
                res = self._unwrap_exception(rcontent)
                failures.append(res)

            self.results[msg_id] = res
            content[msg_id] = res

    if len(theids) == 1 and failures:
        raise failures[0]

    error.collect_exceptions(failures, "result_status")
    return content
def queue_status(self, targets='all', verbose=False):
    """Fetch the status of engine queues.

    Parameters
    ----------
    targets : int/str/list of ints/strs
        the engines whose states are to be queried.
        default : all
    verbose : bool
        Whether to return lengths only, or lists of ids for each element
    """
    if targets == 'all':
        # allow 'all' to be evaluated on the engine
        engine_ids = None
    else:
        engine_ids = self._build_targets(targets)[1]
    reply = self._send_recv(self._query_socket, "queue_request",
                            content=dict(targets=engine_ids, verbose=verbose))
    content = reply['content']
    if content.pop('status') != 'ok':
        raise self._unwrap_exception(content)
    content = util.int_keys(content)
    # for a single int target, return just that engine's entry
    return content[targets] if isinstance(targets, int) else content
def _msg_ids_from_target(self, targets=None):
    """Build a list of msg_ids from the list of engine targets"""
    if not targets: # needed as _build_targets otherwise uses all engines
        return []
    target_ids = self._build_targets(targets)[0]
    return [mid for mid, md in self.metadata.items()
            if md["engine_uuid"] in target_ids]
def _msg_ids_from_jobs(self, jobs=None):
    """Given a 'jobs' argument, convert it to a list of msg_ids.

    Can be either one or a list of:

    - msg_id strings
    - integer indices to this Client's history
    - AsyncResult objects
    """
    if not isinstance(jobs, (list, tuple, set, types.GeneratorType)):
        jobs = [jobs]
    resolved = []
    for job in jobs:
        # the three accepted kinds are mutually exclusive types
        if isinstance(job, AsyncResult):
            resolved.extend(job.msg_ids)
        elif isinstance(job, int):
            resolved.append(self.history[job])
        elif isinstance(job, string_types):
            resolved.append(job)
        else:
            raise TypeError("Expected msg_id, int, or AsyncResult, got %r" % job)
    return resolved
def _asyncresult_from_jobs(self, jobs=None, owner=False):
    """Construct an AsyncResult from msg_ids or asyncresult objects

    Parameters
    ----------
    jobs : int, str, AsyncResult, or list of same
        history indices, msg_ids, or AsyncResults to wrap
    owner : bool
        whether the resulting AsyncResult owns (and may purge) the results

    Returns
    -------
    AsyncResult when everything resolves to local futures;
    AsyncHubResult when any msg_id must be fetched from the Hub.
    """
    if not isinstance(jobs, (list, tuple, set, types.GeneratorType)):
        single = True
        jobs = [jobs]
    else:
        single = False
    futures = []
    msg_ids = []
    for job in jobs:
        if isinstance(job, int):
            job = self.history[job]
        if isinstance(job, string_types):
            if job in self._futures:
                # fix: track the Future itself, not the bare msg_id string;
                # AsyncResult expects Future objects (cf. _futures_for_msgs)
                futures.append(self._futures[job])
            elif job in self.results:
                # finished and cached locally: wrap in an already-done future
                f = MessageFuture(job)
                f.set_result(self.results[job])
                f.output = Future()
                f.output.metadata = self.metadata[job]
                f.output.set_result(None)
                futures.append(f)
            else:
                # unknown locally; must ask the Hub
                msg_ids.append(job)
        elif isinstance(job, AsyncResult):
            if job._children:
                futures.extend(job._children)
            else:
                msg_ids.extend(job.msg_ids)
        else:
            raise TypeError("Expected msg_id, int, or AsyncResult, got %r" % job)
    if msg_ids:
        if single:
            msg_ids = msg_ids[0]
        return AsyncHubResult(self, msg_ids, owner=owner)
    else:
        if single and futures:
            futures = futures[0]
        return AsyncResult(self, futures, owner=owner)
def purge_local_results(self, jobs=None, targets=None):
    """Clears the client caches of results and their metadata.

    Individual results can be purged by msg_id, or the entire
    history of specific targets can be purged.

    Use `purge_local_results('all')` to scrub everything from the Clients's
    results and metadata caches.

    After this call all `AsyncResults` are invalid and should be discarded.

    If you must "reget" the results, you can still do so by using
    `client.get_result(msg_id)` or `client.get_result(asyncresult)`. This will
    redownload the results from the hub if they are still available
    (i.e `client.purge_hub_results(...)` has not been called.

    Parameters
    ----------
    jobs : str or list of str or AsyncResult objects
        the msg_ids whose results should be purged.
    targets : int/list of ints
        The engines, by integer ID, whose entire result histories are to be purged.

    Raises
    ------
    RuntimeError : if any of the tasks to be purged are still outstanding.
    """
    # avoid mutable default arguments; None means "no selection"
    jobs = [] if jobs is None else jobs
    targets = [] if targets is None else targets
    if not targets and not jobs:
        raise ValueError("Must specify at least one of `targets` and `jobs`")

    if jobs == 'all':
        if self.outstanding:
            raise RuntimeError("Can't purge outstanding tasks: %s" % self.outstanding)
        self.results.clear()
        self.metadata.clear()
        self._futures.clear()
        self._output_futures.clear()
    else:
        msg_ids = set()
        msg_ids.update(self._msg_ids_from_target(targets))
        msg_ids.update(self._msg_ids_from_jobs(jobs))
        still_outstanding = self.outstanding.intersection(msg_ids)
        if still_outstanding:
            raise RuntimeError("Can't purge outstanding tasks: %s" % still_outstanding)
        for mid in msg_ids:
            self.results.pop(mid, None)
            self.metadata.pop(mid, None)
            self._futures.pop(mid, None)
            self._output_futures.pop(mid, None)
def purge_hub_results(self, jobs=None, targets=None):
    """Tell the Hub to forget results.

    Individual results can be purged by msg_id, or the entire
    history of specific targets can be purged.

    Use `purge_results('all')` to scrub everything from the Hub's db.

    Parameters
    ----------
    jobs : str or list of str or AsyncResult objects
        the msg_ids whose results should be forgotten.
    targets : int/str/list of ints/strs
        The targets, by int_id, whose entire history is to be purged.

        default : None
    """
    # avoid mutable default arguments; None means "no selection"
    jobs = [] if jobs is None else jobs
    targets = [] if targets is None else targets
    if not targets and not jobs:
        raise ValueError("Must specify at least one of `targets` and `jobs`")
    if targets:
        targets = self._build_targets(targets)[1]

    # construct msg_ids from jobs
    if jobs == 'all':
        msg_ids = jobs
    else:
        msg_ids = self._msg_ids_from_jobs(jobs)

    content = dict(engine_ids=targets, msg_ids=msg_ids)
    reply = self._send_recv(self._query_socket, "purge_request", content=content)
    content = reply['content']
    if content['status'] != 'ok':
        raise self._unwrap_exception(content)
def purge_results(self, jobs=None, targets=None):
    """Clears the cached results from both the hub and the local client

    Individual results can be purged by msg_id, or the entire
    history of specific targets can be purged.

    Use `purge_results('all')` to scrub every cached result from both the Hub's and
    the Client's db.

    Equivalent to calling both `purge_hub_results()` and `purge_client_results()` with
    the same arguments.

    Parameters
    ----------
    jobs : str or list of str or AsyncResult objects
        the msg_ids whose results should be forgotten.
    targets : int/str/list of ints/strs
        The targets, by int_id, whose entire history is to be purged.

        default : None
    """
    # avoid mutable default arguments; normalize before delegating
    jobs = [] if jobs is None else jobs
    targets = [] if targets is None else targets
    self.purge_local_results(jobs=jobs, targets=targets)
    self.purge_hub_results(jobs=jobs, targets=targets)
def purge_everything(self):
    """Clears all content from previous Tasks from both the hub and the local client

    In addition to calling `purge_results("all")` it also deletes the history and
    other bookkeeping lists.
    """
    self.purge_results("all")
    # forget submission order and message-digest replay protection
    self.history = []
    self.session.digest_history.clear()
def hub_history(self):
"""Get the Hub's history
Just like the Client, the Hub has a history, which is a list of msg_ids.
This will contain the history of all clients, and, depending on configuration,
may contain history across multiple cluster sessions.
Any msg_id returned here is a valid argument to `get_result`.
Returns
-------
msg_ids : list of strs
list of all msg_ids, ordered by task submission time.
"""
reply = self._send_recv(self._query_socket, "history_request", content={})
content = reply['content']
if content['status'] != 'ok':
raise self._unwrap_exception(content)
else:
return content['history']
    def db_query(self, query, keys=None):
        """Query the Hub's TaskRecord database
        This will return a list of task record dicts that match `query`
        Parameters
        ----------
        query : mongodb query dict
            The search dict. See mongodb query docs for details.
        keys : list of strs [optional]
            The subset of keys to be returned.  The default is to fetch everything but buffers.
            'msg_id' will *always* be included.
        Returns
        -------
        records : list of dicts
            matching TaskRecords, with datetime fields parsed and any
            requested buffers re-attached.
        """
        # allow a single key to be passed as a bare string
        if isinstance(keys, string_types):
            keys = [keys]
        content = dict(query=query, keys=keys)
        reply = self._send_recv(self._query_socket, "db_request", content=content)
        content = reply['content']
        if content['status'] != 'ok':
            raise self._unwrap_exception(content)
        records = content['records']
        buffer_lens = content['buffer_lens']
        result_buffer_lens = content['result_buffer_lens']
        buffers = reply['buffers']
        # buffer_lens / result_buffer_lens are None when buffers weren't requested
        has_bufs = buffer_lens is not None
        has_rbufs = result_buffer_lens is not None
        for i,rec in enumerate(records):
            # unpack datetime objects
            for hkey in ('header', 'result_header'):
                if hkey in rec:
                    rec[hkey] = extract_dates(rec[hkey])
            for dtkey in ('submitted', 'started', 'completed', 'received'):
                if dtkey in rec:
                    rec[dtkey] = parse_date(rec[dtkey])
            # relink buffers: all buffers arrive flattened in reply['buffers'];
            # hand each record its slice, consuming the flat list front-to-back
            if has_bufs:
                blen = buffer_lens[i]
                rec['buffers'], buffers = buffers[:blen],buffers[blen:]
            if has_rbufs:
                blen = result_buffer_lens[i]
                rec['result_buffers'], buffers = buffers[:blen],buffers[blen:]
        return records
# public API of this module: only the Client class is exported
__all__ = [ 'Client' ]
| {
"repo_name": "fzheng/codejam",
"path": "lib/python2.7/site-packages/ipyparallel/client/client.py",
"copies": "1",
"size": "70766",
"license": "mit",
"hash": 698515700502294500,
"line_mean": 35.9728317659,
"line_max": 111,
"alpha_frac": 0.555535144,
"autogenerated": false,
"ratio": 4.442031259807922,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5497566403807922,
"avg_score": null,
"num_lines": null
} |
"""A semi-synchronous Client for the ZMQ cluster
Authors:
* MinRK
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import json
import sys
from threading import Thread, Event
import time
import warnings
from datetime import datetime
from getpass import getpass
from pprint import pprint
pjoin = os.path.join
import zmq
# from zmq.eventloop import ioloop, zmqstream
from IPython.config.configurable import MultipleInstanceError
from IPython.core.application import BaseIPythonApplication
from IPython.core.profiledir import ProfileDir, ProfileDirError
from IPython.utils.capture import RichOutput
from IPython.utils.coloransi import TermColors
from IPython.utils.jsonutil import rekey, extract_dates, parse_date
from IPython.utils.localinterfaces import localhost, is_local_ip
from IPython.utils.path import get_ipython_dir
from IPython.utils.py3compat import cast_bytes, string_types, xrange, iteritems
from IPython.utils.traitlets import (HasTraits, Integer, Instance, Unicode,
Dict, List, Bool, Set, Any)
from IPython.external.decorator import decorator
from IPython.external.ssh import tunnel
from IPython.parallel import Reference
from IPython.parallel import error
from IPython.parallel import util
from IPython.kernel.zmq.session import Session, Message
from IPython.kernel.zmq import serialize
from .asyncresult import AsyncResult, AsyncHubResult
from .view import DirectView, LoadBalancedView
#--------------------------------------------------------------------------
# Decorators for Client methods
#--------------------------------------------------------------------------
@decorator
def spin_first(f, self, *args, **kwargs):
    """Call spin() to sync state prior to calling the method."""
    # `decorator` preserves f's signature; `self` is the Client instance.
    # Spinning drains pending ZMQ messages so f sees up-to-date state.
    self.spin()
    return f(self, *args, **kwargs)
#--------------------------------------------------------------------------
# Classes
#--------------------------------------------------------------------------
class ExecuteReply(RichOutput):
    """wrapper for finished Execute results"""
    def __init__(self, msg_id, content, metadata):
        # msg_id: id of the execute_request this reply answers
        # content: the reply message's content dict (holds execution_count)
        # metadata: the Metadata dict accumulated for this msg_id
        self.msg_id = msg_id
        self._content = content
        self.execution_count = content['execution_count']
        self.metadata = metadata
    # RichOutput overrides
    @property
    def source(self):
        # source of the displayed output; None when there was no pyout
        pyout = self.metadata['pyout']
        if pyout:
            return pyout.get('source', '')
    @property
    def data(self):
        # mime-bundle dict ({mimetype: data}) of the pyout, if any
        pyout = self.metadata['pyout']
        if pyout:
            return pyout.get('data', {})
    @property
    def _metadata(self):
        # display metadata accompanying the pyout mime-bundle, if any
        pyout = self.metadata['pyout']
        if pyout:
            return pyout.get('metadata', {})
    def display(self):
        """Republish this result via IPython's display machinery."""
        from IPython.display import publish_display_data
        publish_display_data(self.source, self.data, self.metadata)
    def _repr_mime_(self, mime):
        # Return (data, metadata) or bare data for one mimetype,
        # as IPython's display formatters expect.
        if mime not in self.data:
            return
        data = self.data[mime]
        if mime in self._metadata:
            return data, self._metadata[mime]
        else:
            return data
    def __getitem__(self, key):
        # item access is delegated to the metadata dict
        return self.metadata[key]
    def __getattr__(self, key):
        # attribute access also falls through to metadata keys;
        # raise AttributeError (not KeyError) to honor the getattr protocol
        if key not in self.metadata:
            raise AttributeError(key)
        return self.metadata[key]
    def __repr__(self):
        # short single-line summary, truncating long text output to 32 chars
        pyout = self.metadata['pyout'] or {'data':{}}
        text_out = pyout['data'].get('text/plain', '')
        if len(text_out) > 32:
            text_out = text_out[:29] + '...'
        return "<ExecuteReply[%i]: %s>" % (self.execution_count, text_out)
    def _repr_pretty_(self, p, cycle):
        """IPython pretty-printer hook: render like an ``Out[engine:count]:`` prompt."""
        pyout = self.metadata['pyout'] or {'data':{}}
        text_out = pyout['data'].get('text/plain', '')
        if not text_out:
            return
        # colorize only when running inside an IPython shell with colors on
        try:
            ip = get_ipython()
        except NameError:
            colors = "NoColor"
        else:
            colors = ip.colors
        if colors == "NoColor":
            out = normal = ""
        else:
            out = TermColors.Red
            normal = TermColors.Normal
        if '\n' in text_out and not text_out.startswith('\n'):
            # add newline for multiline reprs
            text_out = '\n' + text_out
        p.text(
            out + u'Out[%i:%i]: ' % (
                self.metadata['engine_id'], self.execution_count
            ) + normal + text_out
        )
class Metadata(dict):
    """Dict subclass holding per-message bookkeeping values.
    Keys are also reachable as attributes.  The key set is fixed at
    construction time: assigning (by attribute or item) to a key that is
    not part of the schema raises, so typos cannot silently add entries.
    """
    def __init__(self, *args, **kwargs):
        dict.__init__(self)
        # the full schema of allowed keys, with their initial values
        defaults = dict(
            msg_id=None,
            submitted=None,
            started=None,
            completed=None,
            received=None,
            engine_uuid=None,
            engine_id=None,
            follow=None,
            after=None,
            status=None,
            pyin=None,
            pyout=None,
            pyerr=None,
            stdout='',
            stderr='',
            outputs=[],
            data={},
            outputs_ready=False,
        )
        self.update(defaults)
        self.update(dict(*args, **kwargs))
    def __getattr__(self, key):
        """getattr aliased to getitem"""
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)
    def __setattr__(self, key, value):
        """setattr aliased to setitem, with strict key checking"""
        if key not in self:
            raise AttributeError(key)
        self[key] = value
    def __setitem__(self, key, value):
        """strict static key enforcement"""
        if key not in self:
            raise KeyError(key)
        dict.__setitem__(self, key, value)
class Client(HasTraits):
"""A semi-synchronous client to the IPython ZMQ cluster
Parameters
----------
url_file : str/unicode; path to ipcontroller-client.json
This JSON file should contain all the information needed to connect to a cluster,
and is likely the only argument needed.
Connection information for the Hub's registration. If a json connector
file is given, then likely no further configuration is necessary.
[Default: use profile]
profile : bytes
The name of the Cluster profile to be used to find connector information.
If run from an IPython application, the default profile will be the same
as the running application, otherwise it will be 'default'.
cluster_id : str
        String id to be added to runtime files, to prevent name collisions when using
multiple clusters with a single profile simultaneously.
When set, will look for files named like: 'ipcontroller-<cluster_id>-client.json'
Since this is text inserted into filenames, typical recommendations apply:
Simple character strings are ideal, and spaces are not recommended (but
should generally work)
context : zmq.Context
Pass an existing zmq.Context instance, otherwise the client will create its own.
debug : bool
flag for lots of message printing for debug purposes
timeout : int/float
time (in seconds) to wait for connection replies from the Hub
[Default: 10]
#-------------- session related args ----------------
config : Config object
If specified, this will be relayed to the Session for configuration
username : str
set username for the session object
#-------------- ssh related args ----------------
# These are args for configuring the ssh tunnel to be used
# credentials are used to forward connections over ssh to the Controller
# Note that the ip given in `addr` needs to be relative to sshserver
# The most basic case is to leave addr as pointing to localhost (127.0.0.1),
# and set sshserver as the same machine the Controller is on. However,
# the only requirement is that sshserver is able to see the Controller
# (i.e. is within the same trusted network).
sshserver : str
A string of the form passed to ssh, i.e. 'server.tld' or 'user@server.tld:port'
If keyfile or password is specified, and this is not, it will default to
the ip given in addr.
sshkey : str; path to ssh private key file
This specifies a key to be used in ssh login, default None.
Regular default ssh keys will be used without specifying this argument.
password : str
Your ssh password to sshserver. Note that if this is left None,
you will be prompted for it if passwordless key based login is unavailable.
paramiko : bool
flag for whether to use paramiko instead of shell ssh for tunneling.
[default: True on win32, False else]
Attributes
----------
ids : list of int engine IDs
requesting the ids attribute always synchronizes
the registration state. To request ids without synchronization,
use semi-private _ids attributes.
history : list of msg_ids
a list of msg_ids, keeping track of all the execution
messages you have submitted in order.
outstanding : set of msg_ids
a set of msg_ids that have been submitted, but whose
results have not yet been received.
results : dict
a dict of all our results, keyed by msg_id
block : bool
determines default behavior when block not specified
in execution methods
Methods
-------
spin
flushes incoming results and registration state changes
control methods spin, and requesting `ids` also ensures up to date
wait
wait on one or more msg_ids
execution methods
apply
legacy: execute, run
data movement
push, pull, scatter, gather
query methods
queue_status, get_result, purge, result_status
control methods
abort, shutdown
"""
block = Bool(False)
outstanding = Set()
results = Instance('collections.defaultdict', (dict,))
metadata = Instance('collections.defaultdict', (Metadata,))
history = List()
debug = Bool(False)
_spin_thread = Any()
_stop_spinning = Any()
profile=Unicode()
def _profile_default(self):
if BaseIPythonApplication.initialized():
# an IPython app *might* be running, try to get its profile
try:
return BaseIPythonApplication.instance().profile
except (AttributeError, MultipleInstanceError):
# could be a *different* subclass of config.Application,
# which would raise one of these two errors.
return u'default'
else:
return u'default'
_outstanding_dict = Instance('collections.defaultdict', (set,))
_ids = List()
_connected=Bool(False)
_ssh=Bool(False)
_context = Instance('zmq.Context')
_config = Dict()
_engines=Instance(util.ReverseDict, (), {})
# _hub_socket=Instance('zmq.Socket')
_query_socket=Instance('zmq.Socket')
_control_socket=Instance('zmq.Socket')
_iopub_socket=Instance('zmq.Socket')
_notification_socket=Instance('zmq.Socket')
_mux_socket=Instance('zmq.Socket')
_task_socket=Instance('zmq.Socket')
_task_scheme=Unicode()
_closed = False
_ignored_control_replies=Integer(0)
_ignored_hub_replies=Integer(0)
def __new__(self, *args, **kw):
# don't raise on positional args
return HasTraits.__new__(self, **kw)
    def __init__(self, url_file=None, profile=None, profile_dir=None, ipython_dir=None,
            context=None, debug=False,
            sshserver=None, sshkey=None, password=None, paramiko=None,
            timeout=10, cluster_id=None, **extra_args
            ):
        """Connect to the cluster described by `url_file` (or `profile`).
        See the class docstring for the meaning of all arguments.  Extra
        keyword arguments are forwarded to the Session constructor.
        """
        # only pass profile to HasTraits when it is set, so the trait
        # default (_profile_default) applies otherwise
        if profile:
            super(Client, self).__init__(debug=debug, profile=profile)
        else:
            super(Client, self).__init__(debug=debug)
        if context is None:
            context = zmq.Context.instance()
        self._context = context
        self._stop_spinning = Event()
        if 'url_or_file' in extra_args:
            url_file = extra_args['url_or_file']
            warnings.warn("url_or_file arg no longer supported, use url_file", DeprecationWarning)
        if url_file and util.is_url(url_file):
            raise ValueError("single urls cannot be specified, url-files must be used.")
        self._setup_profile_dir(self.profile, profile_dir, ipython_dir)
        # derive the connection-file path from the profile dir when not given
        if self._cd is not None:
            if url_file is None:
                if not cluster_id:
                    client_json = 'ipcontroller-client.json'
                else:
                    client_json = 'ipcontroller-%s-client.json' % cluster_id
                url_file = pjoin(self._cd.security_dir, client_json)
        if url_file is None:
            raise ValueError(
                "I can't find enough information to connect to a hub!"
                " Please specify at least one of url_file or profile."
            )
        with open(url_file) as f:
            cfg = json.load(f)
        self._task_scheme = cfg['task_scheme']
        # sync defaults from args, json:
        if sshserver:
            cfg['ssh'] = sshserver
        location = cfg.setdefault('location', None)
        proto,addr = cfg['interface'].split('://')
        addr = util.disambiguate_ip_address(addr, location)
        cfg['interface'] = "%s://%s" % (proto, addr)
        # turn interface,port into full urls:
        for key in ('control', 'task', 'mux', 'iopub', 'notification', 'registration'):
            cfg[key] = cfg['interface'] + ':%i' % cfg[key]
        url = cfg['registration']
        if location is not None and addr == localhost():
            # location specified, and connection is expected to be local
            if not is_local_ip(location) and not sshserver:
                # load ssh from JSON *only* if the controller is not on
                # this machine
                sshserver=cfg['ssh']
            if not is_local_ip(location) and not sshserver:
                # warn if no ssh specified, but SSH is probably needed
                # This is only a warning, because the most likely cause
                # is a local Controller on a laptop whose IP is dynamic
                warnings.warn("""
            Controller appears to be listening on localhost, but not on this machine.
            If this is true, you should specify Client(...,sshserver='you@%s')
            or instruct your controller to listen on an external IP."""%location,
                RuntimeWarning)
        elif not sshserver:
            # otherwise sync with cfg
            sshserver = cfg['ssh']
        self._config = cfg
        self._ssh = bool(sshserver or sshkey or password)
        if self._ssh and sshserver is None:
            # default to ssh via localhost
            sshserver = addr
        if self._ssh and password is None:
            if tunnel.try_passwordless_ssh(sshserver, sshkey, paramiko):
                password=False
            else:
                password = getpass("SSH Password for %s: "%sshserver)
        ssh_kwargs = dict(keyfile=sshkey, password=password, paramiko=paramiko)
        # configure and construct the session
        try:
            extra_args['packer'] = cfg['pack']
            extra_args['unpacker'] = cfg['unpack']
            extra_args['key'] = cast_bytes(cfg['key'])
            extra_args['signature_scheme'] = cfg['signature_scheme']
        except KeyError as exc:
            msg = '\n'.join([
                "Connection file is invalid (missing '{}'), possibly from an old version of IPython.",
                "If you are reusing connection files, remove them and start ipcontroller again."
            ])
            # NOTE(review): `exc.message` is Python 2 only -- on Python 3 this
            # line itself raises AttributeError; consider str(exc). TODO confirm
            raise ValueError(msg.format(exc.message))
        self.session = Session(**extra_args)
        # the query socket connects immediately; the rest are set up in _connect()
        self._query_socket = self._context.socket(zmq.DEALER)
        if self._ssh:
            tunnel.tunnel_connection(self._query_socket, cfg['registration'], sshserver, **ssh_kwargs)
        else:
            self._query_socket.connect(cfg['registration'])
        self.session.debug = self.debug
        self._notification_handlers = {'registration_notification' : self._register_engine,
                                    'unregistration_notification' : self._unregister_engine,
                                    'shutdown_notification' : lambda msg: self.close(),
                                    }
        self._queue_handlers = {'execute_reply' : self._handle_execute_reply,
                                'apply_reply' : self._handle_apply_reply}
        try:
            self._connect(sshserver, ssh_kwargs, timeout)
        except:
            # don't leave a half-connected client (and its sockets) behind
            self.close(linger=0)
            raise
        # last step: setup magics, if we are in IPython:
        try:
            ip = get_ipython()
        except NameError:
            return
        else:
            if 'px' not in ip.magics_manager.magics:
                # in IPython but we are the first Client.
                # activate a default view for parallel magics.
                self.activate()
    def __del__(self):
        """cleanup sockets, but _not_ context."""
        # the sockets belong to this client; the zmq Context may be shared
        self.close()
    def _setup_profile_dir(self, profile, profile_dir, ipython_dir):
        """Locate the profile directory and store it on ``self._cd``.
        Tries an explicit `profile_dir` path first, then a named `profile`
        under `ipython_dir`.  Leaves ``self._cd`` as None if nothing is found
        (the caller then requires an explicit url_file).
        """
        if ipython_dir is None:
            ipython_dir = get_ipython_dir()
        if profile_dir is not None:
            try:
                self._cd = ProfileDir.find_profile_dir(profile_dir)
                return
            except ProfileDirError:
                pass
        elif profile is not None:
            try:
                self._cd = ProfileDir.find_profile_dir_by_name(
                    ipython_dir, profile)
                return
            except ProfileDirError:
                pass
        self._cd = None
def _update_engines(self, engines):
"""Update our engines dict and _ids from a dict of the form: {id:uuid}."""
for k,v in iteritems(engines):
eid = int(k)
if eid not in self._engines:
self._ids.append(eid)
self._engines[eid] = v
self._ids = sorted(self._ids)
if sorted(self._engines.keys()) != list(range(len(self._engines))) and \
self._task_scheme == 'pure' and self._task_socket:
self._stop_scheduling_tasks()
def _stop_scheduling_tasks(self):
"""Stop scheduling tasks because an engine has been unregistered
from a pure ZMQ scheduler.
"""
self._task_socket.close()
self._task_socket = None
msg = "An engine has been unregistered, and we are using pure " +\
"ZMQ task scheduling. Task farming will be disabled."
if self.outstanding:
msg += " If you were running tasks when this happened, " +\
"some `outstanding` msg_ids may never resolve."
warnings.warn(msg, RuntimeWarning)
    def _build_targets(self, targets):
        """Turn valid target IDs or 'all' into two lists:
        (uuids, int_ids).
        The uuid list (cast to bytes) is used for socket routing; the int
        list is used wherever the Hub wants engine ids.
        """
        if not self._ids:
            # flush notification socket if no engines yet, just in case
            if not self.ids:
                raise error.NoEnginesRegistered("Can't build targets without any engines")
        if targets is None:
            targets = self._ids
        elif isinstance(targets, string_types):
            if targets.lower() == 'all':
                targets = self._ids
            else:
                raise TypeError("%r not valid str target, must be 'all'"%(targets))
        elif isinstance(targets, int):
            # negative ints index from the end of the registered-id list
            if targets < 0:
                targets = self.ids[targets]
            if targets not in self._ids:
                raise IndexError("No such engine: %i"%targets)
            targets = [targets]
        if isinstance(targets, slice):
            # slices select by *position* in the sorted id list, not by engine id
            indices = list(range(len(self._ids))[targets])
            ids = self.ids
            targets = [ ids[i] for i in indices ]
        if not isinstance(targets, (tuple, list, xrange)):
            raise TypeError("targets by int/slice/collection of ints only, not %s"%(type(targets)))
        return [cast_bytes(self._engines[t]) for t in targets], list(targets)
    def _connect(self, sshserver, ssh_kwargs, timeout):
        """setup all our socket connections to the cluster. This is called from
        __init__."""
        # Maybe allow reconnecting?
        if self._connected:
            return
        self._connected=True
        def connect_socket(s, url):
            # route through the ssh tunnel when one is configured
            if self._ssh:
                return tunnel.tunnel_connection(s, url, sshserver, **ssh_kwargs)
            else:
                return s.connect(url)
        self.session.send(self._query_socket, 'connection_request')
        # use Poller because zmq.select has wrong units in pyzmq 2.1.7
        poller = zmq.Poller()
        poller.register(self._query_socket, zmq.POLLIN)
        # poll expects milliseconds, timeout is seconds
        evts = poller.poll(timeout*1000)
        if not evts:
            raise error.TimeoutError("Hub connection request timed out")
        idents,msg = self.session.recv(self._query_socket,mode=0)
        if self.debug:
            pprint(msg)
        content = msg['content']
        # self._config['registration'] = dict(content)
        cfg = self._config
        if content['status'] == 'ok':
            # DEALER sockets for direct (mux) and load-balanced (task) requests
            self._mux_socket = self._context.socket(zmq.DEALER)
            connect_socket(self._mux_socket, cfg['mux'])
            self._task_socket = self._context.socket(zmq.DEALER)
            connect_socket(self._task_socket, cfg['task'])
            # SUB sockets subscribe to everything (empty topic prefix)
            self._notification_socket = self._context.socket(zmq.SUB)
            self._notification_socket.setsockopt(zmq.SUBSCRIBE, b'')
            connect_socket(self._notification_socket, cfg['notification'])
            self._control_socket = self._context.socket(zmq.DEALER)
            connect_socket(self._control_socket, cfg['control'])
            self._iopub_socket = self._context.socket(zmq.SUB)
            self._iopub_socket.setsockopt(zmq.SUBSCRIBE, b'')
            connect_socket(self._iopub_socket, cfg['iopub'])
            # seed the engine table from the Hub's reply
            self._update_engines(dict(content['engines']))
        else:
            self._connected = False
            raise Exception("Failed to connect!")
#--------------------------------------------------------------------------
# handlers and callbacks for incoming messages
#--------------------------------------------------------------------------
def _unwrap_exception(self, content):
"""unwrap exception, and remap engine_id to int."""
e = error.unwrap_exception(content)
# print e.traceback
if e.engine_info:
e_uuid = e.engine_info['engine_uuid']
eid = self._engines[e_uuid]
e.engine_info['engine_id'] = eid
return e
    def _extract_metadata(self, msg):
        """Build a Metadata-shaped dict from a reply message's headers/content."""
        header = msg['header']
        parent = msg['parent_header']
        msg_meta = msg['metadata']
        content = msg['content']
        md = {'msg_id' : parent['msg_id'],
              'received' : datetime.now(),
              'engine_uuid' : msg_meta.get('engine', None),
              'follow' : msg_meta.get('follow', []),
              'after' : msg_meta.get('after', []),
              'status' : content['status'],
            }
        # map the engine's uuid back to its int id when possible
        if md['engine_uuid'] is not None:
            md['engine_id'] = self._engines.get(md['engine_uuid'], None)
        # timestamps are optional in the message; only set what is present
        if 'date' in parent:
            md['submitted'] = parent['date']
        if 'started' in msg_meta:
            md['started'] = parse_date(msg_meta['started'])
        if 'date' in header:
            md['completed'] = header['date']
        return md
def _register_engine(self, msg):
"""Register a new engine, and update our connection info."""
content = msg['content']
eid = content['id']
d = {eid : content['uuid']}
self._update_engines(d)
    def _unregister_engine(self, msg):
        """Unregister an engine that has died."""
        content = msg['content']
        eid = int(content['id'])
        if eid in self._ids:
            # drop the engine and flag any of its in-flight tasks as stranded
            self._ids.remove(eid)
            uuid = self._engines.pop(eid)
            self._handle_stranded_msgs(eid, uuid)
        if self._task_socket and self._task_scheme == 'pure':
            # pure ZMQ scheduling cannot survive an engine departure
            self._stop_scheduling_tasks()
def _handle_stranded_msgs(self, eid, uuid):
"""Handle messages known to be on an engine when the engine unregisters.
It is possible that this will fire prematurely - that is, an engine will
go down after completing a result, and the client will be notified
of the unregistration and later receive the successful result.
"""
outstanding = self._outstanding_dict[uuid]
for msg_id in list(outstanding):
if msg_id in self.results:
# we already
continue
try:
raise error.EngineError("Engine %r died while running task %r"%(eid, msg_id))
except:
content = error.wrap_exception()
# build a fake message:
msg = self.session.msg('apply_reply', content=content)
msg['parent_header']['msg_id'] = msg_id
msg['metadata']['engine'] = uuid
self._handle_apply_reply(msg)
    def _handle_execute_reply(self, msg):
        """Save the reply to an execute_request into our results.
        execute messages are never actually used. apply is used instead.
        """
        parent = msg['parent_header']
        msg_id = parent['msg_id']
        if msg_id not in self.outstanding:
            if msg_id in self.history:
                print("got stale result: %s"%msg_id)
            else:
                print("got unknown result: %s"%msg_id)
        else:
            self.outstanding.remove(msg_id)
        content = msg['content']
        header = msg['header']
        # construct metadata:
        md = self.metadata[msg_id]
        md.update(self._extract_metadata(msg))
        # is this redundant?
        self.metadata[msg_id] = md
        # also clear the per-engine outstanding bookkeeping
        e_outstanding = self._outstanding_dict[md['engine_uuid']]
        if msg_id in e_outstanding:
            e_outstanding.remove(msg_id)
        # construct result:
        if content['status'] == 'ok':
            self.results[msg_id] = ExecuteReply(msg_id, content, md)
        elif content['status'] == 'aborted':
            self.results[msg_id] = error.TaskAborted(msg_id)
        elif content['status'] == 'resubmitted':
            # TODO: handle resubmission
            pass
        else:
            # error status: store the unwrapped remote exception as the result
            self.results[msg_id] = self._unwrap_exception(content)
    def _handle_apply_reply(self, msg):
        """Save the reply to an apply_request into our results."""
        parent = msg['parent_header']
        msg_id = parent['msg_id']
        if msg_id not in self.outstanding:
            if msg_id in self.history:
                print("got stale result: %s"%msg_id)
                print(self.results[msg_id])
                print(msg)
            else:
                print("got unknown result: %s"%msg_id)
        else:
            self.outstanding.remove(msg_id)
        content = msg['content']
        header = msg['header']
        # construct metadata:
        md = self.metadata[msg_id]
        md.update(self._extract_metadata(msg))
        # is this redundant?
        self.metadata[msg_id] = md
        # also clear the per-engine outstanding bookkeeping
        e_outstanding = self._outstanding_dict[md['engine_uuid']]
        if msg_id in e_outstanding:
            e_outstanding.remove(msg_id)
        # construct result:
        if content['status'] == 'ok':
            # the actual return value travels in the message's buffers
            self.results[msg_id] = serialize.unserialize_object(msg['buffers'])[0]
        elif content['status'] == 'aborted':
            self.results[msg_id] = error.TaskAborted(msg_id)
        elif content['status'] == 'resubmitted':
            # TODO: handle resubmission
            pass
        else:
            # error status: store the unwrapped remote exception as the result
            self.results[msg_id] = self._unwrap_exception(content)
    def _flush_notifications(self):
        """Flush notifications of engine registrations waiting
        in ZMQ queue."""
        # session.recv returns msg=None once the queue is empty (NOBLOCK)
        idents,msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK)
        while msg is not None:
            if self.debug:
                pprint(msg)
            msg_type = msg['header']['msg_type']
            handler = self._notification_handlers.get(msg_type, None)
            if handler is None:
                raise Exception("Unhandled message type: %s" % msg_type)
            else:
                handler(msg)
            idents,msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK)
    def _flush_results(self, sock):
        """Flush task or queue results waiting in ZMQ queue."""
        # drain non-blocking until recv yields msg=None (queue empty)
        idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
        while msg is not None:
            if self.debug:
                pprint(msg)
            msg_type = msg['header']['msg_type']
            handler = self._queue_handlers.get(msg_type, None)
            if handler is None:
                raise Exception("Unhandled message type: %s" % msg_type)
            else:
                handler(msg)
            idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
    def _flush_control(self, sock):
        """Flush replies from the control channel waiting
        in the ZMQ queue.
        Currently: ignore them."""
        if self._ignored_control_replies <= 0:
            return
        # each drained message decrements the expected-reply counter
        idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
        while msg is not None:
            self._ignored_control_replies -= 1
            if self.debug:
                pprint(msg)
            idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
    def _flush_ignored_control(self):
        """flush ignored control replies"""
        # blocking recv is safe here: the counter says exactly how many
        # replies are still owed to us
        while self._ignored_control_replies > 0:
            self.session.recv(self._control_socket)
            self._ignored_control_replies -= 1
    def _flush_ignored_hub_replies(self):
        """Drain and discard any pending replies on the Hub query socket."""
        ident,msg = self.session.recv(self._query_socket, mode=zmq.NOBLOCK)
        while msg is not None:
            ident,msg = self.session.recv(self._query_socket, mode=zmq.NOBLOCK)
    def _flush_iopub(self, sock):
        """Flush replies from the iopub channel waiting
        in the ZMQ queue.
        """
        idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
        while msg is not None:
            if self.debug:
                pprint(msg)
            parent = msg['parent_header']
            # ignore IOPub messages with no parent.
            # Caused by print statements or warnings from before the first execution.
            if not parent:
                idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
                continue
            msg_id = parent['msg_id']
            content = msg['content']
            header = msg['header']
            msg_type = msg['header']['msg_type']
            # init metadata:
            md = self.metadata[msg_id]
            if msg_type == 'stream':
                # accumulate stream output (stdout/stderr) onto what we have
                name = content['name']
                s = md[name] or ''
                md[name] = s + content['data']
            elif msg_type == 'pyerr':
                md.update({'pyerr' : self._unwrap_exception(content)})
            elif msg_type == 'pyin':
                md.update({'pyin' : content['code']})
            elif msg_type == 'display_data':
                md['outputs'].append(content)
            elif msg_type == 'pyout':
                md['pyout'] = content
            elif msg_type == 'data_message':
                # datapub: merge the published data into the metadata dict
                data, remainder = serialize.unserialize_object(msg['buffers'])
                md['data'].update(data)
            elif msg_type == 'status':
                # idle message comes after all outputs
                if content['execution_state'] == 'idle':
                    md['outputs_ready'] = True
            else:
                # unhandled msg_type (status, etc.)
                pass
            # redundant?
            self.metadata[msg_id] = md
            idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
#--------------------------------------------------------------------------
# len, getitem
#--------------------------------------------------------------------------
    def __len__(self):
        """len(client) returns # of engines."""
        # `ids` is the synchronizing property, so the count is always current
        return len(self.ids)
def __getitem__(self, key):
"""index access returns DirectView multiplexer objects
Must be int, slice, or list/tuple/xrange of ints"""
if not isinstance(key, (int, slice, tuple, list, xrange)):
raise TypeError("key by int/slice/iterable of ints only, not %s"%(type(key)))
else:
return self.direct_view(key)
#--------------------------------------------------------------------------
# Begin public methods
#--------------------------------------------------------------------------
    @property
    def ids(self):
        """Always up-to-date ids property."""
        # flushing notifications first picks up any (un)registrations
        self._flush_notifications()
        # always copy:
        return list(self._ids)
    def activate(self, targets='all', suffix=''):
        """Create a DirectView and register it with IPython magics
        Defines the magics `%px, %autopx, %pxresult, %%px`
        Parameters
        ----------
        targets: int, list of ints, or 'all'
            The engines on which the view's magics will run
        suffix: str [default: '']
            The suffix, if any, for the magics.  This allows you to have
            multiple views associated with parallel magics at the same time.
            e.g. ``rc.activate(targets=0, suffix='0')`` will give you
            the magics ``%px0``, ``%pxresult0``, etc. for running magics just
            on engine 0.
        Returns
        -------
        view : DirectView
            The (blocking) view whose magics were registered.
        """
        view = self.direct_view(targets)
        # parallel magics are synchronous, so the view must block
        view.block = True
        view.activate(suffix)
        return view
    def close(self, linger=None):
        """Close my zmq Sockets
        If `linger`, set the zmq LINGER socket option,
        which allows discarding of messages.
        """
        if self._closed:
            return
        # halt the background spin thread before tearing sockets down
        self.stop_spin_thread()
        # every trait whose name ends in 'socket' holds a zmq socket (or None)
        snames = [ trait for trait in self.trait_names() if trait.endswith("socket") ]
        for name in snames:
            socket = getattr(self, name)
            if socket is not None and not socket.closed:
                if linger is not None:
                    socket.close(linger=linger)
                else:
                    socket.close()
        self._closed = True
def _spin_every(self, interval=1):
"""target func for use in spin_thread"""
while True:
if self._stop_spinning.is_set():
return
time.sleep(interval)
self.spin()
    def spin_thread(self, interval=1):
        """call Client.spin() in a background thread on some regular interval
        This helps ensure that messages don't pile up too much in the zmq queue
        while you are working on other things, or just leaving an idle terminal.
        It also helps limit potential padding of the `received` timestamp
        on AsyncResult objects, used for timings.
        Parameters
        ----------
        interval : float, optional
            The interval on which to spin the client in the background thread
            (simply passed to time.sleep).
        Notes
        -----
        For precision timing, you may want to use this method to put a bound
        on the jitter (in seconds) in `received` timestamps used
        in AsyncResult.wall_time.
        """
        if self._spin_thread is not None:
            # restart: stop any existing spinner before launching a new one
            self.stop_spin_thread()
        self._stop_spinning.clear()
        self._spin_thread = Thread(target=self._spin_every, args=(interval,))
        # daemon thread: don't keep the process alive on exit
        self._spin_thread.daemon = True
        self._spin_thread.start()
def stop_spin_thread(self):
"""stop background spin_thread, if any"""
if self._spin_thread is not None:
self._stop_spinning.set()
self._spin_thread.join()
self._spin_thread = None
    def spin(self):
        """Flush any registration notifications and execution results
        waiting in the ZMQ queue.
        """
        # drain every channel that may hold pending messages; each check
        # skips channels whose sockets have not been set up (or were closed)
        if self._notification_socket:
            self._flush_notifications()
        if self._iopub_socket:
            self._flush_iopub(self._iopub_socket)
        if self._mux_socket:
            self._flush_results(self._mux_socket)
        if self._task_socket:
            self._flush_results(self._task_socket)
        if self._control_socket:
            self._flush_control(self._control_socket)
        if self._query_socket:
            self._flush_ignored_hub_replies()
def wait(self, jobs=None, timeout=-1):
    """Block until one or more `jobs` complete, for up to `timeout` seconds.

    Parameters
    ----------
    jobs : int, str, or list of ints and/or strs, or one or more AsyncResult objects
        ints are indices to self.history
        strs are msg_ids
        default: wait on all outstanding messages
    timeout : float
        a time in seconds, after which to give up.
        default is -1, which means no timeout

    Returns
    -------
    True : when all msg_ids are done
    False : timeout reached, some msg_ids still outstanding
    """
    start = time.time()
    if jobs is None:
        theids = self.outstanding
    else:
        if isinstance(jobs, string_types + (int, AsyncResult)):
            jobs = [jobs]
        theids = set()
        for job in jobs:
            if isinstance(job, AsyncResult):
                theids.update(job.msg_ids)
            else:
                if isinstance(job, int):
                    # history index -> msg_id
                    job = self.history[job]
                theids.add(job)
    # nothing requested is still outstanding: done already
    if not theids.intersection(self.outstanding):
        return True
    self.spin()
    while theids.intersection(self.outstanding):
        elapsed = time.time() - start
        if 0 <= timeout < elapsed:
            break
        time.sleep(1e-3)
        self.spin()
    return len(theids.intersection(self.outstanding)) == 0
#--------------------------------------------------------------------------
# Control methods
#--------------------------------------------------------------------------
@spin_first
def clear(self, targets=None, block=None):
    """Clear the namespace in target(s).

    Parameters
    ----------
    targets : int/str/list of ints/strs, optional
        The engines whose namespaces are cleared (default: all).
    block : bool, optional
        Whether to wait for the replies (default: ``self.block``).

    Raises
    ------
    The unwrapped remote exception, if any engine reports a non-ok
    status (only when blocking).
    """
    block = self.block if block is None else block
    targets = self._build_targets(targets)[0]
    # one clear_request per engine identity
    for t in targets:
        self.session.send(self._control_socket, 'clear_request', content={}, ident=t)
    error = False
    if block:
        self._flush_ignored_control()
        # collect one reply per engine; remember the last failure to raise
        for i in range(len(targets)):
            idents,msg = self.session.recv(self._control_socket,0)
            if self.debug:
                pprint(msg)
            if msg['content']['status'] != 'ok':
                error = self._unwrap_exception(msg['content'])
    else:
        # non-blocking: replies are drained later by _flush_ignored_control
        self._ignored_control_replies += len(targets)
    if error:
        raise error
@spin_first
def abort(self, jobs=None, targets=None, block=None):
    """Abort specific jobs from the execution queues of target(s).

    This is a mechanism to prevent jobs that have already been submitted
    from executing.

    Parameters
    ----------
    jobs : msg_id, list of msg_ids, or AsyncResult
        The jobs to be aborted
        If unspecified/None: abort all outstanding jobs.
    targets : int/str/list of ints/strs, optional
        The engines whose queues should drop the jobs (default: all).
    block : bool, optional
        Whether to wait for the abort replies (default: ``self.block``).

    Raises
    ------
    TypeError
        If an entry in `jobs` is neither a msg_id str nor an AsyncResult.
    """
    block = self.block if block is None else block
    jobs = jobs if jobs is not None else list(self.outstanding)
    targets = self._build_targets(targets)[0]
    msg_ids = []
    # normalize: a bare msg_id or AsyncResult becomes a one-element list
    if isinstance(jobs, string_types + (AsyncResult,)):
        jobs = [jobs]
    bad_ids = [obj for obj in jobs if not isinstance(obj, string_types + (AsyncResult,))]
    if bad_ids:
        raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
    # flatten AsyncResults into their msg_ids
    for j in jobs:
        if isinstance(j, AsyncResult):
            msg_ids.extend(j.msg_ids)
        else:
            msg_ids.append(j)
    content = dict(msg_ids=msg_ids)
    for t in targets:
        self.session.send(self._control_socket, 'abort_request',
                content=content, ident=t)
    error = False
    if block:
        self._flush_ignored_control()
        # one reply per engine; remember the last failure to raise
        for i in range(len(targets)):
            idents,msg = self.session.recv(self._control_socket,0)
            if self.debug:
                pprint(msg)
            if msg['content']['status'] != 'ok':
                error = self._unwrap_exception(msg['content'])
    else:
        # non-blocking: replies are drained later by _flush_ignored_control
        self._ignored_control_replies += len(targets)
    if error:
        raise error
@spin_first
def shutdown(self, targets='all', restart=False, hub=False, block=None):
    """Terminates one or more engine processes, optionally including the hub.

    Parameters
    ----------
    targets: list of ints or 'all' [default: all]
        Which engines to shutdown.
    hub: bool [default: False]
        Whether to include the Hub.  hub=True implies targets='all'.
    block: bool [default: self.block]
        Whether to wait for clean shutdown replies or not.
    restart: bool [default: False]
        NOT IMPLEMENTED
        whether to restart engines after shutting them down.
    """
    from IPython.parallel.error import NoEnginesRegistered
    if restart:
        raise NotImplementedError("Engine restart is not yet implemented")
    block = self.block if block is None else block
    if hub:
        # shutting down the hub implies shutting down every engine
        targets = 'all'
    try:
        targets = self._build_targets(targets)[0]
    except NoEnginesRegistered:
        # no engines registered: only the hub (maybe) needs the request
        targets = []
    for t in targets:
        self.session.send(self._control_socket, 'shutdown_request',
                    content={'restart':restart},ident=t)
    error = False
    # when killing the hub we must collect replies even if block is False,
    # otherwise the hub may die before engine replies are drained
    if block or hub:
        self._flush_ignored_control()
        for i in range(len(targets)):
            idents,msg = self.session.recv(self._control_socket, 0)
            if self.debug:
                pprint(msg)
            if msg['content']['status'] != 'ok':
                error = self._unwrap_exception(msg['content'])
    else:
        self._ignored_control_replies += len(targets)
    if hub:
        # brief pause so engine shutdown lands before the hub exits
        time.sleep(0.25)
        self.session.send(self._query_socket, 'shutdown_request')
        idents,msg = self.session.recv(self._query_socket, 0)
        if self.debug:
            pprint(msg)
        if msg['content']['status'] != 'ok':
            error = self._unwrap_exception(msg['content'])
    if error:
        raise error
#--------------------------------------------------------------------------
# Execution related methods
#--------------------------------------------------------------------------
def _maybe_raise(self, result):
    """Raise *result* if it is a RemoteError; otherwise return it unchanged."""
    if not isinstance(result, error.RemoteError):
        return result
    raise result
def send_apply_request(self, socket, f, args=None, kwargs=None, metadata=None, track=False,
                        ident=None):
    """construct and send an apply message via a socket.

    This is the principal method with which all engine execution is performed by views.

    Parameters
    ----------
    socket : zmq.Socket
        The socket on which to send (direct or load-balanced).
    f : callable or Reference
        The function to be called remotely.
    args : tuple or list, optional
    kwargs : dict, optional
    metadata : dict, optional
        Extra metadata attached to the request.
    track : bool, optional
        Whether the zmq send should be message-tracked.
    ident : bytes or list of bytes, optional
        Route the request to a specific engine identity.

    Returns
    -------
    dict
        The sent message; its header carries the new msg_id.

    Raises
    ------
    RuntimeError
        If this client's sockets have been closed.
    TypeError
        If f/args/kwargs/metadata have invalid types.
    """
    if self._closed:
        raise RuntimeError("Client cannot be used after its sockets have been closed")
    # defaults:
    args = args if args is not None else []
    kwargs = kwargs if kwargs is not None else {}
    metadata = metadata if metadata is not None else {}
    # validate arguments
    if not callable(f) and not isinstance(f, Reference):
        raise TypeError("f must be callable, not %s"%type(f))
    if not isinstance(args, (tuple, list)):
        raise TypeError("args must be tuple or list, not %s"%type(args))
    if not isinstance(kwargs, dict):
        raise TypeError("kwargs must be dict, not %s"%type(kwargs))
    if not isinstance(metadata, dict):
        raise TypeError("metadata must be dict, not %s"%type(metadata))
    # serialize f, args, kwargs into the message buffers
    bufs = serialize.pack_apply_message(f, args, kwargs,
        buffer_threshold=self.session.buffer_threshold,
        item_threshold=self.session.item_threshold,
    )
    msg = self.session.send(socket, "apply_request", buffers=bufs, ident=ident,
                        metadata=metadata, track=track)
    msg_id = msg['header']['msg_id']
    # bookkeeping: mark the request outstanding and record submission time
    self.outstanding.add(msg_id)
    if ident:
        # possibly routed to a specific engine
        if isinstance(ident, list):
            ident = ident[-1]
        if ident in self._engines.values():
            # save for later, in case of engine death
            self._outstanding_dict[ident].add(msg_id)
    self.history.append(msg_id)
    self.metadata[msg_id]['submitted'] = datetime.now()
    return msg
def send_execute_request(self, socket, code, silent=True, metadata=None, ident=None):
    """construct and send an execute request via a socket.

    Parameters
    ----------
    socket : zmq.Socket
        The socket on which to send.
    code : str
        The code to execute remotely.
    silent : bool, optional
        Suppress displayhook output for this execution (default: True).
    metadata : dict, optional
        Extra metadata attached to the request.
    ident : bytes or list of bytes, optional
        Route the request to a specific engine identity.

    Returns
    -------
    dict
        The sent message; its header carries the new msg_id.

    Raises
    ------
    RuntimeError
        If this client's sockets have been closed.
    TypeError
        If code is not text or metadata is not a dict.
    """
    if self._closed:
        raise RuntimeError("Client cannot be used after its sockets have been closed")
    # defaults:
    metadata = metadata if metadata is not None else {}
    # validate arguments
    if not isinstance(code, string_types):
        raise TypeError("code must be text, not %s" % type(code))
    if not isinstance(metadata, dict):
        raise TypeError("metadata must be dict, not %s" % type(metadata))
    content = dict(code=code, silent=bool(silent), user_variables=[], user_expressions={})
    msg = self.session.send(socket, "execute_request", content=content, ident=ident,
                        metadata=metadata)
    msg_id = msg['header']['msg_id']
    # bookkeeping: mark the request outstanding and record submission time
    self.outstanding.add(msg_id)
    if ident:
        # possibly routed to a specific engine
        if isinstance(ident, list):
            ident = ident[-1]
        if ident in self._engines.values():
            # save for later, in case of engine death
            self._outstanding_dict[ident].add(msg_id)
    self.history.append(msg_id)
    self.metadata[msg_id]['submitted'] = datetime.now()
    return msg
#--------------------------------------------------------------------------
# construct a View object
#--------------------------------------------------------------------------
def load_balanced_view(self, targets=None):
    """construct a LoadBalancedView object.

    If no arguments are specified, create a LoadBalancedView
    using all engines.

    Parameters
    ----------
    targets: list,slice,int,etc. [default: use all engines]
        The subset of engines across which to load-balance
    """
    # 'all' and None both mean: let the scheduler choose among all engines
    if targets == 'all':
        targets = None
    if targets is not None:
        targets = self._build_targets(targets)[1]
    return LoadBalancedView(client=self, socket=self._task_socket, targets=targets)
def direct_view(self, targets='all'):
    """construct a DirectView object.

    If no targets are specified, create a DirectView using all engines.

    rc.direct_view('all') is distinguished from rc[:] in that 'all' will
    evaluate the target engines at each execution, whereas rc[:] will connect to
    all *current* engines, and that list will not change.  That is, 'all' will
    always use all engines, whereas rc[:] will not use engines added after the
    DirectView is constructed.

    Parameters
    ----------
    targets: list,slice,int,etc. [default: use all engines]
        The engines to use for the View
    """
    want_single = isinstance(targets, int)
    if targets == 'all':
        # pass 'all' through untouched so it is lazily re-evaluated
        # against the current engine set at each execution
        resolved = targets
    else:
        resolved = self._build_targets(targets)[1]
        if want_single:
            # a bare int means a view on exactly one engine
            resolved = resolved[0]
    return DirectView(client=self, socket=self._mux_socket, targets=resolved)
#--------------------------------------------------------------------------
# Query methods
#--------------------------------------------------------------------------
@spin_first
def get_result(self, indices_or_msg_ids=None, block=None):
    """Retrieve a result by msg_id or history index, wrapped in an AsyncResult object.

    If the client already has the results, no request to the Hub will be made.

    This is a convenient way to construct AsyncResult objects, which are wrappers
    that include metadata about execution, and allow for awaiting results that
    were not submitted by this Client.

    It can also be a convenient way to retrieve the metadata associated with
    blocking execution, since it always retrieves the metadata as well.

    Examples
    --------
    ::

        In [10]: r = client.apply()

    Parameters
    ----------
    indices_or_msg_ids : integer history index, str msg_id, or list of either
        The indices or msg_ids of indices to be retrieved
    block : bool
        Whether to wait for the result to be done

    Returns
    -------
    AsyncResult
        A single AsyncResult object will always be returned.
    AsyncHubResult
        A subclass of AsyncResult that retrieves results from the Hub
    """
    block = self.block if block is None else block
    if indices_or_msg_ids is None:
        # default: the most recent submission
        indices_or_msg_ids = -1
    single_result = False
    if not isinstance(indices_or_msg_ids, (list,tuple)):
        indices_or_msg_ids = [indices_or_msg_ids]
        single_result = True
    theids = []
    for id in indices_or_msg_ids:
        if isinstance(id, int):
            # history index -> msg_id
            id = self.history[id]
        if not isinstance(id, string_types):
            raise TypeError("indices must be str or int, not %r"%id)
        theids.append(id)
    local_ids = [msg_id for msg_id in theids if (msg_id in self.outstanding or msg_id in self.results)]
    remote_ids = [msg_id for msg_id in theids if msg_id not in local_ids]
    # given a single msg_id initially, get_result should return the result
    # itself, not a length-one list
    if single_result:
        theids = theids[0]
    if remote_ids:
        # at least one result must come from the Hub
        ar = AsyncHubResult(self, msg_ids=theids)
    else:
        ar = AsyncResult(self, msg_ids=theids)
    if block:
        ar.wait()
    return ar
@spin_first
def resubmit(self, indices_or_msg_ids=None, metadata=None, block=None):
    """Resubmit one or more tasks.

    in-flight tasks may not be resubmitted.

    Parameters
    ----------
    indices_or_msg_ids : integer history index, str msg_id, or list of either
        The indices or msg_ids of indices to be retrieved
    block : bool
        Whether to wait for the result to be done

    Returns
    -------
    AsyncHubResult
        A subclass of AsyncResult that retrieves results from the Hub
    """
    block = self.block if block is None else block
    if indices_or_msg_ids is None:
        # default: the most recent submission
        indices_or_msg_ids = -1
    if not isinstance(indices_or_msg_ids, (list,tuple)):
        indices_or_msg_ids = [indices_or_msg_ids]
    theids = []
    for id in indices_or_msg_ids:
        if isinstance(id, int):
            # history index -> msg_id
            id = self.history[id]
        if not isinstance(id, string_types):
            raise TypeError("indices must be str or int, not %r"%id)
        theids.append(id)
    content = dict(msg_ids = theids)
    self.session.send(self._query_socket, 'resubmit_request', content)
    # wait for the hub's reply to arrive, then read it without blocking
    zmq.select([self._query_socket], [], [])
    idents,msg = self.session.recv(self._query_socket, zmq.NOBLOCK)
    if self.debug:
        pprint(msg)
    content = msg['content']
    if content['status'] != 'ok':
        raise self._unwrap_exception(content)
    # the hub assigns fresh msg_ids to resubmitted tasks
    mapping = content['resubmitted']
    new_ids = [ mapping[msg_id] for msg_id in theids ]
    ar = AsyncHubResult(self, msg_ids=new_ids)
    if block:
        ar.wait()
    return ar
@spin_first
def result_status(self, msg_ids, status_only=True):
    """Check on the status of the result(s) of the apply request with `msg_ids`.

    If status_only is False, then the actual results will be retrieved, else
    only the status of the results will be checked.

    Parameters
    ----------
    msg_ids : list of msg_ids
        if int:
            Passed as index to self.history for convenience.
    status_only : bool (default: True)
        if False:
            Retrieve the actual results of completed tasks.

    Returns
    -------
    results : dict
        There will always be the keys 'pending' and 'completed', which will
        be lists of msg_ids that are incomplete or complete. If `status_only`
        is False, then completed results will be keyed by their `msg_id`.
    """
    if not isinstance(msg_ids, (list,tuple)):
        msg_ids = [msg_ids]
    theids = []
    for msg_id in msg_ids:
        if isinstance(msg_id, int):
            # history index -> msg_id
            msg_id = self.history[msg_id]
        if not isinstance(msg_id, string_types):
            raise TypeError("msg_ids must be str, not %r"%msg_id)
        theids.append(msg_id)
    completed = []
    local_results = {}
    # satisfy whatever we can from the local cache first.
    # BUGFIX: iterate over a *copy* of theids -- removing from the list
    # while iterating it skipped the element after every cache hit, so
    # already-cached results were needlessly re-requested from the Hub.
    for msg_id in list(theids):
        if msg_id in self.results:
            completed.append(msg_id)
            local_results[msg_id] = self.results[msg_id]
            theids.remove(msg_id)
    if theids: # some not locally cached
        content = dict(msg_ids=theids, status_only=status_only)
        msg = self.session.send(self._query_socket, "result_request", content=content)
        # wait for the hub's reply, then read it without blocking
        zmq.select([self._query_socket], [], [])
        idents,msg = self.session.recv(self._query_socket, zmq.NOBLOCK)
        if self.debug:
            pprint(msg)
        content = msg['content']
        if content['status'] != 'ok':
            raise self._unwrap_exception(content)
        buffers = msg['buffers']
    else:
        content = dict(completed=[],pending=[])
    content['completed'].extend(completed)
    if status_only:
        return content
    failures = []
    # load cached results into result:
    content.update(local_results)
    # update cache with results:
    for msg_id in sorted(theids):
        if msg_id in content['completed']:
            rec = content[msg_id]
            # the request header is the reply's parent
            parent = extract_dates(rec['header'])
            header = extract_dates(rec['result_header'])
            rcontent = rec['result_content']
            iodict = rec['io']
            if isinstance(rcontent, str):
                rcontent = self.session.unpack(rcontent)
            md = self.metadata[msg_id]
            md_msg = dict(
                content=rcontent,
                parent_header=parent,
                header=header,
                metadata=rec['result_metadata'],
            )
            md.update(self._extract_metadata(md_msg))
            if rec.get('received'):
                md['received'] = parse_date(rec['received'])
            md.update(iodict)
            if rcontent['status'] == 'ok':
                if header['msg_type'] == 'apply_reply':
                    res,buffers = serialize.unserialize_object(buffers)
                elif header['msg_type'] == 'execute_reply':
                    res = ExecuteReply(msg_id, rcontent, md)
                else:
                    raise KeyError("unhandled msg type: %r" % header['msg_type'])
            else:
                res = self._unwrap_exception(rcontent)
                failures.append(res)
            self.results[msg_id] = res
            content[msg_id] = res
    if len(theids) == 1 and failures:
        raise failures[0]
    error.collect_exceptions(failures, "result_status")
    return content
@spin_first
def queue_status(self, targets='all', verbose=False):
    """Fetch the status of engine queues.

    Parameters
    ----------
    targets : int/str/list of ints/strs
        the engines whose states are to be queried.
        default : all
    verbose : bool
        Whether to return lengths only, or lists of ids for each element

    Returns
    -------
    dict
        Queue state keyed by engine id (or a single engine's entry when
        `targets` is a bare int).
    """
    if targets == 'all':
        # allow 'all' to be evaluated on the engine
        engine_ids = None
    else:
        engine_ids = self._build_targets(targets)[1]
    content = dict(targets=engine_ids, verbose=verbose)
    self.session.send(self._query_socket, "queue_request", content=content)
    idents,msg = self.session.recv(self._query_socket, 0)
    if self.debug:
        pprint(msg)
    content = msg['content']
    status = content.pop('status')
    if status != 'ok':
        raise self._unwrap_exception(content)
    # re-key the reply so engine ids are usable keys
    content = rekey(content)
    if isinstance(targets, int):
        # single engine requested: return just its entry
        return content[targets]
    else:
        return content
def _build_msgids_from_target(self, targets=None):
"""Build a list of msg_ids from the list of engine targets"""
if not targets: # needed as _build_targets otherwise uses all engines
return []
target_ids = self._build_targets(targets)[0]
return [md_id for md_id in self.metadata if self.metadata[md_id]["engine_uuid"] in target_ids]
def _build_msgids_from_jobs(self, jobs=None):
"""Build a list of msg_ids from "jobs" """
if not jobs:
return []
msg_ids = []
if isinstance(jobs, string_types + (AsyncResult,)):
jobs = [jobs]
bad_ids = [obj for obj in jobs if not isinstance(obj, string_types + (AsyncResult,))]
if bad_ids:
raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
for j in jobs:
if isinstance(j, AsyncResult):
msg_ids.extend(j.msg_ids)
else:
msg_ids.append(j)
return msg_ids
def purge_local_results(self, jobs=None, targets=None):
    """Clears the client caches of results and their metadata.

    Individual results can be purged by msg_id, or the entire
    history of specific targets can be purged.

    Use `purge_local_results('all')` to scrub everything from the Clients's
    results and metadata caches.

    After this call all `AsyncResults` are invalid and should be discarded.

    If you must "reget" the results, you can still do so by using
    `client.get_result(msg_id)` or `client.get_result(asyncresult)`. This will
    redownload the results from the hub if they are still available
    (i.e `client.purge_hub_results(...)` has not been called.

    Parameters
    ----------
    jobs : str or list of str or AsyncResult objects
        the msg_ids whose results should be purged.
    targets : int/list of ints
        The engines, by integer ID, whose entire result histories are to be purged.

    Raises
    ------
    RuntimeError : if any of the tasks to be purged are still outstanding.
    ValueError : if neither `jobs` nor `targets` is specified.
    """
    # None sentinels instead of mutable [] defaults (shared-list pitfall)
    jobs = [] if jobs is None else jobs
    targets = [] if targets is None else targets
    if not targets and not jobs:
        raise ValueError("Must specify at least one of `targets` and `jobs`")
    if jobs == 'all':
        if self.outstanding:
            raise RuntimeError("Can't purge outstanding tasks: %s" % self.outstanding)
        self.results.clear()
        self.metadata.clear()
    else:
        msg_ids = set()
        msg_ids.update(self._build_msgids_from_target(targets))
        msg_ids.update(self._build_msgids_from_jobs(jobs))
        still_outstanding = self.outstanding.intersection(msg_ids)
        if still_outstanding:
            raise RuntimeError("Can't purge outstanding tasks: %s" % still_outstanding)
        for mid in msg_ids:
            # pop with default: a msg_id may be known to only one of the
            # two caches (previously this raised KeyError)
            self.results.pop(mid, None)
            self.metadata.pop(mid, None)
@spin_first
def purge_hub_results(self, jobs=None, targets=None):
    """Tell the Hub to forget results.

    Individual results can be purged by msg_id, or the entire
    history of specific targets can be purged.

    Use `purge_results('all')` to scrub everything from the Hub's db.

    Parameters
    ----------
    jobs : str or list of str or AsyncResult objects
        the msg_ids whose results should be forgotten.
    targets : int/str/list of ints/strs
        The targets, by int_id, whose entire history is to be purged.
        default : None

    Raises
    ------
    ValueError : if neither `jobs` nor `targets` is specified.
    """
    # None sentinels instead of mutable [] defaults (shared-list pitfall)
    jobs = [] if jobs is None else jobs
    targets = [] if targets is None else targets
    if not targets and not jobs:
        raise ValueError("Must specify at least one of `targets` and `jobs`")
    if targets:
        targets = self._build_targets(targets)[1]
    # construct msg_ids from jobs; 'all' is understood by the Hub directly
    if jobs == 'all':
        msg_ids = jobs
    else:
        msg_ids = self._build_msgids_from_jobs(jobs)
    content = dict(engine_ids=targets, msg_ids=msg_ids)
    self.session.send(self._query_socket, "purge_request", content=content)
    idents, msg = self.session.recv(self._query_socket, 0)
    if self.debug:
        pprint(msg)
    content = msg['content']
    if content['status'] != 'ok':
        raise self._unwrap_exception(content)
def purge_results(self, jobs=None, targets=None):
    """Clears the cached results from both the hub and the local client

    Individual results can be purged by msg_id, or the entire
    history of specific targets can be purged.

    Use `purge_results('all')` to scrub every cached result from both the Hub's and
    the Client's db.

    Equivalent to calling both `purge_hub_results()` and `purge_client_results()` with
    the same arguments.

    Parameters
    ----------
    jobs : str or list of str or AsyncResult objects
        the msg_ids whose results should be forgotten.
    targets : int/str/list of ints/strs
        The targets, by int_id, whose entire history is to be purged.
        default : None
    """
    # None sentinels instead of mutable [] defaults (shared-list pitfall);
    # both delegates treat [] and None identically.
    jobs = [] if jobs is None else jobs
    targets = [] if targets is None else targets
    self.purge_local_results(jobs=jobs, targets=targets)
    self.purge_hub_results(jobs=jobs, targets=targets)
def purge_everything(self):
    """Forget all prior task content on both the Hub and this client.

    In addition to calling `purge_results("all")` it also deletes the
    history and other bookkeeping lists.
    """
    self.purge_results("all")
    self.session.digest_history.clear()
    self.history = []
@spin_first
def hub_history(self):
    """Get the Hub's history

    Just like the Client, the Hub has a history, which is a list of msg_ids.
    This will contain the history of all clients, and, depending on configuration,
    may contain history across multiple cluster sessions.

    Any msg_id returned here is a valid argument to `get_result`.

    Returns
    -------
    msg_ids : list of strs
        list of all msg_ids, ordered by task submission time.
    """
    self.session.send(self._query_socket, "history_request", content={})
    idents, msg = self.session.recv(self._query_socket, 0)
    if self.debug:
        pprint(msg)
    content = msg['content']
    if content['status'] != 'ok':
        raise self._unwrap_exception(content)
    else:
        return content['history']
@spin_first
def db_query(self, query, keys=None):
    """Query the Hub's TaskRecord database

    This will return a list of task record dicts that match `query`

    Parameters
    ----------
    query : mongodb query dict
        The search dict. See mongodb query docs for details.
    keys : list of strs [optional]
        The subset of keys to be returned.  The default is to fetch everything but buffers.
        'msg_id' will *always* be included.

    Returns
    -------
    list of dicts
        One TaskRecord dict per match, with datetime fields parsed and any
        requested buffers re-attached.
    """
    if isinstance(keys, string_types):
        keys = [keys]
    content = dict(query=query, keys=keys)
    self.session.send(self._query_socket, "db_request", content=content)
    idents, msg = self.session.recv(self._query_socket, 0)
    if self.debug:
        pprint(msg)
    content = msg['content']
    if content['status'] != 'ok':
        raise self._unwrap_exception(content)
    records = content['records']
    buffer_lens = content['buffer_lens']
    result_buffer_lens = content['result_buffer_lens']
    buffers = msg['buffers']
    has_bufs = buffer_lens is not None
    has_rbufs = result_buffer_lens is not None
    for i,rec in enumerate(records):
        # unpack datetime objects
        for hkey in ('header', 'result_header'):
            if hkey in rec:
                rec[hkey] = extract_dates(rec[hkey])
        for dtkey in ('submitted', 'started', 'completed', 'received'):
            if dtkey in rec:
                rec[dtkey] = parse_date(rec[dtkey])
        # relink buffers: the reply carries one flat buffer list; slice off
        # each record's request/result buffers by their recorded lengths
        if has_bufs:
            blen = buffer_lens[i]
            rec['buffers'], buffers = buffers[:blen],buffers[blen:]
        if has_rbufs:
            blen = result_buffer_lens[i]
            rec['result_buffers'], buffers = buffers[:blen],buffers[blen:]
    return records
# explicit public API of this module
__all__ = [ 'Client' ]
| {
"repo_name": "EricCline/CEM_inc",
"path": "env/lib/python2.7/site-packages/IPython/parallel/client/client.py",
"copies": "2",
"size": "67974",
"license": "mit",
"hash": 8875764157415380000,
"line_mean": 35.4276527331,
"line_max": 107,
"alpha_frac": 0.5515344102,
"autogenerated": false,
"ratio": 4.44042330807421,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007024992843552543,
"num_lines": 1866
} |
"""A semi-synchronous Client for the ZMQ cluster
Authors:
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import json
import sys
from threading import Thread, Event
import time
import warnings
from datetime import datetime
from getpass import getpass
from pprint import pprint
pjoin = os.path.join
import zmq
# from zmq.eventloop import ioloop, zmqstream
from IPython.config.configurable import MultipleInstanceError
from IPython.core.application import BaseIPythonApplication
from IPython.core.profiledir import ProfileDir, ProfileDirError
from IPython.utils.coloransi import TermColors
from IPython.utils.jsonutil import rekey
from IPython.utils.localinterfaces import LOCALHOST, LOCAL_IPS
from IPython.utils.path import get_ipython_dir
from IPython.utils.py3compat import cast_bytes
from IPython.utils.traitlets import (HasTraits, Integer, Instance, Unicode,
Dict, List, Bool, Set, Any)
from IPython.external.decorator import decorator
from IPython.external.ssh import tunnel
from IPython.parallel import Reference
from IPython.parallel import error
from IPython.parallel import util
from IPython.kernel.zmq.session import Session, Message
from IPython.kernel.zmq import serialize
from .asyncresult import AsyncResult, AsyncHubResult
from .view import DirectView, LoadBalancedView
if sys.version_info[0] >= 3:
    # xrange is used in a couple 'isinstance' tests in py2
    # should be just 'range' in 3k
    # (the alias keeps the py2 spelling usable under py3)
    xrange = range
#--------------------------------------------------------------------------
# Decorators for Client methods
#--------------------------------------------------------------------------
@decorator
def spin_first(f, self, *args, **kwargs):
    """Call spin() to sync state prior to calling the method.

    Applied to Client methods that read result/registration state, so the
    client's view of the cluster is flushed before the wrapped call runs.
    """
    self.spin()
    return f(self, *args, **kwargs)
#--------------------------------------------------------------------------
# Classes
#--------------------------------------------------------------------------
class ExecuteReply(object):
    """wrapper for finished Execute results"""
    def __init__(self, msg_id, content, metadata):
        self.msg_id = msg_id
        self._content = content
        self.execution_count = content['execution_count']
        self.metadata = metadata

    def _pyout_data(self, mime, default=None):
        """Return the pyout display payload for *mime*.

        metadata['pyout'] may be None when the execution produced no
        displayhook output; that is treated like an empty data dict.
        (Factored out: previously every _repr_*_ duplicated this lookup.)
        """
        pyout = self.metadata['pyout'] or {'data': {}}
        return pyout['data'].get(mime, default)

    def __getitem__(self, key):
        # item access is aliased to the metadata dict
        return self.metadata[key]

    def __getattr__(self, key):
        # attribute access falls back to metadata keys
        if key not in self.metadata:
            raise AttributeError(key)
        return self.metadata[key]

    def __repr__(self):
        text_out = self._pyout_data('text/plain', '')
        # keep reprs short: truncate long output to 32 chars
        if len(text_out) > 32:
            text_out = text_out[:29] + '...'
        return "<ExecuteReply[%i]: %s>" % (self.execution_count, text_out)

    def _repr_pretty_(self, p, cycle):
        text_out = self._pyout_data('text/plain', '')
        if not text_out:
            return
        try:
            ip = get_ipython()
        except NameError:
            colors = "NoColor"
        else:
            colors = ip.colors
        if colors == "NoColor":
            out = normal = ""
        else:
            out = TermColors.Red
            normal = TermColors.Normal
        if '\n' in text_out and not text_out.startswith('\n'):
            # add newline for multiline reprs
            text_out = '\n' + text_out
        p.text(
            out + u'Out[%i:%i]: ' % (
                self.metadata['engine_id'], self.execution_count
            ) + normal + text_out
        )

    def _repr_html_(self):
        return self._pyout_data("text/html")

    def _repr_latex_(self):
        return self._pyout_data("text/latex")

    def _repr_json_(self):
        return self._pyout_data("application/json")

    def _repr_javascript_(self):
        return self._pyout_data("application/javascript")

    def _repr_png_(self):
        return self._pyout_data("image/png")

    def _repr_jpeg_(self):
        return self._pyout_data("image/jpeg")

    def _repr_svg_(self):
        return self._pyout_data("image/svg+xml")
class Metadata(dict):
    """Subclass of dict for initializing metadata values.

    Attribute access works on keys.

    These objects have a strict set of keys - errors will raise if you try
    to add new keys.
    """
    def __init__(self, *args, **kwargs):
        dict.__init__(self)
        # the fixed key schema; every Metadata starts fully populated
        md = {'msg_id' : None,
              'submitted' : None,
              'started' : None,
              'completed' : None,
              'received' : None,
              'engine_uuid' : None,
              'engine_id' : None,
              'follow' : None,
              'after' : None,
              'status' : None,
              'pyin' : None,
              'pyout' : None,
              'pyerr' : None,
              'stdout' : '',
              'stderr' : '',
              'outputs' : [],
              'data': {},
              'outputs_ready' : False,
        }
        self.update(md)
        self.update(dict(*args, **kwargs))

    # NOTE: the membership tests below used ``key in self.iterkeys()``,
    # which only exists on Python 2 dicts; on Python 3 the missing
    # attribute re-entered __getattr__ and recursed infinitely.
    # ``key in self`` is equivalent on both versions.

    def __getattr__(self, key):
        """getattr aliased to getitem"""
        if key in self:
            return self[key]
        else:
            raise AttributeError(key)

    def __setattr__(self, key, value):
        """setattr aliased to setitem, with strict"""
        if key in self:
            self[key] = value
        else:
            raise AttributeError(key)

    def __setitem__(self, key, value):
        """strict static key enforcement"""
        if key in self:
            dict.__setitem__(self, key, value)
        else:
            raise KeyError(key)
class Client(HasTraits):
"""A semi-synchronous client to the IPython ZMQ cluster
Parameters
----------
url_file : str/unicode; path to ipcontroller-client.json
This JSON file should contain all the information needed to connect to a cluster,
and is likely the only argument needed.
Connection information for the Hub's registration. If a json connector
file is given, then likely no further configuration is necessary.
[Default: use profile]
profile : bytes
The name of the Cluster profile to be used to find connector information.
If run from an IPython application, the default profile will be the same
as the running application, otherwise it will be 'default'.
cluster_id : str
String id to added to runtime files, to prevent name collisions when using
multiple clusters with a single profile simultaneously.
When set, will look for files named like: 'ipcontroller-<cluster_id>-client.json'
Since this is text inserted into filenames, typical recommendations apply:
Simple character strings are ideal, and spaces are not recommended (but
should generally work)
context : zmq.Context
Pass an existing zmq.Context instance, otherwise the client will create its own.
debug : bool
flag for lots of message printing for debug purposes
timeout : int/float
time (in seconds) to wait for connection replies from the Hub
[Default: 10]
#-------------- session related args ----------------
config : Config object
If specified, this will be relayed to the Session for configuration
username : str
set username for the session object
#-------------- ssh related args ----------------
# These are args for configuring the ssh tunnel to be used
# credentials are used to forward connections over ssh to the Controller
# Note that the ip given in `addr` needs to be relative to sshserver
# The most basic case is to leave addr as pointing to localhost (127.0.0.1),
# and set sshserver as the same machine the Controller is on. However,
# the only requirement is that sshserver is able to see the Controller
# (i.e. is within the same trusted network).
sshserver : str
A string of the form passed to ssh, i.e. 'server.tld' or 'user@server.tld:port'
If keyfile or password is specified, and this is not, it will default to
the ip given in addr.
sshkey : str; path to ssh private key file
This specifies a key to be used in ssh login, default None.
Regular default ssh keys will be used without specifying this argument.
password : str
Your ssh password to sshserver. Note that if this is left None,
you will be prompted for it if passwordless key based login is unavailable.
paramiko : bool
flag for whether to use paramiko instead of shell ssh for tunneling.
[default: True on win32, False else]
Attributes
----------
ids : list of int engine IDs
requesting the ids attribute always synchronizes
the registration state. To request ids without synchronization,
use the semi-private `_ids` attribute.
history : list of msg_ids
a list of msg_ids, keeping track of all the execution
messages you have submitted in order.
outstanding : set of msg_ids
a set of msg_ids that have been submitted, but whose
results have not yet been received.
results : dict
a dict of all our results, keyed by msg_id
block : bool
determines default behavior when block not specified
in execution methods
Methods
-------
spin
flushes incoming results and registration state changes
control methods spin, and requesting `ids` also ensures up to date
wait
wait on one or more msg_ids
execution methods
apply
legacy: execute, run
data movement
push, pull, scatter, gather
query methods
queue_status, get_result, purge, result_status
control methods
abort, shutdown
"""
# ---- public client state (traitlets) ------------------------------------
block = Bool(False)        # default blocking behavior for execution methods
outstanding = Set()        # msg_ids submitted but not yet resolved
results = Instance('collections.defaultdict', (dict,))       # msg_id -> result
metadata = Instance('collections.defaultdict', (Metadata,))  # msg_id -> Metadata
history = List()           # every msg_id ever submitted, in order
debug = Bool(False)        # when True, pprint every message received
_spin_thread = Any()       # background Thread running _spin_every, or None
_stop_spinning = Any()     # threading.Event used to stop the spin thread
profile=Unicode()
def _profile_default(self):
    """Dynamic default for the ``profile`` trait.

    Reuse the profile of an already-running IPython application when one
    exists; otherwise fall back to the 'default' profile.
    """
    if not BaseIPythonApplication.initialized():
        return u'default'
    # an IPython app *might* be running; try to read its profile
    try:
        return BaseIPythonApplication.instance().profile
    except (AttributeError, MultipleInstanceError):
        # instance() can fail if the running app is a *different*
        # subclass of config.Application
        return u'default'
# ---- private client state (traitlets) -----------------------------------
_outstanding_dict = Instance('collections.defaultdict', (set,))  # engine uuid -> outstanding msg_ids
_ids = List()              # registered engine ids (see `ids` property for the synced view)
_connected=Bool(False)     # set once _connect() has completed
_ssh=Bool(False)           # whether connections are tunneled over ssh
_context = Instance('zmq.Context')
_config = Dict()           # contents of the connection file, post-processing
_engines=Instance(util.ReverseDict, (), {})  # engine id <-> uuid, both directions
# _hub_socket=Instance('zmq.Socket')
_query_socket=Instance('zmq.Socket')         # DEALER: requests to the Hub
_control_socket=Instance('zmq.Socket')       # DEALER: abort/clear/shutdown
_iopub_socket=Instance('zmq.Socket')         # SUB: engine stdout/stderr/displays
_notification_socket=Instance('zmq.Socket')  # SUB: (un)registration events
_mux_socket=Instance('zmq.Socket')           # DEALER: direct (multiplexed) execution
_task_socket=Instance('zmq.Socket')          # DEALER: load-balanced execution
_task_scheme=Unicode()     # scheduler scheme advertised in the connection file
_closed = False            # latched True by close()
_ignored_control_replies=Integer(0)  # control replies to drain without handling
_ignored_hub_replies=Integer(0)
def __new__(self, *args, **kw):
    # don't raise on positional args
    # positional args are consumed by __init__; forward only keywords so
    # HasTraits.__new__ does not reject them.
    return HasTraits.__new__(self, **kw)
def __init__(self, url_file=None, profile=None, profile_dir=None, ipython_dir=None,
        context=None, debug=False,
        sshserver=None, sshkey=None, password=None, paramiko=None,
        timeout=10, cluster_id=None, **extra_args
        ):
    """Connect to the Hub described by `url_file` (or the profile's
    default connection file) and set up all client state.

    See the class docstring for the meaning of each argument.
    Raises ValueError when no connection file can be located, or when
    the connection file is invalid; error.TimeoutError (via _connect)
    when the Hub does not answer within `timeout` seconds.
    """
    if profile:
        super(Client, self).__init__(debug=debug, profile=profile)
    else:
        super(Client, self).__init__(debug=debug)
    if context is None:
        context = zmq.Context.instance()
    self._context = context
    self._stop_spinning = Event()

    # legacy keyword, kept working but deprecated
    if 'url_or_file' in extra_args:
        url_file = extra_args['url_or_file']
        warnings.warn("url_or_file arg no longer supported, use url_file", DeprecationWarning)

    if url_file and util.is_url(url_file):
        raise ValueError("single urls cannot be specified, url-files must be used.")

    self._setup_profile_dir(self.profile, profile_dir, ipython_dir)

    # no explicit url_file: look it up in the profile's security dir
    if self._cd is not None:
        if url_file is None:
            if not cluster_id:
                client_json = 'ipcontroller-client.json'
            else:
                client_json = 'ipcontroller-%s-client.json' % cluster_id
            url_file = pjoin(self._cd.security_dir, client_json)
    if url_file is None:
        raise ValueError(
            "I can't find enough information to connect to a hub!"
            " Please specify at least one of url_file or profile."
        )

    with open(url_file) as f:
        cfg = json.load(f)

    self._task_scheme = cfg['task_scheme']

    # sync defaults from args, json:
    if sshserver:
        cfg['ssh'] = sshserver

    location = cfg.setdefault('location', None)

    proto,addr = cfg['interface'].split('://')
    addr = util.disambiguate_ip_address(addr, location)
    cfg['interface'] = "%s://%s" % (proto, addr)

    # turn interface,port into full urls:
    for key in ('control', 'task', 'mux', 'iopub', 'notification', 'registration'):
        cfg[key] = cfg['interface'] + ':%i' % cfg[key]

    # NOTE(review): `url` is assigned but never used below
    url = cfg['registration']

    if location is not None and addr == LOCALHOST:
        # location specified, and connection is expected to be local
        if location not in LOCAL_IPS and not sshserver:
            # load ssh from JSON *only* if the controller is not on
            # this machine
            sshserver=cfg['ssh']
        if location not in LOCAL_IPS and not sshserver:
            # warn if no ssh specified, but SSH is probably needed
            # This is only a warning, because the most likely cause
            # is a local Controller on a laptop whose IP is dynamic
            warnings.warn("""
            Controller appears to be listening on localhost, but not on this machine.
            If this is true, you should specify Client(...,sshserver='you@%s')
            or instruct your controller to listen on an external IP."""%location,
            RuntimeWarning)
    elif not sshserver:
        # otherwise sync with cfg
        sshserver = cfg['ssh']

    self._config = cfg

    self._ssh = bool(sshserver or sshkey or password)
    if self._ssh and sshserver is None:
        # default to ssh via localhost
        sshserver = addr
    if self._ssh and password is None:
        if tunnel.try_passwordless_ssh(sshserver, sshkey, paramiko):
            password=False
        else:
            password = getpass("SSH Password for %s: "%sshserver)
    ssh_kwargs = dict(keyfile=sshkey, password=password, paramiko=paramiko)

    # configure and construct the session
    # the serialization/signature settings must come from the connection file
    try:
        extra_args['packer'] = cfg['pack']
        extra_args['unpacker'] = cfg['unpack']
        extra_args['key'] = cast_bytes(cfg['key'])
        extra_args['signature_scheme'] = cfg['signature_scheme']
    except KeyError as exc:
        msg = '\n'.join([
            "Connection file is invalid (missing '{}'), possibly from an old version of IPython.",
            "If you are reusing connection files, remove them and start ipcontroller again."
        ])
        raise ValueError(msg.format(exc.message))

    self.session = Session(**extra_args)

    self._query_socket = self._context.socket(zmq.DEALER)

    if self._ssh:
        tunnel.tunnel_connection(self._query_socket, cfg['registration'], sshserver, **ssh_kwargs)
    else:
        self._query_socket.connect(cfg['registration'])

    self.session.debug = self.debug

    # dispatch tables for incoming messages
    self._notification_handlers = {'registration_notification' : self._register_engine,
                                'unregistration_notification' : self._unregister_engine,
                                'shutdown_notification' : lambda msg: self.close(),
                                }
    self._queue_handlers = {'execute_reply' : self._handle_execute_reply,
                            'apply_reply' : self._handle_apply_reply}

    try:
        self._connect(sshserver, ssh_kwargs, timeout)
    except:
        # don't leave half-connected sockets behind
        self.close(linger=0)
        raise

    # last step: setup magics, if we are in IPython:
    try:
        ip = get_ipython()
    except NameError:
        return
    else:
        if 'px' not in ip.magics_manager.magics:
            # in IPython but we are the first Client.
            # activate a default view for parallel magics.
            self.activate()
def __del__(self):
    """cleanup sockets, but _not_ context.

    The zmq Context may be shared with other users (it defaults to
    zmq.Context.instance() in __init__), so only our sockets are closed.
    """
    self.close()
def _setup_profile_dir(self, profile, profile_dir, ipython_dir):
    """Resolve and store the cluster's ProfileDir in ``self._cd``.

    Resolution order: an explicit ``profile_dir`` path first, then a
    ``profile`` name looked up under ``ipython_dir``.  ``self._cd`` is
    set to None when neither can be resolved.
    """
    if ipython_dir is None:
        ipython_dir = get_ipython_dir()
    if profile_dir is not None:
        try:
            self._cd = ProfileDir.find_profile_dir(profile_dir)
        except ProfileDirError:
            pass
        else:
            return
    elif profile is not None:
        try:
            self._cd = ProfileDir.find_profile_dir_by_name(
                ipython_dir, profile)
        except ProfileDirError:
            pass
        else:
            return
    self._cd = None
def _update_engines(self, engines):
"""Update our engines dict and _ids from a dict of the form: {id:uuid}."""
for k,v in engines.iteritems():
eid = int(k)
if eid not in self._engines:
self._ids.append(eid)
self._engines[eid] = v
self._ids = sorted(self._ids)
if sorted(self._engines.keys()) != range(len(self._engines)) and \
self._task_scheme == 'pure' and self._task_socket:
self._stop_scheduling_tasks()
def _stop_scheduling_tasks(self):
"""Stop scheduling tasks because an engine has been unregistered
from a pure ZMQ scheduler.
"""
self._task_socket.close()
self._task_socket = None
msg = "An engine has been unregistered, and we are using pure " +\
"ZMQ task scheduling. Task farming will be disabled."
if self.outstanding:
msg += " If you were running tasks when this happened, " +\
"some `outstanding` msg_ids may never resolve."
warnings.warn(msg, RuntimeWarning)
def _build_targets(self, targets):
    """Turn valid target IDs or 'all' into two lists:
    (int_ids, uuids).

    Accepts None (meaning all engines), 'all', a single int (negative
    ints index from the end of the registered-id list), a slice, or a
    collection of ints.  Raises NoEnginesRegistered when no engines are
    up, TypeError for unsupported target specs, IndexError for unknown
    engine ids.  NOTE: uses py2-only `basestring`/`xrange`.
    """
    if not self._ids:
        # flush notification socket if no engines yet, just in case
        if not self.ids:
            raise error.NoEnginesRegistered("Can't build targets without any engines")

    if targets is None:
        targets = self._ids
    elif isinstance(targets, basestring):
        if targets.lower() == 'all':
            targets = self._ids
        else:
            raise TypeError("%r not valid str target, must be 'all'"%(targets))
    elif isinstance(targets, int):
        # negative index counts from the end of the current id list
        if targets < 0:
            targets = self.ids[targets]
        if targets not in self._ids:
            raise IndexError("No such engine: %i"%targets)
        targets = [targets]

    if isinstance(targets, slice):
        indices = range(len(self._ids))[targets]
        ids = self.ids
        targets = [ ids[i] for i in indices ]

    if not isinstance(targets, (tuple, list, xrange)):
        raise TypeError("targets by int/slice/collection of ints only, not %s"%(type(targets)))

    # first element: routable engine identities; second: plain int ids
    return [cast_bytes(self._engines[t]) for t in targets], list(targets)
def _connect(self, sshserver, ssh_kwargs, timeout):
    """setup all our socket connections to the cluster. This is called from
    __init__.

    Sends a connection_request over the query socket, waits up to
    `timeout` seconds for the Hub's reply, then connects the mux, task,
    notification, control and iopub sockets using the urls the Hub
    advertised.  Raises error.TimeoutError on timeout, Exception when
    the Hub rejects the connection.
    """
    # Maybe allow reconnecting?
    if self._connected:
        return
    self._connected=True

    def connect_socket(s, url):
        # route through the ssh tunnel when one is configured
        if self._ssh:
            return tunnel.tunnel_connection(s, url, sshserver, **ssh_kwargs)
        else:
            return s.connect(url)

    self.session.send(self._query_socket, 'connection_request')
    # use Poller because zmq.select has wrong units in pyzmq 2.1.7
    poller = zmq.Poller()
    poller.register(self._query_socket, zmq.POLLIN)
    # poll expects milliseconds, timeout is seconds
    evts = poller.poll(timeout*1000)
    if not evts:
        raise error.TimeoutError("Hub connection request timed out")
    idents,msg = self.session.recv(self._query_socket,mode=0)
    if self.debug:
        pprint(msg)
    content = msg['content']
    # self._config['registration'] = dict(content)
    cfg = self._config
    if content['status'] == 'ok':
        self._mux_socket = self._context.socket(zmq.DEALER)
        connect_socket(self._mux_socket, cfg['mux'])
        self._task_socket = self._context.socket(zmq.DEALER)
        connect_socket(self._task_socket, cfg['task'])
        # SUB sockets subscribe to everything
        self._notification_socket = self._context.socket(zmq.SUB)
        self._notification_socket.setsockopt(zmq.SUBSCRIBE, b'')
        connect_socket(self._notification_socket, cfg['notification'])
        self._control_socket = self._context.socket(zmq.DEALER)
        connect_socket(self._control_socket, cfg['control'])
        self._iopub_socket = self._context.socket(zmq.SUB)
        self._iopub_socket.setsockopt(zmq.SUBSCRIBE, b'')
        connect_socket(self._iopub_socket, cfg['iopub'])
        # seed our engine table with the Hub's current view
        self._update_engines(dict(content['engines']))
    else:
        self._connected = False
        raise Exception("Failed to connect!")
#--------------------------------------------------------------------------
# handlers and callbacks for incoming messages
#--------------------------------------------------------------------------
def _unwrap_exception(self, content):
    """unwrap exception, and remap engine_id to int.

    Turns an error reply's content into an exception object and, when
    engine info is attached, translates the engine uuid back to its
    integer id for friendlier display.
    """
    e = error.unwrap_exception(content)
    # print e.traceback
    if e.engine_info:
        e_uuid = e.engine_info['engine_uuid']
        # _engines maps both directions (ReverseDict): uuid -> int id
        eid = self._engines[e_uuid]
        e.engine_info['engine_id'] = eid
    return e
def _extract_metadata(self, msg):
header = msg['header']
parent = msg['parent_header']
msg_meta = msg['metadata']
content = msg['content']
md = {'msg_id' : parent['msg_id'],
'received' : datetime.now(),
'engine_uuid' : msg_meta.get('engine', None),
'follow' : msg_meta.get('follow', []),
'after' : msg_meta.get('after', []),
'status' : content['status'],
}
if md['engine_uuid'] is not None:
md['engine_id'] = self._engines.get(md['engine_uuid'], None)
if 'date' in parent:
md['submitted'] = parent['date']
if 'started' in msg_meta:
md['started'] = msg_meta['started']
if 'date' in header:
md['completed'] = header['date']
return md
def _register_engine(self, msg):
"""Register a new engine, and update our connection info."""
content = msg['content']
eid = content['id']
d = {eid : content['uuid']}
self._update_engines(d)
def _unregister_engine(self, msg):
"""Unregister an engine that has died."""
content = msg['content']
eid = int(content['id'])
if eid in self._ids:
self._ids.remove(eid)
uuid = self._engines.pop(eid)
self._handle_stranded_msgs(eid, uuid)
if self._task_socket and self._task_scheme == 'pure':
self._stop_scheduling_tasks()
def _handle_stranded_msgs(self, eid, uuid):
    """Handle messages known to be on an engine when the engine unregisters.

    It is possible that this will fire prematurely - that is, an engine will
    go down after completing a result, and the client will be notified
    of the unregistration and later receive the successful result.
    """
    outstanding = self._outstanding_dict[uuid]

    for msg_id in list(outstanding):
        if msg_id in self.results:
            # we already have the result; nothing to fabricate
            continue
        try:
            raise error.EngineError("Engine %r died while running task %r"%(eid, msg_id))
        except:
            # raise/except so wrap_exception can capture a real traceback
            content = error.wrap_exception()
        # build a fake message:
        msg = self.session.msg('apply_reply', content=content)
        msg['parent_header']['msg_id'] = msg_id
        msg['metadata']['engine'] = uuid
        self._handle_apply_reply(msg)
def _handle_execute_reply(self, msg):
    """Save the reply to an execute_request into our results.

    execute messages are never actually used. apply is used instead.
    """
    parent = msg['parent_header']
    msg_id = parent['msg_id']
    if msg_id not in self.outstanding:
        # reply we were not waiting for; report it, but still record below
        if msg_id in self.history:
            print ("got stale result: %s"%msg_id)
        else:
            print ("got unknown result: %s"%msg_id)
    else:
        self.outstanding.remove(msg_id)

    content = msg['content']
    header = msg['header']

    # construct metadata:
    md = self.metadata[msg_id]
    md.update(self._extract_metadata(msg))
    # is this redundant?
    self.metadata[msg_id] = md

    # drop from the per-engine outstanding set as well
    e_outstanding = self._outstanding_dict[md['engine_uuid']]
    if msg_id in e_outstanding:
        e_outstanding.remove(msg_id)

    # construct result:
    if content['status'] == 'ok':
        self.results[msg_id] = ExecuteReply(msg_id, content, md)
    elif content['status'] == 'aborted':
        self.results[msg_id] = error.TaskAborted(msg_id)
    elif content['status'] == 'resubmitted':
        # TODO: handle resubmission
        pass
    else:
        self.results[msg_id] = self._unwrap_exception(content)
def _handle_apply_reply(self, msg):
"""Save the reply to an apply_request into our results."""
parent = msg['parent_header']
msg_id = parent['msg_id']
if msg_id not in self.outstanding:
if msg_id in self.history:
print ("got stale result: %s"%msg_id)
print self.results[msg_id]
print msg
else:
print ("got unknown result: %s"%msg_id)
else:
self.outstanding.remove(msg_id)
content = msg['content']
header = msg['header']
# construct metadata:
md = self.metadata[msg_id]
md.update(self._extract_metadata(msg))
# is this redundant?
self.metadata[msg_id] = md
e_outstanding = self._outstanding_dict[md['engine_uuid']]
if msg_id in e_outstanding:
e_outstanding.remove(msg_id)
# construct result:
if content['status'] == 'ok':
self.results[msg_id] = serialize.unserialize_object(msg['buffers'])[0]
elif content['status'] == 'aborted':
self.results[msg_id] = error.TaskAborted(msg_id)
elif content['status'] == 'resubmitted':
# TODO: handle resubmission
pass
else:
self.results[msg_id] = self._unwrap_exception(content)
def _flush_notifications(self):
    """Flush notifications of engine registrations waiting
    in ZMQ queue."""
    while True:
        # non-blocking recv: msg is None once the queue is drained
        idents, msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK)
        if msg is None:
            break
        if self.debug:
            pprint(msg)
        msg_type = msg['header']['msg_type']
        handler = self._notification_handlers.get(msg_type, None)
        if handler is None:
            raise Exception("Unhandled message type: %s" % msg_type)
        handler(msg)
def _flush_results(self, sock):
    """Flush task or queue results waiting in ZMQ queue."""
    while True:
        # non-blocking recv: msg is None once the queue is drained
        idents, msg = self.session.recv(sock, mode=zmq.NOBLOCK)
        if msg is None:
            break
        if self.debug:
            pprint(msg)
        msg_type = msg['header']['msg_type']
        handler = self._queue_handlers.get(msg_type, None)
        if handler is None:
            raise Exception("Unhandled message type: %s" % msg_type)
        handler(msg)
def _flush_control(self, sock):
"""Flush replies from the control channel waiting
in the ZMQ queue.
Currently: ignore them."""
if self._ignored_control_replies <= 0:
return
idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
while msg is not None:
self._ignored_control_replies -= 1
if self.debug:
pprint(msg)
idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
def _flush_ignored_control(self):
    """flush ignored control replies

    Blockingly consume exactly as many control replies as were queued
    by non-blocking clear/abort/shutdown calls.
    """
    while self._ignored_control_replies > 0:
        self.session.recv(self._control_socket)
        self._ignored_control_replies -= 1
def _flush_ignored_hub_replies(self):
    # drain (and discard) any pending Hub replies on the query socket
    ident,msg = self.session.recv(self._query_socket, mode=zmq.NOBLOCK)
    while msg is not None:
        ident,msg = self.session.recv(self._query_socket, mode=zmq.NOBLOCK)
def _flush_iopub(self, sock):
    """Flush replies from the iopub channel waiting
    in the ZMQ queue.

    Folds stream output, pyin/pyout/pyerr, display data and status
    updates into self.metadata[msg_id].
    """
    idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
    while msg is not None:
        if self.debug:
            pprint(msg)
        parent = msg['parent_header']
        # ignore IOPub messages with no parent.
        # Caused by print statements or warnings from before the first execution.
        if not parent:
            idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
            continue
        msg_id = parent['msg_id']
        content = msg['content']
        header = msg['header']
        msg_type = msg['header']['msg_type']

        # init metadata:
        md = self.metadata[msg_id]

        if msg_type == 'stream':
            # append to any stream output already accumulated
            name = content['name']
            s = md[name] or ''
            md[name] = s + content['data']
        elif msg_type == 'pyerr':
            md.update({'pyerr' : self._unwrap_exception(content)})
        elif msg_type == 'pyin':
            md.update({'pyin' : content['code']})
        elif msg_type == 'display_data':
            md['outputs'].append(content)
        elif msg_type == 'pyout':
            md['pyout'] = content
        elif msg_type == 'data_message':
            data, remainder = serialize.unserialize_object(msg['buffers'])
            md['data'].update(data)
        elif msg_type == 'status':
            # idle message comes after all outputs
            if content['execution_state'] == 'idle':
                md['outputs_ready'] = True
        else:
            # unhandled msg_type (status, etc.)
            pass

        # redundant?
        self.metadata[msg_id] = md

        idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
#--------------------------------------------------------------------------
# len, getitem
#--------------------------------------------------------------------------
def __len__(self):
    """len(client) returns # of engines.

    Uses the `ids` property, so the registration state is synchronized
    before counting.
    """
    return len(self.ids)
def __getitem__(self, key):
    """index access returns DirectView multiplexer objects

    Must be int, slice, or list/tuple/xrange of ints"""
    # NOTE: `xrange` makes this py2-only
    if not isinstance(key, (int, slice, tuple, list, xrange)):
        raise TypeError("key by int/slice/iterable of ints only, not %s"%(type(key)))
    else:
        return self.direct_view(key)
#--------------------------------------------------------------------------
# Begin public methods
#--------------------------------------------------------------------------
@property
def ids(self):
    """Always up-to-date ids property.

    Flushes pending (un)registration notifications first, so the
    returned list reflects the Hub's current engine set.
    """
    self._flush_notifications()
    # always copy:
    return list(self._ids)
def activate(self, targets='all', suffix=''):
    """Create a DirectView and register it with IPython magics

    Defines the magics `%px, %autopx, %pxresult, %%px`

    Parameters
    ----------
    targets: int, list of ints, or 'all'
        The engines on which the view's magics will run
    suffix: str [default: '']
        The suffix, if any, for the magics.  Allows several views to be
        bound to parallel magics at once: ``rc.activate(targets=0,
        suffix='0')`` produces ``%px0``, ``%pxresult0``, etc. acting on
        engine 0 only.
    """
    dv = self.direct_view(targets)
    dv.block = True
    dv.activate(suffix)
    return dv
def close(self, linger=None):
    """Close my zmq Sockets

    If `linger`, set the zmq LINGER socket option,
    which allows discarding of messages.
    """
    if self._closed:
        return
    self.stop_spin_thread()
    # every trait whose name ends in "socket" holds a zmq socket (or None)
    for name in self.trait_names():
        if not name.endswith("socket"):
            continue
        sock = getattr(self, name)
        if sock is None or sock.closed:
            continue
        if linger is None:
            sock.close()
        else:
            sock.close(linger=linger)
    self._closed = True
def _spin_every(self, interval=1):
"""target func for use in spin_thread"""
while True:
if self._stop_spinning.is_set():
return
time.sleep(interval)
self.spin()
def spin_thread(self, interval=1):
    """call Client.spin() in a background thread on some regular interval

    This helps ensure that messages don't pile up too much in the zmq queue
    while you are working on other things, or just leaving an idle terminal.

    It also helps limit potential padding of the `received` timestamp
    on AsyncResult objects, used for timings.

    Parameters
    ----------
    interval : float, optional
        The interval on which to spin the client in the background thread
        (simply passed to time.sleep).

    Notes
    -----
    For precision timing, you may want to use this method to put a bound
    on the jitter (in seconds) in `received` timestamps used
    in AsyncResult.wall_time.
    """
    # replace any previously running spin thread
    if self._spin_thread is not None:
        self.stop_spin_thread()
    self._stop_spinning.clear()
    worker = Thread(target=self._spin_every, args=(interval,))
    worker.daemon = True  # don't block interpreter exit
    worker.start()
    self._spin_thread = worker
def stop_spin_thread(self):
    """stop background spin_thread, if any"""
    if self._spin_thread is None:
        return
    # signal the worker, wait for it to exit, then forget it
    self._stop_spinning.set()
    self._spin_thread.join()
    self._spin_thread = None
def spin(self):
    """Flush any registration notifications and execution results
    waiting in the ZMQ queue.
    """
    if self._notification_socket:
        self._flush_notifications()
    if self._iopub_socket:
        self._flush_iopub(self._iopub_socket)
    # both execution channels carry results
    for result_sock in (self._mux_socket, self._task_socket):
        if result_sock:
            self._flush_results(result_sock)
    if self._control_socket:
        self._flush_control(self._control_socket)
    if self._query_socket:
        self._flush_ignored_hub_replies()
def wait(self, jobs=None, timeout=-1):
    """waits on one or more `jobs`, for up to `timeout` seconds.

    Parameters
    ----------
    jobs : int, str, or list of ints and/or strs, or one or more AsyncResult objects
        ints are indices to self.history
        strs are msg_ids
        default: wait on all outstanding messages
    timeout : float
        a time in seconds, after which to give up.
        default is -1, which means no timeout

    Returns
    -------
    True : when all msg_ids are done
    False : timeout reached, some msg_ids still outstanding
    """
    start = time.time()
    if jobs is None:
        pending = self.outstanding
    else:
        if isinstance(jobs, (int, basestring, AsyncResult)):
            jobs = [jobs]
        pending = set()
        for job in jobs:
            if isinstance(job, int):
                # history index access
                job = self.history[job]
            elif isinstance(job, AsyncResult):
                pending.update(job.msg_ids)
                continue
            pending.add(job)
    if not pending.intersection(self.outstanding):
        return True
    self.spin()
    while pending.intersection(self.outstanding):
        if timeout >= 0 and (time.time() - start) > timeout:
            break
        time.sleep(1e-3)
        self.spin()
    return len(pending.intersection(self.outstanding)) == 0
#--------------------------------------------------------------------------
# Control methods
#--------------------------------------------------------------------------
@spin_first
def clear(self, targets=None, block=None):
    """Clear the namespace in target(s).

    Parameters
    ----------
    targets : engine spec accepted by _build_targets [default: None = all]
    block : bool [default: self.block]
        When True, wait for replies and raise the last remote failure;
        otherwise queue the replies to be silently drained later.
    """
    block = self.block if block is None else block
    targets = self._build_targets(targets)[0]
    for t in targets:
        self.session.send(self._control_socket, 'clear_request', content={}, ident=t)
    # NOTE: local `error` shadows the `error` module within this method
    error = False
    if block:
        self._flush_ignored_control()
        for i in range(len(targets)):
            idents,msg = self.session.recv(self._control_socket,0)
            if self.debug:
                pprint(msg)
            if msg['content']['status'] != 'ok':
                # only the last failure is kept and raised
                error = self._unwrap_exception(msg['content'])
    else:
        self._ignored_control_replies += len(targets)
    if error:
        raise error
@spin_first
def abort(self, jobs=None, targets=None, block=None):
    """Abort specific jobs from the execution queues of target(s).

    This is a mechanism to prevent jobs that have already been submitted
    from executing.

    Parameters
    ----------
    jobs : msg_id, list of msg_ids, or AsyncResult
        The jobs to be aborted

        If unspecified/None: abort all outstanding jobs.
    """
    block = self.block if block is None else block
    jobs = jobs if jobs is not None else list(self.outstanding)
    targets = self._build_targets(targets)[0]

    msg_ids = []
    if isinstance(jobs, (basestring,AsyncResult)):
        jobs = [jobs]
    # reject anything that is not a msg_id or AsyncResult up front
    bad_ids = filter(lambda obj: not isinstance(obj, (basestring, AsyncResult)), jobs)
    if bad_ids:
        raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
    for j in jobs:
        if isinstance(j, AsyncResult):
            msg_ids.extend(j.msg_ids)
        else:
            msg_ids.append(j)
    content = dict(msg_ids=msg_ids)
    for t in targets:
        self.session.send(self._control_socket, 'abort_request',
                content=content, ident=t)
    # NOTE: local `error` shadows the `error` module within this method
    error = False
    if block:
        self._flush_ignored_control()
        for i in range(len(targets)):
            idents,msg = self.session.recv(self._control_socket,0)
            if self.debug:
                pprint(msg)
            if msg['content']['status'] != 'ok':
                # only the last failure is kept and raised
                error = self._unwrap_exception(msg['content'])
    else:
        self._ignored_control_replies += len(targets)
    if error:
        raise error
@spin_first
def shutdown(self, targets='all', restart=False, hub=False, block=None):
    """Terminates one or more engine processes, optionally including the hub.

    Parameters
    ----------
    targets: list of ints or 'all' [default: all]
        Which engines to shutdown.
    hub: bool [default: False]
        Whether to include the Hub.  hub=True implies targets='all'.
    block: bool [default: self.block]
        Whether to wait for clean shutdown replies or not.
    restart: bool [default: False]
        NOT IMPLEMENTED
        whether to restart engines after shutting them down.
    """
    from IPython.parallel.error import NoEnginesRegistered
    if restart:
        raise NotImplementedError("Engine restart is not yet implemented")

    block = self.block if block is None else block
    if hub:
        targets = 'all'
    try:
        targets = self._build_targets(targets)[0]
    except NoEnginesRegistered:
        # nothing to shut down engine-wise; the Hub may still be stopped
        targets = []
    for t in targets:
        self.session.send(self._control_socket, 'shutdown_request',
                    content={'restart':restart},ident=t)
    # NOTE: local `error` shadows the `error` module within this method
    error = False
    if block or hub:
        self._flush_ignored_control()
        for i in range(len(targets)):
            idents,msg = self.session.recv(self._control_socket, 0)
            if self.debug:
                pprint(msg)
            if msg['content']['status'] != 'ok':
                error = self._unwrap_exception(msg['content'])
    else:
        self._ignored_control_replies += len(targets)

    if hub:
        # give engines a moment to receive their shutdown first
        time.sleep(0.25)
        self.session.send(self._query_socket, 'shutdown_request')
        idents,msg = self.session.recv(self._query_socket, 0)
        if self.debug:
            pprint(msg)
        if msg['content']['status'] != 'ok':
            error = self._unwrap_exception(msg['content'])

    if error:
        raise error
#--------------------------------------------------------------------------
# Execution related methods
#--------------------------------------------------------------------------
def _maybe_raise(self, result):
    """wrapper for maybe raising an exception if apply failed.

    Raises when `result` is a RemoteError; otherwise returns it unchanged.
    """
    if isinstance(result, error.RemoteError):
        raise result

    return result
def send_apply_request(self, socket, f, args=None, kwargs=None, metadata=None, track=False,
                    ident=None):
    """construct and send an apply message via a socket.

    This is the principal method with which all engine execution is performed by views.

    Parameters
    ----------
    socket : the zmq socket to send on (mux or task)
    f : callable (or Reference) to execute remotely
    args, kwargs : positional/keyword arguments for `f`
    metadata : extra metadata dict for the message
    track : passed through to Session.send (message tracking)
    ident : routing identity when targeting a specific engine

    Returns the sent message dict; its header msg_id is recorded in
    `outstanding`, `history`, and `metadata`.
    """
    if self._closed:
        raise RuntimeError("Client cannot be used after its sockets have been closed")

    # defaults:
    args = args if args is not None else []
    kwargs = kwargs if kwargs is not None else {}
    metadata = metadata if metadata is not None else {}

    # validate arguments
    if not callable(f) and not isinstance(f, Reference):
        raise TypeError("f must be callable, not %s"%type(f))
    if not isinstance(args, (tuple, list)):
        raise TypeError("args must be tuple or list, not %s"%type(args))
    if not isinstance(kwargs, dict):
        raise TypeError("kwargs must be dict, not %s"%type(kwargs))
    if not isinstance(metadata, dict):
        raise TypeError("metadata must be dict, not %s"%type(metadata))

    bufs = serialize.pack_apply_message(f, args, kwargs,
        buffer_threshold=self.session.buffer_threshold,
        item_threshold=self.session.item_threshold,
    )

    msg = self.session.send(socket, "apply_request", buffers=bufs, ident=ident,
                        metadata=metadata, track=track)

    msg_id = msg['header']['msg_id']
    self.outstanding.add(msg_id)
    if ident:
        # possibly routed to a specific engine
        if isinstance(ident, list):
            ident = ident[-1]
        if ident in self._engines.values():
            # save for later, in case of engine death
            self._outstanding_dict[ident].add(msg_id)
    self.history.append(msg_id)
    self.metadata[msg_id]['submitted'] = datetime.now()

    return msg
def send_execute_request(self, socket, code, silent=True, metadata=None, ident=None):
    """construct and send an execute request via a socket.

    Parameters
    ----------
    socket : the zmq socket to send on
    code : str, the code to execute remotely
    silent : bool, suppress output when True
    metadata : extra metadata dict for the message
    ident : routing identity when targeting a specific engine

    Returns the sent message dict; its header msg_id is recorded in
    `outstanding`, `history`, and `metadata`.
    """
    if self._closed:
        raise RuntimeError("Client cannot be used after its sockets have been closed")

    # defaults:
    metadata = metadata if metadata is not None else {}

    # validate arguments
    if not isinstance(code, basestring):
        raise TypeError("code must be text, not %s" % type(code))
    if not isinstance(metadata, dict):
        raise TypeError("metadata must be dict, not %s" % type(metadata))

    content = dict(code=code, silent=bool(silent), user_variables=[], user_expressions={})

    msg = self.session.send(socket, "execute_request", content=content, ident=ident,
                        metadata=metadata)

    msg_id = msg['header']['msg_id']
    self.outstanding.add(msg_id)
    if ident:
        # possibly routed to a specific engine
        if isinstance(ident, list):
            ident = ident[-1]
        if ident in self._engines.values():
            # save for later, in case of engine death
            self._outstanding_dict[ident].add(msg_id)
    self.history.append(msg_id)
    self.metadata[msg_id]['submitted'] = datetime.now()

    return msg
#--------------------------------------------------------------------------
# construct a View object
#--------------------------------------------------------------------------
def load_balanced_view(self, targets=None):
    """construct a LoadBalancedView object.

    If no arguments are specified, create a LoadBalancedView
    using all engines.

    Parameters
    ----------
    targets: list,slice,int,etc. [default: use all engines]
        The subset of engines across which to load-balance
    """
    # 'all' is equivalent to None here: balance over every engine
    if targets == 'all':
        targets = None
    if targets is not None:
        targets = self._build_targets(targets)[1]
    return LoadBalancedView(client=self, socket=self._task_socket, targets=targets)
def direct_view(self, targets='all'):
    """construct a DirectView object.

    If no targets are specified, create a DirectView using all engines.

    rc.direct_view('all') is distinguished from rc[:] in that 'all' will
    evaluate the target engines at each execution, whereas rc[:] will connect to
    all *current* engines, and that list will not change.

    That is, 'all' will always use all engines, whereas rc[:] will not use
    engines added after the DirectView is constructed.

    Parameters
    ----------
    targets: list,slice,int,etc. [default: use all engines]
        The engines to use for the View
    """
    want_single = isinstance(targets, int)
    # allow 'all' to be lazily evaluated at each execution
    if targets != 'all':
        # resolve everything else to concrete int ids now
        targets = self._build_targets(targets)[1]
        if want_single:
            # an int target produced a one-element list; unwrap it
            targets = targets[0]
    return DirectView(client=self, socket=self._mux_socket, targets=targets)
#--------------------------------------------------------------------------
# Query methods
#--------------------------------------------------------------------------
@spin_first
def get_result(self, indices_or_msg_ids=None, block=None):
    """Retrieve a result by msg_id or history index, wrapped in an AsyncResult object.

    If the client already has the results, no request to the Hub will be made.

    This is a convenient way to construct AsyncResult objects, which are wrappers
    that include metadata about execution, and allow for awaiting results that
    were not submitted by this Client.

    It can also be a convenient way to retrieve the metadata associated with
    blocking execution, since it always retrieves the metadata.

    Examples
    --------
    ::

        In [10]: r = client.apply()

    Parameters
    ----------
    indices_or_msg_ids : integer history index, str msg_id, or list of either
        The indices or msg_ids of indices to be retrieved
    block : bool
        Whether to wait for the result to be done

    Returns
    -------
    AsyncResult
        A single AsyncResult object will always be returned.
    AsyncHubResult
        A subclass of AsyncResult that retrieves results from the Hub
    """
    block = self.block if block is None else block
    if indices_or_msg_ids is None:
        # default: most recent submission
        indices_or_msg_ids = -1

    single_result = False
    if not isinstance(indices_or_msg_ids, (list,tuple)):
        indices_or_msg_ids = [indices_or_msg_ids]
        single_result = True

    theids = []
    for id in indices_or_msg_ids:
        if isinstance(id, int):
            # resolve history index to a msg_id
            id = self.history[id]
        if not isinstance(id, basestring):
            raise TypeError("indices must be str or int, not %r"%id)
        theids.append(id)

    # anything not known locally must be fetched from the Hub
    local_ids = filter(lambda msg_id: msg_id in self.outstanding or msg_id in self.results, theids)
    remote_ids = filter(lambda msg_id: msg_id not in local_ids, theids)

    # given single msg_id initially, get_result should get the result itself,
    # not a length-one list
    if single_result:
        theids = theids[0]

    if remote_ids:
        ar = AsyncHubResult(self, msg_ids=theids)
    else:
        ar = AsyncResult(self, msg_ids=theids)

    if block:
        ar.wait()

    return ar
    @spin_first
    def resubmit(self, indices_or_msg_ids=None, metadata=None, block=None):
        """Resubmit one or more tasks.

        in-flight tasks may not be resubmitted.

        Parameters
        ----------
        indices_or_msg_ids : integer history index, str msg_id, or list of either
            The indices or msg_ids of indices to be retrieved
        block : bool
            Whether to wait for the result to be done

        Returns
        -------
        AsyncHubResult
            A subclass of AsyncResult that retrieves results from the Hub
        """
        # NOTE(review): the `metadata` parameter is accepted but never used in
        # this body — presumably kept for interface compatibility; confirm.
        block = self.block if block is None else block
        if indices_or_msg_ids is None:
            # default: resubmit the most recent submission
            indices_or_msg_ids = -1
        if not isinstance(indices_or_msg_ids, (list,tuple)):
            indices_or_msg_ids = [indices_or_msg_ids]
        theids = []
        for id in indices_or_msg_ids:
            if isinstance(id, int):
                # integers index into this client's submission history
                id = self.history[id]
            if not isinstance(id, basestring):
                raise TypeError("indices must be str or int, not %r"%id)
            theids.append(id)
        content = dict(msg_ids = theids)
        self.session.send(self._query_socket, 'resubmit_request', content)
        # wait for the Hub's reply before the non-blocking recv
        zmq.select([self._query_socket], [], [])
        idents,msg = self.session.recv(self._query_socket, zmq.NOBLOCK)
        if self.debug:
            pprint(msg)
        content = msg['content']
        if content['status'] != 'ok':
            raise self._unwrap_exception(content)
        mapping = content['resubmitted']
        # each resubmission gets a fresh msg_id; track those, not the originals
        new_ids = [ mapping[msg_id] for msg_id in theids ]
        ar = AsyncHubResult(self, msg_ids=new_ids)
        if block:
            ar.wait()
        return ar
@spin_first
def result_status(self, msg_ids, status_only=True):
"""Check on the status of the result(s) of the apply request with `msg_ids`.
If status_only is False, then the actual results will be retrieved, else
only the status of the results will be checked.
Parameters
----------
msg_ids : list of msg_ids
if int:
Passed as index to self.history for convenience.
status_only : bool (default: True)
if False:
Retrieve the actual results of completed tasks.
Returns
-------
results : dict
There will always be the keys 'pending' and 'completed', which will
be lists of msg_ids that are incomplete or complete. If `status_only`
is False, then completed results will be keyed by their `msg_id`.
"""
if not isinstance(msg_ids, (list,tuple)):
msg_ids = [msg_ids]
theids = []
for msg_id in msg_ids:
if isinstance(msg_id, int):
msg_id = self.history[msg_id]
if not isinstance(msg_id, basestring):
raise TypeError("msg_ids must be str, not %r"%msg_id)
theids.append(msg_id)
completed = []
local_results = {}
# comment this block out to temporarily disable local shortcut:
for msg_id in theids:
if msg_id in self.results:
completed.append(msg_id)
local_results[msg_id] = self.results[msg_id]
theids.remove(msg_id)
if theids: # some not locally cached
content = dict(msg_ids=theids, status_only=status_only)
msg = self.session.send(self._query_socket, "result_request", content=content)
zmq.select([self._query_socket], [], [])
idents,msg = self.session.recv(self._query_socket, zmq.NOBLOCK)
if self.debug:
pprint(msg)
content = msg['content']
if content['status'] != 'ok':
raise self._unwrap_exception(content)
buffers = msg['buffers']
else:
content = dict(completed=[],pending=[])
content['completed'].extend(completed)
if status_only:
return content
failures = []
# load cached results into result:
content.update(local_results)
# update cache with results:
for msg_id in sorted(theids):
if msg_id in content['completed']:
rec = content[msg_id]
parent = rec['header']
header = rec['result_header']
rcontent = rec['result_content']
iodict = rec['io']
if isinstance(rcontent, str):
rcontent = self.session.unpack(rcontent)
md = self.metadata[msg_id]
md_msg = dict(
content=rcontent,
parent_header=parent,
header=header,
metadata=rec['result_metadata'],
)
md.update(self._extract_metadata(md_msg))
if rec.get('received'):
md['received'] = rec['received']
md.update(iodict)
if rcontent['status'] == 'ok':
if header['msg_type'] == 'apply_reply':
res,buffers = serialize.unserialize_object(buffers)
elif header['msg_type'] == 'execute_reply':
res = ExecuteReply(msg_id, rcontent, md)
else:
raise KeyError("unhandled msg type: %r" % header['msg_type'])
else:
res = self._unwrap_exception(rcontent)
failures.append(res)
self.results[msg_id] = res
content[msg_id] = res
if len(theids) == 1 and failures:
raise failures[0]
error.collect_exceptions(failures, "result_status")
return content
@spin_first
def queue_status(self, targets='all', verbose=False):
"""Fetch the status of engine queues.
Parameters
----------
targets : int/str/list of ints/strs
the engines whose states are to be queried.
default : all
verbose : bool
Whether to return lengths only, or lists of ids for each element
"""
if targets == 'all':
# allow 'all' to be evaluated on the engine
engine_ids = None
else:
engine_ids = self._build_targets(targets)[1]
content = dict(targets=engine_ids, verbose=verbose)
self.session.send(self._query_socket, "queue_request", content=content)
idents,msg = self.session.recv(self._query_socket, 0)
if self.debug:
pprint(msg)
content = msg['content']
status = content.pop('status')
if status != 'ok':
raise self._unwrap_exception(content)
content = rekey(content)
if isinstance(targets, int):
return content[targets]
else:
return content
def _build_msgids_from_target(self, targets=None):
"""Build a list of msg_ids from the list of engine targets"""
if not targets: # needed as _build_targets otherwise uses all engines
return []
target_ids = self._build_targets(targets)[0]
return filter(lambda md_id: self.metadata[md_id]["engine_uuid"] in target_ids, self.metadata)
def _build_msgids_from_jobs(self, jobs=None):
"""Build a list of msg_ids from "jobs" """
if not jobs:
return []
msg_ids = []
if isinstance(jobs, (basestring,AsyncResult)):
jobs = [jobs]
bad_ids = filter(lambda obj: not isinstance(obj, (basestring, AsyncResult)), jobs)
if bad_ids:
raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
for j in jobs:
if isinstance(j, AsyncResult):
msg_ids.extend(j.msg_ids)
else:
msg_ids.append(j)
return msg_ids
def purge_local_results(self, jobs=[], targets=[]):
"""Clears the client caches of results and frees such memory.
Individual results can be purged by msg_id, or the entire
history of specific targets can be purged.
Use `purge_local_results('all')` to scrub everything from the Clients's db.
The client must have no outstanding tasks before purging the caches.
Raises `AssertionError` if there are still outstanding tasks.
After this call all `AsyncResults` are invalid and should be discarded.
If you must "reget" the results, you can still do so by using
`client.get_result(msg_id)` or `client.get_result(asyncresult)`. This will
redownload the results from the hub if they are still available
(i.e `client.purge_hub_results(...)` has not been called.
Parameters
----------
jobs : str or list of str or AsyncResult objects
the msg_ids whose results should be purged.
targets : int/str/list of ints/strs
The targets, by int_id, whose entire results are to be purged.
default : None
"""
assert not self.outstanding, "Can't purge a client with outstanding tasks!"
if not targets and not jobs:
raise ValueError("Must specify at least one of `targets` and `jobs`")
if jobs == 'all':
self.results.clear()
self.metadata.clear()
return
else:
msg_ids = []
msg_ids.extend(self._build_msgids_from_target(targets))
msg_ids.extend(self._build_msgids_from_jobs(jobs))
map(self.results.pop, msg_ids)
map(self.metadata.pop, msg_ids)
@spin_first
def purge_hub_results(self, jobs=[], targets=[]):
"""Tell the Hub to forget results.
Individual results can be purged by msg_id, or the entire
history of specific targets can be purged.
Use `purge_results('all')` to scrub everything from the Hub's db.
Parameters
----------
jobs : str or list of str or AsyncResult objects
the msg_ids whose results should be forgotten.
targets : int/str/list of ints/strs
The targets, by int_id, whose entire history is to be purged.
default : None
"""
if not targets and not jobs:
raise ValueError("Must specify at least one of `targets` and `jobs`")
if targets:
targets = self._build_targets(targets)[1]
# construct msg_ids from jobs
if jobs == 'all':
msg_ids = jobs
else:
msg_ids = self._build_msgids_from_jobs(jobs)
content = dict(engine_ids=targets, msg_ids=msg_ids)
self.session.send(self._query_socket, "purge_request", content=content)
idents, msg = self.session.recv(self._query_socket, 0)
if self.debug:
pprint(msg)
content = msg['content']
if content['status'] != 'ok':
raise self._unwrap_exception(content)
def purge_results(self, jobs=[], targets=[]):
"""Clears the cached results from both the hub and the local client
Individual results can be purged by msg_id, or the entire
history of specific targets can be purged.
Use `purge_results('all')` to scrub every cached result from both the Hub's and
the Client's db.
Equivalent to calling both `purge_hub_results()` and `purge_client_results()` with
the same arguments.
Parameters
----------
jobs : str or list of str or AsyncResult objects
the msg_ids whose results should be forgotten.
targets : int/str/list of ints/strs
The targets, by int_id, whose entire history is to be purged.
default : None
"""
self.purge_local_results(jobs=jobs, targets=targets)
self.purge_hub_results(jobs=jobs, targets=targets)
def purge_everything(self):
"""Clears all content from previous Tasks from both the hub and the local client
In addition to calling `purge_results("all")` it also deletes the history and
other bookkeeping lists.
"""
self.purge_results("all")
self.history = []
self.session.digest_history.clear()
@spin_first
def hub_history(self):
"""Get the Hub's history
Just like the Client, the Hub has a history, which is a list of msg_ids.
This will contain the history of all clients, and, depending on configuration,
may contain history across multiple cluster sessions.
Any msg_id returned here is a valid argument to `get_result`.
Returns
-------
msg_ids : list of strs
list of all msg_ids, ordered by task submission time.
"""
self.session.send(self._query_socket, "history_request", content={})
idents, msg = self.session.recv(self._query_socket, 0)
if self.debug:
pprint(msg)
content = msg['content']
if content['status'] != 'ok':
raise self._unwrap_exception(content)
else:
return content['history']
    @spin_first
    def db_query(self, query, keys=None):
        """Query the Hub's TaskRecord database

        This will return a list of task record dicts that match `query`

        Parameters
        ----------
        query : mongodb query dict
            The search dict. See mongodb query docs for details.
        keys : list of strs [optional]
            The subset of keys to be returned. The default is to fetch everything but buffers.
            'msg_id' will *always* be included.
        """
        # allow a single key to be passed as a bare string
        if isinstance(keys, basestring):
            keys = [keys]
        content = dict(query=query, keys=keys)
        self.session.send(self._query_socket, "db_request", content=content)
        idents, msg = self.session.recv(self._query_socket, 0)
        if self.debug:
            pprint(msg)
        content = msg['content']
        if content['status'] != 'ok':
            raise self._unwrap_exception(content)
        records = content['records']
        buffer_lens = content['buffer_lens']
        result_buffer_lens = content['result_buffer_lens']
        buffers = msg['buffers']
        has_bufs = buffer_lens is not None
        has_rbufs = result_buffer_lens is not None
        for i,rec in enumerate(records):
            # relink buffers
            # all records' buffers arrive as one flat list; each record's
            # declared length says how many to slice off the front for it
            if has_bufs:
                blen = buffer_lens[i]
                rec['buffers'], buffers = buffers[:blen],buffers[blen:]
            if has_rbufs:
                blen = result_buffer_lens[i]
                rec['result_buffers'], buffers = buffers[:blen],buffers[blen:]
        return records
__all__ = [ 'Client' ]
| {
"repo_name": "marcoantoniooliveira/labweb",
"path": "oscar/lib/python2.7/site-packages/IPython/parallel/client/client.py",
"copies": "1",
"size": "67460",
"license": "bsd-3-clause",
"hash": -4372829999402266600,
"line_mean": 35.4254859611,
"line_max": 103,
"alpha_frac": 0.5517047139,
"autogenerated": false,
"ratio": 4.434073879321677,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007273066975719277,
"num_lines": 1852
} |
# A semordnilap is a word or a phrase that spells a different word when
# backwards ("semordnilap" is a semordnilap of "palindromes"). Here are some
# examples:
#
# nametag / gateman
# dog / god
# live / evil
# desserts / stressed
#
# Write a recursive program, `semordnilap`, that takes in two words and says if
# they are semordnilap.
#
# This recursive function is not entirely straightforward. There are a few
# things that you need to check the first time you look at the inputs that you
# should not check on subsequent recursive calls: you need to make sure that the
# strings are not single characters, and also you need to be sure that the
# strings are not equal. If you do this check every time you call your function,
# though, this will end up interfering with the recursive base case (which we
# don't want!).
#
# The idea of a wrapper function is really important. You'll see more wrapper
# functions later. To introduce you to the idea, we are providing you with the
# wrapper function; your job is to write the recursive function semordnilap that
# the wrapper function calls. Here is the wrapper function:
#
# def semordnilapWrapper(str1, str2):
# # A single-length string cannot be semordnilap
# if len(str1) == 1 or len(str2) == 1:
# return False
#
# # Equal strings cannot be semordnilap
# if str1 == str2:
# return False
#
# return semordnilap(str1, str2)
def semordnilap(str1, str2):
    '''
    str1: a string
    str2: a string

    returns: True if str1 and str2 are semordnilap;
             False otherwise.
    '''
    # BUGFIX: the original returned True as soon as the first character of
    # str1 matched the last character of str2, so the recursive call was
    # unreachable and e.g. ("dab", "bxd") was wrongly accepted.
    # Strings of different lengths can never be reversals of each other.
    if len(str1) != len(str2):
        return False
    # Base case: every character pair has been matched.
    if not str1:
        return True
    # Compare the head of str1 against the tail of str2.
    if str1[0] != str2[-1]:
        return False
    # Heads/tails match; recurse on the remaining interiors.
    return semordnilap(str1[1:], str2[:-1])
"repo_name": "emyarod/OSS",
"path": "1_intro/6.00.1x/Week 3/L5 Problems/L5 Problem 9.py",
"copies": "1",
"size": "1795",
"license": "mit",
"hash": -2730546031892387300,
"line_mean": 32.8867924528,
"line_max": 80,
"alpha_frac": 0.6846796657,
"autogenerated": false,
"ratio": 3.4786821705426356,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46633618362426354,
"avg_score": null,
"num_lines": null
} |
"""A sensor for incoming calls using a USB modem that supports caller ID."""
import logging
import voluptuous as vol
from homeassistant.const import (
STATE_IDLE,
EVENT_HOMEASSISTANT_STOP,
CONF_NAME,
CONF_DEVICE,
)
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Modem CallerID"
ICON = "mdi:phone-classic"
DEFAULT_DEVICE = "/dev/ttyACM0"
STATE_RING = "ring"
STATE_CALLERID = "callerid"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_DEVICE, default=DEFAULT_DEVICE): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up modem caller ID sensor platform."""
    # Imported lazily so the dependency is only loaded when the platform is used.
    from basicmodem.basicmodem import BasicModem
    name = config.get(CONF_NAME)
    port = config.get(CONF_DEVICE)
    modem = BasicModem(port)
    if modem.state != modem.STATE_FAILED:
        add_entities([ModemCalleridSensor(hass, name, port, modem)])
        return
    _LOGGER.error("Unable to initialize modem.")
class ModemCalleridSensor(Entity):
    """Implementation of USB modem caller ID sensor."""

    def __init__(self, hass, name, port, modem):
        """Initialize the sensor."""
        self._name = name
        self.port = port
        self.modem = modem
        self._state = STATE_IDLE
        self._attributes = {"cid_time": 0, "cid_number": "", "cid_name": ""}
        # Push-based updates: the modem drives state changes via this callback.
        modem.registercallback(self._incomingcallcallback)
        # Release the serial port when Home Assistant shuts down.
        hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, self._stop_modem)

    def set_state(self, state):
        """Set the state."""
        self._state = state

    def set_attributes(self, attributes):
        """Set the state attributes."""
        self._attributes = attributes

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def icon(self):
        """Return icon."""
        return ICON

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return self._attributes

    def _stop_modem(self, event):
        """HA is shutting down, close modem port."""
        if self.modem:
            self.modem.close()
            self.modem = None

    def _incomingcallcallback(self, newstate):
        """Handle new states."""
        if newstate == self.modem.STATE_RING:
            # Only react to a ring while the line was idle.
            if self.state != self.modem.STATE_IDLE:
                return
            self.set_attributes(
                {"cid_time": self.modem.get_cidtime, "cid_number": "", "cid_name": ""}
            )
            self._state = STATE_RING
        elif newstate == self.modem.STATE_CALLERID:
            self.set_attributes(
                {
                    "cid_time": self.modem.get_cidtime,
                    "cid_number": self.modem.get_cidnumber,
                    "cid_name": self.modem.get_cidname,
                }
            )
            self._state = STATE_CALLERID
        elif newstate == self.modem.STATE_IDLE:
            self._state = STATE_IDLE
        else:
            # Unknown modem state: nothing to publish.
            return
        self.schedule_update_ha_state()
| {
"repo_name": "joopert/home-assistant",
"path": "homeassistant/components/modem_callerid/sensor.py",
"copies": "4",
"size": "3582",
"license": "apache-2.0",
"hash": 7721196765802210000,
"line_mean": 28.85,
"line_max": 76,
"alpha_frac": 0.5971524288,
"autogenerated": false,
"ratio": 3.9711751662971175,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6568327595097118,
"avg_score": null,
"num_lines": null
} |
"""A sensor for incoming calls using a USB modem that supports caller ID."""
import logging
import voluptuous as vol
from homeassistant.const import (STATE_IDLE,
EVENT_HOMEASSISTANT_STOP,
CONF_NAME,
CONF_DEVICE)
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Modem CallerID'
ICON = 'mdi:phone-classic'
DEFAULT_DEVICE = '/dev/ttyACM0'
STATE_RING = 'ring'
STATE_CALLERID = 'callerid'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_DEVICE, default=DEFAULT_DEVICE): cv.string
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up modem caller ID sensor platform."""
    # Imported here so the dependency is only loaded when the platform is set up.
    from basicmodem.basicmodem import BasicModem
    port = config.get(CONF_DEVICE)
    name = config.get(CONF_NAME)
    modem = BasicModem(port)
    if modem.state == modem.STATE_FAILED:
        _LOGGER.error('Unable to initialize modem.')
        return
    add_entities([ModemCalleridSensor(hass, name, port, modem)])
class ModemCalleridSensor(Entity):
    """Implementation of USB modem caller ID sensor."""

    def __init__(self, hass, name, port, modem):
        """Initialize the sensor."""
        self._name = name
        self.port = port
        self.modem = modem
        self._state = STATE_IDLE
        self._attributes = {"cid_time": 0, "cid_number": '', "cid_name": ''}
        # Push-based updates: the modem drives state changes via this callback.
        modem.registercallback(self._incomingcallcallback)
        # Release the serial port when Home Assistant shuts down.
        hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, self._stop_modem)

    def set_state(self, state):
        """Set the state."""
        self._state = state

    def set_attributes(self, attributes):
        """Set the state attributes."""
        self._attributes = attributes

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def icon(self):
        """Return icon."""
        return ICON

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return self._attributes

    def _stop_modem(self, event):
        """HA is shutting down, close modem port."""
        if self.modem:
            self.modem.close()
            self.modem = None

    def _incomingcallcallback(self, newstate):
        """Handle new states reported by the modem."""
        modem = self.modem
        if newstate == modem.STATE_RING and self.state == modem.STATE_IDLE:
            # First ring of an incoming call: reset caller info.
            self.set_attributes({"cid_time": modem.get_cidtime,
                                 "cid_number": '',
                                 "cid_name": ''})
            self.set_state(STATE_RING)
            self.schedule_update_ha_state()
        elif newstate == modem.STATE_CALLERID:
            self.set_attributes({"cid_time": modem.get_cidtime,
                                 "cid_number": modem.get_cidnumber,
                                 "cid_name": modem.get_cidname})
            self.set_state(STATE_CALLERID)
            self.schedule_update_ha_state()
        elif newstate == modem.STATE_IDLE:
            self.set_state(STATE_IDLE)
            self.schedule_update_ha_state()
| {
"repo_name": "auduny/home-assistant",
"path": "homeassistant/components/modem_callerid/sensor.py",
"copies": "7",
"size": "3584",
"license": "apache-2.0",
"hash": -7587118682884345000,
"line_mean": 31.2882882883,
"line_max": 76,
"alpha_frac": 0.5968191964,
"autogenerated": false,
"ratio": 4.049717514124294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 111
} |
"""A sensor for incoming calls using a USB modem that supports caller ID."""
import logging
from basicmodem.basicmodem import BasicModem as bm
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_DEVICE,
CONF_NAME,
EVENT_HOMEASSISTANT_STOP,
STATE_IDLE,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Modem CallerID"
ICON = "mdi:phone-classic"
DEFAULT_DEVICE = "/dev/ttyACM0"
STATE_RING = "ring"
STATE_CALLERID = "callerid"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_DEVICE, default=DEFAULT_DEVICE): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up modem caller ID sensor platform."""
    port = config.get(CONF_DEVICE)
    name = config.get(CONF_NAME)
    modem = bm(port)
    if modem.state != modem.STATE_FAILED:
        add_entities([ModemCalleridSensor(hass, name, port, modem)])
    else:
        _LOGGER.error("Unable to initialize modem")
class ModemCalleridSensor(Entity):
    """Implementation of USB modem caller ID sensor."""

    def __init__(self, hass, name, port, modem):
        """Initialize the sensor."""
        self._attributes = dict(cid_time=0, cid_number="", cid_name="")
        self._state = STATE_IDLE
        self._name = name
        self.port = port
        self.modem = modem
        # Push-based: the modem library invokes this callback on state changes.
        modem.registercallback(self._incomingcallcallback)
        # Close the serial port cleanly when Home Assistant stops.
        hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, self._stop_modem)

    def set_state(self, state):
        """Set the state."""
        self._state = state

    def set_attributes(self, attributes):
        """Set the state attributes."""
        self._attributes = attributes

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def icon(self):
        """Return icon."""
        return ICON

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return self._attributes

    def _stop_modem(self, event):
        """HA is shutting down, close modem port."""
        if self.modem:
            self.modem.close()
            self.modem = None

    def _incomingcallcallback(self, newstate):
        """Handle new states."""
        modem = self.modem
        if newstate == modem.STATE_RING:
            # Only the first ring while idle is published.
            if self.state == modem.STATE_IDLE:
                self.set_attributes(
                    dict(cid_time=modem.get_cidtime, cid_number="", cid_name="")
                )
                self.set_state(STATE_RING)
                self.schedule_update_ha_state()
        elif newstate == modem.STATE_CALLERID:
            self.set_attributes(
                dict(
                    cid_time=modem.get_cidtime,
                    cid_number=modem.get_cidnumber,
                    cid_name=modem.get_cidname,
                )
            )
            self.set_state(STATE_CALLERID)
            self.schedule_update_ha_state()
        elif newstate == modem.STATE_IDLE:
            self.set_state(STATE_IDLE)
            self.schedule_update_ha_state()
| {
"repo_name": "mKeRix/home-assistant",
"path": "homeassistant/components/modem_callerid/sensor.py",
"copies": "14",
"size": "3579",
"license": "mit",
"hash": 6342872922084965000,
"line_mean": 28.3360655738,
"line_max": 76,
"alpha_frac": 0.5976529757,
"autogenerated": false,
"ratio": 3.959070796460177,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A sensor platform that give you information about the next space launch."""
from datetime import timedelta
import logging
from typing import Optional
from pylaunches import PyLaunches, PyLaunchesException
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from .const import (
ATTR_AGENCY,
ATTR_AGENCY_COUNTRY_CODE,
ATTR_LAUNCH_TIME,
ATTR_STREAM,
ATTRIBUTION,
DEFAULT_NAME,
)
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(hours=1)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Create the launch sensor."""
    name = config[CONF_NAME]
    launches = PyLaunches(async_get_clientsession(hass))
    # True: request an immediate first update
    async_add_entities([LaunchLibrarySensor(launches, name)], True)
class LaunchLibrarySensor(Entity):
    """Representation of a launch_library Sensor."""

    def __init__(self, launches: PyLaunches, name: str) -> None:
        """Initialize the sensor."""
        self.launches = launches
        self.next_launch = None
        self._name = name

    async def async_update(self) -> None:
        """Fetch the upcoming launches and remember the next one."""
        try:
            upcoming = await self.launches.upcoming_launches()
        except PyLaunchesException as exception:
            _LOGGER.error("Error getting data, %s", exception)
            return
        if upcoming:
            self.next_launch = upcoming[0]

    @property
    def name(self) -> str:
        """Return the name of the sensor."""
        return self._name

    @property
    def icon(self) -> str:
        """Return the icon of the sensor."""
        return "mdi:rocket"

    @property
    def state(self) -> Optional[str]:
        """Return the name of the next launch, if known."""
        return self.next_launch.name if self.next_launch else None

    @property
    def device_state_attributes(self) -> Optional[dict]:
        """Return attributes for the sensor."""
        if not self.next_launch:
            return None
        return {
            ATTR_LAUNCH_TIME: self.next_launch.net,
            ATTR_AGENCY: self.next_launch.launch_service_provider.name,
            ATTR_AGENCY_COUNTRY_CODE: self.next_launch.pad.location.country_code,
            ATTR_STREAM: self.next_launch.webcast_live,
            ATTR_ATTRIBUTION: ATTRIBUTION,
        }
| {
"repo_name": "soldag/home-assistant",
"path": "homeassistant/components/launch_library/sensor.py",
"copies": "6",
"size": "2741",
"license": "apache-2.0",
"hash": -6936935101575951000,
"line_mean": 29.797752809,
"line_max": 86,
"alpha_frac": 0.6548704852,
"autogenerated": false,
"ratio": 4.066765578635015,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7721636063835015,
"avg_score": null,
"num_lines": null
} |
"""A sensor platform that give you information about the next space launch."""
from datetime import timedelta
import logging
from pylaunches.api import Launches
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by Launch Library."
DEFAULT_NAME = "Next launch"
SCAN_INTERVAL = timedelta(hours=1)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Create the launch sensor."""
    name = config[CONF_NAME]
    api = Launches(hass.loop, async_get_clientsession(hass))
    # True: request an immediate first update
    async_add_entities([LaunchLibrarySensor(api, name)], True)
class LaunchLibrarySensor(Entity):
    """Representation of a launch_library Sensor."""

    def __init__(self, launches, name):
        """Initialize the sensor."""
        self.launches = launches
        self._name = name
        self._state = None
        self._attributes = {}

    async def async_update(self):
        """Get the latest data."""
        await self.launches.get_launches()
        launch_list = self.launches.launches
        if launch_list is None:
            _LOGGER.error("No data received")
            return
        try:
            upcoming = launch_list[0]
            self._state = upcoming["name"]
            self._attributes["launch_time"] = upcoming["start"]
            self._attributes["agency"] = upcoming["agency"]
            self._attributes["agency_country_code"] = upcoming["agency_country_code"]
            self._attributes["stream"] = upcoming["stream"]
            self._attributes[ATTR_ATTRIBUTION] = ATTRIBUTION
        except (KeyError, IndexError) as error:
            # Missing fields or an empty launch list leave prior values intact.
            _LOGGER.debug("Error getting data, %s", error)

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def icon(self):
        """Return the icon of the sensor."""
        return "mdi:rocket"

    @property
    def device_state_attributes(self):
        """Return attributes for the sensor."""
        return self._attributes
| {
"repo_name": "GenericStudent/home-assistant",
"path": "homeassistant/components/launch_library/sensor.py",
"copies": "13",
"size": "2612",
"license": "apache-2.0",
"hash": 4469167184717340700,
"line_mean": 30.0952380952,
"line_max": 86,
"alpha_frac": 0.6558192956,
"autogenerated": false,
"ratio": 4.185897435897436,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00013683634373289546,
"num_lines": 84
} |
"""A sensor platform that give you information about the next space launch."""
from datetime import timedelta
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.aiohttp_client import async_get_clientsession
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by Launch Library."
DEFAULT_NAME = "Next launch"
SCAN_INTERVAL = timedelta(hours=1)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Create the launch sensor."""
    # Imported here so the dependency is only loaded when the platform is set up.
    from pylaunches.api import Launches
    name = config[CONF_NAME]
    session = async_get_clientsession(hass)
    # True: request an immediate first update
    async_add_entities([LaunchLibrarySensor(Launches(hass.loop, session), name)], True)
class LaunchLibrarySensor(Entity):
    """Representation of a launch_library Sensor."""

    def __init__(self, launches, name):
        """Initialize the sensor."""
        self.launches = launches
        self._attributes = {}
        self._state = None
        self._name = name

    async def async_update(self):
        """Get the latest data."""
        await self.launches.get_launches()
        if self.launches.launches is None:
            _LOGGER.error("No data received")
            return
        try:
            next_launch = self.launches.launches[0]
            self._state = next_launch["name"]
            # Copy the interesting fields of the next launch into attributes.
            for attr, key in (
                ("launch_time", "start"),
                ("agency", "agency"),
                ("agency_country_code", "agency_country_code"),
                ("stream", "stream"),
            ):
                self._attributes[attr] = next_launch[key]
            self._attributes[ATTR_ATTRIBUTION] = ATTRIBUTION
        except (KeyError, IndexError) as error:
            _LOGGER.debug("Error getting data, %s", error)

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def icon(self):
        """Return the icon of the sensor."""
        return "mdi:rocket"

    @property
    def device_state_attributes(self):
        """Return attributes for the sensor."""
        return self._attributes
| {
"repo_name": "joopert/home-assistant",
"path": "homeassistant/components/launch_library/sensor.py",
"copies": "4",
"size": "2616",
"license": "apache-2.0",
"hash": 6650959488487365000,
"line_mean": 30.1428571429,
"line_max": 86,
"alpha_frac": 0.6548165138,
"autogenerated": false,
"ratio": 4.1923076923076925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6847124206107693,
"avg_score": null,
"num_lines": null
} |
"""A sensor platform that give you information about the next space launch."""
from __future__ import annotations
from datetime import timedelta
import logging
from pylaunches import PyLaunches, PyLaunchesException
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from .const import (
ATTR_AGENCY,
ATTR_AGENCY_COUNTRY_CODE,
ATTR_LAUNCH_TIME,
ATTR_STREAM,
ATTRIBUTION,
DEFAULT_NAME,
)
# Module-level logger for this platform.
_LOGGER = logging.getLogger(__name__)
# Poll the launch API at most once per hour.
SCAN_INTERVAL = timedelta(hours=1)
# Platform config accepts an optional friendly name, defaulting to DEFAULT_NAME.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Create the launch sensor."""
    # Reuse Home Assistant's shared aiohttp session for the API client.
    session = async_get_clientsession(hass)
    client = PyLaunches(session)
    async_add_entities([LaunchLibrarySensor(client, config[CONF_NAME])], True)
class LaunchLibrarySensor(SensorEntity):
    """Representation of a launch_library Sensor."""

    def __init__(self, launches: PyLaunches, name: str) -> None:
        """Initialize the sensor."""
        self.launches = launches
        self._name = name
        self.next_launch = None

    async def async_update(self) -> None:
        """Get the latest data."""
        try:
            upcoming = await self.launches.upcoming_launches()
        except PyLaunchesException as exception:
            _LOGGER.error("Error getting data, %s", exception)
            return
        if upcoming:
            # Keep only the next launch in the list.
            self.next_launch = upcoming[0]

    @property
    def name(self) -> str:
        """Return the name of the sensor."""
        return self._name

    @property
    def icon(self) -> str:
        """Return the icon of the sensor."""
        return "mdi:rocket"

    @property
    def state(self) -> str | None:
        """Return the state of the sensor."""
        launch = self.next_launch
        return launch.name if launch else None

    @property
    def extra_state_attributes(self) -> dict | None:
        """Return attributes for the sensor."""
        launch = self.next_launch
        if not launch:
            return None
        return {
            ATTR_LAUNCH_TIME: launch.net,
            ATTR_AGENCY: launch.launch_service_provider.name,
            ATTR_AGENCY_COUNTRY_CODE: launch.pad.location.country_code,
            ATTR_STREAM: launch.webcast_live,
            ATTR_ATTRIBUTION: ATTRIBUTION,
        }
| {
"repo_name": "adrienbrault/home-assistant",
"path": "homeassistant/components/launch_library/sensor.py",
"copies": "5",
"size": "2714",
"license": "mit",
"hash": 8443492476257792000,
"line_mean": 29.4943820225,
"line_max": 86,
"alpha_frac": 0.6503316139,
"autogenerated": false,
"ratio": 4.038690476190476,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7189022090090477,
"avg_score": null,
"num_lines": null
} |
"""A sensor that monitors trends in other components."""
from collections import deque
import logging
import math
import numpy as np
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES_SCHEMA,
ENTITY_ID_FORMAT,
PLATFORM_SCHEMA,
BinarySensorDevice,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
CONF_DEVICE_CLASS,
CONF_ENTITY_ID,
CONF_FRIENDLY_NAME,
CONF_SENSORS,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.helpers.event import async_track_state_change
from homeassistant.util import utcnow
_LOGGER = logging.getLogger(__name__)
# Attribute names exposed via device_state_attributes.
ATTR_ATTRIBUTE = "attribute"
ATTR_GRADIENT = "gradient"
ATTR_MIN_GRADIENT = "min_gradient"
ATTR_INVERT = "invert"
ATTR_SAMPLE_DURATION = "sample_duration"
ATTR_SAMPLE_COUNT = "sample_count"
# Per-sensor configuration keys.
CONF_ATTRIBUTE = "attribute"
CONF_INVERT = "invert"
CONF_MAX_SAMPLES = "max_samples"
CONF_MIN_GRADIENT = "min_gradient"
CONF_SAMPLE_DURATION = "sample_duration"
# Schema for one sensor entry under "sensors:".
SENSOR_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_ENTITY_ID): cv.entity_id,
        vol.Optional(CONF_ATTRIBUTE): cv.string,
        vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
        vol.Optional(CONF_FRIENDLY_NAME): cv.string,
        vol.Optional(CONF_INVERT, default=False): cv.boolean,
        vol.Optional(CONF_MAX_SAMPLES, default=2): cv.positive_int,
        vol.Optional(CONF_MIN_GRADIENT, default=0.0): vol.Coerce(float),
        vol.Optional(CONF_SAMPLE_DURATION, default=0): cv.positive_int,
    }
)
# Platform schema: mapping of slug -> sensor definition.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {vol.Required(CONF_SENSORS): cv.schema_with_slug_keys(SENSOR_SCHEMA)}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the trend sensors.

    Creates one SensorTrend entity per entry under ``sensors:`` in the
    platform configuration and registers them with Home Assistant.
    """
    sensors = []
    for device_id, device_config in config[CONF_SENSORS].items():
        # Read values with the same CONF_* keys SENSOR_SCHEMA declares.
        # (The previous ATTR_ENTITY_ID/ATTR_FRIENDLY_NAME lookups worked
        # only because those constants hold identical strings — misleading.)
        entity_id = device_config[CONF_ENTITY_ID]
        attribute = device_config.get(CONF_ATTRIBUTE)
        device_class = device_config.get(CONF_DEVICE_CLASS)
        friendly_name = device_config.get(CONF_FRIENDLY_NAME, device_id)
        invert = device_config[CONF_INVERT]
        max_samples = device_config[CONF_MAX_SAMPLES]
        min_gradient = device_config[CONF_MIN_GRADIENT]
        sample_duration = device_config[CONF_SAMPLE_DURATION]
        sensors.append(
            SensorTrend(
                hass,
                device_id,
                friendly_name,
                entity_id,
                attribute,
                device_class,
                invert,
                max_samples,
                min_gradient,
                sample_duration,
            )
        )
    if not sensors:
        _LOGGER.error("No sensors added")
        return
    add_entities(sensors)
class SensorTrend(BinarySensorDevice):
    """Representation of a trend Sensor.

    Tracks another entity (or one of its attributes), keeps a bounded
    window of (timestamp, value) samples, and turns on when the linear
    trend gradient exceeds the configured minimum gradient.
    """
    def __init__(
        self,
        hass,
        device_id,
        friendly_name,
        entity_id,
        attribute,
        device_class,
        invert,
        max_samples,
        min_gradient,
        sample_duration,
    ):
        """Initialize the sensor."""
        self._hass = hass
        self.entity_id = generate_entity_id(ENTITY_ID_FORMAT, device_id, hass=hass)
        self._name = friendly_name
        self._entity_id = entity_id
        self._attribute = attribute
        self._device_class = device_class
        self._invert = invert
        self._sample_duration = sample_duration
        self._min_gradient = min_gradient
        self._gradient = None
        self._state = None
        # Bounded deque of (timestamp, value); old samples roll off.
        self.samples = deque(maxlen=max_samples)
    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name
    @property
    def is_on(self):
        """Return true if sensor is on."""
        return self._state
    @property
    def device_class(self):
        """Return the sensor class of the sensor."""
        return self._device_class
    @property
    def device_state_attributes(self):
        """Return the state attributes of the sensor."""
        return {
            ATTR_ENTITY_ID: self._entity_id,
            ATTR_FRIENDLY_NAME: self._name,
            ATTR_GRADIENT: self._gradient,
            ATTR_INVERT: self._invert,
            ATTR_MIN_GRADIENT: self._min_gradient,
            ATTR_SAMPLE_COUNT: len(self.samples),
            ATTR_SAMPLE_DURATION: self._sample_duration,
        }
    @property
    def should_poll(self):
        """No polling needed."""
        return False
    async def async_added_to_hass(self):
        """Complete device setup after being added to hass."""
        @callback
        def trend_sensor_state_listener(entity, old_state, new_state):
            """Handle state changes on the observed device."""
            try:
                if self._attribute:
                    state = new_state.attributes.get(self._attribute)
                else:
                    state = new_state.state
                # Skip unknown/unavailable; float() below rejects the rest.
                if state not in (STATE_UNKNOWN, STATE_UNAVAILABLE):
                    # Timestamp with the state's own update time.
                    sample = (new_state.last_updated.timestamp(), float(state))
                    self.samples.append(sample)
                    self.async_schedule_update_ha_state(True)
            except (ValueError, TypeError) as ex:
                _LOGGER.error(ex)
        async_track_state_change(
            self.hass, self._entity_id, trend_sensor_state_listener
        )
    async def async_update(self):
        """Get the latest data and update the states."""
        # Remove outdated samples
        if self._sample_duration > 0:
            cutoff = utcnow().timestamp() - self._sample_duration
            while self.samples and self.samples[0][0] < cutoff:
                self.samples.popleft()
        if len(self.samples) < 2:
            return
        # Calculate gradient of linear trend
        await self.hass.async_add_job(self._calculate_gradient)
        # Update state
        # On iff the gradient is large enough AND has the same sign as
        # the configured minimum gradient.
        self._state = (
            abs(self._gradient) > abs(self._min_gradient)
            and math.copysign(self._gradient, self._min_gradient) == self._gradient
        )
        if self._invert:
            self._state = not self._state
    def _calculate_gradient(self):
        """Compute the linear trend gradient of the current samples.
        This needs to run inside the executor (numpy polyfit is blocking).
        """
        timestamps = np.array([t for t, _ in self.samples])
        values = np.array([s for _, s in self.samples])
        # Degree-1 fit; coeffs[0] is the slope (value units per second).
        coeffs = np.polyfit(timestamps, values, 1)
        self._gradient = coeffs[0]
| {
"repo_name": "Teagan42/home-assistant",
"path": "homeassistant/components/trend/binary_sensor.py",
"copies": "5",
"size": "6698",
"license": "apache-2.0",
"hash": 6482002634884211000,
"line_mean": 30.2990654206,
"line_max": 83,
"alpha_frac": 0.6058524933,
"autogenerated": false,
"ratio": 3.9845330160618677,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7090385509361867,
"avg_score": null,
"num_lines": null
} |
"""A sensor that monitors trends in other components."""
from collections import deque
import logging
import math
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES_SCHEMA, ENTITY_ID_FORMAT, PLATFORM_SCHEMA,
BinarySensorDevice)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_FRIENDLY_NAME, CONF_DEVICE_CLASS, CONF_ENTITY_ID,
CONF_FRIENDLY_NAME, STATE_UNKNOWN, CONF_SENSORS)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.helpers.event import async_track_state_change
from homeassistant.util import utcnow
# Runtime requirement installed by Home Assistant for this platform.
REQUIREMENTS = ['numpy==1.16.2']
_LOGGER = logging.getLogger(__name__)
# Attribute names exposed via device_state_attributes.
ATTR_ATTRIBUTE = 'attribute'
ATTR_GRADIENT = 'gradient'
ATTR_MIN_GRADIENT = 'min_gradient'
ATTR_INVERT = 'invert'
ATTR_SAMPLE_DURATION = 'sample_duration'
ATTR_SAMPLE_COUNT = 'sample_count'
# Per-sensor configuration keys.
CONF_ATTRIBUTE = 'attribute'
CONF_INVERT = 'invert'
CONF_MAX_SAMPLES = 'max_samples'
CONF_MIN_GRADIENT = 'min_gradient'
CONF_SAMPLE_DURATION = 'sample_duration'
# Schema for one sensor entry under "sensors:".
SENSOR_SCHEMA = vol.Schema({
    vol.Required(CONF_ENTITY_ID): cv.entity_id,
    vol.Optional(CONF_ATTRIBUTE): cv.string,
    vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
    vol.Optional(CONF_FRIENDLY_NAME): cv.string,
    vol.Optional(CONF_INVERT, default=False): cv.boolean,
    vol.Optional(CONF_MAX_SAMPLES, default=2): cv.positive_int,
    vol.Optional(CONF_MIN_GRADIENT, default=0.0): vol.Coerce(float),
    vol.Optional(CONF_SAMPLE_DURATION, default=0): cv.positive_int,
})
# Platform schema: mapping of slug -> sensor definition.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_SENSORS): cv.schema_with_slug_keys(SENSOR_SCHEMA),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the trend sensors.

    Creates one SensorTrend entity per entry under ``sensors:``.
    """
    sensors = []
    for device_id, device_config in config[CONF_SENSORS].items():
        # Read values with the same CONF_* keys SENSOR_SCHEMA declares.
        # (The previous ATTR_ENTITY_ID/ATTR_FRIENDLY_NAME lookups worked
        # only because those constants hold identical strings — misleading.)
        entity_id = device_config[CONF_ENTITY_ID]
        attribute = device_config.get(CONF_ATTRIBUTE)
        device_class = device_config.get(CONF_DEVICE_CLASS)
        friendly_name = device_config.get(CONF_FRIENDLY_NAME, device_id)
        invert = device_config[CONF_INVERT]
        max_samples = device_config[CONF_MAX_SAMPLES]
        min_gradient = device_config[CONF_MIN_GRADIENT]
        sample_duration = device_config[CONF_SAMPLE_DURATION]
        sensors.append(
            SensorTrend(
                hass, device_id, friendly_name, entity_id, attribute,
                device_class, invert, max_samples, min_gradient,
                sample_duration)
        )
    if not sensors:
        _LOGGER.error("No sensors added")
        return
    add_entities(sensors)
class SensorTrend(BinarySensorDevice):
    """Representation of a trend Sensor.

    Tracks another entity (or one of its attributes), keeps a bounded
    window of (timestamp, value) samples, and turns on when the linear
    trend gradient exceeds the configured minimum gradient.
    """
    def __init__(self, hass, device_id, friendly_name, entity_id,
                 attribute, device_class, invert, max_samples,
                 min_gradient, sample_duration):
        """Initialize the sensor."""
        self._hass = hass
        self.entity_id = generate_entity_id(
            ENTITY_ID_FORMAT, device_id, hass=hass)
        self._name = friendly_name
        self._entity_id = entity_id
        self._attribute = attribute
        self._device_class = device_class
        self._invert = invert
        self._sample_duration = sample_duration
        self._min_gradient = min_gradient
        self._gradient = None
        self._state = None
        # Bounded deque of (timestamp, value); old samples roll off.
        self.samples = deque(maxlen=max_samples)
    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name
    @property
    def is_on(self):
        """Return true if sensor is on."""
        return self._state
    @property
    def device_class(self):
        """Return the sensor class of the sensor."""
        return self._device_class
    @property
    def device_state_attributes(self):
        """Return the state attributes of the sensor."""
        return {
            ATTR_ENTITY_ID: self._entity_id,
            ATTR_FRIENDLY_NAME: self._name,
            ATTR_GRADIENT: self._gradient,
            ATTR_INVERT: self._invert,
            ATTR_MIN_GRADIENT: self._min_gradient,
            ATTR_SAMPLE_COUNT: len(self.samples),
            ATTR_SAMPLE_DURATION: self._sample_duration,
        }
    @property
    def should_poll(self):
        """No polling needed."""
        return False
    async def async_added_to_hass(self):
        """Complete device setup after being added to hass."""
        @callback
        def trend_sensor_state_listener(entity, old_state, new_state):
            """Handle state changes on the observed device."""
            try:
                if self._attribute:
                    state = new_state.attributes.get(self._attribute)
                else:
                    state = new_state.state
                # NOTE(review): unlike the newer revision of this class,
                # "unavailable" is not filtered here — it is only rejected
                # when float() raises ValueError below.
                if state != STATE_UNKNOWN:
                    # NOTE(review): stamps with processing time (utcnow),
                    # not the state's own last_updated time.
                    sample = (utcnow().timestamp(), float(state))
                    self.samples.append(sample)
                    self.async_schedule_update_ha_state(True)
            except (ValueError, TypeError) as ex:
                _LOGGER.error(ex)
        async_track_state_change(
            self.hass, self._entity_id,
            trend_sensor_state_listener)
    async def async_update(self):
        """Get the latest data and update the states."""
        # Remove outdated samples
        if self._sample_duration > 0:
            cutoff = utcnow().timestamp() - self._sample_duration
            while self.samples and self.samples[0][0] < cutoff:
                self.samples.popleft()
        if len(self.samples) < 2:
            return
        # Calculate gradient of linear trend
        await self.hass.async_add_job(self._calculate_gradient)
        # Update state
        # On iff the gradient is large enough AND has the same sign as
        # the configured minimum gradient.
        self._state = (
            abs(self._gradient) > abs(self._min_gradient) and
            math.copysign(self._gradient, self._min_gradient) == self._gradient
        )
        if self._invert:
            self._state = not self._state
    def _calculate_gradient(self):
        """Compute the linear trend gradient of the current samples.
        This needs to run inside the executor (numpy polyfit is blocking).
        """
        import numpy as np
        timestamps = np.array([t for t, _ in self.samples])
        values = np.array([s for _, s in self.samples])
        # Degree-1 fit; coeffs[0] is the slope (value units per second).
        coeffs = np.polyfit(timestamps, values, 1)
        self._gradient = coeffs[0]
| {
"repo_name": "nugget/home-assistant",
"path": "homeassistant/components/binary_sensor/trend.py",
"copies": "3",
"size": "6437",
"license": "apache-2.0",
"hash": -6662413858287017000,
"line_mean": 33.6075268817,
"line_max": 79,
"alpha_frac": 0.6259126922,
"autogenerated": false,
"ratio": 3.920219244823386,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6046131937023386,
"avg_score": null,
"num_lines": null
} |
"""A separate Flask app that serves fake endpoints for demo purposes."""
# -*- coding: utf-8 -*-
from itertools import combinations
import json
import locale
import os
from datetime import timedelta as td
from datetime import datetime as dt
from random import randrange as rr
from random import choice, random
import time
from flask import (
Flask,
abort,
request,
jsonify,
render_template,
)
from flask_cors import CORS
from flask_cors import cross_origin
# Flask demo app serving fake chart-data endpoints.
app = Flask('endpoints_test')
# Allow cross-origin requests from the dashboard pages.
CORS(app)
app.config['SECRET_KEY'] = 'NOTSECURELOL'
app.debug = True
# Upper bound on generated points for the "?stress" endpoints.
STRESS_MAX_POINTS = 300
# System default locale, used for currency formatting in /singlenum.
locale.setlocale(locale.LC_ALL, '')
# Base directory for the example JSON fixtures.
cwd = os.getcwd()
def recursive_d3_data(current=0, max_iters=12, data=None):
    """Generate d3js data for stress testing.

    Format is suitable in treemap, circlepack and dendrogram testing.
    Each level wraps the previous tree in a new node with two children;
    both children reference the same subtree object.
    """
    if current >= max_iters:
        return data
    if data is None:
        data = {'name': 'foo', 'size': rr(10, 10000), 'children': []}
    wrapped = {'name': 'foo', 'size': rr(10, 10000), 'children': [data, data]}
    return recursive_d3_data(
        current=current + 1,
        max_iters=max_iters,
        data=wrapped)
def dates_list(max_dates=10):
    """Generate a timeseries dates list (YYYY-MM-DD, 10-day steps)."""
    start = dt.now()
    dates = []
    for step in range(max_dates):
        dates.append(str(start + td(days=step * 10))[:10])
    return dates
def rr_list(max_range=10):
    """Generate a list of random integers in [0, 100)."""
    values = []
    for _ in range(max_range):
        values.append(rr(0, 100))
    return values
def rand_hex_color():
    """Generate a random hex color.

    e.g. #FF0000
    """
    hex_chars = '0123456789ABCDEF'
    return '#' + ''.join(choice(hex_chars) for _ in range(6))
@cross_origin()
@app.route('/numbergroup')
def numbergroup():
    """Fake endpoint.

    Serves one of three static number-group payloads selected by the
    ``dataset`` query argument (default 0). An out-of-range value raises
    IndexError (HTTP 500).
    """
    dataset = int(request.args.get('dataset', 0))
    # multiple examples shown here for variadic demonstrations
    datas = [
        [
            {
                "title": "Number of widgets sold in last day",
                "description": 'This is a good sign',
                "data": 32515.0,
                "color": "green",
            },
            {
                "title": "New customers signed up this week",
                "description": 'New user accounts created',
                "data": 740,
            },
            {
                "title": "Average Daily Users",
                "description": "(aka DAU)",
                "data": 541200,
                "noformat": False,
            },
            {
                "title": "Max concurrent users this week",
                "description": "Server load peak",
                "data": 123401,
                "color": "orange",
                "noformat": True,
            },
        ],
        [
            {
                "title": "Simple thing",
                "data": 2,
                "width": "33%",
                "description": "Just a simple number"
            },
            {
                "title": "Simple thing 2",
                "data": 4033,
                "width": "33%",
                "description": "Just a simple number"
            },
            {
                "title": "Simple thing 3",
                "data": 49102,
                "width": "33%",
                "description": "Just a simple number"
            },
        ],
        [
            {
                "title": "Average time on site",
                "description": "Signed in to signed out (units nostyle)",
                "data": '20 minutes',
                "color": "#7D4EE4",
            },
            {
                "title": "Average time on site page X",
                "description": "Signed in to signed out (custom units style)",
                "data": 15,
                "units": "minutes",
            },
            {
                "title": "Average $ spent per day",
                "description": "Yeeehaw (custom units style)",
                "data": 130,
                "units": "dollars"
            },
        ]
    ]
    return jsonify(datas[dataset])
@cross_origin()
@app.route('/combination')
def combination():
    """Fake endpoint.

    Static c3 combination-chart payload: base type is bar, with per-series
    overrides in 'types' and one stacked group.
    """
    data = {
        'columns': [
            ['data1', 30, 20, 50, 40, 60, 50],
            ['data2', 200, 130, 90, 240, 130, 220],
            ['data3', 300, 200, 160, 400, 250, 250],
            ['data4', 200, 130, 90, 240, 130, 220],
            ['data5', 130, 120, 150, 140, 160, 150],
            ['data6', 90, 70, 20, 50, 60, 120],
        ],
        'type': 'bar',
        'types': {
            'data3': 'spline',
            'data4': 'line',
            'data6': 'area',
        },
        'groups': [
            ['data1', 'data2'],
        ]
    }
    return jsonify(dict(data=data))
@cross_origin()
@app.route('/timeseriesc3')
def timeseriesc3():
    """Fake endpoint.

    Returns four random-but-valid YYYY-MM-DD dates plus two random series.
    The previous version generated month values of 10-30, which are not
    valid calendar months, so date parsing on the chart side could fail.
    """
    return jsonify(dict(
        dates=[
            # Zero-padded month 01-12 and day 01-28 keep every date valid.
            '19{}-{:02d}-{:02d}'.format(rr(10, 99), rr(1, 13), rr(1, 29))
            for _ in range(4)
        ],
        abc=rr_list(max_range=4),
        cde=rr_list(max_range=4),
    ))
@cross_origin()
@app.route('/stacked-bar')
def stackedbar():
    """Fake endpoint.

    Static c3 stacked-bar payload; the grid line at y=0 marks the axis
    for the mixed positive/negative values.
    """
    return jsonify({
        'data': {
            'columns': [
                ['data1', -30, 200, 200, 400, -150, 250],
                ['data2', 130, 100, -100, 200, -150, 50],
                ['data3', -230, 200, 200, -300, 250, 250]
            ],
            'type': 'bar',
            'groups': [
                ['data1', 'data2']
            ]
        },
        'grid': {
            'y': {
                'lines': [{'value': 0}]
            }
        }
    })
@cross_origin()
@app.route('/wordcloud')
def wordcloud():
    """Fake endpoint: word-cloud entries with sizes growing by position."""
    words = [
        'awesome', 'rad', 'neato', 'the', 'flask', 'jsondash', 'graphs',
        'charts', 'd3', 'js', 'dashboards', 'c3',
    ]
    data = [
        {'text': word, 'size': position * 12}
        for position, word in enumerate(words)
    ]
    return jsonify(data)
@cross_origin()
@app.route('/sigma')
def sigma():
    """Fake endpoint.

    ``?name=random`` builds a complete graph over the characters of the
    ``nodes`` argument; any other name serves the matching fixture file,
    falling back to empty JSON when the file is missing.
    """
    chart_name = request.args.get('name', 'basic')
    if chart_name == 'random':
        nodes = request.args.get('nodes', 'abcdefghij')
        _vertices = list(nodes)
        # One edge per unordered pair -> complete graph.
        _edges = combinations(_vertices, 2)
        edges, vertices = [], []
        for (frm, to) in _edges:
            edges.append(dict(
                id='{}-{}'.format(frm, to),
                color=rand_hex_color(),
                source=frm,
                target=to,
                size=rr(1, 10),
                x=rr(1, 100),
                y=rr(1, 100),
            ))
        for vertex in _vertices:
            vertices.append(dict(
                id=vertex,
                size=rr(1, 10),
                x=rr(1, 100),
                y=rr(1, 100),
                color=rand_hex_color(),
                label='node {}'.format(vertex),
            ))
        data = dict(
            nodes=vertices,
            edges=edges,
        )
        return jsonify(data)
    filename = '{}/examples/sigma/{}.json'.format(cwd, chart_name)
    try:
        with open(filename, 'r') as chartjson:
            return chartjson.read()
    except IOError:
        # Missing fixture -> empty JSON below.
        pass
    return jsonify({})
@cross_origin()
@app.route('/flamegraph')
def flamegraph():
    """Fake endpoint: serve a flamegraph fixture, or empty JSON if absent."""
    chart_name = request.args.get('name', 'stacks')
    path = '{}/examples/flamegraph/{}.json'.format(cwd, chart_name)
    try:
        with open(path, 'r') as fixture:
            return fixture.read()
    except IOError:
        return jsonify({})
@cross_origin()
@app.route('/cytoscape')
def cytoscape():
    """Fake endpoint.

    Serves the local cytoscape spec selected by the ``name`` query
    argument as a single JSON response, falling back to empty JSON when
    the spec file does not exist.
    """
    chart_name = request.args.get('name', 'basic')
    path = '{}/examples/cytoscape/{}.json'.format(cwd, chart_name)
    try:
        with open(path, 'r') as spec:
            return spec.read()
    except IOError:
        return jsonify({})
@cross_origin()
@app.route('/vegalite')
def vegalite():
    """Fake endpoint.
    Reads data from a local vega spec, and if there is a
    remote url specified, (assuming it exists here), open and load it as well.
    This returns all required json as a single endpoint.
    """
    chart_type = request.args.get('type', 'bar')
    filename = '{}/examples/vegalite/{}.json'.format(cwd, chart_type)
    try:
        with open(filename, 'r') as chartjson:
            # Rebinds the name from file handle to its string content.
            chartjson = chartjson.read()
            data = json.loads(chartjson)
            if data.get('data', {}).get('url') is not None:
                # The spec references an external data file; inline it.
                datapath = '{}/examples/vegalite/{}'.format(
                    cwd, data['data']['url']
                )
                with open(datapath, 'r') as datafile:
                    # Non-.json data files are currently left un-inlined.
                    if datapath.endswith('.json'):
                        raw_data = datafile.read()
                        raw_data = json.loads(raw_data)
                        # TODO: adding csv support for example.
                        data.update(data=dict(
                            name='some data',
                            values=raw_data,
                        ))
                return jsonify(data)
            else:
                # No external data url: return the raw spec text as-is.
                return chartjson
    except IOError:
        # Missing spec or data file -> empty JSON below.
        pass
    return jsonify({})
@cross_origin()
@app.route('/plotly')
def plotly():
    """Fake endpoint: serve a plotly fixture, or empty JSON if absent.

    The fixture read is guarded so a missing file yields empty JSON
    instead of a 500; previously the trailing jsonify({}) was
    unreachable, unlike the sibling fixture endpoints.
    """
    chart_type = request.args.get('chart', 'line')
    filename = '{}/examples/plotly/{}.json'.format(cwd, chart_type)
    try:
        with open(filename, 'r') as chartjson:
            return chartjson.read()
    except IOError:
        pass
    return jsonify({})
# Use the decorator-factory form (with parentheses), consistent with
# every other endpoint in this module.
@cross_origin()
@app.route('/plotly-dynamic')
def plotly_dynamic():
    """Fake endpoint: serve the dynamic plotly fixture.

    The fixture read is guarded so a missing file yields empty JSON
    instead of a 500; previously the trailing jsonify({}) was
    unreachable.
    """
    filename = '{}/examples/plotly/bar_line_dynamic.json'.format(cwd)
    try:
        with open(filename, 'r') as chartjson:
            return chartjson.read()
    except IOError:
        pass
    return jsonify({})
@cross_origin()
@app.route('/timeline')
def timeline():
    """Fake endpoint: serve the timeline fixture, or empty JSON if absent.

    The fixture read is guarded so a missing file yields empty JSON
    instead of a 500; previously the trailing jsonify({}) was
    unreachable.
    """
    try:
        with open('{}/examples/timeline3.json'.format(cwd), 'r') as timelinejson:
            return timelinejson.read()
    except IOError:
        pass
    return jsonify({})
@app.route('/dtable', methods=['GET'])
def dtable():
    """Fake endpoint.

    ``?stress`` generates a large random row payload; ``?override``
    serves the override fixture instead of the default one. The fixture
    read is guarded so a missing file yields empty JSON instead of a 500
    (the trailing jsonify({}) was previously unreachable), and the
    module-level ``cwd`` is used for consistency with other endpoints.
    """
    if 'stress' in request.args:
        return jsonify([
            dict(
                foo=rr(1, 1000),
                bar=rr(1, 1000),
                baz=rr(1, 1000),
                quux=rr(1, 1000)) for _ in range(STRESS_MAX_POINTS)
        ])
    fname = 'dtable-override' if 'override' in request.args else 'dtable'
    try:
        with open('{}/examples/{}.json'.format(cwd, fname), 'r') as djson:
            return djson.read()
    except IOError:
        pass
    return jsonify({})
@cross_origin()
@app.route('/timeseries')
def timeseries():
    """Fake endpoint: dates plus three random line series."""
    payload = {"dates": dates_list()}
    for series in ("line1", "line2", "line3"):
        payload[series] = rr_list(max_range=10)
    return jsonify(payload)
@cross_origin()
@app.route('/custom')
def custompage():
    """Fake endpoint: render the custom demo page with a random number."""
    return render_template('examples/custom.html', number=rr(1, 1000))
@cross_origin()
@app.route('/gauge')
def gauge():
    """Fake endpoint: a single random gauge value."""
    value = rr(1, 100)
    return jsonify({'data': value})
@cross_origin()
@app.route('/area-custom')
def area_custom():
    """Fake endpoint.

    Static c3 payload mixing an 'area' series with an 'area-spline' one.
    """
    return jsonify({
        "data": {
            "columns": [
                ["data1", 300, 350, 300, 0, 0, 0],
                ["data2", 130, 100, 140, 200, 150, 50]
            ],
            "types": {
                "data1": "area",
                "data2": "area-spline"
            }
        }
    })
@cross_origin()
@app.route('/scatter')
def scatter():
    """Fake endpoint: scatter series, or the overrides fixture on request."""
    if 'override' in request.args:
        override_path = '{}/examples/overrides.json'.format(cwd)
        with open(override_path, 'r') as overrides:
            return overrides.read()
    payload = {
        "bar1": [1, 2, 30, 12, 100],
        "bar2": rr_list(max_range=40),
        "bar3": rr_list(max_range=40),
        "bar4": [-10, 1, 5, 4, 10, 20],
    }
    return jsonify(payload)
@cross_origin()
@app.route('/pie')
def pie():
    """Fake endpoint: random pie slices; many slices under ?stress."""
    if 'stress' in request.args:
        names = range(STRESS_MAX_POINTS)
    else:
        names = list('abcde')
    return jsonify({'data {}'.format(name): rr(1, 100) for name in names})
@cross_origin()
@app.route('/custom-inputs')
def custom_inputs():
    """Fake endpoint.

    Builds a payload shaped by the ``range``, ``entries``,
    ``starting_num`` and ``prefix`` query arguments; ``override``
    switches to a c3-style columns payload with optional axis labels.
    """
    _range = int(request.args.get('range', 5))
    entries = int(request.args.get('entries', 3))
    starting = int(request.args.get('starting_num', 0))
    prefix = request.args.get('prefix', 'item')
    if 'override' in request.args:
        # Checkbox-style arg: only the literal value 'on' enables axes.
        show_axes = request.args.get('show_axes', False)
        show_axes = show_axes == 'on'
        data = dict(
            data=dict(
                columns=[
                    ['{} {}'.format(prefix, i)] + rr_list(max_range=entries)
                    for i in range(starting, _range)
                ],
            )
        )
        if show_axes:
            data.update(axis=dict(
                x=dict(label='This is the X axis'),
                y=dict(label='This is the Y axis')))
        return jsonify(data)
    # NOTE(review): the two branches swap the roles of `range` and
    # `entries` (`_range` series of `entries` points above vs `entries`
    # keys of `_range` points here) — presumably intentional for demo
    # variety; confirm before relying on it.
    return jsonify({
        i: rr_list(max_range=_range) for i in range(starting, entries)
    })
@cross_origin()
@app.route('/bar')
def barchart():
    """Fake endpoint: bar series; a huge payload under ?stress."""
    if 'stress' in request.args:
        stress = {}
        for k in range(STRESS_MAX_POINTS):
            stress['bar-{}'.format(k)] = rr_list(max_range=STRESS_MAX_POINTS)
        return jsonify(stress)
    return jsonify({
        "bar1": [1, 2, 30, 12, 100],
        "bar2": rr_list(max_range=5),
        "bar3": rr_list(max_range=5),
    })
@cross_origin()
@app.route('/line')
def linechart():
    """Fake endpoint: line series; a huge payload under ?stress."""
    if 'stress' in request.args:
        stress = {}
        for k in range(STRESS_MAX_POINTS):
            stress['bar-{}'.format(k)] = rr_list(max_range=STRESS_MAX_POINTS)
        return jsonify(stress)
    return jsonify({
        "line1": [1, 4, 3, 10, 12, 14, 18, 10],
        "line2": [1, 2, 10, 20, 30, 6, 10, 12, 18, 2],
        "line3": rr_list(),
    })
@cross_origin()
@app.route('/shared')
def shared_data():
    """Fake endpoint to demonstrate sharing data from one source."""
    piedata = {
        'data {}'.format(letter): rr(1, 100) for letter in list('abcde')
    }
    bardata = {
        "bar1": [1, 2, 30, 12, 100],
        "bar2": rr_list(max_range=5),
        "bar3": rr_list(max_range=5),
    }
    linedata = {
        "line1": [1, 4, 3, 10, 12, 14, 18, 10],
        "line2": [1, 2, 10, 20, 30, 6, 10, 12, 18, 2],
        "line3": rr_list(),
    }
    # All three chart payloads ride under a single 'multicharts' key.
    return jsonify({
        'multicharts': {
            'line': linedata,
            'bar': bardata,
            'pie': piedata,
        }
    })
@cross_origin()
@app.route('/singlenum')
def singlenum():
    """Fake endpoint: one random number, optionally currency/negative."""
    low, high = 10, 10000
    val = rr(low, high)
    if 'sales' in request.args:
        # Format as locale-aware currency (see locale.setlocale above).
        val = locale.currency(float(val), grouping=True)
    if 'negative' in request.args:
        val = '-{}'.format(val)
    return jsonify(data=val)
@cross_origin()
@app.route('/deadend')
def test_die():
    """Fake endpoint that ends in a random 50x error."""
    # Simulate slow connection
    # NOTE(review): any non-empty ?sleep value (and the default True)
    # triggers the sleep; only an explicitly empty ?sleep= skips it.
    sleep = request.args.get('sleep', True)
    if sleep != '':
        sleep_for = request.args.get('sleep_for')
        # Sleep a caller-chosen number of seconds, else up to 1s.
        time.sleep(int(sleep_for) if sleep_for is not None else random())
    err_code = request.args.get('error_code')
    rand_err = choice([500, 501, 502, 503, 504])
    # Abort with the requested status code, or a random 50x.
    abort(int(err_code) if err_code is not None else rand_err)
@cross_origin()
@app.route('/venn')
def test_venn():
    """Fake endpoint: venn sets with random sizes."""
    set_groups = [['A'], ['B'], ['C'], ['A', 'B'], ['A', 'B', 'C']]
    data = [
        {'sets': sets, 'size': rr(10, 100)}
        for sets in set_groups
    ]
    return jsonify(data)
@cross_origin()
@app.route('/sparklines', methods=['GET'])
def sparklines():
    """Fake endpoint: flat values for pie/discrete, [x, y] pairs otherwise."""
    wants_flat_series = 'pie' in request.args or 'discrete' in request.args
    if wants_flat_series:
        return jsonify([rr(1, 100) for _ in range(10)])
    return jsonify([[i, rr(i, 100)] for i in range(10)])
@cross_origin()
@app.route('/circlepack', methods=['GET'])
def circlepack():
    """Fake endpoint: flare fixture, or a generated tree under ?stress.

    The fixture read is guarded so a missing file yields empty JSON
    instead of a 500; previously the trailing jsonify({}) was
    unreachable.
    """
    if 'stress' in request.args:
        # Build a very large dataset
        return jsonify(recursive_d3_data())
    try:
        with open('{}/examples/flare.json'.format(cwd), 'r') as djson:
            return djson.read()
    except IOError:
        pass
    return jsonify({})
@cross_origin()
@app.route('/treemap', methods=['GET'])
def treemap():
    """Fake endpoint: flare fixture, or a generated tree under ?stress.

    The fixture read is guarded so a missing file yields empty JSON
    instead of a 500; previously the trailing jsonify({}) was
    unreachable.
    """
    if 'stress' in request.args:
        # Build a very large dataset
        return jsonify(recursive_d3_data())
    try:
        with open('{}/examples/flare.json'.format(cwd), 'r') as djson:
            return djson.read()
    except IOError:
        pass
    return jsonify({})
@cross_origin()
@app.route('/map', methods=['GET'])
def datamap():
    """Fake endpoint: render the demo map page."""
    template = 'examples/map.html'
    return render_template(template)
@cross_origin()
@app.route('/dendrogram', methods=['GET'])
def dendro():
    """Fake endpoint: flare fixture (full or simple), or a generated tree.

    The fixture read is guarded so a missing file yields empty JSON
    instead of a 500; previously the trailing jsonify({}) was
    unreachable.
    """
    if 'stress' in request.args:
        # Build a very large dataset
        return jsonify(recursive_d3_data())
    filename = 'flare-simple' if 'simple' in request.args else 'flare'
    try:
        with open('{}/examples/{}.json'.format(cwd, filename), 'r') as djson:
            return djson.read()
    except IOError:
        pass
    return jsonify({})
@cross_origin()
@app.route('/voronoi', methods=['GET'])
def voronoi():
    """Fake endpoint: `points` random [y, x] pairs within width/height.

    Query-string values arrive as strings; the previous code passed them
    to randrange unconverted, raising TypeError whenever ?width or
    ?height was supplied. Convert them to int like `points` already was.
    """
    w = int(request.args.get('width', 800))
    h = int(request.args.get('height', 800))
    max_points = int(request.args.get('points', 100))
    if 'stress' in request.args:
        max_points = 500
    # NOTE(review): pairs are [rr(1, h), rr(1, w)] — height first — kept
    # from the original; confirm the consumer expects that ordering.
    return jsonify([[rr(1, h), rr(1, w)] for _ in range(max_points)])
@cross_origin()
@app.route('/digraph', methods=['GET'])
def graphdata():
    """Fake endpoint: dot-format digraph source wrapped in JSON."""
    if 'filetree' in request.args:
        with open('{}/examples/filetree_digraph.dot'.format(cwd), 'r') as dot:
            return jsonify(dict(graph=dot.read()))
    if 'simple' in request.args:
        dot_source = """
        digraph {
            a -> b;
            a -> c;
            b -> c;
            b -> a;
            b -> b;
        }
        """
        return jsonify(dict(graph=dot_source))
    # Default: ten random edges over a fixed vertex set.
    vertex_names = list('abcdefghijkl')
    edge_lines = []
    for _ in range(10):
        edge_lines.append(
            '{0} -> {1};'.format(choice(vertex_names), choice(vertex_names)))
    node_data = '\n'.join(edge_lines)
    dot_source = """digraph {lb} {nodes} {rb}""".format(
        lb='{', rb='}', nodes=node_data)
    return jsonify(dict(
        graph=dot_source,
    ))
if __name__ == '__main__':
    # Allow host/port overrides via environment variables for local demos.
    PORT = int(os.getenv('PORT', 5004))
    HOST = os.getenv('HOST', '0.0.0.0')
    app.run(debug=True, host=HOST, port=PORT)
| {
"repo_name": "christabor/flask_jsondash",
"path": "example_app/endpoints.py",
"copies": "1",
"size": "19032",
"license": "mit",
"hash": -6318417875860366000,
"line_mean": 26.2664756447,
"line_max": 78,
"alpha_frac": 0.5107187894,
"autogenerated": false,
"ratio": 3.6148148148148147,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46255336042148143,
"avg_score": null,
"num_lines": null
} |
"""A separate module for glyphname to filename functions.
glyphNameToShortFileName() generates a non-clashing filename for systems with
filename-length limitations.
"""
# Hard cap on generated filename length (legacy 31-character filesystem limit).
MAXLEN = 31

def glyphNameToShortFileName(glyphName, glyphSet):
    """Alternative glyphname to filename function.

    Features a guaranteed maximum filename for really long glyphnames, and clash testing.

    - all non-ascii characters are converted to "_" (underscore), including "."
    - all glyphnames which are too long are truncated and a hash is added at the end
    - the hash is generated from the whole glyphname
    - finally, the candidate glyphname is checked against the contents.plist
      and an incrementing number is added at the end if there is a clash.
    """
    import binascii, struct, string
    ext = ".glif"
    ok = string.ascii_letters + string.digits + " _"
    # crc32 needs bytes on Python 3; masking to 32 bits and packing with
    # ">L" produces the same two's-complement bytes the old signed ">l"
    # pack did on Python 2, so generated names stay identical.
    crc = binascii.crc32(glyphName.encode("utf-8")) & 0xFFFFFFFF
    h = binascii.hexlify(struct.pack(">L", crc)).decode("ascii")
    n = ''
    for c in glyphName:
        if c in ok:
            if c != c.lower():
                # Uppercase letters gain an underscore suffix so names
                # differing only in case map to distinct filenames.
                n += c + "_"
            else:
                n += c
        else:
            n += "_"
    if len(n + ext) < MAXLEN:
        return n + ext
    # Too long: truncate and append the 8-hex-digit crc of the full name.
    count = 0
    candidate = n[:MAXLEN - len(h + ext)] + h + ext
    if glyphSet is not None:
        # Resolve clashes against existing (lowercased) filenames by
        # appending an incrementing counter before the extension.
        names = glyphSet.getReverseContents()
        while candidate.lower() in names:
            candidate = n[:MAXLEN - len(h + ext + str(count))] + h + str(count) + ext
            count += 1
    return candidate
| {
"repo_name": "adrientetar/robofab",
"path": "Lib/robofab/tools/glyphNameSchemes.py",
"copies": "11",
"size": "1330",
"license": "bsd-3-clause",
"hash": -84792869473121870,
"line_mean": 31.4390243902,
"line_max": 86,
"alpha_frac": 0.6954887218,
"autogenerated": false,
"ratio": 3.3842239185750635,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.041660303675619736,
"num_lines": 41
} |
"""A separate terminal for every websocket opened.
"""
import tornado.web
# This demo requires tornado_xstatic and XStatic-term.js
import tornado_xstatic
from terminado import TermSocket, UniqueTermManager
from common_demo_stuff import run_and_show_browser, STATIC_DIR, TEMPLATE_DIR
class TerminalPageHandler(tornado.web.RequestHandler):
    """Serve the terminal page that connects back over the websocket."""

    def get(self):
        settings = self.application.settings
        return self.render(
            "termpage.html",
            static=self.static_url,
            xstatic=settings['xstatic_url'],
            ws_url_path="/websocket")
def main(argv):
    """Start the demo terminal server and open a browser on it."""
    # One terminal per websocket connection.
    term_manager = UniqueTermManager(shell_command=['bash'])
    handlers = [
        (r"/websocket", TermSocket, {'term_manager': term_manager}),
        (r"/", TerminalPageHandler),
        (r"/xstatic/(.*)", tornado_xstatic.XStaticFileHandler,
         {'allowed_modules': ['termjs']}),
    ]
    app = tornado.web.Application(
        handlers,
        static_path=STATIC_DIR,
        template_path=TEMPLATE_DIR,
        xstatic_url=tornado_xstatic.url_maker('/xstatic/'))
    app.listen(8765, 'localhost')
    run_and_show_browser("http://localhost:8765/", term_manager)
if __name__ == '__main__':
main([]) | {
"repo_name": "akalipetis/terminado",
"path": "demos/unique.py",
"copies": "4",
"size": "1270",
"license": "bsd-2-clause",
"hash": -678915599780590300,
"line_mean": 38.71875,
"line_max": 76,
"alpha_frac": 0.6078740157,
"autogenerated": false,
"ratio": 4.031746031746032,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009298840931242246,
"num_lines": 32
} |
"""A seq2seq model"""
import theano
from theano import tensor
from blocks.bricks import (Initializable, Linear, NDimensionalSoftmax, MLP,
Tanh, Rectifier)
from blocks.bricks.base import application
from blocks.bricks.recurrent import LSTM
from blocks.bricks.lookup import LookupTable
from blocks.initialization import Constant
from dictlearn.ops import WordToIdOp
from dictlearn.aggregation_schemes import Perplexity
from dictlearn.util import masked_root_mean_square
class Seq2Seq(Initializable):
    """ seq2seq model

    Parameters
    ----------
    emb_dim: int
        The dimension of word embeddings (including for def model if standalone)
    dim : int
        The dimension of the RNNs states (including for def model if standalone)
    num_input_words : int
        The size of the LM's input vocabulary.
    num_output_words : int
        The size of the LM's output vocabulary.
    vocab
        The vocabulary object.
    """
    def __init__(self, emb_dim, dim, num_input_words,
                 num_output_words, vocab,
                 **kwargs):
        # Zero acts as "use the default": embedding dim falls back to the
        # RNN state dim, vocab sizes fall back to the full vocabulary.
        if emb_dim == 0:
            emb_dim = dim
        if num_input_words == 0:
            num_input_words = vocab.size()
        if num_output_words == 0:
            num_output_words = vocab.size()

        self._num_input_words = num_input_words
        self._num_output_words = num_output_words
        self._vocab = vocab

        self._word_to_id = WordToIdOp(self._vocab)

        children = []

        self._main_lookup = LookupTable(self._num_input_words, emb_dim,
                                        name='main_lookup')
        # LSTM gates need a 4*dim input, hence the fork size.
        self._encoder_fork = Linear(emb_dim, 4 * dim, name='encoder_fork')
        self._encoder_rnn = LSTM(dim, name='encoder_rnn')
        self._decoder_fork = Linear(emb_dim, 4 * dim, name='decoder_fork')
        self._decoder_rnn = LSTM(dim, name='decoder_rnn')
        children.extend([self._main_lookup,
                         self._encoder_fork, self._encoder_rnn,
                         self._decoder_fork, self._decoder_rnn])
        self._pre_softmax = Linear(dim, self._num_output_words)
        self._softmax = NDimensionalSoftmax()
        children.extend([self._pre_softmax, self._softmax])

        # BUG FIX: the original called super(LanguageModel, self), but no
        # LanguageModel class exists in this module -- this class is Seq2Seq.
        super(Seq2Seq, self).__init__(children=children, **kwargs)

    def set_def_embeddings(self, embeddings):
        """Overwrite the lookup-table weights with *embeddings* (numpy array)."""
        self._main_lookup.parameters[0].set_value(embeddings.astype(theano.config.floatX))

    def get_def_embeddings_params(self):
        """Return the shared variable holding the lookup-table weights."""
        return self._main_lookup.parameters[0]

    def add_perplexity_measure(self, application_call, minus_logs, mask, name):
        """Attach a masked perplexity auxiliary variable and return per-example costs."""
        costs = (minus_logs * mask).sum(axis=0)
        perplexity = tensor.exp(costs.sum() / mask.sum())
        perplexity.tag.aggregation_scheme = Perplexity(
            costs.sum(), mask.sum())
        application_call.add_auxiliary_variable(perplexity, name=name)
        return costs

    @application
    def apply(self, application_call, words, mask):
        """Compute the log-likelihood for a batch of sequences.

        words
            An integer matrix of shape (B, T), where T is the number of time
            step, B is the batch size. Note that this order of the axis is
            different from what all RNN bricks consume, hence and the axis
            should be transposed at some point.
        mask
            A float32 matrix of shape (B, T). Zeros indicate the padding.
        """
        word_ids = self._word_to_id(words)

        # shortlisting: ids beyond the shortlist are mapped to UNK
        input_word_ids = (tensor.lt(word_ids, self._num_input_words) * word_ids
                          + tensor.ge(word_ids, self._num_input_words) * self._vocab.unk)
        output_word_ids = (tensor.lt(word_ids, self._num_output_words) * word_ids
                           + tensor.ge(word_ids, self._num_output_words) * self._vocab.unk)
        # NOTE(review): `unk_ratio` is neither defined nor imported in this
        # module (a `from dictlearn.util import unk_ratio` is likely
        # missing) -- confirm against dictlearn.util.
        application_call.add_auxiliary_variable(
            unk_ratio(input_word_ids, mask, self._vocab.unk),
            name='unk_ratio')

        # Run the main rnn with combined inputs
        rnn_inputs = self._main_lookup.apply(input_word_ids)
        encoder_rnn_states = self._encoder_rnn.apply(
            tensor.transpose(self._encoder_fork.apply(rnn_inputs), (1, 0, 2)),
            mask=mask.T)[0]

        # The first token is not predicted
        # BUG FIX: the original referenced an undefined `main_rnn_states`;
        # the only hidden states computed above are `encoder_rnn_states`.
        logits = self._pre_softmax.apply(encoder_rnn_states[:-1])
        targets = output_word_ids.T[1:]
        out_softmax = self._softmax.apply(logits, extra_ndim=1)
        application_call.add_auxiliary_variable(
            out_softmax.copy(), name="proba_out")
        minus_logs = self._softmax.categorical_cross_entropy(
            targets, logits, extra_ndim=1)
        targets_mask = mask.T[1:]
        costs = self.add_perplexity_measure(application_call, minus_logs,
                                            targets_mask,
                                            "perplexity")

        missing_embs = tensor.eq(input_word_ids, self._vocab.unk).astype('int32')  # (bs, L)
        self.add_perplexity_measure(application_call, minus_logs,
                                    targets_mask * missing_embs.T[:-1],
                                    "perplexity_after_mis_word_embs")
        self.add_perplexity_measure(application_call, minus_logs,
                                    targets_mask * (1-missing_embs.T[:-1]),
                                    "perplexity_after_word_embs")

        # NOTE(review): `self._word_to_count` and `self._very_rare_threshold`
        # are never initialized in __init__; this section would raise
        # AttributeError at graph-construction time. Looks copied from a
        # LanguageModel class -- confirm which attributes were intended.
        word_counts = self._word_to_count(words)
        very_rare_masks = []
        for threshold in self._very_rare_threshold:
            very_rare_mask = tensor.lt(word_counts, threshold).astype('int32')
            very_rare_mask = targets_mask * (very_rare_mask.T[:-1])
            very_rare_masks.append(very_rare_mask)
            self.add_perplexity_measure(application_call, minus_logs,
                                        very_rare_mask,
                                        "perplexity_after_very_rare_" + str(threshold))

        # NOTE(review): `self._retrieval` and `def_map` are likewise never
        # defined in this class -- confirm.
        if self._retrieval:
            has_def = tensor.zeros_like(output_word_ids)
            has_def = tensor.inc_subtensor(has_def[def_map[:,0], def_map[:,1]], 1)
            mask_targets_has_def = has_def.T[:-1] * targets_mask  # (L-1, bs)
            self.add_perplexity_measure(application_call, minus_logs,
                                        mask_targets_has_def,
                                        "perplexity_after_def_embs")
            for thresh, very_rare_mask in zip(self._very_rare_threshold, very_rare_masks):
                self.add_perplexity_measure(application_call, minus_logs,
                                            very_rare_mask * mask_targets_has_def,
                                            "perplexity_after_def_very_rare_" + str(thresh))
            application_call.add_auxiliary_variable(
                mask_targets_has_def.T, name='mask_def_emb')

        # NOTE(review): `updates` is never defined; returning it raises
        # NameError. Kept to preserve the two-value interface -- confirm
        # whether it should come from a scan op or be dropped.
        return costs, updates
| {
"repo_name": "tombosc/dict_based_learning",
"path": "dictlearn/seq2seq.py",
"copies": "1",
"size": "6778",
"license": "mit",
"hash": -1859169785513454600,
"line_mean": 41.8987341772,
"line_max": 91,
"alpha_frac": 0.588669224,
"autogenerated": false,
"ratio": 3.8207440811724918,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4909413305172492,
"avg_score": null,
"num_lines": null
} |
import numpy as np
import pycqed.measurement.waveform_control.pulsar as ps
from collections import OrderedDict as odict
from copy import deepcopy
import logging
from pycqed.utilities.timer import Timer
log = logging.getLogger(__name__)
class Sequence:
"""
A Sequence consists of several segments, which can be played back on the
AWGs sequentially.
"""
RENAMING_SEPARATOR = "+"
    def __init__(self, name, segments=()):
        """
        Initializes a Sequence object
        Args:
            name: Name of the sequence
            segments (list, tuple): list of segments to add to the sequence
        """
        self.name = name
        # Pulsar is a singleton that knows about the available AWGs/channels.
        self.pulsar = ps.Pulsar.get_instance()
        # Segments keyed (and ordered) by name; add() enforces unique names.
        self.segments = odict()
        self.awg_sequence = {}
        # Channel name -> repeat pattern, filled by repeat()/repeat_ro().
        self.repeat_patterns = {}
        self.extend(segments)
        self.timer = Timer(self.name)
def add(self, segment):
if segment.name in self.segments:
raise NameError('Name {} already exisits in the sequence!'.format(
segment.name))
self.segments[segment.name] = segment
def extend(self, segments):
"""
Extends the sequence given a list of segments
Args:
segments (list): segments to add to the sequence
"""
for seg in segments:
self.add(seg)
    @Timer()
    def generate_waveforms_sequences(self, awgs=None):
        """
        Calculates and returns
            * a dictionary of waveforms used in the sequence, indexed
              by their hash value
            * For each awg, a list of elements, each element consisting of
              a waveform-hash for each codeword and each channel

        Args:
            awgs: iterable of AWG names to generate for; when None, all AWGs
                referenced by any segment are used (inferred from the code
                below -- TODO confirm intended contract).
        """
        waveforms = {}
        sequences = {}
        # Resolve timing and distribute pulses onto per-AWG elements first.
        for seg in self.segments.values():
            seg.resolve_segment()
            seg.gen_elements_on_awg()
        if awgs is None:
            awgs = set()
            for seg in self.segments.values():
                awgs |= set(seg.elements_on_awg)
        for awg in awgs:
            sequences[awg] = odict()
            for segname, seg in self.segments.items():
                # Store the name of the segment
                sequences[awg][segname] = None
                for elname in seg.elements_on_awg.get(awg, []):
                    sequences[awg][elname] = {'metadata': {}}
                    for cw in seg.get_element_codewords(elname, awg=awg):
                        sequences[awg][elname][cw] = {}
                        for ch in seg.get_element_channels(elname, awg=awg):
                            h = seg.calculate_hash(elname, cw, ch)
                            chid = self.pulsar.get(f'{ch}_id')
                            sequences[awg][elname][cw][chid] = h
                            # Generate each distinct waveform only once;
                            # identical pulses share one entry by hash.
                            if h not in waveforms:
                                wf = seg.waveforms(awgs={awg},
                                                   elements={elname}, channels={ch},
                                                   codewords={cw})
                                # Unwrap the 4-level nested dict
                                # (awg -> element -> codeword -> channel).
                                waveforms[h] = wf.popitem()[1].popitem()[1]\
                                    .popitem()[1].popitem()[1]
                    if elname in seg.acquisition_elements:
                        sequences[awg][elname]['metadata']['acq'] = True
                    else:
                        sequences[awg][elname]['metadata']['acq'] = False
        return waveforms, sequences
def n_acq_elements(self, per_segment=False):
"""
Gets the number of acquisition elements in the sequence.
Args:
per_segment (bool): Whether or not to return the number of
acquisition elements per segment. Defaults to False.
Returns:
number of acquisition elements (list (if per_segment) or int)
"""
n_readouts = [len(seg.acquisition_elements)
for seg in self.segments.values()]
if not per_segment:
n_readouts = np.sum(n_readouts)
return n_readouts
def n_segments(self):
"""
Gets the number of segments in the sequence.
"""
return len(self.segments)
def repeat(self, pulse_name, operation_dict, pattern,
pulse_channel_names=('I_channel', 'Q_channel')):
"""
Creates a repetition dictionary keyed by awg channel for the pulse
to be repeated.
:param pulse_name: name of the pulse to repeat.
:param operation_dict:
:param pattern: repetition pattern (n_repetitions, nr_elements_per_loop or another loop-specification)
cf. Christian
:param pulse_channel_names: names of the channels on which the pulse is
applied.
:return:
"""
if operation_dict==None:
pulse=pulse_name
else:
pulse = operation_dict[pulse_name]
repeat = dict()
for ch in pulse_channel_names:
repeat[pulse[ch]] = pattern
self.repeat_patterns.update(repeat)
return self.repeat_patterns
def repeat_ro(self, pulse_name, operation_dict):
"""
Wrapper for repeated readout
:param pulse_name:
:param operation_dict:
:param sequence:
:return:
"""
return self.repeat(pulse_name, operation_dict,
(self.n_acq_elements(), 1))
    @staticmethod
    def merge(sequences, segment_limit=None, merge_repeat_patterns=True):
        """
        Merges a list of sequences. See documentation of Sequence.__add__()
        for more information on the merge of two sequences.
        Args:
            sequences (list): List of sequences to merge
            segment_limit (int): maximal number of segments in the merged sequence.
                if the total number of segments is higher, a list of sequences is
                returned. Default is None (all sequences are merged)
            merge_repeat_patterns (bool): Merges the readout pattern when
                combining the sequences. If the readout pattern already exists, it adds
                to the number of repetition of the pattern. Note that this behavior may
                not work for all scenarios. In that case the patterns must be updated
                manually after the merge and merge_repeat_patterns should be set to
                False. Default: True.
        Returns: list of merged sequences
        Examples:
            >>> # No segment_limit
            >>> seq1 = Sequence('seq1')
            >>> seq1.extend(segments_of_seq1) # 10 segments
            >>> seq2 = Sequence('seq2')
            >>> seq2.extend(segments_of_seq2) # 15 segments
            >>> seq_comb = Sequence.merge([seq1, seq2])
            >>> # returns a list with 1 sequence with 25 segments
            >>> # i.e. [seq1 + seq2]
            >>> # 20 segments limit
            >>> seq1 = Sequence('seq1')
            >>> seq1.extend(segments_of_seq1) # 10 segments
            >>> seq2 = Sequence('seq2')
            >>> seq2.extend(segments_of_seq2) # 15 segments
            >>> seq3 = Sequence('seq3')
            >>> seq3.extend(segments_of_seq3) # 5 segments
            >>> seq_comb = Sequence.merge([seq1, seq2, seq3])
            >>> # returns list of 2 sequences with 10 and 20 segments,
            >>> # i.e. [seq1, seq2 + seq3]
        """
        if len(sequences) == 0:
            raise ValueError("merge requires at least one sequence")
        elif len(sequences) == 1:
            # special case, return current sequence:
            return sequences
        # Deep-copy so the caller's sequences are never mutated.
        sequences = [deepcopy(s) for s in sequences]
        merged_seqs = [sequences[0]]
        if segment_limit is None:
            segment_limit = np.inf
        segment_counter = sequences[0].n_segments()
        # Per merged sequence: how often each segment name has occurred,
        # used to generate unique names on clashes.
        seg_occurences = [{s: 1 for s in sequences[0].segments}]
        for seq in sequences[1:]:
            assert seq.n_segments() <= segment_limit, \
                f"Sequence {seq.name} has more segments ({seq.n_segments()})" \
                f" than the segment_limit ({segment_limit}). Cannot merge " \
                f"without cropping the sequence."
            # if over segment_limit, add another separate sequence
            # to merged sequences
            if merged_seqs[-1].n_segments() + seq.n_segments() > segment_limit:
                merged_seqs.append(seq)
                seg_occurences.append({s: 1 for s in seq.segments})
                segment_counter = seq.n_segments()
            # otherwise merge sequences
            else:
                for seg_name, segment in seq.segments.items():
                    try:
                        merged_seqs[-1].add(segment)
                    except NameError: # in case segment name exists, create new name
                        seg_occurences[-1][seg_name] += 1
                        new_name =seg_name + \
                                  f"_copy_from_merge_" \
                                  f"{seg_occurences[-1][seg_name] - 1}"
                        segment.rename(new_name)
                        merged_seqs[-1].add(segment)
                segment_counter += seq.n_segments()
                # update name of merged seq
                merged_seqs[-1].name += "+" + seq.name
                if merge_repeat_patterns:
                    for ch_name, pattern in seq.repeat_patterns.items():
                        # if channel is already present, update number of
                        # repetitions
                        if ch_name in merged_seqs[-1].repeat_patterns:
                            pattern_prev = \
                                merged_seqs[-1].repeat_patterns[ch_name]
                            # Only the repetition count (element 0) may
                            # differ; the loop spec must match exactly.
                            if pattern_prev[1:] != pattern[1:]:
                                raise NotImplementedError(
                                    f"The repeat patterns for channel: "
                                    f"{ch_name} do not have the same "
                                    f"'outer loop' specification (see "
                                    f"docstring Sequence.repeat). Repeat "
                                    f"patterns cannot be merged automatically. "
                                    f"Set merge_repeat_patterns to False and "
                                    f"update the repeat patterns manually.")
                            pattern_updated = (pattern_prev[0] + pattern[0],
                                               *pattern_prev[1:])
                            merged_seqs[-1].repeat_patterns[ch_name] = \
                                pattern_updated
                        # add repeat pattern
                        else:
                            merged_seqs[-1].repeat_patterns.update(
                                {ch_name: pattern})
        return merged_seqs
@staticmethod
def interleave_sequences(seq_list_list):
"""
Interleave a list of Sequence instances.
:param seq_list_list: list of lists of Sequence instances
:return: list of interleaved Sequences
"""
# make sure all sequence lists in seq_list_list have the same length
if len(set([len(seq_list) for seq_list in seq_list_list])) != 1:
raise ValueError('The sequence lists do not have the same length.')
# make sure all sequence lists in seq_list_list have the same segments
if len(set([seq_list[0].n_acq_elements() for
seq_list in seq_list_list])) != 1:
raise ValueError('The sequence lists do not have the same number '
'of segments.')
interleaved_seqs = len(seq_list_list) * len(seq_list_list[0]) * ['']
for i in range(len(seq_list_list)):
interleaved_seqs[i::len(seq_list_list)] = seq_list_list[i]
mc_points = [np.arange(interleaved_seqs[0].n_acq_elements()),
np.arange(len(interleaved_seqs))]
return interleaved_seqs, mc_points
    @staticmethod
    def compress_2D_sweep(sequences, segment_limit=None,
                          merge_repeat_patterns=True, mc_points=None):
        """
        Compresses a list of sequences to a lower number of sequences
        (if possible), each of which containing the same amount of segments
        (assumes fixed number of readout per segment) while respecting the
        segment_limit (memory limit). Note that all sequences MUST have the
        same number of segments. Wraps the Sequence.merge() by computing an
        effective segment limit that minimizes the total number of sequences
        (to reduce upload time overhead) while keeping the (new)
        number of segments per sequence constant (it currently is a limitation
        of 2D sweeps that all sequences must have same number of readouts)
        Args:
            sequences (list): list of sequences to compress, which all have
                the same number of segments
            segment_limit (int): maximal number of segments that can be in
                a sequence
            merge_repeat_patterns (bool): see docstring of Sequence.merge.
            mc_points: mc_points array of the original hardware sweep.
                Useful in case it differs from n_acq_elements().
        Returns: list of sequences for the compressed 2D sweep,
            new hardsweep points indices,
            new soft sweeppoints indices, and the compression factor
        """
        assert len(np.unique([s.n_segments() for s in sequences])) == 1, \
            "To allow compression, all sequences must have the same number " \
            "of segments"
        from pycqed.utilities.math import factors
        n_soft_sp = len(sequences)
        n_seg = sequences[0].n_segments()
        if segment_limit is None:
            segment_limit = np.inf
        # compute possible compression factors, largest first, so the first
        # factor that fits the segment_limit is the best compression.
        compression_fact = np.sort(factors(n_soft_sp))[::-1]
        for factor in compression_fact:
            if factor * n_seg > segment_limit:
                # too many segments in sequence, check for smaller factors
                continue
            elif factor == 1:
                # no compression possible
                log.warning(f'No compression possible: \n'
                            f'segments per sequence: \t\t{n_seg} \n'
                            f'limit of segments per sequence:\t{segment_limit}\n'
                            f'number of sequences: \t\t{n_soft_sp}\n'
                            f'To enable a compression, change the '
                            f'limit of segments to {compression_fact[-2] * n_seg} '
                            f'or the number of sequences to x such that x has a '
                            f'factor f larger than 1 for which f * '
                            f'{n_seg} < {segment_limit}, e.g. x = '
                            f'{np.floor(segment_limit / n_seg)} (full compression)')
            break
        # NOTE: `factor` deliberately leaks out of the loop above; 1 divides
        # every integer, so the loop always breaks with factor >= 1 bound.
        seg_lim_eff = factor * n_seg
        compressed_2D_sweep = Sequence.merge(sequences, seg_lim_eff,
                                             merge_repeat_patterns)
        if mc_points is None:
            hard_sp_ind = np.arange(compressed_2D_sweep[0].n_acq_elements())
            soft_sp_ind = np.arange(len(compressed_2D_sweep))
        else:
            # Preserve the caller-supplied hardware sweep size, scaled by
            # the compression.
            hard_sp_ind = np.arange(len(mc_points)*len(sequences) //
                                    len(compressed_2D_sweep))
            soft_sp_ind = np.arange(len(compressed_2D_sweep))
        return compressed_2D_sweep, hard_sp_ind, soft_sp_ind, factor
def __repr__(self):
string_repr = f"####### {self.name} #######\n"
for seg_name, seg in self.segments.items():
string_repr += str(seg) + "\n"
return string_repr
def __deepcopy__(self, memo):
cls = self.__class__
new_seq = cls.__new__(cls)
memo[id(self)] = new_seq
for k, v in self.__dict__.items():
if k == "pulsar": # the reference to pulsar cannot be deepcopied
setattr(new_seq, k, v)
else:
setattr(new_seq, k, deepcopy(v, memo))
return new_seq
def plot(self, segments=None, show_and_close=True, **segment_plot_kwargs):
"""
:param segments: list of segment names to plot
:param show_and_close: (bool) show and close the plots (default: True)
:param segment_plot_kwargs:
:return: A list of tuples of figure and axes objects if show_and_close
is False, otherwise no return value.
"""
plots = []
if segments is None:
segments = self.segments.values()
else:
segments = [self.segments[s] for s in segments]
for s in segments:
plots.append(s.plot(show_and_close=show_and_close,
**segment_plot_kwargs))
if show_and_close:
return
else:
return plots | {
"repo_name": "QudevETH/PycQED_py3",
"path": "pycqed/measurement/waveform_control/sequence.py",
"copies": "1",
"size": "17141",
"license": "mit",
"hash": 8284524679337477000,
"line_mean": 42.0703517588,
"line_max": 110,
"alpha_frac": 0.5367831515,
"autogenerated": false,
"ratio": 4.45452182952183,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.549130498102183,
"avg_score": null,
"num_lines": null
} |
"""A serializer for serializing SQLAlchemy models to JSON API spec."""
import datetime
from inflection import dasherize, underscore
class JSONAPISerializer(object):
    """A JSON API serializer that serializes SQLAlchemy models."""

    # SQLAlchemy model class this serializer handles; must be set by subclass.
    model = None
    # Attribute name used as the resource id.
    primary_key = 'id'
    # Attribute names to expose; must include the primary key.
    fields = []
    # Whether member names are dasherized, per the JSON API recommendation.
    dasherize = True

    def __init__(self):
        """Ensure required members are not defaults."""
        if self.model is None:
            raise TypeError("Model cannot be of type 'None'.")
        if self.primary_key not in self.fields:
            raise ValueError(
                "Serializer fields must contain primary key '{}'".format(
                    self.primary_key))

    def serialize(self, resources):
        """Serialize resource(s) according to json-api spec.

        Accepts either a single model instance or a SQLAlchemy query
        (detected via its ``count`` attribute) and returns the top-level
        document dict with 'meta', 'jsonapi' and 'data' members.
        """
        serialized = {
            'meta': {
                'sqlalchemy_jsonapi_version': '4.0.9'
            },
            'jsonapi': {
                'version': '1.0'
            }
        }
        # Determine multiple resources by checking for SQLAlchemy query count.
        if hasattr(resources, 'count'):
            serialized['data'] = [self._render_resource(resource)
                                  for resource in resources]
        else:
            serialized['data'] = self._render_resource(resources)
        return serialized

    def _render_resource(self, resource):
        """Renders a resource's top level members based on json-api spec.

        Top level members include:
            'id', 'type', 'attributes', 'relationships'
        """
        if not resource:
            return None
        # Must not render a resource that has same named
        # attributes as different model.
        if not isinstance(resource, self.model):
            raise TypeError(
                'Resource(s) type must be the same as the serializer model type.')
        top_level_members = {}
        # A missing primary key propagates AttributeError naturally; the
        # original wrapped this in a no-op try/except-raise, removed here.
        top_level_members['id'] = str(getattr(resource, self.primary_key))
        top_level_members['type'] = resource.__tablename__
        top_level_members['attributes'] = self._render_attributes(resource)
        top_level_members['relationships'] = self._render_relationships(
            resource)
        return top_level_members

    def _render_attributes(self, resource):
        """Render the resources's attributes."""
        attributes = {}
        # Foreign-key columns and relationship keys may not appear in
        # 'attributes' per the json-api spec; collect them for the check below.
        attrs_to_ignore = set()
        for key, relationship in resource.__mapper__.relationships.items():
            attrs_to_ignore.update(set(
                [column.name for column in relationship.local_columns]).union(
                    {key}))
        if self.dasherize:
            mapped_fields = {x: dasherize(underscore(x)) for x in self.fields}
        else:
            mapped_fields = {x: x for x in self.fields}
        for attribute in self.fields:
            if attribute == self.primary_key:
                continue
            # Per json-api spec, we cannot render foreign keys
            # or relationsips in attributes.
            if attribute in attrs_to_ignore:
                # Same exception type as before, now with a message.
                raise AttributeError(
                    "Serializer fields must not contain foreign key or "
                    "relationship '{}'.".format(attribute))
            # getattr errors propagate unchanged; the original's
            # `except AttributeError: raise` was a no-op and is removed.
            value = getattr(resource, attribute)
            if isinstance(value, datetime.datetime):
                attributes[mapped_fields[attribute]] = value.isoformat()
            else:
                attributes[mapped_fields[attribute]] = value
        return attributes

    def _render_relationships(self, resource):
        """Render the resource's relationships."""
        relationships = {}
        related_models = resource.__mapper__.relationships.keys()
        primary_key_val = getattr(resource, self.primary_key)
        if self.dasherize:
            mapped_relationships = {
                x: dasherize(underscore(x)) for x in related_models}
        else:
            mapped_relationships = {x: x for x in related_models}
        for model in related_models:
            relationships[mapped_relationships[model]] = {
                'links': {
                    'self': '/{}/{}/relationships/{}'.format(
                        resource.__tablename__,
                        primary_key_val,
                        mapped_relationships[model]),
                    'related': '/{}/{}/{}'.format(
                        resource.__tablename__,
                        primary_key_val,
                        mapped_relationships[model])
                }
            }
        return relationships
| {
"repo_name": "ColtonProvias/sqlalchemy-jsonapi",
"path": "sqlalchemy_jsonapi/declarative/serializer.py",
"copies": "1",
"size": "4651",
"license": "mit",
"hash": 7957508448204500000,
"line_mean": 35.3359375,
"line_max": 82,
"alpha_frac": 0.5431090088,
"autogenerated": false,
"ratio": 5.033549783549783,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6076658792349784,
"avg_score": null,
"num_lines": null
} |
"""A series of classes for loading external data"""
import csv
import json
from .utils import find_file
class Loader(object):
    """Base loader class for :class:`LoadedDataStruct`.

    Subclasses override :meth:`load` to return the loaded data.
    """
    def __init__(self, *args, **kwargs):
        # BUG FIX: the original signature was `def __init__(*args, **kwargs)`,
        # which silently captured the instance inside *args instead of
        # binding it to an explicit `self`.
        pass

    def load(self):
        """Return the loaded data; the base implementation returns None."""
        pass
class FileLoader(Loader):
    """Loader that locates a data file on disk and parses it into a dict."""

    # Absolute path of the located file, resolved in __init__.
    filepath = None

    def __init__(self, filename, path=None):
        # find_file searches `path` (or default locations) for `filename`.
        self.filepath = find_file(filename, path)

    def load(self):
        """Parse the located file and return its contents as a dict."""
        return self._read_file_as_dict(self.filepath)

    def _read_file_as_dict(self, filepath):
        # Hook for subclasses: parse *filepath* into a dict-like object.
        pass
class JSONLoader(FileLoader):
    """FileLoader for files containing a JSON document."""

    def _read_file_as_dict(self, filepath):
        # json.load already yields the dict structure we need.
        with open(filepath) as stream:
            return json.load(stream)
class CSVLoader(FileLoader):
    """FileLoader for CSV files, mapped into nested dicts via TableMapping."""

    def __init__(self, filename, path=None, dialect='excel', table_form=None,
                 **fmtparams):
        super(CSVLoader, self).__init__(filename, path)
        self.table_form = table_form
        self.params = fmtparams
        self.dialect = dialect

    def _read_file_as_dict(self, filepath):
        # Materialize all rows first, then let TableMapping infer (or use
        # the configured) table encoding.
        with open(filepath) as stream:
            rows = list(csv.reader(stream, self.dialect, **self.params))
        return TableMapping(rows, self.table_form)
# - - - - - - - - - - - - - - -
# Data Table Map For CSVLoader
# - - - - - - - - - - - - - - -
class TableMapping(dict):
    """Dict built from a two-dimensional categorical table (wide or narrow)."""

    def __init__(self, graph=None, encoding=None):
        """Convert a two dimensional categorical graph into a dict

        Parameters
        ----------
        graph: iterable of iterables
            The two dimensional object in a narrow or wide form encoding
        encoding: "wide" or "narrow" (default: None)
            Specify how the data graph is encoded. If not specified, the
            encoding is inferred based on how categories are organized."""
        super(TableMapping, self).__init__()
        if graph is not None:
            if encoding == 'wide':
                self._wideform_encoding(graph)
            elif encoding == 'narrow':
                self._narrowform_encoding(graph)
            else:
                self.encode_as_dict(graph)

    def encode_as_dict(self, graph):
        """Infer the table encoding of *graph* and load it into this dict."""
        for l in list(zip(*graph))[:-1]:
            # The first two columns are expected to
            # have shared categories. Thus duplicates
            # should be present.
            if len(l) != len(set(l)):
                return self._narrowform_encoding(graph)
        else:
            return self._wideform_encoding(graph)

    def _wideform_encoding(self, ll):
        """Load a wide-form table: row 0 holds column keys, column 0 row keys."""
        for i in range(1, len(ll)):
            d = {}
            l = ll[i]
            try:
                self[l[0]] = d
            except IndexError:
                raise ValueError("No values in row 0")
            for j in range(1, len(l)):
                try:
                    v = l[j]
                # BUG FIX: was a bare `except:` which also swallowed
                # SystemExit/KeyboardInterrupt; only IndexError can occur here.
                except IndexError:
                    m = "No values in row %r, column %r"
                    raise ValueError(m % (i, j))
                else:
                    d[ll[0][j]] = v

    def _narrowform_encoding(self, ll):
        """Load a narrow-form table: each row is a key path ending in a value."""
        for l in ll[1:]:
            d = self
            # Walk/create the nested dicts for all but the last key/value pair.
            for v in l[:-2]:
                if v in d:
                    d = d[v]
                else:
                    _d = {}
                    d[v] = _d
                    d = _d
            k, v = l[-2:]
            d[k] = v
| {
"repo_name": "rmorshea/dstruct",
"path": "dstruct/loader.py",
"copies": "1",
"size": "3323",
"license": "mit",
"hash": -5534117996392747000,
"line_mean": 28.9369369369,
"line_max": 91,
"alpha_frac": 0.507071923,
"autogenerated": false,
"ratio": 4.027878787878788,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00322254901532336,
"num_lines": 111
} |
import os
import re
import gtk, pango
import dialog
import browser
import utils
import preferences
import hig
import random
import ignore_info
from fractutils import flickr, slave
TOKEN = None
class FlickrGTKSlave(slave.GTKSlave):
    # Thin wrapper around slave.GTKSlave that parses the child process
    # output as a Flickr REST API response.
    def __init__(self,cmd,*args):
        slave.GTKSlave.__init__(self,cmd,*args)
    def response(self):
        # Non-zero exit status means the request helper failed; surface its
        # stderr. Otherwise parse stdout as a Flickr API response document.
        if self.process.returncode:
            # an error occurred
            raise Exception("An error occurred:\n%s" % self.err_output)
        return flickr.parseResponse(self.output)
def is_authorized():
    """Return True if a Flickr auth token is stored in the preferences.

    Side effect: loads the token into the module-level TOKEN global.
    """
    global TOKEN
    TOKEN = preferences.userPrefs.get("user_info", "flickr_token")
    return TOKEN != ""
def get_user(window, f):
    """Return the stored Flickr NSID, running the auth assistant if needed."""
    if not is_authorized():
        assistant = FlickrAssistantDialog(window, f)
        assistant.run()
    return preferences.userPrefs.get("user_info", "nsid")
def show_flickr_assistant(parent, alt_parent, f, dialog_mode):
    """Show the upload dialog when authorized, else the auth assistant."""
    if not is_authorized():
        # The assistant is always shown modally, hence the literal True.
        FlickrAssistantDialog.show(parent, alt_parent, f, True)
    else:
        FlickrUploadDialog.show(parent, alt_parent, f, dialog_mode)
def display_flickr_error(err):
    """Pop up a modal error alert describing a Flickr failure."""
    alert = hig.ErrorAlert(
        primary=_("Upload Error"),
        secondary=str(err))
    alert.run()
    alert.destroy()
class FlickrUploadDialog(dialog.T):
clean_formula_re = re.compile(r'[^a-z0-9]', re.IGNORECASE)
def show(parent, alt_parent, f,dialog_mode):
dialog.T.reveal(FlickrUploadDialog,dialog_mode, parent, alt_parent, f)
show = staticmethod(show)
def __init__(self, main_window, f):
dialog.T.__init__(
self,
_("Upload to Flickr"),
main_window,
gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE))
self.f = f
self.main_window = main_window
self.controls = gtk.VBox()
self.vbox.pack_start(self.controls)
self.slave = None
table = gtk.Table(5,2,False)
self.controls.pack_start(table)
self.title_entry = gtk.Entry()
table.attach(self.title_entry,1,2,0,1,gtk.EXPAND | gtk.FILL, 0, 2, 2)
title_label = gtk.Label(_("Tit_le:"))
title_label.set_mnemonic_widget(self.title_entry)
title_label.set_use_underline(True)
table.attach(title_label,0,1,0,1,gtk.EXPAND | gtk.FILL, 0, 2, 2)
self.tags = gtk.Entry()
table.attach(self.tags,1,2,1,2,gtk.EXPAND | gtk.FILL, 0, 2, 2)
tag_label = gtk.Label(_("Ta_gs:"))
tag_label.set_mnemonic_widget(self.tags)
tag_label.set_use_underline(True)
table.attach(tag_label,0,1,1,2,gtk.EXPAND | gtk.FILL, 0, 2, 2)
self.description = gtk.TextView()
self.description.set_wrap_mode(gtk.WRAP_WORD)
sw = gtk.ScrolledWindow ()
sw.set_shadow_type (gtk.SHADOW_ETCHED_IN)
sw.set_policy (gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
sw.add(self.description)
table.attach(sw,1,2,2,3,gtk.EXPAND | gtk.FILL, 0, 2, 2)
desc_label = gtk.Label(_("_Description:"))
desc_label.set_mnemonic_widget(self.description)
desc_label.set_use_underline(True)
table.attach(desc_label,0,1,2,3,gtk.EXPAND | gtk.FILL, 0, 2, 2)
self.include_params = gtk.CheckButton(
_("_Include parameters in description"))
table.attach(self.include_params,0,2,3,4,gtk.EXPAND | gtk.FILL, 0, 2, 2)
self.blog_menu = utils.create_option_menu([_("<None>")])
table.attach(self.blog_menu, 1,2,4,5,gtk.EXPAND | gtk.FILL, 0, 2, 2)
self.get_blogs()
blog_label = gtk.Label(_("_Blog To:"))
blog_label.set_mnemonic_widget(self.blog_menu)
blog_label.set_use_underline(True)
table.attach(blog_label,0,1,4,5,gtk.EXPAND | gtk.FILL, 0, 2, 2)
self.upload_button = gtk.Button(_("_Upload"))
self.upload_button.connect("clicked", self.onUpload)
table.attach(self.upload_button, 0,2,5,6,gtk.EXPAND | gtk.FILL, 0, 2, 2)
self.cancel_button = gtk.Button(_("_Cancel Upload"))
self.cancel_button.connect("clicked", self.onCancelUpload)
table.attach(self.cancel_button, 0,2,6,7,gtk.EXPAND | gtk.FILL, 0, 2, 2)
self.set_upload_mode(True)
self.bar = gtk.ProgressBar()
self.vbox.pack_end(self.bar,False,False)
def init_title(self):
pass
def runRequest(self,req,on_done, *args):
self.slave = FlickrGTKSlave(req.cmd,*req.args)
self.slave.connect('progress-changed',self.onProgress)
self.slave.connect('operation-complete', on_done, *args)
self.slave.run(req.input)
def onProgress(self,slave,type,position):
if position == -1.0:
self.bar.pulse()
else:
self.bar.set_fraction(position)
self.bar.set_text(type)
return True
def onCancelUpload(self,button):
if self.slave:
self.slave.terminate()
button.set_sensitive(False)
def onResponse(self,widget,id):
self.hide()
def get_description(self):
compress = preferences.userPrefs.getboolean("general","compress_fct")
buffer = self.description.get_buffer()
description = buffer.get_text(
buffer.get_start_iter(),buffer.get_end_iter())
if self.include_params.get_active():
description += "\n-----------------------------------\n"
description += self.f.serialize(compress)
return description
def get_blogs(self):
self.blogs = []
global TOKEN
req = flickr.requestBlogsGetList(TOKEN)
self.runRequest(req,self.onBlogsFetched)
def onBlogsFetched(self,slave):
blogs = flickr.parseBlogsGetList(slave.response())
for blog in blogs:
utils.add_menu_item(self.blog_menu,blog.name)
self.blogs.append(blog)
def get_tags(self):
formula_tags = " ".join([
FlickrUploadDialog.clean_formula_re.sub('',x.funcName) for x in
self.f.forms])
return "fractal gnofract4d %s %s" % (formula_tags,self.tags.get_text())
def set_upload_mode(self,is_upload):
self.cancel_button.set_sensitive(not is_upload)
self.upload_button.set_sensitive(is_upload)
def onUpload(self,widget):
global TOKEN
filename = "/tmp/%d.png" % int(random.uniform(0,1000000))
self.f.save_image(filename)
title_ = self.title_entry.get_text()
description_ = self.get_description()
req = flickr.requestUpload(
filename,
TOKEN,
title=title_,
description=description_,
tags=self.get_tags())
self.runRequest(req,self.onUploaded,title_,description_)
self.set_upload_mode(False)
def onUploaded(self, slave,title,description):
try:
id = flickr.parseUpload(slave.response())
except Exception,err:
display_flickr_error(err)
self.onUploadComplete()
return
req = flickr.requestGroupsPoolsAdd(id,TOKEN)
self.runRequest(req,self.onPoolAdded,title,description,id)
    def onPoolAdded(self, slave,title,description,id):
        """After groups.pools.add: report errors, then blog-post or finish.

        NOTE(review): assumes exceptions raised by slave.response() carry a
        Flickr error `code` attribute -- confirm against the flickr module.
        """
        try:
            dummy = slave.response() # just to detect errors
        except Exception,err:
            if err.code == 2:
                # user isn't a member of this group
                d = hig.InformationAlert(
                    primary=_("Can't Post to Group"),
                    secondary=_("Your image has been uploaded to Flickr, but you aren't a member of the Gnofract 4D group, so your image hasn't been added to the group pool. You can join the group at http://www.flickr.com/groups_join.gne?id=46555832@N00 ."),
                    parent = self,
                    ignore=ignore_info.T("cannot_post", True, gtk.RESPONSE_ACCEPT))
                d.run()
                d.destroy()
            else:
                display_flickr_error(err)
        # post to a blog if selected
        selected_blog = utils.get_selected(self.blog_menu)
        if selected_blog > 0:
            # 0 is "<None>"
            blog = self.blogs[selected_blog-1]
            req = flickr.requestBlogsPostPhoto(blog,id,title,description,TOKEN)
            self.runRequest(req, self.onBlogPostComplete)
        else:
            self.onUploadComplete()
def onBlogPostComplete(self,slave):
try:
resp = slave.response()
except Exception, err:
display_flickr_error(err)
self.onUploadComplete()
    def onUploadComplete(self):
        # Re-enable the Upload button; the upload sequence is over.
        self.set_upload_mode(True)
class FlickrAssistantDialog(dialog.T):
    """Setup wizard that authorizes Gnofract 4D to post to Flickr.

    Flow: request a frob -> show the auth URL -> user authorizes in the
    browser -> exchange the frob for a token -> save the credentials.
    """
    def show(parent, alt_parent, f,dialog_mode):
        # Reveal (or create) the assistant dialog.
        dialog.T.reveal(FlickrAssistantDialog,dialog_mode, parent, alt_parent, f)
    # Pre-decorator-syntax staticmethod idiom.
    show = staticmethod(show)
    intro_text=_("""Flickr is an online image-sharing service. If you like, Gnofract 4D can post your fractal images to the service so others can see them.
In order to post images to Flickr, you first need to have a Flickr or Yahoo account, and then authorize Gnofract 4D to post images for you. You only need to do this once.
To set that up, please click on the following link and follow the instructions on-screen. When done, close the browser window and click Next.
""")
    success_text=_("""Congratulations, you've successfully authorized Gnofract 4D to access Flickr. Your user details are:
Username : %s
Full Name : %s
Click Finish to save your credentials and proceed.""")
    # Custom dialog response codes (in addition to the stock GTK ones).
    NEXT=1
    FINISH=2
    def __init__(self, main_window, f):
        """Build the wizard UI and immediately request a frob from Flickr."""
        dialog.T.__init__(
            self,
            _("Flickr Integration Setup"),
            main_window,
            gtk.DIALOG_DESTROY_WITH_PARENT,
            (gtk.STOCK_CANCEL, gtk.RESPONSE_CLOSE,
             _("_Next"), FlickrAssistantDialog.NEXT))
        self.main_window = main_window
        # Text area that will hold the instructions plus the auth link.
        self.textview = gtk.TextView()
        self.textview.set_wrap_mode(gtk.WRAP_WORD)
        self.textbuffer = self.textview.get_buffer()
        self.textview.connect("button_release_event",self.onClick)
        self.vbox.pack_start(self.textview)
        self.bar = gtk.ProgressBar()
        self.vbox.pack_end(self.bar,False,False)
        # Start the first async step of the authorization dance.
        req = flickr.requestFrob()
        self.runRequest(req,self.onFrobReceived)
        self.set_size_request(500,400)
        self.f = f
    def runRequest(self,req,on_done,*args):
        """Run a flickr request on a slave; call on_done when it completes."""
        self.slave = FlickrGTKSlave(req.cmd,*req.args)
        self.slave.connect('progress-changed',self.onProgress)
        self.slave.connect('operation-complete', on_done)
        self.slave.run(req.input)
    def onProgress(self,slave,type,position):
        """Update the progress bar; -1.0 means 'unknown', so pulse."""
        if position == -1.0:
            self.bar.pulse()
        else:
            self.bar.set_fraction(position)
        self.bar.set_text(type)
        return True
    def onFrobReceived(self,slave):
        """Parse the frob and display the authorization URL."""
        try:
            self.frob = flickr.parseFrob(self.slave.response())
            self.frob = self.frob.encode("ascii")
        except Exception,err:
            display_flickr_error(err)
            return
        # now display auth screen
        self.auth_url = flickr.getAuthUrl(self.frob)
        self.href_tag = self.textbuffer.create_tag(
            "href",foreground="blue",underline=pango.UNDERLINE_SINGLE)
        self.textbuffer.set_text(FlickrAssistantDialog.intro_text,-1)
        self.textbuffer.insert_with_tags(
            self.textbuffer.get_end_iter(),
            self.auth_url,self.href_tag)
        self.vbox.show_all()
    def onClick(self, widget, event):
        """Launch the browser when the user clicks the auth URL."""
        if event.button!=1:
            return
        (x,y) = (event.x, event.y)
        (bx, by) = widget.window_to_buffer_coords(gtk.TEXT_WINDOW_TEXT,int(x),int(y))
        iter = widget.get_iter_at_location(bx,by)
        if not iter.has_tag(self.href_tag):
            # click wasn't on a URL
            return
        # user clicked on URL, launch browser
        utils.launch_browser(
            preferences.userPrefs, self.auth_url, self.main_window)
    def onResponse(self,widget,id):
        """Route dialog responses: close/cancel, Next (check), Finish (save)."""
        if id == gtk.RESPONSE_CLOSE or \
           id == gtk.RESPONSE_NONE or \
           id == gtk.RESPONSE_DELETE_EVENT:
            self.slave.terminate()
            self.hide()
        elif id == gtk.RESPONSE_ACCEPT:
            self.onAccept()
        elif id == FlickrAssistantDialog.NEXT:
            self.onCheck()
        elif id == FlickrAssistantDialog.FINISH:
            self.onAccept()
    def onCheck(self):
        """'Next' pressed: exchange the frob for a token."""
        req = flickr.requestToken(self.frob)
        self.runRequest(req,self.onTokenReceived)
    def onTokenReceived(self,slave):
        """Parse the token; on success show user details and a Finish button."""
        try:
            self.token = flickr.parseToken(slave.response())
        except flickr.FlickrError, err:
            msg = _("Make sure you followed the link and authorized access.\n") + str(err)
            d = hig.ErrorAlert(
                primary=_("Flickr returned an error."),
                secondary=msg,
                parent=self.main_window)
            d.run()
            d.destroy()
            return
        except Exception,err:
            display_flickr_error(err)
            return
        # update window with results
        username, fullname = self.token.user.username, self.token.user.fullname
        success_text = FlickrAssistantDialog.success_text % (username, fullname)
        self.textbuffer.set_text(success_text,-1)
        # hide Next, show Finish
        self.set_response_sensitive(FlickrAssistantDialog.NEXT, False)
        self.add_button(_("_Finish"), FlickrAssistantDialog.FINISH)
    def onAccept(self):
        """Persist the token and nsid, then reopen the upload dialog."""
        preferences.userPrefs.set("user_info", "flickr_token",self.token.token)
        preferences.userPrefs.set("user_info", "nsid", self.token.user.nsid)
        self.hide()
        if(is_authorized()):
            FlickrUploadDialog.show(self.parent,self.parent,self.f,True)
| {
"repo_name": "ericchill/gnofract4d",
"path": "fract4dgui/flickr_assistant.py",
"copies": "1",
"size": "14246",
"license": "bsd-3-clause",
"hash": 1670102542822473500,
"line_mean": 33.3277108434,
"line_max": 258,
"alpha_frac": 0.5967289064,
"autogenerated": false,
"ratio": 3.5920322743318205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4688761180731821,
"avg_score": null,
"num_lines": null
} |
"""A series of fixtures that are shared among all tests."""
import logging
import os
import pytest
import uuid
from flask_socketio import SocketIOTestClient
from webtest import TestApp
# Must be set before the matl_online imports below so the app loads its
# test configuration.
os.environ['MATL_ONLINE_ENV'] = 'test'
from matl_online.app import create_app
from matl_online.database import db as _db
from matl_online.extensions import socketio
from matl_online.settings import config
from matl_online.tasks import OutputHandler
@pytest.fixture(scope='function')
def testapp(app):
    """Wrap the Flask app in a WebTest TestApp for functional requests."""
    web_app = TestApp(app)
    return web_app
@pytest.fixture(scope='function')
def socketclient(app):
    """Fake socketio client.

    Note: pytest.yield_fixture is deprecated (removed in pytest 6.2);
    plain pytest.fixture has supported yield-style fixtures since 3.0.
    """
    yield SocketIOTestClient(app, socketio)
@pytest.fixture(scope='function')
def app():
    """Flask app instance with a pushed test request context.

    Uses pytest.fixture instead of the deprecated pytest.yield_fixture.
    """
    _app = create_app(config)
    ctx = _app.test_request_context()
    ctx.push()
    yield _app
    # Teardown: pop the request context pushed above.
    ctx.pop()
@pytest.fixture(scope='function')
def logger():
    """Logger which can be used to monitor logging calls.

    Uses pytest.fixture instead of the deprecated pytest.yield_fixture.
    """
    # Create a new random log name so each test gets an isolated logger.
    logger = logging.getLogger(str(uuid.uuid4()))
    logger.setLevel(logging.INFO)
    yield logger
    for handler in logger.handlers:
        # In the special case where an OutputHandler is registered, we want
        # to clear out the message queue
        if isinstance(handler, OutputHandler):
            handler.clear()
    logger.handlers = []
@pytest.fixture
def moctave(mocker, logger):
    """Mock version of OctaveEngine to monitor calls to octave."""
    mock = mocker.patch('matl_online.tasks.octave')
    mock.evals = list()

    def record_eval(*args, **kwargs):
        # Remember every eval'd payload so tests can inspect it later.
        mock.evals.append(*args)

    mock.eval = record_eval
    mock.logger = logger
    return mock
@pytest.fixture(scope='function')
def db(app):
    """Database instance bound to the test app; dropped after each test.

    Uses pytest.fixture instead of the deprecated pytest.yield_fixture.
    """
    _db.app = app
    with app.app_context():
        _db.create_all()
    yield _db
    # Explicitly close the database connection
    _db.session.close()
    _db.drop_all()
| {
"repo_name": "suever/MATL-Online",
"path": "tests/conftest.py",
"copies": "1",
"size": "1996",
"license": "mit",
"hash": -1933689359012865800,
"line_mean": 21.9425287356,
"line_max": 75,
"alpha_frac": 0.6828657315,
"autogenerated": false,
"ratio": 3.609403254972875,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9785409711470056,
"avg_score": 0.0013718550005639022,
"num_lines": 87
} |
''' A series of functions that return a graph
'''
import random
import math
import networkx as nx
def get_graph(objects, properties):
    """Build a graph with len(objects)-1 nodes of the requested type.

    Retries up to 6 total attempts until the generated graph is connected
    (best effort). Returns (graph, connected_components).
    NOTE(review): assumes nx.connected_components returns an indexable
    list (old networkx API) -- newer versions return a generator.
    """
    graph_type = properties['graph_type']
    n = len(objects) - 1
    # Attachment count for the small-world / preferential-attachment models.
    k = properties.get('num_nodes_to_attach', 3)
    r = properties['connection_probability']
    builders = {
        'random': lambda: nx.fast_gnp_random_graph(n, r),
        'erdos_renyi_graph': lambda: nx.erdos_renyi_graph(n, r),
        'watts_strogatz_graph': lambda: nx.watts_strogatz_graph(n, k, r),
        'newman_watts_strogatz_graph':
            lambda: nx.newman_watts_strogatz_graph(n, k, r),
        'barabasi_albert_graph': lambda: nx.barabasi_albert_graph(n, k, r),
        'powerlaw_cluster_graph': lambda: nx.powerlaw_cluster_graph(n, k, r),
        'cycle_graph': lambda: nx.cycle_graph(n),
    }
    # Star graph by default for unrecognized types.
    build = builders.get(graph_type, lambda: nx.star_graph(len(objects) - 1))
    for attempt in range(6):
        x = build()
        cc_conn = nx.connected_components(x)
        if len(cc_conn) == 1:
            # Connected on this attempt -- done (best effort otherwise).
            break
    return x, cc_conn
def create_graph_type(objects, properties):
    """Create a graph over `objects` plus centrality statistics."""
    x, cc_conn = get_graph(objects, properties)
    stats = {
        'cc': nx.closeness_centrality(x),
        'bc': nx.betweenness_centrality(x),
        'deg': nx.degree_centrality(x),
        'num_cc': len(cc_conn),
        'largest_cc': len(cc_conn[0]),
    }
    # Relabel the integer graph onto the actual objects.
    conn = nx.Graph()
    for (i, j) in x.edges():
        conn.add_edge(objects[i], objects[j])
    return conn, stats
def random_directed_graph(objects, p):
    """Directed graph: each ordered pair (self-loops included) gets an
    edge with probability p."""
    conn = nx.DiGraph()
    for src in objects:
        for dst in objects:
            if random.random() <= p:
                conn.add_edge(src, dst)
    return conn
def random_undirected_graph(objects, p):
    """Undirected graph where each pair is connected with probability ~p.

    Every ordered pair (including self-pairs) is visited, so each
    unordered pair is sampled twice; using q = 1 - sqrt(1 - p) per visit
    gives an overall edge probability of 1 - (1 - q)^2 = p.
    """
    threshold = 1 - math.sqrt(1 - p)
    conn = nx.Graph()
    for first in objects:
        for second in objects:
            if random.random() <= threshold:
                conn.add_edge(first, second)
    return conn
def spatial_random_graph(objects, radius=1):
    """Connect objects placed uniformly at random on the unit square
    whenever they lie within `radius` of each other."""
    # First assign every object a location on a 1x1 board.
    locs = {}
    for obj in objects:
        locs[obj] = (random.random(), random.random())
    # Now connect objects based on the input radius.
    conn = nx.Graph()
    for obj1 in objects:
        (x1, y1) = locs[obj1]
        for obj2 in objects:
            (x2, y2) = locs[obj2]
            # Note: obj1 == obj2 has distance 0, so self-loops are included.
            if math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2) <= radius:
                conn.add_edge(obj1, obj2)
    return conn
def collaborative_graph(objects):
    """Build a 3-level collaboration DiGraph: root, 4 middle nodes, leaves.

    Leaves (objects[5:]) link round-robin to the 4 middle nodes, random
    leaf pairs and all middle pairs collaborate (edges both ways), and the
    middle row links to the root. Node 'rank': 0=root, 1=middle, 2=leaf.
    """
    conn = nx.DiGraph()
    counter = 0
    # Link bottom layer to middle layer.
    for obj in objects[5:]:
        conn.add_edge(obj, objects[counter + 1])
        conn.node[obj]['rank'] = 2
        counter = (counter + 1) % 4
    # Add collaboration between bottom row.
    # BUG FIX: use explicit floor division -- `len(objects)/4` was Python 2
    # integer division and raises TypeError in range() under Python 3.
    for i in range(len(objects) // 4):
        obj1 = random.choice(objects[5:])
        obj2 = random.choice(objects[5:])
        while obj2 == obj1:
            obj2 = random.choice(objects[5:])
        conn.add_edge(obj1, obj2)
        conn.add_edge(obj2, obj1)
    # Add collaboration between middle row (fully connected, both ways).
    for obj1 in objects[1:5]:
        for obj2 in objects[1:5]:
            if obj1 != obj2:
                conn.add_edge(obj1, obj2)
                conn.add_edge(obj2, obj1)
    # Link middle layer to root.
    for obj in objects[1:5]:
        conn.node[obj]['rank'] = 1
        conn.add_edge(obj, objects[0])
    conn.node[objects[0]]['rank'] = 0
    return conn
def hierarchy_graph(objects):
    """Strict 3-level hierarchy DiGraph: leaves -> 4 middle nodes -> root.

    Node 'rank' attributes: 0=root, 1=middle, 2=leaf.
    """
    conn = nx.DiGraph()
    # Bottom layer reports to the middle layer, round-robin over 4 nodes.
    for index, leaf in enumerate(objects[5:]):
        conn.add_edge(leaf, objects[(index % 4) + 1])
        conn.node[leaf]['rank'] = 2
    # Middle layer reports to the root.
    for mid in objects[1:5]:
        conn.add_edge(mid, objects[0])
        conn.node[mid]['rank'] = 1
    conn.node[objects[0]]['rank'] = 0
    return conn
| {
"repo_name": "pankajk/MasterThesis",
"path": "Code/Graph_Kernels/SKG/GraphGen.py",
"copies": "1",
"size": "4474",
"license": "mit",
"hash": -5071038617852754000,
"line_mean": 29.8551724138,
"line_max": 75,
"alpha_frac": 0.5748770675,
"autogenerated": false,
"ratio": 3.4388931591083782,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9438259064811921,
"avg_score": 0.015102232359291357,
"num_lines": 145
} |
"""A series of helper functions to create a mapping from
yelp IDs to place IDs from other providers in our database.
Intended to be used from a REPL. Primary functions are:
- yelp_ids_to_tripadvisor_ids
- (for debugging) verify_yelp_id_to_tripadvisor_ids
- yelp_ids_to_wiki_pages
- write_to_db
TODO:
- Handle hitting API limits
- Handle network errors (e.g. TA API)
- TEST ACCURACY
- Make it easy to run (read place ids from file? store intermediate xw into file? then try atomic update?)
- Make this code re-usable and put into separate files.
- How does this handle incorrect Yelp places?
- Keeps throwing 504: Gateway Time-out from Yelp3Client (rate limit?).
"""
from app import geo, util
from app.constants import proxwalkTable
from app.firebase import db
from app.providers import tripadvisor as ta
from app.providers import gplaces, wp, yelp
# Provider names this module knows how to write into the crosswalk DB.
CROSSWALK_KEYS = {
    'tripadvisor',
    'google',
    'wikipedia',
}
# HACK: used to avoid duplicating Yelp place requests.
# Maps yelp_id -> resolved Yelp place dict.
_YELP_ID_TO_PLACE_CACHE = {}
def getAndCacheProviderIDs(keyID, providersList, identifiers):
    """Return provider->ID mappings for a place, fetching (and caching in
    the DB) any providers not already cached."""
    providerIDs = _getCachedIDsForPlace(keyID, providersList)
    missing = [provider for provider in providersList
               if provider not in providerIDs]
    providerIDs.update(fetchAndCacheProviders(keyID, missing, identifiers))
    return providerIDs
def fetchAndCacheProviders(keyID, providersList, identifiers):
    """Fetch place IDs from each requested provider and cache them in the DB.

    :param keyID: Yelp ID used as the crosswalk key
    :param providersList: providers to fetch ("tripadvisor", "wikipedia",
        "google")
    :param identifiers: dict with "lat", "lng" and "name" for the place
    :return: {<provider>: <id>, ...} for the providers that matched
    :raises LookupError: when the TripAdvisor API budget is exhausted
    """
    providers = {}
    coordinates = (identifiers["lat"], identifiers["lng"])
    name = identifiers["name"]
    for p in providersList:
        if p == "tripadvisor":
            try:
                res = ta.search(coordinates, name)["data"]
                if res:
                    taID = res[0]["location_id"]
                    providers.update({p: taID})
            except Exception as e:
                # BUG FIX: this handler referenced an undefined name `log`
                # (NameError on the error path); report the failure the way
                # the rest of this module does.
                print("Error fetching tripadvisor mapping: {}".format(e))
            hasAPICalls = util.recordAPIStatus("tripadvisor-mapper")
            if not hasAPICalls:
                raise LookupError("No more API calls")
        elif p == "wikipedia":
            res = wp.search(coordinates, name)
            if res:
                providers.update({p: res})
        elif p == "google":
            gplace = gplaces.search(name, coordinates)
            if gplace:
                gplace.get_details()
                providers.update({p: gplace.place_id})
    _write_crosswalk_to_db(keyID, providers)
    return providers
def _getCachedIDsForPlace(keyID, providersList):
    """Read cached provider IDs for `keyID` from the crosswalk table,
    restricted to the requested providers. "yelp3" maps to keyID itself."""
    ret = {"yelp3": keyID} if "yelp3" in providersList else {}
    cached = _get_proxwalk_db().child(keyID).get().val()
    if not cached:
        return ret
    for provider in list(cached.keys()):
        if provider in providersList:
            ret[provider] = cached[provider]
    return ret
def _get_proxwalk_db():
    """Return the crosswalk ('proxwalk') table handle."""
    return db().child(proxwalkTable)
# TODO: maybe we should allow users to pass in yelp data.
def _get_name_coord_from_yelp_id(yelp_id):
    """Return (name, (lat, lng)) for a Yelp ID, caching the Yelp lookup.

    BUG FIX: `dict.get(key, default)` evaluates its default eagerly, so the
    old code issued a Yelp API request even on a cache hit, defeating the
    cache entirely. Only resolve when the ID is actually missing.
    """
    place = _YELP_ID_TO_PLACE_CACHE.get(yelp_id)
    if place is None:
        place = yelp.resolve_with_key(yelp_id)
        _YELP_ID_TO_PLACE_CACHE[yelp_id] = place
    name = place['name']
    coord = place['coordinates']
    coord_tuple = (coord['latitude'], coord['longitude'])
    return name, coord_tuple
def _yelp_id_to_tripadvisor(yelp_id):
    """Search TripAdvisor for the place matching a Yelp ID."""
    place_name, place_coord = _get_name_coord_from_yelp_id(yelp_id)
    return ta.search(place_coord, place_name)
def _yelp_ids_to_raw_tripadvisor(yelp_ids):
    """Map each Yelp ID to the raw list of matching TA places.

    Each value is the TA API's `data` list, e.g.
    [{'name': ..., 'location_id': ..., ...}, ...]; places with no match
    get an empty list. Calling this directly (instead of
    `yelp_ids_to_tripadvisor_ids`) can be useful for debugging.

    :param yelp_ids: an iterable of yelp IDs
    :return: {<yelp_id>: <data-received-from-TA>, ...}
    """
    return {yelp_id: _yelp_id_to_tripadvisor(yelp_id)['data']
            for yelp_id in yelp_ids}
def _get_yelp_to_ta_map_from_raw_ta(raw_ta):
"""
:param raw_ta: output of `yelp_ids_to_raw_tripadvisor`
:return: {<yelp_id>: <ta_id>}; only the first TA location is used.
"""
out = {}
for yelp_id, ta_res in raw_ta.items():
if len(ta_res) < 1:
out[yelp_id] = None
else:
out[yelp_id] = ta_res[0]['location_id']
# We take TA's top match so print out if we're dropping any places.
# We do this because disambiguating would add complexity.
if len(ta_res) > 1:
print('More than one match for yelp id, {}, dropping results:'.format(yelp_id))
for res in ta_res[1:]: print(' {}: {}'.format(res['location_id'], res['name']))
return out
def yelp_ids_to_tripadvisor_ids(yelp_ids):
    """Primary public TA function.

    :param yelp_ids: an iterable of yelp ids.
    :return: {<yelp_id>: <ta_id>, ...}; if no TA match, <ta_id> is None.
    """
    return _get_yelp_to_ta_map_from_raw_ta(
        _yelp_ids_to_raw_tripadvisor(yelp_ids))
def yelp_ids_to_wiki_pages(yelp_ids):
    """Primary public wikipedia function.

    :param yelp_ids: an iterable of yelp ids.
    :return: {<yelp_id>: <wiki_page_title>, ...}; None if no match.
    """
    return {yelp_id: _yelp_id_to_wiki_page(yelp_id) for yelp_id in yelp_ids}
def _yelp_id_to_wiki_page(yelp_id):
    """Search Wikipedia for the place matching a Yelp ID."""
    place_name, place_coord = _get_name_coord_from_yelp_id(yelp_id)
    return wp.search(place_coord, place_name)
def _write_crosswalk_to_db(yelp_id, provider_map):
    """Validate a crosswalk entry and merge it into the DB.

    Data existing at the given keys will be overwritten.

    :param yelp_id: for the place
    :param provider_map: is {'tripadvisor': <id-str>, ...}
    """
    # Assert 1) no typos, 2) we haven't added keys that this code may not
    # know how to handle.
    for key in provider_map:
        assert key in CROSSWALK_KEYS
    existing = _get_proxwalk_db().child(yelp_id).get().val() or {}
    existing.update(provider_map)
    _get_proxwalk_db().child(yelp_id).update(existing)
def write_to_db(yelp_to_ta=None, yelp_to_wiki=None, yelp_to_website=None):
    """Merge yelp->provider ID maps into the crosswalk DB.

    Existing data for a given (yelp_id, other_id) pair is overwritten.

    NOTE(review): 'website' is not in CROSSWALK_KEYS, so passing
    yelp_to_website trips the assert in _write_crosswalk_to_db -- confirm
    whether 'website' should be added to CROSSWALK_KEYS.

    :param yelp_to_ta: {<yelp_id>: <ta_id>, ...}; output of
        `yelp_ids_to_tripadvisor_ids`
    """
    crosswalk = {}
    sources = [('tripadvisor', yelp_to_ta),
               ('wikipedia', yelp_to_wiki),
               ('website', yelp_to_website)]
    for provider_key, id_map in sources:
        if not id_map:
            continue
        for yelp_id, other_id in id_map.items():
            crosswalk.setdefault(yelp_id, {})[provider_key] = other_id
    for yelp_id, provider_map in crosswalk.items():
        _write_crosswalk_to_db(yelp_id, provider_map)
def verify_yelp_ids_to_tripadvisor_ids(yelp_ids):
    """Return expanded Yelp->TA match data for human verification.

    1) Yelp -> TA places that matched, so a human can confirm they are the
    same place, and 2) Yelp places with no TA equivalent, so a human can
    find out why. It is recommended to `pprint` the results.

    :return: {'not_missing_ta': {'yelp': <yelp-place-obj>,
                                 'ta': [<ta-place-obj>, ...]},
              'missing_ta': [<yelp-place-obj>, ...]}
    """
    tas = _yelp_ids_to_raw_tripadvisor(yelp_ids)
    missing_out = []
    not_missing_out = []
    for yelp_id, ta in tas.items():
        # These should all be cached by the raw-TA pass above.
        # BUG FIX: dict.get(key, default) evaluates its default eagerly,
        # which issued a Yelp API request even on cache hits.
        yplace = _YELP_ID_TO_PLACE_CACHE.get(yelp_id)
        if yplace is None:
            yplace = yelp.resolve_with_key(yelp_id)
        yout = {'name': yplace['name'],
                'url': util.strip_url_params(yplace['url']),
                'loc': ', '.join(yplace['location']['display_address'])}
        if len(ta) < 1:
            missing_out.append(yout)
            continue
        tout = []
        for ta_place in ta:
            tout.append({'name': ta_place['name'],
                         'id': ta_place['location_id'],
                         'distance': ta_place['distance'],
                         'loc': ta_place['address_obj'].get('address_string', '')})
        val = {'yelp': yout, 'ta': tout}
        not_missing_out.append(val)
    return {'not_missing_ta': not_missing_out,
            'missing_ta': missing_out}
def write_all_places_ta(center, radius_km):
    """Crosswalk every place within radius_km of center to TripAdvisor
    and persist the mapping."""
    # TODO: code clean up. Is this needed?
    place_ids = geo.get_place_ids_in_radius(center, radius_km)
    ta_map = yelp_ids_to_tripadvisor_ids(place_ids)
    write_to_db(yelp_to_ta=ta_map)
def _yelp_id_to_website(yelp_id):
    """Look up a place's website via Google Places; None if not found."""
    place_name, place_coord = _get_name_coord_from_yelp_id(yelp_id)
    gplace = gplaces.search(place_name, place_coord)
    if gplace is None:
        return None
    gplace.get_details()
    return gplace.website
def _yelp_ids_to_websites(yelp_ids):
    """Map each Yelp ID to its website (list parallel to the input)."""
    return [_yelp_id_to_website(yelp_id) for yelp_id in yelp_ids]
| {
"repo_name": "liuche/prox-server",
"path": "scripts/prox_crosswalk.py",
"copies": "1",
"size": "9286",
"license": "mpl-2.0",
"hash": 7607750083080483000,
"line_mean": 34.041509434,
"line_max": 116,
"alpha_frac": 0.6178117596,
"autogenerated": false,
"ratio": 3.1987599035480536,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43165716631480533,
"avg_score": null,
"num_lines": null
} |
# a series of key value fields
class MiscDAO:
    """DAO for a single-document key/value store in the `misc` collection.

    The collection holds exactly one document; each getter lazily
    initializes its key with a default on first read. The repeated
    read/initialize/update boilerplate of the original is factored into
    the `_get`/`_set` helpers (behavior unchanged).
    """

    def __init__(self, db):
        self.db = db
        self.misc = self.db.misc
        # Ensure the singleton document exists.
        if not self.misc.find_one():
            self.misc.insert({})

    def _set(self, key, value):
        """Set a single field on the singleton document."""
        self.misc.update({}, {'$set': {key: value}})

    def _get(self, key, initializer):
        """Read `key`, first initializing it via `initializer` if absent."""
        varsDict = self.misc.find_one()
        if key not in varsDict:
            initializer()
            varsDict = self.misc.find_one()
        return varsDict[key]

    def getCurrPeriod(self):
        """Return the current period, defaulting to 40 on first read."""
        return self._get('currPeriod', lambda: self.setCurrPeriod(40))

    def setCurrPeriod(self, newPeriod):
        self._set('currPeriod', newPeriod)

    def incrementPeriod(self):
        """Advance the period: initializes to 41, wraps 10000 -> 40."""
        varsDict = self.misc.find_one()
        if 'currPeriod' not in varsDict:
            self.setCurrPeriod(41)
        elif varsDict['currPeriod'] == 10000:
            self.setCurrPeriod(40)
        else:
            self.setCurrPeriod(varsDict['currPeriod'] + 1)

    def getTimeUntilNextData(self):
        # NOTE(review): unlike the other getters this has no default and
        # raises KeyError if never set -- confirm callers set it first.
        return self.misc.find_one()['timeUntilNextData']

    def setTimeUntilNextData(self, timeUntilNextData):
        self._set('timeUntilNextData', timeUntilNextData)

    def beginFreePeriod(self):
        self._set('freePeriod', True)

    def endFreePeriod(self):
        self._set('freePeriod', False)

    def getFreePeriod(self):
        """Return the free-period flag, defaulting to True on first read."""
        return self._get('freePeriod', self.beginFreePeriod)

    def beginDemo(self):
        self._set('demoMode', True)

    def endDemo(self):
        self._set('demoMode', False)

    def getDemoMode(self):
        """Return the demo-mode flag, defaulting to True on first read."""
        return self._get('demoMode', self.beginDemo)

    def reset(self):
        """Wipe the document and recreate it empty."""
        self.misc.remove({})
        self.misc.insert({})
| {
"repo_name": "jac2130/BettingIsBelieving",
"path": "Betting/miscDAO.py",
"copies": "1",
"size": "2000",
"license": "mit",
"hash": 3576011046158988000,
"line_mean": 30.25,
"line_max": 80,
"alpha_frac": 0.582,
"autogenerated": false,
"ratio": 3.75234521575985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.483434521575985,
"avg_score": null,
"num_lines": null
} |
""" A series of mixins for reporting. """
from datetime import datetime as dt
from metapipe.templates import env
# Module-level progress-report template.
# NOTE(review): HtmlReportingMixin.render reads self.template, not this
# module global -- confirm how hosts are expected to bind it.
template = env.get_template('progress-report.tmpl.html')
class BaseReportingMixin(object):
    """ An abstract mixin for reporting. """

    # Timestamp format shared by all reporters.
    message_format = '%Y-%m-%d %H:%M:%S'

    def render(self, message, progress):
        """ Render the output of the report; subclasses override. """
        return None
class HtmlReportingMixin(BaseReportingMixin):
    """ A reporting mixin that writes progress to an HTML report.

    Host classes must supply `name`, `real_jobs` and `template` attributes
    (they are read in render() but not defined here).
    """
    # NOTE(review): class-level mutable list -- shared by ALL instances of
    # this mixin; confirm that is intended.
    messages = []
    # Path of the report file rewritten on every render() call.
    output = 'metapipe.report.html'
    def render(self, message, progress):
        # Newest message first.
        msg = Message(dt.strftime(dt.now(), self.message_format), message)
        self.messages.insert(0, msg)
        # NOTE(review): self.template is not set by this class; the module
        # defines a `template` global -- confirm which one is intended.
        with open(self.output, 'w') as f:
            f.write(self.template.render(
                name=self.name,
                messages=self.messages, progress=progress, jobs=sorted(self.real_jobs)))
class TextReportingMixin(BaseReportingMixin):
    """ A reporting mixin that prints any progress to the console. """

    def render(self, message, progress):
        """Print a '[NN%] timestamp message' line to stdout."""
        stamp = dt.strftime(dt.now(), self.message_format)
        print('[{}%] {} {}'.format(progress, stamp, message))
class Message(object):
    """A timestamped progress message."""

    def __init__(self, time, text):
        # Preformatted timestamp string and the message body.
        self.time = time
        self.text = text
| {
"repo_name": "Sonictherocketman/metapipe",
"path": "metapipe/models/reporting.py",
"copies": "2",
"size": "1313",
"license": "mit",
"hash": -6162293988671875000,
"line_mean": 28.8409090909,
"line_max": 88,
"alpha_frac": 0.6359482102,
"autogenerated": false,
"ratio": 3.942942942942943,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007718915405330114,
"num_lines": 44
} |
# A series of recursive functions:
# replaceString: replaces all instances of one type of separator with another type of separator
# countSiblings: count total siblings based on number of elves (each odd elf has 3 siblings while
# even elves have 1 sibling)
# extractor: extracts information from a string only if info is inside parentheses
# sameSums: return True if it is possible to divide a list of integers into two groups where
# the groups' sums are equal
def replaceSep(myString, sep1, sep2):
    """Recursively replace every occurrence of sep1 in myString with sep2.

    Parameters: myString = a string; sep1 = separator character to
    replace; sep2 = replacement separator. Returns the new string.
    """
    # Base case: nothing left to replace.
    if sep1 not in myString:
        return myString
    # Recurse on all but the last character, substituting whenever the
    # last character is the separator.
    last = myString[-1]
    if last == sep1:
        return replaceSep(myString[:-1] + sep2, sep1, sep2)
    return replaceSep(myString[:-1], sep1, sep2) + last
def countSiblings(numberElves):
    """Recursively total siblings over elves 1..numberElves.

    Odd-numbered elves contribute 3 siblings each; even-numbered elves
    contribute 1. Returns 0 for non-positive counts.
    """
    if numberElves < 1:
        return 0
    contribution = 1 if numberElves % 2 == 0 else 3
    return contribution + countSiblings(numberElves - 1)
def extractor(string):
    """Recursively trim a string down to its parenthesized portion.

    Returns the input unchanged when it contains no '(' or no ')', or
    when it is already fully wrapped in parentheses.
    """
    if "(" not in string or ")" not in string:
        return string
    first, last = string[0], string[-1]
    if first == "(" and last == ")":
        return string
    # Trim simultaneously from both ends while neither is a parenthesis.
    if first != "(" and last != ")":
        return extractor(string[1:-1])
    # One end reached a parenthesis: keep trimming only the other end.
    if first == "(":
        return extractor(string[:-1])
    return extractor(string[1:])
def sameSums(aList, sum1=0, sum2=0):
    """Return True if aList's integers can be split into two groups with
    equal sums (the groups also receive the starting totals sum1/sum2).

    Parameters: aList = list of integers; sum1/sum2 = amounts already
    assigned to group 1 / group 2 (default 0).

    Fixes over the previous version:
    - no longer mutates the caller's list (the old code pop()ed from it);
    - the old greedy add-to-smaller-sum heuristic could report False for
      splittable lists (e.g. [5, 5, 6, 4]); this uses an exact
      subset-sum check instead.
    """
    # Preserve the historical contract: a completely empty problem is False.
    if not aList and sum1 == 0 and sum2 == 0:
        return False
    total = sum1 + sum2 + sum(aList)
    # An odd overall total can never be split evenly.
    if total % 2 != 0:
        return False
    target = total // 2
    # Dynamic programming: all totals reachable by group 1 (which starts
    # at sum1); each element goes to group 1 or group 2.
    reachable = {sum1}
    for value in aList:
        reachable |= {partial + value for partial in reachable}
    return target in reachable
def tester():
    """Exercise the recursive helpers and print their results.

    FIX: Python 2 `print x` statements are a SyntaxError under Python 3;
    single-argument print(...) calls behave identically under both, so
    this module now runs under either interpreter.
    """
    print(replaceSep("hope*you*are*enjoying*the*course", "*", " "))
    print(replaceSep("Hi. I am having fun. Are you?", ".", "!!"))
    print(replaceSep("popopopopo", "p", "x"))
    print(replaceSep("xxxxx", "o", "b"))
    print(countSiblings(0))
    print(countSiblings(100))
    print(countSiblings(2))
    print(countSiblings(5))
    print(countSiblings(-9))
    print(extractor("(hello world)"))
    print(extractor("My country (of origin) is Canada"))
    print(extractor("I do not have any parenthesis"))
    print(sameSums([1, 7, 2, 4, 3, 6]))
    print(sameSums([10, 0]))
    print(sameSums([1, 9, 5, 9]))
    print(sameSums([2, 2, 3, 3, 4, 4, 1, 1]))
    print(sameSums([]))
    print(sameSums([9, 1, 10]))


if __name__ == '__main__':
    # Guard the demo run so importing this module has no side effects.
    tester()
| {
"repo_name": "caleighm/little-programs",
"path": "Assignment3-Recursion.py",
"copies": "1",
"size": "4547",
"license": "mit",
"hash": -3447854151058754000,
"line_mean": 39.7155963303,
"line_max": 100,
"alpha_frac": 0.6338244997,
"autogenerated": false,
"ratio": 3.859932088285229,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9920853070340752,
"avg_score": 0.01458070352889547,
"num_lines": 109
} |
"""A series of reward functions."""
import numpy as np
def desired_velocity(env, fail=False, edge_list=None):
    r"""Encourage proximity to a desired velocity.

    Measures how far the vehicles (optionally restricted to `edge_list`)
    deviate from the user-specified target velocity, mapped into [0, 1]:
    1 when every vehicle travels at the target, 0 at the worst allowable
    deviation or on failure. Formulated as a nonnegative mapping
    :math:`r: \\mathcal{S} \\times \\mathcal{A}
    \\rightarrow \\mathbb{R}_{\\geq 0}` so that early termination of
    rollouts (crashes) is naturally penalized.

    Parameters
    ----------
    env : flow.envs.Env
        the environment variable, which contains information on the current
        state of the system.
    fail : bool, optional
        specifies if any crash or other failure occurred in the system
    edge_list : list of str, optional
        list of edges the reward is computed over. If no edge_list is
        defined, the reward is computed over all edges

    Returns
    -------
    float
        reward value
    """
    vehicle = env.k.vehicle
    veh_ids = (vehicle.get_ids() if edge_list is None
               else vehicle.get_ids_by_edge(edge_list))
    vel = np.array(vehicle.get_speed(veh_ids))
    num_vehicles = len(veh_ids)
    if fail or num_vehicles == 0 or any(vel < -100):
        return 0.
    target_vel = env.env_params.additional_params['target_velocity']
    # Worst allowable deviation: every vehicle at speed zero.
    max_cost = np.linalg.norm([target_vel] * num_vehicles)
    cost = np.linalg.norm(vel - target_vel)
    # epsilon term (to deal with ZeroDivisionError exceptions)
    eps = np.finfo(np.float32).eps
    return max(max_cost - cost, 0) / (max_cost + eps)
def average_velocity(env, fail=False):
    """Return the mean speed of all vehicles in the network.

    Parameters
    ----------
    env : flow.envs.Env
        the environment variable, which contains information on the current
        state of the system.
    fail : bool, optional
        specifies if any crash or other failure occurred in the system

    Returns
    -------
    float
        reward value (0 on failure, invalid speeds, or empty network)
    """
    speeds = np.array(env.k.vehicle.get_speed(env.k.vehicle.get_ids()))
    invalid = fail or any(speeds < -100)
    if invalid or len(speeds) == 0:
        return 0.
    return np.mean(speeds)
def rl_forward_progress(env, gain=0.1):
    """Reward the RL vehicles for travelling forward (L1 norm of speeds).

    Parameters
    ----------
    env : flow.envs.Env
        the environment variable, which contains information on the current
        state of the system.
    gain : float
        specifies how much to reward the RL vehicles

    Returns
    -------
    float
        reward value
    """
    rl_speeds = env.k.vehicle.get_speed(env.k.vehicle.get_rl_ids())
    return gain * np.linalg.norm(rl_speeds, 1)
def boolean_action_penalty(discrete_actions, gain=1.0):
    """Penalize boolean actions that indicate a switch."""
    total_switches = np.sum(discrete_actions)
    return gain * total_switches
def min_delay(env):
    """Reward minimizing total delay relative to the top speed limit.

    Compares every vehicle's speed against the network's highest speed
    limit and maps the resulting total delay into [0, 1] (1 = no delay).

    Parameters
    ----------
    env : flow.envs.Env
        the environment variable, which contains information on the current
        state of the system.

    Returns
    -------
    float
        reward value
    """
    speeds = np.array(env.k.vehicle.get_speed(env.k.vehicle.get_ids()))
    # Keep only (near-)valid, nonnegative speed readings.
    speeds = speeds[speeds >= -1e-6]
    v_top = max(env.k.scenario.speed_limit(edge)
                for edge in env.k.scenario.get_edge_list())
    step = env.sim_step
    max_cost = step * sum(speeds.shape)
    # epsilon term (to deal with ZeroDivisionError exceptions)
    eps = np.finfo(np.float32).eps
    cost = step * sum((v_top - speeds) / v_top)
    return max((max_cost - cost) / (max_cost + eps), 0)
def avg_delay_specified_vehicles(env, veh_ids):
    """Calculate the average delay for a set of vehicles in the system.

    Parameters
    ----------
    env : flow.envs.Env
        the environment variable, which contains information on the current
        state of the system.
    veh_ids : list
        a list of the ids of the vehicles, for which we are calculating
        average delay

    Returns
    -------
    float
        average delay
    """
    # BUG FIX: the accumulator used to be named ``sum``, shadowing the
    # builtin; renamed to ``total_delay``.
    total_delay = 0
    # NOTE(review): this sums the delay of *every* vehicle on every edge,
    # not only the vehicles in veh_ids, then divides by len(veh_ids) —
    # looks intentional but worth confirming against callers.
    for edge in env.k.scenario.get_edge_list():
        for veh_id in env.k.vehicle.get_ids_by_edge(edge):
            v_top = env.k.scenario.speed_limit(edge)
            total_delay += (v_top - env.k.vehicle.get_speed(veh_id)) / v_top
    time_step = env.sim_step
    try:
        cost = time_step * total_delay
        return cost / len(veh_ids)
    except ZeroDivisionError:
        # No vehicles requested: define the average delay as zero.
        return 0
def min_delay_unscaled(env):
    """Return the average delay for all vehicles in the system.

    Parameters
    ----------
    env : flow.envs.Env
        the environment variable, which contains information on the current
        state of the system.

    Returns
    -------
    float
        reward value
    """
    speeds = np.array(env.k.vehicle.get_speed(env.k.vehicle.get_ids()))
    # Drop invalid (negative) speed readings.
    speeds = speeds[speeds >= -1e-6]
    max_speed = max(
        env.k.scenario.speed_limit(edge)
        for edge in env.k.scenario.get_edge_list())
    # epsilon term (to deal with ZeroDivisionError exceptions)
    eps = np.finfo(np.float32).eps
    total = env.sim_step * sum((max_speed - speeds) / max_speed)
    return total / (env.k.vehicle.num_vehicles + eps)
def penalize_standstill(env, gain=1):
    """Reward function that penalizes vehicle standstill.

    Is it better for this to be:
    a) penalize standstill in general?
    b) multiplicative based on time that vel=0?

    Parameters
    ----------
    env : flow.envs.Env
        the environment variable, which contains information on the current
        state of the system.
    gain : float
        multiplicative factor on the action penalty

    Returns
    -------
    float
        reward value
    """
    all_ids = env.k.vehicle.get_ids()
    speeds = np.array(env.k.vehicle.get_speed(all_ids))
    # Count vehicles whose speed is exactly zero.
    num_stopped = len(speeds[speeds == 0])
    return -gain * num_stopped
def penalize_near_standstill(env, thresh=0.3, gain=1):
    """Reward function which penalizes vehicles at a low velocity.

    This reward function is used to penalize vehicles below a
    specified threshold. This assists with discouraging RL from
    gamifying a scenario, which can result in standstill behavior
    or similarly bad, near-zero velocities.

    Parameters
    ----------
    env : flow.envs.Env
        the environment variable, which contains information on the current
    thresh : float
        the velocity threshold below which penalties are applied
    gain : float
        multiplicative factor on the action penalty
    """
    all_ids = env.k.vehicle.get_ids()
    speeds = np.array(env.k.vehicle.get_speed(all_ids))
    # Count every vehicle moving slower than the threshold.
    num_slow = len(speeds[speeds < thresh])
    return -gain * num_slow
def penalize_headway_variance(vehicles,
                              vids,
                              normalization=1,
                              penalty_gain=1,
                              penalty_exponent=1):
    """Reward function used to train rl vehicles to encourage large headways.

    Parameters
    ----------
    vehicles : flow.core.kernel.vehicle.KernelVehicle
        contains the state of all vehicles in the network (generally
        self.vehicles)
    vids : list of str
        list of ids for vehicles
    normalization : float, optional
        constant for scaling (down) the headways
    penalty_gain : float, optional
        sets the penalty for each vehicle between 0 and this value
    penalty_exponent : float, optional
        used to allow exponential punishing of smaller headways
    """
    scaled = [vehicles.get_headway(veh_id) / normalization
              for veh_id in vids]
    headways = penalty_gain * np.power(np.array(scaled), penalty_exponent)
    return -np.var(headways)
def punish_rl_lane_changes(env, penalty=1):
    """Penalize an RL vehicle performing lane changes.

    This reward function is meant to minimize the number of lane changes an RL
    vehicle performs.

    Parameters
    ----------
    env : flow.envs.Env
        the environment variable, which contains information on the current
        state of the system.
    penalty : float, optional
        penalty imposed on the reward function for any rl lane change action
    """
    # A vehicle whose last lane change happened on the current timestep is
    # considered to have just changed lanes.
    just_changed = [veh_id for veh_id in env.k.vehicle.get_rl_ids()
                    if env.k.vehicle.get_last_lc(veh_id) == env.timer]
    return -penalty * len(just_changed)
| {
"repo_name": "cathywu/flow",
"path": "flow/core/rewards.py",
"copies": "1",
"size": "9314",
"license": "mit",
"hash": -8708177784690541000,
"line_mean": 29.4379084967,
"line_max": 79,
"alpha_frac": 0.6400042946,
"autogenerated": false,
"ratio": 3.8203445447087776,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49603488393087775,
"avg_score": null,
"num_lines": null
} |
# A series of tests to check whether the python scripts of log-reconstruction perform correctly.
# Start the tests by writing
# py.test
# or
# python -m pytest
# in a terminal window on this folder
from os.path import dirname, abspath, join, exists
from os import walk, remove, rmdir, chdir, chmod, mkdir
def almost_equal(value1, value2, precision):
    """Return True when the two values differ by less than precision."""
    difference = abs(value1 - value2)
    return difference < precision
def get_lines(file_name):
    """Return all lines of the given file as a list of strings."""
    # The ``with`` statement already closes the file; the explicit
    # f.close() the original carried inside the block was redundant.
    with open(file_name, 'r') as f:
        return f.readlines()
def is_float(s):
    """Return True when ``s`` can be parsed as a float."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def compare_files(first_file, second_file, precision=1e-6):
    """
    Returns True if two files are equal and False otherwise. Any numbers are compared w.r.t. the given precision.
    Values of the "coco_version" are ignored.
    """
    lines1 = get_lines(first_file)
    lines2 = get_lines(second_file)
    if len(lines1) != len(lines2):
        return False
    for line1, line2 in zip(lines1, lines2):
        words1, words2 = line1.split(), line2.split()
        if len(words1) != len(words2):
            return False
        for word1, word2 in zip(words1, words2):
            if "coco_version" in word1 and "coco_version" in word2:
                # Versions may legitimately differ; skip the rest of the line.
                break
            if is_float(word1) and is_float(word2):
                if not almost_equal(float(word1), float(word2), precision):
                    return False
            elif word1 != word2:
                return False
    return True
def prepare_reconstruction_data(download_data=False):
    """
    Prepares the data needed for the tests (deletes the exdata folder) and, if download_data is True, downloads the
    test data from the internet.
    """
    import tarfile
    # BUG FIX: urllib.urlretrieve only exists on Python 2; fall back to
    # urllib.request on Python 3 so the test suite runs on both.
    try:
        from urllib import urlretrieve
    except ImportError:
        from urllib.request import urlretrieve
    cleanup_reconstruction_data()
    data_folder = abspath(join(dirname(__file__), 'test-data'))
    if download_data and (not exists(abspath(join(data_folder, 'archives-input'))) or not exists(
            abspath(join(data_folder, 'reconstruction')))):
        cleanup_reconstruction_data(True)
        chdir(abspath(dirname(__file__)))
        data_url = 'link-to-log-reconstruction-test-data.tgz'
        filename, headers = urlretrieve(data_url)
        tar_file = tarfile.open(filename)
        tar_file.extractall()
        # BUG FIX: close the archive instead of leaking the file handle.
        tar_file.close()
        for root, dirs, files in walk(data_folder, topdown=False):
            for name in files:
                # Change file permission so it can be deleted.
                # BUG FIX: the literal was ``0777``, which is Python-2-only
                # octal syntax (a SyntaxError on Python 3); 0o777 is the
                # same value on both interpreters.
                chmod(join(root, name), 0o777)
def _remove_tree(folder):
    """Recursively delete all files and sub-folders of folder, then folder itself."""
    for root, dirs, files in walk(folder, topdown=False):
        for name in files:
            remove(join(root, name))
        for name in dirs:
            rmdir(join(root, name))
    rmdir(folder)


def cleanup_reconstruction_data(delete_all=False):
    """
    Deletes the exdata folder. If delete_all is True, deletes also the test-data folder.
    """
    # The two branches used to duplicate the same bottom-up tree removal;
    # the shared logic now lives in _remove_tree.
    exdata = abspath(join(dirname(__file__), 'exdata'))
    if exists(exdata):
        _remove_tree(exdata)
    test_data = abspath(join(dirname(__file__), 'test-data'))
    if delete_all and exists(test_data):
        _remove_tree(test_data)
def run_log_reconstruct():
    """
    Tests whether log_reconstruct() from log_reconstruct.py works correctly for the given input.
    """
    from log_reconstruct import log_reconstruct
    from cocoprep.archive_load_data import parse_range
    base_path = dirname(__file__)
    log_reconstruct(abspath(join(base_path, 'test-data', 'archives-input')),
                    'reconstruction',
                    'RECONSTRUCTOR',
                    'A test for reconstruction of logger output',
                    parse_range('1-55'),
                    parse_range('1-10'),
                    parse_range('2,3,5,10,20,40'))
    for root, dirs, files in walk(abspath(join(base_path, 'exdata', 'reconstruction')), topdown=False):
        for name in files:
            # BUG FIX: the result of compare_files() used to be discarded,
            # so a reconstruction mismatch could never fail the test.
            assert compare_files(abspath(join(root, name)),
                                 abspath(join(root, name)).replace('exdata', 'test-data'))
def run_merge_lines():
    """
    Tests whether merge_lines_in() from merge_lines_in_info_files.py works correctly for the given input.
    """
    from merge_lines_in_info_files import merge_lines_in
    import shutil
    base_path = dirname(__file__)
    in_path = abspath(join(base_path, 'exdata', 'reconstruction'))
    out_path = abspath(join(base_path, 'exdata', 'reconstruction-merged'))
    mkdir(out_path)
    for root, dirs, files in walk(in_path, topdown=False):
        for name in files:
            if name.endswith('.info'):
                shutil.copyfile(abspath(join(in_path, name)), abspath(join(out_path, name)))
                merge_lines_in(abspath(join(root, name)), in_path, out_path)
    for root, dirs, files in walk(out_path, topdown=False):
        for name in files:
            # BUG FIX: the result of compare_files() used to be discarded,
            # so a merge mismatch could never fail the test.
            assert compare_files(abspath(join(root, name)),
                                 abspath(join(root, name)).replace('exdata', 'test-data'))
def test_all():
    """
    Runs a number of tests to check whether the python scripts of log-reconstruction perform correctly.
    The name of the method needs to start with "test_" so that it gets picked up by py.test.
    """
    # The steps share on-disk state (the exdata folder), so order matters:
    # prepare first, reconstruct before merging, and clean up last.
    prepare_reconstruction_data()
    run_log_reconstruct()
    run_merge_lines()
    cleanup_reconstruction_data()
# Allow running the whole suite directly, without py.test.
if __name__ == '__main__':
    test_all()
| {
"repo_name": "PyQuake/earthquakemodels",
"path": "code/cocobbob/coco/code-preprocessing/log-reconstruction/test_reconstruction.py",
"copies": "1",
"size": "5712",
"license": "bsd-3-clause",
"hash": 1947312365431254800,
"line_mean": 32.0173410405,
"line_max": 115,
"alpha_frac": 0.6073179272,
"autogenerated": false,
"ratio": 3.8464646464646464,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9949345119437774,
"avg_score": 0.0008874908453745116,
"num_lines": 173
} |
# AS_MSG result codes.  Negative values are client-side conditions; values
# of zero and above are returned by the server.
AS_ERR_TYPE_NOT_SUPPORTED = -7
AS_ERR_COMMAND_REJECTED = -6
AS_ERR_QUERY_TERMINATED = -5
AS_ERR_SCAN_TERMINATED = -4
AS_ERR_INVALID_NODE_ERROR = -3
AS_ERR_PARSE_ERROR = -2
AS_ERR_SERIALIZE_ERROR = -1
AS_ERR_OK = 0
AS_ERR_SERVER_ERROR = 1
AS_ERR_KEY_NOT_FOUND_ERROR = 2
AS_ERR_GENERATION_ERROR = 3
AS_ERR_PARAMETER_ERROR = 4
AS_ERR_KEY_EXISTS_ERROR = 5
AS_ERR_BIN_EXISTS_ERROR = 6
AS_ERR_CLUSTER_KEY_MISMATCH = 7
AS_ERR_SERVER_MEM_ERROR = 8
AS_ERR_KEY_BUSY = 14
AS_ERR_INVALID_NAMESPACE = 20
AS_ERR_BIN_NAME_TOO_LONG = 21
# Human-readable description for each result code above; used to build
# exception messages (see ASMSGProtocolException below in this module).
error_table = {
    AS_ERR_TYPE_NOT_SUPPORTED: "Type not supported",
    AS_ERR_COMMAND_REJECTED: "Command rejected",
    AS_ERR_QUERY_TERMINATED: "Query terminated",
    AS_ERR_SCAN_TERMINATED: "Scan terminated",
    AS_ERR_INVALID_NODE_ERROR: "Invalid node",
    AS_ERR_PARSE_ERROR: "Parse error",
    AS_ERR_SERIALIZE_ERROR: "Serialize error",
    AS_ERR_OK: "OK",
    AS_ERR_SERVER_ERROR: "Unspecified server error",
    AS_ERR_KEY_NOT_FOUND_ERROR: "Specified key could not be located (or has maybe expired)",
    AS_ERR_GENERATION_ERROR: "Invalid generation specified",
    AS_ERR_PARAMETER_ERROR: "Invalid parameter specified",
    AS_ERR_KEY_EXISTS_ERROR: "Specified key already exists",
    AS_ERR_BIN_EXISTS_ERROR: "Specified bin already exists",
    AS_ERR_CLUSTER_KEY_MISMATCH: "Cluster key does not match",
    AS_ERR_SERVER_MEM_ERROR: "Out of memory",
    AS_ERR_KEY_BUSY: "Key is busy (record update in progress, try again later)",
    AS_ERR_INVALID_NAMESPACE: "Invalid namespace",
    AS_ERR_BIN_NAME_TOO_LONG: "Bin names must be less than 14 bytes",
}
class ASMSGProtocolException(Exception):
    """Raised for a non-OK AS_MSG result code; keeps the code on the instance."""

    def __init__(self, result_code):
        # Unknown codes get a placeholder message instead of a KeyError.
        description = error_table.get(result_code, '??? [%d]' % result_code)
        super(ASMSGProtocolException, self).__init__(description)
        self.result_code = result_code
| {
"repo_name": "kaniini/aerospike-py",
"path": "aerospike_py/result_code.py",
"copies": "1",
"size": "1827",
"license": "isc",
"hash": -2259779566624375300,
"line_mean": 32.2181818182,
"line_max": 108,
"alpha_frac": 0.6951286262,
"autogenerated": false,
"ratio": 2.9804241435562804,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9172618889837099,
"avg_score": 0.0005867759838362581,
"num_lines": 55
} |
"""ASEr Setup Script."""
from setuptools import setup, find_packages
from codecs import open
from os import path
from os import listdir
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

# Generate a list of python scripts
scpts = ['bin/' + i for i in listdir(here + '/bin') if i.endswith('.py')]

setup(
    name='ASEr',
    version='0.3.0',
    description='Get ASE counts from BAMs or raw fastq data -- repackage of pipeline by Carlo Artieri ',
    long_description=long_description,
    url='https://github.com/MikeDacre/ASEr',
    author='Michael Dacre',
    author_email='mike.dacre@gmail.com',
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # BUG FIX: 'Development Status :: 3 - Beta' is not a valid trove
        # classifier (3 is Alpha); '4 - Beta' is the valid Beta entry.
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'Environment :: Console',
        # BUG FIX: 'Operating System :: Linux' is not a valid classifier;
        # the official list uses 'Operating System :: POSIX :: Linux'.
        'Operating System :: POSIX :: Linux',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    keywords='ASE allele-specific expression RNA-seq fastq BAM SAM SNP',
    install_requires=['pybedtools', 'pysam'],
    scripts=scpts,
    packages=['ASEr']
)
| {
"repo_name": "MikeDacre/ASEr",
"path": "setup.py",
"copies": "2",
"size": "1637",
"license": "mit",
"hash": 105676748971927060,
"line_mean": 30.4807692308,
"line_max": 104,
"alpha_frac": 0.6310323763,
"autogenerated": false,
"ratio": 3.771889400921659,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00018315018315018318,
"num_lines": 52
} |
# aserver: parallel webserver in one file
import sys
import os
import threading
import traceback
import html
import urllib
import json
import mimetypes
from multiprocessing import Pool
from http.server import BaseHTTPRequestHandler, HTTPStatus
from socketserver import ThreadingMixIn, TCPServer
# Template for error response bodies.  %(...)s placeholders are filled by
# BaseHTTPRequestHandler.send_error; the {} {} pair is filled once with the
# server/python version strings (see error_message_format below).
ERROR_MESSAGE = '''\
<html>
<head>
<title>%(code)d %(message)s</title>
</head>
<body>
<center>
<h1>%(code)d %(message)s</h1>
<p>{} {}</p>
</center>
<hr>
<pre>%(explain)s</pre>
</body>
</html>
'''
ERROR_CONTENT_TYPE = 'text/html'
# quick demo
# Template for normal demo pages: title and preformatted body text.
WEBPAGE_MESSAGE = '''\
<!DOCTYPE html>
<html>
<head>
<title>{title:s}</title>
</head>
<body>
<pre>
{body:s}
<pre>
</body>
</html>
'''
WEBPAGE_CONTENT_TYPE = 'text/html'
def test_worker_parse(qs):
# diagnostic info
ppid = os.getppid()
pid = os.getpid()
active_threads = threading.active_count()
current_thread = threading.current_thread()
# do some work
query_list = urllib.parse.parse_qsl(qs)
return (ppid, pid, active_threads, current_thread.name,), query_list
class AsyncHTTPRequestHandler(BaseHTTPRequestHandler):
    """HTTP request handler that can offload work to a process pool.

    Subclass and set ``the_pool`` to process requests in parallel; with
    ``the_pool`` left as None all work runs on the handler's own thread.
    Override ``construct_content`` to change what the handler serves.
    """

    # configuration
    server_version = 'fserver/0.5'
    sys_version = "Python/" + sys.version.split()[0]
    protocol_version = 'HTTP/1.0'
    error_message_format = ERROR_MESSAGE.format(server_version, sys_version)
    error_content_type = ERROR_CONTENT_TYPE
    traceback_log = True
    traceback_send = True

    # subclass and override to process requests in parallel
    the_pool = None

    # async calls
    def apply(self, fn, args):
        """Run fn(*args) inline, or on the worker pool when one is set."""
        if self.the_pool is None:
            return fn(*args)
        else:
            return self.the_pool.apply(fn, args)

    # These do not need to be overridden again; they just call send_head.
    def do_HEAD(self):
        # BUG FIX: send_head() takes a required ``data`` argument, so the
        # original zero-argument call raised TypeError on every HEAD
        # request.  Pass None, exactly as do_GET does.
        self.send_head(None)

    def do_GET(self):
        content = self.send_head(None)
        if content is not None:
            try:
                self.wfile.write(content)
            except Exception as e:
                self.log_error('Caught {} while writing response to GET.\n\n{}',
                               repr(e), traceback.format_exc())

    def do_POST(self):
        try:
            length = int(self.headers['Content-Length'])
            data = self.rfile.read(length)
        except Exception as e:
            self.log_error('Caught {} while reading post data.\n\n{}',
                           repr(e), traceback.format_exc())
            # data=False signals to construct_content that the body could
            # not be read.
            data = False
        content = self.send_head(data)
        if content is not None:
            try:
                self.wfile.write(content)
            except Exception as e:
                self.log_error('Caught {} while writing response to POST.\n\n{}',
                               repr(e), traceback.format_exc())

    # This does not need to be overridden again: it just calls self.construct_content().
    # Override that in subclasses to change the content produced by the handler.
    def send_head(self, data):
        """Send status line and headers; return the body bytes (or None)."""
        try:
            response, msg, headers, content = self.construct_content(data)
        except Exception as e:
            code = HTTPStatus.INTERNAL_SERVER_ERROR
            message = None
            explain = None
            if self.traceback_log or self.traceback_send:
                explain_traceback = ('Caught {} while preparing content.\n\n{}'
                                     .format(repr(e), traceback.format_exc()))
            if self.traceback_send:
                message = type(e).__name__
                explain = explain_traceback
            self.send_error(code, message, explain)
            if self.traceback_log:
                self.log_message('%s', explain_traceback)
            # send_error already writes the body, so we don't need to return anything
            return None
        else:
            self.send_response(response, msg)
            for k, v in headers:
                self.send_header(k, v)
            self.end_headers()
            return content

    # avoid sending unescaped strings that might break the console
    def log_request(self, code='-', size='-'):
        if isinstance(code, HTTPStatus):
            code = code.value
        self.log_message('%s %s', repr(self.requestline), str(code))

    def log_message(self, format, *args):
        sys.stderr.write("%s [%s] %s\n" % (self.address_string(), self.log_date_time_string(), format%args))

    # Uses urllib to parse the path of the current request.
    def translate_path(self):
        return urllib.parse.urlparse(self.path)

    # Override this to change the content produced by the handler. Returns a tuple of:
    #   response : the http response code, such as HTTPStatus.OK
    #   msg      : the message to send at the end of the http response line (or None for a default message)
    #   headers  : a list of tuples to send as MIME headers: (keyword, value)
    #     NOTE: do not put Content-Length in here, it is generated automatically in send_head
    #     NOTE: however, do put Content-Type in here if you want to send it!
    #   content  : the bytes you want to send as the body of this response
    def construct_content(self, data):
        pr = self.translate_path()
        # Deliberate crash endpoint for exercising the error path.
        assert pr.path != '/crash'
        if pr.path.startswith('/test'):
            # diagnostic info
            ppid = os.getppid()
            pid = os.getpid()
            active_threads = threading.active_count()
            current_thread = threading.current_thread()
            handler_diag = ppid, pid, active_threads, current_thread.name
            # parse the query (a)synchronously
            worker_diag, parsed = self.apply(test_worker_parse, (pr.query,))
            # spit it all out
            req_format = 'raw request:\n{}\n\ncommand: {}\npath: {}\nversion: {}\n\nheaders:\n{}'
            diag_format = '{}\n ppid: {:d}\n pid: {:d}\n active threads: {:d}\n current thread: {}'
            body_text = ('{}\n\n{}\n\n{}\n\npath: {}\nquery: {}'
                         .format(
                             req_format.format(self.raw_requestline, self.command, self.path, self.request_version, self.headers),
                             diag_format.format('Handler:', *handler_diag),
                             diag_format.format('Parser worker:', *worker_diag),
                             repr(pr.path),
                             repr(parsed),
                         ))
            response = HTTPStatus.OK
            msg = None
            headers = (
                ('Content-Type', WEBPAGE_CONTENT_TYPE,),
            )
            body = WEBPAGE_MESSAGE.format(title=html.escape('aserver test', quote=False),
                                          body=html.escape(body_text, quote=False))
            return response, msg, headers, bytes(body, encoding='ascii')
        else:
            response = HTTPStatus.NOT_FOUND
            msg = None
            headers = (
                ('Content-Type', WEBPAGE_CONTENT_TYPE,),
            )
            body = WEBPAGE_MESSAGE.format(title=html.escape('404 Nothing Here', quote=False),
                                          body=html.escape('There is no content on this server!', quote=False))
            return response, msg, headers, bytes(body, encoding='ascii')
class AsyncTCPServer(ThreadingMixIn, TCPServer):
    """TCP server that handles each connection on its own daemon thread."""
    # Allow quick restarts on the same port (skip TIME_WAIT).
    allow_reuse_address = True
    # Do not block interpreter exit on in-flight handler threads.
    daemon_threads = True
# quick and dirty, serve a directory
def serve_flat_directory(root):
if not os.path.isdir(root):
raise ValueError('FServer must serve a directory')
print('Serving files from {}...'.format(root))
content = {}
content_bytes = 0
for fname in os.listdir(root):
fpath = os.path.join(root, fname)
if os.path.isfile(fpath):
with open(fpath, 'rb') as f:
fcont = f.read()
ftype = mimetypes.guess_type(fname)
print(' {}, {}, {} bytes'.format(fname, str(ftype), len(fcont)))
content[fname.lstrip('/')] = (ftype, fcont)
content_bytes += len(fcont)
print('Found {} files, {} bytes total.'.format(len(content), content_bytes))
return content
class FServerRequestHandler(AsyncHTTPRequestHandler):
    """Serve an in-memory snapshot produced by serve_flat_directory."""

    # override this in a subclass to serve something
    the_content = {}

    def construct_content(self, data):
        pr = self.translate_path()
        path = pr.path.lstrip('/')
        # The site root falls back to index.html when one exists.
        if path == '' and 'index.html' in self.the_content:
            path = 'index.html'
        if path in self.the_content:
            # BUG FIX: the old index.html branch unpacked the stored
            # ((mime_type, encoding), bytes) pair as (ctype, cont), so the
            # Content-Type header was sent as a tuple.  Folding both cases
            # into one branch unpacks the nested tuple correctly.
            ctype_enc, cont = self.the_content[path]
            ctype, enc = ctype_enc
            response = HTTPStatus.OK
            msg = None
            headers = (
                ('Content-Type', ctype),
            )
            body = cont
        else:
            response = HTTPStatus.NOT_FOUND
            msg = None
            headers = (
                ('Content-Type', WEBPAGE_CONTENT_TYPE,),
            )
            body = bytes(
                WEBPAGE_MESSAGE.format(title=html.escape('404 Not Found', quote=False),
                                       body=html.escape('Nothing to see here.', quote=False)),
                encoding='ascii')
        return response, msg, headers, body
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--host', type=str, default='localhost',
                        help='server host')
    parser.add_argument('--port', type=int, default=8000,
                        help='server port')
    parser.add_argument('--workers', type=int, default=2,
                        help='number of worker processes to run in parallel')
    parser.add_argument('--serve', type=str, default='',
                        help='serve a directory')
    args = parser.parse_args()
    # The pool outlives the server; handler classes capture it as a class
    # attribute so every request thread can delegate work to it.
    with Pool(args.workers) as pool:
        print('{:d} worker processes.'.format(args.workers))
        if args.serve:
            # File-serving mode: snapshot the directory into memory once.
            class CustomHTTPRequestHandler(FServerRequestHandler):
                the_pool = pool
                the_content = serve_flat_directory(args.serve)
        else:
            # Demo mode: only the built-in /test endpoint is available.
            class CustomHTTPRequestHandler(AsyncHTTPRequestHandler):
                the_pool = pool
        with AsyncTCPServer((args.host, args.port,), CustomHTTPRequestHandler) as server:
            # Run the accept loop on a daemon thread and keep the main
            # thread alive until stdin is closed.
            server_thread = threading.Thread(target=server.serve_forever)
            server_thread.daemon = True
            server_thread.start()
            print('Server on thread: {}.'.format(server_thread.name))
            print('Close stdin to stop.')
            for line in sys.stdin:
                pass
            print('Closed stdin, stopping...')
            server.shutdown()
    print('Goodbye!')
| {
"repo_name": "billzorn/fpunreal",
"path": "titanfp/web/fserver.py",
"copies": "1",
"size": "10956",
"license": "mit",
"hash": -7359330903605460000,
"line_mean": 31.6071428571,
"line_max": 130,
"alpha_frac": 0.562248996,
"autogenerated": false,
"ratio": 4.1737142857142855,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5235963281714285,
"avg_score": null,
"num_lines": null
} |
# A server-side implementation of the redis protocol using gevent
from gevent.server import StreamServer
# Maximum number of bytes fetched per recv() call.
BUFSIZE = 1024
# Redis protocol line terminator.
CRLF = '\r\n'


def sock_readlines(socket):
    '''Iteratively read lines from the socket.
    A line is considered to be everything up
    to a CRLF'''
    remainder = ''
    while True:
        data = socket.recv(BUFSIZE)
        if len(data) == 0:
            break
        if CRLF in data:
            lines = data.split(CRLF)
            yield remainder + lines[0]
            for line in lines[1:-1]:
                yield line
            # Keep the trailing fragment (empty when data ended exactly on
            # a CRLF) until the next chunk completes it.  This also removes
            # the spurious empty line the original yielded after chunks
            # ending in CRLF.
            remainder = lines[-1]
        else:
            # BUG FIX: chunks containing no CRLF at all were silently
            # dropped by the original; buffer them instead.
            remainder += data
class RedisSocket:
    '''Wraps a socket object and provides helpers for writing
    replies in the Redis wire protocol.'''

    def __init__(self, sock):
        self.sock = sock

    def rep_line(self, line):
        '''Send a single line reply'''
        self.sock.send('+{}{}'.format(line, CRLF))

    def rep_integer(self, num):
        '''Send an integer reply'''
        self.sock.send(':{}{}'.format(num, CRLF))

    def rep_error(self, errmsg):
        '''Send an error message'''
        self.sock.send('-ERR {}{}'.format(errmsg, CRLF))

    def rep_bulk(self, data):
        '''Send a bulk reply'''
        header = '${}'.format(len(data))
        self.sock.send(header + CRLF + data + CRLF)

    def rep_multibulk(self, lst):
        '''Send a multibulk reply.
        lst is a list of strings and integers'''
        self.sock.send('*{}{}'.format(len(lst), CRLF))
        for item in lst:
            if isinstance(item, int):
                self.sock.send(':{}{}'.format(item, CRLF))
            else:
                self.sock.send('${}{}'.format(len(item), CRLF))
                self.sock.send(item + CRLF)
class RedisServer(StreamServer):
    """gevent StreamServer speaking the Redis protocol.

    ``commands`` maps lowercase command names to callables invoked as
    ``command(rdsock, *args)``.  The special keys 'connect' and 'close'
    (when present) are called with only the RedisSocket at connection
    open/close time.
    """
    def __init__(self, addr, commands):
        StreamServer.__init__(self, addr, self.handle_connection)
        self.commands = commands
    def handle_connection(self, sock, addr):
        """Read multibulk requests off the socket and dispatch commands."""
        # Parser state: accumulated arguments, expected argument count, and
        # the byte count announced for the next argument.
        cmdargs = []
        nargs = 0
        nbytes = 0
        rdsock = RedisSocket(sock)
        if 'connect' in self.commands:
            self.commands['connect'](rdsock)
        print("Connection from " + addr[0])
        for line in sock_readlines(sock):
            if len(line) == 0:
                # skip empty lines
                continue
            elif line[0] == '*' and nargs == 0:
                # first line is number of arguments
                nargs = int(line[1:])
            elif line[0] == '$' and nbytes == 0:
                # the number of bytes in the following argument
                # this isn't really necessary
                nbytes = int(line[1:])
            else:
                cmdargs.append(line)
                nbytes = 0
            if len(cmdargs) == nargs:
                # if we've reached the expected number of arguments
                # find and execute the command
                print(addr[0] + ' ' + ' '.join(cmdargs))
                nargs = 0
                command = self.commands.get(cmdargs[0].lower())
                if command is None:
                    rdsock.rep_error('No such command')
                else:
                    command(rdsock, *cmdargs[1:])
                cmdargs = []
        # If the client has closed the connection, run the close handler
        closecmd = self.commands.get('close')
        if closecmd is not None:
            closecmd(rdsock)
        sock.close()
| {
"repo_name": "zhemao/lerner",
"path": "server/redisd.py",
"copies": "1",
"size": "3538",
"license": "mit",
"hash": 6372231003742261000,
"line_mean": 30.5892857143,
"line_max": 72,
"alpha_frac": 0.5070661391,
"autogenerated": false,
"ratio": 4.283292978208232,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5290359117308232,
"avg_score": null,
"num_lines": null
} |
# A server that accepts connections on a socket, reads parameters from it,
# passes the parameters to a likelihood function, and sends the result back to
# the socket
import os
import socket
import struct
import math
# log-likelihood function (modify according to problem)
def log_likelihood(x):
    """Log of the sum of two unnormalized Gaussian bumps centred at 0 and 10."""
    bump_at_zero = math.exp(-0.5 * x * x)
    bump_at_ten = math.exp(-0.5 * (x - 10) * (x - 10))
    return math.log(bump_at_zero + bump_at_ten)
# definition of parameters and priors:
# name, boundary min, boundary max, prior center, prior scale
params = [('x', -100, 100, 0, 20)]
n_params = len(params)

# path to Unix domain socket
socket_addr = '/tmp/likelihood.socket'
if os.path.exists(socket_addr):
    os.unlink(socket_addr)

with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
    s.bind(socket_addr)
    while True:
        print('Waiting for connection')
        s.listen(1)
        conn, addr = s.accept()
        print('Connection accepted')
        with conn:
            # send parameter definition
            out_packet = struct.pack('I', n_params)
            for name, min_bound, max_bound, center, scale in params:
                out_packet += struct.pack('128sdddd', name.encode('ascii'),
                                          min_bound, max_bound, center, scale)
            conn.sendall(out_packet)
            while True:
                # Each request is n_params doubles; a short read means the
                # client is done.
                in_packet = conn.recv(n_params * 8)
                if len(in_packet) < n_params * 8:
                    print('No more parameters given, closing connection')
                    break
                # BUG FIX: the unpacked values were previously stored back
                # into ``params``, clobbering the module-level parameter
                # definitions for every subsequent connection.
                values = struct.unpack('{:d}d'.format(n_params), in_packet)
                llh = log_likelihood(*values)
                out_packet = struct.pack('d', llh)
                conn.sendall(out_packet)
| {
"repo_name": "JohnGBaker/ptmcmc",
"path": "python/socket_interface/likelihood_server.py",
"copies": "1",
"size": "1717",
"license": "apache-2.0",
"hash": 5425339746593966000,
"line_mean": 32.6666666667,
"line_max": 82,
"alpha_frac": 0.5917297612,
"autogenerated": false,
"ratio": 3.824053452115813,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9914602017946099,
"avg_score": 0.00023623907394283014,
"num_lines": 51
} |
"""A server that execute arbitrary Python code."""
# NOTE: This module is Python 2 compatible.
import argparse
import contextlib
import logging
import os
import os.path
import sys
import threading
from multiprocessing.connection import Listener
try:
import backport
except ImportError:
from . import backport
LOG = logging.getLogger('multiprocessing.server')
LOG.addHandler(logging.NullHandler())
LOG_FORMAT = '%(asctime)s %(threadName)s %(levelname)s %(name)s: %(message)s'
TIMEOUT = 5.0
def run_server(listener, semaphore):
    """Start the accept loop on a daemon thread and block until it signals exit."""
    exit_flag = threading.Event()
    accept_thread = threading.Thread(
        name='multiprocessing',
        target=server,
        args=(listener, semaphore, exit_flag),
    )
    accept_thread.daemon = True
    accept_thread.start()
    wait_forever(exit_flag)
    LOG.info('exit')
def wait_forever(event):
    """Block until the event is set.

    A plain event.wait() with no timeout is not interruptable, so poll
    with a long timeout instead.
    """
    while True:
        # wait() returns True as soon as the event becomes set.
        if event.wait(3600):
            return
def server(listener, semaphore, exit_flag):
    """Accept loop: spawn a daemon Worker thread for each incoming connection.

    ``semaphore`` bounds the number of concurrent workers; ``exit_flag``
    stops the loop once set (checked before each accept).
    """
    LOG.info('start server')
    worker_serial = 0
    # Shared across all workers: the exec/eval namespace they operate on.
    global_vars = {}
    while not exit_flag.is_set():
        conn = listener.accept()
        try:
            # NOTE(review): presumably this acquire raises backport.Timeout
            # after TIMEOUT seconds when all worker slots are busy — confirm
            # against the backport module's semaphore semantics.
            semaphore.acquire(TIMEOUT)
            LOG.debug('accept %r', listener.last_accepted)
            worker = Worker(
                closing(conn),
                semaphore,
                exit_flag,
                global_vars,
                listener.last_accepted,
            )
            worker_serial += 1
            worker_thread = threading.Thread(
                name='multiprocessing-%02d' % worker_serial,
                target=worker.run,
            )
            worker_thread.daemon = True
            worker_thread.start()
            conn = None # conn is transfered to the worker.
        except backport.Timeout:
            LOG.error('exceed concurrent workers limit')
        finally:
            # Close conn only when it is not transfered to the worker.
            if conn is not None:
                conn.close()
    LOG.info('exit')
class Worker(object):
VERSION_INFO = {'version_info': tuple(sys.version_info)}
OKAY = {}
ERROR_REQUIRE_COMMAND = {'error': 'require command'}
ERROR_REQUIRE_NAME = {'error': 'require name argument'}
ERROR_REQUIRE_VALUE = {'error': 'require value argument'}
ERROR_REQUIRE_SOURCE = {'error': 'require source argument'}
def __init__(
self, conn_manager, semaphore, exit_flag, global_vars, address):
self.conn_manager = conn_manager
self.semaphore = semaphore
self.exit_flag = exit_flag
self.global_vars = global_vars
if isinstance(address, tuple):
self.filename = '%s:%s' % (address)
else:
self.filename = str(address)
def run(self):
LOG.debug('start worker')
try:
with self.conn_manager as conn:
self.serve_forever(conn)
finally:
self.semaphore.release()
LOG.debug('exit')
def serve_forever(self, conn):
conn.send(self.VERSION_INFO)
while not self.exit_flag.is_set():
if self.process_request(conn):
break
def process_request(self, conn):
try:
request = conn.recv()
except EOFError:
return True
command = request.get('command')
LOG.debug('receive command %r', command)
if not command:
conn.send(self.ERROR_REQUIRE_COMMAND)
return
handler = {
'shutdown': self.do_shutdown,
'close': self.do_close,
'get': self.do_get,
'set': self.do_set,
'del': self.do_del,
'execute': self.do_execute,
'call': self.do_call,
}.get(command)
if handler is None:
LOG.warning('unknown command %r', command)
conn.send({'error': 'unknown command', 'command': command})
return
try:
return handler(conn, request)
except Exception as exc:
conn.send({'error': 'uncaught exception', 'exception': str(exc)})
raise
def do_shutdown(self, conn, _):
self.exit_flag.set()
conn.send(self.OKAY)
def do_close(self, conn, _):
conn.send(self.OKAY)
return True
def do_get(self, conn, request):
name = request.get('name')
if not name:
conn.send(self.ERROR_REQUIRE_NAME)
return
if name not in self.global_vars:
conn.send({'error': 'undefined variable', 'name': name})
return
conn.send({'name': name, 'value': self.global_vars[name]})
def do_set(self, conn, request):
name = request.get('name')
if not name:
conn.send(self.ERROR_REQUIRE_NAME)
return
if 'value' not in request:
conn.send(self.ERROR_REQUIRE_VALUE)
return
self.global_vars[name] = request['value']
conn.send(self.OKAY)
def do_del(self, conn, request):
name = request.get('name')
if not name:
conn.send(self.ERROR_REQUIRE_NAME)
return
if name not in self.global_vars:
conn.send({'error': 'undefined variable', 'name': name})
return
del self.global_vars[name]
conn.send(self.OKAY)
def do_execute(self, conn, request):
if 'source' not in request:
conn.send(self.ERROR_REQUIRE_SOURCE)
return
source = request['source']
filename = request.get('filename', self.filename)
try:
code = compile(source, filename, 'exec')
except SyntaxError as exc:
LOG.exception('syntax error in %s', filename)
conn.send({
'error': 'syntax error',
'filename': filename,
'exception': str(exc),
})
return
try:
exec(code, self.global_vars)
except Exception as exc:
LOG.exception('runtime error in exec %s', filename)
conn.send({
'error': 'runtime error',
'filename': filename,
'exception': str(exc),
})
return
conn.send(self.OKAY)
    def do_call(self, conn, request):
        """Call a named global with optional args/kwargs and reply its value."""
        name = request.get('name')
        if not name:
            conn.send(self.ERROR_REQUIRE_NAME)
            return
        if name not in self.global_vars:
            conn.send({'error': 'undefined function', 'name': name})
            return
        func = self.global_vars[name]
        args = request.get('args', ())
        kwargs = request.get('kwargs', {})
        try:
            value = func(*args, **kwargs)
        except Exception as exc:
            LOG.exception(
                'runtime error when calling %s(*%r, **%r)', name, args, kwargs)
            conn.send({
                'error': 'runtime error',
                'name': name,
                'exception': str(exc),
            })
            return
        conn.send({'name': name, 'value': value})
def closing(context_manager):
    """Return context_manager unchanged if it already supports the
    with-protocol, otherwise wrap it in contextlib.closing().

    Some Python 2 objects are not context managers themselves.
    """
    is_managed = (hasattr(context_manager, '__enter__') and
                  hasattr(context_manager, '__exit__'))
    if is_managed:
        return context_manager
    return contextlib.closing(context_manager)
def main(argv):
    """Parse CLI options, configure logging, and run the RPC server.

    Listens on either an AF_INET address or a socket path; the authkey is
    read from an environment variable (never the command line).
    """
    parser = argparse.ArgumentParser(description="""
            A server that executes arbitrary Python codes.
            """)
    parser.add_argument(
        '-v', '--verbose', action='count', default=0,
        help='verbose output')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument(
        '--listen-net', metavar=('ADDRESS', 'PORT'), nargs=2,
        help="""listen on AF_INET style address""")
    group.add_argument(
        '--listen-sock', metavar='PATH',
        help="""listen on AF_UNIX or AF_PIPE style path""")
    parser.add_argument(
        '--authkey-var', metavar='VAR', default='AUTHKEY',
        help="""read authkey from this environment variable
                (default %(default)s)""")
    parser.add_argument(
        '--max-workers', type=int, default=8,
        help="""set max concurrent workers""")
    args = parser.parse_args(argv[1:])
    # -v / -vv select INFO / DEBUG respectively.
    if args.verbose == 0:
        level = logging.WARNING
    elif args.verbose == 1:
        level = logging.INFO
    else:
        level = logging.DEBUG
    logging.basicConfig(level=level, format=LOG_FORMAT)
    if args.listen_net:
        address = (args.listen_net[0], int(args.listen_net[1]))
    else:
        address = args.listen_sock
    authkey = os.getenv(args.authkey_var)
    if authkey is None:
        # NOTE: parser.error() raises SystemExit(2) itself, so the
        # `return 2` below is effectively unreachable.
        parser.error('cannot read authkey from %s' % args.authkey_var)
        return 2
    # multiprocessing.connection requires a bytes authkey on Python 3.
    if sys.version_info.major > 2:
        authkey = bytes(authkey, encoding='ascii')
    # max-workers <= 0 means "no limit".
    if args.max_workers <= 0:
        semaphore = backport.UnlimitedSemaphore()
    else:
        semaphore = backport.BoundedSemaphore(args.max_workers)
    threading.current_thread().name = 'multiprocessing.server#main'
    with closing(Listener(address, authkey=authkey)) as listener:
        run_server(listener, semaphore)
    return 0
# Script entry point: process exit status comes from main().
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| {
"repo_name": "clchiou/garage",
"path": "py/garage/garage/multiprocessing/server.py",
"copies": "1",
"size": "9306",
"license": "mit",
"hash": 8215171534444339000,
"line_mean": 29.5114754098,
"line_max": 79,
"alpha_frac": 0.5622179239,
"autogenerated": false,
"ratio": 4.123172352680549,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5185390276580549,
"avg_score": null,
"num_lines": null
} |
"""A server that handles a connection with an OpenBCI board and serves that
data over both a UDP socket server and a WebSocket server.
Requires:
- pyserial
- asyncio
- websockets
"""
from __future__ import print_function
import argparse
try:
import cPickle as pickle
except ImportError:
import _pickle as pickle
import json
import sys
sys.path.append('..') # help python find cyton.py relative to scripts folder
from openbci import cyton as open_bci
import socket
# Command-line interface: serialization format, onboard filtering,
# UDP target, and the serial link to the OpenBCI board.
parser = argparse.ArgumentParser(
    description='Run a UDP server streaming OpenBCI data.')
parser.add_argument(
    '--json',
    action='store_true',
    help='Send JSON data rather than pickled Python objects.')
parser.add_argument(
    '--filter_data',
    action='store_true',
    help='Enable onboard filtering.')
parser.add_argument(
    '--host',
    help='The host to listen on.',
    default='127.0.0.1')
parser.add_argument(
    '--port',
    help='The port to listen on.',
    default='8888')
parser.add_argument(
    '--serial',
    help='The serial port to communicate with the OpenBCI board.',
    default='/dev/tty.usbmodem1421')
parser.add_argument(
    '--baud',
    help='The baud of the serial connection with the OpenBCI board.',
    default='115200')
class UDPServer(object):
    """Forwards OpenBCI samples over a UDP socket.

    Each sample is serialized either as JSON (channel data only) or as a
    pickled OpenBCISample object, depending on the `json` flag.
    """

    def __init__(self, ip, port, json):
        self.ip = ip
        self.port = port
        self.json = json
        print("Selecting raw UDP streaming. IP: ", self.ip, ", port: ", str(self.port))
        # Datagram socket; no connection handshake needed.
        self.server = socket.socket(
            socket.AF_INET, # Internet
            socket.SOCK_DGRAM)

    def send_data(self, data):
        """Send one datagram to the configured (ip, port)."""
        self.server.sendto(data, (self.ip, self.port))

    def handle_sample(self, sample):
        """Serialize and ship a single sample (streaming callback)."""
        if self.json:
            # Just send channel data.
            # BUGFIX: json.dumps returns str; Python 3's socket.sendto
            # requires a bytes-like payload, so encode explicitly.
            # (On Python 2, str.encode('utf-8') is a no-op for ASCII JSON.)
            self.send_data(json.dumps(sample.channel_data).encode('utf-8'))
        else:
            # Pack up and send the whole OpenBCISample object (already bytes).
            self.send_data(pickle.dumps(sample))
# Wire everything together: open the board's serial link, optionally
# enable onboard filtering, then stream samples into the UDP forwarder.
args = parser.parse_args()
obci = open_bci.OpenBCICyton(args.serial, int(args.baud))
if args.filter_data:
    obci.filter_data = True
sock_server = UDPServer(args.host, int(args.port), args.json)
obci.start_streaming(sock_server.handle_sample)
| {
"repo_name": "OpenBCI/OpenBCI_Python",
"path": "scripts/udp_server.py",
"copies": "1",
"size": "2211",
"license": "mit",
"hash": -8848139854823911000,
"line_mean": 26.6375,
"line_max": 87,
"alpha_frac": 0.6594301221,
"autogenerated": false,
"ratio": 3.618657937806874,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9771268810077756,
"avg_score": 0.00136384996582365,
"num_lines": 80
} |
"""A server that handles a connection with an OpenBCI board and serves that
data over both a UDP socket server and a WebSocket server.
Requires:
- pyserial
- asyncio
- websockets
"""
import argparse
import cPickle as pickle
import json
import sys;
sys.path.append(
'..') # help python find open_bci_v3.py relative to scripts folder
import open_bci_v3 as open_bci
import socket
# Command-line interface: serialization format, onboard filtering,
# UDP target, and the serial link to the OpenBCI board.
parser = argparse.ArgumentParser(
    description='Run a UDP server streaming OpenBCI data.')
parser.add_argument(
    '--json',
    action='store_true',
    help='Send JSON data rather than pickled Python objects.')
parser.add_argument(
    '--filter_data',
    action='store_true',
    help='Enable onboard filtering.')
parser.add_argument(
    '--host',
    help='The host to listen on.',
    default='127.0.0.1')
parser.add_argument(
    '--port',
    help='The port to listen on.',
    default='8888')
parser.add_argument(
    '--serial',
    help='The serial port to communicate with the OpenBCI board.',
    default='/dev/tty.usbmodem1421')
parser.add_argument(
    '--baud',
    help='The baud of the serial connection with the OpenBCI board.',
    default='115200')
class UDPServer(object):
    """Forwards OpenBCI samples over a UDP socket (Python 2 variant).

    Serializes each sample either as JSON (channel data only) or as a
    pickled OpenBCISample object, depending on the `json` flag.
    """
    def __init__(self, ip, port, json):
        self.ip = ip
        self.port = port
        self.json = json
        print "Selecting raw UDP streaming. IP: ", self.ip, ", port: ", str(
            self.port)
        # Datagram socket; no connection handshake needed.
        self.server = socket.socket(
            socket.AF_INET, # Internet
            socket.SOCK_DGRAM)
    def send_data(self, data):
        # One datagram per call to the configured (ip, port).
        self.server.sendto(data, (self.ip, self.port))
    def handle_sample(self, sample):
        # Streaming callback: serialize and ship a single sample.
        if self.json:
            # Just send channel data.
            self.send_data(json.dumps(sample.channel_data))
        else:
            # Pack up and send the whole OpenBCISample object.
            self.send_data(pickle.dumps(sample))
# Wire everything together: open the board's serial link, optionally
# enable onboard filtering, then stream samples into the UDP forwarder.
args = parser.parse_args()
obci = open_bci.OpenBCIBoard(args.serial, int(args.baud))
if args.filter_data:
    obci.filter_data = True
sock_server = UDPServer(args.host, int(args.port), args.json)
obci.start_streaming(sock_server.handle_sample)
| {
"repo_name": "neurotechuoft/Wall-EEG",
"path": "Code/OpenBCIPy/src/scripts/udp_server.py",
"copies": "1",
"size": "2130",
"license": "mit",
"hash": -6893668619244504000,
"line_mean": 27.0263157895,
"line_max": 76,
"alpha_frac": 0.6507042254,
"autogenerated": false,
"ratio": 3.5618729096989967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9700273189751758,
"avg_score": 0.0024607890694478126,
"num_lines": 76
} |
"""A server that handles a connection with an OpenBCI board and serves that
data over both a UDP socket server and a WebSocket server.
Requires:
- pyserial
- asyncio
- websockets
"""
import cPickle as pickle
import json
import socket
import plugin_interface as plugintypes
# class PluginPrint(IPlugin):
# # args: passed by command line
# def activate(self, args):
# print "Print activated"
# # tell outside world that init went good
# return True
# def deactivate(self):
# print "Print Deactivated"
# def show_help(self):
# print "I do not need any parameter, just printing stuff."
# # called with each new sample
# def __call__(self, sample):
# sample_string = "ID: %f\n%s\n%s" %(sample.id, str(sample.channel_data)[1:-1], str(sample.aux_data)[1:-1])
# print "---------------------------------"
# print sample_string
# print "---------------------------------"
# # DEBBUGING
# # try:
# # sample_string.decode('ascii')
# # except UnicodeDecodeError:
# # print "Not a ascii-encoded unicode string"
# # else:
# # print sample_string
class UDPServer(plugintypes.IPluginExtended):
def __init__(self, ip='localhost', port=8888):
self.ip = ip
self.port = port
self.server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def activate(self):
print "udp_server plugin"
print self.args
if len(self.args) > 0:
self.ip = self.args[0]
if len(self.args) > 1:
self.port = int(self.args[1])
# init network
print "Selecting raw UDP streaming. IP: ", self.ip, ", port: ", str(
self.port)
self.server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print "Server started on port " + str(self.port)
def __call__(self, sample):
self.send_data(json.dumps(sample.channel_data))
def send_data(self, data):
self.server.sendto(data, (self.ip, self.port))
# From IPlugin: close sockets, send message to client
def deactivate(self):
self.server.close();
def show_help(self):
print """Optional arguments: [ip [port]]
\t ip: target IP address (default: 'localhost')
\t port: target port (default: 12345)"""
| {
"repo_name": "neurotechuoft/Wall-EEG",
"path": "Code/OpenBCIPy/src/plugins/udp_server.py",
"copies": "1",
"size": "2293",
"license": "mit",
"hash": 8255637031738700000,
"line_mean": 26.9634146341,
"line_max": 111,
"alpha_frac": 0.5979066725,
"autogenerated": false,
"ratio": 3.5550387596899227,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4652945432189922,
"avg_score": null,
"num_lines": null
} |
"""A server to send commands to the DDS Comb.
Author: Sepehr Ebadi
"""
import inspect
import zmq
import json
import socket
from subprocess import Popen, PIPE
from rampage.zmq_server import RequestProcessor
from rampage.zmq_server import ClientForServer
class DDSCombServer(RequestProcessor):
    """ZMQ request server that relays commands to the DDS Comb over UDP.

    On startup it locates the device by scanning the local 192.168.0.0/24
    range and matching ARP entries against the device's MAC address.
    """
    def __init__(self, bind_port):
        RequestProcessor.__init__(self, bind_port)
        # Find the IP of the DDS Comb from its MAC address by probing each
        # host in the /24 and inspecting the ARP cache (via `arp -n`).
        DDS_MAC_ADDRESS = '00:90:c2:ee:a9:8f'
        DDS_PORT = 37829
        DDS_IP = None
        for a in range(255):
            IP = '192.168.0.' + str(a)
            pid = Popen(["arp", "-n", IP], stdout=PIPE)
            s = pid.communicate()[0]
            if DDS_MAC_ADDRESS in s:
                DDS_IP = IP
                break  # device found; stop scanning
        if DDS_IP is None:
            # BUGFIX: the original prompted on *every* non-matching address
            # (up to 255 times) and could overwrite an earlier match.
            # Prompt exactly once, only if the scan found nothing.
            DDS_IP = raw_input('MAC ID not found, enter IP manually: ')
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.socket.connect((DDS_IP, DDS_PORT))
    def set_freq(self, mesg):
        """Set channel frequency (int, 30000..175000000 per device protocol)."""
        freq, ch = mesg['freq'], mesg['ch']
        if (not (ch in ['A','B','C','D','a','b','c','d'])):
            return {'success': 0}
        if (not (type(freq) == int and (freq<=175000000) and (freq>=30000))):
            return {'success': 0}
        ch = ch.capitalize()
        self.socket.send('F'+ ch + ' ' + str(freq) + ' ')
        return {'success': 1}
    def set_amp(self, mesg):
        """Set channel amplitude (int, 0..100)."""
        amp, ch = mesg['amp'], mesg['ch']
        if (not (ch in ['A','B','C','D','a','b','c','d'])):
            return {'success': 0}
        if (not (type(amp) == int and (amp<=100) and (amp>=0))):
            return {'success': 0}
        ch = ch.capitalize()
        self.socket.send('A'+ ch + ' ' + str(amp) + ' ')
        return {'success': 1}
    def set_phase(self, mesg):
        """Set channel phase (int degrees, 0..359)."""
        phase, ch = mesg['phase'], mesg['ch']
        if (not (ch in ['A','B','C','D','a','b','c','d'])):
            return {'success': 0}
        if (not (type(phase) == int and (phase<=359) and (phase>=0))):
            return {'success': 0}
        ch = ch.capitalize()
        self.socket.send('P'+ ch + ' ' + str(phase) + ' ')
        return {'success': 1}
    def sweep_freq(self, mesg):
        """Sweep a channel between low_freq and high_freq.

        step_time is quantized down to a multiple of 4 (device requirement,
        presumably -- confirm against firmware docs).
        """
        low_freq, high_freq, step_size, step_time, ch = \
            mesg['low_freq'], mesg['high_freq'], mesg['step_size'], mesg['step_time'], mesg['ch']
        if (not (ch in ['A','B','C','D','a','b','c','d'])):
            return {'success': 0}
        if (not (type(low_freq) == int and (low_freq<=175000000) and (low_freq>=30000))):
            return {'success': 0}
        if (not (type(high_freq) == int and (high_freq<=175000000) and (high_freq>=30000))):
            return {'success': 0}
        if (high_freq < low_freq):
            return {'success': 0}
        if (not (type(step_size) == int and (step_size<=175000000) and (step_size>=1))):
            return {'success': 0}
        if (not (type(step_time) == int and (step_time<=65000) and (step_time>=4))):
            return {'success': 0}
        ch = ch.capitalize()
        step_time = int(float(step_time)/4) * 4
        self.socket.send('S' + ch + ' ' + str(high_freq) + ' ' + str(low_freq) + ' ' + str(step_size) + ' ' + str(step_time) + ' ')
        return {'success': 1}
    def ramp_amp(self, mesg):
        """Ramp a channel's amplitude over ramp_time (int, 0..255)."""
        ramp_time, ch = mesg['ramp_time'], mesg['ch']
        if (not (ch in ['A','B','C','D','a','b','c','d'])):
            return {'success': 0}
        if (not (type(ramp_time) == int and (ramp_time<=255) and (ramp_time>=0))):
            return {'success': 0}
        ch = ch.capitalize()
        self.socket.send('U' + ch + ' ' + str(ramp_time) + ' ')
        return {'success': 1}
    def reset_phase(self, mesg):
        """Reset device phase accumulators."""
        self.socket.send('R')
        return {'success': 1}
    def version(self, mesg):
        """Request the firmware version (reply is not read here)."""
        self.socket.send('V')
        return {'success': 1}
    def heartbeat(self, mesg):
        """Ping the device and return its raw reply (blocking recv)."""
        self.socket.send('H')
        reply = self.socket.recv(40)
        return {'reply': reply}
def main():
    """Start the DDS comb command server on port 5555 and serve forever."""
    s = DDSCombServer(5555)
    s._run()
if __name__ == '__main__':
    main()
| {
"repo_name": "shreyaspotnis/rampage",
"path": "rampage/daq/dds_server.py",
"copies": "1",
"size": "4208",
"license": "mit",
"hash": 8876833347157838000,
"line_mean": 28.4265734266,
"line_max": 131,
"alpha_frac": 0.506891635,
"autogenerated": false,
"ratio": 3.24191063174114,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9202647610870012,
"avg_score": 0.009230931174225715,
"num_lines": 143
} |
"""A server to send commands to Windfreak SynthHD.
Based on Sepher's code
Author: Ramon Ramos
"""
import inspect
import zmq
import json
import socket
from subprocess import Popen, PIPE
import serial
class RequestProcessor():
    """Dispatches JSON requests from a ZMQ REP socket to public methods.

    Every bound method whose name does not start with an underscore is
    registered as a handler; a request is a JSON object of the form
    {"name": <method name>, "mesg": <payload>}.
    """
    def __init__(self, bind_port):
        members = inspect.getmembers(self, predicate=inspect.ismethod)
        self._messages_dict = {}
        for func_name, func in members:
            # BUGFIX: was `func_name[0] is not '_'` -- identity comparison
            # with a string literal depends on CPython interning and emits
            # SyntaxWarning on Python >= 3.8; use equality instead.
            if func_name[0] != '_':
                self._messages_dict[func_name] = func
        bind_addr = 'tcp://*:' + str(bind_port)
        print(bind_addr)
        self._context = zmq.Context(1)
        self._server_sock = self._context.socket(zmq.REP)
        self._server_sock.bind(bind_addr)
    def _run(self):
        """Serve requests until interrupted with Ctrl-C."""
        done = False
        while not done:
            try:
                # NOTE(review): recv() yields bytes on Python 3; the string
                # concatenations below assume str -- confirm target version.
                recv_string = self._server_sock.recv()
                print('Message Received:\t' + recv_string)
                request = json.loads(recv_string)
                if request['name'] not in self._messages_dict:
                    reply = {'error': 'Request name not valid.'}
                else:
                    func = self._messages_dict[request['name']]
                    reply = func(request['mesg'])
                send_string = json.dumps(reply)
                print('Sending:\t' + send_string)
                self._server_sock.send(send_string)
            except KeyboardInterrupt:
                done = True
                print('Killed using Ctrl C')
class ClientForServer(object):
    """Client proxy mirroring a server's public methods.

    For every public method of `Server`, an attribute of the same name is
    attached that sends {"name": ..., "mesg": ...} to the server endpoint
    and returns the decoded JSON reply.
    """
    def __init__(self, Server, server_endpoint):
        self.server_endpoint = server_endpoint
        members = inspect.getmembers(Server, predicate=inspect.ismethod)
        for func_name, func in members:
            # BUGFIX: was `func_name[0] is not '_'` -- identity comparison
            # with a string literal (interning-dependent); use equality.
            if func_name[0] != '_':
                self._add_request_name(func_name)
    def _add_request_name(self, req_name):
        """Attach a proxy method named req_name to this instance."""
        def func(mesg):
            request_dict = {'mesg': mesg, 'name': req_name}
            return self._send_request_basic(request_dict, self.server_endpoint)
        func.__name__ = req_name
        setattr(self, func.__name__, func)
    def _send_request_basic(self, request_dict, server_endpoint):
        """One-shot REQ/REP round trip: connect, send JSON, await reply."""
        req_string = json.dumps(request_dict)
        context = zmq.Context(1)
        client = context.socket(zmq.REQ)
        client.connect(server_endpoint)
        client.send(req_string)
        reply = client.recv()
        client.close()
        context.term()
        reply_dict = json.loads(reply)
        return reply_dict
# Later try a way not to hard-code this port
# Serial port where the SynthHD is attached (Windows-style device name).
com_port = 'COM3'
class SynthHDSerial(RequestProcessor):
    """Bridges ZMQ requests to a Windfreak SynthHD over a serial port.

    NOTE(review): serv.write() is given str; pyserial on Python 3 requires
    bytes -- confirm this runs under Python 2 / an older pyserial.
    """
    def __init__(self, bind_port):
        # 38400 baud, even parity, non-blocking reads (timeout=0).
        self.serv = serial.Serial(com_port, 38400, timeout=0, parity = serial.PARITY_EVEN)
        RequestProcessor.__init__(self, bind_port)
    def set_freq(self, mesg):
        """Set channel frequency (float, 100.0..13000.0; units per device protocol)."""
        freq, ch = mesg['freq'], mesg['ch']
        if (not (ch in ['0','1',0,1])):
            return {'success': 0}
        if (not (type(freq) == float and (freq<=13000.0) and (freq>=100.0))):
            return {'success': 0}
        self.serv.write('C'+ str(ch) + 'f' + str(freq))
        return {'success': 1}
    def set_amp(self, mesg):
        """Set channel amplitude (int, 0..45000)."""
        amp, ch = mesg['amp'], mesg['ch']
        if (not (ch in ['0','1',0,1])):
            return {'success': 0}
        if (not (type(amp) == int and (amp<=45000) and (amp>=0))):
            return {'success': 0}
        self.serv.write('C'+ str(ch) + 'a' + str(amp))
        return {'success': 1}
    def set_phase(self, mesg):
        """Set channel phase (int degrees, 0..359)."""
        phase, ch = mesg['phase'], mesg['ch']
        if (not (ch in ['0','1',0,1])):
            return {'success': 0}
        if (not (type(phase) == int and (phase<=359) and (phase>=0))):
            return {'success': 0}
        self.serv.write('C'+ str(ch) + '~' + str(phase))
        return {'success': 1}
    def help(self):
        """Request the device's help text and return the raw reply."""
        str_send = '?'
        self.serv.write(str_send)
        reply = self.serv.read(2000)
        return reply
def main():
    """Start the SynthHD bridge server on port 5556 and serve forever."""
    mw = SynthHDSerial(5556)
    mw._run()
if __name__ == '__main__':
    main()
| {
"repo_name": "shreyaspotnis/rampage",
"path": "rampage/daq/mw_server.py",
"copies": "1",
"size": "4057",
"license": "mit",
"hash": -1331603623579921200,
"line_mean": 29.5037593985,
"line_max": 90,
"alpha_frac": 0.5439980281,
"autogenerated": false,
"ratio": 3.6353046594982077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46793026875982074,
"avg_score": null,
"num_lines": null
} |
"""A server wrapper around dealer operations."""
from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
import logging
from .bank import Bank
# Maps an RPC query name to (callback extracting a value from a Bank,
# default returned when the client has no current bank).
QUERIES = {
    "is_done": (lambda bank: bank.is_done(), True),
    "get_output_path": (lambda bank: bank.output_path, None),
    "get_header": (lambda bank: bank.header, ""),
    "get_feature": (lambda bank: bank.feature, ""),
}
class BankServer(SimpleXMLRPCServer, object):
    """RPC command server.

    Deals scenarios from a set of Banks to clients; each client is lazily
    assigned one un-finished, un-assigned bank at a time.
    (Python 2 code: uses iteritems()/itervalues().)
    """
    # Allow quick restarts without waiting out TIME_WAIT sockets.
    allow_reuse_address = True
    def __init__(self, host, port, banks):
        super(BankServer, self).__init__(
            (host, port),
            SimpleXMLRPCRequestHandler,
            logRequests = False)
        # One Bank per supplied path.
        self.__banks = [Bank(path) for path in banks]
        # Maps client id -> the Bank currently assigned to it.
        self.__assigned = {}
        self.__log = logging.getLogger(__name__)
        self.register_function(self.is_fresh, "is_fresh")
        self.register_function(self.get_next_scenario, "get_next_scenario")
        # Register one read-only query endpoint per QUERIES entry.
        for (name, (callback, default)) in QUERIES.iteritems():
            self.register_function(self.__query_bank(callback, default), name)
    def serve_forever(self, poll_interval = 0.5):
        """Start serving."""
        (address, port) = self.server_address
        self.__log.info("Server started on %s:%d", address, port)
        super(BankServer, self).serve_forever(poll_interval)
    def shutdown(self):
        """Stop serving."""
        self.__log.info("Stopped serving")
        super(BankServer, self).shutdown()
    def is_fresh(self, client):
        """Returns whether the current bank is fresh.
        This functions always returns True as long as the client was not assigned a bank.
        """
        if client not in self.__assigned:
            return True
        else:
            query = self.__query_bank(lambda bank: bank.is_fresh(), False)
            return query(client)
    def get_next_scenario(self, client):
        """Returns the next scenario to deal to the client.
        This functions also assigns the bank to the client.
        """
        bank = self.__get_current_bank(client)
        if not bank:
            self.__log.debug("No more scenarios for '%s'", client)
            return None
        if client not in self.__assigned:
            self.__log.info("Assigning '%s' to '%s'", bank.feature.splitlines()[0], client)
            self.__assigned[client] = bank
        scenario = bank.get_next_scenario()
        self.__log.info("Sent '%s' to '%s'", scenario.lstrip(), client)
        return scenario
    def __query_bank(self, get_value, default):
        """Returns a callback to query the current bank's property."""
        def query(client):
            # pylint: disable=missing-docstring
            bank = self.__get_current_bank(client)
            if not bank:
                return default
            return get_value(bank)
        return query
    def __get_current_bank(self, client):
        """Returns the first bank that isn't done yet, None otherwise."""
        # If client was already assigned a bank, check it.
        if client in self.__assigned:
            bank = self.__assigned[client]
            # If bank isn't done, deal from it.
            if not bank.is_done():
                return bank
            # Bank is done. Unassign it and look for the next one.
            self.__log.info("Unassigning '%s' from '%s'", bank.feature.splitlines()[0], client)
            self.__assigned.pop(client)
        for bank in self.__banks:
            # Skip finished banks.
            if bank.is_done():
                continue
            # Skip banks which were already assigned.
            if bank in self.__assigned.itervalues():
                continue
            # Found a bank.
            return bank
        # No bank was found.
        return None
| {
"repo_name": "nivbend/bdd_bot",
"path": "bddbot/server.py",
"copies": "1",
"size": "3820",
"license": "mit",
"hash": 2228595925238183200,
"line_mean": 32.8053097345,
"line_max": 95,
"alpha_frac": 0.5814136126,
"autogenerated": false,
"ratio": 4.263392857142857,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013694029560709052,
"num_lines": 113
} |
"""A service to run a pomodoro timer."""
from datetime import datetime
from .workblock import WorkBlock
from twisted.internet import task, defer, reactor
from twisted.python import log
class Pomodoro(WorkBlock):
    """A WorkBlock specialization for pomodoro sessions.

    Currently a plain marker subclass; the timing logic lives in PomoTimer.
    """
class PomoTimer(object):
    """A pomodoro timer built on Twisted deferreds.

    start() schedules finish() after `duration` minutes and returns a
    deferred (`on_finished`) that fires when the pomodoro completes.
    """
    # Class attribute so tests can substitute a fake reactor.
    reactor = reactor
    def __init__(self, options=None): # noqa
        self._options = options or {}
        # Durations are in minutes (converted to seconds when scheduling).
        self.duration = self._options.get("POMO_DURATION", 30)
        self.idle_duration = self._options.get("POMO_IDLE_DURATION", 5)
        self.current_deferred = None
        self.started_at = None
        self.paused_elapsed = None
        self.paused_at = None
        self.unpaused_at = None
        self.on_finished = defer.Deferred(self.cancel)
    def start(self):
        """Start a pomodoro."""
        log.msg("Starting timer.")
        if self.current_deferred:
            raise ValueError("We are already started.")
        self.reset()
        self.current_deferred = task.deferLater(
            self.reactor,
            self.duration * 60,
            self.finish
        ).addErrback(self._on_err)
        self.started_at = datetime.now()
        return self.on_finished
    def stop(self, reason):
        """Stop a pomodoro."""
        log.msg("Stopping a timer.")
        return self.finish(reason)
    def cancel(self, deferred):
        """Canceller for on_finished: fire it, then cancel the timer."""
        deferred.callback("Cancelled.")
        # Clear current_deferred before cancelling so _on_err does not see
        # an active timer.
        self.current_deferred, d = None, self.current_deferred
        d.cancel()
    def pause(self):
        """Pause a pomodoro, remembering the elapsed time so far."""
        log.msg("Paused.")
        self.paused_at = datetime.now()
        self.paused_elapsed = self.get_elapsed()
        self.current_deferred, d = None, self.current_deferred
        d.cancel()
    def unpause(self):
        """Unpause a pomodoro, rescheduling for the remaining time."""
        log.msg("Unpaused.")
        self.unpaused_at = datetime.now()
        self.current_deferred = task.deferLater(
            self.reactor,
            (self.duration * 60) - self.paused_elapsed.total_seconds(),
            self.finish
        ).addErrback(self._on_err)
    def _on_err(self, error):
        # Swallow only CancelledError (expected on pause/cancel/finish);
        # anything else is re-raised by trap().
        log.err("Some error happened.")
        error.trap(defer.CancelledError)
        self.current_deferred = None
    def finish(self, reason=None):
        """Finish a pomodoro: cancel the timer and fire on_finished."""
        log.msg("Finishing up now, reason: {}".format(reason))
        self.current_deferred, d = None, self.current_deferred
        d.cancel()
        # Re-arm on_finished for the next run before firing the old one.
        d, self.on_finished = self.on_finished, defer.Deferred(self.cancel)
        d.callback(reason)
    def get_elapsed(self):
        """Get the elapsed time for this pomo.

        NOTE(review): assumes start() has run (started_at is set) --
        raises TypeError otherwise; confirm callers.
        """
        elapsed = datetime.now() - self.started_at
        return elapsed
    def reset(self):
        """Reset the pomo."""
        self.started_at = None
        self.paused_elapsed = None
        self.paused_at = None
        self.unpaused_at = None
        self.current_deferred = None
| {
"repo_name": "dpnova/devdaemon",
"path": "devdaemon/timer/pomodoro.py",
"copies": "1",
"size": "2955",
"license": "mit",
"hash": 7656887478497744000,
"line_mean": 29.4639175258,
"line_max": 75,
"alpha_frac": 0.5912013536,
"autogenerated": false,
"ratio": 3.872870249017038,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4964071602617038,
"avg_score": null,
"num_lines": null
} |
"""A set of additional functions used across the whole project"""
from .const import *
import os
import platform
import sys
def printLine(txt):
    """Return txt formatted as a '-- ... --' line padded to ROW_LEN."""
    padding = ' ' * (ROW_LEN - len(txt) - 6)
    return '-- ' + txt + padding + ' --\n'
# Configure the console to 80x40: Windows uses `cls` and `mode con`;
# other platforms use `clear` and an xterm resize escape sequence.
if platform.system() == 'Windows':
    clear_display = lambda: os.system('cls')
    os.system('mode con: cols=80 lines=40')
else:
    clear_display = lambda: os.system('clear')
    sys.stdout.write("\x1b[8;{rows};{cols}t".format(rows=40, cols=80))
class EventLog():
    """Collects battle messages and renders the most recent ones."""
    def __init__(self):
        """Start with an empty message list and a message count of 0."""
        self.message = []
        self.length = 0
    def __repr__(self):
        # repr is just the number of logged lines.
        return str(self.length)
    def display_log(self):
        """Render the 10 most recently logged lines as a single string.

        When fewer than 10 lines exist, the leading rows are blank so the
        output is always exactly 10 rows tall.
        """
        rows = []
        total = len(self.message)
        for offset in range(1, 11):
            idx = total - 11 + offset
            if idx >= 0:
                rows.append('* ' + self.message[idx] + '\n')
            else:
                rows.append('* ' + '\n')
        return ''.join(rows)
    def insert(self, msg):
        """Log msg; multi-line messages are stored one line at a time."""
        pieces = msg.split('\n')
        if len(pieces) > 1:
            for line in pieces:
                self.message.append(line)
                self.length += 1
        else:
            self.message.append(msg)
            self.length += 1
msg_log = EventLog()
| {
"repo_name": "wilk16/rpggame",
"path": "src/misc/misc.py",
"copies": "1",
"size": "1344",
"license": "mit",
"hash": 832611352925235100,
"line_mean": 25.88,
"line_max": 71,
"alpha_frac": 0.6391369048,
"autogenerated": false,
"ratio": 3,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41391369048,
"avg_score": null,
"num_lines": null
} |
"""A set of algorithms or models"""
from diot import Diot
from pyppl import Proc
from . import params, proc_factory
pRWR = proc_factory(
desc = 'Do random walk with restart (RWR).',
config = Diot(annotate = """
@input:
`Wfile:file`: The adjecent matrix
`Efile:file`: The start vector
@output:
`outfile:file`: The output of final probabilities
@args:
`c`: The restart probability. Default: 0.1
`eps`: The convergent cutoff || R(i+1) - R(i) ||. Default: 1e-5
`niter`: Max iterations to stop. Default: 10000
`normW`: Weather to normalize W or not, default True.
- Laplacian normalization is used (more to add).
`normE`: Weather to normalize E or not, default True.
- E will be normalized as: E = E/sum(E)
@requires:
[NetPreProc]
desc = "Package for the pre-processing and normalization of graphs."
url = "https://cran.r-project.org/web/packages/NetPreProc/index.html"
when = "[[ {{args.normW}} == True ]]"
validate = '[[ $({{proc.lang}} --vanilla -e 'library(NetPreProc)' 2>&1) == \
*"Network Pre-Processing package"* ]]'
install = '{{proc.lang}} -e \'install.packages("NetPreProc", \
repos="https://cran.rstudio.com")\''
"""))
pRWR.input = "Wfile:file, Efile:file"
pRWR.output = "outfile:file:{{i.Wfile | fn2}}.rwr.txt"
pRWR.lang = params.Rscript.value
pRWR.args.c = 0.1
pRWR.args.eps = 1e-5
pRWR.args.niter = 10000
pRWR.args.normW = True
pRWR.args.normE = True
pAR = Proc(
desc = 'Affinity Regression.',
config = Diot(annotate = """
@name:
pAR
@description:
Affinity Regression.
Ref: https://www.nature.com/articles/nbt.3343
```
b c d d
_________ _______ ____ ____
| | | W | | | | |
a | D | b |_____| c |Pt| = a |Y | <=>
|_______| |__| | |
|__|
kronecker(P, YtD)*vec(W) = vec(YtY) <=>
X*vec(W) = vec(YtY)
WPt:
c d d
_______ ____ _____
| W | | | | |
b |_____| c |Pt| ---> b |___|
|__|
YtDW:
WtDtY:
b a d d
_______ _________ ____ _____
| Wt | | | | | | |
c |_____| b | Dt | a |Y | ---> c |___|
|_______| | |
|__|
```
@input:
`D:file` : The D matrix
`Pt:file`: The Pt matrix
`Y:file`: The Y matrix
- All input files could be gzipped
@output:
`W:file`: The interaction matrix
`outdir:dir`: The output directory
@args:
`seed`: The seed for sampling the training set.
`tfrac`: The fraction of samples used for training.
"""))
pAR.input = 'D:file, Pt:file, Y:file'
pAR.output = [
'W:file:{{i.D | fn}}-{{i.Pt | fn}}-{{i.Y | fn}}.AR/W.txt',
'outdir:dir:{{i.D | fn}}-{{i.Pt | fn}}-{{i.Y | fn}}.AR'
]
pAR.lang = params.Rscript.value
pAR.args.seed = None
pAR.args.tfrac = .5
pAR.args.inopts = Diot(cnames = True, rnames = True)
pAR.args.svdP = 0
pAR.args.predY = True
pAR.args.WPt = True
pAR.args.WtDtY = True
pAR.args.nfold = 3
pAR.args.nthread = 1
pAR.args.method = 'glmnet' # admm
pColoc = Proc(
desc = "Bayes Factor colocalisation analyses using R `coloc` package.",
config = Diot(annotate = """
@description:
Bayes Factor colocalisation analyses using R `coloc` package.
`coloc` package can accept multiple formats of input. Here we adopt the one using pvalues.
`coloc.abf(dataset1=list(pvalues=p1,N=nrow(X1),type="quant"), dataset2=list(pvalues=p2,N=nrow(X2),type="quant"), MAF=maf)`
@input:
`infile:file`: The input file including the MAF, pvalues of 1st and 2nd phenotypes
- The first 6 columns are in BED6 format.
- 7th : MAF
- 8th : Pvalues for the 1st phenotype
- 9th : Pvalues for the 2nd phenotype
- This file could have a header with the names for phenotypes
- Snps have to be on the same chromosome, and sorted by positions.
@output:
`outfile:file`: The output file including:
- # snps, PP.H0.abf, PP.H1.abf, PP.H2.abf, PP.H3.abf and PP.H4.abf
`outdir:dir` : The output directory containing the output file and plots.
@args:
`plot`: Do manhattan plot? Default: `True`
"""))
pColoc.input = 'infile:file'
pColoc.output = [
'outfile:file:{{i.infile | fn2}}.coloc/{{i.infile | fn2}}.coloc.txt',
'outdir:dir:{{i.infile | fn2}}.coloc'
]
pColoc.args.inopts = Diot(cnames = True, rnames = False)
pColoc.args.plot = True
pColoc.args.ggs = Diot()
pColoc.args.params = Diot()
pColoc.args.devpars = Diot(res = 300, height = 2000, width = 2000)
pColoc.args.hifile = ''
pColoc.args.hilabel = True
pColoc.lang = params.Rscript.value
| {
"repo_name": "pwwang/bioprocs",
"path": "bioprocs/algorithm.py",
"copies": "1",
"size": "4683",
"license": "mit",
"hash": 7818258702689222000,
"line_mean": 33.4338235294,
"line_max": 124,
"alpha_frac": 0.5742045697,
"autogenerated": false,
"ratio": 2.5858641634456103,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8434071948672051,
"avg_score": 0.04519935689471207,
"num_lines": 136
} |
"""A set of AST classes that correspond to the code.
This AST format is desinged to be easy to parse into. See typed_ast for the AST
format that is used during the evaluation step.
"""
from __future__ import absolute_import
import collections
class Select(collections.namedtuple(
        'Select', ['select_fields', 'table_expr', 'where_expr', 'groups',
                   'having_expr', 'orderings', 'limit', 'alias'])):
    """A top-level SELECT statement.

    Fields:
        select_fields: list of SelectField objects.
        table_expr: table expression to select from, or None.
        where_expr: WHERE filter expression, or None.
        groups: list of GROUP BY fields, or None.
        having_expr: HAVING filter expression, or None.
        orderings: list of Ordering instances, or None.
        limit: integer LIMIT, or None.
        alias: name given to a subquery, or None (also None for an
            outermost query).
    """
    def __str__(self):
        # Build each clause separately, then join with single spaces.
        parts = ['SELECT ' + ', '.join(str(f) for f in self.select_fields)]
        if self.table_expr:
            parts.append('FROM {}'.format(self.table_expr))
        if self.where_expr:
            parts.append('WHERE {}'.format(self.where_expr))
        if self.groups:
            parts.append('GROUP BY ' + ', '.join(str(g) for g in self.groups))
        if self.having_expr:
            parts.append('HAVING {}'.format(self.having_expr))
        if self.orderings:
            parts.append(
                'ORDER BY ' + ', '.join(str(o) for o in self.orderings))
        if self.limit:
            parts.append('LIMIT {}'.format(self.limit))
        return ' '.join(parts)
class SelectField(collections.namedtuple(
        'SelectField', ['expr', 'alias', 'within_record'])):
    """One expression in a SELECT list, with optional alias and WITHIN."""
    def __str__(self):
        if self.alias is None:
            return str(self.expr)
        if self.within_record is None:
            return '{} AS {}'.format(self.expr, self.alias)
        return '{} WITHIN {} AS {}'.format(
            self.expr, self.within_record, self.alias)
class Star(collections.namedtuple('Star', [])):
    """The `*` wildcard in a SELECT list."""
    def __str__(self):
        return '*'
class UnaryOperator(collections.namedtuple(
        'UnaryOperator', ['operator', 'expr'])):
    """A prefix operator applied to a single expression."""
    def __str__(self):
        return '(%s%s)' % (self.operator, self.expr)
class BinaryOperator(collections.namedtuple(
        'BinaryOperator', ['operator', 'left', 'right'])):
    """An infix operator applied to two expressions."""
    def __str__(self):
        return '(%s%s%s)' % (self.left, self.operator, self.right)
class FunctionCall(collections.namedtuple('FunctionCall', ['name', 'args'])):
    """A function applied to arguments.

    Note: `args` is rendered via str(), so a list prints with brackets —
    presumably acceptable for this debugging repr; verify with callers.
    """
    def __str__(self):
        return '(%s(%s))' % (self.name, self.args)
class Literal(collections.namedtuple('Literal', ['value'])):
    """A constant value appearing in the query text."""
    def __str__(self):
        return str(self.value)
class ColumnId(collections.namedtuple('ColumnId', ['name'])):
    """A reference to a column by name."""
    def __str__(self):
        return self.name
class Ordering(collections.namedtuple('Ordering',
                                      ['column_id', 'is_ascending'])):
    """A single ORDER BY term: a column plus a direction."""
    def __str__(self):
        direction = 'ASC' if self.is_ascending else 'DESC'
        return '{} {}'.format(self.column_id, direction)
class TableId(collections.namedtuple('TableId', ['name', 'alias'])):
    """Table expression referencing a table to select from.

    Fields:
        name: The name of the table to select from.
        alias: An alias to use for this table, or None if no alias was
            specified. (The alias is not included in the str() rendering.)
    """
    def __str__(self):
        return self.name
class TableUnion(collections.namedtuple('TableUnion', ['tables'])):
    """Table expression for a union of tables (the comma operator).

    The tables can be arbitrary table expressions.
    """
    def __str__(self):
        return ', '.join(map(str, self.tables))
class JoinType(object):
    """A namespace for holding constants for different types of join.

    Instances act as singletons (compared by identity elsewhere).
    TODO(colin): if/when running python 3.5+ replace with an enum.
    """
    def __init__(self, name):
        self.name = name

    def __str__(self):
        return '{} JOIN'.format(self.name)
JoinType.LEFT_OUTER = JoinType('LEFT OUTER')
JoinType.INNER = JoinType('INNER')
JoinType.CROSS = JoinType('CROSS')
class PartialJoin(collections.namedtuple('PartialJoin',
                                         ['table_expr', 'join_type',
                                          'condition'])):
    """Expression for the right side of a join, its type, and condition.

    This represents something like `LEFT JOIN [dataset.table] ON x = y`

    Fields:
        table_expr: the table expression on the right-hand side of the join.
        join_type: one of the JoinType singleton constants.
        condition: the ON expression; not rendered for CROSS joins.
    """
    def __str__(self):
        # Identity comparison is intentional: JoinType constants are
        # module-level singletons.
        if self.join_type is JoinType.CROSS:
            return '%s %s' % (self.join_type, self.table_expr)
        else:
            return '%s %s ON %s' % (
                self.join_type, self.table_expr, self.condition)
class Join(collections.namedtuple('Join', ['base', 'join_parts'])):
    """Expression for a join of two or more tables.

    base is the expression in the leftmost part of the join.
    join_parts is an array of one or more `PartialJoin`s.
    """
    def __str__(self):
        rendered_parts = ' '.join(str(part) for part in self.join_parts)
        return '{} {}'.format(self.base, rendered_parts)
class CaseClause(collections.namedtuple('CaseClause',
                                        ['condition', 'result_expr'])):
    """Expression for a single clause from a CASE / WHEN / END statement.

    ELSE is just expressed as a final WHEN with a condition of TRUE.
    """
    def __str__(self):
        return 'WHEN %s THEN %s' % (self.condition, self.result_expr)
class CaseExpression(collections.namedtuple('CaseExpression', ['clauses'])):
    """Case expression with one or more WHEN clauses and optional ELSE."""
    def __str__(self):
        body = ' '.join(str(clause) for clause in self.clauses)
        return 'CASE %s END' % body
| {
"repo_name": "Khan/tinyquery",
"path": "tinyquery/tq_ast.py",
"copies": "1",
"size": "6354",
"license": "mit",
"hash": -6555121143733884000,
"line_mean": 33.5326086957,
"line_max": 79,
"alpha_frac": 0.5845136922,
"autogenerated": false,
"ratio": 4.177514792899408,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 184
} |
"""A set of AST classes that correspond to the code.
This AST format is designed to be easy to parse into. See typed_ast for the AST
format that is used during the evaluation step.
"""
import collections
class Select(collections.namedtuple(
        'Select', ['select_fields', 'table_expr', 'where_expr', 'groups',
                   'orderings', 'limit', 'alias'])):
    """Represents a top-level select statement.

    Fields:
        select_fields: A list of SelectField objects.
        table_expr: A table expression to select from, or None if no table
            was specified.
        where_expr: The WHERE filter expression, or None.
        groups: A list of fields to group by, or None if no GROUP BY clause.
        orderings: A list of Ordering instances, or None if no ORDER BY.
        limit: An integer limit.
        alias: For subqueries, the subquery name, or None if no name was
            given (or if this is an outermost query).
    """
    def __str__(self):
        # Collect each present clause, then join with single spaces.
        clauses = ['SELECT ' +
                   ', '.join(str(field) for field in self.select_fields)]
        if self.table_expr:
            clauses.append('FROM {}'.format(self.table_expr))
        if self.where_expr:
            clauses.append('WHERE {}'.format(self.where_expr))
        if self.groups:
            clauses.append('GROUP BY ' +
                           ', '.join(str(group) for group in self.groups))
        if self.orderings:
            clauses.append('ORDER BY ' +
                           ', '.join(str(o) for o in self.orderings))
        if self.limit:
            clauses.append('LIMIT {}'.format(self.limit))
        return ' '.join(clauses)
class SelectField(collections.namedtuple('SelectField', ['expr', 'alias'])):
    """One selected expression with an optional AS alias."""
    def __str__(self):
        if self.alias is None:
            return str(self.expr)
        return '{} AS {}'.format(self.expr, self.alias)
class Star(collections.namedtuple('Star', [])):
    """Placeholder node for the `*` in `SELECT *`."""
    def __str__(self):
        return '*'
class UnaryOperator(collections.namedtuple(
        'UnaryOperator', ['operator', 'expr'])):
    """A prefix operator applied to a single expression."""
    def __str__(self):
        return '(%s%s)' % (self.operator, self.expr)
class BinaryOperator(collections.namedtuple(
        'BinaryOperator', ['operator', 'left', 'right'])):
    """An infix operator applied to two expressions."""
    def __str__(self):
        return '(%s%s%s)' % (self.left, self.operator, self.right)
class FunctionCall(collections.namedtuple('FunctionCall', ['name', 'args'])):
    """A function applied to arguments (args rendered via str())."""
    def __str__(self):
        return '(%s(%s))' % (self.name, self.args)
class Literal(collections.namedtuple('Literal', ['value'])):
    """A constant value appearing in the query text."""
    def __str__(self):
        return str(self.value)
class ColumnId(collections.namedtuple('ColumnId', ['name'])):
    """A reference to a column by name."""
    def __str__(self):
        return self.name
class Ordering(collections.namedtuple('Ordering',
                                      ['column_id', 'is_ascending'])):
    """A single ORDER BY term: a column plus a direction."""
    def __str__(self):
        direction = 'ASC' if self.is_ascending else 'DESC'
        return '{} {}'.format(self.column_id, direction)
class TableId(collections.namedtuple('TableId', ['name', 'alias'])):
    """Table expression referencing a table to select from.

    Fields:
        name: The name of the table to select from.
        alias: An alias to use for this table, or None if no alias was
            specified. (The alias is not included in the str() rendering.)
    """
    def __str__(self):
        return self.name
class TableUnion(collections.namedtuple('TableUnion', ['tables'])):
    """Table expression for a union of tables (the comma operator).

    The tables can be arbitrary table expressions.
    """
    def __str__(self):
        return ', '.join(map(str, self.tables))
class Join(collections.namedtuple('Join', ['table1', 'table2', 'condition',
                                           'is_left_outer'])):
    """Table expression for a join of two tables.

    Joining more than two tables currently isn't supported.
    """
    def __str__(self):
        keyword = 'LEFT OUTER JOIN' if self.is_left_outer else 'JOIN'
        return '{} {} {} ON {}'.format(
            self.table1, keyword, self.table2, self.condition)
class CrossJoin(collections.namedtuple('CrossJoin', ['table1', 'table2'])):
    """Table expression for a cross join of two tables.

    This needs to be parsed separately instead of joining on true since there's
    no way to write a regular JOIN that behaves as a CROSS JOIN.
    """
    def __str__(self):
        return '%s CROSS JOIN %s' % (self.table1, self.table2)
| {
"repo_name": "burnhamup/tinyquery",
"path": "tinyquery/tq_ast.py",
"copies": "1",
"size": "4743",
"license": "mit",
"hash": 8957019290822566000,
"line_mean": 33.1223021583,
"line_max": 79,
"alpha_frac": 0.5932953827,
"autogenerated": false,
"ratio": 4.1936339522546415,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 139
} |
"""A set of classes and functions for btd_sched.py (and others?)."""
import sys
import time
from datetime import datetime, timedelta
from pathlib import Path
import re
from iteration_utilities import deepflatten
from rivendell_lib import RDDatabase
def my_print(*p_args, **p_kwargs):
    """Print to STDERR instead of STDOUT, forwarding all arguments."""
    print(*p_args, file=sys.stderr, **p_kwargs)
class Event():
    """An Event is an atomic element containing rules for scheduling Carts."""

    def __init__(self, service_name, event_name):
        """Instantiate an Event with the associated fields.

        :param service_name: The name of (typically) the Rivendell
        Reference Service.
        :param event_name: The name of a Rivendell Event to retrieve.
        """
        self.service_name = service_name
        self.event_name = event_name
        self.attributes = {}
        self.query = ("SELECT DISTINCT LCASE(ev.sched_group) AS sched_group, "
                      "ev.have_code AS schedcode1, ev.have_code2 AS schedcode2, "
                      "ev.artist_sep AS artist_sep, ev.title_sep AS title_sep "
                      "FROM SERVICE_CLOCKS sc "
                      "LEFT JOIN CLOCK_LINES cl ON (sc.clock_name = cl.clock_name) "
                      "LEFT JOIN EVENTS ev ON (cl.event_name = ev.name) "
                      "WHERE sc.service_name = %s AND "
                      "ev.name = %s")
        self.query_args = (service_name, event_name,)
        db = RDDatabase(None)
        event = db.fetchone(self.query, self.query_args, dictionary=True)
        # Copy each scheduling attribute straight from the result row.
        for field in ('sched_group', 'schedcode1', 'schedcode2',
                      'artist_sep', 'title_sep'):
            self.attributes[field] = event[field]
        # 'codes' combines both scheduler codes for alternation matching.
        self.attributes['codes'] = event['schedcode1'] + '|' + event['schedcode2']
        db.close()

    def list_attributes(self):
        """Return the list of Event attribute names."""
        # dict keys are already unique; listing them directly is equivalent
        # to list(dict.fromkeys(...)).
        return list(self.attributes)

    def get_query(self):
        """Return a string containing the formatted query for an Event."""
        return self.query % self.query_args
class Hour():
    """An Hour is a list of Events each with a start time and a duration."""

    def __init__(self, service_name, hour):
        """Instantiate an Hour, getting all the hour's Events.

        :param service_name: The name of (typically) the Rivendell
        Reference Service.
        :param hour: A "Rivendell hour of the week" to retrieve (from
        0 [Midnight Monday] to 167 [11pm Sunday]).
        """
        self.service_name = service_name
        self.hour = hour
        self.query = ("SELECT cl.start_time AS start_time, "
                      "cl.length AS length, cl.event_name AS event_name "
                      "FROM CLOCK_LINES AS cl "
                      "LEFT JOIN SERVICE_CLOCKS AS sc ON (sc.clock_name = cl.clock_name) "
                      "WHERE sc.service_name = %s AND hour = %s")
        self.query_args = (self.service_name, self.hour,)
        db = RDDatabase(None)
        rows = db.fetchall(self.query, self.query_args, dictionary=True)
        # One entry per clock line; each carries its fully-loaded Event.
        self.events = [{'start_time': row['start_time'],
                        'length': row['length'],
                        'event': Event(service_name, row['event_name'])}
                       for row in rows]

    def values(self, attribute):
        """Return a dict of unique values and their counts.

        The counts are for the given Event attribute across this Hour.
        See Event() for the list of attributes.

        :param attribute: An Event attribute (see Event()) for which
        to retrieve values.
        """
        # If the first one has this attribute, they will all have it.
        first_event = self.events[0]['event']
        if attribute not in first_event.attributes:
            print("Hour::attributes(): ERROR: no such attribute: {attr}. Try one of '{l}'."
                  .format(attr=attribute, l=first_event.list_attributes()), file=sys.stderr)
            return None
        per_event = [entry['event'].attributes[attribute]
                     for entry in self.events]
        # Count occurrences of each distinct value, preserving first-seen order.
        return {v: per_event.count(v) for v in dict.fromkeys(per_event)}

    def get_query(self):
        """Return a string containing the formatted query with query_args for an Hour."""
        return self.query % self.query_args
class Day():
    """A Day is a list of 24 Hours."""

    def __init__(self, service_name, clock_date):
        """Instantiate a Day, getting all 24 Hours.

        :param service_name: The name of (typically) the Rivendell
        Reference Service.
        :param clock_date: A date (in the form YYYY-MM-DD,
        Zero-filled) representing the day for which to generate a
        Rivendell Log. This date is simply used to calculate the
        starting hour of the week (from 0 [Midnight Monday] to 167
        [11pm Sunday]).
        """
        self.service_name = service_name
        self.clock_date = clock_date
        # Rivendell hours of the week (0 - 167); %u is ISO weekday 1-7.
        weekday = int(time.strftime("%u", time.strptime(clock_date, "%Y-%m-%d")))
        self.first_hour = (weekday - 1) * 24
        self.last_hour = self.first_hour + 23
        self.query = ("SELECT hour, clock_name FROM SERVICE_CLOCKS "
                      "WHERE service_name = %s AND "
                      "hour BETWEEN %s AND %s")
        self.query_args = (service_name, self.first_hour, self.last_hour)
        db = RDDatabase(None)
        rows = db.fetchall(self.query, self.query_args, dictionary=True)
        self.hours = [{'hour': row['hour'],
                       'clock_name': row['clock_name'],
                       'clock': Hour(service_name, row['hour'])}
                      for row in rows]

    def values(self, attribute):
        """Return a dict of unique values and their counts for this Day.

        The counts are for the given Event attribute across all Hours.
        """
        # Flatten the per-hour attribute values into one list, then count.
        flat = [hour['clock'].events[i]['event'].attributes[attribute]
                for hour in self.hours
                for i, _ in enumerate(hour['clock'].events)]
        return {v: flat.count(v) for v in dict.fromkeys(flat)}

    def get_query(self):
        """Return a string containing the formatted query with query_args for a Day."""
        return self.query % self.query_args
class Batch():
    """A Batch is a collection of Days in a scheduling session."""

    def __init__(self, service_name, start_date, day_count=1):
        """Instantiate a Batch getting all the Days, Hours and Events.

        :param service_name: The name of (typically) the Rivendell
        Reference Service.
        :param start_date: The batch start date (in the form
        YYYY-MM-DD, Zero-filled).
        :param day_count: The number of days in this batch.

        A specific Event in a Batch is referenced with, e.g.,
        Batch('service-name',
        'yyyy-mm-dd').days[daynum].hours[hournum]['clock'].events[eventnum]['event'].event_name
        """
        self.service_name = service_name
        self.start_date = start_date
        self.day_count = day_count
        self.days = [Day(service_name,
                         (datetime.strptime(start_date, '%Y-%m-%d') +
                          timedelta(days=offset)).strftime("%F"))
                     for offset in range(day_count)]

    def values(self, attribute):
        """Return the values for an entire Batch.

        This returns a dict of occurrences indexed by "attribute"
        for the given Event attribute for an entire Batch.

        :param attribute: An Event attribute to summarize.
        :returns: A dict indexed by attribute values, the dict values
        being the number of occurrences of that attribute value.
        """
        nested = [[[entry['event'].attributes[attribute]
                    for entry in hour['clock'].events]
                   for hour in day.hours]
                  for day in self.days]
        # Flatten once up front instead of re-flattening per distinct value.
        flat = list(deepflatten(nested, depth=2))
        return {v: flat.count(v) for v in dict.fromkeys(flat)}

    def refresh(self):
        """Reload the entire configuration.

        The reload uses the original instantiation values.
        This might be used in a long-running process during
        which the database may have changed.
        """
        self.days = [Day(self.service_name,
                         (datetime.strptime(self.start_date, '%Y-%m-%d') +
                          timedelta(days=offset)).strftime("%F"))
                     for offset in range(self.day_count)]
class OutputFile():
    """An output file.

    Including name generation, path manipulation, and reading
    and writing the output file.
    """
    def __init__(self, service_name, import_date, debug):
        """Construct the object and set the directory name for the file.

        :param service_name: The (case-insensitive) Implementation Service name.
        :param import_date: the date (in YYYY-MM-DD format) for the output file.
        :param debug: Debug mode (boolean)
        """
        self.service_name = service_name
        self.import_date = import_date
        self.debug = debug
        # Computed lazily by make_pathname().
        self.fullpath = None
        self.query = "SELECT mus_path FROM SERVICES WHERE name = %s"
        self.query_args = (self.service_name,)
        rows = RDDatabase(None).fetchall(self.query, self.query_args, dictionary=True)
        # mus_path is the service's Music Data Import Path from Rivendell.
        self.mus_path = Path(rows[0]['mus_path'])
    def get_query(self):
        """Return a string containing the formatted query with query_args for a Day.

        :returns: The query with the '%s' directive(s) expanded.
        """
        return self.query % self.query_args
    def make_directory(self):
        """Create the directory hierarchy if it does not exist.

        :returns: True or False depending on the success or failure of
        creating the directory
        """
        if not self.mus_path.parent.is_dir():
            try:
                if self.debug:
                    my_print("make_directory: '{dir}' is missing, attempting to create it."
                             .format(dir=str(self.mus_path.parent.name)))
                self.mus_path.parent.mkdir(parents=True, exist_ok=True)
            except OSError as e:
                print("schedlib.OutputFile: ERROR: Unable to create directory '{d}' ('{e}')."
                      .format(d=self.mus_path.parent.name, e=e), file=sys.stderr)
                return False
        return True
    def make_name(self):
        """Normalize the import date specified in the constructor.

        Tries YYYY?MM?DD first; falls back to a DD?MM?YYYY / MM?DD?YYYY
        pattern and disambiguates by checking which field exceeds 12.
        Note: exits the process if no pattern matches at all.

        :returns: A string representing the import date. Also resets self.import_date.
        """
        # Any single separator character (or none) is accepted between parts.
        date1_regexp = re.compile(r'(?P<year>\d{4}).?(?P<month>\d{1,2}).?(?P<day>\d{1,2})')
        match = date1_regexp.search(self.import_date)
        if match is None:
            date2_regexp = re.compile(r'(?P<day_or_month>\d{1,2}).?(?P<month_or_day>\d{1,2}).?(?P<year>\d{4})')
            match = date2_regexp.search(self.import_date)
            if match is None:
                print("btd-sched.py: ERROR: '{date}': unknown date format, please use 'YYYY-MM-DD' or similar."
                      .format(date=self.import_date), file=sys.stderr)
                sys.exit(1)
            d_parts = match.groupdict()
            # We cannot be assured this will work as they intend, but
            # give it a shot: a value > 12 cannot be a month.
            if int(d_parts['day_or_month']) > 12:
                d_parts['day'] = d_parts['day_or_month']
                d_parts['month'] = d_parts['month_or_day']
            elif int(d_parts['month_or_day']) > 12:
                d_parts['day'] = d_parts['month_or_day']
                d_parts['month'] = d_parts['day_or_month']
            else:
                # Both fields <= 12: genuinely ambiguous, give up.
                print("make_name: ERROR: '{date}': ambiguous date, please use 'YYYY-MM-DD' or similar."
                      .format(date=self.import_date), file=sys.stderr)
                return None
        else:
            d_parts = match.groupdict()
        # NOTE: the normalized value includes the '.txt' suffix.
        self.import_date = "{yyyy}-{mm}-{dd}.txt".format(yyyy=d_parts['year'],
                                                         mm=d_parts['month'],
                                                         dd=d_parts['day'])
        if self.debug:
            print("make_name: set self.filename to '{f}'."
                  .format(f=self.import_date), file=sys.stderr)
        return self.import_date
    def make_pathname(self):
        """Create a Path object pointing to the full path of the output file.

        :returns: a Path object containing the full path of the
        file. Also saved with the object as self.fullpath.
        """
        # Try to avoid breakage if they use Rivendell-specific
        # ("non-strftime(3)") placeholders in the Import Path setting
        # in Rivendell.SERVICES.mus_path.
        try:
            import_date = Path(time.strftime(self.mus_path.name,
                                             time.strptime(self.import_date,
                                                           '%Y-%m-%d')))
        except ValueError as e:
            if self.debug:
                print("schedlib.OutputFile: unknown directive in Music Data Import Path: {e}"
                      .format(e=e), file=sys.stderr)
            # Fall back to the normalized "<date>.txt" name.
            import_date = self.make_name()
        self.fullpath = self.mus_path.parent / import_date
        return self.fullpath
| {
"repo_name": "opensourceradio/ram",
"path": "usr/local/bin/btd_sched/schedlib.py",
"copies": "1",
"size": "14684",
"license": "bsd-2-clause",
"hash": 4520422525656754700,
"line_mean": 40.1316526611,
"line_max": 111,
"alpha_frac": 0.5732770362,
"autogenerated": false,
"ratio": 4.0307438923963765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5104020928596377,
"avg_score": null,
"num_lines": null
} |
"""A set of classes for reading and writing packets from X-Plane."""
import socket
import struct
import pint
# Shared unit registry for all pint quantities in this module.
units = pint.UnitRegistry()
# Standard gravity, used to convert weights (lb/kg) into forces (N).
GRAVITY = 9.81 * (units.meter / units.second ** 2)
# Magic value telling X-Plane "do not change this control input".
LEAVE_ALONE = -999
class DataPacket:
    """
    Contains methods for reading and writing data from a 'DATA' packet.

    Wire format: a 5-byte header (b'DATA' plus one pad byte) followed by
    36-byte records, each a little-endian int32 index and eight
    little-endian float32 values.

    Parameters
    ----------
    data : bytes
        The raw bytes in the packet, passed to :func:`.read`.
    """
    def __init__(self, data=None):
        # Mapping of record index -> sequence of 8 float values.
        self.data = {}
        if data is not None:
            self.read(data)
    def read(self, data):
        """
        Parse the data in the packet and read it into this class.

        Parameters
        ----------
        data : bytes
            The raw bytes in the packet.

        Raises
        ------
        ValueError
            If the payload does not start with the 'DATA' prefix.
        """
        if not data.startswith(b'DATA'):
            raise ValueError("Not a 'DATA' packet.")
        data = data[5:]
        # Each 36-byte record: int32 index followed by 8 float32 values.
        for i in range(len(data) // 36):
            index = struct.unpack_from('<i', data, 0 + (i * 36))[0]
            values = []
            for j in range(8):
                value = struct.unpack_from('<f', data, 4 + (i * 36) + (j * 4))[0]
                values.append(value)
            self.data[index] = values
    def write(self):
        """
        Write the contents of this packet to a byte string suitable for sending
        back to X-Plane.

        Returns
        -------
        bytes
            The array of bytes.
        """
        data = b''
        for index, values in self.data.items():
            data += struct.pack('<i', index)
            assert len(values) == 8
            for value in values:
                data += struct.pack('<f', value)
        assert len(data) == len(self.data) * 36
        return b'DATA\x00' + data
    def __getitem__(self, index):
        """
        Get the 8-valued tuple for the specific index.

        Returns
        -------
        tuple
            A tuple of length 8 containing floats.

        Raises
        ------
        IndexError
            If said index is not in the data.
        """
        try:
            return self.data[index]
        except KeyError:
            raise IndexError('Packet does not contain index {}.'.format(index))
    def __setitem__(self, index, values):
        """
        Set the 8-valued tuple for the specific index.

        Parameters
        ----------
        index : int
            The index of the values.
        values : tuple
            The 8 floats for this index.

        Raises
        ------
        ValueError
            If the tuple doesn't contain exactly 8 values.
        """
        if len(values) != 8:
            raise ValueError('Tried to set values of length {}, should be 8.'
                             .format(len(values)))
        self.data[index] = values
    def read_speeds(self):
        """
        Read the speeds (index 3).

        Returns
        -------
        Indicated Airspeed : m/s
        Equivalent Airspeed : m/s
        True Airspeed : m/s
        Groundspeed : m/s
        """
        values = self[3]
        meters_per_second = units.meter / units.second
        indicated_airspeed = (values[0] * units.knot).to(meters_per_second)
        equivalent_airspeed = (values[1] * units.knot).to(meters_per_second)
        true_airspeed = (values[2] * units.knot).to(meters_per_second)
        groundspeed = (values[3] * units.knot).to(meters_per_second)
        return indicated_airspeed, equivalent_airspeed, true_airspeed, \
            groundspeed
    def write_joystick_elevator_aileron_rudder(self, elevator=LEAVE_ALONE,
                                               aileron=LEAVE_ALONE,
                                               rudder=LEAVE_ALONE):
        """
        Write joystick control inputs (index 8).

        Any argument left at LEAVE_ALONE tells X-Plane not to change
        that axis.
        """
        self[8] = (elevator, aileron, rudder) + (0,) * 5
    def read_angular_moments(self):
        """
        Read the angular moments (index 15).

        Returns
        -------
        L : Nm
        M : Nm
        N : Nm
        """
        values = self[15]
        newton_meters = units.newton * units.meter
        # 1 ft*lbf = 1.35581795 N*m; converted inline for speed rather than
        # going through pint's .to() machinery.
        M = (values[0] * 1.35581795) * newton_meters
        L = (values[1] * 1.35581795) * newton_meters
        N = (values[2] * 1.35581795) * newton_meters
        return L, M, N
    def read_gear_break(self):
        """
        Read the gear and brakes (index 16).

        NOTE(review): this and read_angular_velocities both read index 16;
        one of the two indices is likely wrong for this X-Plane version —
        confirm against the Data Input & Output screen.

        Returns
        -------
        Gear : float
        W-break : float
        L-break : float
        R-break : float
        """
        values = self[16]
        gear = values[0]
        wbrak = values[1]
        lbrak = values[2]
        rbrak = values[3]
        return gear, wbrak, lbrak, rbrak
    def write_gear_break(self, gear=LEAVE_ALONE, wbrak=LEAVE_ALONE,
                         lbrak=LEAVE_ALONE, rbrak=LEAVE_ALONE):
        """
        Write the gear and brakes (index 16).

        Parameters
        -------
        gear : float
        wbrak : float
        lbrak : float
        rbrak : float
        """
        self[16] = (gear, wbrak, lbrak, rbrak) + (0,) * 4
    def read_angular_velocities(self):
        """
        Read the angular velocities (index 16).

        NOTE(review): see read_gear_break about the shared index 16.

        Returns
        -------
        P : rad/s
        Q : rad/s
        R : rad/s
        """
        values = self[16]
        radians_per_second = units.radian / units.second
        # X-Plane orders these Q, P, R on the wire.
        Q = values[0] * radians_per_second
        P = values[1] * radians_per_second
        R = values[2] * radians_per_second
        return P, Q, R
    def read_pitch_roll_headings(self):
        """
        Read the pitch, roll and headings (index 17).

        Returns
        -------
        Pitch : rad
        Roll : rad
        True Heading : rad
        Magnetic Heading : rad
        """
        values = self[17]
        pitch = (values[0] * units.degree).to(units.radian)
        roll = (values[1] * units.degree).to(units.radian)
        true_heading = (values[2] * units.degree).to(units.radian)
        # BUG FIX: magnetic heading is slot 3 of index 17; the original code
        # read values[2] twice, so "magnetic" was really the true heading.
        magnetic_heading = (values[3] * units.degree).to(units.radian)
        return pitch, roll, true_heading, magnetic_heading
    def read_latitude_longitude_altitude(self):
        """
        Read the latitude, longitude and altitude (index 20).

        Returns
        -------
        Latitde : rad
        Longitude : rad
        Mean Sea Level Altitude : m
        Above Ground Level Altitude : m
        """
        values = self[20]
        latitude = (values[0] * units.degree).to(units.radian)
        longitude = (values[1] * units.degree).to(units.radian)
        mean_sea_level_altitude = (values[2] * units.feet).to(units.meter)
        # Slot 3 is skipped on purpose; AGL altitude lives in slot 4.
        above_ground_level_altitude = (values[4] * units.feet).to(units.meter)
        return latitude, longitude, mean_sea_level_altitude, \
            above_ground_level_altitude
    def read_angle_of_attack_side_slip_paths(self):
        """
        Read the angle of attack, side slip and paths (index 18).

        Returns
        -------
        Alpha : rad
        Beta : rad
        H-path : rad
        V-path : rad
        Slip : rad
        """
        values = self[18]
        alpha = (values[0] * units.degree).to(units.radian)
        beta = (values[1] * units.degree).to(units.radian)
        hpath = (values[2] * units.degree).to(units.radian)
        vpath = (values[3] * units.degree).to(units.radian)
        slip = (values[4] * units.degree).to(units.radian)
        return alpha, beta, hpath, vpath, slip
    def write_throttle_command(self, value):
        """
        Write a throttle command.

        Parameters
        ----------
        value : float
            The value of the throttle.
        """
        self[25] = (value,) + (0,) * 7
    def read_engine_thrust(self):
        """
        Read the engine thrust (index 35).

        Returns
        -------
        Engine Thrust : N
        """
        values = self[35]
        # X-Plane reports pounds(-force as mass); convert to kg then to N.
        v = ((values[0] * units.lb).to(units.kg) * GRAVITY).to(units.newton)
        return v
    def read_aero_forces(self):
        """
        Read the aero forces (index 64).

        Returns
        -------
        Lift : N
        Drag : N
        Side : N
        """
        values = self[64]
        lift = ((values[0] * units.lb).to(units.kg) * GRAVITY).to(units.newton)
        drag = ((values[1] * units.lb).to(units.kg) * GRAVITY).to(units.newton)
        side = ((values[2] * units.lb).to(units.kg) * GRAVITY).to(units.newton)
        return lift, drag, side
    def read_aileron_angle(self):
        """
        Read the aileron angle (index 70).

        Returns
        -------
        1 : (rad, rad)
        2 : (rad, rad)
        3 : (rad, rad)
        4 : (rad, rad)
        """
        values = self[70]
        left_1 = (values[0] * units.degree).to(units.radian)
        right_1 = (values[1] * units.degree).to(units.radian)
        left_2 = (values[2] * units.degree).to(units.radian)
        right_2 = (values[3] * units.degree).to(units.radian)
        left_3 = (values[4] * units.degree).to(units.radian)
        right_3 = (values[5] * units.degree).to(units.radian)
        left_4 = (values[6] * units.degree).to(units.radian)
        right_4 = (values[7] * units.degree).to(units.radian)
        return (left_1, right_1), (left_2, right_2), (left_3, right_3), \
            (left_4, right_4)
    def read_elevator_angle(self):
        """
        Read the elevator angle (index 74).

        Returns
        -------
        1 : (rad, rad)
        2 : (rad, rad)
        """
        values = self[74]
        elev1_1 = (values[0] * units.degree).to(units.radian)
        elev1_2 = (values[1] * units.degree).to(units.radian)
        elev2_1 = (values[2] * units.degree).to(units.radian)
        elev2_2 = (values[3] * units.degree).to(units.radian)
        return (elev1_1, elev1_2), (elev2_1, elev2_2)
    def read_rudder_angle(self):
        """
        Read the rudder angle (index 75).

        Returns
        -------
        1 : (rad, rad)
        2 : (rad, rad)
        """
        values = self[75]
        rudd1_1 = (values[0] * units.degree).to(units.radian)
        rudd1_2 = (values[1] * units.degree).to(units.radian)
        rudd2_1 = (values[2] * units.degree).to(units.radian)
        rudd2_2 = (values[3] * units.degree).to(units.radian)
        return (rudd1_1, rudd1_2), (rudd2_1, rudd2_2)
class CommandPacket:
    """
    Represents an X-Plane 'CMND' packet, which triggers a simulator command.

    Parameters
    ----------
    command : str, optional
        The command path (e.g. 'sim/operation/pause_toggle').
    data : bytes, optional
        Raw packet bytes to parse via :func:`.read`.
    """
    def __init__(self, command=None, data=None):
        self.command = command
        if data is not None:
            self.read(data)
    def read(self, data):
        """
        Parse a raw 'CMND' packet into this object.

        Raises
        ------
        ValueError
            If the payload does not start with the 'CMND' prefix.
        """
        if not data.startswith(b'CMND'):
            # BUG FIX: the original message wrongly said "Not a 'DATA' packet."
            raise ValueError("Not a 'CMND' packet.")
        data = data[5:]
        self.command = data.decode()
    def write(self):
        """Serialize this packet to bytes suitable for sending to X-Plane."""
        return b'CMND0' + self.command.encode()
| {
"repo_name": "thomasleese/snakes-on-a-plane",
"path": "xplane/packets.py",
"copies": "2",
"size": "11000",
"license": "mit",
"hash": 3873570708193102300,
"line_mean": 24.5813953488,
"line_max": 81,
"alpha_frac": 0.5069090909,
"autogenerated": false,
"ratio": 3.6315615714757348,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5138470662375735,
"avg_score": null,
"num_lines": null
} |
"""A set of classes that can be used to represent electric cars."""
from car import Car
class Battery():
    """A simple attempt to model a battery for an electric car."""

    def __init__(self, battery_size=70):
        """Initialize the battery's attributes.

        battery_size: capacity in kWh (default 70).
        """
        self.battery_size = battery_size

    def describe_battery(self):
        """Print a statement describing the battery size."""
        print("This car has a " + str(self.battery_size) + "-kWh battery.")

    def get_range(self):
        """Print a statement about the range this battery provides."""
        # Map known battery sizes to approximate ranges (miles). Previously
        # an unknown size left the local unbound and raised NameError; also
        # the local was named `range`, shadowing the builtin.
        ranges = {70: 240, 85: 270}
        battery_range = ranges.get(self.battery_size)
        if battery_range is None:
            print("Range data is unavailable for a " +
                  str(self.battery_size) + "-kWh battery.")
            return
        message = "This car can go approximately " + str(battery_range)
        # Fixed user-facing typo: "chage" -> "charge".
        message += " miles on a full charge."
        print(message)
class ElectricCar(Car):
    """Represents aspects of a car, specific to electric vehicles."""

    def __init__(self, make, model, year):
        """
        Initialize attributes of the parent class.
        Then initialize attributes specific to an electric car.
        """
        super().__init__(make, model, year)
        # Composition: each ElectricCar owns its own Battery instance.
        self.battery = Battery()

    def fill_gas_tank(self):
        """Electric cars don't have gas tanks."""
        print("This car doesn't need a gas tank!")


# Example usage kept as an inert module-level string (never executed).
"""
my_tesla = ElectricCar('tesla', 'model s', 2016)
print(my_tesla.get_descriptive_name())
my_tesla.battery.describe_battery()
my_tesla.battery.get_range()
"""
| {
"repo_name": "mccarrion/python-practice",
"path": "crash_course/chapter09/electric_car.py",
"copies": "1",
"size": "1478",
"license": "mit",
"hash": -98478923571663120,
"line_mean": 30.4468085106,
"line_max": 75,
"alpha_frac": 0.6136671177,
"autogenerated": false,
"ratio": 3.751269035532995,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4864936153232995,
"avg_score": null,
"num_lines": null
} |
"""A set of classes that can be used to represent electric cars."""
from car import Car
class Battery():
    """A simple attempt to model a battery for an electric car."""

    def __init__(self, battery_size=60):
        """Initialize the battery's attributes.

        battery_size: capacity in kWh (default 60).
        """
        self.battery_size = battery_size

    def describe_battery(self):
        """Print a statement describing the battery size."""
        print("This car has a " + str(self.battery_size) + "-kWh battery.")

    def get_range(self):
        """Print a statement about the range this battery provides."""
        # Map known battery sizes to approximate ranges (miles). Previously
        # an unknown size left the local unbound and raised NameError; also
        # the local was named `range`, shadowing the builtin.
        ranges = {60: 140, 85: 185}
        battery_range = ranges.get(self.battery_size)
        if battery_range is None:
            print("Range data is unavailable for a " +
                  str(self.battery_size) + "-kWh battery.")
            return
        message = "This car can go approximately " + str(battery_range)
        message += " miles on a full charge."
        print(message)

    def upgrade_battery(self):
        """Upgrade to the 85-kWh battery if not already at that size."""
        # Dead `pass` statement removed.
        if self.battery_size < 85:
            self.battery_size = 85

    def get_battery(self):
        """Print the current battery size."""
        print(self.battery_size)
class ElectricCar(Car):
    """Models aspects of a car, specific to electric vehicles."""

    def __init__(self, manufacturer, model, year):
        """
        Initialize attributes of the parent class.
        Then initialize attributes specific to an electric car.
        """
        super().__init__(manufacturer, model, year)
        # Composition: the car has-a Battery (default 60 kWh).
        self.battery = Battery()
# Demo: exercise the range / upgrade behaviour of an ElectricCar's battery.
electricCar1 = ElectricCar('BWM', 'hle', 1999)
electricCar1.battery.get_range()
electricCar1.battery.upgrade_battery()
electricCar1.battery.get_range()
electricCar1.battery.get_battery()
# (Translated) Debugging is still hard, especially when nothing raises an error.
"repo_name": "lluxury/pcc_exercise",
"path": "09/upgrade_battery.py",
"copies": "1",
"size": "1709",
"license": "mit",
"hash": 5899417994760458000,
"line_mean": 29.0185185185,
"line_max": 77,
"alpha_frac": 0.5953377167,
"autogenerated": false,
"ratio": 3.7936507936507935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48889885103507935,
"avg_score": null,
"num_lines": null
} |
"""a set of common performance metrics"""
from scipy import stats
from functools import lru_cache
from itertools import chain, repeat, permutations
from operator import itemgetter
from decimal import Decimal
from .general_functions import groupby_unsorted
from . import log
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class _metric_keys(object):
    """
    a class to get all available variable-keys that can be used to
    calculate metrics

    Parameters
    ----------
    fit : rt1 fit object
        the fit whose dataset-, model- and retrieval-results provide
        the selectable keys
    d1 : str, optional
        if given, only attach metric-accessors for this first dataset
    d2 : str or pandas object, optional
        the second dataset (only used if d1 is given)
    auxdat : pandas object, optional
        auxiliary data whose columns are added to the selectable keys
    """

    def __init__(self, fit, d1=None, d2=None, auxdat=None):
        # numeric dataset columns are candidates for metric calculation
        self._datakeys = fit.dataset.select_dtypes(include="number").keys()
        self._modelkeys = ["tot", "surf", "vol"]
        self._retrievalkeys = fit.res_dict.keys()
        if auxdat is not None:
            self._auxkeys = auxdat.keys()
        else:
            self._auxkeys = []
        # the interaction contribution only exists if it was evaluated
        if fit.int_Q is True:
            self._modelkeys += ["inter"]
        # build self._all_keys (unique key -> source name)
        self._check_keys()
        if d1 is not None:
            assert isinstance(d1, str), "d1 must be a string!"
            setattr(self, d1, _RTmetrics0())
            if d2 is None:
                # pair d1 with every available key
                for k2 in self._all_keys:
                    setattr(
                        getattr(self, d1),
                        k2,
                        _RTmetrics1(
                            d1=d1,
                            d2=k2,
                            fit=fit,
                            auxdat=auxdat,
                            all_keys=self._all_keys,
                        ),
                    )
            elif isinstance(d2, str):
                # BUGFIX: _RTmetrics1.__init__ has no defaults for auxdat
                # and all_keys; the original call omitted them and raised
                # a TypeError whenever d2 was passed as a string.
                setattr(
                    getattr(self, d1),
                    d2,
                    _RTmetrics1(
                        d1=d1,
                        d2=d2,
                        fit=fit,
                        auxdat=auxdat,
                        all_keys=self._all_keys,
                    ),
                )
            else:
                # d2 is a data-object; use its .name attribute if present
                # NOTE(review): _RTmetrics1 asserts `d2 in all_keys`, which
                # may not hold for arbitrary objects — confirm intended use
                try:
                    d2name = d2.name
                except AttributeError:
                    d2name = "aux"
                setattr(
                    getattr(self, d1),
                    d2name,
                    _RTmetrics1(
                        d1=d1,
                        d2=d2,
                        fit=fit,
                        auxdat=auxdat,
                        all_keys=self._all_keys,
                    ),
                )
        else:
            # no pre-selection: attach accessors for every ordered pair
            for k1, k2 in permutations(self._all_keys, 2):
                if not hasattr(self, k1):
                    setattr(self, k1, _RTmetrics0())
                setattr(
                    getattr(self, k1),
                    k2,
                    _RTmetrics1(
                        d1=k1,
                        d2=k2,
                        fit=fit,
                        auxdat=auxdat,
                        all_keys=self._all_keys,
                    ),
                )

    def _check_keys(self):
        """Collect all usable keys and disambiguate duplicates.

        Builds ``self._all_keys``: a dict mapping each (possibly suffixed)
        key to the name of its source. Keys that appear in several sources
        get a ``__<source>`` suffix and a warning is logged.
        """
        # a list of all possible keys that can be used for metric calculations
        all_keys = chain(
            self._datakeys, self._modelkeys, self._retrievalkeys, self._auxkeys
        )
        # a list of the "sources" that belong to the keys
        suffix = chain(
            repeat("dataset", len(self._datakeys)),
            repeat("calc_model", len(self._modelkeys)),
            repeat("res_df", len(self._retrievalkeys)),
            repeat("auxdat", len(self._auxkeys)),
        )
        # group by the sources to check if any key is defined more than once
        grps = groupby_unsorted(
            zip(suffix, all_keys), key=itemgetter(1), get=itemgetter(0)
        )
        # make all keys unique (e.g. add a suffix if there are multiple
        # appearances of the same key -> also warn the user of multiple keys!)
        newgrps = dict()
        warnmsg = ""
        for key, val in grps.items():
            if len(val) > 1:
                warnmsg += f'"{key}": '.ljust(15) + "[" + ", ".join(val) + "]"
                warnmsg += "\n"
                for i in val:
                    newgrps[key + "__" + i] = i
            else:
                newgrps[key] = val[0]
        if len(warnmsg) > 0:
            log.warning(
                "the following keys are present in multiple sources!\n" + warnmsg
            )
        self._all_keys = newgrps
class _RTmetrics0(object):
    """a dummy class to pass variable names"""
    # Empty namespace object: _metric_keys attaches one _RTmetrics1
    # accessor per second-key onto instances of this class.

    def __init__(self):
        pass
class _RTmetrics1(object):
    """Accessor that evaluates metrics between two named data sources.

    Parameters
    ----------
    d1, d2 : str
        keys (possibly carrying a "__<source>" disambiguation suffix)
    fit : rt1 fit object
        provides dataset / calc_model / res_df lookups
    auxdat : pandas object or None
        auxiliary data used when a key's source is "auxdat"
    all_keys : dict
        mapping of key -> source name (built by _metric_keys._check_keys)
    """

    def __init__(self, d1, d2, fit, auxdat, all_keys):
        assert d1 in all_keys, f'the key "{d1}" could not be found'
        assert d2 in all_keys, f'the key "{d2}" could not be found'
        # remember the source each key belongs to
        self._s1 = all_keys[d1]
        self._s2 = all_keys[d2]
        # strip the "__<source>" suffix that _check_keys adds to keys
        # that are present in more than one source
        if d1.endswith(f"__{self._s1}"):
            self._d1 = d1[: -len(f"__{self._s1}")]
        else:
            self._d1 = d1
        if d2.endswith(f"__{self._s2}"):
            self._d2 = d2[: -len(f"__{self._s2}")]
        else:
            self._d2 = d2
        self.fit = fit
        self.auxdat = auxdat

    def _get_data(self, source, key):
        # dispatch: fetch the series identified by (source, key);
        # returns None for an unknown source (no else branch)
        if source == "auxdat":
            return self.auxdat[key]
        elif source == "dataset":
            return self.fit.dataset[key]
        elif source == "calc_model" and key == "tot":
            # NOTE(review): the result of calc_model(return_components=False)
            # is indexed with the key here as well — confirm it is a mapping
            return self.fit.calc_model(return_components=False)[key]
        elif source == "calc_model":
            return self.fit.calc_model(return_components=True)[key]
        elif source == "res_df":
            return self.fit.res_df[key]

    # NOTE(review): @property stacked on @lru_cache() caches per-`self`
    # and keeps instances alive for the cache lifetime (flake8-bugbear
    # B019); functools.cached_property would avoid that — confirm before
    # changing, as cache identity semantics would differ.
    @property
    @lru_cache()
    def d1(self):
        # lazily fetched (and cached) first data series
        d1 = self._get_data(self._s1, self._d1)
        return d1

    @property
    @lru_cache()
    def d2(self):
        # lazily fetched (and cached) second data series
        d2 = self._get_data(self._s2, self._d2)
        return d2

    @property
    @lru_cache()
    def _unify_idx_data(self):
        # align the two series on a common index if their lengths differ
        if len(self.d1) != len(self.d2):
            log.warning(
                f'index of "{self._d1}" and "{self._d2}" is not '
                + "the same! -> a concatenation is performed!"
            )
            # try to unify the index
            df = pd.concat([self.d1, self.d2], axis=1, copy=False)
            d1 = df[self._d1].dropna()
            d2 = df[self._d2].dropna()
            assert len(d1) == len(d2), (
                "the length of the 2 datasets is "
                + "not the same!"
                + f"({len(self.d1)} != {len(d2)})"
            )
            return d1, d2
        else:
            return self.d1, self.d2

    # thin wrappers delegating to the static methods of RTmetrics,
    # always operating on the index-unified data pair
    @property
    def pearson(self):
        return RTmetrics.pearson(*self._unify_idx_data)

    @property
    def spearman(self):
        return RTmetrics.spearman(*self._unify_idx_data)

    @property
    def linregress(self):
        return RTmetrics.linregress(*self._unify_idx_data)

    @property
    def rmsd(self):
        return RTmetrics.rmsd(*self._unify_idx_data)

    @property
    def ub_rmsd(self):
        return RTmetrics.ub_rmsd(*self._unify_idx_data)

    @property
    def bias(self):
        return RTmetrics.bias(*self._unify_idx_data)

    @property
    def mae(self):
        return RTmetrics.mae(*self._unify_idx_data)

    @property
    def mape(self):
        return RTmetrics.mape(*self._unify_idx_data)

    @property
    def std_ratio(self):
        return RTmetrics.std_ratio(*self._unify_idx_data)

    @property
    def allmetrics(self):
        return RTmetrics.allmetrics(*self._unify_idx_data)

    @property
    def metrics_table(self):
        return RTmetrics.metrics_table(*self._unify_idx_data)

    def scatterplot(self):
        # plot d1 vs d2 with an accompanying metrics table
        RTmetrics.scatterplot(*self._unify_idx_data, self._d1, self._d2)
class RTmetrics(object):
    """Static collection of comparison metrics for two pandas Series."""

    # registry of metric methods used for allmetrics and metrics_table
    # enter the function name of a new metric in here
    # functions listed in here must have two pandas series d1, d2 as parameters
    metrics_registry = [
        "pearson",
        "spearman",
        "linregress",
        "rmsd",
        "ub_rmsd",
        "bias",
        "mae",
        "mape",
        "std_ratio",
    ]

    def __init__(self):
        pass

    @staticmethod
    def pearson(d1, d2):
        """
        evaluates pearson correlation coefficient of given series d1 and d2

        Parameters
        ----------
        d1 : pandas.Series
            time series 1
        d2 : pandas.Series
            time series 2

        Returns
        -------
        float
            pearson correlation coefficient
        """
        return d1.corr(d2, method="pearson")

    @staticmethod
    def spearman(d1, d2):
        """
        evaluates spearman's rank correlation coefficient of given series
        d1 and d2

        Parameters
        ----------
        d1 : pandas.Series
            time series 1
        d2 : pandas.Series
            time series 2

        Returns
        -------
        float
            spearman's rank correlation coefficient
        """
        return d1.corr(d2, method="spearman")

    @staticmethod
    def linregress(d1, d2):
        """
        evaluates a linear least-squares regression of given series d1 and d2
        (docstring fixed: previously copied from `pearson`)

        Parameters
        ----------
        d1 : pandas.Series
            time series 1
        d2 : pandas.Series
            time series 2

        Returns
        -------
        dictionary {string: float}
            float values for slope, intercept, pearson, pvalue, stderr
        """
        return dict(
            zip(
                ["slope", "intercept", "pearson", "pvalue", "stderr"],
                stats.linregress(d1, d2),
            )
        )

    @staticmethod
    def rmsd(d1, d2):
        """
        evaluates root mean square deviation of given series d1 and d2

        Parameters
        ----------
        d1 : pandas.Series
            time series 1
        d2 : pandas.Series
            time series 2

        Returns
        -------
        float
            root mean square deviation
        """
        diff_sq = d1.subtract(d2).pow(2)
        return np.sqrt(diff_sq.mean())

    @staticmethod
    def ub_rmsd(d1, d2):
        """
        evaluates unbiased root mean square deviation of given series d1 and d2
        (both series are mean-centered before comparison)

        Parameters
        ----------
        d1 : pandas.Series
            time series 1
        d2 : pandas.Series
            time series 2

        Returns
        -------
        float
            unbiased root mean square deviation
        """
        d1_corr = d1 - d1.mean()
        d2_corr = d2 - d2.mean()
        diff_sq = d1_corr.subtract(d2_corr).pow(2)
        return np.sqrt(diff_sq.mean())

    @staticmethod
    def bias(d1, d2):
        """
        evaluates bias of given series d1 and d2: mu_1 - mu_2

        Parameters
        ----------
        d1 : pandas.Series
            time series 1
        d2 : pandas.Series
            time series 2

        Returns
        -------
        float
            bias
        """
        return d1.mean() - d2.mean()

    @staticmethod
    def mae(d1, d2):
        """
        evaluates mean absolute error of given Series d1 and d2

        Parameters
        ----------
        d1 : pandas.Series
            time series 1
        d2 : pandas.Series
            time series 2

        Returns
        -------
        float
            mean absolute error
        """
        abs_diff = d1.subtract(d2).abs()
        return abs_diff.mean()

    @staticmethod
    def mape(d1, d2):
        """
        evaluates mean absolute percentage error of given Series d1 and d2
        with respect to d1 (zeros in d1 yield inf contributions)

        Parameters
        ----------
        d1 : pandas.Series
            time series 1
        d2 : pandas.Series
            time series 2

        Returns
        -------
        float
            mean absolute percentage error
        """
        abs_rel_diff = d1.subtract(d2).div(d1).abs()
        return abs_rel_diff.mean()

    @staticmethod
    def std_ratio(d1, d2):
        """
        evaluates standard deviation ratio of given Series d1 and d2:
        sigma_1 / sigma_2

        Parameters
        ----------
        d1 : pandas.Series
            time series 1
        d2 : pandas.Series
            time series 2

        Returns
        -------
        float
            standard deviation ratio
        """
        return d1.std() / d2.std()

    @classmethod
    def allmetrics(cls, d1, d2):
        """
        run all metrics specified in RTmetrics.metrics_registry of given
        Series d1 and d2; metrics have to be specified by function name
        in metrics_registry

        Parameters
        ----------
        d1 : pandas.Series
            time series 1
        d2 : pandas.Series
            time series 2

        Returns
        -------
        dictionary {string: (float or dictionary)}
            function/metric name and corresponding value
        """
        return {func: getattr(cls, func)(d1, d2) for func in cls.metrics_registry}

    @classmethod
    def scatterplot(cls, d1, d2, d1_name, d2_name):
        """
        draws scatterplot of two given series d1 and d2 and lists all
        metrics in a table beside the scatterplot

        Parameters
        ----------
        d1 : pandas.Series
            time series 1
        d2 : pandas.Series
            time series 2
        d1_name : string
            name of time series 1
        d2_name : string
            name of time series 2

        Returns
        -------
        scatterplot figure object
        """
        # create plot and axes
        fig, (ax1, ax2) = plt.subplots(
            1, 2, figsize=(14, 6), gridspec_kw={"width_ratios": [5, 1]}
        )
        # create scatterplot
        ax1.scatter(d1, d2)
        ax1.set_xlabel(d1_name)
        ax1.set_ylabel(d2_name)
        # get all metrics and define lists for table data
        metrics_dict = cls.allmetrics(d1, d2)
        metric_names = []
        metric_values = []
        # flatten metrics array and format float values
        for key, val in cls._flatten_dictionary(metrics_dict).items():
            metric_names.append(key)
            if isinstance(val, float):
                metric_values.append("%1.3f" % val)
            else:
                metric_values.append(val)
        # add another dimension for usage in table
        two_dim_metric_values = [[metric_value] for metric_value in metric_values]
        # remove border to only show table itself
        ax2.axis("off")
        # plot and create table object
        metrics_table = ax2.table(
            cellText=two_dim_metric_values,
            rowLabels=metric_names,
            colLabels=["Value"],
            loc="center",
        )
        # scale for higher cells
        metrics_table.scale(1, 1.5)
        plt.show()
        return fig

    @classmethod
    def _flatten_dictionary(cls, dictionary, depth=0):
        """
        recursively flattens a dictionary, returning only numeric
        (float or int) values; keys of sub-dictionaries are prefixed
        with a '-' according to the depth

        Parameters
        ----------
        dictionary : dict
            dictionary that should be flattened, should contain numeric
            or dict values
        depth : integer
            recursion depth used for prefixing

        Returns
        -------
        dict
            flat dictionary containing numbers or strings
        """
        items = []
        for key, val in dictionary.items():
            new_key = "-" * depth + " " + key
            if isinstance(val, dict):
                # sub-dictionary: emit a header row, then recurse
                items.append((new_key, ""))
                items.extend(cls._flatten_dictionary(val, depth + 1).items())
            elif isinstance(val, (float, int)):
                # FIX: also keep int values — consistent with
                # _metrics_table_dict_entry which accepts (float, int);
                # previously ints were silently dropped from the table
                items.append((new_key, val))
        return dict(items)

    @classmethod
    def metrics_table(cls, d1, d2):
        """
        prints a table with all metrics and values returned from the
        allmetrics method for given series d1 and d2; dictionaries
        returned by allmetrics are handled recursively by
        _metrics_table_dict_entry

        Parameters
        ----------
        d1 : pandas.Series
            time series 1
        d2 : pandas.Series
            time series 2

        Returns
        -------
        void
        """
        metrics_dict = cls.allmetrics(d1, d2)
        header = "-" * 11 + " METRICS " + "-" * 11 + "\n"
        columns = " METRIC".ljust(14) + "| VALUE".ljust(15) + " |" + "\n"
        entries = cls._metrics_table_dict_entry(metrics_dict)
        outstr = header + columns + entries
        print(outstr)

    @classmethod
    def _metrics_table_dict_entry(cls, metrics_dict, depth=0):
        """
        recursively generates entries string for dictionaries in a metrics
        table; entries of sub-dictionaries are indented

        Parameters
        ----------
        metrics_dict : dictionary
            metrics dictionary to generate string from
        depth : integer
            recursion depth

        Returns
        -------
        string
            multiline string containing all numeric entries of a
            dictionary and its sub-dictionaries
        """
        entries = ""
        for key, val in metrics_dict.items():
            depth_offset = 2 * depth
            metric = f"{key:<{13 - depth_offset}}"
            if isinstance(val, (float, int)):
                # switch to scientific notation for very large magnitudes
                valstr = (
                    f"{val:.6f}".ljust(14)
                    if abs(val) < 1e5
                    else f"{Decimal(val):.6E}".ljust(14)
                )
                entries += " " + "--" * depth + f"{metric}| {valstr}|\n"
            elif isinstance(val, dict):
                entries += (
                    " " + "--" * depth + f"{metric}|".ljust(29 - depth_offset) + "|\n"
                )
                entries += cls._metrics_table_dict_entry(val, depth + 1)
        return entries
| {
"repo_name": "TUW-GEO/rt1",
"path": "rt1/rtmetrics.py",
"copies": "1",
"size": "17538",
"license": "apache-2.0",
"hash": -6301392051732861000,
"line_mean": 26.1066460587,
"line_max": 113,
"alpha_frac": 0.5023947999,
"autogenerated": false,
"ratio": 4.152971820980346,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00027914813876545735,
"num_lines": 647
} |
"""A set of constants and methods to manage permissions and security"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from flask_appbuilder.security.sqla import models as ab_models
from superset import conf, db, sm
from superset.models import core as models
from superset.connectors.connector_registry import ConnectorRegistry
# Model views only admins may modify (read-only for everyone else).
READ_ONLY_MODEL_VIEWS = {
    'DatabaseAsync',
    'DatabaseView',
    'DruidClusterModelView',
}
# Model views read-only for Gamma users (superset of the admin set above).
GAMMA_READ_ONLY_MODEL_VIEWS = {
    'SqlMetricInlineView',
    'TableColumnInlineView',
    'TableModelView',
    'DruidColumnInlineView',
    'DruidDatasourceModelView',
    'DruidMetricInlineView',
} | READ_ONLY_MODEL_VIEWS
# View menus accessible only to admins.
ADMIN_ONLY_VIEW_MENUS = {
    'AccessRequestsModelView',
    'Manage',
    'SQL Lab',
    'Queries',
    'Refresh Druid Metadata',
    'ResetPasswordView',
    'RoleModelView',
    'Security',
    'UserDBModelView',
}
# Permission names reserved for admins.
# (fixed: a duplicate 'can_override_role_permissions' entry was removed)
ADMIN_ONLY_PERMISSIONS = {
    'all_database_access',
    'can_sql_json',  # TODO: move can_sql_json to sql_lab role
    'can_override_role_permissions',
    'can_sync_druid_source',
    'can_approve',
    'can_update_role',
}
# Permissions that constitute read-only access.
READ_ONLY_PERMISSION = {
    'can_show',
    'can_list',
}
# Permissions restricted to Alpha users and above
# (set literals instead of set([...]) — same values).
ALPHA_ONLY_PERMISSIONS = {
    'muldelete',
    'all_datasource_access',
}
# Permissions that refer to a specific object rather than a view.
OBJECT_SPEC_PERMISSIONS = {
    'database_access',
    'schema_access',
    'datasource_access',
    'metric_access',
}
def merge_perm(sm, permission_name, view_menu_name):
    """Create a permission/view-menu pair unless it already exists.

    Implementation copied from sm.find_permission_view_menu.
    TODO: use sm.find_permission_view_menu once issue
    https://github.com/airbnb/superset/issues/1944 is resolved.
    """
    permission = sm.find_permission(permission_name)
    view_menu = sm.find_view_menu(view_menu_name)
    existing = None
    if permission and view_menu:
        existing = (
            sm.get_session.query(sm.permissionview_model)
            .filter_by(permission=permission, view_menu=view_menu)
            .first())
    if not existing and permission_name and view_menu_name:
        sm.add_permission_view_menu(permission_name, view_menu_name)
def is_user_defined_permission(perm):
    """Return True for permissions that target a specific object."""
    permission_name = perm.permission.name
    return permission_name in OBJECT_SPEC_PERMISSIONS
def get_or_create_main_db():
    """Return the 'main' Database record, creating it on first use."""
    logging.info("Creating database reference")
    query = db.session.query(models.Database).filter_by(database_name='main')
    dbobj = query.first()
    if dbobj:
        return dbobj
    # no 'main' database yet -> create and persist it
    dbobj = models.Database(database_name="main")
    dbobj.set_sqlalchemy_uri(conf.get("SQLALCHEMY_DATABASE_URI"))
    dbobj.expose_in_sqllab = True
    dbobj.allow_run_sync = True
    db.session.add(dbobj)
    db.session.commit()
    return dbobj
def is_admin_only(pvm):
    """Return True if this permission-view is restricted to admins."""
    # write operations on read-only model views are admin-only
    write_on_read_only = (
        pvm.view_menu.name in READ_ONLY_MODEL_VIEWS
        and pvm.permission.name not in READ_ONLY_PERMISSION)
    if write_on_read_only:
        return True
    return (pvm.view_menu.name in ADMIN_ONLY_VIEW_MENUS or
            pvm.permission.name in ADMIN_ONLY_PERMISSIONS)
def is_alpha_only(pvm):
    """Return True if this permission-view requires at least the Alpha role."""
    # write operations on gamma-read-only model views need Alpha
    write_on_gamma_read_only = (
        pvm.view_menu.name in GAMMA_READ_ONLY_MODEL_VIEWS
        and pvm.permission.name not in READ_ONLY_PERMISSION)
    if write_on_gamma_read_only:
        return True
    return pvm.permission.name in ALPHA_ONLY_PERMISSIONS
def is_admin_pvm(pvm):
    """Admins receive every permission-view that is not object-specific."""
    user_defined = is_user_defined_permission(pvm)
    return not user_defined
def is_alpha_pvm(pvm):
    """Alpha role: everything except object-specific and admin-only pvms."""
    excluded = is_user_defined_permission(pvm) or is_admin_only(pvm)
    return not excluded
def is_gamma_pvm(pvm):
    """Gamma role: what remains after removing object-specific,
    admin-only and alpha-only permission-views."""
    if is_user_defined_permission(pvm):
        return False
    if is_admin_only(pvm):
        return False
    return not is_alpha_only(pvm)
def is_sql_lab_pvm(pvm):
    """Return True if this permission-view belongs to the SQL Lab role."""
    if pvm.view_menu.name == 'SQL Lab':
        return True
    sql_lab_permissions = ('can_sql_json', 'can_csv', 'can_search_queries')
    return pvm.permission.name in sql_lab_permissions
def is_granter_pvm(pvm):
    """Return True for permissions that allow granting access to others."""
    granting = ('can_override_role_permissions', 'can_approve')
    return pvm.permission.name in granting
def set_role(role_name, pvm_check):
    """Sync the permissions of *role_name* using predicate *pvm_check*."""
    logging.info("Syncing {} perms".format(role_name))
    sesh = sm.get_session()
    all_pvms = sesh.query(ab_models.PermissionView).all()
    # only consider fully-formed permission-views
    valid_pvms = (p for p in all_pvms if p.permission and p.view_menu)
    role = sm.add_role(role_name)
    role.permissions = [p for p in valid_pvms if pvm_check(p)]
    sesh.merge(role)
    sesh.commit()
def create_custom_permissions():
    """Register superset-specific global (non-view) permissions."""
    for perm_name in ('all_datasource_access', 'all_database_access'):
        merge_perm(sm, perm_name, perm_name)
def create_missing_perms():
    """Creates missing perms for datasources, schemas and metrics"""
    logging.info(
        "Fetching a set of all perms to lookup which ones are missing")
    existing = set()
    for pv in sm.get_session.query(sm.permissionview_model).all():
        if pv.permission and pv.view_menu:
            existing.add((pv.permission.name, pv.view_menu.name))

    def ensure(perm_name, view_menu_name):
        """Create permission view menu only if it doesn't exist"""
        # params renamed for clarity: the first element of the stored
        # tuples is the permission name, the second the view-menu name
        if (perm_name and view_menu_name and
                (perm_name, view_menu_name) not in existing):
            merge_perm(sm, perm_name, view_menu_name)

    logging.info("Creating missing datasource permissions.")
    for datasource in ConnectorRegistry.get_all_datasources(db.session):
        ensure('datasource_access', datasource.get_perm())
        ensure('schema_access', datasource.schema_perm)

    logging.info("Creating missing database permissions.")
    for database in db.session.query(models.Database).all():
        ensure('database_access', database.perm)

    logging.info("Creating missing metrics permissions")
    for datasource_class in ConnectorRegistry.sources.values():
        for metric in db.session.query(datasource_class.metric_class).all():
            if metric.is_restricted:
                ensure('metric_access', metric.perm)
def sync_role_definitions():
    """Inits the Superset application with security roles and such"""
    logging.info("Syncing role definition")
    get_or_create_main_db()
    create_custom_permissions()
    # Creating default roles and their membership predicates
    default_roles = (
        ('Admin', is_admin_pvm),
        ('Alpha', is_alpha_pvm),
        ('Gamma', is_gamma_pvm),
        ('granter', is_granter_pvm),
        ('sql_lab', is_sql_lab_pvm),
    )
    for role_name, check in default_roles:
        set_role(role_name, check)
    if conf.get('PUBLIC_ROLE_LIKE_GAMMA', False):
        set_role('Public', is_gamma_pvm)
    create_missing_perms()
    # commit role and view menu updates
    sm.get_session.commit()
| {
"repo_name": "nekia/incubator-superset-dev",
"path": "superset/security.py",
"copies": "3",
"size": "6546",
"license": "apache-2.0",
"hash": -7094708657664469000,
"line_mean": 29.0275229358,
"line_max": 78,
"alpha_frac": 0.6729300336,
"autogenerated": false,
"ratio": 3.365552699228792,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 218
} |
"""A set of datetime.datetime and datetime.date related functions"""
from __future__ import absolute_import
from datetime import timedelta, date, datetime
def start_of_day(val):
    """
    Return a new datetime.datetime object at midnight of the given day.

    :param val: Date to convert
    :type val: datetime.datetime | datetime.date
    :rtype: datetime.datetime
    """
    # exact-type check on purpose: a plain date becomes a midnight datetime,
    # while datetime (and its subclasses) keep tzinfo via replace()
    if type(val) == date:
        return datetime(val.year, val.month, val.day)
    return val.replace(hour=0, minute=0, second=0, microsecond=0)
def end_of_day(val):
    """
    Return a new datetime.datetime object at the last microsecond of the day.

    :param val: Date to convert
    :type val: datetime.datetime | datetime.date
    :rtype: datetime.datetime
    """
    if type(val) == date:
        val = datetime.fromordinal(val.toordinal())
    # midnight of the same day, plus one day minus one microsecond
    midnight = val.replace(hour=0, minute=0, second=0, microsecond=0)
    return midnight + timedelta(days=1, microseconds=-1)
def start_of_month(val):
    """
    Return a new datetime.datetime object at midnight on the first day
    of the month.

    :param val: Date to convert
    :type val: datetime.datetime | datetime.date
    :rtype: datetime.datetime
    """
    if type(val) == date:
        val = datetime.fromordinal(val.toordinal())
    return val.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
def end_of_month(val):
    """
    Return a new datetime.datetime object at the last microsecond of
    the month.

    :param val: Date to convert
    :type val: datetime.datetime | datetime.date
    :rtype: datetime.datetime
    """
    if type(val) == date:
        val = datetime.fromordinal(val.toordinal())
    first = val.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
    # first day of the *next* month, minus one microsecond
    if val.month == 12:
        next_month = first.replace(year=val.year + 1, month=1)
    else:
        next_month = first.replace(month=val.month + 1)
    return next_month - timedelta(microseconds=1)
def _set_week_day(val, week_day, val_weekday, sign):
if val_weekday == week_day:
return val
diff = sign * (val_weekday - week_day)
diff = -diff if diff < 0 else 7 - diff
val += sign * timedelta(days=diff)
return val
def set_next_week_day(val, week_day, iso=False):
    """
    Set week day.
    New date will be greater or equal than input date.

    :param val: datetime or date
    :type val: datetime.datetime | datetime.date
    :param week_day: Week day to set
    :type week_day: int
    :param iso: week_day in ISO format, or not
    :type iso: bool
    :return: datetime.datetime | datetime.date
    """
    current = val.isoweekday() if iso else val.weekday()
    if current == week_day:
        return val
    diff = current - week_day
    # move forward within the week, or wrap to next week
    return val + timedelta(days=-diff if diff < 0 else 7 - diff)
def set_prev_week_day(val, week_day, iso=False):
    """
    Set week day.
    New date will be less or equal than input date.

    :param val: datetime or date
    :type val: datetime.datetime | datetime.date
    :param week_day: Week day to set
    :type week_day: int
    :param iso: week_day in ISO format, or not
    :type iso: bool
    :return: datetime.datetime | datetime.date
    """
    current = val.isoweekday() if iso else val.weekday()
    if current == week_day:
        return val
    diff = week_day - current
    # move backward within the week, or wrap to the previous week
    days = -diff if diff < 0 else 7 - diff
    return val - timedelta(days=days)
| {
"repo_name": "tomi77/python-t77-date",
"path": "t77_date/datetime.py",
"copies": "1",
"size": "3132",
"license": "mit",
"hash": -7109223718385193000,
"line_mean": 29.4077669903,
"line_max": 77,
"alpha_frac": 0.630587484,
"autogenerated": false,
"ratio": 3.6208092485549135,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47513967325549133,
"avg_score": null,
"num_lines": null
} |
"""A set of datetime.timedelta related functions"""
from __future__ import absolute_import, division
import re
from datetime import timedelta
import six
from .constants import MICROSECONDS_IN_SECOND, SECONDS_IN_DAY, \
SECONDS_IN_HOUR, SECONDS_IN_MINUTE, HOURS_IN_DAY, INTERVAL_REGEX_STR
INTERVAL_REGEX = re.compile(INTERVAL_REGEX_STR, re.VERBOSE)
def timedelta_to_seconds(value, with_microseconds=False):
    """
    Convert datetime.timedelta to seconds

    :param value: timedelta to convert
    :type value: datetime.timedelta
    :param with_microseconds: include the fractional (microsecond) part
    :type with_microseconds: bool
    :return: seconds/seconds with microseconds or None if value is None
    :rtype: int/float/None
    :raise: TypeError when value is not timedelta
    """
    if value is None:
        return None
    if not isinstance(value, timedelta):
        raise TypeError('value must be a datetime.timedelta object')
    whole_seconds = value.days * SECONDS_IN_DAY + value.seconds
    if with_microseconds:
        return whole_seconds + value.microseconds / MICROSECONDS_IN_SECOND
    return whole_seconds
def timedelta_to_str(value, with_microseconds=False):
    """
    String representation of datetime.timedelta ("HH:MM:SS[.ffffff]")

    :param value: timedelta to convert
    :type value: datetime.timedelta
    :param with_microseconds: append the microsecond part
    :type with_microseconds: bool
    :return: String representation of datetime.timedelta or None if
        value is None
    :rtype: string/None
    :raise: TypeError when value is not timedelta
    """
    if value is None:
        return None
    if not isinstance(value, timedelta):
        raise TypeError('value must be a datetime.timedelta object')
    # fold whole days into the hour count
    total_hours, rest = divmod(value.seconds, SECONDS_IN_HOUR)
    total_hours += value.days * HOURS_IN_DAY
    minutes, seconds = divmod(rest, SECONDS_IN_MINUTE)
    if with_microseconds:
        return '%02d:%02d:%02d.%06d' % (total_hours, minutes, seconds,
                                        value.microseconds)
    return '%02d:%02d:%02d' % (total_hours, minutes, seconds)
def parse_timedelta(value):
    """
    Parses a string and return a datetime.timedelta.

    :param value: string to parse
    :type value: str
    :return: timedelta object or None if value is None
    :rtype: timedelta/None
    :raise: TypeError when value is not string
    :raise: ValueError when value is not proper timedelta string
    """
    if value is None:
        return None
    if not isinstance(value, six.string_types):
        raise TypeError('value must be a string type')
    match = INTERVAL_REGEX.search(value)
    if not match:
        raise ValueError("Value '%s' doesn't appear to be a valid timedelta "
                         "string" % value)
    parts = match.groupdict()
    # unmatched groups come back as None -> treat them as zero
    return timedelta(**{name: int(parts[name] or 0) for name in parts})
| {
"repo_name": "tomi77/python-t77-date",
"path": "t77_date/timedelta.py",
"copies": "1",
"size": "2823",
"license": "mit",
"hash": 3443831297315643000,
"line_mean": 30.7191011236,
"line_max": 79,
"alpha_frac": 0.672334396,
"autogenerated": false,
"ratio": 4.103197674418604,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5275532070418605,
"avg_score": null,
"num_lines": null
} |
"""A set of dateutil.tz related functions"""
from __future__ import print_function, division
from datetime import datetime
from dateutil.tz import tzlocal, tzutc
def _convert(value, tzto, defaulttz):
"""Convert datetime.datetime object between timezones"""
if not isinstance(value, datetime):
raise ValueError('value must be a datetime.datetime object')
if value.tzinfo is None:
value = value.replace(tzinfo=defaulttz)
return value.astimezone(tzto)
def to_local(value, defaulttz=None):
    """Convert datetime.datetime time to local time zone

    If value doesn't have tzinfo, then defaulttz is set.
    Default value of defaulttz is UTC.
    """
    fallback = tzutc() if defaulttz is None else defaulttz
    return _convert(value, tzlocal(), fallback)
def to_utc(value, defaulttz=None):
    """Convert datetime.datetime time to UTC

    If value doesn't have tzinfo, then defaulttz is set.
    Default value of defaulttz is local time zone.
    """
    fallback = tzlocal() if defaulttz is None else defaulttz
    return _convert(value, tzutc(), fallback)
| {
"repo_name": "tomi77/python-t77-date",
"path": "t77_date/tz.py",
"copies": "1",
"size": "1093",
"license": "mit",
"hash": -479674576662165250,
"line_mean": 27.0256410256,
"line_max": 68,
"alpha_frac": 0.6944190302,
"autogenerated": false,
"ratio": 4.048148148148148,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 39
} |
""" a set of defs used to manage xpi
"""
import os
import shutil
import simplejson
import subprocess
import time
import waffle
import commonware.log
from statsd import statsd
from django.conf import settings
#from django.utils.translation import ugettext as _
log = commonware.log.getLogger('f.xpi_utils')
def info_write(path, status, message, hashtag=None):
    """Serialize build status information to *path* as JSON."""
    payload = dict(status=status, message=str(message))
    if hashtag:
        payload['hashtag'] = hashtag
    with open(path, 'w') as handle:
        handle.write(simplejson.dumps(payload))
def sdk_copy(sdk_source, sdk_dir):
    """Copy the SDK tree from *sdk_source* into *sdk_dir*."""
    log.debug("Copying SDK from (%s) to (%s)" % (sdk_source, sdk_dir))
    with statsd.timer('xpi.copy'):
        if not os.path.isdir(sdk_dir):
            # destination missing: copy the whole tree at once
            shutil.copytree(sdk_source, sdk_dir)
            return
        # destination exists: merge entries one by one
        for entry in os.listdir(sdk_source):
            src = os.path.join(sdk_source, entry)
            if os.path.isdir(src):
                shutil.copytree(src, os.path.join(sdk_dir, entry))
            else:
                shutil.copy(src, sdk_dir)
def build(sdk_dir, package_dir, filename, hashtag, tstart=None, options=None,
          temp_dir=None):
    """Build xpi from SDK with prepared packages in sdk_dir.

    :params:
    * sdk_dir (String) SDK directory
    * package_dir (string) dir of the Add-on package
    * filename (string) XPI will be build with this name
    * hashtag (string) XPI will be copied to a file which name is created
      using the unique hashtag
    * tstart (float) time.time() of when the overall process started
      (used for the 'prep' timing statistic)
    :returns: (list) ``cfx xpi`` response where ``[0]`` is ``stdout`` and
      ``[1]`` ``stderr``
    """
    t1 = time.time()
    if not temp_dir:
        temp_dir = sdk_dir
    # create XPI
    os.chdir(package_dir)
    # assemble the cfx command line (cfx is the SDK's build tool)
    cfx = [settings.PYTHON_EXEC, '%s/bin/cfx' % sdk_dir,
           '--binary=%s' % settings.XULRUNNER_BINARY,
           '--keydir=%s/%s' % (sdk_dir, settings.KEYDIR), 'xpi']
    if options:
        cfx.append(options)
    log.debug(cfx)
    # status JSON is written next to the produced XPI in XPI_TARGETDIR
    info_targetfilename = "%s.json" % hashtag
    info_targetpath = os.path.join(settings.XPI_TARGETDIR, info_targetfilename)
    # subprocess environment: expose the SDK like an activated virtualenv
    env = dict(PATH='%s/bin:%s' % (sdk_dir, os.environ['PATH']),
               DISPLAY=os.environ.get('DISPLAY', ':0'),
               VIRTUAL_ENV=sdk_dir,
               CUDDLEFISH_ROOT=sdk_dir,
               PYTHONPATH=os.path.join(sdk_dir, 'python-lib'))
    try:
        process = subprocess.Popen(cfx, shell=False, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE, env=env)
        response = process.communicate()
    # Python 2 except syntax (this module is Python 2 only)
    except subprocess.CalledProcessError, err:
        info_write(info_targetpath, 'error', str(err), hashtag)
        log.critical("[xpi:%s] Failed to build xpi: %s. Command(%s)" % (
            hashtag, str(err), cfx))
        shutil.rmtree(temp_dir)
        raise
    # workaround: some SDK versions report errors on stdout; if no XPI
    # was produced, move stdout into the stderr slot of the response
    if (waffle.switch_is_active('SDKErrorInStdOutWorkaround') and
            not os.path.exists(os.path.join(package_dir, '%s.xpi' % filename))):
        badresponse = response[0]
        response = ['', '']
        response[1] = badresponse
    xpi_path = os.path.join(package_dir, "%s.xpi" % filename)
    if process.returncode != 0:
        # build failed: record the error and clean up the temp dir
        info_write(info_targetpath, 'error', response[1], hashtag)
        log.critical("[xpi:%s] Failed to build xpi., stderr: %s" % (
            hashtag, response[1]))
        shutil.rmtree(temp_dir)
        return response
    t2 = time.time()
    # XPI: move the XPI created to the XPI_TARGETDIR (local to NFS)
    xpi_targetfilename = "%s.xpi" % hashtag
    xpi_targetpath = os.path.join(settings.XPI_TARGETDIR, xpi_targetfilename)
    try:
        shutil.copy(xpi_path, xpi_targetpath)
    except IOError, err:
        info_write(info_targetpath, 'error',
                   'XPI file can not be copied.',
                   hashtag)
        log.critical("[xpi:%s] Failed to copy xpi.\n%s" % (hashtag, str(err)))
        shutil.rmtree(temp_dir)
        return response
    shutil.rmtree(temp_dir)
    # NOTE(review): `ret` is assembled here but never returned — `response`
    # is returned below instead; confirm which one callers expect
    ret = [xpi_targetfilename]
    ret.extend(response)
    # timing statistics (milliseconds)
    t3 = time.time()
    copy_xpi_time = (t3 - t2) * 1000
    build_time = (t2 - t1) * 1000
    preparation_time = ((t1 - tstart) * 1000) if tstart else 0
    statsd.timing('xpi.build.prep', preparation_time)
    statsd.timing('xpi.build.build', build_time)
    statsd.timing('xpi.build.copyresult', copy_xpi_time)
    log.info('[xpi:%s] Created xpi: %s (prep time: %dms) (build time: %dms) '
             '(copy xpi time: %dms)' % (hashtag, xpi_targetpath,
                                        preparation_time, build_time,
                                        copy_xpi_time))
    info_write(info_targetpath, 'success', response[0], hashtag)
    return response
def remove(path):
    """Delete *path* with os.remove.

    NOTE(review): the original docstring and log message say "directory",
    but os.remove() deletes a single FILE and raises OSError on a
    directory — confirm intent (shutil.rmtree would be needed for a tree).
    """
    log.debug("Removing directory (%s)" % path)
    os.remove(path)
def get_queued_cache_key(hashtag, request=None):
    """Build the cache key used for 'queued' timing of a build *hashtag*.

    Uses the session key of *request* if one is given, else None.
    """
    session_key = request.session.session_key if request else None
    return 'xpi:timing:queued:%s:%s' % (hashtag, session_key)
| {
"repo_name": "mozilla/FlightDeck",
"path": "apps/xpi/xpi_utils.py",
"copies": "1",
"size": "5076",
"license": "bsd-3-clause",
"hash": 1465651140780257300,
"line_mean": 32.1764705882,
"line_max": 80,
"alpha_frac": 0.5914105595,
"autogenerated": false,
"ratio": 3.375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44664105595000003,
"avg_score": null,
"num_lines": null
} |
""" A set of descriptors that document intended types for attributes on
classes and implement convenience behaviors like default values, etc.
"""
from __future__ import print_function
import re
import datetime
import dateutil.parser
from importlib import import_module
from copy import copy
import inspect
import logging
logger = logging.getLogger(__name__)
from six import integer_types, string_types, add_metaclass, iteritems
import numpy as np
from . import enums, colors
from .utils import nice_join
bokeh_integer_types = (np.int8, np.int16, np.int32, np.int64) + integer_types
# used to indicate properties that are not set (vs null, None, etc)
class _NotSet(object):
    # Sentinel class: distinct from None so that "explicitly set to None"
    # can be distinguished from "never set".
    pass
class Property(object):
    ''' Base class for all type properties.

    Implements the Python descriptor protocol: values are stored on the
    owning object under a shadow attribute name ("_" + self.name) and
    change notifications are dispatched through the owner's ``_trigger``
    mechanism when available.
    '''

    def __init__(self, default=None):
        """ This is how the descriptor is created in the class declaration """
        # validate eagerly so a bad default fails at class-definition time
        self.validate(default)
        self.default = default
        # This gets set by the class decorator at class creation time
        self.name = "unnamed"

    def __str__(self):
        return self.__class__.__name__

    @property
    def _name(self):
        # name of the shadow attribute that stores the actual value
        return "_" + self.name

    @classmethod
    def autocreate(cls, name=None):
        """ Called by the metaclass to create a
        new instance of this descriptor
        if the user just assigned it to a property without trailing
        parentheses.
        """
        return cls()

    def matches(self, new, old):
        # Equality check guarded against values that raise on comparison.
        # XXX: originally this code warned about not being able to compare values, but that
        # doesn't make sense, because most comparisons involving numpy arrays will fail with
        # ValueError exception, thus warning about inevitable.
        try:
            return new == old
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception as e:
            logger.debug("could not compare %s and %s for property %s (Reason: %s)", new, old, self.name, e)
            return False

    def transform(self, value):
        # hook for subclasses: coerce the incoming value (identity here)
        return value

    def validate(self, value):
        # hook for subclasses: raise ValueError for invalid values
        pass

    def is_valid(self, value):
        # boolean wrapper around validate()
        try:
            self.validate(value)
        except ValueError:
            return False
        else:
            return True

    def __get__(self, obj, type=None):
        # fall back to the declared default when the instance has no value
        return getattr(obj, self._name, self.default)

    def __set__(self, obj, value):
        # validate -> transform -> change detection -> store -> notify
        self.validate(value)
        value = self.transform(value)
        old = self.__get__(obj)
        obj._changed_vars.add(self.name)
        # skip storage and notification when an explicitly-set value
        # is unchanged
        if self._name in obj.__dict__ and self.matches(value, old):
            return
        setattr(obj, self._name, value)
        obj._dirty = True
        if hasattr(obj, '_trigger'):
            if hasattr(obj, '_block_callbacks') and obj._block_callbacks:
                # callbacks blocked: queue the notification for later
                obj._callback_queue.append((self.name, old, value))
            else:
                obj._trigger(self.name, old, value)

    def __delete__(self, obj):
        # remove the stored value so __get__ reverts to the default
        if hasattr(obj, self._name):
            delattr(obj, self._name)

    @property
    def has_ref(self):
        # overridden by subclasses whose values reference other objects
        return False
class DataSpec(Property):
    """ Because the BokehJS glyphs support a fixed value or a named
    field for most data fields, we capture that in this descriptor.
    Fields can have a fixed value, or be a name that is looked up
    on the datasource (usually as a column or record array field).
    Numerical data can also have units of screen or data space.
    We mirror the JS convention in this Python descriptor. For details,
    see renderers/properties.coffee in BokehJS, and specifically the
    select() function.
    There are multiple ways to set a DataSpec, illustrated below with comments
    and example code.
    Setting DataSpecs
    Simple example::
        class Foo(HasProps):
            x = DataSpec("x", units="data")
        f = Foo()
        f.x = "fieldname"  # Use the datasource field named "fieldname"
        f.x = 12           # A fixed value of 12
    Can provide a dict with the fields explicitly named::
        f.width = {"name": "foo"}
        f.size = {"name": "foo", "units": "screen"}
    Reading DataSpecs
    In the cases when the dataspec is set to just a field name or a
    fixed value, then those are returned. If the no values have
    been set, then the value of to_dict() is returned.
    In all cases, to determine the full dict that will be used to
    represent this dataspec, use the to_dict() method.
    Implementation
    The DataSpec instance is stored in the class dict, and acts as a
    descriptor. Thus, it is shared between all instances of the class.
    Instance-specific data is stored in the instance dict, in a private
    variable named _[attrname]. This stores the actual value that the
    user last set (and does not exist if the user has not yet set the
    value).
    """
    def __init__(self, field=None, units="data", min_value=None, default=_NotSet):
        """
        Parameters
        ==========
        **field** is the string name of a data column to look up.
        **units** is either "data" or "screen"
        """
        # Don't use .name because the HasProps metaclass uses that to
        # store the attribute name on this descriptor.
        self.field = field
        self.units = units
        self.min_value = min_value
        # _NotSet (rather than None) marks "no default", so an explicit
        # default of None remains distinguishable.
        self.default = default
    @classmethod
    def autocreate(cls, name=None):
        # In this case, use the name the user assigned this DataSpec to
        # as the default field name.
        d = cls(field=name)
        return d
    def __get__(self, obj, cls=None):
        """ Try to implement a "natural" interface: if the user just set
        simple values or field names, the getter just returns those.
        However, if the user has also overridden the "units" or "default"
        settings, then a dictionary is returned.
        """
        if hasattr(obj, self._name):
            setval = getattr(obj, self._name)
            if isinstance(setval, string_types):
                # A string representing the field
                return setval
            elif not isinstance(setval, dict):
                # Typically a number presenting the fixed value
                return setval
            else:
                return self.to_dict(obj)
        else:
            # If the user hasn't set anything
            if self.field is not None:
                return self.field
            if self.default != _NotSet:
                return self.default
    def to_dict(self, obj):
        """ Return the complete dict representation of this dataspec for
        *obj*, raising ValueError if a fixed value is below min_value.
        """
        # Build the complete dict
        setval = getattr(obj, self._name, None)
        if isinstance(setval, string_types):
            d = {"field": setval, "units": self.units}
        elif isinstance(setval, dict):
            d = {"units": self.units}
            d.update(setval)
        elif setval is not None:
            # a fixed value of some sort; no need to store the default value
            d = {"value": setval, "units": self.units}
        else:
            # If the user never set a value
            if self.field is not None:
                d = {"field": self.field, "units": self.units}
            elif self.default != _NotSet:
                d = {"value": self.default, "units": self.units}
            else:
                d = {}
        # Enforce the optional lower bound on fixed values.
        if "value" in d and self.min_value is not None:
            if d["value"] < self.min_value:
                raise ValueError("value must be greater than %s" % str(self.min_value))
        return d
    def __repr__(self):
        return "DataSpec(field=%r, units=%r)" % (self.field, self.units)
class ColorSpec(DataSpec):
    """ Subclass of DataSpec for specifying colors.
    Although this serves the same role as a DataSpec, its usage is somewhat
    different because:
    * Specifying a fixed value is much more common
    * Strings can be both field identifiers or refer to one of the SVG
      Named Colors (or be a hex value starting with "#")
    * There are no units
    For colors, because we support named colors and hex values prefaced
    with a "#", when we are handed a string value, there is a little
    interpretation: if the value is one of the 147 SVG named colors or
    it starts with a "#", then it is interpreted as a value. Otherwise,
    it is treated as a field name.
    If a 3-tuple is provided, then it is treated as an RGB (0..255).
    If a 4-tuple is provided, then it is treated as an RGBa (0..255), with
    alpha as a float between 0 and 1. (This follows the HTML5 Canvas API.)
    Unlike DataSpec, ColorSpecs do not have a "units" property.
    When reading out a ColorSpec, it returns a tuple, hex value, or
    field name
    There are two common use cases for ColorSpec: setting a constant value,
    and indicating a field name to look for on the datasource:
    >>> class Bar(HasProps):
    ...     col = ColorSpec("green")
    ...     col2 = ColorSpec("colorfield")
    >>> b = Bar()
    >>> b.col = "red"     # sets a fixed value of red
    >>> b.col
    "red"
    >>> b.col = "myfield" # Use the datasource field named "myfield"
    >>> b.col
    "myfield"
    For more examples, see tests/test_glyphs.py
    """
    # The 147 SVG named colors, used to decide string-is-value vs string-is-field.
    NAMEDCOLORS = set(colors.__colors__)
    def __init__(self, field_or_value=None, field=None, value=None, default=_NotSet):
        """ ColorSpec(field_or_value=None, field=None, value=None)
        """
        # The fancy footwork below is so we auto-interpret the first positional
        # parameter as either a field or a fixed value. If either "field" or
        # "value" are then supplied as keyword arguments, then those will
        # override the inferred value from the positional argument.
        self.field = field
        self.value = value
        self.default = default
        if field_or_value is not None:
            if self.isconst(field_or_value):
                self.value = field_or_value
            else:
                self.field = field_or_value
        # We need to distinguish if the user ever explicitly sets the attribute; if
        # they explicitly set it to None, we should pass on None in the dict.
        self._isset = False
    @classmethod
    def isconst(cls, arg):
        """ Returns True if the argument is a literal color. Check for a
        well-formed hexadecimal color value.
        """
        return isinstance(arg, string_types) and \
               ((len(arg) == 7 and arg[0] == "#") or arg in cls.NAMEDCOLORS)
    def _formattuple(self, colortuple):
        # Render a 3-tuple as "rgb(...)" and a 4-tuple as "rgba(...)".
        if isinstance(colortuple, tuple):
            if len(colortuple) == 3:
                return "rgb%r" % (colortuple,)
            else:
                return "rgba%r" % (colortuple,)
        else:
            return colortuple
    def __get__(self, obj, cls=None):
        # One key difference in ColorSpec.__get__ from the base class is
        # that we do not call self.to_dict() in any circumstance, because
        # this could lead to formatting color tuples as "rgb(R,G,B)" instead
        # of keeping them as tuples.
        if hasattr(obj, self._name):
            setval = getattr(obj, self._name)
            if self.isconst(setval) or isinstance(setval, tuple):
                # Fixed color value
                return setval
            elif isinstance(setval, string_types):
                return setval
            elif setval is None:
                return None
            else:
                # setval should be a dict at this point
                assert(isinstance(setval, dict))
                return setval
        else:
            if self.value is not None:
                return self.value
            if self.default != _NotSet:
                return self.default
            else:
                return self.field
    def __set__(self, obj, arg):
        self._isset = True
        if isinstance(arg, tuple):
            if len(arg) in (3, 4):
                # RGB or RGBa
                pass
            else:
                # Bug fix: the old message claimed a 2-tuple was acceptable,
                # but only 3- and 4-tuples pass the check above.
                raise RuntimeError("Invalid tuple being assigned to ColorSpec; must be length 3 or 4.")
        elif isinstance(arg, colors.Color):
            # Normalize Color objects to their CSS string form.
            arg = arg.to_css()
        super(ColorSpec, self).__set__(obj, arg)
    def to_dict(self, obj):
        """ Build the {"value": ...} or {"field": ...} dict for *obj*. """
        setval = getattr(obj, self._name, None)
        if self.default != _NotSet and not self._isset:
            setval = self.default
        if setval is not None:
            if self.isconst(setval):
                # Hexadecimal or named color
                return {"value": setval}
            elif isinstance(setval, tuple):
                # RGB or RGBa
                # TODO: Should we validate that alpha is between 0..1?
                return {"value": self._formattuple(setval)}
            elif isinstance(setval, string_types):
                return {"field": setval}
            elif isinstance(setval, dict):
                # this is considerably simpler than the DataSpec case because
                # there are no units involved, and we've handled all of the
                # value cases above.
                return setval.copy()
        else:
            if self._isset:
                # The user explicitly set None; propagate it.
                if self.value is None:
                    return {"value": None}
                else:
                    return {"value": getattr(obj, self._name, self.value)}
            else:
                if self.value:
                    return {"value": self.value}
                return {"field": self.field}
    def __repr__(self):
        return "ColorSpec(field=%r)" % self.field
class Include(object):
    """ Include other properties from mixin Models, with a given prefix. """
    def __init__(self, delegate):
        # Delegates must be HasProps *classes* (not instances); reject
        # anything else up front with a clear error.
        is_model_class = isinstance(delegate, type) and issubclass(delegate, HasProps)
        if not is_model_class:
            raise ValueError("expected a subclass of HasProps, got %r" % delegate)
        self.delegate = delegate
class MetaHasProps(type):
    """ Metaclass for HasProps: expands Include directives, assigns each
    Property descriptor its attribute name, and records the collected
    property names on the new class (__properties__ and friends).
    """
    def __new__(cls, class_name, bases, class_dict):
        names = set()
        names_with_refs = set()
        container_names = set()
        # First pre-process to handle all the Includes
        includes = {}
        removes = set()
        for name, prop in class_dict.items():
            if not isinstance(prop, Include):
                continue
            delegate = prop.delegate
            # "foo_props = Include(Bar)" exposes Bar's properties under a
            # "foo_" prefix (the "_props" suffix is stripped from the name).
            prefix = re.sub("_props$", "", name) + "_"
            for subpropname in delegate.class_properties(withbases=False):
                fullpropname = prefix + subpropname
                subprop = lookup_descriptor(delegate, subpropname)
                if isinstance(subprop, Property):
                    # If it's an actual instance, then we need to make a copy
                    # so two properties don't write to the same hidden variable
                    # inside the instance.
                    subprop = copy(subprop)
                includes[fullpropname] = subprop
            # Remove the name of the Include attribute itself
            removes.add(name)
        # Update the class dictionary, taking care not to overwrite values
        # from the delegates that the subclass may have explicitly defined
        for key, val in includes.items():
            if key not in class_dict:
                class_dict[key] = val
        for tmp in removes:
            del class_dict[tmp]
        dataspecs = {}
        for name, prop in class_dict.items():
            if isinstance(prop, Property):
                # Tell the descriptor which attribute name it was bound to.
                prop.name = name
                if prop.has_ref:
                    names_with_refs.add(name)
                elif isinstance(prop, ContainerProperty):
                    container_names.add(name)
                names.add(name)
                if isinstance(prop, DataSpec):
                    dataspecs[name] = prop
            elif isinstance(prop, type) and issubclass(prop, Property):
                # Support the user adding a property without using parens,
                # i.e. using just the Property subclass instead of an
                # instance of the subclass
                newprop = prop.autocreate(name=name)
                class_dict[name] = newprop
                newprop.name = name
                names.add(name)
                # Process dataspecs
                if issubclass(prop, DataSpec):
                    dataspecs[name] = newprop
        class_dict["__properties__"] = names
        class_dict["__properties_with_refs__"] = names_with_refs
        class_dict["__container_props__"] = container_names
        # Only record _dataspecs when any were declared on this class.
        if dataspecs:
            class_dict["_dataspecs"] = dataspecs
        return type.__new__(cls, class_name, bases, class_dict)
def accumulate_from_subclasses(cls, propname):
    """ Union the set attribute named *propname* across every HasProps
    class in the MRO of *cls*.
    """
    collected = set()
    for klass in inspect.getmro(cls):
        if not issubclass(klass, HasProps):
            continue
        collected |= set(getattr(klass, propname))
    return collected
def lookup_descriptor(cls, propname):
    """ Return the descriptor object named *propname*, searching the MRO
    of *cls*; raise KeyError when no HasProps class defines it.
    """
    for klass in inspect.getmro(cls):
        if not issubclass(klass, HasProps):
            continue
        if propname in klass.__dict__:
            return klass.__dict__[propname]
    raise KeyError("Property '%s' not found on class '%s'" % (propname, cls))
@add_metaclass(MetaHasProps)
class HasProps(object):
    """ Base class for objects whose attributes are declared with Property
    descriptors (collected by the MetaHasProps metaclass).
    """
    def __init__(self, **kwargs):
        """ Set up a default initializer handler which assigns all kwargs
        that have the same names as Properties on the class
        """
        # Initialize the mutated property handling
        self._changed_vars = set()
        props = self.properties()
        for key, value in kwargs.items():
            if key in props:
                setattr(self, key, value)
            else:
                raise AttributeError("unexpected attribute '%s' to %s, possible attributes are %s" %
                    (key, self.__class__.__name__, nice_join(props)))
        super(HasProps, self).__init__()
    def to_dict(self):
        return dict((prop, getattr(self, prop)) for prop in self.properties())
    def clone(self):
        """ Returns a duplicate of this object with all its properties
        set appropriately. Values which are containers are shallow-copied.
        """
        return self.__class__(**self.to_dict())
    # NOTE on the caches below: the original code checked
    # hasattr(cls, "__cached...") but assigned via cls.__cached... = s, and
    # name mangling turns the latter into cls._HasProps__cached..., so the
    # hasattr check never matched and every call recomputed the set. The
    # fixed version stores under an unmangled name and checks cls.__dict__
    # directly, so each subclass keeps its own cache instead of inheriting
    # a stale one from its parent.
    @classmethod
    def properties_with_refs(cls):
        """ Returns a set of the names of this object's properties that
        have references. We traverse the class hierarchy and
        pull together the full list of properties.
        """
        if "_cached_allprops_with_refs" not in cls.__dict__:
            s = accumulate_from_subclasses(cls, "__properties_with_refs__")
            setattr(cls, "_cached_allprops_with_refs", s)
        return cls.__dict__["_cached_allprops_with_refs"]
    @classmethod
    def properties_containers(cls):
        """ Returns a list of properties that are containers
        """
        if "_cached_allprops_containers" not in cls.__dict__:
            s = accumulate_from_subclasses(cls, "__container_props__")
            setattr(cls, "_cached_allprops_containers", s)
        return cls.__dict__["_cached_allprops_containers"]
    @classmethod
    def properties(cls):
        """ Returns a set of the names of this object's properties. We
        traverse the class hierarchy and pull together the full
        list of properties.
        """
        if "_cached_allprops" not in cls.__dict__:
            s = cls.class_properties()
            setattr(cls, "_cached_allprops", s)
        return cls.__dict__["_cached_allprops"]
    @classmethod
    def dataspecs(cls):
        """ Returns a set of the names of this object's dataspecs (and
        dataspec subclasses). Traverses the class hierarchy.
        """
        if "_cached_dataspecs" not in cls.__dict__:
            specs = set()
            for c in reversed(inspect.getmro(cls)):
                if hasattr(c, "_dataspecs"):
                    specs.update(c._dataspecs.keys())
            setattr(cls, "_cached_dataspecs", specs)
        return cls.__dict__["_cached_dataspecs"]
    @classmethod
    def dataspecs_with_refs(cls):
        # Merge _dataspecs dicts from base-most to derived-most so derived
        # declarations win.
        dataspecs = {}
        for c in reversed(inspect.getmro(cls)):
            if hasattr(c, "_dataspecs"):
                dataspecs.update(c._dataspecs)
        return dataspecs
    def changed_vars(self):
        """ Returns which variables changed since the creation of the object,
        or the last called to reset_changed_vars().
        """
        # Reference and container properties are always reported, since
        # in-place mutations cannot be detected by the descriptors.
        return set.union(self._changed_vars, self.properties_with_refs(),
                         self.properties_containers())
    def reset_changed_vars(self):
        self._changed_vars = set()
    def properties_with_values(self):
        return dict([ (attr, getattr(self, attr)) for attr in self.properties() ])
    def changed_properties_with_values(self):
        return dict([ (attr, getattr(self, attr)) for attr in self.changed_vars() ])
    @classmethod
    def class_properties(cls, withbases=True):
        if withbases:
            return accumulate_from_subclasses(cls, "__properties__")
        else:
            return set(cls.__properties__)
    def set(self, **kwargs):
        """ Sets a number of properties at once """
        for kw in kwargs:
            setattr(self, kw, kwargs[kw])
    def pprint_props(self, indent=0):
        """ Prints the properties of this object, nicely formatted """
        for key, value in self.properties_with_values().items():
            print("%s%s: %r" % (" "*indent, key, value))
class PrimitiveProperty(Property):
    ''' Base class for properties whose values must be instances of a
    fixed tuple of primitive types (or None).
    '''
    # Tuple of acceptable types; concrete subclasses must override.
    _underlying_type = None
    def validate(self, value):
        super(PrimitiveProperty, self).validate(value)
        if not (value is None or isinstance(value, self._underlying_type)):
            raise ValueError("expected a value of type %s, got %s of type %s" %
                (nice_join([ cls.__name__ for cls in self._underlying_type ]), value, type(value).__name__))
class Bool(PrimitiveProperty):
    ''' Boolean type property. '''
    # Accepts bool values (or None).
    _underlying_type = (bool,)
class Int(PrimitiveProperty):
    ''' Signed integer type property. '''
    # Accepts numpy integer scalars as well as native Python ints.
    _underlying_type = bokeh_integer_types
class Float(PrimitiveProperty):
    ''' Floating point type property. '''
    # Integers are accepted too (implicit widening).
    _underlying_type = (float, ) + bokeh_integer_types
class Complex(PrimitiveProperty):
    ''' Complex floating point type property. '''
    # Floats and integers are accepted too (implicit widening).
    _underlying_type = (complex, float) + bokeh_integer_types
class String(PrimitiveProperty):
    ''' String type property. '''
    # six.string_types covers both str and (on py2) unicode.
    _underlying_type = string_types
class Regex(String):
    """ String property that additionally requires values to match the
    given regular expression.
    """
    def __init__(self, regex, default=None):
        # Compile once up front; the pattern is reused for every validation.
        self.regex = re.compile(regex)
        super(Regex, self).__init__(default=default)
    def validate(self, value):
        super(Regex, self).validate(value)
        # None is always acceptable; otherwise the value must match.
        if value is None:
            return
        if self.regex.match(value) is not None:
            return
        raise ValueError("expected a string matching %r pattern, got %r" % (self.regex.pattern, value))
    def __str__(self):
        return "%s(%r)" % (self.__class__.__name__, self.regex.pattern)
class ParameterizedProperty(Property):
    """ Base class for Properties that have type parameters, e.g. `List(String)`. """
    def _validate_type_param(self, type_param):
        """ Normalize a type parameter to a Property *instance*.

        Accepts either a Property subclass (instantiated here) or an
        existing Property instance; anything else raises ValueError.
        """
        if isinstance(type_param, type):
            if issubclass(type_param, Property):
                return type_param()
            else:
                # Fall through to the raise below, but report the class
                # name rather than the repr of the class object.
                type_param = type_param.__name__
        elif isinstance(type_param, Property):
            return type_param
        raise ValueError("expected a property as type parameter, got %s" % type_param)
    @property
    def type_params(self):
        raise NotImplementedError("abstract method")
    @property
    def has_ref(self):
        # A parameterized property holds references if any type param does.
        return any(type_param.has_ref for type_param in self.type_params)
class ContainerProperty(ParameterizedProperty):
    ''' Base class for Container-like type properties (lists, dicts,
    tuples, arrays).
    '''
    # Base class for container-like things; this helps the auto-serialization
    # and attribute change detection code
    pass
class List(ContainerProperty):
    """ If a default value is passed in, then a shallow copy of it will be
    used for each new use of this property.
    People will also frequently pass in some other kind of property or a
    class (to indicate a list of instances). In those cases, we want to
    just create an empty list
    """
    def __init__(self, item_type, default=None):
        self.item_type = self._validate_type_param(item_type)
        super(List, self).__init__(default=default)
    @property
    def type_params(self):
        return [self.item_type]
    def validate(self, value):
        super(List, self).validate(value)
        # None is acceptable for the property as a whole; otherwise every
        # element must satisfy the item type.
        if value is not None:
            if not (isinstance(value, list) and \
                    all(self.item_type.is_valid(item) for item in value)):
                raise ValueError("expected an element of %s, got %r" % (self, value))
    def __str__(self):
        return "%s(%s)" % (self.__class__.__name__, self.item_type)
    def __get__(self, obj, type=None):
        # Materialize the per-instance list on first access so callers can
        # mutate it in place.
        if hasattr(obj, self._name):
            return getattr(obj, self._name)
        if self.default is None:
            val = []
        elif isinstance(self.default, list):
            val = copy(self.default)
        else:
            # NOTE(review): non-list defaults are stored as-is (not copied),
            # so they would be shared between instances — confirm intended.
            val = self.default
        setattr(obj, self._name, val)
        return val
class Dict(ContainerProperty):
    """ If a default value is passed in, then a shallow copy of it will be
    used for each new use of this property.
    """
    def __init__(self, keys_type, values_type, default={}):
        # NOTE(review): the {} default is a single shared object; __get__
        # copies it per instance, but mutating the default itself would
        # still leak across all Dict properties — confirm acceptable.
        self.keys_type = self._validate_type_param(keys_type)
        self.values_type = self._validate_type_param(values_type)
        super(Dict, self).__init__(default=default)
    @property
    def type_params(self):
        return [self.keys_type, self.values_type]
    def __get__(self, obj, type=None):
        # Materialize a per-instance copy of a dict default on first access.
        if not hasattr(obj, self._name) and isinstance(self.default, dict):
            setattr(obj, self._name, copy(self.default))
            return getattr(obj, self._name)
        else:
            return getattr(obj, self._name, self.default)
    def validate(self, value):
        super(Dict, self).validate(value)
        # Every key and value must satisfy its declared type.
        if value is not None:
            if not (isinstance(value, dict) and \
                    all(self.keys_type.is_valid(key) and self.values_type.is_valid(val) for key, val in iteritems(value))):
                raise ValueError("expected an element of %s, got %r" % (self, value))
    def __str__(self):
        return "%s(%s, %s)" % (self.__class__.__name__, self.keys_type, self.values_type)
class Tuple(ContainerProperty):
    ''' Tuple type property. '''
    def __init__(self, tp1, tp2, *type_params, **kwargs):
        # At least two type parameters are required; each positional slot
        # gets its own element type.
        self._type_params = list(map(self._validate_type_param, (tp1, tp2) + type_params))
        super(Tuple, self).__init__(default=kwargs.get("default", None))
    @property
    def type_params(self):
        return self._type_params
    def validate(self, value):
        super(Tuple, self).validate(value)
        # Accepts tuples or lists of exactly the declared arity, where each
        # element satisfies its corresponding type parameter.
        if value is not None:
            if not (isinstance(value, (tuple, list)) and len(self.type_params) == len(value) and \
                    all(type_param.is_valid(item) for type_param, item in zip(self.type_params, value))):
                raise ValueError("expected an element of %s, got %r" % (self, value))
    def __str__(self):
        return "%s(%s)" % (self.__class__.__name__, ", ".join(map(str, self.type_params)))
class Array(ContainerProperty):
    """ Whatever object is passed in as a default value, np.asarray() is
    called on it to create a copy for the default value for each use of
    this property.
    """
    def __init__(self, item_type, default=None):
        self.item_type = self._validate_type_param(item_type)
        super(Array, self).__init__(default=default)
    @property
    def type_params(self):
        return [self.item_type]
    def __get__(self, obj, type=None):
        # Materialize the default as an ndarray on the instance at first
        # access; subsequent reads return that stored array.
        if not hasattr(obj, self._name) and self.default is not None:
            setattr(obj, self._name, np.asarray(self.default))
            return getattr(obj, self._name)
        else:
            return getattr(obj, self._name, self.default)
class Instance(Property):
    ''' Instance type property for references to other Models in the object
    graph.
    '''
    def __init__(self, instance_type, default=None):
        # Accept either an actual HasProps subclass or a dotted-path string
        # that is resolved lazily (see instance_type below).
        if not isinstance(instance_type, (type,) + string_types):
            raise ValueError("expected a type or string, got %s" % instance_type)
        if isinstance(instance_type, type) and not issubclass(instance_type, HasProps):
            raise ValueError("expected a subclass of HasProps, got %s" % instance_type)
        self._instance_type = instance_type
        super(Instance, self).__init__(default=default)
    @property
    def instance_type(self):
        # Resolve a "module.ClassName" string to the real class on first
        # access, then cache the class in its place.
        if isinstance(self._instance_type, str):
            module, name = self._instance_type.rsplit(".", 1)
            self._instance_type = getattr(import_module(module, "bokeh"), name)
        return self._instance_type
    @property
    def has_ref(self):
        return True
    def __get__(self, obj, type=None):
        # If the constructor for Instance() supplied a class name, we should
        # instantiate that class here, instead of returning the class as the
        # default object
        if not hasattr(obj, self._name):
            if type and self.default and isinstance(self.default, type):
                setattr(obj, self._name, self.default())
        return getattr(obj, self._name, None)
    def validate(self, value):
        super(Instance, self).validate(value)
        if value is not None:
            if not isinstance(value, self.instance_type):
                raise ValueError("expected an instance of type %s, got %s of type %s" %
                    (self.instance_type.__name__, value, type(value).__name__))
    def __str__(self):
        return "%s(%s)" % (self.__class__.__name__, self.instance_type.__name__)
class This(Property):
    """ A reference to an instance of the class being defined. """
    # Placeholder: no validation implemented.
    pass
# Fake types, ABCs
class Any(Property):
    ''' Any type property accepts any values. '''
    # Inherits the no-op validate() from Property.
    pass
class Function(Property):
    ''' Function type property. '''
    # Placeholder: no validation implemented.
    pass
class Event(Property):
    ''' Event type property. '''
    # Placeholder: no validation implemented.
    pass
class Range(ParameterizedProperty):
    ''' Range type property ensures values are between a range. '''
    def __init__(self, range_type, start, end, default=None):
        self.range_type = self._validate_type_param(range_type)
        # The bounds themselves must satisfy the underlying type.
        self.range_type.validate(start)
        self.range_type.validate(end)
        self.start = start
        self.end = end
        super(Range, self).__init__(default=default)
    @property
    def type_params(self):
        return [self.range_type]
    def validate(self, value):
        super(Range, self).validate(value)
        # Bounds are inclusive on both ends.
        if not (value is None or self.range_type.is_valid(value) and value >= self.start and value <= self.end):
            raise ValueError("expected a value of type %s in range [%s, %s], got %r" % (self.range_type, self.start, self.end, value))
    def __str__(self):
        return "%s(%s, %r, %r)" % (self.__class__.__name__, self.range_type, self.start, self.end)
class Byte(Range):
    ''' Byte type property: an integer in the inclusive range [0, 255]. '''
    def __init__(self, default=0):
        super(Byte, self).__init__(Int, 0, 255, default=default)
class Either(ParameterizedProperty):
    """ Takes a list of valid properties and validates against them in succession. """
    def __init__(self, tp1, tp2, *type_params, **kwargs):
        self._type_params = list(map(self._validate_type_param, (tp1, tp2) + type_params))
        # Default to the first type parameter's default unless overridden.
        default = kwargs.get("default", self._type_params[0].default)
        super(Either, self).__init__(default=default)
    @property
    def type_params(self):
        return self._type_params
    def validate(self, value):
        super(Either, self).validate(value)
        # Valid if any one of the alternative types accepts the value.
        if not (value is None or any(param.is_valid(value) for param in self.type_params)):
            raise ValueError("expected an element of either %s, got %r" % (nice_join(self.type_params), value))
    def transform(self, value):
        # Delegate to the first type parameter whose transform accepts it.
        for param in self.type_params:
            try:
                return param.transform(value)
            except ValueError:
                pass
        raise ValueError("Could not transform %r" % value)
    def __str__(self):
        return "%s(%s)" % (self.__class__.__name__, ", ".join(map(str, self.type_params)))
class Enum(Property):
    """ An Enum with a list of allowed values. The first value in the list is
    the default value, unless a default is provided with the "default" keyword
    argument.
    """
    def __init__(self, enum, *values, **kwargs):
        # Accept either a ready-made Enumeration, or raw values that still
        # need to be wrapped into one.
        already_enumeration = not values and isinstance(enum, enums.Enumeration)
        if not already_enumeration:
            enum = enums.enumeration(enum, *values)
        self.allowed_values = enum._values
        super(Enum, self).__init__(default=kwargs.get("default", enum._default))
    def validate(self, value):
        super(Enum, self).validate(value)
        if value is None or value in self.allowed_values:
            return
        raise ValueError("invalid value for %s: %r; allowed values are %s" % (self.name, value, nice_join(self.allowed_values)))
    def __str__(self):
        allowed = ", ".join(map(repr, self.allowed_values))
        return "%s(%s)" % (self.__class__.__name__, allowed)
# Properties useful for defining visual attributes
class Color(Either):
    """ Accepts color definition in a variety of ways, and produces an
    appropriate serialization of its value for whatever backend.
    For colors, because we support named colors and hex values prefaced
    with a "#", when we are handed a string value, there is a little
    interpretation: if the value is one of the 147 SVG named colors or
    it starts with a "#", then it is interpreted as a value.
    If a 3-tuple is provided, then it is treated as an RGB (0..255).
    If a 4-tuple is provided, then it is treated as an RGBa (0..255), with
    alpha as a float between 0 and 1. (This follows the HTML5 Canvas API.)
    """
    def __init__(self, default=None):
        # Accepted forms, in validation order: SVG named color, "#rrggbb"
        # hex string, RGB byte 3-tuple, RGBa tuple with float alpha.
        types = (Enum(enums.NamedColor),
                 Regex("^#[0-9a-fA-F]{6}$"),
                 Tuple(Byte, Byte, Byte),
                 Tuple(Byte, Byte, Byte, Percent))
        super(Color, self).__init__(*types, default=default)
    def __str__(self):
        return self.__class__.__name__
class Align(Property):
    ''' Alignment type property (placeholder; no validation implemented). '''
    pass
class DashPattern(Either):
    """
    This is a property that expresses line dashes.  It can be specified in
    a variety of forms:
    * "solid", "dashed", "dotted", "dotdash", "dashdot"
    * A tuple or list of integers in the HTML5 Canvas dash specification
      style: http://www.w3.org/html/wg/drafts/2dcontext/html5_canvas/#dash-list
      Note that if the list of integers has an odd number of elements, then
      it is duplicated, and that duplicated list becomes the new dash list.
    If dash is turned off, then the dash pattern is the empty list [].
    """
    # Named dash styles mapped to their HTML5 canvas dash lists.
    _dash_patterns = {
        "solid": [],
        "dashed": [6],
        "dotted": [2,4],
        "dotdash": [2,4,6,4],
        "dashdot": [6,4,2,4],
    }
    def __init__(self, default=[]):
        # Accepts a named style, a whitespace-separated integer string,
        # or an explicit list of ints.
        types = Enum(enums.DashPattern), Regex(r"^(\d+(\s+\d+)*)?$"), List(Int)
        super(DashPattern, self).__init__(*types, default=default)
    def transform(self, value):
        value = super(DashPattern, self).transform(value)
        if isinstance(value, string_types):
            try:
                # Named style, e.g. "dashed".
                return self._dash_patterns[value]
            except KeyError:
                # Otherwise a whitespace-separated integer string, e.g. "2 4".
                return [int(x) for x in value.split()]
        else:
            return value
    def __str__(self):
        return self.__class__.__name__
class Size(Float):
    """ A non-negative float; equivalent to an unsigned number. """
    def validate(self, value):
        super(Size, self).validate(value)
        if value is None:
            return
        # Written as `not 0.0 <= value` (rather than `value < 0.0`) so
        # that NaN is rejected as well.
        if not 0.0 <= value:
            raise ValueError("expected a non-negative number, got %r" % value)
class Percent(Float):
    """ Percent is useful for alphas and coverage and extents; more
    semantically meaningful than Float(0..1)
    """
    def validate(self, value):
        super(Percent, self).validate(value)
        # Bounds are inclusive: both 0.0 and 1.0 are acceptable.
        if not (value is None or 0.0 <= value <= 1.0):
            raise ValueError("expected a value in range [0, 1], got %r" % value)
class Angle(Float):
    ''' Angle type property (no constraints beyond Float). '''
    pass
class Date(Property):
    ''' Date (not datetime) type property. '''
    def __init__(self, default=datetime.date.today()):
        # NOTE(review): this default is evaluated once, at import time, so
        # long-running processes keep the import-day date — confirm whether
        # a call-time default was intended.
        super(Date, self).__init__(default=default)
    def validate(self, value):
        super(Date, self).validate(value)
        # Accepts dates, strings, and numeric timestamps; transform()
        # converts the latter two to datetime.date.
        if not (value is None or isinstance(value, (datetime.date,) + string_types + (float,) + bokeh_integer_types)):
            raise ValueError("expected a date, string or timestamp, got %r" % value)
    def transform(self, value):
        value = super(Date, self).transform(value)
        if isinstance(value, (float,) + bokeh_integer_types):
            try:
                value = datetime.date.fromtimestamp(value)
            except ValueError:
                # Out-of-range for seconds: retry assuming milliseconds.
                value = datetime.date.fromtimestamp(value/1000)
        elif isinstance(value, string_types):
            value = dateutil.parser.parse(value).date()
        return value
class Datetime(Property):
    ''' Datetime type property. '''
    def __init__(self, default=datetime.date.today()):
        # NOTE(review): the default is a *date* (not a datetime) evaluated
        # once at import time — confirm whether a call-time datetime was
        # intended.
        super(Datetime, self).__init__(default=default)
    def validate(self, value):
        super(Datetime, self).validate(value)
        if (isinstance(value, (datetime.datetime, datetime.date, np.datetime64))):
            return
        # pandas Timestamps are also accepted, when pandas is installed.
        try:
            import pandas
            if isinstance(value, (pandas.Timestamp)):
                return
        except ImportError:
            pass
        raise ValueError("Expected a datetime instance, got %r" % value)
    def transform(self, value):
        # No conversion performed; values are stored as validated.
        value = super(Datetime, self).transform(value)
        return value
# Handled by serialization in protocol.py for now
class RelativeDelta(Dict):
    ''' RelativeDelta type property for time deltas. '''
    def __init__(self, default={}):
        # Keys presumably mirror dateutil.relativedelta's plural argument
        # names — verify against the serialization code.
        keys = Enum("years", "months", "days", "hours", "minutes", "seconds", "microseconds")
        values = Int
        super(RelativeDelta, self).__init__(keys, values, default=default)
    def __str__(self):
        return self.__class__.__name__
| {
"repo_name": "jakevdp/bokeh",
"path": "bokeh/properties.py",
"copies": "1",
"size": "38439",
"license": "bsd-3-clause",
"hash": 2298246690737897700,
"line_mean": 34.8907563025,
"line_max": 134,
"alpha_frac": 0.5968677645,
"autogenerated": false,
"ratio": 4.163218888768547,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5260086653268548,
"avg_score": null,
"num_lines": null
} |
"""A set of equations for calculating various values for a starmade entity."""
# For example, thrust and max speed given mass and number of thrusters.
# Game-balance constants used by the power equations below. Values are
# hard-coded copies of entries in StarMade's blockBehaviorConfig.xml.
# TODO(danmcg): Pull number from blockBehaviorConfig.xml <PowerCeiling>
POWER_CEILING = 2000000
# TODO(danmcg): Pull number from blockBehaviorConfig.xml <PowerDivFactor>
POWER_DIV_FACTOR = 0.333
# TODO(danmcg): Pull number from blockBehaviorConfig.xml <PowerGrowth>
POWER_GROWTH = 1.000348
# TODO(danmcg): Pull number from blockBehaviorConfig.xml <PowerLinearGrowth>
POWER_LINEAR_GROWTH = 25.0
# Returns the maximum possible power recharge rate based upon number of power
# modules and maximum dimensions of the containing area.
def calc_power_output(block_count, ship_dimensions):
    """ Calculate power output given Power Reactor count and max dimensions
    Given the number of Power Reactors and the sum of an areas x, y and z
    dimensions, this function will return the maximum possible power recharge
    rate that can be achieved.
    Args:
        block_count: The number of Power Reactors (block id 2)
        ship_dimensions: The sum of a ships maximum X, Y and Z dimensions
            (assumed > 2.0 when the group overflows the hull -- TODO confirm)
    Returns:
        A float for the maximum possible 'e/sec' recharge rate
    """
    # Linear contribution: every reactor gives a flat amount of recharge.
    block_power = block_count * POWER_LINEAR_GROWTH
    max_dimensions = block_count + 2.0
    if max_dimensions > ship_dimensions:
        # Group does not fit: split into full-size groups plus a remainder.
        usable = ship_dimensions - 2.0
        remainder_dimensions = (block_count % usable) + 2.0
        max_mod = (block_count - remainder_dimensions - 2.0) / usable
        group_power = pow(ship_dimensions / 3.0, 1.7) * max_mod
        group_power += pow(remainder_dimensions / 3.0, 1.7)
    else:
        group_power = pow(max_dimensions / 3.0, 1.7)
    # Sigmoid that asymptotically approaches the power ceiling.
    size_power = 2.0 / (1.0 + pow(POWER_GROWTH, -POWER_DIV_FACTOR * group_power)) - 1.0
    size_power *= POWER_CEILING
    # The size-based contribution is capped at POWER_CEILING.
    return block_power + min(size_power, POWER_CEILING)
def calc_power_capacity(block_count):
    """ Calculate the power capacity of a contiguous group of power capacitors
    Given the number of Power Capacitors, this function will return the total
    power storage they will support.
    Args:
        block_count: The number of Power Capacitors (block id 331)
    Returns:
        A float for the maximum possible 'e' power storage
    """
    # Slightly super-linear: grouping capacitors gives a small bonus.
    return pow(block_count, 1.05) * 1000.0
def calc_thrust(block_count):
    """ Calculate the thrust value generated by a number of Thruster Modules
    Given the number of Thruster Modules, this function will return the amount
    of thrust generated.
    Args:
        block_count: The number of Thruster Modules (block id 25)
    Returns:
        A float for the amount of thrust
    """
    # Sub-linear growth: each additional thruster contributes slightly less.
    return pow(block_count * 5.5, 0.87) * 0.75
def calc_speed_coefficient(thrust, total_mass):
    """ Calculate a ships maximum speed multiplier for the server speed limit
    The maximum speed a ship can achieve on a server is a combination of the
    thrust:mass ratio of the ship and the server speed limit. This function
    returns a coefficient between 0.5 and 3.0 that when multiplied with the
    server's speed limit will give you the ships maximum speed.
    Args:
        thrust: A float that represents the thrust of a ship. See calc_thrust()
        total_mass: The total mass of the ship; must be non-zero or
            ZeroDivisionError is raised
    Returns:
        A float between (inclusive) 0.5 and 3.0. The max speed coefficient
    """
    # Thrust:mass ratio is capped at 2.5; the +0.5 floor yields [0.5, 3.0].
    return min(thrust / total_mass, 2.5) + 0.5
def calc_thrust_power(block_count):
    """ Calculate the power in 'e/sec' consumed when using a ships thrusters
    Given the number of Thruster Modules, this function will return the power
    per second (e/sec) consumed when thrusters are active.
    Args:
        block_count: The number of Thruster Modules (block id 25)
    Returns:
        A float for 'e/sec' usage of active thrusters
    """
    # Each thruster draws 1/0.03 (~33.3) e/sec while firing.
    return block_count / 0.03
def calc_shield_capacity(block_count):
    """ Calculate the maximum shield capacity by a number of Shield Capacitors
    Given the number of Shield Capacitors, this function will return the
    maximum amount of Shield Capacity 'C' they support.
    Args:
        block_count: The number of Shield Capacitors (block id 3)
    Returns:
        A float for the maximum shield capacity 'C'
    """
    # 220.0 is the base capacity every entity gets, even with zero capacitors.
    return 220.0 + 110.0 * pow(block_count, 0.9791797578)
def calc_shield_recharge(block_count):
    """ Calculate the shield recharge rate for a number of Shield Rechargers
    Given the number of Shield Rechargers, this function will return the
    rate of shield capacity charging 'C/sec' they generate.
    Args:
        block_count: The number of Shield Rechargers (block id 478)
    Returns:
        A float for the shield recharge rate in 'C/sec'
    """
    # Purely linear: 5.5 C/sec per recharger.
    return 5.5 * block_count
def calc_shield_power(block_count, active=False):
    """ Calculate the power usage of Shield Rechargers
    Given the number of Shield Rechargers, this function will calculate how
    much power they draw in 'e/sec'. It will calculate both inactive
    (shields full) and active (shields charging) power consumption based on
    the 'active' flag setting.
    Args:
        block_count: The number of Shield Rechargers (block id 478)
        active: A boolean. True=actively recharging, False=for not (default)
    Returns:
        A float for 'e/sec' usage of active/inactive Shield Rechargers
    """
    # Active recharging draws 10x the idle upkeep per block.
    per_block = 55.0 if active else 5.5
    return block_count * per_block
def calc_jump_power(block_count, total_mass):
    """ Calculate the power required to fully charge the jump drive of a ship
    Given the number of Jump Modules and total ship mass, this function will
    calculate the total power required to fully charge the jump drive.
    Args:
        block_count: The number of Jump Drive Modules (block id 545)
        total_mass: The total mass of the ship
    Returns:
        A float for total power 'e' required to charge the jump drive
    """
    # Fix: 'math' was never imported anywhere in this module, so math.ceil
    # below raised NameError at runtime. Imported locally to keep the fix
    # self-contained.
    import math
    ideal = math.ceil(total_mass * 0.5)
    # NOTE(review): 'a' shrinks with block_count*total_mass -- looks like it
    # may have been intended as a ratio; verify against game data.
    a = 50.0 - 100.0 * block_count * total_mass
    return (-0.24 * total_mass)*a*a + 4600.0*a + 230000.0 + 1200.0 * ideal
def calc_jump_time(jump_power, block_count):
    """ Calculate the time required to fully charge the jump drive of a ship
    Given the total charge power required and the number of Jump Modules,
    this function will calculate the total time required to fully charge
    the jump drive.
    Args:
        jump_power: Total power 'e' needed for a full charge. See
            calc_jump_power()
        block_count: The number of Jump Drive Modules (block id 545)
    Returns:
        a float for seconds required to charge the jump drive
    """
    # Charge rate scales linearly with module count from a 10k e/sec base.
    return jump_power / (10000.0 + 50.0 * block_count)
| {
"repo_name": "dmcgrath/starmade-blueprint-library",
"path": "starmade/equations.py",
"copies": "1",
"size": "6759",
"license": "apache-2.0",
"hash": 2951872896786239000,
"line_mean": 35.5351351351,
"line_max": 94,
"alpha_frac": 0.6900429058,
"autogenerated": false,
"ratio": 3.705592105263158,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9885504164024637,
"avg_score": 0.0020261694077042857,
"num_lines": 185
} |
"""A set of examples used for demonstrating the physt capabilities / in tests."""
import io
import pkgutil
import numpy as np
from physt.histogram1d import Histogram1D
from physt.histogram_nd import Histogram2D, HistogramND
from physt.facade import h1, h2, h3
def normal_h1(size: int = 10000, mean: float = 0, sigma: float = 1) -> Histogram1D:
    """A simple 1D histogram with normal distribution.
    Parameters
    ----------
    size : Number of points
    mean : Mean of the distribution
    sigma : Sigma of the distribution
    """
    sample = np.random.normal(mean, sigma, (size,))
    return h1(sample, name="normal", axis_name="x", title="1D normal distribution")
def normal_h2(size: int = 10000) -> Histogram2D:
    """A simple 2D histogram with normal distribution.
    Parameters
    ----------
    size : Number of points
    """
    # Two independent standard-normal samples, one per axis.
    xs = np.random.normal(0, 1, (size,))
    ys = np.random.normal(0, 1, (size,))
    return h2(
        xs,
        ys,
        name="normal",
        axis_names=("x", "y"),
        title="2D normal distribution",
    )
def normal_h3(size: int = 10000) -> HistogramND:
    """A simple 3D histogram with normal distribution.
    Parameters
    ----------
    size : Number of points
    """
    # Three independent standard-normal samples, one per axis.
    xs = np.random.normal(0, 1, (size,))
    ys = np.random.normal(0, 1, (size,))
    zs = np.random.normal(0, 1, (size,))
    return h3(
        [xs, ys, zs],
        name="normal",
        axis_names=("x", "y", "z"),
        title="3D normal distribution",
    )
def fist() -> Histogram1D:
    """A simple histogram in the shape of a fist."""
    # Bin widths (first is 0 so the cumulative sum starts at the origin)
    # and hand-tuned heights that draw the knuckle outline.
    bin_widths = [0, 1.2, 0.2, 1, 0.1, 1, 0.1, 0.9, 0.1, 0.8]
    bin_edges = np.cumsum(bin_widths)
    bin_heights = np.asarray([4, 1, 7.5, 6, 7.6, 6, 7.5, 6, 7.2]) + 5
    return Histogram1D(bin_edges, bin_heights, axis_name="Is this a fist?", title='Physt "logo"')
# Registry of example factories; extended with munros() when pandas exists.
ALL_EXAMPLES = [normal_h1, normal_h2, normal_h3, fist]
# The dataset-backed examples need pandas; define them only when available.
try:
    import pandas as pd
    def load_dataset(name: str) -> pd.DataFrame:
        """Load example dataset.
        If seaborn is present, its datasets can be loaded.
        Physt also includes some datasets in CSV format.

        Raises KeyError if the dataset is found in neither place.
        """
        # Our custom datasets:
        try:
            binary_data = pkgutil.get_data("physt", f"examples/{name}.csv")
            if binary_data:
                return pd.read_csv(io.BytesIO(binary_data))
        except FileNotFoundError:
            pass
        # Seaborn datasets?
        try:
            import seaborn as sns
            import warnings
            # get_dataset_names() may warn (e.g. about network access);
            # suppress so example loading stays quiet.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                if name in sns.get_dataset_names():
                    return sns.load_dataset(name)
        except ImportError:
            pass
        # Fall through
        raise KeyError(f"Dataset '{name}' not available.")
    def munros(edge_length: float = 10) -> Histogram2D:
        """Number of munros in different rectangular areas of Scotland.
        Parameters
        ----------
        edge_length : Size of the rectangular grid in minutes.
        Returns
        -------
        h : physt.histogram_nd.Histogram2D
            Histogram in latitude and longitude.
        """
        data = load_dataset("munros")
        # edge_length is in arc-minutes; bins are specified in degrees.
        return h2(
            data["lat"],
            data["long"],
            "fixed_width",
            bin_width=edge_length / 60,
            name="munros",
            title="Munros of Scotland",
        )
    ALL_EXAMPLES.append(munros)
except ImportError:
    # Either pandas or seaborn not present
    pass
| {
"repo_name": "janpipek/physt",
"path": "physt/examples/__init__.py",
"copies": "1",
"size": "3574",
"license": "mit",
"hash": 1190186319664142000,
"line_mean": 26.4923076923,
"line_max": 89,
"alpha_frac": 0.5769445999,
"autogenerated": false,
"ratio": 3.6807415036045312,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4757686103504531,
"avg_score": null,
"num_lines": null
} |
"""A set of extra ufuncs inspired from PDL: Fused operations.
- add3
- multiply3
- multiply3_add
- multiply_add
- multiply_add2
- multiply4
- multiply4_add
Note: for many use-cases, numba may provide a better solution
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from . import _impl
def add3(a, b, c, **kwargs):
    """
    Element-wise addition of 3 arrays: a + b + c.
    Parameters
    ----------
    a, b, c : (...) array
        arrays with the addends; numpy broadcasting rules apply.
    Returns
    -------
    add3 : (...) array
        resulting element-wise addition.
    Notes
    -----
    Implemented for types single, double, csingle and cdouble. Numpy
    conversion rules apply.
    See Also
    --------
    multiply3, multiply3_add, multiply_add, multiply_add2, multiply4,
    multiply4_add
    Examples
    --------
    >>> a = np.linspace(1.0, 30.0, 30)
    >>> add3(a[0::3], a[1::3], a[2::3])
    array([ 6., 15., 24., 33., 42., 51., 60., 69., 78., 87.])
    """
    # Dispatch to the compiled gufunc implementation.
    return _impl.add3(a, b, c, **kwargs)
def multiply3(a, b, c, **kwargs):
    """
    Element-wise multiplication of 3 arrays: a*b*c.
    Parameters
    ----------
    a, b, c : (...) array
        arrays with the factors; numpy broadcasting rules apply.
    Returns
    -------
    m3 : (...) array
        resulting element-wise product.
    Notes
    -----
    Implemented for types single, double, csingle and cdouble. Numpy
    conversion rules apply.
    See Also
    --------
    add3, multiply3_add, multiply_add, multiply_add2, multiply4,
    multiply4_add
    Examples
    --------
    >>> a = np.linspace(1.0, 10.0, 10)
    >>> multiply3(a, 1.01, a)
    array([ 1.01, 4.04, 9.09, 16.16, 25.25, 36.36, 49.49,
    64.64, 81.81, 101. ])
    """
    # Dispatch to the compiled gufunc implementation.
    return _impl.multiply3(a, b, c, **kwargs)
def multiply3_add(a, b, c, d, **kwargs):
    """
    Element-wise multiplication of 3 arrays adding a 4th array to the
    result: a*b*c + d
    Parameters
    ----------
    a, b, c : (...) array
        arrays with the factors
    d : (...) array
        array with the addend
    Returns
    -------
    m3a : (...) array
        element-wise result (a*b*c + d)
    Notes
    -----
    Numpy broadcasting rules apply.
    Implemented for types single, double, csingle and cdouble. Numpy
    conversion rules apply.
    See Also
    --------
    add3, multiply3, multiply_add, multiply_add2, multiply4, multiply4_add
    Examples
    --------
    >>> a = np.linspace(1.0, 10.0, 10)
    >>> multiply3_add(a, 1.01, a, 42e-4)
    array([ 1.0142, 4.0442, 9.0942, 16.1642, 25.2542, 36.3642,
    49.4942, 64.6442, 81.8142, 101.0042])
    """
    # Fused in one pass by the compiled gufunc implementation.
    return _impl.multiply3_add(a, b, c, d, **kwargs)
def multiply_add(a, b, c, **kwargs):
    """
    Element-wise multiplication of 2 arrays, adding a 3rd array to the
    result: a*b + c
    Parameters
    ----------
    a, b : (...) array
        arrays with the factors
    c : (...) array
        array with the addend
    Returns
    -------
    madd : (...) array
        element-wise result (a*b + c)
    Notes
    -----
    Numpy broadcasting rules apply.
    Implemented for types single, double, csingle and cdouble. Numpy
    conversion rules apply.
    See Also
    --------
    add3, multiply3, multiply3_add, multiply_add2, multiply4, multiply4_add
    Examples
    --------
    >>> a = np.linspace(1.0, 10.0, 10)
    >>> multiply_add(a, a, 42e-4)
    array([ 1.0042, 4.0042, 9.0042, 16.0042, 25.0042, 36.0042,
    49.0042, 64.0042, 81.0042, 100.0042])
    """
    # Fused in one pass by the compiled gufunc implementation.
    return _impl.multiply_add(a, b, c, **kwargs)
def multiply_add2(a, b, c, d, **kwargs):
    """
    Element-wise multiplication of 2 arrays, adding a 3rd and a 4th
    array to the result: a*b + c + d
    Parameters
    ----------
    a, b : (...) array
        arrays with the factors
    c, d : (...) array
        arrays with the addends
    Returns
    -------
    mult_add2 : (...) array
        element-wise result (a*b + c + d)
    Notes
    -----
    Numpy broadcasting rules apply.
    Implemented for types single, double, csingle and cdouble. Numpy
    conversion rules apply.
    See Also
    --------
    add3, multiply3, multiply3_add, multiply_add, multiply4, multiply4_add
    Examples
    --------
    >>> a = np.linspace(1.0, 10.0, 10)
    >>> multiply_add2(a, a, a, 42e-4)
    array([ 2.0042, 6.0042, 12.0042, 20.0042, 30.0042, 42.0042,
    56.0042, 72.0042, 90.0042, 110.0042])
    """
    # Fused in one pass by the compiled gufunc implementation.
    return _impl.multiply_add2(a, b, c, d, **kwargs)
def multiply4(a, b, c, d, **kwargs):
    """
    Element-wise multiplication of 4 arrays: a*b*c*d
    Parameters
    ----------
    a, b, c, d : (...) array
        arrays with the factors
    Returns
    -------
    m4 : (...) array
        element-wise result (a*b*c*d)
    Notes
    -----
    Numpy broadcasting rules apply.
    Implemented for types single, double, csingle and cdouble. Numpy
    conversion rules apply.
    See Also
    --------
    add3, multiply3, multiply3_add, multiply_add, multiply_add2,
    multiply4_add
    Examples
    --------
    >>> a = np.linspace(1.0, 10.0, 10)
    >>> multiply4(a, a, a[::-1], 1.0001)
    array([ 10.001 , 36.0036, 72.0072, 112.0112, 150.015 , 180.018 ,
    196.0196, 192.0192, 162.0162, 100.01 ])
    """
    # Fused in one pass by the compiled gufunc implementation.
    return _impl.multiply4(a, b, c, d, **kwargs)
def multiply4_add(a, b, c, d, e, **kwargs):
    """
    Element-wise multiplication of 4 arrays, adding a 5th array to the
    result: a*b*c*d + e
    Parameters
    ----------
    a, b, c, d : (...) array
        arrays with the factors
    e : (...) array
        array with the addend
    Returns
    -------
    m4a : (...) array
        element-wise result (a*b*c*d + e)
    Notes
    -----
    Numpy broadcasting rules apply.
    Implemented for types single, double, csingle and cdouble. Numpy
    conversion rules apply.
    See Also
    --------
    add3, multiply3, multiply3_add, multiply_add, multiply_add2, multiply4
    Examples
    --------
    >>> a = np.linspace(1.0, 10.0, 10)
    >>> multiply4_add(a, a, a[::-1], 1.01, 42e-4)
    array([ 10.1042, 36.3642, 72.7242, 113.1242, 151.5042, 181.8042,
    197.9642, 193.9242, 163.6242, 101.0042])
    """
    # Fix: the Returns section previously mis-named the result "add3".
    return _impl.multiply4_add(a, b, c, d, e, **kwargs)
| {
"repo_name": "ContinuumIO/gulinalg",
"path": "gulinalg/ufunc_extras.py",
"copies": "1",
"size": "8369",
"license": "bsd-2-clause",
"hash": -7210446646826693000,
"line_mean": 24.9906832298,
"line_max": 76,
"alpha_frac": 0.5933803322,
"autogenerated": false,
"ratio": 3.5826198630136985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46760001952136987,
"avg_score": null,
"num_lines": null
} |
# A set of functions helpful for analysing the OD matrices produced from tools
# such as OpenTripPlanner, NetView, etc.
# Patrick Sunter, 2013-2014.
# Uses OGR library for shape file manipulation aspects.
import csv
import os
import operator
import itertools
import numpy
import taz_files
### General utility functions - OTP
def readLatLons(otpfilename, nPoints):
    """Read a set of latitute and longitude points for a set of
    Travel Analysis Zones (TAZs) - from the first columns of an OTP O-D
    matrix file. Return these as a two-dimensional array,
    where primary index is ID of each TAZ."""
    # Size is nPoints+1 so the array can be indexed directly by 1-based TAZ ID
    # (row 0 stays zeroed).
    latlons = numpy.zeros((nPoints+1,2))
    otpfile = open(otpfilename)
    otpreader = csv.reader(otpfile, delimiter=',')
    print "Reading Lat-lons from OTP CSV ... "
    #header row
    header_row = otpreader.next()
    for ii, row in enumerate(otpreader):
        # Columns: 0 = TAZ ID, 1 = latitude, 2 = longitude.
        originID = int(row[0])
        latlons[originID] = (float(row[1]), float(row[2]))
    print "Done."
    otpfile.close()
    return latlons
### General utility functions - Netview
def readNVRouteIDs(nvfilename, nroutes):
    """Returns an array containing the OriginID, DestID pair for each
    route in the Netview CSV file.

    Args:
        nvfilename: path of the Netview CSV export (';'-delimited).
        nroutes: number of routes expected, used to size the result.
    """
    # Fix: the docstring above was previously assigned to a local variable
    # ('docstring = ...') instead of being the function docstring; also
    # removed a dead 'nroutes = 0' that clobbered the parameter unused.
    nvroutes = numpy.zeros((nroutes,2))
    nvfile = open(nvfilename)
    nvreader = csv.reader(nvfile, delimiter=';')
    #There are three mostly blank lines at the start
    for ii in range(3):
        nvreader.next()
    #Then headers row
    nv_header_row = nvreader.next()
    #OK, now process rest of rows
    for ii, row in enumerate(nvreader):
        # IDs are prefixed with a single character (e.g. 'C123') -- strip it.
        originIDText = row[0]
        originID = int(originIDText[1:])
        destIDText = row[1]
        destID = int(destIDText[1:])
        nvroutes[ii] = [originID, destID]
    nvfile.close()
    return nvroutes
### Entire OD Matrix reading
def readOTPMatrix(otpfilename, mat):
    """Read in an OD matrix from results created by OpenTripPlanner, and
    then post-processed by the make_od_matrix.py script.
    matrix 'mat' must be in numpy format, and already be of the correct
    size."""
    otpfile = open(otpfilename)
    otpreader = csv.reader(otpfile, delimiter=',')
    print "Reading OTP O-D matrix from CSV ..."
    #Create lookup table from header row
    header_row = otpreader.next()
    # Columns 0-2 hold origin ID and lat/lon; destination IDs start at col 3.
    destlookups = header_row[3:]
    for ii, row in enumerate(otpreader):
        if ii % 100 == 0:
            print "Reading %dth row of O-Ds" % (ii)
        originID = int(row[0])
        timesToDests = row[3:]
        for jj, time in enumerate(timesToDests):
            # +0.5 before int() rounds to the nearest whole second.
            mat[originID, int(destlookups[jj])] = int(float(time)+0.5)
    print "Done."
    otpfile.close()
    return
def readNVMatrix(nvfilename, mat):
    """Read in an OD matrix in the format created by the Netview routing
    tool. Matrix must be of the correct size.
    Note: converts the Netview output (in minutes) into seconds.
    Returns the number of routes read."""
    nvfile = open(nvfilename)
    nvreader = csv.reader(nvfile, delimiter=';')
    print "Reading Netview O-D matrix ..."
    #There are three mostly blank lines at the start
    for ii in range(3):
        nvreader.next()
    #Then headers row
    nv_header_row = nvreader.next()
    #OK, now process rest of rows
    nroutes = 0
    for ii, row in enumerate(nvreader):
        if ii % 1000 == 0:
            print "Reading and processing %dth row ... " % (ii)
        # IDs are prefixed with a single character (e.g. 'C123') -- strip it.
        originIDText = row[0]
        originID = int(originIDText[1:])
        destIDText = row[1]
        destID = int(destIDText[1:])
        # Column 9 holds the travel time in minutes.
        time_min = row[9]
        time_sec = float(time_min) * 60.0
        mat[originID, destID] = time_sec
        nroutes += 1
    print "Done."
    nvfile.close()
    return nroutes
# High-level analysis functions.
def saveComparisonFile(routesArray, od_mat_1, od_mat_2, compfilename,
        case_names):
    """Write a CSV comparing the travel times of each route in routesArray
    between two O-D matrices. 'case_names' is a pair of short labels
    (e.g. ('OTP', 'NV')) used in the column headers. Times of 0, -1 or -2
    are treated as invalid and their diff columns written as 'NA'."""
    compfile = open(compfilename, "w")
    compwriter = csv.writer(compfile, delimiter=',')
    # Header row
    compwriter.writerow(['OriginID','DestID', '%s Time (s)' % case_names[0], \
        '%s Time (s)' % case_names[1], 'Difference (s)', 'Abs. Diff (s)',
        'Abs. Diff (%)','Diff (%)'])
    for ii, route in enumerate(routesArray):
        originID = route[0]
        destID = route[1]
        time_1 = int(od_mat_1[originID, destID])
        time_2 = int(od_mat_2[originID, destID])
        # Checking for OTP times that are null for some reason.
        # NB: ideally would be good to keep some info with a matrix so
        # we can interpret if it was created by OTP etc how to handle.
        # would require more complex data structures, or an object-oriented
        # wrapper with an is_valid() function etc.
        if time_1 in [0,-1,-2] or time_2 in [0,-1,-2]:
            diff = "NA"
            diff_percent = "NA"
            absdiff = "NA"
            absdiff_percent = "NA"
        else:
            # Percentages are expressed relative to the first case's time.
            diff = time_1 - time_2
            diff_percent = diff / float(time_1)
            absdiff = abs(diff)
            absdiff_percent = absdiff / float(time_1)
        compwriter.writerow([originID, destID, time_1, time_2, diff, absdiff,\
            absdiff_percent, diff_percent])
    compfile.close()
    return
def readComparisonFile(compfilename):
    """Read in a comparison file created by saveComparisonFile().
    Returns a tuple containing 3 arrays:- first being the routes
    in terms of TAZ O-D pairs, the second being times for those routes
    in the first case, the third being times in the second case.
    Requires format of saved comparison file's first 4 columns to be origin ID,
    dest ID, time in case 1, time in case 2
    (e.g. case 1 being OTP, case 2 being Netview)."""
    compfile = open(compfilename)
    compreader = csv.reader(compfile, delimiter=',')
    #headers
    compreader.next()
    # First pass just counts the data rows.
    nrows = 0
    for ii, row in enumerate(compreader):
        nrows += 1
    routesArray = []
    case1Times = []
    case2Times = []
    #Restart, now we know array sizes
    compfile.seek(0)
    compreader = csv.reader(compfile, delimiter=',')
    #headers
    compreader.next()
    for ii, row in enumerate(compreader):
        # NOTE(review): rows whose time columns hold 'NA' (see
        # saveComparisonFile) would make int() raise here -- confirm inputs.
        routesArray.append((int(row[0]), int(row[1])))
        case1Times.append(int(row[2]))
        case2Times.append(int(row[3]))
    compfile.close()
    return routesArray, case1Times, case2Times
def createShapefile(routesArray, lonlats, case1Times, case2Times, caseNames,
shapefilename):
"""Creates a Shape file stating the difference between times in two
OD matrices, which have been 'unrolled' as large arrays listing
travel time between OD pairs. 'caseNames' should be short strings
describing the cases, eg. 'OTP' and 'NV'.
Saves results to a shapefile determined by shapefilename.
N.B. :- thanks for overall strategy here are due to author of
https://github.com/glennon/FlowpyGIS"""
import osgeo.ogr
from osgeo import ogr
print "Creating shapefile of route lines with time attributes to file"\
" %s ..." % (shapefilename)
driver = ogr.GetDriverByName('ESRI Shapefile')
# create a new data source and layer
if os.path.exists(shapefilename):
driver.DeleteDataSource(shapefilename)
ds = driver.CreateDataSource(shapefilename)
if ds is None:
print 'Could not create file'
sys.exit(1)
c1TimeFieldName = 't %s' % caseNames[0]
c2TimeFieldName = 't %s' % caseNames[1]
layer = ds.CreateLayer('routeinfos', geom_type=ogr.wkbLineString)
fieldDefn = ogr.FieldDefn('OriginID', ogr.OFTReal)
layer.CreateField(fieldDefn)
fieldDefn = ogr.FieldDefn('DestID', ogr.OFTReal)
layer.CreateField(fieldDefn)
fieldDefn = ogr.FieldDefn(c1TimeFieldName, ogr.OFTReal)
layer.CreateField(fieldDefn)
fieldDefn = ogr.FieldDefn(c2TimeFieldName, ogr.OFTReal)
layer.CreateField(fieldDefn)
fieldDefn = ogr.FieldDefn('Diff', ogr.OFTReal)
layer.CreateField(fieldDefn)
# END setup creation of shapefile
for ii, routePair in enumerate(routesArray):
originID = routePair[0]
destID = routePair[1]
case1time = case1Times[ii]
case2time = case2Times[ii]
linester = ogr.Geometry(ogr.wkbLineString)
linester.AddPoint(lonlats[originID][0], lonlats[originID][1])
linester.AddPoint(lonlats[destID][0], lonlats[destID][1])
featureDefn = layer.GetLayerDefn()
feature = ogr.Feature(featureDefn)
feature.SetGeometry(linester)
feature.SetField('OriginID', originID)
feature.SetField('DestID', destID)
feature.SetField(c1TimeFieldName, case1time)
feature.SetField(c2TimeFieldName, case2time)
if case1time in [0,-1,-2] or case2time in [0,-1,-2]:
diff = 0
else:
diff = case1time - case2time
feature.SetField('Diff', diff)
layer.CreateFeature(feature)
# shapefile cleanup
# destroy the geometry and feature and close the data source
linester.Destroy()
feature.Destroy()
ds.Destroy()
print "Done."
return
| {
"repo_name": "PatSunter/pyOTPA",
"path": "od_matrix_analysis.py",
"copies": "1",
"size": "9063",
"license": "bsd-3-clause",
"hash": 3155621054549830700,
"line_mean": 33.9922779923,
"line_max": 79,
"alpha_frac": 0.6439368862,
"autogenerated": false,
"ratio": 3.5611001964636544,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9668315168430421,
"avg_score": 0.007344382846646743,
"num_lines": 259
} |
"""A set of functions to help the protocols implementation."""
import six
from .graph.nodes import Root
from .helper import Logger
def can_execute(operation, component):
    """Check if an operation can be executed on a component.

    Raises ValueError (with an explanatory message) when the operation is
    not allowed from the component's current protocol state, when a
    required capability is not offered by a partner, or when executing
    would withdraw a capability some other component still requires.
    Returns None when all checks pass.
    """
    assert isinstance(operation, six.string_types)
    assert isinstance(component, Root)
    _log = Logger.get(__name__)
    # component must have the operation in the current state
    protocol = component.protocol
    transition = protocol.next_transition(operation)
    _log.info('component "%s" is in state "%s"', component.name, protocol.current_state.name)
    if transition is None:
        raise ValueError('cannot execute operation "{}" from state "{}".'
                         ''.format(operation, protocol.current_state.name))
    # all requirement of the transition and of the next state
    # are satisfied.
    for req in transition.target.requires + transition.requires:
        for rel in component.relationships:
            if rel.requirement == req:
                # check that the capability needed is offered
                # by the target of the relationship
                _log.info('component "%s" require "%s" and "%s" offers %s',
                          component.name, rel.requirement, rel.to.name,
                          rel.to.protocol.current_state.offers)
                if rel.capability not in rel.to.protocol.current_state.offers:
                    raise ValueError('component "{}" require "{}" that is not offers by "{}"'
                                     ''.format(component.name, rel.requirement, rel.to.name))
    # all offers are not used by other component
    _log.debug('what I offer %s', transition.source.offers)
    _log.debug('who require me %s', [str(r) for r in component.up_requirements])
    for off in transition.source.offers:
        for rel in component.up_requirements:
            if rel.capability == off:
                # check that no dependant component still requires a
                # capability that this transition would stop offering
                _log.info('component "%s" offers "%s" and "%s" requires %s',
                          component.name, rel.capability, rel.origin.name,
                          rel.origin.protocol.current_state.requires)
                if rel.requirement in rel.origin.protocol.current_state.requires and\
                        rel.requirement not in transition.target.offers:
                    raise ValueError('component "{}" offers "{}" that is required by "{}"'
                                     ''.format(component.name, rel.capability, rel.origin.name))
| {
"repo_name": "di-unipi-socc/tosKer",
"path": "tosker/protocol_helper.py",
"copies": "1",
"size": "2598",
"license": "mit",
"hash": 7348810131905121000,
"line_mean": 49.9411764706,
"line_max": 96,
"alpha_frac": 0.6085450346,
"autogenerated": false,
"ratio": 4.639285714285714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5747830748885714,
"avg_score": null,
"num_lines": null
} |
"""A set of functions to standardize some options for python scripts"""
import logging
import argparse
from os import getcwd, path
logger = logging.getLogger(__name__)
def setup_parser_help(parser, additional_docs=None):
    """
    Switch *parser* to raw help formatting and optionally append extra docs.
    Parameters
    ----------
    parser : `ArgumentParser`
        The parser to be modified.
    additional_docs : str, optional
        Documentation appended (as the epilog) to the help `argparse`
        generates.
    """
    parser.formatter_class = argparse.RawDescriptionHelpFormatter
    if additional_docs is not None:
        parser.epilog = additional_docs
def add_verbose(parser):
    """
    Attach the standard verbosity flag (``-v``/``--verbose``) to *parser*.
    Parameters:
    -----------
    parser : `ArgumentParser`
    """
    parser.add_argument("-v", "--verbose",
                        help="provide more information during processing",
                        action="store_true")
def add_directories(parser, nargs_in='+'):
    """
    Add the positional ``dir`` argument: one or more directories to process.
    Parameters
    ----------
    parser : `ArgumentParser`
    nargs_in : str, optional
        ``nargs`` specification forwarded to ``add_argument`` (default '+').
    """
    parser.add_argument("dir", metavar='dir', nargs=nargs_in,
                        help="Directory to process")
def add_destination_directory(parser):
    """
    Add the ``-d``/``--destination-dir`` option (defaults to ``None``).
    Parameters
    ----------
    parser : `ArgumentParser`
    """
    parser.add_argument(
        "-d", "--destination-dir", default=None,
        help='Directory in which output from this script will be stored')
def add_debug(parser):
    """
    Add the ``--debug`` flag, which requests very verbose logging output.
    Parameters
    ----------
    parser : `ArgumentParser`
    """
    parser.add_argument('--debug', action='store_true',
                        help='Turn on very detailed logging output')
def add_no_log_destination(parser):
    """
    Add ``-n``/``--no-log-destination``, suppressing log files in the
    destination directory.
    """
    parser.add_argument('-n', '--no-log-destination', action='store_true',
                        help='Do not write log files to destination directory')
def add_console_output_args(parser):
    """Add the ``--quiet-console`` and ``--silent-console`` logging flags."""
    parser.add_argument('--quiet-console', action='store_true',
                        help=('Log only errors (or worse) to console '
                              'while running scripts'))
    parser.add_argument('--silent-console', action='store_true',
                        help=('Turn off all logging output to console'))
def construct_default_parser(docstring=None):
    """Build an `ArgumentParser` pre-loaded with every standard option."""
    parser = argparse.ArgumentParser()
    if docstring is not None:
        setup_parser_help(parser, docstring)
    # Order matters for the generated --help listing; keep it stable.
    for add_option in (add_verbose, add_directories,
                       add_destination_directory, add_debug,
                       add_no_log_destination, add_console_output_args):
        add_option(parser)
    return parser
def setup_logging(logger, args, screen_handler):
    """Apply the parsed verbosity/console flags to *logger* and its
    console *screen_handler* (debug overrides verbose)."""
    level = logging.WARNING
    if args.verbose:
        level = logging.INFO
    if args.debug:
        level = logging.DEBUG
    logger.setLevel(level)
    if args.quiet_console:
        screen_handler.setLevel(logging.WARNING)
    if args.silent_console:
        logger.removeHandler(screen_handler)
def handle_destination_dir_logging_check(args):
    """
    Perform error checking for command line arguments

    Returns True when log files must NOT be written to the destination
    directory; raises RuntimeError if --no-log-destination was given while
    the destination is the current working directory.
    """
    # turn off destination logging if we are running in the destination
    # directory because we always create logs in the working directory...
    do_not_log_in_destination = args.no_log_destination
    cwd = getcwd()
    logger.debug('cwd: %s', cwd)
    # Work out whether the effective destination IS the current directory.
    # path.samefile raises OSError for paths that don't exist; that is
    # treated as "not the same directory".
    if args.destination_dir:
        try:
            dest_is_cwd = path.samefile(args.destination_dir, cwd)
        except OSError:
            dest_is_cwd = False
    elif args.dir:
        try:
            dest_is_cwd = any([path.samefile(d, cwd) for d in args.dir])
        except OSError:
            dest_is_cwd = False
    else:
        dest_is_cwd = False
    logger.debug('dest_is_cwd: %s', dest_is_cwd)
    if dest_is_cwd:
        if do_not_log_in_destination:
            raise RuntimeError('option --no-log-destination cannot be used '
                               'when running in the destination directory '
                               'because a log is always made in the '
                               'directory in which the script is run')
        # A log is always produced in cwd, so suppress a duplicate copy.
        do_not_log_in_destination = True
    return do_not_log_in_destination
def _main_function_docstring(command_name):
    # NOTE: the docstring below is not just documentation -- it is a template
    # whose {} placeholder is filled with command_name and returned, for use
    # as the docstring of generated command-line wrapper functions. Do not
    # reword it without checking its consumers.
    """
    Wrapper for invoking {} from the command line
    Parameters
    ----------
    arglist : list of strings, optional
        If set, use this arglist instead of `sys.argv` for parsing command
        line arguments. Primarily useful for testing.
    """
    return _main_function_docstring.__doc__.format(command_name)
| {
"repo_name": "mwcraig/msumastro",
"path": "msumastro/scripts/script_helpers.py",
"copies": "1",
"size": "4983",
"license": "bsd-3-clause",
"hash": -9129508277166882000,
"line_mean": 26.3791208791,
"line_max": 76,
"alpha_frac": 0.6128838049,
"autogenerated": false,
"ratio": 4.371052631578947,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5483936436478947,
"avg_score": null,
"num_lines": null
} |
"""A set of functions used to validate HTTP input data.
These functions are primarily used to valid that arguments sent in http
requests are what they are supposed to be.
"""
from typing import Union, Any, Dict, List, cast, Optional, SupportsInt
from irisett.webapi.errors import InvalidData
def require_str(value: Any, convert: bool=False, allow_none: bool=False) -> Any:
    """Make sure a value is a str.

    Used when dealing with http input data.

    Returns the value unchanged if it is already a str (or None when
    allow_none is set); otherwise converts it with str() when convert
    is set, or raises InvalidData.
    """
    if allow_none and value is None:
        return value
    # Strict type check (not isinstance) to match the other validators.
    if type(value) is str:
        return value
    if not convert:
        raise InvalidData('value was %s(%s), expected str' % (type(value), value))
    return str(value)
def require_bool(value: Optional[Union[bool, str, int]], convert: bool=False, allow_none: bool=False) -> Any:
    """Make sure a value is a boolean.

    Used when dealing with http input data.

    Accepts an actual bool, or (when convert is set) the common textual
    and numeric spellings of true/false. Raises InvalidData otherwise.
    """
    if value is None and allow_none:
        return value
    if type(value) != bool:
        if not convert:
            # Consistency fix: include the offending type/value in the
            # message, matching every other require_* validator (the
            # original raised a bare InvalidData() here).
            raise InvalidData('value was %s(%s), expected bool' % (type(value), value))
        if value in [None, 0, '0', 'false', 'False']:
            value = False
        elif value in [1, '1', 'true', 'True']:
            value = True
        else:
            raise InvalidData('value was %s(%s), expected bool' % (type(value), value))
    return cast(bool, value)
def require_dict(value: Optional[Dict[Any, Any]], key_type: Any=None, value_type: Any=None,
                 allow_none: bool=False) -> Any:
    """Make sure a value is a Dict[key_type, value_type].

    Used when dealing with http input data.

    When key_type/value_type are given, every key/value in the dict is
    strictly type-checked. Raises InvalidData on any mismatch.
    """
    if value is None and allow_none:
        return value
    if type(value) != dict:
        raise InvalidData('value was %s(%s), expected dict' % (type(value), value))
    value = cast(Dict, value)
    if key_type or value_type:
        for k, v in value.items():
            if key_type and type(k) != key_type:
                raise InvalidData('dict key was %s(%s), expected %s' % (type(k), k, key_type))
            if value_type and type(v) != value_type:
                # Bug fix: the original message reported key_type here
                # (copy-paste error); it must report value_type.
                raise InvalidData('dict value was %s(%s), expected %s' % (type(v), v, value_type))
    return value
def require_list(value: Optional[List[Any]], item_type: Any=None, allow_none: bool=False) -> Any:
    """Make sure a value is a List[item_type].

    Used when dealing with http input data.

    When item_type is given, every element is strictly type-checked.
    """
    if allow_none and value is None:
        return value
    if type(value) != list:
        raise InvalidData('value was %s, expected list' % type(value))
    value = cast(List, value)
    if item_type:
        for element in value:
            # Strict type check (not isinstance), same as the siblings.
            if type(element) != item_type:
                raise InvalidData('list item was %s, expected %s' % (type(element), item_type))
    return value
def require_int(value: Optional[Union[SupportsInt, str, bytes]], allow_none: bool=False) -> Any:
    """Make sure a value is an int.

    Used when dealing with http input data.

    Converts str/bytes/numeric input via int(); raises InvalidData when
    the conversion fails.
    """
    if value is None and allow_none:
        return value
    value = cast(Union[SupportsInt, str, bytes], value)
    try:
        value = int(value)
    except (ValueError, TypeError):
        # Bug fix: the original message said "expected list" (copy-paste
        # from require_list); this validator expects an int.
        raise InvalidData('value was %s(%s), expected int' % (type(value), value))
    return value
| {
"repo_name": "beebyte/irisett",
"path": "irisett/webapi/require.py",
"copies": "1",
"size": "3225",
"license": "mit",
"hash": -7142352400459288000,
"line_mean": 33.3085106383,
"line_max": 109,
"alpha_frac": 0.6096124031,
"autogenerated": false,
"ratio": 3.706896551724138,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9797231273164697,
"avg_score": 0.0038555363318881174,
"num_lines": 94
} |
"""A set of generic utility functions."""
import subprocess
import optparse
import sys
def run(cmd, echo=False, verbose=False):
    """Run a command in a sub-process.

    Parameters:
    cmd     - command string, executed through the shell
    echo    - if True, print the command before running it
    verbose - if True, print each output line as it is read

    Returns a list of the command's stdout lines, decoded and with
    trailing whitespace stripped.
    """
    result = []
    if echo:
        print(cmd)
    # NOTE(security): shell=True passes cmd to the shell verbatim; only
    # call this with trusted command strings.
    # Fix: use Popen as a context manager so the pipe is closed and the
    # child is waited on (the original leaked a zombie process).
    with subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE) as p:
        for raw_line in p.stdout:
            line = raw_line.rstrip().decode()
            if verbose:
                print(line)
            result.append(line)
    return result
def parse_cmd(name,
              arguments,
              options=None
              ):
    """Parse the command line.
    Keyword arguments:
    name      - name of the command
    arguments - a list, each element being a tuple (name, descr, [option])
                where name is the name of the argument, descr is a humanly
                readable description of the argument, and option is the
                third optional item (can be anything, usually a boolean
                flag, e.g. True) which indicates that this argument is
                a list. If present, the option stops command processing
                after the said argument.
    options   - a list, each element being
                (switch, name, storage, default, descr)
    If successful:
      1) If options are given, return (ARGS, OPTS) as dictionaries.
      2) If options are not supplied, return ARGS as a dictionary.
    If failed:
      Exit with code 2 if failed.
    """
    # Compute the length of the longest argument name.
    # We'll use this to make the usage columns look nice.
    length = 0
    for arg in arguments:
        length = max(len(arg[0]), length)
    # Assemble the usage string.
    usage = ' %s '%name
    if options:
        usage += '[options] '
    for arg in arguments:
        usage += '%s'%arg[0]
        if len(arg) == 3:
            usage += '...'
        usage += ' '
    usage += '\n\nArguments:'
    for arg in arguments:
        usage += '\n %s'%arg[0]
        # Pad so the descriptions line up in one column.
        for ii in range(length - len(arg[0])):
            usage += ' '
        usage += ' %s'%arg[1]
    # Create the OptionParser object.
    opt_parser = optparse.OptionParser(usage=usage)
    if options:
        for option in options:
            opt_parser.add_option(option[0],
                                  dest = option[1],
                                  action = option[2],
                                  default = option[3],
                                  help = option[4])
    # Parse the command line (reads sys.argv).
    OPTS, ARGS = opt_parser.parse_args()
    # Make sure arguments were given since
    # OptionParser considers them optional.
    if len(ARGS) < len(arguments):
        opt_parser.print_help()
        sys.exit(2)
    # Assemble ARGS in a dictionary.
    ARGS_DICT = {}
    count = 0
    for arg in arguments:
        # If this argument had the optional third element,
        # then give it the remainder of ARGS and stop processing.
        if len(arg) == 3:
            ARGS_DICT[arg[0]] = ARGS[count:]
            break
        else:
            ARGS_DICT[arg[0]] = ARGS[count]
            count += 1
    # Assemble OPTS in a dictionary.
    OPTS_DICT = {}
    if options:
        for opt in options:
            # Fix: read the parsed option with getattr instead of the
            # original exec() on a format string -- equivalent behavior,
            # but safe against odd option names and far clearer.
            OPTS_DICT[opt[1]] = getattr(OPTS, opt[1])
    # Return the parsed result.
    if options:
        result = (ARGS_DICT, OPTS_DICT)
    else:
        result = ARGS_DICT
    return result
# The end.
| {
"repo_name": "vmlaker/pythonwildmagic",
"path": "tool/util.py",
"copies": "1",
"size": "3571",
"license": "mit",
"hash": -1125724373356811100,
"line_mean": 29.5213675214,
"line_max": 78,
"alpha_frac": 0.5317838141,
"autogenerated": false,
"ratio": 4.2461355529131986,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5277919367013199,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.