index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
21,800 | ec63900736aa4ea9882336ef44223981d5897efc | import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
# sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
from sklearn.metrics import confusion_matrix
import scipy.io
from RBM import *

# Load the pre-split dataset (256 features -> 256 visible units).
mat = scipy.io.loadmat('input.mat')
train_data = mat['train_x']
test_data = mat['test_x']
train_label = mat['train_y']
test_label = mat['test_y']

# First RBM layer: 256 visible -> 250 hidden units.
r1 = RBM(num_visible = 256, num_hidden = 250)
train_data1 = train_data
test_data1 = test_data
r1.train(train_data1, max_epochs = 5000)
output_train1, prob_train1 = r1.run_visible(train_data1)
output_test1, prob_test1 = r1.run_visible(test_data1)

# Second RBM layer stacked on the first: 250 visible -> 220 hidden units.
r2 = RBM(num_visible = 250, num_hidden = 220)
train_data2 = output_train1
test_data2 = output_test1
r2.train(train_data2, max_epochs = 5000)
output_train2, prob_train2 = r2.run_visible(train_data2)
output_test2, prob_test2 = r2.run_visible(test_data2)

# Classify the stacked-RBM features with a BernoulliRBM + logistic pipeline.
X_train = output_train2
X_test = output_test2
Y_train = train_label
Y_test = test_label
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
rbm.learning_rate = 0.1
rbm.n_iter = 5000
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 200
classifier.fit(X_train, Y_train)
E = classifier.predict(X_test)
print("Logistic regression using RBM features:\n%s\n" % (
    metrics.classification_report(
        Y_test,
        classifier.predict(X_test))))
"""
r3 = RBM(num_visible = 220, num_hidden = 200)
train_data3 = output_train2
test_data3 = output_test2
r3.train(train_data3, max_epochs = 5000)
output_train3,prob_train3=r3.run_visible(train_data3)
output_test3,prob_test3=r3.run_visible(test_data3)
print output_test3
"""
|
21,801 | da1c7a2d3674cf8f56a489593fd1478cb15758f6 | from modules.lab.math_operations import calculate_expression
# NOTE(review): "^" is XOR in Python itself; presumably calculate_expression
# parses it as exponentiation — confirm against the math_operations module.
expression = "6.66 ^ 2"
print(calculate_expression(expression))
21,802 | 7e4c9bab08f93f0703017e9db56e5c9514fa9af1 | from .decorators import *
import json
import inspect
import websockets
# Optional dependency: dominate provides html_tag; fall back to an empty
# stub class so isinstance(x, html_tag) checks still work without it.
try:
    # noinspection PyPackageRequirements
    from dominate.tags import html_tag
except ImportError:
    # noinspection PyPep8Naming
    class html_tag:
        pass
def preprocess_jquery_arguments(x):
    """Normalize a value before it is recorded as a jQuery call argument.

    dominate tags are flattened to their HTML string; coroutine functions are
    registered as event handlers and replaced by their handler info; anything
    else is passed through unchanged.
    """
    if isinstance(x, html_tag):
        return str(x)
    if not inspect.iscoroutinefunction(x):
        return x
    # Register the coroutine as a handler and hand back its descriptor.
    on(None, None)(x)
    return id2handlerInfo[id(x)]
class Selector(JSONSerializable):
    """Serializable proxy for a jQuery selector expression.

    Attribute access is interpreted as a jQuery method name: without a
    websocket the calls are recorded for later batch dispatch (chainable);
    with a websocket each call is sent immediately and awaited.
    """
    def __init__(self, selector: str, websocket: websockets.WebSocketServerProtocol=None):
        self.selector = selector
        # Accumulated (method_name, *args) tuples collected via __getattr__.
        self.sub_actions = []
        self.type = 'selector'
        self._websocket = websocket
        if websocket is not None:
            # A live websocket switches this selector to immediate mode.
            self.command = 'immediate'
    def __getattr__(self, action):
        # Any unknown attribute is treated as a jQuery method to invoke.
        if self._websocket is None:
            def func(*args):
                # Batched mode: record the call and return self for chaining.
                args = tuple(preprocess_jquery_arguments(a) for a in args)
                self.sub_actions.append((action,) + args)
                return self
            return func
        async def func(*args):
            # Immediate mode: send over the websocket and return the
            # browser's JSON-decoded reply.
            # NOTE(review): unlike the batched path, args are not run
            # through preprocess_jquery_arguments here — confirm whether
            # that asymmetry is intentional.
            self.sub_actions.append((action,) + args)
            await self._websocket.send(str(self))
            return json.loads(await self._websocket.recv())
        return func
class Bundle(JSONSerializable):
    """Container for browser-side actions attached to one websocket.

    In 'batch' mode actions are collected and dispatched together after the
    handler returns; a paired `broadcast` bundle sends to every client.
    """
    def __init__(self, websocket: websockets.WebSocketServerProtocol, command='batch'):
        self.command = command
        self.actions = []
        self._websocket = websocket
        if command == 'batch':
            # Companion bundle whose actions go to all connected clients.
            self.broadcast = Bundle(websocket, command='broadcast')
    def __call__(self, selector: str):
        # Create a batched Selector and record it for later dispatch.
        s = Selector(selector)
        self.actions.append(s)
        return s
    def eval(self, code: str, **data):
        """Run arbitrary code in the browser after the handler returns.
        Meant to be used for interacting with 3rd-party Javascript libraries or things not provided by jQuery API.
        Args:
            code: The Javascript code to be evaluated in the browser
            **data: The data supplied to be used by the Javascript code, must be JSON serializable
                All data will be attached to the `window` object.
        """
        self.actions.append({'type': 'eval', 'code': code, 'data': data})
    async def eval_immediately(self, code: str, **data):
        """Run arbitrary code in the browser and get the evaluated result back immediately.
        Must be awaited.
        Args:
            code: The Javascript code to be evaluated in the browser
            **data: The data supplied to be used by the Javascript code, must be JSON serializable
                All data will be attached to the `window` object.
        Returns: The evaluated result from browser
        """
        await self._websocket.send(json.dumps({
            'command': 'immediate',
            'type': 'eval',
            'code': code,
            'data': data
        }))
        return json.loads(await self._websocket.recv())
    def immediate(self, selector: str) -> Selector:
        """Execute jQuery function and get the result back immediately.
        Must be awaited.
        Meant to be used for dynamically getting attributes from UI.
        Args:
            selector: A jQuery selector expression
        Returns: The resulting selector object
        """
        return Selector(selector, websocket=self._websocket)
|
21,803 | d6ab14a1e38173f27b32eac28952046e378185c4 | # Remove duplicates from sorted array
# Given a sorted array nums, remove the duplicates in-place such that each element appear only once and return the new length.
# Do not allocate extra space for another array; you must do this by modifying the input array in-place with O(1) extra memory.
# Example:
# Given nums = [0,0,1,1,1,2,2,3,3,4],
# Your function should return length = 5, with the first five elements of nums being modified to 0, 1, 2, 3, and 4 respectively.
# It doesn't matter what values are set beyond the returned length.
# =============================================================================================
# Returns the number of valid entries after deletion
# Time complexity: O(n)
# Space complexity: O(1)
def remove_duplicates(arr):
    """Remove duplicates from a sorted list in place.

    Keeps the first occurrence of each value in the leading slots of ``arr``;
    values beyond the returned length are unspecified.

    Args:
        arr: sorted list, modified in place.
    Returns:
        Number of unique leading entries (0 for an empty list — the original
        wrongly returned 1 in that case). Runs in O(n) time, O(1) space.
    """
    if not arr:
        return 0
    new_index = 1
    for i in range(1, len(arr)):
        # Compare against the last kept element, not the previous raw one.
        if arr[new_index-1] != arr[i]:
            arr[new_index] = arr[i]
            new_index += 1
    return new_index
def remove_duplicates_2(nums):
    """Compact a sorted list in place so each value appears once.

    Returns the count of unique values; entries past that index are
    left in an unspecified state.
    """
    write = 0
    for read, value in enumerate(nums):
        # Skip any element equal to its predecessor (duplicates are
        # adjacent because the input is sorted).
        if read and value == nums[read - 1]:
            continue
        nums[write] = value
        write += 1
    return write
if __name__ == "__main__":
    # Demo input: expected result is length 5 with [0, 1, 2, 3, 4] in front.
    arr = [0, 0, 1, 1, 1, 2, 2, 3, 3, 4]
    # print(remove_duplicates(arr))
    print(remove_duplicates_2(arr))
|
21,804 | c6bdf1fe27aa4636718d18161ed36e0bccbf5076 | # -*- coding: utf-8 -*-
"""
@author: Nabil Hossain
nhossain@cs.rochester.edu
Dept. Computer Science
University of Rochester, NY
"""
'''
A naive baseline system for task 1
This baseline always predicts the mean funniness grade of all edited headlines in
training set.
'''
import pandas as pd
import numpy as np
import sys
import os
def baseline_task_1(train_loc, test_loc, out_loc):
    """Mean-grade baseline for task 1.

    Predicts the training set's average ``meanGrade`` for every test row and
    writes an (id, pred) CSV to ``out_loc``.

    Args:
        train_loc: path to the training CSV (must contain ``meanGrade``).
        test_loc: path to the test CSV (must contain ``id``).
        out_loc: path for the output CSV.
    """
    training = pd.read_csv(train_loc)
    evaluation = pd.read_csv(test_loc)
    # One constant prediction: the mean funniness grade over training.
    mean_grade = np.mean(training.meanGrade)
    evaluation['pred'] = mean_grade
    evaluation[['id', 'pred']].to_csv(out_loc, index=False)
    print('Output file created:\n\t- '+os.path.abspath(out_loc))
if __name__ == '__main__':
    # expect sys.argv[1] = ../data/task-1/train.csv
    # expect sys.argv[2] = ../data/task-1/dev.csv
    # expect sys.argv[3] = '../baseline_output/task-1-output.csv'
    # argv holds the script name plus up to three arguments, so an explicit
    # output path means len(sys.argv) == 4. The previous `<= 2` check crashed
    # with IndexError when exactly the two data files were given (len == 3).
    if len(sys.argv) <= 3:
        out_loc = '../baseline_output/task-1-output.csv'
    else:
        out_loc = sys.argv[3]
    baseline_task_1(sys.argv[1], sys.argv[2], out_loc)
|
21,805 | 8fc7f5088312b1cd605a65eae7698585fc28055e | import cv2
import os
import numpy as np
import pandas as pd
import json
import torch
from tqdm import tqdm
from os import listdir
from os.path import isfile, join
from scipy.misc import imread, imsave, imresize
from shutil import copyfile
from scipy import ndimage
from skimage.morphology import label
from imgaug import augmenters as iaa
import imgaug as ia
from timeit import default_timer as timer
from albumentations import (
HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue,
IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, IAAPiecewiseAffine,
IAASharpen, IAAEmboss, RandomBrightnessContrast, Flip, OneOf, Compose, Rotate, IAAAffine,
IAASuperpixels, RGBShift, ChannelShuffle, RandomGamma, ToGray, InvertImg, ElasticTransform
)
# Dataset locations for the Kaggle humpback-whale pipeline.
TRAIN_DF = '../DATASET/humpback_whale/all/train.csv'
SUB_Df = '../DATASET/humpback_whale/all/sample_submission.csv'
TRAIN = '../DATASET/humpback_whale/size768/train/'
TRAIN_MASK = '../DATASET/humpback_whale/size768/train_mask/'
TEST = '../DATASET/humpback_whale/size768/test/'
TEST_MASK = '../DATASET/humpback_whale/size768/test_mask/'
# Preprocessing mode used by read_for_training:
# 'background', 'instance' or 'code' (mask + column-stretch encoding).
mode = 'code'
def encode(img, mask):
    """Stretch each image column so the masked span fills the full height.

    For every column containing mask pixels, the run between the first and
    last masked rows is resized to the column height, normalizing the
    object's vertical extent column by column.

    Args:
        img: single-channel image, modified in place and returned
             (assumes 2-D — a 3-channel variant is commented out below).
        mask: 2-D mask aligned with ``img``; non-zero marks the object.
    Returns:
        The stretched image (same array object as ``img``).
    """
    for w in range(mask.shape[1]):
        if mask[:, w].max() > 0:
            start = -1
            end = -1
            # First masked row scanning from the top.
            for i in range(mask.shape[0]):
                if mask[i, w] > 0:
                    start = i
                    break
            # Last masked row scanning from the bottom.
            for i in range(1, mask.shape[0]):
                if mask[mask.shape[0]-i, w] > 0:
                    end = mask.shape[0]-i
                    break
            if start != end:
                # img[:, w, :] = imresize(img[start:end, w+1, :], (img.shape[0], 1, 3))
                img[:, w] = imresize(img[start:end, w:w+1], (img.shape[0], 1))[:, 0]
    return img
def code_vertical(img, mask):
    """Row-wise analogue of :func:`encode`.

    For every row containing mask pixels, the run between the first and last
    masked columns is resized to the full row width.

    Args:
        img: single-channel image, modified in place and returned
             (assumes 2-D — a 3-channel variant is commented out below).
        mask: 2-D mask aligned with ``img``; non-zero marks the object.
    Returns:
        The stretched image (same array object as ``img``).
    """
    for h in range(mask.shape[0]):
        if mask[h, :].max() > 0:
            start = -1
            end = -1
            # First masked column scanning from the left.
            for i in range(mask.shape[1]):
                if mask[h, i] > 0:
                    start = i
                    break
            # Last masked column scanning from the right.
            for i in range(1, mask.shape[1]):
                if mask[h, mask.shape[1]-i] > 0:
                    end = mask.shape[1]-i
                    break
            if start != end:
                # img[h, :, :] = imresize(img[h:h+1, start:end, :], (1, img.shape[1], 3))
                img[h, :] = imresize(img[h:h + 1, start:end], (1, img.shape[1]))[0]
    return img
def size_normalization(img, msk):
    """Crop image and mask to the image's non-zero bounding box, then resize
    both back to their original shapes so the object fills the frame.

    Note the bounding box is computed from ``img`` (assumed already
    background-masked to zeros), not from ``msk``.
    """
    # Vertical extent of non-zero content.
    start_h = 0
    end_h = img.shape[0]
    for h in range(1, img.shape[0]):
        if img[h, :].max() > 0 and start_h == 0:
            start_h = h - 1
        if img[img.shape[0]-h-1, :].max() > 0 and end_h == img.shape[0]:
            end_h = img.shape[0]-h
    # Horizontal extent of non-zero content.
    start_w = 0
    end_w = img.shape[1]
    for w in range(1, img.shape[1]):
        if img[:, w].max() > 0 and start_w == 0:
            start_w = w - 1
        if img[:, img.shape[1]-w-1].max() > 0 and end_w == img.shape[1]:
            end_w = img.shape[1]-w
    # Crop to the detected box and stretch back to the original size.
    img = imresize(img[start_h:end_h, start_w:end_w], img.shape)
    msk = imresize(msk[start_h:end_h, start_w:end_w], msk.shape)
    return img, msk
def strong_aug(p=1.0):
    """Training-time augmentation pipeline: brightness/contrast jitter plus
    a +/-30 degree rotation with constant-border padding.

    Args:
        p: probability of applying the whole composed pipeline.
    """
    return Compose([
        RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.3, p=1.0),
        Rotate((-30, 30), p=1.0, border_mode=cv2.BORDER_CONSTANT)
        # ElasticTransform(alpha=600, sigma=25, alpha_affine=0, border_mode=cv2.BORDER_CONSTANT, p=1.0)
    ], p=p)
def read_for_training(p, augmentation=False):
    """Read and preprocess a training image, optionally with random
    augmentation.

    Behaviour depends on the module-level ``mode``:
    - 'background': image only;
    - 'instance': image with the background zeroed via its mask, then
      size-normalized;
    - 'code': as 'instance', plus the column-stretch encoding.

    Args:
        p: image filename relative to ``TRAIN`` (``.jpg``).
        augmentation: apply :func:`strong_aug` when True.
    Returns:
        (image, mask) tuple; in 'background' mode the mask is the image.
    """
    img = imread(TRAIN + p, mode='RGB')
    msk = img
    if mode == 'background':
        data = {'image': img}
    elif mode in ('instance', 'code'):
        # Masks live beside the images with a .png extension.
        msk = imread(TRAIN_MASK + p.replace('.jpg', '.png'))
        data = {'image': img, 'mask': msk}
    if augmentation:
        data_aug = strong_aug()(**data)
        img = data_aug['image']
        if 'mask' in data_aug:
            msk = data_aug['mask']
    if mode in ('instance', 'code'):
        # np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin bool is the documented replacement.
        img[~msk.astype(bool)] = 0
        img, msk = size_normalization(img, msk)
        if mode == 'code':
            img = encode(img, msk)
    return img, msk
# draw -----------------------------------
def image_show(name, image, resize=1):
    """Display *image* in a resizable window scaled by *resize*."""
    height, width = image.shape[0:2]
    cv2.namedWindow(name, cv2.WINDOW_NORMAL)
    # Cast to uint8 so arbitrary numeric arrays render correctly.
    cv2.imshow(name, image.astype(np.uint8))
    cv2.resizeWindow(name, round(resize * width), round(resize * height))
def draw_shadow_text(img, text, pt, fontScale, color, thickness, color1=None, thickness1=None):
    """Draw *text* with a drop-shadow: a thicker dark pass underneath,
    then the foreground pass on top.

    color1/thickness1 override the shadow; defaults are black and
    thickness + 2.
    """
    shadow_color = (0, 0, 0) if color1 is None else color1
    shadow_thickness = thickness + 2 if thickness1 is None else thickness1
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(img, text, pt, font, fontScale, shadow_color, shadow_thickness, cv2.LINE_AA)
    cv2.putText(img, text, pt, font, fontScale, color, thickness, cv2.LINE_AA)
def mask_overlay(image, mask, color=(0, 255, 0)):
    """
    Helper function to visualize mask on the top of the car
    """
    # Promote the 2-D mask to 3 channels tinted with *color*.
    colored = (np.dstack((mask,) * 3) * np.array(color)).astype(np.uint8)
    # 50/50 blend of the tinted mask and the original image.
    blended = cv2.addWeighted(colored, 0.5, image, 0.5, 0.)
    out = image.copy()
    hit = colored[:, :, 1] > 0
    out[hit] = blended[hit]
    return out
def remove_small_blobs(img, threshold=3000):
    """Zero out connected components whose size metric is <= ``threshold``.

    The per-blob metric ``blobs[blobs == i].sum() / i`` equals the blob's
    pixel count (label values are constant within a component).
    Masks with a single blob are returned untouched.

    Args:
        img: binary-ish mask, modified in place and returned.
        threshold: blobs at or below this pixel count are erased.
    """
    blobs = label(img)
    if blobs.max() > 1:
        for i in range(1, blobs.max() + 1):
            blob_sum = blobs[blobs == i].sum() / i
            if blob_sum <= threshold:
                img[blobs == i] = 0
    return img
def explore_images():
    """Interactive viewer for training images with mask overlays.

    Keyboard controls (keycodes are taken mod 256): Esc=27 quit,
    Right=83 next, Left=81 previous, Space=32 copy the pair into
    train_error/, f=102 fill mask holes, r=114 drop the smallest blob and
    overwrite the mask file; any other key advances.
    """
    image_path = f'../DATASET/humpback_whale/size768/train/'
    mask_path = f'../DATASET/humpback_whale/size768/train_mask/'
    files = [f for f in listdir(image_path) if isfile(join(image_path, f))]
    # NOTE(review): starting at 1 skips files[0], and `n <= len(files)`
    # permits n == len(files), which raises IndexError — confirm and fix.
    n = 1
    while n <= len(files):
        file = files[n]
        img_name = image_path + file
        msk_name = mask_path + file.replace('.jpg', '.png')
        img = imread(img_name)
        msk = imread(msk_name)
        # NOTE(review): np.bool was removed in NumPy 1.24; builtin bool is
        # the replacement.
        img_msk = mask_overlay(img, msk.astype(np.bool))
        blobs = label(msk)
        if blobs.max() > 1:
            # print(blobs.max())
            # Per-blob pixel counts, used to rank components by size.
            b = []
            for i in range(1, blobs.max() + 1):
                blob_sum = blobs[blobs == i].sum() / i
                b.append(blob_sum)
            b.sort()
            bs = ', '.join([str(i) for i in b])
        # NOTE(review): `bs` (and `b`, used by the 'r' branch) are bound only
        # when the mask has multiple blobs — a single-blob image can raise
        # NameError here; verify against real masks.
        # img, msk = read_for_training(file, True)
        # if len(img.shape) != len(msk.shape):
        #     msk = np.stack((msk, msk, msk), axis=2)
        # img_msk = np.concatenate((img, msk), axis=1)
        draw_shadow_text(img_msk, f'{n}/{len(files)} : {file} : {bs}', (5, 15), 0.5, (255, 255, 255), 1)
        cv2.imshow('image-mask', img_msk)
        key = cv2.waitKey(0)
        if key % 256 == 27:
            break
        elif key % 256 == 83:
            n += 1
        elif key % 256 == 81:
            n -= 1
        elif key % 256 == 32:
            # Flag the pair for manual correction.
            copyfile(img_name, '../DATASET/humpback_whale/size768/train_error/image/' + file)
            copyfile(msk_name, '../DATASET/humpback_whale/size768/train_error/mask/' + file.replace('.jpg', '.png'))
        elif key % 256 == 102:
            # Fill holes and preview; the next keypress advances.
            msk = ndimage.binary_fill_holes(msk.astype(np.bool))
            img_msk = mask_overlay(img, msk)
            draw_shadow_text(img_msk, f'{n}/{len(files)} : {file}', (5, 15), 0.5, (255, 255, 255), 1)
            cv2.imshow('image-mask', img_msk)
            key = cv2.waitKey(0)
            n += 1
        elif key % 256 == 114:
            # Remove the smallest blob and persist the cleaned mask.
            msk = remove_small_blobs(msk, b[0] + 1)
            imsave(msk_name, msk)
            img_msk = mask_overlay(img, msk.astype(np.bool))
            draw_shadow_text(img_msk, f'{n}/{len(files)} : {file}', (5, 15), 0.5, (255, 255, 255), 1)
            cv2.imshow('image-mask', img_msk)
            key = cv2.waitKey(0)
            # print(key % 256)
        else:
            n += 1
        # print(key % 256)
def view_and_copy():
    """Step through validation-error images and hand-pick some for
    re-annotation.

    Keys (mod 256): Esc=27 quit, Right=83 next, Left=81 previous,
    Space=32 copy the original training image into the annotation folder.
    NOTE(review): unknown keys (and Space) do not advance, so the same image
    is redisplayed; Left at n == 0 wraps to files[-1] — confirm intent.
    """
    path_in = '../DATA/humpback_whale_siamese_torch/validation_errors/'
    srs = '../DATASET/humpback_whale/all/train/'
    dst = '../DATA/humpback_whale_siamese_torch/for_best_annotation'
    files = [f for f in listdir(path_in) if isfile(join(path_in, f))]
    n = 0
    while n < len(files):
        file = files[n]
        img_name = path_in + file
        img = imread(img_name)
        draw_shadow_text(img, f'{n}/{len(files)}', (5, 15), 0.5, (255, 255, 255), 1)
        cv2.imshow('image', img)
        key = cv2.waitKey(0)
        if key % 256 == 27:
            break
        elif key % 256 == 83:
            n += 1
        elif key % 256 == 81:
            n -= 1
        elif key % 256 == 32:
            copyfile(join(srs, file), join(dst, file))
def explore_prediction():
    """Interactively inspect top-5 whale predictions from a score matrix.

    Loads (or optionally compiles) a test-vs-known similarity matrix, builds
    each test image's top-5 whale list with a 'new_whale' threshold cut-in,
    and shows test/candidate pairs side by side. Keys (mod 256): Right=83 /
    Left=81 cycle candidates, Up=82 next test image, h/l print a
    high/low-confidence CSV row, c/t copy the train/test image for
    bounding-box work.
    """
    def expand_path(image_name):
        # Resolve a bare filename against the known image/mask folders.
        if isfile(TRAIN + image_name):
            return TRAIN + image_name
        if isfile(TRAIN_MASK + image_name):
            return TRAIN_MASK + image_name
        if isfile(TEST + image_name):
            return TEST + image_name
        if isfile(TEST_MASK + image_name):
            return TEST_MASK + image_name
        return image_name
    submit = [image_name for _, image_name, _ in pd.read_csv(SUB_Df).to_records()]
    train_dict = dict([(image_name, widx) for _, image_name, widx in pd.read_csv(TRAIN_DF).to_records()])
    # h2ws: image -> list of whale ids; w2i: whale id -> list of images.
    h2ws = {}
    w2i ={}
    for image_name, w in train_dict.items():
        if w != 'new_whale': # Use only identified whales
            if image_name not in h2ws:
                h2ws[image_name] = []
            if w not in h2ws[image_name]:
                h2ws[image_name].append(w)
            if w in w2i:
                w2i[w].append(image_name)
            else:
                w2i[w] = []
                w2i[w].append(image_name)
    known = sorted(list(h2ws.keys()))
    # Images already resolved by hand are skipped below.
    hand_result_df = pd.read_csv('./hand_result.csv')
    hand_result = hand_result_df['test'].tolist()
    target_df = pd.read_csv(
        '../DATA/humpback_whale_siamese_torch/submissions/ensembling/'
        'best-files183-first2-unique3517-score(0.99)-PL(0.895).csv')
    target = [idx.split(' ') for idx in target_df['Id']]
    # NOTE(review): known/submit computed above are immediately discarded and
    # reloaded from the checkpoint below; `target` appears unused — confirm.
    score = None
    known = None
    submit = None
    compile_scores = False
    if compile_scores:
        # Average score matrices from several experiment folders and save.
        dirs = ['exp384-ch3-t1', 'exp384-ch3-t4', 'exp768-ch1-t1', 'exp768-ch3-t1']
        files = []
        for dir_name in dirs:
            mypath = f'../DATA/humpback_whale_siamese_torch/scores/{dir_name}/'
            files = files + [join(mypath, f) for f in listdir(mypath) if isfile(join(mypath, f))]
        checkpoint = torch.load(files[0])
        score = checkpoint['score_matrix']
        known = checkpoint['known']
        submit = checkpoint['submit']
        for i in range(1, len(files)):
            check_known = None
            check_submit = None
            if files[i].find('.npy') >= 0:
                score += np.load(files[i])
            else:
                checkpoint = torch.load(files[i])
                score += checkpoint['score_matrix']
                # Sanity-check ordering agreement between checkpoints.
                check_known = (known == checkpoint['known'])
                check_submit = (submit == checkpoint['submit'])
            print(f'Loaded file: {files[i]}, known: {check_known}, submit: {check_submit}')
        torch.save({'score_matrix': score / len(files), 'known': known, 'submit': submit, 'threshold': 0.99},
                   f'../DATA/humpback_whale_siamese_torch/scores/all-files{len(files)}.pt')
    else:
        checkpoint = torch.load('../DATA/humpback_whale_siamese_torch/scores/all-files101.pt')
        score = checkpoint['score_matrix']
        known = checkpoint['known']
        submit = checkpoint['submit']
    # With threshold 0, 'new_whale' is inserted where scores drop below it.
    threshold = 0
    for th in range(1):
        # threshold -= 0.01
        vtop = 0
        vhigh = 0
        # pos[k]: how often 'new_whale' lands at position k (5 = never).
        pos = [0, 0, 0, 0, 0, 0]
        result_whales = []
        result_images = []
        result_scores = []
        print(f'test, train, whale, count_examples, position, score, confidence')
        for i, p in enumerate(submit):
            if p in hand_result:
                continue
            # Build the top-5 whale list for test image p.
            t = []
            images = []
            scores = []
            s = set()
            a = score[i, :]
            args = list(reversed(np.argsort(a)))
            for j in args:
                image_name = known[j]
                if a[j] < threshold and 'new_whale' not in s:
                    pos[len(t)] += 1
                    s.add('new_whale')
                    t.append('new_whale')
                    if len(t) == 5:
                        break
                for w in h2ws[image_name]:
                    assert w != 'new_whale'
                    if w not in s:
                        if a[j] > 1.0:
                            vtop += 1
                        elif a[j] >= threshold:
                            vhigh += 1
                        s.add(w)
                        t.append(w)
                        images.append(image_name)
                        scores.append(a[j])
                        if len(t) == 5:
                            break
                if len(t) == 5:
                    break
            if 'new_whale' not in s:
                pos[5] += 1
            result_whales.append(t)
            result_images.append(images)
            result_scores.append(scores)
            test_img = imread(expand_path(p))
            draw_shadow_text(test_img, f'{i}/{len(submit)}', (5, 15), 0.5, (255, 255, 255), 1)
            draw_shadow_text(test_img, f'name: {p}', (5, 35), 0.5, (255, 255, 255), 1)
            n = 0
            # Only review uncertain predictions, and skip ahead to index 4032.
            if scores[n] >= 0.99:
                continue
            if i < 4032:
                continue
            while True:
                image_name = images[n]
                count_examples = len(w2i[t[n]])
                whale_score = scores[n]
                img = imread(expand_path(image_name))
                draw_shadow_text(img, f'{n + 1}', (5, 15), 0.5, (255, 255, 255), 1)
                draw_shadow_text(img, f'name: {image_name}', (5, 35), 0.5, (255, 255, 255), 1)
                draw_shadow_text(img, f'whale: {t[n]} / {count_examples}', (5, 55), 0.5, (255, 255, 255), 1)
                draw_shadow_text(img, f'score: {whale_score}', (5, 75), 0.5, (255, 255, 255), 1)
                next_score = 0
                if n + 1 < len(scores):
                    next_score = scores[n + 1]
                draw_shadow_text(img, f'next score: {round(next_score, 2)}', (5, 95), 0.5, (255, 255, 255), 1)
                plot_img = np.concatenate((test_img, img), axis=1)
                cv2.imshow('prediction', plot_img)
                key = cv2.waitKey(0) % 256
                # print(key)
                if key == 83: # right
                    n += 1 if n < len(images) - 1 else -4
                    continue
                elif key == 81: # left
                    n -= 1 if n > 0 else -4
                    continue
                elif key == 82: # up
                    break
                elif key == 104: # h
                    print(f'{p}, {image_name}, {t[n]}, {count_examples}, {n + 1}, {whale_score}, H, {next_score}')
                elif key == 108: # l
                    print(f'{p}, {image_name}, {t[n]}, {count_examples}, {n + 1}, {whale_score}, L, {next_score}')
                elif key == 99: # c
                    out_path = '../DATA/train_bb/'
                    copyfile(expand_path(image_name), join(out_path, image_name))
                elif key == 116: # t
                    out_path = '../DATA/test_bb/'
                    copyfile(expand_path(p), join(out_path, p))
def test_something():
    """Print cumulative example counts per whale-frequency threshold.

    For each threshold k in 1..10, sums the image counts of all identified
    whales having more than k examples; also reports the >2 case separately.
    """
    frame = pd.read_csv(TRAIN_DF)
    whale_images = {}
    for image_name, whale_id in zip(frame['Image'], frame['Id']):
        whale_images.setdefault(whale_id, []).append(image_name)
    counts = [0] * 11  # index 0 is unused padding
    over_two = 0
    for whale_id, images in whale_images.items():
        if whale_id == 'new_whale':
            continue  # unidentified whales are excluded from the stats
        n_images = len(images)
        for limit in range(1, 11):
            if n_images > limit:
                counts[limit] += n_images
        if n_images > 2:
            over_two += n_images
    print(f'Whale with examples more than: {counts}')
    print(f'Whale with examples more than: {over_two}')
def work_with_bb():
    """Merge hand-corrected bounding boxes into the metadata CSV.

    Rows whose image appears in the new VIA-export CSV (0026.csv) get their
    x0/x1/y0/y1 replaced by the hand-drawn rectangle (decoded from the JSON
    in 'region_shape_attributes'); all other rows keep their old box. The
    merged table is written back over the original bounding_boxes.csv.
    """
    count = 0
    TRAIN_PATH = '/home/igor/kaggle/DATASET/humpback_whale/all/train/'
    old_bb = pd.read_csv('../DATA/humpback_whale_siamese_torch/metadata/bounding_boxes.csv')
    new_bb = pd.read_csv('../DATA/0026.csv')
    bb = new_bb['filename'].tolist()
    list_x0 = []
    list_x1 = []
    list_y0 = []
    list_y1 = []
    for n, row in old_bb.iterrows():
        if row['Image'] in bb:
            count += 1
            print('{}'.format(row['Image']))
            new_row = new_bb[new_bb['filename'] == row['Image']]
            # VIA stores the rectangle as JSON: x, y, width, height.
            row_data = json.loads(new_row['region_shape_attributes'].values[0])
            x0 = row_data['x']
            x1 = x0 + row_data['width']
            y0 = row_data['y']
            y1 = y0 + row_data['height']
            # NOTE(review): assigning into `row` mutates only the iterrows
            # copy; the lists below are what actually update the frame.
            row['x0'] = x0
            row['x1'] = x1
            row['y0'] = y0
            row['y1'] = y1
            # img = imread(join(TRAIN_PATH, row['Image']), mode='RGB')
            # cv2.rectangle(img, (x0, y0), (x1, y1), (0, 255, 0), 2)
            # cv2.rectangle(img, (row['x0'], row['y0']), (row['x1'], row['y1']), (0, 0, 255), 2)
            # cv2.imshow('image', img)
            # key = cv2.waitKey(0)
            list_x0.append(x0)
            list_x1.append(x1)
            list_y0.append(y0)
            list_y1.append(y1)
        else:
            # No correction for this image: keep the existing box.
            list_x0.append(row['x0'])
            list_x1.append(row['x1'])
            list_y0.append(row['y0'])
            list_y1.append(row['y1'])
    old_bb['x0'] = list_x0
    old_bb['x1'] = list_x1
    old_bb['y0'] = list_y0
    old_bb['y1'] = list_y1
    old_bb.to_csv('../DATA/humpback_whale_siamese_torch/metadata/bounding_boxes.csv', header=True, index=False)
    print(f'Processed images: {count}')
def draw_bb():
    """Render each image with its bounding box drawn and save the result
    into numbered output folders of 1000 images each (../DATA/train_bb/NNNN/).
    """
    train_dir = '../DATASET/humpback_whale/all/train/'
    my_dir = '../DATASET/humpback_whale/all/train/'
    bb_df = pd.read_csv('../DATA/humpback_whale_siamese_torch/metadata/bounding_boxes.csv')
    files = listdir(my_dir)
    for n, r in tqdm(enumerate(bb_df.iterrows()), total=len(bb_df)):
        # Image, x0, y0, x1, y1
        # Bucket the output by thousands: 0000, 0001, ...
        dir_out = str(int(n / 1000)).zfill(4)
        path_out = f'../DATA/train_bb/{dir_out}/'
        os.makedirs(path_out, exist_ok=True)
        row = r[1]
        if row['Image'] in files:
            img = imread(join(train_dir, row['Image']), mode='RGB')
            cv2.rectangle(img, (row['x0'], row['y0']), (row['x1'], row['y1']), (255, 0, 0), 2)
            imsave(join(path_out, row['Image']), img)
def aug_test():
    """Apply each candidate augmentation to one sample image and save the
    results, drawing how a fixed bounding box is transported through the
    augmentation (via a rectangle rasterized into a mask).
    """
    def get_bb_points(msk):
        # Recover the axis-aligned bounding box of mask pixels > 200.
        h, w = msk.shape
        x0 = 0
        x1 = msk.shape[1]
        y0 = 0
        y1 = msk.shape[0]
        for i in range(w):
            if msk[:, i].max() > 200:
                x0 = i
                break
        for i in range(w):
            if msk[:, msk.shape[1] - i - 1].max() > 200:
                x1 = msk.shape[1] - i - 1
                break
        for i in range(h):
            if msk[i, :].max() > 200:
                y0 = i
                break
        for i in range(h):
            if msk[msk.shape[0] - i - 1, :].max() > 200:
                y1 = msk.shape[0] - i - 1
                break
        return (x0, y0), (x1, y1)
    image_name = '7aea0b3e2.jpg'
    # Hand-picked source box for this sample image.
    p1, p2 = (12, 84), (391, 248)
    img = imread(f'../DATA/aug_test/src/{image_name}')
    h = 300
    # Elastic-transform magnitudes scaled from the nominal image size.
    alpha, sigma, alpha_affine = h * 2, h * 0.08, h * 0.08
    # Name -> transform; the numeric prefix groups noise/blur/color/geometry.
    augs = {'1_IAAAdditiveGaussianNoise': IAAAdditiveGaussianNoise(scale=(0.01 * 255, 0.05 * 255), p=1.0),
            '1_GaussNoise': GaussNoise(var_limit=(20, 120), p=1.0),
            '1_RandomGamma': RandomGamma(gamma_limit=(80, 120), p=1.0),
            '2_RandomBrightnessContrast': RandomBrightnessContrast(p=1.0),
            '2_MotionBlur': MotionBlur(p=1.0),
            '2_MedianBlur': MedianBlur(blur_limit=6, p=1.0),
            '2_Blur': Blur(blur_limit=9, p=1.0),
            '2_IAASharpen': IAASharpen(p=1.0),
            '2_IAAEmboss': IAAEmboss(p=1.0),
            '2_IAASuperpixels': IAASuperpixels(n_segments=50, p_replace=0.05, p=1.0),
            '3_CLAHE': CLAHE(clip_limit=8, p=1.0),
            '3_RGBShift': RGBShift(p=1.0),
            '3_ChannelShuffle': ChannelShuffle(p=1.0),
            '3_HueSaturationValue': HueSaturationValue(p=1.0),
            '3_ToGray': ToGray(p=1.0),
            '4_OpticalDistortion': OpticalDistortion(border_mode=cv2.BORDER_CONSTANT, p=1.0),
            '4_GridDistortion': GridDistortion(border_mode=cv2.BORDER_CONSTANT, p=1.0),
            '4_IAAPiecewiseAffine': IAAPiecewiseAffine(nb_rows=4, nb_cols=4, p=1.0),
            '4_IAAPerspective': IAAPerspective(p=1.0),
            '4_IAAAffine': IAAAffine(mode='constant', p=1.0),
            '4_ElasticTransform': ElasticTransform(alpha=alpha, sigma=sigma, alpha_affine=alpha_affine, border_mode=cv2.BORDER_CONSTANT, p=1.0)}
    # im_merge.shape[1] * 2, im_merge.shape[1] * 0.08, im_merge.shape[1] * 0.08
    for aug in augs:
        # Rasterize the box outline into a mask so geometric transforms
        # carry it along with the image.
        mask = np.zeros(img.shape[:2], dtype=np.uint8)
        cv2.rectangle(mask, p1, p2, 255, 2)
        data = {"image": img.copy(), 'mask': mask}
        augmented = augs[aug](**data)
        augimg = augmented['image']
        draw_shadow_text(augimg, f'{aug}', (5, 15), 0.5, (255, 255, 255), 1)
        ap1, ap2 = get_bb_points(augmented['mask'])
        cv2.rectangle(augimg, ap1, ap2, (0, 255, 0), 2)
        imsave(f'../DATA/aug_test/aug/{aug}-{image_name}', augimg)
def test_time_aug():
    """Run the test-time augmentation pipeline over one folder of images,
    re-deriving each bounding box from an augmented rectangle mask, and save
    the annotated results while timing the whole pass.
    """
    h = 768
    # Elastic-transform magnitudes scaled from the 768px image size
    # (only used by the commented-out geometric block below).
    alpha, sigma, alpha_affine = h * 2, h * 0.08, h * 0.08
    def strong_aug(p=0.9):
        # NOTE: shadows the module-level strong_aug; this variant is
        # photometric-only — the geometric OneOf block is disabled.
        return Compose([
            OneOf([
                IAAAdditiveGaussianNoise(scale=(0.01 * 255, 0.05 * 255), p=1.0),
                GaussNoise(var_limit=(20, 120), p=1.0),
                RandomGamma(gamma_limit=(80, 120), p=1.0),
            ], p=0.9),
            RandomBrightnessContrast(p=1.0),
            OneOf([
                # MotionBlur(p=1.0),
                # MedianBlur(blur_limit=3, p=1.0),
                Blur(blur_limit=5, p=1.0),
                IAASharpen(p=1.0),
                # IAAEmboss(p=1.0),
                # IAASuperpixels(n_segments=10, p_replace=0.05, p=1.0),
            ], p=0.9),
            OneOf([
                CLAHE(clip_limit=8, p=1.0),
                RGBShift(p=1.0),
                ChannelShuffle(p=1.0),
                HueSaturationValue(p=1.0),
                # ToGray(p=1.0),
            ], p=0.9),
            # OneOf([
            #     OpticalDistortion(border_mode=cv2.BORDER_CONSTANT, p=1.0),
            #     # GridDistortion(border_mode=cv2.BORDER_CONSTANT, p=1.0),
            #     IAAPiecewiseAffine(nb_rows=4, nb_cols=4, p=1.0),
            #     IAAPerspective(scale=(0.05, 0.075), p=1.0),
            #     # IAAAffine(mode='constant', p=1.0),
            #     ElasticTransform(alpha=alpha, sigma=sigma, alpha_affine=alpha_affine,
            #                      border_mode=cv2.BORDER_CONSTANT,
            #                      p=1.0),
            # ], p=0.9),
        ], p=p)
    def get_bb_points(msk):
        # Recover the axis-aligned bounding box of mask pixels > 200.
        h, w = msk.shape
        x0 = 0
        x1 = msk.shape[1]
        y0 = 0
        y1 = msk.shape[0]
        for i in range(w):
            if msk[:, i].max() > 200:
                x0 = i
                break
        for i in range(w):
            if msk[:, msk.shape[1] - i - 1].max() > 200:
                x1 = msk.shape[1] - i - 1
                break
        for i in range(h):
            if msk[i, :].max() > 200:
                y0 = i
                break
        for i in range(h):
            if msk[msk.shape[0] - i - 1, :].max() > 200:
                y1 = msk.shape[0] - i - 1
                break
        return (x0, y0), (x1, y1)
    my_aug = strong_aug()
    # Image name -> ((x0, y0), (x1, y1)) from the metadata CSV.
    bb_dict = {}
    bb = pd.read_csv('../DATA/humpback_whale_siamese_torch/metadata/bounding_boxes.csv')
    for name, x0, x1, y0, y1 in zip(bb['Image'], bb['x0'], bb['x1'], bb['y0'], bb['y1']):
        bb_dict[name] = ((x0, y0), (x1, y1))
    dir_out = '../DATA/aug_test/aug/'
    dir_in = '../DATA/train_bb/0001/'
    files = listdir(dir_in)
    start = timer()
    for file in tqdm(files, total=len(files)):
        p1, p2 = bb_dict[file]
        img = imread(join(dir_in, file), mode='RGB')
        # Rasterize the box into a mask so it is augmented with the image.
        mask = np.zeros(img.shape[:2], dtype=np.uint8)
        cv2.rectangle(mask, p1, p2, 255, 2)
        data = {"image": img, 'mask': mask}
        augmented = my_aug(**data)
        img, mask = augmented['image'], augmented['mask']
        ap1, ap2 = get_bb_points(mask)
        cv2.rectangle(img, ap1, ap2, (0, 255, 0), 2)
        imsave(join(dir_out, file), img)
    print(f'Spend time: {timer() - start}')
# draw_bb()
# Script entry: run the test-time augmentation pass when executed directly.
test_time_aug()
# explore_prediction()
|
21,806 | 5775cabfd21630c0d2c49df1fcd6f8970726761a | '''
NRGsuite: PyMOL molecular tools interface
Copyright (C) 2011 Gaudreault, F., Morency, LP. & Najmanovich, R.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from __future__ import print_function
from pymol import cmd
from pymol import util
#import threading
import Geometry
import Constants
class UpdateScreen(object):
    def __init__(self, top, ID, colNo, Line, State, TOP, Translation, Rotation):
        """Build the screen updater for one solution and run it immediately
        (threading support was stripped out — see the commented lines).

        Args:
            top: owning controller; supplies flexible-bond/ligand metadata.
            ID: unique ID of the complex being updated.
            colNo: starting column index into ``Line`` for parsing.
            Line: raw input line holding the solution data.
            State: requested PyMOL state (currently ignored, pinned to 1).
            TOP: zero-based solution rank (TOPn) to edit.
            Translation, Rotation: transform data stored for later use.
        """
        #threading.Thread.__init__(self)
        self.top = top
        self.Translation = Translation
        self.Rotation = Rotation
        # unique ID of complex
        self.ID = ID
        #print self.ID + " initialized!"
        # TOPn to edit
        self.TOP = TOP
        # Starting index of line
        self.colNo = colNo
        # input line to parse
        self.Line = Line
        # State on which updating is happening
        # self.State = State
        # NOTE(review): the State argument is ignored and the state is pinned
        # to 1 — confirm this override is intentional.
        self.State = 1
        self.dictFlexBonds = self.top.dictFlexBonds
        self.LigandName = self.top.LigandName
        self.TargetName = self.top.TargetName
        # Working PyMOL object names, suffixed with the 1-based solution rank.
        self.LigandObj = self.LigandName + '_' + str(self.TOP+1)
        self.TargetObj = self.TargetName + '_' + str(self.TOP+1)
        # Selections of molecules (ligand/side-chain/protein)
        self.selTarget = '(' + self.TargetObj + ' & present)'
        self.selLigand = '(' + self.LigandObj + ' & present)'
        self.selSideChains = ''
        # start thread
        self.start()
    def start(self):
        # Kept for API compatibility with the old threading.Thread design:
        # runs the update synchronously instead of spawning a thread.
        self.Update()
# Updates the PyMOL interface
    # Updates the PyMOL interface
    def Update(self):
        """Refresh the working protein object, then rebuild the ligand and
        view for this solution; clean up the temporary objects on failure.
        """
        try:
            # Copy the initial protein (Frame 1) into the working state
            cmd.create(self.TargetObj, self.TargetName, 1, self.State)
            cmd.refresh()
            # Display the last frame
            cmd.frame(self.State)
            #print "Switched to frame " + str(self.State)
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
        except:
            self.CriticalError("Object " + str(self.TargetName) + " no longer exists")
        if not self.UpdateLigandAnchorPoint() and not self.UpdateLigandFlexibility():
            self.selSideChains = self.UpdateSideChainConformations()
            if self.WriteOutLigand() or self.EditView() or \
               self.top.UpdateDataList(self.Line, self.TOP, self.top.Reference, self.dictCoord):
                self.Delete_Object()
            # NOTE(review): Delete_Object is invoked twice in the source —
            # once inside the error branch and once unconditionally; confirm
            # the intended control flow before changing it.
            self.Delete_Object()
        return
'''=========================================================================
Delete_Object: deleted the updating object if an error occurs
========================================================================='''
def Delete_Object(self):
try:
# delete temporary protein PDB file
cmd.delete(self.TargetObj)
cmd.refresh()
cmd.delete(self.LigandObj)
cmd.refresh()
except:
self.CriticalError("Object " + str(self.TargetObj) + " no longer exists")
'''=========================================================================
EditView: Edit the visual aspects of the PyMOL interface
========================================================================='''
    def EditView(self):
        """Load the rewritten ligand, assemble the complete solution object
        and apply coloring/representation in PyMOL.

        Returns:
            0 on success, 1 if any PyMOL call failed (reported through
            CriticalError).
        """
        #Display the ligand with the new coordinates
        #print "will load " + self.LigandObj + " in state " + str(self.State)
        try:
            cmd.load(self.top.listTmpPDB[self.TOP+1], self.LigandObj, self.State)
            cmd.refresh()
            # No flexible side-chain(s)
            if self.selSideChains == '':
                selString = self.selLigand + " or " + self.selTarget
            else:
                selString = self.selSideChains + " or " + self.selLigand + " or " + self.selTarget
            #print selString
            # Create solution object
            # Object contains the whole protein-ligand complex
            SolutionObj = "TOP_" + str(self.TOP+1) + "__"
            cmd.create(SolutionObj, selString, self.State, self.State)
            cmd.refresh()
            # Color ligand of solution TOP
            Selection = "(resn LIG & " + SolutionObj + " & present)"
            cmd.color(self.top.top.PymolColorList[self.TOP], Selection)
            util.cnc(Selection)
            cmd.refresh()
            # Color side-chains of solution TOP
            if self.selSideChains != '':
                selSC = self.selSideChains.replace(self.TargetObj,SolutionObj)
                cmd.color(self.top.top.PymolColorList[self.TOP], selSC)
                util.cnc(selSC)
                cmd.refresh()
            #cmd.show(self.DefaultDisplay, "(resn LIG & sol_*__ & present)")
            cmd.show(self.top.DefaultDisplay, "(resn LIG & " + SolutionObj + " & present)")
            cmd.refresh()
            if self.selSideChains != '':
                #cmd.show("sticks", self.selSideChains.replace(self.TargetName__,"sol_*__"))
                cmd.show("sticks", self.selSideChains.replace(self.TargetObj,SolutionObj))
                cmd.refresh()
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
        except:
            self.CriticalError("Could not refresh the visual display")
            return 1
        return 0
'''=========================================================================
WriteOutLigand: Ouputs PDB ligand file with new coordinates
========================================================================='''
def WriteOutLigand(self):
try:
#Get the new coordinates of the ligand
self.dictCoord = {}
self.dictCoord = Geometry.buildcc(self.top.ListAtom,self.top.RecAtom,self.top.DisAngDih,self.top.Ori)
#Replace the coordinate in pdb file with the new one
#print "writing to " + self.top.listTmpPDB[self.TOP+1]
text_file = open(self.top.listTmpPDB[self.TOP+1], 'w')
for pdbLine in self.top.ReferenceLines:
type = pdbLine[0:6].strip()
if type == 'HETATM' or type == 'ATOM':
NoAtom = int(pdbLine[6:11])
#print NoAtom
atomX = float(self.dictCoord[NoAtom][0])
atomY = float(self.dictCoord[NoAtom][1])
atomZ = float(self.dictCoord[NoAtom][2])
tmpLine = pdbLine[0:30]
tmpLine += '%8.3f' % atomX # The atom X coordinate
tmpLine += '%8.3f' % atomY # The atom Y coordinate
tmpLine += '%8.3f' % atomZ # The atom Z coordinate
tmpLine += pdbLine[54:]
text_file.write(tmpLine)
else:
text_file.write(pdbLine)
text_file.close()
except IOError:
self.CriticalError("Could not write PDB ligand file.")
return 1
return 0
'''=========================================================================
UpdateLigandFlexibility: Updates the dihedral angles of the ligand
========================================================================='''
    def UpdateLigandFlexibility(self):
        """Update the ligand's flexible-bond dihedral angles from the GA log line.

        Consumes 11-character columns from ``self.Line`` starting at
        ``self.colNo`` (one column per flexible dihedral flagged in
        ``self.dictFlexBonds``) and writes the angle values into
        ``self.top.DisAngDih``.  Advances ``self.colNo`` as a side effect, so
        call order relative to the other Update* methods matters.

        Returns:
            0 on success, 1 on any failure (reported via CriticalError).
        """
        # Flexible bond(s) selected?
        try:
            if self.top.FlexStatus != '':
                for k in sorted(self.dictFlexBonds.keys()):
                    # k: Represent the FLEDIH number
                    # Which is the key of the dictionary
                    # ==================================================================
                    # If the dihangle between atoms NEED to be modified
                    if self.dictFlexBonds[k][0] == 1:
                        # Read and move to next column (10-char value + 1 separator)
                        ColValue = float(self.Line[self.colNo:self.colNo+10])
                        self.colNo = self.colNo + 11
                        # Is there ONLY 1 atom that define the flexible bond
                        if self.dictFlexBonds[k][2] == 1:
                            self.top.DisAngDih[int(self.dictFlexBonds[k][3])][2] = ColValue
                        # Is there MULTIPLE atoms that define the flexible bond
                        elif self.dictFlexBonds[k][2] > 1:
                            #print "How many defines this flexible bonds", self.dictFlexBonds[k][2]
                            # Multiple possibilities, need to mix up the atoms number
                            # Example: [1 ,2 ,3] will give [1], [1, 2], [2, 3]
                            # SET the 1st ATOM Dihedral Angle...
                            self.top.DisAngDih[int(self.dictFlexBonds[k][3])][2] = ColValue
                            # Walk consecutive atom pairs; each pair may have a
                            # constant offset angle registered in FixedAngle.
                            for flexA in range(1, self.dictFlexBonds[k][2]):
                                ATflex_A = self.dictFlexBonds[k][flexA + 2]
                                ATflex_B = self.dictFlexBonds[k][flexA + 3]
                                ATmerge = ''
                                ATmergeAB = ATflex_A + ATflex_B
                                ATmergeBA = ATflex_B + ATflex_A
                                # Be sure the key exist before calling the value
                                # (FixedAngle keys may be stored in either atom order;
                                # the order flips the sign of the constant angle)
                                if ATmergeAB in self.top.FixedAngle:
                                    ATmerge = ATmergeAB
                                    Factor = 1
                                elif ATmergeBA in self.top.FixedAngle:
                                    ATmerge = ATmergeBA
                                    Factor = -1
                                if ATmerge:
                                    # Get the constant angle IN SHIFTVAL
                                    ConstAngle = float(self.top.FixedAngle[ATmerge])
                                    #print "ConstAngle", ConstAngle
                                    # ADD the constant Angle WITH the Column value in the LOGFILE
                                    ColValue = ColValue + float(Factor)*ConstAngle
                                    #print "ColValue", ColValue
                                    # SET the 2nd ATOM Dihedral Angle...
                                    self.top.DisAngDih[int(ATflex_B)][2] = ColValue
        except:
            self.CriticalError("Could not update ligand flexibility")
            return 1
        return 0
'''=========================================================================
UpdateLigandAnchorPoint: Updates the position of the ligand on the screen
========================================================================='''
    def UpdateLigandAnchorPoint(self):
        """Update the ligand anchor atom's internal coordinates from the log line.

        If translation is enabled, reads a grid-vertex index from the current
        column, looks up its cartesian position and converts it to
        distance/angle/dihedral relative to the three origin points.  If
        rotation is enabled, reads three more columns of angle values.
        Advances ``self.colNo`` past the consumed columns as a side effect.

        Returns:
            0 on success, 1 on any failure (reported via CriticalError).
        """
        try:
            if self.Translation:
                # Column holds a float-formatted index into the search grid.
                index = int(float(self.Line[self.colNo:self.colNo+10]))
                coordX = self.top.GridVertex[index][0]  # The atom X coordinate
                coordY = self.top.GridVertex[index][1]  # The atom Y coordinate
                coordZ = self.top.GridVertex[index][2]  # The atom Z coordinate
                pointA = [coordX, coordY, coordZ]
                pointB = [self.top.OriX[0], self.top.OriX[1], self.top.OriX[2]]
                pointC = [self.top.Ori[0], self.top.Ori[1], self.top.Ori[2]]
                pointD = [self.top.OriY[0], self.top.OriY[1], self.top.OriY[2]]
                # Convert the anchor's cartesian position back to internal coords.
                self.top.DisAngDih[self.top.VarAtoms[0]][0] = Geometry.distance(pointA, pointB)
                self.top.DisAngDih[self.top.VarAtoms[0]][1] = Geometry.angle(pointA, pointB, pointC)
                self.top.DisAngDih[self.top.VarAtoms[0]][2] = Geometry.dihedralAngle(pointA, pointB, pointC, pointD)
                self.colNo += 11
            if self.Rotation:
                # Three rotation angles packed in consecutive 11-char columns.
                self.top.DisAngDih[self.top.VarAtoms[1]][1] = float(self.Line[self.colNo:self.colNo+10])
                self.top.DisAngDih[self.top.VarAtoms[1]][2] = float(self.Line[self.colNo+11:self.colNo+21])
                self.top.DisAngDih[self.top.VarAtoms[2]][2] = float(self.Line[self.colNo+22:self.colNo+32])
                self.colNo += 33
        except:
            self.CriticalError(" Could not update ligand anchor point")
            return 1
        return 0
'''=========================================================================
.UpdateSideChainConformations: Update side-chain dihedral angles using rotamer library
========================================================================='''
    def UpdateSideChainConformations(self):
        """Apply rotamer dihedrals to flexible side-chains and build their selection.

        For each flexible side-chain that has accepted rotamers, reads the
        rotamer index from the current 11-char column of ``self.Line``
        (0 = keep the original PDB conformation) and, for a positive index,
        sets every chi dihedral of that residue in PyMOL from the rotamer
        library.  Advances ``self.colNo`` per residue as a side effect.

        Returns:
            The PyMOL selection string covering all flexible side-chains
            (possibly '').
            NOTE(review): on exception this falls through and implicitly
            returns None after calling CriticalError — callers should be
            prepared for that; confirm whether a sentinel return was intended.
        """
        try:
            # temporary sel. var
            strSelectSC = ''
            # Loop through Flexible side-chains
            for residue in self.top.listSideChain:
                #print "Setting dihedrals for " + residue
                # Were any rotamers accepted for this side-chain
                if self.top.dictSideChainNRot.get(residue,''):
                    #print "List of possible rotamers:" + str(self.top.dictSideChainNRot[residue])
                    # Residue id format: 3-char name + number + 1-char chain ('-' = none)
                    # Residu Name
                    Res = residue[0:3]
                    Num = residue[3:len(residue)-1]
                    Chn = residue[len(residue)-1:len(residue)]
                    strSelectSC += "(resn " + Res + " & resi " + Num
                    if Chn != '-':
                        strSelectSC += " & chain " + Chn
                    else:
                        strSelectSC += " & chain ''"
                    strSelectSC += " & ! name C+O+N " + " & " + self.TargetObj + " & present) or "
                    # Get Integer value from GA. (+0.5 rounds the float column)
                    IntVal = int(float(self.Line[self.colNo:(self.colNo+10)].strip()) + 0.5)
                    nFlex = Constants.nFlexBonds[Res]
                    #print("IntVal", str(IntVal))
                    #print("nFlex", str(nFlex))
                    #print strSelectSC
                    if IntVal > 0: # 0 is the default PDB side-chain conf.
                        # Get List of Dihedrals to rebuild
                        for k in range(0,nFlex):
                            '''
                            print "for k=" + str(k) + " for residue=" + str(residue)
                            print self.Get_AtomString(Res,Num,Chn,Constants.setDihedrals[Res][4*k+0])
                            print self.Get_AtomString(Res,Num,Chn,Constants.setDihedrals[Res][4*k+1])
                            print self.Get_AtomString(Res,Num,Chn,Constants.setDihedrals[Res][4*k+2])
                            print self.Get_AtomString(Res,Num,Chn,Constants.setDihedrals[Res][4*k+3])
                            print self.top.dictSideChainRotamers[residue]
                            print self.top.dictSideChainRotamers[residue][(IntVal-1)*nFlex+k]
                            '''
                            # Set dihedrals for side-chain: the 4 defining atoms are
                            # stored flat in setDihedrals, 4 entries per chi angle.
                            cmd.set_dihedral(self.Get_AtomString(Res,Num,Chn,Constants.setDihedrals[Res][4*k+0]),
                                             self.Get_AtomString(Res,Num,Chn,Constants.setDihedrals[Res][4*k+1]),
                                             self.Get_AtomString(Res,Num,Chn,Constants.setDihedrals[Res][4*k+2]),
                                             self.Get_AtomString(Res,Num,Chn,Constants.setDihedrals[Res][4*k+3]),
                                             self.top.dictSideChainRotamers[residue][(IntVal-1)*nFlex+k], self.State)
                            cmd.refresh()
                    # Get next column starting index
                    self.colNo = self.colNo + 11
            # Side-chain selection string - remove last 4 chars (trailing " or ")
            if strSelectSC != '':
                strSelectSC = strSelectSC[:len(strSelectSC)-4]
            return strSelectSC
        except:
            self.CriticalError("Could not update side-chain conformations")
'''=========================================================================
Get_AtomString: Retrives the PyMOL atom selection string
========================================================================='''
def Get_AtomString(self, R, N, C, Atom):
AtomString = "resn " + R + " & resi " + N
if C != '-':
AtomString += " & chain " + C
else:
AtomString += " & chain ''"
AtomString += " & name " + Atom
AtomString += " & " + self.TargetObj
return AtomString
'''=========================================================================
CriticalError: Abort simulation if there was a fatal error
========================================================================='''
def CriticalError(self, text):
print(" CRITICAL ERROR: " + text)
#Create the .abort file
abort_file = open(self.top.top.Manage.ABORT, 'w')
abort_file.close()
|
21,807 | 9d5e6b36b0e6698ab069e96e89280d128c96139a | import os
import sys
import time
import argparse
import pandas
import numpy
global args
def process_options():
    """Parse command-line options into the module-level ``args``.

    Prints a message and exits with status 1 when any of the three input
    files is missing or does not exist.
    """
    global args
    parser = argparse.ArgumentParser(description='Merge TFPSSM vectors')
    parser.add_argument('-a', '--helix_file', help='alpha-helix vectors file')
    parser.add_argument('-b', '--sheet_file', help='beta-sheet vectors file')
    parser.add_argument('-c', '--coil_file', help='coil vectors file')
    parser.add_argument('-o', '--output_file', default='all_vector.csv', help='output file')
    args = parser.parse_args()
    # The None check guards against the option being omitted entirely:
    # os.path.exists(None) raises TypeError instead of reporting cleanly.
    # print() calls (instead of Python-2 print statements) work on 2 and 3.
    if args.helix_file is None or not os.path.exists(args.helix_file):
        print("helix_file not found.")
        sys.exit(1)
    if args.sheet_file is None or not os.path.exists(args.sheet_file):
        print("sheet_file not found.")
        sys.exit(1)
    if args.coil_file is None or not os.path.exists(args.coil_file):
        print("coil_file not found.")
        sys.exit(1)
def main():
    """Merge helix/sheet/coil TFPSSM vectors into one CSV.

    Rows are matched on the ID in column 0; only IDs present in all three
    input files are written.  Each output row is: ID, helix vector, sheet
    vector, coil vector (comma separated).
    """
    process_options()
    h_df = pandas.read_csv(args.helix_file, header=None)
    e_df = pandas.read_csv(args.sheet_file, header=None)
    c_df = pandas.read_csv(args.coil_file, header=None)
    h_ID = h_df.loc[:,0]
    e_ID = e_df.loc[:,0]
    c_ID = c_df.loc[:,0]
    # IDs common to all three files (intersect1d also sorts them).
    ID_list = numpy.intersect1d(h_ID,e_ID)
    ID_list = numpy.intersect1d(ID_list,c_ID)
    # print() instead of the Python-2-only `print ID_list` statement.
    print(ID_list)
    with open(args.output_file, 'w') as f:
        for ID in ID_list:
            # Concatenate the three feature vectors (columns 1..end) for this ID.
            cur_vec = list(h_df[h_df[0]==ID].loc[:,1:].values[0]) + list(e_df[e_df[0]==ID].loc[:,1:].values[0]) + list(c_df[c_df[0]==ID].loc[:,1:].values[0])
            f.write(ID)
            f.write(',')
            f.write(','.join(map(str,cur_vec)))
            f.write('\n')
# Script entry point: run the merge and report elapsed wall time in minutes.
if __name__ == "__main__":
    start_time = time.time()
    main()
    print("--- %s mins ---" % round(float((time.time() - start_time))/60, 2))
|
21,808 | 5c50b03e5d8b1391d45cddbcec35419a0c353927 | import unittest
from trie.trie import Trie
from musiikkiluokat.aani import Aani
from musiikkiluokat.savel import Savel
class TrieTest(unittest.TestCase):
    """Unit tests for the Trie melody structure.

    Identifiers are Finnish: aani = note, savel/savelma = note/melody,
    juurisolmu = root node, lapset = children, maara = occurrence count,
    lisaa_savelma = add melody, loyda_seuraava_solmu = find next node.
    """
    def setUp(self):
        # Order-4 trie with three 4-note melodies; melodies 1 and 2 share the
        # prefix a-d, so the root child for note a ends with count 2.
        self.trie = Trie(4)
        self.aani_a = Aani(69)
        self.aani_d = Aani(62)
        self.aani_c = Aani(60)
        self.aani_f = Aani(65)
        self.aani_b = Aani(71)
        savelma_1 = [self.aani_a, self.aani_d, self.aani_c, self.aani_a]
        savelma_2 = [self.aani_a, self.aani_d, self.aani_f, self.aani_b]
        savelma_3 = [self.aani_b, self.aani_d, self.aani_a, self.aani_d]
        self.trie.lisaa_savelma(savelma_1)
        self.trie.lisaa_savelma(savelma_2)
        self.trie.lisaa_savelma(savelma_3)
    def test_trien_ensimmainen_taso_muodostuu_oikein(self):
        # The first trie level must contain exactly the melodies' first notes.
        juurisavelet = []
        for solmu in self.trie.juurisolmu.lapset:
            juurisavelet.append(solmu.aani)
        self.assertTrue(self.aani_a in juurisavelet)
        self.assertTrue(self.aani_b in juurisavelet)
        self.assertFalse(self.aani_d in juurisavelet)
    def test_kerran_lisatyn_maara_on_yksi(self):
        # A note inserted once has count 1.
        self.assertEqual(self.trie.juurisolmu.lapset[1].maara, 1)
    def test_kahdesti_lisatyn_maara_on_kaksi(self):
        # A note inserted twice has count 2.
        self.assertEqual(self.trie.juurisolmu.lapset[0].maara, 2)
    def test_loyda_seuraava_solmu_loytaa_oikean(self):
        # Following the prefix a(69)-d(62) lands on the node for note d.
        testi_savelma = [Savel(69), Savel(62)]
        solmu = self.trie.loyda_seuraava_solmu(testi_savelma)
        self.assertEqual(solmu.aani, self.aani_d)
    def test_palauttaa_tyhjan_jos_savelmalle_ei_vastinetta(self):
        # An unmatched prefix yields None.
        solmu = self.trie.loyda_seuraava_solmu([Savel(62)])
        self.assertEqual(solmu, None)
    def test_lisaa_b_ja_a_trieen_kasvattaa_b_maaraa(self):
        aanet = [71, 69]
        self.trie.lisaa_aanet_trieen(aanet)
        self.assertEqual(self.trie.juurisolmu.lapset[1].maara, 2)
    def test_lisaa_b_ja_a_trieen_lisaa_a_haaran(self):
        # Adding b-a creates a new branch under b (children go 1 -> 2).
        self.assertEqual(len(self.trie.juurisolmu.lapset[1].lapset), 1)
        aanet = [71, 69]
        self.trie.lisaa_aanet_trieen(aanet)
        self.assertEqual(len(self.trie.juurisolmu.lapset[1].lapset), 2)
|
21,809 | 5c92c34ac7828d8a96d137d5aa218dd1a434a348 | # python3
# 可以选择其它语言实现
# 1. 自己实现一个先进后出的栈 MyStack
class Node:
    """Singly linked list node (value plus a reference to the next node)."""

    def __init__(self, value):
        """Store *value*; the node starts with no successor."""
        self.next = None
        self.value = value
class MyStack():
    """
    A LIFO stack that must NOT use the built-in list for storage; it is
    backed by a singly linked list of internal nodes.  The instance methods
    append, remove, pop, index, __len__, __eq__ and __str__ all mirror the
    semantics of the built-in list.
    """

    class _Node:
        """Internal linked-list node."""
        __slots__ = ('value', 'next')

        def __init__(self, value):
            self.value = value
            self.next = None

    def __init__(self, *args):
        """Initialise the stack with the given items, bottom to top."""
        self._head = None  # bottom of the stack
        self._size = 0
        for x in args:
            self.append(x)

    def append(self, x):
        """Push x on top of the stack (list.append semantics)."""
        node = MyStack._Node(x)
        if self._head is None:
            self._head = node
        else:
            cur = self._head
            while cur.next is not None:
                cur = cur.next
            cur.next = node
        self._size += 1

    def remove(self, x):
        """Remove the first occurrence of x; raise ValueError if absent."""
        prev, cur = None, self._head
        while cur is not None:
            if cur.value == x:
                if prev is None:
                    self._head = cur.next
                else:
                    prev.next = cur.next
                self._size -= 1
                return
            prev, cur = cur, cur.next
        raise ValueError('MyStack.remove(x): x not in stack')

    def pop(self):
        """Remove and return the top (last) element; IndexError when empty."""
        if self._head is None:
            raise IndexError('pop from empty stack')
        prev, cur = None, self._head
        while cur.next is not None:
            prev, cur = cur, cur.next
        if prev is None:
            self._head = None
        else:
            prev.next = None
        self._size -= 1
        return cur.value

    def index(self, x):
        """Return the 0-based position of the first occurrence of x
        (list.index semantics); raise ValueError if absent."""
        i, cur = 0, self._head
        while cur is not None:
            if cur.value == x:
                return i
            i, cur = i + 1, cur.next
        raise ValueError('%r is not in stack' % (x,))

    def __len__(self):
        return self._size

    def __eq__(self, other):
        """Equal when other is a MyStack holding the same values in order."""
        if not isinstance(other, MyStack):
            return NotImplemented
        a, b = self._head, other._head
        while a is not None and b is not None:
            if a.value != b.value:
                return False
            a, b = a.next, b.next
        return a is None and b is None

    def __str__(self):
        """Render like a list, e.g. '[1, 2, 4]'."""
        body = ''
        cur = self._head
        while cur is not None:
            if body:
                body += ', '
            body += repr(cur.value)
            cur = cur.next
        return '[' + body + ']'
def test_my_stack():
    """Exercise MyStack and check it matches built-in list semantics."""
    a = MyStack(1, 2, 3)
    assert len(a) == 3
    x = a.pop()
    assert x == 3
    a.append(4)
    print(a)
    # [1, 2, 4]
    a.remove(2)
    print(a)
    # [1, 4]
    i = a.index(4)
    # list.index is 0-based: in [1, 4] the value 4 sits at position 1.
    # (The original asserted 2, which contradicts the class's stated
    # "same functionality as list" contract.)
    assert i == 1
    b = MyStack(1, 4)
    c = MyStack(4, 1)
    assert a == b
    assert b != c
|
21,810 | 02a38721b4a7ce4249b6b10b0aea4f04b8f4dbbe | __author__ = 'powergx'
from flask import request, Response, render_template, session, url_for, redirect
from crysadm import app, r_session
from auth import requires_admin, requires_auth
import json
import requests
from urllib.parse import urlparse, parse_qs
import time
from api import collect, ubus_cd
@app.route('/excavators')
@requires_auth
def excavators():
    """Render the excavators overview page for the logged-in user.

    Pops the one-shot error/info flash messages from the session, loads every
    account of the user (plus its cached ':data' blob) from redis, and passes
    everything to the 'excavators.html' template.
    """
    user = session.get('user_info')
    # One-shot flash messages: read then clear so they show only once.
    err_msg = None
    if session.get('error_message') is not None:
        err_msg = session.get('error_message')
        session['error_message'] = None
    info_msg = None
    if session.get('info_message') is not None:
        info_msg = session.get('info_message')
        session['info_message'] = None
    accounts_key = 'accounts:%s' % user.get('username')
    accounts = list()
    # redis returns bytes; decode before using as key parts / JSON.
    for acct in sorted(r_session.smembers(accounts_key)):
        account_key = 'account:%s:%s' % (user.get('username'), acct.decode("utf-8"))
        account_data_key = account_key + ':data'
        account_data_value = r_session.get(account_data_key)
        account_info = json.loads(r_session.get(account_key).decode("utf-8"))
        if account_data_value is not None:
            account_info['data'] = json.loads(account_data_value.decode("utf-8"))
        accounts.append(account_info)
    # Global feature flag: drawcash is hidden when unset or '0'.
    show_drawcash = not (r_session.get('can_drawcash') is None or
                         r_session.get('can_drawcash').decode('utf-8') == '0')
    return render_template('excavators.html', err_msg=err_msg, info_msg=info_msg, accounts=accounts,
                           show_drawcash=show_drawcash)
@app.route('/collect/<user_id>', methods=['POST'])
@requires_auth
def collect_all(user_id):
    """Collect the crystals of one account, then redirect back to the list.

    Loads the account's stored session cookie from redis, calls the upstream
    collect API, flashes the outcome into the session, and zeroes the cached
    'not yet collected' counter on success.
    """
    user = session.get('user_info')
    account_key = 'account:%s:%s' % (user.get('username'), user_id)
    account_info = json.loads(r_session.get(account_key).decode("utf-8"))
    session_id = account_info.get('session_id')
    # NOTE: rebinds the route parameter to the upstream numeric account id.
    user_id = account_info.get('user_id')
    cookies = dict(sessionid=session_id, userid=str(user_id))
    r = collect(cookies)
    # Upstream convention: r == 0 means success, rd carries the message.
    if r.get('r') != 0:
        session['error_message'] = r.get('rd')
        return redirect(url_for('excavators'))
    session['info_message'] = '收取水晶成功.'
    # Reset the cached "crystals awaiting collection" counter.
    account_data_key = account_key + ':data'
    account_data_value = json.loads(r_session.get(account_data_key).decode("utf-8"))
    account_data_value.get('mine_info')['td_not_in_a'] = 0
    r_session.set(account_data_key, json.dumps(account_data_value))
    return redirect(url_for('excavators'))
@app.route('/collect/all', methods=['POST'])
@requires_auth
def collect_all_crystal():
    """Collect crystals for every account of the current user.

    Iterates all account ids stored in redis, calls the upstream collect API
    for each, accumulates per-account success/error lines (joined with
    '<br />' for HTML display), and flashes both lists into the session.
    """
    user = session.get('user_info')
    username = user.get('username')
    error_message = ''
    success_message = ''
    for b_user_id in r_session.smembers('accounts:%s' % username):
        account_key = 'account:%s:%s' % (username, b_user_id.decode("utf-8"))
        account_info = json.loads(r_session.get(account_key).decode("utf-8"))
        session_id = account_info.get('session_id')
        user_id = account_info.get('user_id')
        cookies = dict(sessionid=session_id, userid=str(user_id))
        r = collect(cookies)
        # r == 0 is success; rd is the upstream status message.
        if r.get('r') != 0:
            error_message += 'Id:%s : %s<br />' % (user_id, r.get('rd'))
        else:
            success_message += 'Id:%s : 收取水晶成功.<br />' % user_id
            # Reset the cached pending-crystal counter for this account.
            account_data_key = account_key + ':data'
            account_data_value = json.loads(r_session.get(account_data_key).decode("utf-8"))
            account_data_value.get('mine_info')['td_not_in_a'] = 0
            r_session.set(account_data_key, json.dumps(account_data_value))
    if len(success_message) > 0:
        session['info_message'] = success_message
    if len(error_message) > 0:
        session['error_message'] = error_message
    return redirect(url_for('excavators'))
@app.route('/drawcash/<user_id>', methods=['POST'])
@requires_auth
def drawcash(user_id):
    """Trigger a cash withdrawal for one account and redirect to the list.

    Mirrors collect_all: loads the account's credentials from redis, calls
    the upstream draw-cash API, flashes the result, and zeroes the cached
    withdrawable balance on success.
    """
    user = session.get('user_info')
    account_key = 'account:%s:%s' % (user.get('username'), user_id)
    account_info = json.loads(r_session.get(account_key).decode("utf-8"))
    session_id = account_info.get('session_id')
    # Rebind the route parameter to the upstream numeric account id.
    user_id = account_info.get('user_id')
    cookies = dict(sessionid=session_id, userid=str(user_id))
    from api import exec_draw_cash
    r = exec_draw_cash(cookies)
    if r.get('r') != 0:
        session['error_message'] = r.get('rd')
        return redirect(url_for('excavators'))
    else:
        session['info_message'] = r.get('rd')
    # Reset the cached withdrawable amount.
    account_data_key = account_key + ':data'
    account_data_value = json.loads(r_session.get(account_data_key).decode("utf-8"))
    account_data_value.get('income')['r_can_use'] = 0
    r_session.set(account_data_key, json.dumps(account_data_value))
    return redirect(url_for('excavators'))
@app.route('/reboot_device', methods=['POST'])
@requires_auth
def reboot_device():
    """Reboot one mining device via a ubus call, then return to the list.

    Device/session/account ids come straight from the POSTed form values.
    """
    device_id = request.values.get('device_id')
    session_id = request.values.get('session_id')
    account_id = request.values.get('account_id')
    ubus_cd(session_id, account_id, 'reboot', ["mnt", "reboot", {}], '&device_id=%s' % device_id)
    return redirect(url_for('excavators'))
@app.route('/set_device_name', methods=['POST'])
@requires_auth
def set_device_name():
    """Rename a device via ubus; ids are parsed from the posted settings URL.

    The POSTed 'url' field carries device_id/session_id/user_id in its query
    string; 'name' is the new device name.  Always reports success as JSON.
    """
    query_params = parse_qs(urlparse(request.values.get('url')).query, keep_blank_values=True)
    new_name = request.values.get('name')
    ubus_cd(query_params['session_id'][0],
            query_params['user_id'][0],
            'set_device_name',
            ["server", "set_device_name",
             {"device_name": new_name, "device_id": query_params['device_id'][0]}])
    return json.dumps(dict(status='success'))
21,811 | 3bc779ad28402ad08274cabca8deb1703199d348 | import time
from hashlib import sha256
import json
class Block:
    """One block of the chain.

    The constructor only prepares the immutable part that gets hashed
    (``blockString``); ``minedBy`` and ``blockHash`` are filled in later by
    the miner.  Index 0 denotes the genesis block, which carries no
    transactions and an empty previous-block hash.
    """

    #For miner program will only take in blockHash input.
    def __init__(self, index=None, transactions=None, prevBlockHash=None, difficulty=None):
        self.dateCreated = time.time()
        self.difficulty = difficulty
        self.index = index
        self.transactions = transactions
        self.prevBlockHash = prevBlockHash
        self.nonce = 0
        # Both populated by the miner once a valid nonce is found.
        self.minedBy = None
        self.blockHash = None
        if index == 0:
            # Genesis block: empty payload, no predecessor.
            block_data = ""
            self.prevBlockHash = ""
        else:
            block_data = json.dumps([ob.__dict__ for ob in self.transactions])
        self.blockDataHash = sha256(block_data.encode()).hexdigest()
        # Canonical string the miner hashes together with the nonce.
        self.blockString = "".join([
            str(self.dateCreated),
            str(self.difficulty),
            str(self.index),
            self.prevBlockHash,
            self.blockDataHash,
        ])
|
21,812 | 61d9d873d99c6977bf2fe6fddbebfa0ba9226439 | #!/usr/bin/env python3
#
# lc0_analyzer.py --help
#
# See https://github.com/killerducky/lc0_analyzer/README.md for description
#
# See example.sh
#
import chess
import chess.pgn
import chess.uci
import chess.svg
import re
import matplotlib.pyplot as plt
import matplotlib.axes
import pandas as pd
import numpy as np
import os
import math
import argparse
from collections import OrderedDict
import svgutils.transform as sg
from svgutils.compose import *
#import cairosvg
class Lc0InfoHandler(chess.uci.InfoHandler):
    """UCI info handler that collects lc0's verbose-move-stats strings.

    Every "string" info line is prefixed with the SAN form of its move and
    appended to ``self.strings``.  ``board`` supplies the position for the
    UCI->SAN conversion; callers must keep it in sync with the position the
    engine is searching (see analyze()).
    """
    def __init__(self, board):
        super().__init__()
        self.reset()
        self.board = board
    def reset(self):
        # Clear accumulated stats between searches.
        self.strings = []
        self.moves = {}
    def post_info(self):
        if "string" in self.info:
            #self.strings.append(self.info["string"])
            # "c7f4 (268 ) N: 40 (+37) (P: 20.23%) (Q: -0.04164) (U: 0.08339) (Q+U: 0.04175) (V: 0.1052)"
            (move, info) = self.info["string"].split(maxsplit=1)
            move = self.board.san(self.board.parse_uci(move))
            self.strings.append("%6s %s" % (move, self.info["string"]))
        super().post_info() # Release the lock
def q2cp(q):
    """Convert an lc0 Q value (expected outcome in [-1, 1]) to centipawns
    using lc0's tan-based mapping; inverse of cp2q()."""
    scale = 290.680623072
    steepness = 1.548090806
    return scale * math.tan(steepness * q) / 100.0
def cp2q(cp):
    """Inverse of q2cp(): map a centipawn score onto lc0's Q scale (-1, 1)."""
    scaled = cp * 100.0 / 290.680623072
    return math.atan(scaled) / 1.548090806
def set_q2cp_ticks(ax):
    """Add a right-hand centipawn axis to a Q-scaled matplotlib axis.

    The left axis stays in Q units [-1, 1]; the twin axis places its ticks
    at cp2q(cp) for a fixed set of centipawn values so both scales line up.
    """
    ax.set_ylim(-1, 1)
    ax2 = ax.twinx()
    ax2.set_ylim(-1, 1)
    cp_vals = [-128, -8, -4, -2, -1, 0, 1, 2, 4, 8, 128]
    q_vals = [cp2q(x) for x in cp_vals]
    ax2.set_yticks(q_vals)
    ax2.set_yticklabels(cp_vals)
    ax2.set_ylabel("CP")
def parse_info(info):
    """Parse one verbose-move-stats line written out by analyze().

    Expected shape:
      "INFO: TN: 1 Qf4 c7f4 (268 ) N: 40 (+37) (P: 20.23%) (Q: -0.04164) (U: 0.08339) (Q+U: 0.04175) (V: 0.1052)"

    Returns a dict with keys TN, sanmove, ucimove, N, P, Q, U, V, or None
    for lines that do not start with "INFO:".
    """
    if not re.match("^INFO:", info):
        return None
    (_, _, TN, sanmove, ucimove, rest) = info.split(maxsplit=5)
    # Every numeric token in the remainder, in fixed positions.
    numbers = re.split(r"[^-.\d]+", rest)
    (_, _, N, _, P, Q, U, Q_U, V, _) = numbers
    if V == "-.----":
        V = 0  # placeholder lc0 prints for unevaluated nodes
    return {
        "TN": int(TN),
        "sanmove": sanmove,
        "ucimove": ucimove,
        "N": int(N),
        "P": float(P),
        "Q": float(Q),
        "U": float(U),
        "V": float(V),
    }
def getgame(pgn_filename, gamenum):
    """Return the game whose Round header equals *gamenum* from a PGN file.

    When *gamenum* is falsy, the first game in the file is returned.

    Raises:
        ValueError: if the file contains no matching game.
    """
    with open(pgn_filename) as pgn:
        # find the game (probably a better way to do this?)
        game = None
        while True:
            game = chess.pgn.read_game(pgn)
            if not game:
                break
            if not gamenum or game.headers["Round"] == gamenum:
                break
        if not game:
            # BUG FIX: original did `raise("Game not found")` — raising a str
            # is a TypeError in Python 3 (exceptions must derive from
            # BaseException), masking the real error message.
            raise ValueError("Game not found")
        return game
# plynum = 0 = after White's move 1
# plynum = 1 = after Black's move 1
# plynum = 2(M-1)+0 = after White's move M
# plynum = 2(M-1)+1 = after Black's move M
def get_board(pgn_filename, gamenum, plynum):
    """Return (board, svg_figure, info_text) for the position after *plynum*.

    plynum indexes plies as documented above this function (0 = after
    White's first move); plynum < 0 yields the initial position.  info_text
    accumulates the player names plus a UCI "position startpos moves ..."
    line and the SAN move list.
    """
    game = getgame(pgn_filename, gamenum)
    info = ""
    info += game.headers["White"] + "\n"
    info += game.headers["Black"] + "\n"
    nodes = list(game.mainline())
    # There must be a better way to get the list of moves up to plynum P?
    ucistr = ""
    sanstr = ""
    if plynum >= 0:
        for node in nodes[0:plynum+1]:
            ucistr += " " + str(node.move)
            sanstr += " " + str(node.san())
        info += "position startpos moves" + ucistr + "\n"
        info += sanstr + "\n" # TODO: Add move numbers. Surely python-chess can do this for me?
        # Something like this will work...
        #game = getgame(pgn_filename, gamenum)
        #end = game.end()
        #board = end.board()
        #print(game.board().variation_san(board.move_stack))
        node = nodes[plynum]
        board = node.board()
        # Highlight the last move played in the rendered diagram.
        fig = chess.svg.board(board=board, lastmove=node.move)
    else:
        info += "position startpos\n"
        board = chess.Board()
        fig = chess.svg.board(board=board)
    return (board, fig, info)
# TODO:
# gamenum/plynum vs fen is a mess right now...
def analyze_and_plot(engine, info_handler, pgn_filename, gamenum, plynum, fen=None):
    """Run the engine analysis for one position, then render its plots."""
    analyze(engine, info_handler, pgn_filename, gamenum, plynum, fen)
    plot(pgn_filename, gamenum, plynum)
def analyze(engine, info_handler, pgn_filename, gamenum, plynum, fen=None):
    """Search one position at each node budget in NODES and dump the stats.

    Writes plots/<pgn>_<round>_<move>/data.html containing the board, the
    engine's verbose move stats at every node count, and an HTML skeleton
    referencing the SVGs that plot() produces later.  Skips the position
    entirely if data.html already exists (acts as a cache).  Uses the module
    globals LC0 and NODES set in the __main__ block.
    """
    savedir = "plots/%s_%s_%05.1f" % (pgn_filename, gamenum, (plynum+3)/2.0)
    datafilename = "%s/data.html" % (savedir)
    if os.path.exists(datafilename):
        return
    if not os.path.exists(savedir):
        os.makedirs(savedir)
    outfile = open(datafilename, "w")
    outfile.write("""
<img src="board.svg" height="100%"/> <br>
<img src="Q.svg"/> <br>
<img src="Q2.svg"/> <br>
<img src="N.svg"/> <br>
<img src="P.svg"/> <br>
<pre>""")
    if fen:
        board = chess.Board(fen)
        fig = chess.svg.board(board=board)
        info = "position %s\n" % (fen)
    else:
        (board, fig, info) = get_board(pgn_filename, gamenum, plynum)
    outfile.write(info)
    outfile.write(board.fen() + "\n")
    outfile.write(str(board) + "\n")
    open("%s/board.svg" % (savedir), "w").write(fig)
    outfile.write(str(LC0) + "\n")
    engine.uci()
    outfile.write(engine.name + "\n")
    # Reset engine search tree, but not engine NNCache, by setting different position
    engine.position(chess.Board())
    info_handler.reset()
    info_handler.board = chess.Board()
    engine.go(nodes=1)
    for nodes in NODES:
        # Do our position now
        info_handler.reset()
        info_handler.board = board
        engine.position(board)
        engine.go(nodes=nodes)
        # Tag each verbose-stats line with the total-node budget for parse_info().
        for s in info_handler.strings:
            outfile.write("INFO: TN: %s %s\n" % (nodes, s))
    outfile.write("</pre>\n")
    outfile.close()
def plot(pgn_filename, gamenum, plynum):
    """Render the N/Q/Q2/P SVG plots for one analyzed position.

    Re-parses the INFO lines from the position's data.html (written by
    analyze()), keeps the NUM_MOVES most-visited moves at the largest node
    budget, and saves four plots into the same directory: child visits vs
    total nodes (N), value vs total nodes (Q), value vs child visits (Q2),
    and the policy bar chart (P).
    """
    savedir = "plots/%s_%s_%05.1f" % (pgn_filename, gamenum, (plynum+3)/2.0)
    # Parse data into pandas
    move_infos = []
    with open("%s/data.html" % savedir) as infile:
        for line in infile.readlines():
            info = parse_info(line)
            if not info: continue
            move_infos.append(info)
    df = pd.DataFrame(move_infos)
    # Filter top 4 moves, and get P
    TNmax = df["TN"].max()
    best = df[df["TN"] == TNmax].sort_values("N", ascending=False).head(NUM_MOVES)
    moves = list(best["sanmove"])
    bestdf = df.loc[df["sanmove"].isin(moves)]
    # Plots
    fig, ax = plt.subplots()
    for move in moves:
        tmp = bestdf[bestdf["sanmove"] == move]
        tmp.plot.line(x="TN", y="N", logx=True, logy=True, ax=ax)
    ax.legend(moves)
    plt.title("Child Node Visits vs Total Nodes")
    plt.xlabel("Total Nodes")
    plt.ylabel("Child Nodes")
    plt.savefig("%s/N.svg" % (savedir))
    fig, ax = plt.subplots()
    for move in moves:
        tmp = bestdf[bestdf["sanmove"] == move]
        tmp.plot.line(x="TN", y="Q", logx=True, logy=False, ax=ax)
    ax.legend(moves)
    plt.title("Value vs Total Nodes")
    plt.xlabel("Total Nodes")
    plt.ylabel("Value")
    set_q2cp_ticks(ax)
    plt.savefig("%s/Q.svg" % (savedir))
    # This plot can have multiple entries with the same index="N", so pivot fails.
    fig, ax = plt.subplots()
    for move in moves:
        tmp = bestdf[bestdf["sanmove"] == move]
        tmp.plot.line(x="N", y="Q", logx=True, logy=False, ax=ax)
    ax.legend(moves)
    plt.title("Value vs Child Node Visits")
    plt.xlabel("Child Node Visits")
    plt.ylabel("Value")
    set_q2cp_ticks(ax)
    plt.savefig("%s/Q2.svg" % (savedir))
    best.plot.bar(x="sanmove", y="P", legend=False)
    plt.xlabel("")
    plt.title("Policy")
    plt.savefig("%s/P.svg" % (savedir))
    plt.close("all")
def analyze_game(pgn_filename, gamenum, plynum, plies):
    """Analyze *plies* consecutive plies of one game and write an index page.

    Writes plots/<pgn>_<round>_<move>_<plies>.html, an HTML table stitching
    together the per-ply board/Q/Q2/N/P SVGs, then runs analyze_and_plot for
    each ply.  Engine startup failures are tolerated so pre-existing data
    files can still be (re)plotted without an lc0 executable.
    """
    try:
        # In case you have the data files already, but no lc0 exe.
        # NOTE(review): bare except is deliberate best-effort here, but it
        # also hides unrelated errors — consider narrowing.
        engine = chess.uci.popen_engine(LC0)
        info_handler = Lc0InfoHandler(None)
        engine.info_handlers.append(info_handler)
    except:
        print("Warning: Could not open Lc0 engine.")
        engine = None
        info_handler = None
    if not os.path.exists("plots"):
        os.makedirs("plots")
    outfile = open("plots/%s_%s_%0.3f_%s.html" % (pgn_filename, gamenum, (plynum+3)/2, plies), "w")
    outfile.write('<table width="%d" height="500">\n' % (plies*300))
    outfile.write("<tr>\n")
    # One table row per plot type, one cell per ply.
    for svgfile in ("board", "Q", "Q2", "N", "P"):
        for p in range(plies):
            savedir = "%s_%s_%05.1f" % (pgn_filename, gamenum, (plynum+p+3)/2.0)
            outfile.write('<td> <img src="%s/%s.svg" width="100%%"/> </td>\n' % (savedir, svgfile))
        outfile.write("</tr>\n")
    outfile.write("</tr>\n")
    outfile.write("</table>\n")
    outfile.close()
    for p in range(plies):
        analyze_and_plot(engine, info_handler, pgn_filename, gamenum, plynum+p)
    if engine: engine.quit()
def analyze_fen(name, fen):
    """Analyze a single FEN position; *name* labels the output directory."""
    engine = chess.uci.popen_engine(LC0)
    info_handler = Lc0InfoHandler(None)
    engine.info_handlers.append(info_handler)
    analyze_and_plot(engine, info_handler, name, 0, 0, fen)
    engine.quit()
def compose(pgn_filename, gamenum, move_start, numplies, xsize=470, ysize=350, scale=0.6, scaleb=0.85):
    """Stitch the per-ply SVGs into per-position and combined pages.

    Writes an HTML index table, an 'all.svg' column (board + four plots) per
    position, and one wide '<...>_all.svg' tiling the columns of *numplies*
    consecutive plies starting at *move_start* (half-move granularity).
    """
    outfile = open("plots/%s_%s_%05.1f_%03d.html" % (pgn_filename, gamenum, move_start, numplies), "w")
    outfile.write('<table width="%d" height="500">\n' % (numplies*300))
    outfile.write("<tr>\n")
    for svgfile in ("board", "Q", "Q2", "N", "P"):
        for move in np.arange(move_start, move_start+numplies/2, 0.5):
            savedir = "%s_%s_%05.1f" % (pgn_filename, gamenum, move)
            outfile.write('<td> <img src="%s/%s.svg" width="100%%"/> </td>\n' % (savedir, svgfile))
        outfile.write("</tr>\n")
    outfile.write("</tr>\n")
    outfile.write("</table>\n")
    outfile.close()
    # One vertical strip (board over the four plots) per analyzed position.
    for move in np.arange(move_start, move_start+numplies/2, 0.5):
        savedir = "plots/%s_%s_%05.1f" % (pgn_filename, gamenum, move)
        fig = Figure(xsize*scale, ysize*5*scale,
            Panel(SVG("%s/board.svg" % (savedir)).scale(scale*scaleb)),
            Panel(SVG("%s/Q.svg" % (savedir)).scale(scale)),
            Panel(SVG("%s/Q2.svg" % (savedir)).scale(scale)),
            Panel(SVG("%s/N.svg" % (savedir)).scale(scale)),
            Panel(SVG("%s/P.svg" % (savedir)).scale(scale)),
            )
        fig.tile(1,5)
        fig.save("%s/all.svg" % (savedir))
    # Then tile the strips side by side into one wide overview SVG.
    panels = []
    for move in np.arange(move_start, move_start+numplies/2, 0.5):
        panels.append(Panel(SVG("plots/%s_%s_%05.1f/all.svg" % (pgn_filename, gamenum, move))))
    fig = Figure(xsize*(numplies)*scale, ysize*5*scale, *panels)
    fig.tile(numplies, 1)
    filename = "plots/%s_%s_%05.1f_all" % (pgn_filename, gamenum, move_start)
    fig.save("%s.svg" % (filename))
    # cariosvg doesn't parse units "px"
    #cairosvg.svg2png(url="%s.svg" % (filename), write_to="%s.png" % (filename))
if __name__ == "__main__":
usage_str = """
lc0_analyzer --pgn pgnfile --move 4.0 --numplies 6
lc0_analyzer --fen fenstring --numplies 6"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=usage_str)
parser.add_argument("--pgn", type=str, help="pgn file to process")
parser.add_argument("--round", type=str, help="round of pgn file, omit to pick first game")
parser.add_argument("--move", type=float, help="""
4.0 = Before white's 4th move (analyze position after black's 3rd move)
4.5 = Before blacks's 4th move (analyze position after white's 4th move)
""")
parser.add_argument("--numplies", type=int, help="number of plies to analyze")
parser.add_argument("--fen", type=str, help="number of plies to analyze")
parser.add_argument("--fen_desc", type=str, help="description of fen position")
parser.add_argument("--lc0", type=str, required=True, help="lc0 executable")
parser.add_argument("--w", type=str, required=True, help="path to weights")
parser.add_argument("--nodes", type=int, default=2**16, help="number of nodes to analyze for each position, will be rounded to nearest power of 2")
parser.add_argument("--topn", type=int, default=4, help="plot top N moves")
parser.add_argument("--ply_per_page", type=int, default=6, help="how many plies to put together in one .svg page")
args = parser.parse_args()
LC0 = [
args.lc0,
"-w", args.w,
"-l", "lc0log.txt",
#"-t", "1",
#"--max-prefetch=0",
#"--no-out-of-order-eval", # Was trying to be more accurate, but see issue #680
#"--collision-visit-limit=1",
#"--minibatch-size=1",
"--minibatch-size=16", # because of #680, use this compromise between accuracy and speed
"--smart-pruning-factor=0", # We will start and stop in loops, so disable pruning
"--nncache=1000000",
"--verbose-move-stats",
]
NODES = [ 2**n for n in range(round(math.log(args.nodes, 2))+1)]
NUM_MOVES = args.topn
if args.pgn:
game = getgame(args.pgn, args.round)
gamelen = len(game.end().board().move_stack)
plynum = round(args.move*2-3)
if plynum + args.numplies > gamelen:
args.numplies = gamelen-plynum
analyze_game(args.pgn, args.round, round(args.move*2-3), args.numplies)
for m in np.arange(args.move, args.move+args.numplies/2, 0.5*args.ply_per_page):
compose(args.pgn, args.round, m, min(args.ply_per_page, min(args.ply_per_page, args.numplies-(m-args.move)*2)))
elif args.fen:
analyze_fen(args.fen_desc, args.fen)
#compose("plots/%s_%s" % (args.fen_desc, args.round), args.numplies)
else: raise(Exception("must provide --pgn or --fen"))
|
21,813 | 0c8aa9c8d2ff667918dd550f394f3de3bf5849cf | import numpy as np
import matplotlib.pylab as plt
# Derivative (naive forward difference)
# Not used: the forward difference is less accurate than the central difference below
def diff(func, x):
    """Forward-difference approximation of d(func)/dx at x.

    Kept for reference only; numrerical_diff (central difference) is the
    more accurate variant used elsewhere.
    """
    step = 1e-4
    return (func(x + step) - func(x)) / step
# Numerical differentiation
# Uses the central difference, because numerical differentiation inherently carries error
def numrerical_diff(func, x):
    """Central-difference numerical derivative of func at x.

    (The misspelled name is kept as-is to preserve the public interface.)
    """
    step = 1e-4  # 0.0001
    return (func(x + step) - func(x - step)) / (2 * step)
# Gradient: when an expression has two or more variables, collect the partial
# derivative of every variable into one vector.
def numerical_gradient(func, x):
    """Numerical gradient of *func* at *x* via central differences.

    Generalized from the original flat-array-only loop: np.nditer walks
    every element by multi-index, so *x* may be a numpy array of any shape
    (the returned gradient has the same shape).  Each element of *x* is
    perturbed in place and restored afterwards, so *func* must not cache
    references to *x* between calls.
    """
    h = 1e-4  # 0.0001
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        tmp_val = x[idx]
        # evaluate func(x+h) for this element only
        x[idx] = tmp_val + h
        fxh1 = func(x)
        # evaluate func(x-h)
        x[idx] = tmp_val - h
        fxh2 = func(x)
        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp_val  # restore the original value
        it.iternext()
    return grad
# sample (a simple quadratic function)
# y = 0.01x^2 + 0.1x
def func1(x):
    """Sample quadratic: f(x) = 0.01*x**2 + 0.1*x."""
    quadratic_term = 0.01 * x ** 2
    linear_term = 0.1 * x
    return quadratic_term + linear_term
# x = np.arange(0.0,20.0,0.1)
# y = func1(x)
# plt.xlabel("x")
# plt.ylabel("f(x)")
# plt.plot(x,y)
# plt.show()
def func1_sample(t):
    """Simple parabola used to demo diff(): f(t) = t^2."""
    squared = t**2
    return squared
# Demo: forward-difference derivative of t^2 at t=0.  Analytically this is 0;
# the printed value is ~1e-4 because (h^2 - 0)/h = h for the forward difference.
print(diff(func1_sample,0))
# print(numrerical_diff(func1,10))
# sample2 (computes the sum of squares of the arguments)
# there are two variables
# f(x0,x1) = x0^2 + x1^2
def func2(x):
    """Sum of squares of the first two components: f(x0, x1) = x0^2 + x1^2."""
    x0, x1 = x[0], x[1]
    return x0 ** 2 + x1 ** 2
# or equivalently np.sum(x**2)
def func2_tmp1(x0):
    """func2 with x1 frozen at 4.0 (for the partial-derivative demo)."""
    return x0 ** 2.0 + 16.0
def func2_tmp2(x1):
    """func2 with x0 frozen at 3.0 (for the partial-derivative demo)."""
    return 9.0 + x1 ** 2.0
# A partial derivative takes the derivative with respect to one variable when
# the expression has two or more variables.
# print(numrerical_diff(func2_tmp1,3.0))
# print(numrerical_diff(func2_tmp2,4.0))
# print(numerical_gradient(func2,np.array([3.0,4.0,3.0])))
# print(numerical_gradient(func2,np.array([0.0,2.0]))) |
21,814 | e780ad85b7c41b81b3c7ee15a883efc0baf5c8d2 | smallest=None
for number in[22,1,44,55,76]:
if smallest is None:
smallest=number
elif number<smallest:
smallest=number
print'smallest',smallest
largest=None
for value in[1,23,33,56,34]:
if largest is None:
largest=value
elif largest<value:
largest=value
print'largest',largest
|
21,815 | b136676de68f6cb25694083f29efdaa2a0709ce8 |
import os
import bob
def input_wav_file(file_name):
file_path = 'wav/'+file_name
Base_Processor = bob.bio.spear.preprocessor.Base()
return Base_Processor.read_original_data(file_path)
|
21,816 | abc5bc7b9cd037df779cc089a986d52c6dc391b4 | """You are given two jugs with capacities x and y litres. There is an infinite amount of water supply available. You need to determine whether it is possible to measure exactly z litres using these two jugs.
If z liters of water is measurable, you must have z liters of water contained within one or both buckets by the end.
Operations allowed:
Fill any of the jugs completely with water.
Empty any of the jugs.
Pour water from one jug into another till the other jug is completely full or the first jug itself is empty.
最开始时,想到了问题本质是找到 m * x + n * y = z(m, n为正是,表示装满水壶,为负表示清空水壶)中的m, n。
想着设置两个循环,让m, n遍历固定的数值,只要找到满足条件的m, n即可解题。
但这种思路存在两个问题:一是m, n遍历的范围怎么确定,二是题目需要回答的是是否可行,并非要过程。
果不其然,即使让m, n在-100到100之间遍历,仍无法解决问题
https://www.math.tamu.edu/~dallen/hollywood/diehard/diehard.htm
通过此网址,找到了解题思路,只要x, y 最大公约数为1,可以生成0-(x + y)之间的所有数字。返回True,
提交后发现2, 6, 8返回的是True。并考虑到一些特殊情况。生成了最终的代码
"""
class Solution:
    """Water-jug problem (LeetCode 365): decide whether exactly z litres can
    be measured with two jugs of capacity x and y, via the gcd/Bezout argument."""

    def gcd(self, x, y):
        """Greatest common divisor, iterative Euclidean algorithm."""
        while y != 0:
            x, y = y, x % y
        return x

    def canMeasureWater(self, x: int, y: int, z: int) -> bool:
        # Trivial targets: nothing, or exactly one full jug.
        if z in (0, x, y):
            return True
        # Both jugs together cannot hold more than x + y litres.
        if z > x + y:
            return False
        # Reachable amounts are exactly the non-negative multiples of gcd(x, y).
        g = self.gcd(x, y)
        return g != 0 and z % g == 0
|
21,817 | 7c014a1b3b37c3325d2c591fa2571ece4243c436 | import os
import sys
import argparse
def parseArgument():
    """Build and parse the MSInspector command-line interface.

    Returns the argparse.Namespace with: SkylineCmdBinary, RscriptBinary,
    input (list), experiment_type, mypeptideType_file, output_dir.
    """
    dir_tmp = os.getcwd()
    version = '1.1'
    # Banner block rendered above the usage text; the {} slot receives the
    # framed '#' line, left-justified to the width of 'usage:'.
    usageTmp = '\r{}\n\
## ##\n\
## MSInspector (Quality control of the five experiments in ##\n\
## CPTAC assay portal) ##\n\
## ##\n\
## last change: 09/17/2020 ##\n\
## ##\n\
## ##\n\
###############################################################################\n\
\n'.format('###############################################################################'.ljust(len('usage:')))
    # Worked examples use Windows paths; backslashes are kept literal here.
    usage = usageTmp+'Usage: MSInspector [-h] [<args>]\n\nExample 1:\nMSInspector -ps "C:\Program Files\SkylineDailyRunner\SkylineDailyRunner.exe" -pr "C:\Program Files\R\R-3.5.2\\bin\Rscript.exe" -i "D:\Skyline_analysis\qcAssayPortal\data\UVicPC_Borchers-MousePlasma_Agilent6490_directMRM-Exp1\20160309_MouseV2B1.5_refined_2018-07-03_14-59-18.sky.zip" -e exp1 -t "D:\Skyline_analysis\qcAssayPortal\data\UVicPC_Borchers-MousePlasma_Agilent6490_directMRM-Exp1\meta_data.tsv"\nExample 2:\nMSInspector -ps "C:\Program Files\Skyline\SkylineCmd.exe" -pr "C:\Program Files\R\R-3.5.2\\bin\Rscript.exe" -i "D:\Skyline_analysis\qcAssayPortal\data\UVicPC_Borchers-MousePlasma_Agilent6490_directMRM-Exp2" -e exp2\n'
    #parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description=usage, usage=argparse.SUPPRESS)
    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description=usage, usage=argparse.SUPPRESS)
    parser.add_argument('-ps', required=True, dest='SkylineCmdBinary', metavar='<dir required>', help="the path to SkylineCmd.exe, SkylineRunner.exe, or SkylineDailyRunner.exe in the Windows OS")
    parser.add_argument('-pr', required=True, dest='RscriptBinary', metavar='<dir required>', help="the path to Rscript.exe in the Windows OS")
    parser.add_argument('-i', nargs='+', required=True, dest='input', metavar='input <required>', help='two options: 1. A directory where all the *.sky.zip files are located 2.*.sky.zip *.sky.zip ... (at least one input *.sky.zip)')
    parser.add_argument('-e', required=True, dest='experiment_type', metavar='string <required>', help='the experiment type. Choose one from Options: exp1 , exp2 , exp3 , exp4, exp5')
    parser.add_argument('-t', default='Null', dest='mypeptideType_file', metavar='peptide type file <required>', help='the directory of the file whose first column is the *.sky.zip and second column is peptide type ("purified" or "crude"). When -e is exp1, it must be assigned. Otherwise, please leave it blank.')
    parser.add_argument('-o', default=dir_tmp, dest='output_dir', metavar='<dir>', help='the directory of the outputs (default: current directory)')
    #parser.add_argument('-p', dest='plot_output', action = 'store_true', help='switch to plot figures and generate tables of the specific experiment type (default: off)')
    parser.add_argument('--version', action='version', version='%s'%(version))
    return parser.parse_args()
21,818 | 24900bc28bd198c18cd6f308fc0156986ae97c3c | """Helper for projects of pivotal tracker"""
import json
import string
from random import choices
from core.logger.singleton_logger import SingletonLogger
from core.rest_client.request_manager import RequestManager
from definitions import STORED_ID
LOGGER = SingletonLogger().get_logger()
class ProjectHelper:
    """Static helpers for Pivotal Tracker project CRUD over RequestManager."""

    def __init__(self):
        """Utility class"""

    @staticmethod
    def create_project():
        """Create a project with a random 10-char name and store its id."""
        client = RequestManager()
        name = "".join(choices(string.ascii_letters + string.digits, k=10))
        client.set_method("POST")
        client.set_endpoint("/projects")
        client.set_body(json.dumps({"name": name}))
        response = client.execute_request()
        STORED_ID['project_id'] = response.json()['id']

    @staticmethod
    def delete_project(project_id):
        """Delete the project identified by *project_id*."""
        client = RequestManager()
        client.set_method("DELETE")
        client.set_endpoint("/projects/{0}".format(project_id))
        client.execute_request()

    @staticmethod
    def delete_all_projects():
        """Delete every project visible to the account, logging odd entries."""
        client = RequestManager()
        client.set_method("GET")
        client.set_endpoint("/projects")
        response = client.execute_request()
        for entry in response.json():
            try:
                ProjectHelper.delete_project(entry["id"])
            except TypeError:
                # Non-dict entry in the listing; record it and keep going.
                LOGGER.info(entry)

    @staticmethod
    def read_project(response):
        """Store the project id found in a request *response*."""
        STORED_ID['project_id'] = response.json()["id"]

    @staticmethod
    def delete_stored_project():
        """Delete the project whose id was stored by an earlier call."""
        client = RequestManager()
        client.set_method("DELETE")
        client.set_endpoint("/projects/{0}".format(STORED_ID['project_id']))
        client.execute_request()
21,819 | fb5c8114ace898eb9f3de7f5e4fd565d771ad0e8 | from ota_update.main.ota_updater import OTAUpdater
def download_and_install_update_if_available():
    """Check the GitHub repo for a newer build and install it over Wi-Fi."""
    updater = OTAUpdater('https://github.com/santimai/main.git')
    updater.download_and_install_update_if_available('wifi-ssid', 'wifi-password')
def start():
    """Placeholder main routine: announce the two startup stages."""
    for tag in ("COM", "COM_2"):
        print(tag)
def boot():
    """Run the OTA update check, then hand off to the main routine."""
    download_and_install_update_if_available()
    start()


# Module entry point: boot immediately when the script runs.
boot()
|
21,820 | 0835c1710247b2598f487e8bf5e5221fe169059a | #!/usr/local/eman/2.06/Python/bin/python
# Author: Pawel A.Penczek, 09/09/2006 (Pawel.A.Penczek@uth.tmc.edu)
# Copyright (c) 2000-2006 The University of Texas - Houston Medical School
#
# This software is issued under a joint BSD/GNU license. You may use the
# source code in this file under either license. However, note that the
# complete EMAN2 and SPARX software packages have some GPL dependencies,
# so you are responsible for compliance with the licenses of these packages
# if you opt to use BSD licensing. The warranty disclaimer below holds
# in either instance.
#
# This complete copyright notice must be included in any revised version of the
# source code. Additional authorship citations may be added, but existing
# author citations must be preserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from global_def import *
def params_2D_3D(alpha, sx, sy, mirror):
    """
    Convert 2D alignment parameters (alpha, sx, sy, mirror) into 3D
    alignment parameters (phi, theta, psi, s2x, s2y).  The 2D mirror flag
    is folded into phi/theta/psi rather than returned.
    """
    # Undo the in-plane rotation to obtain the residual angle and shifts.
    alphan, s2x, s2y, _ = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)
    if mirror > 0:
        # (540+0)%360, 180-0, (540-0+alphan)%360 from the original recipe.
        return 180.0, 180.0, (540.0 + alphan) % 360.0, s2x, s2y
    return 0, 0, alphan % 360.0, s2x, s2y
def params_3D_2D(phi, theta, psi, s2x, s2y):
    """
    Convert 3D alignment parameters (phi, theta, psi, s2x, s2y) into
    2D alignment parameters (alpha, sx, sy, mirror); mirror is set when
    theta is in the lower hemisphere (theta > 90).
    """
    mirror = 1 if theta > 90.0 else 0
    base = 540.0 if mirror else 360.0
    alpha, sx, sy, _ = compose_transform2(0, s2x, s2y, 1.0, base - psi, 0, 0, 1.0)
    return alpha, sx, sy, mirror
# Amoeba uses the simplex method of Nelder and Mead to maximize a
# function of 1 or more variables.
#
# Copyright (C) 2005 Thomas R. Metcalf
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
def amoeba(var, scale, func, ftolerance=1.e-4, xtolerance=1.e-4, itmax=500, data=None):
    '''Use the simplex method to maximize a function of 1 or more variables.

       Input:
              var = the initial guess, a list with one element for each variable
              scale = the search scale for each variable, a list with one
                      element for each variable.
              func = the function to maximize.

       Optional Input:
              ftolerance = convergence criterion on the function values (default = 1.e-4)
              xtolerance = convergence criterion on the variable values (default = 1.e-4)
              itmax = maximum number of iterations allowed (default = 500).
              data = data to be passed to func (default = None).

       Output:
              (varbest,funcvalue,iterations)
              varbest = a list of the variables at the maximum.
              funcvalue = the function value at the maximum.
              iterations = the number of iterations used.

       - Setting itmax to zero disables the itmax check and the routine will run
         until convergence, even if it takes forever.
       - Setting ftolerance or xtolerance to 0.0 turns that convergence criterion
         off.  But do not set both ftolerance and xtolerance to zero or the routine
         will exit immediately without finding the maximum.
       - To check for convergence, check if (iterations < itmax).

       The function should be defined like func(var,data) where
       data is optional data to pass to the function.

       Example:

       import amoeba
       def afunc(var,data=None): return 1.0-var[0]*var[0]-var[1]*var[1]
       print amoeba.amoeba([0.25,0.25],[0.5,0.5],afunc)

       Version 1.0 2005-March-28 T. Metcalf
               1.1 2005-March-29 T. Metcalf - Use scale in simsize calculation.
                                            - Use func convergence *and* x convergence
                                              rather than func convergence *or* x
                                              convergence.
               1.2 2005-April-03 T. Metcalf - When contracting, contract the whole
                                              simplex.
    '''
    # NOTE: Python 2 code (xrange). Nelder-Mead with the inequality sense
    # flipped throughout so that it MAXIMIZES func instead of minimizing.
    nvar = len(var)                  # number of variables in the minimization
    nsimplex = nvar + 1              # number of vertices in the simplex
    # first set up the simplex: vertex 0 is the initial guess, vertex i+1
    # offsets coordinate i by its search scale.
    simplex = [0]*(nvar+1)  # set the initial simplex
    simplex[0] = var[:]
    for i in xrange(nvar):
        simplex[i+1] = var[:]
        simplex[i+1][i] += scale[i]
    fvalue = []
    for i in xrange(nsimplex):  # set the function values for the simplex
        fvalue.append(func(simplex[i],data=data))
    # Ooze the simplex to the maximum
    iteration = 0
    while 1:
        # find the index of the best and worst vertices in the simplex
        ssworst = 0
        ssbest = 0
        for i in xrange(nsimplex):
            if fvalue[i] > fvalue[ssbest]:
                ssbest = i
            if fvalue[i] < fvalue[ssworst]:
                ssworst = i
        # get the average of the nsimplex-1 best vertices in the simplex
        pavg = [0.0]*nvar
        for i in xrange(nsimplex):
            if i != ssworst:
                for j in range(nvar): pavg[j] += simplex[i][j]
        for j in xrange(nvar): pavg[j] = pavg[j]/nvar # nvar is nsimplex-1
        # simscale: mean scaled distance of the worst vertex from the centroid,
        # used as the x-convergence measure.
        simscale = 0.0
        for i in range(nvar):
            simscale += abs(pavg[i]-simplex[ssworst][i])/scale[i]
        simscale = simscale/nvar
        # find the range of the function values
        fscale = (abs(fvalue[ssbest])+abs(fvalue[ssworst]))/2.0
        if fscale != 0.0:
            frange = abs(fvalue[ssbest]-fvalue[ssworst])/fscale
        else:
            frange = 0.0  # all the fvalues are zero in this case
        # have we converged?
        if (((ftolerance <= 0.0 or frange < ftolerance) and # converged to maximum
            (xtolerance <= 0.0 or simscale < xtolerance)) or # simplex contracted enough
            (itmax and iteration >= itmax)): # ran out of iterations
            return simplex[ssbest],fvalue[ssbest],iteration
        # reflect the worst vertex through the centroid of the others
        pnew = [0.0]*nvar
        for i in xrange(nvar):
            pnew[i] = 2.0*pavg[i] - simplex[ssworst][i]
        fnew = func(pnew,data=data)
        if fnew <= fvalue[ssworst]:
            # the new vertex is worse than the worst so shrink
            # the simplex.
            for i in xrange(nsimplex):
                if i != ssbest and i != ssworst:
                    for j in xrange(nvar):
                        simplex[i][j] = 0.5*simplex[ssbest][j] + 0.5*simplex[i][j]
                    fvalue[i] = func(simplex[i],data=data)
            # replace the reflected point by the midpoint of best and worst
            for j in xrange(nvar):
                pnew[j] = 0.5*simplex[ssbest][j] + 0.5*simplex[ssworst][j]
            fnew = func(pnew,data=data)
        elif fnew >= fvalue[ssbest]:
            # the new vertex is better than the best so expand
            # the simplex.
            pnew2 = [0.0]*nvar
            for i in xrange(nvar):
                pnew2[i] = 3.0*pavg[i] - 2.0*simplex[ssworst][i]
            fnew2 = func(pnew2,data=data)
            if fnew2 > fnew:
                # accept the new vertex in the simplexe
                pnew = pnew2
                fnew = fnew2
        # replace the worst vertex with the new vertex
        for i in xrange(nvar):
            simplex[ssworst][i] = pnew[i]
        fvalue[ssworst] = fnew
        iteration += 1
        #print "Iteration:",iteration," ",ssbest," ",fvalue[ssbest]
def amoeba_multi_level(var, scale, func, ftolerance=1.e-4, xtolerance=1.e-4, itmax=500, data=None):
    """
    Commented by Zhengfan Yang on 05/01/07

    I made some change to the original amoeba so that it can now pass out some values
    calculated by func other than the criteria. This is important in multi-level
    amoeba refinement because otherwise, upper level refinement will lose the information
    of lower level refinement.

    Variant of amoeba() above: func must return (criterion, passout); the
    passout of the best vertex is returned as a fourth result.
    NOTE: Python 2 code (xrange). Initial fvalue entries are lists
    [result, passout] while later updates store func's return directly —
    presumably func returns a 2-sequence, so indexing [0]/[1] stays valid;
    TODO confirm against callers.
    """
    #print " ENTER AMOEBA MULTI LEVEL"
    nvar = len(var)                  # number of variables in the minimization
    nsimplex = nvar + 1              # number of vertices in the simplex
    # first set up the simplex
    simplex = [0]*(nvar+1)  # set the initial simplex
    simplex[0] = var[:]
    for i in xrange(nvar):
        simplex[i+1] = var[:]
        simplex[i+1][i] += scale[i]
    fvalue = []
    for i in xrange(nsimplex):  # set the function values for the simplex
        result, passout = func(simplex[i], data=data)
        #print " amoeba setting ",i,simplex[i],result, passout
        fvalue.append([result, passout])
    # Ooze the simplex to the maximum
    iteration = 0
    while 1:
        # find the index of the best and worst vertices in the simplex
        ssworst = 0
        ssbest = 0
        for i in xrange(nsimplex):
            if fvalue[i][0] > fvalue[ssbest][0]:
                ssbest = i
            if fvalue[i][0] < fvalue[ssworst][0]:
                ssworst = i
        # get the average of the nsimplex-1 best vertices in the simplex
        pavg = [0.0]*nvar
        for i in xrange(nsimplex):
            if i != ssworst:
                for j in range(nvar): pavg[j] += simplex[i][j]
        for j in xrange(nvar): pavg[j] = pavg[j]/nvar # nvar is nsimplex-1
        # mean scaled distance of worst vertex from centroid (x-convergence)
        simscale = 0.0
        for i in range(nvar):
            simscale += abs(pavg[i]-simplex[ssworst][i])/scale[i]
        simscale = simscale/nvar
        # find the range of the function values
        fscale = (abs(fvalue[ssbest][0])+abs(fvalue[ssworst][0]))/2.0
        if fscale != 0.0:
            frange = abs(fvalue[ssbest][0]-fvalue[ssworst][0])/fscale
        else:
            frange = 0.0  # all the fvalues are zero in this case
        # have we converged?
        if (((ftolerance <= 0.0 or frange < ftolerance) and # converged to maximum
            (xtolerance <= 0.0 or simscale < xtolerance)) or # simplex contracted enough
            (itmax and iteration >= itmax)): # ran out of iterations
            return simplex[ssbest],fvalue[ssbest][0],iteration,fvalue[ssbest][1]
        # reflect the worst vertex through the centroid
        pnew = [0.0]*nvar
        for i in xrange(nvar):
            pnew[i] = 2.0*pavg[i] - simplex[ssworst][i]
        fnew = func(pnew,data=data)
        if fnew[0] <= fvalue[ssworst][0]:
            # the new vertex is worse than the worst so shrink
            # the simplex.
            for i in xrange(nsimplex):
                if i != ssbest and i != ssworst:
                    for j in xrange(nvar):
                        simplex[i][j] = 0.5*simplex[ssbest][j] + 0.5*simplex[i][j]
                    fvalue[i] = func(simplex[i],data=data)
            for j in xrange(nvar):
                pnew[j] = 0.5*simplex[ssbest][j] + 0.5*simplex[ssworst][j]
            fnew = func(pnew, data=data)
        elif fnew[0] >= fvalue[ssbest][0]:
            # the new vertex is better than the best so expand
            # the simplex.
            pnew2 = [0.0]*nvar
            for i in xrange(nvar):
                pnew2[i] = 3.0*pavg[i] - 2.0*simplex[ssworst][i]
            fnew2 = func(pnew2,data=data)
            if fnew2[0] > fnew[0]:
                # accept the new vertex in the simplexe
                pnew = pnew2
                fnew = fnew2
        # replace the worst vertex with the new vertex
        for i in xrange(nvar):
            simplex[ssworst][i] = pnew[i]
        fvalue[ssworst] = fnew
        iteration += 1
        #print "Iteration:",iteration," ",ssbest," ",fvalue[ssbest]
def golden(func, args=(), brack=None, tol=1.e-4, full_output=0):
    """ Given a function of one-variable and a possible bracketing interval,
    return the minimum of the function isolated to a fractional precision of
    tol. A bracketing interval is a triple (a,b,c) where (a<b<c) and
    func(b) < func(a),func(c). If bracket is two numbers then they are
    assumed to be a starting interval for a downhill bracket search
    (see bracket)

    Uses analog of bisection method to decrease the bracketed interval.

    NOTE: Python 2 code (apply(), raise-with-comma); adapted from
    scipy.optimize's golden-section search.
    """
    if brack is None:
        xa,xb,xc,fa,fb,fc,funcalls = bracket(func, args=args)
    elif len(brack) == 2:
        xa,xb,xc,fa,fb,fc,funcalls = bracket(func, xa=brack[0], xb=brack[1], args=args)
    elif len(brack) == 3:
        xa,xb,xc = brack
        if (xa > xc): # swap so xa < xc can be assumed
            dum = xa; xa=xc; xc=dum
        assert ((xa < xb) and (xb < xc)), "Not a bracketing interval."
        fa = apply(func, (xa,)+args)
        fb = apply(func, (xb,)+args)
        fc = apply(func, (xc,)+args)
        assert ((fb<fa) and (fb < fc)), "Not a bracketing interval."
        funcalls = 3
    else:
        raise ValueError, "Bracketing interval must be length 2 or 3 sequence."
    # golden ratio constants: _gR ~ 1/phi, _gC = 1 - 1/phi
    _gR = 0.61803399
    _gC = 1.0-_gR
    x3 = xc
    x0 = xa
    # place the first interior probe in the larger of the two sub-intervals
    if (abs(xc-xb) > abs(xb-xa)):
        x1 = xb
        x2 = xb + _gC*(xc-xb)
    else:
        x2 = xb
        x1 = xb - _gC*(xb-xa)
    f1 = apply(func, (x1,)+args)
    f2 = apply(func, (x2,)+args)
    funcalls += 2
    # shrink [x0, x3] keeping the golden ratio between probe points
    while (abs(x3-x0) > tol*(abs(x1)+abs(x2))):
        if (f2 < f1):
            x0 = x1; x1 = x2; x2 = _gR*x1 + _gC*x3
            f1 = f2; f2 = apply(func, (x2,)+args)
        else:
            x3 = x2; x2 = x1; x1 = _gR*x2 + _gC*x0
            f2 = f1; f1 = apply(func, (x1,)+args)
        funcalls += 1
    # report whichever interior point ended up lower
    if (f1 < f2):
        xmin = x1
        fval = f1
    else:
        xmin = x2
        fval = f2
    if full_output:
        return xmin, fval, funcalls
    else:
        return xmin
def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000):
    """Given a function and distinct initial points, search in the downhill
    direction (as defined by the initital points) and return new points
    xa, xb, xc that bracket the minimum of the function:
    f(xa) > f(xb) < f(xc)

    Returns (xa, xb, xc, fa, fb, fc, funcalls).
    NOTE: Python 2 code (apply(), raise-with-comma); adapted from
    scipy.optimize's bracket().
    """
    _gold = 1.618034
    _verysmall_num = 1e-21
    fa = apply(func, (xa,)+args)
    fb = apply(func, (xb,)+args)
    if (fa < fb): # Switch so fa > fb
        dum = xa; xa = xb; xb = dum
        dum = fa; fa = fb; fb = dum
    # first probe: golden-ratio step past xb
    xc = xb + _gold*(xb-xa)
    fc = apply(func, (xc,)+args)
    funcalls = 3
    iter = 0
    while (fc < fb):
        # parabolic extrapolation through (xa,fa), (xb,fb), (xc,fc)
        tmp1 = (xb - xa)*(fb-fc)
        tmp2 = (xb - xc)*(fb-fa)
        val = tmp2-tmp1
        # guard the denominator against (near-)zero curvature
        if abs(val) < _verysmall_num:
            denom = 2.0*_verysmall_num
        else:
            denom = 2.0*val
        w = xb - ((xb-xc)*tmp2-(xb-xa)*tmp1)/denom
        wlim = xb + grow_limit*(xc-xb)
        if iter > maxiter:
            raise RuntimeError, "Too many iterations."
        iter += 1
        if (w-xc)*(xb-w) > 0.0:
            # parabolic minimum lies between xb and xc
            fw = apply(func, (w,)+args)
            funcalls += 1
            if (fw < fc):
                xa = xb; xb=w; fa=fb; fb=fw
                return xa, xb, xc, fa, fb, fc, funcalls
            elif (fw > fb):
                xc = w; fc=fw
                return xa, xb, xc, fa, fb, fc, funcalls
            w = xc + _gold*(xc-xb)
            fw = apply(func, (w,)+args)
            funcalls += 1
        elif (w-wlim)*(wlim-xc) >= 0.0:
            # parabolic step went past the growth limit: clamp to wlim
            w = wlim
            fw = apply(func, (w,)+args)
            funcalls += 1
        elif (w-wlim)*(xc-w) > 0.0:
            # parabolic minimum is beyond xc but within the limit
            fw = apply(func, (w,)+args)
            funcalls += 1
            if (fw < fc):
                xb=xc; xc=w; w=xc+_gold*(xc-xb)
                fb=fc; fc=fw; fw=apply(func, (w,)+args)
                funcalls += 1
        else:
            # fall back to a plain golden-ratio step
            w = xc + _gold*(xc-xb)
            fw = apply(func, (w,)+args)
            funcalls += 1
        # slide the bracket downhill and repeat
        xa=xb; xb=xc; xc=w
        fa=fb; fb=fc; fc=fw
    return xa, xb, xc, fa, fb, fc, funcalls
def ce_fit(inp_image, ref_image, mask_image):
    """ Fit the histogram of the input image under mask with the reference image.
        Usage : ce_fit(inp_image,ref_image,mask_image):
         A and B, number of iterations and the chi-square

    Returns [ [labels..., A/B, ..., chi-square, ..., iterations],
              "Corrected Image :", corrected_image ] where the corrected
    image is inp_image*A + B.
    """
    # Util.histc supplies the starting point (args), search scale and the
    # histogram data consumed by hist_func (defined elsewhere in this module;
    # NOTE(review): not visible here — confirm it is the amoeba criterion).
    hist_res = Util.histc(ref_image, inp_image, mask_image)
    args = hist_res["args"]
    scale = hist_res["scale"]
    data = [hist_res['data'], inp_image, hist_res["ref_freq_bin"], mask_image, int(hist_res['size_img']), hist_res['hist_len']]
    # amoeba maximizes, so the chi-square reported below is negated back.
    res = amoeba(args, scale, hist_func, 1.e-4, 1.e-4, 500, data)
    resu = ["Final Parameter [A,B]:", res[0], "Final Chi-square :", -1*res[1], "Number of Iteration :", res[2]]
    corrected_image = inp_image*res[0][0] + res[0][1]
    result = [resu,"Corrected Image :",corrected_image]
    # empty the work lists in place before returning
    del data[:], args[:], scale[:]
    return result
def center_2D(image_to_be_centered, center_method = 1, searching_range = -1, Gauss_radius_inner = 2, Gauss_radius_outter = 7, self_defined_reference = None):
    """
    Put an input image into image center (nx/2, ny/2) using method :
    1. phase_cog
    2. cross-correlate with Gaussian function
    3. cross-correlate with donut shape image
    4. cross-correlate with reference image provided by user
    5. cross-correlate with self-rotated average
    The function will return centered_image, and shifts

    searching_range > 0 limits how large a shift is accepted (or the
    correlation-map window size); method 0 is a no-op passthrough.
    NOTE: Python 2 code (types.StringType); get_im/model_circle/model_gauss
    and Util come from the surrounding EMAN2/SPARX module.
    """
    from utilities import peak_search
    from fundamentals import fshift
    import types
    # accept a filename in place of an image object
    if type(image_to_be_centered) == types.StringType: image_to_be_centered = get_im(image_to_be_centered)
    if center_method == 0 : return image_to_be_centered,0.,0.
    elif center_method == 1 :
        # center of gravity of the phases; reject implausibly large shifts
        cs = image_to_be_centered.phase_cog()
        if searching_range > 0 :
            if(abs(cs[0]) > searching_range): cs[0]=0.0
            if(abs(cs[1]) > searching_range): cs[1]=0.0
        return fshift(image_to_be_centered, -cs[0], -cs[1]), cs[0], cs[1]
    elif center_method == 5:
        # iterate: correlate with the rotational average until the residual
        # shift per step drops below one pixel; accumulate total shift
        from fundamentals import rot_avg_image,ccf
        from math import sqrt
        not_centered = True
        tmp_image = image_to_be_centered.copy()
        shiftx = 0
        shifty = 0
        while (not_centered):
            reference = rot_avg_image(tmp_image)
            ccmap = ccf(tmp_image, reference)
            if searching_range > 0: ccmap = Util.window(ccmap, searching_range, searching_range, 1, 0, 0, 0)
            peak = peak_search(ccmap)
            centered_image = fshift(tmp_image, -peak[0][4], -peak[0][5])
            if sqrt(peak[0][4]**2 + peak[0][5]**2) < 1. : not_centered = False
            else : tmp_image = centered_image.copy()
            shiftx += peak[0][4]
            shifty += peak[0][5]
        return centered_image, shiftx, shifty
    elif center_method == 6:
        # threshold away background (below mean+sigma) before the phase cog
        from morphology import threshold_to_minval
        nx = image_to_be_centered.get_xsize()
        ny = image_to_be_centered.get_ysize()
        r = nx//2-2
        mask = model_circle(r, nx, ny)
        [mean, sigma, xmin, xmax] = Util.infomask(image_to_be_centered, mask, True)
        new_image = threshold_to_minval(image_to_be_centered, mean+sigma)
        cs = new_image.phase_cog()
        if searching_range > 0 :
            if(abs(cs[0]) > searching_range): cs[0]=0.0
            if(abs(cs[1]) > searching_range): cs[1]=0.0
        return fshift(image_to_be_centered, -cs[0], -cs[1]), cs[0], cs[1]
    else :
        # methods 2-4: cross-correlate with a synthetic or user reference
        nx = image_to_be_centered.get_xsize()
        ny = image_to_be_centered.get_ysize()
        from fundamentals import ccf
        if center_method == 2 :
            reference = model_gauss(Gauss_radius_inner, nx, ny)
        if center_method == 3 :
            # donut = difference of two normalized Gaussians
            do1 = model_gauss(Gauss_radius_outter, nx, ny)
            do2 = model_gauss(Gauss_radius_inner, nx, ny)
            s = Util.infomask(do1, None, True)
            do1/= s[3]
            s = Util.infomask(do2, None, True)
            do2/=s[3]
            reference = do1 - do2
        if center_method == 4: reference = self_defined_reference
        ccmap = ccf(image_to_be_centered, reference)
        if searching_range > 1: ccmap = Util.window(ccmap, searching_range, searching_range, 1, 0, 0, 0)
        peak = peak_search(ccmap)
        return fshift(image_to_be_centered, -peak[0][4], -peak[0][5]), peak[0][4], peak[0][5]
def common_line_in3D(phiA,thetaA,phiB,thetaB):
    """Find the position of the common line in 3D.

    Formula is (RB^T zhat) cross (RA^T zhat); psi angles are irrelevant.
    Returns (phi, theta) of the common line in degrees; returns (0.0, 0.0)
    when the two directions are (nearly) parallel.
    """
    from math import pi, sqrt, cos, sin, asin, atan2
    deg2rad = pi/180.0
    p1 = phiA*deg2rad
    t1 = thetaA*deg2rad
    p2 = phiB*deg2rad
    t2 = thetaB*deg2rad
    # components of the cross product of the two viewing directions
    nx = sin(t1)*cos(p1)*sin(p2) - sin(t2)*sin(p1)*cos(p2)
    ny = sin(t1)*cos(t2)*cos(p1)*cos(p2) - cos(t1)*sin(t2)*cos(p1)*cos(p2)
    nz = cos(t2)*sin(p1)*cos(p2) - cos(t1)*cos(p1)*sin(p2)
    norm = nx*nx + ny*ny + nz*nz
    if norm < 1e-5:
        return 0.0, 0.0
    # flip to the upper hemisphere so theta stays below 90 degrees
    if nz < 0:
        nx, ny, nz = -nx, -ny, -nz
    phiCom = asin(nz/sqrt(norm))
    thetaCom = atan2(ny, nx)
    return phiCom*180.0/pi, thetaCom*180.0/pi
def compose_transform2(alpha1, sx1, sy1, scale1, alpha2, sx2, sy2, scale2):
    """Compose two 2D transformations: vnew = T2*T1 vold.

    T1 is (alpha1, sx1, sy1, scale1), T2 likewise; angles in degrees.
    Returns (alpha, tx, ty, scale) of the composition.
    """
    first = Transform({"type":"2D","alpha":alpha1,"tx":sx1,"ty":sy1,"mirror":0,"scale":scale1})
    second = Transform({"type":"2D","alpha":alpha2,"tx":sx2,"ty":sy2,"mirror":0,"scale":scale2})
    composed = (second*first).get_params("2D")
    return composed["alpha"], composed["tx"], composed["ty"], composed["scale"]
def compose_transform3(phi1,theta1,psi1,sx1,sy1,sz1,scale1,phi2,theta2,psi2,sx2,sy2,sz2,scale2):
    """Compose two 3D (SPIDER-convention) transformations: result = T2*T1.

    Angles in degrees; returns (phi, theta, psi, tx, ty, tz, scale).
    """
    first = Transform({"type":"spider","phi":float(phi1),"theta":float(theta1),"psi":float(psi1),"tx":float(sx1),"ty":float(sy1),"tz":float(sz1),"mirror":0,"scale":float(scale1)})
    second = Transform({"type":"spider","phi":float(phi2),"theta":float(theta2),"psi":float(psi2),"tx":float(sx2),"ty":float(sy2),"tz":float(sz2),"mirror":0,"scale":float(scale2)})
    composed = (second*first).get_params("spider")
    return composed["phi"], composed["theta"], composed["psi"], composed["tx"], composed["ty"], composed["tz"], composed["scale"]
def combine_params2(alpha1, sx1, sy1, mirror1, alpha2, sx2, sy2, mirror2):
    """Combine two sets of 2D alignment parameters, mirror flags included.

    Returns (alpha, tx, ty, mirror) of the composed transform T2*T1.
    """
    first = Transform({"type":"2D","alpha":alpha1,"tx":sx1,"ty":sy1,"mirror":mirror1,"scale":1.0})
    second = Transform({"type":"2D","alpha":alpha2,"tx":sx2,"ty":sy2,"mirror":mirror2,"scale":1.0})
    combined = (second*first).get_params("2D")
    return combined["alpha"], combined["tx"], combined["ty"], combined["mirror"]
def create_spider_doc(fname, spiderdoc):
    """Convert a text file that is composed of columns of numbers into spider doc file.

    fname:     input text file; the first line fixes how many columns are
               converted per row (extra tokens on later lines are kept as
               raw strings — legacy behavior preserved).
    spiderdoc: path of the SPIDER doc file to write via drop_spider_doc.
    """
    # 'with' guarantees the handle is closed even on a parse error (the
    # original left it open); float() replaces string.atof, which was
    # removed from the standard library in Python 3.
    with open(fname, "r") as infile:
        lines = infile.readlines()
    nmc = len(lines[0].split())
    table = []
    for line in lines:
        data = line.split()
        for i in range(0, nmc):
            data[i] = float(data[i])
        table.append(data)
    drop_spider_doc(spiderdoc, table)
def drop_image(imagename, destination, itype="h"):
    """Write an image to the disk.

    Usage:  drop_image(name_of_existing_image, "path/to/image",
           type = <type>)
    <type> is "h" (hdf) or "s" (spider); anything else (or a non-string
    destination) is reported via ERROR.
    """
    if type(destination) == type(""):
        if itype == "h":
            out_format = EMUtil.ImageType.IMAGE_HDF
        elif itype == "s":
            out_format = EMUtil.ImageType.IMAGE_SINGLE_SPIDER
        else:
            ERROR("unknown image type","drop_image",1)
        imagename.write_image(destination, 0, out_format)
    else:
        ERROR("destination is not a file name","drop_image",1)
def drop_png_image(im, trg):
    """Write an image with the proper png save
    Usage: drop_png_image(name_of_existing_image, 'path/to/image.png')

    Sets render_min/render_max from the image's min/max so the PNG uses
    the full dynamic range.
    """
    # NOTE(review): the extension check slices trg before the type check
    # below confirms trg is a string — a non-string trg raises here first.
    if trg[-4:] != '.png':
        ERROR('destination name must be png extension', 'drop_png_image', 1)
    # basestring is Python 2 only (str in Python 3)
    if isinstance(trg, basestring):
        im['render_min'] = im['minimum']
        im['render_max'] = im['maximum']
        im.write_image(trg, 0)
    else:
        ERROR('destination is not a file name', 'drop_png_image', 1)
def drop_spider_doc(filename, data, comment = None):
    """Create a spider-compatible "Doc" file.

    filename: name of the Doc file
    data:     list whose items are either iterables of numbers (each written
              as one doc line) or single numbers (written as a 1-value line).
    comment:  optional text appended to the header line.
    """
    from datetime import datetime
    # 'with' ensures the file is flushed and closed even if formatting a
    # row raises (the original leaked the handle on that path).
    with open(filename, "w") as outf:
        outf.write(" ; %s %s %s\n" % (datetime.now().ctime(), filename, comment))
        count = 1  # start key from 1; otherwise, it is confusing...
        for dat in data:
            try:
                nvals = len(dat)
                # key width grows when a row has more than 5 values
                if nvals <= 5: datstrings = ["%5d %d" % (count, nvals)]
                else: datstrings = ["%6d %d" % (count, nvals)]
                for num in dat:
                    datstrings.append("%12.5g" % (num))
            except TypeError:
                # dat is a single number
                datstrings = ["%5d 1%12.5g" % (count, dat)]
            datstrings.append("\n")
            outf.write("".join(datstrings))
            count += 1
def dump_row(input, fname, ix=0, iz=0):
    """Output the data in slice iz, row ix of an image to file *fname*.

    Usage: dump_row(image, fname, ix, iz)
       or  dump_row("path/to/image", fname, ix, iz)
    One "<iy>\t<value>" line is written per y position.
    """
    image = get_image(input)
    ny = image.get_ysize()
    # context manager closes the file even if a get_value_at call raises;
    # unused locals (nx, nz, line) from the original were removed.
    with open(fname, "w") as fout:
        fout.write("# z = %d slice, x = %d row)\n" % (iz, ix))
        for iy in range(ny):
            fout.write("%d\t%12.5g\n" % (iy, image.get_value_at(ix, iy, iz)))
def even_angles(delta = 15.0, theta1=0.0, theta2=90.0, phi1=0.0, phi2=359.99, method = 'S', phiEqpsi = "Minus", symmetry='c1'):
"""Create a list of Euler angles suitable for projections.
method is either 'S' - for Saff algorithm
or 'P' - for Penczek '94 algorithm
'S' assumes phi1<phi2 and phi2-phi1>> delta ;
symmetry - if this is set to point-group symmetry (cn or dn) or helical symmetry with point-group symmetry (scn or sdn), it will yield angles from the asymmetric unit, not the specified range;
"""
from math import pi, sqrt, cos, acos, tan, sin
from utilities import even_angles_cd
from string import lower,split
angles = []
symmetryLower = symmetry.lower()
symmetry_string = split(symmetry)[0]
if (symmetry_string[0] == "c"):
if(phi2 == 359.99):
angles = even_angles_cd(delta, theta1, theta2, phi1, phi2/int(symmetry_string[1:]), method, phiEqpsi)
if(int(symmetry_string[1:]) > 1):
if( int(symmetry_string[1:])%2 ==0):
qt = 360.0/int(symmetry_string[1:])
else:
qt = 180.0/int(symmetry_string[1:])
n = len(angles)
for i in xrange(n):
t = n-i-1
if(angles[t][1] == 90.0):
if(angles[t][0] >= qt): del angles[t]
else:
angles = even_angles_cd(delta, theta1, theta2, phi1, phi2, method, phiEqpsi)
elif(symmetry_string[0] == "d"):
if(phi2 == 359.99):
angles = even_angles_cd(delta, theta1, theta2, phi1, 360.0/2/int(symmetry_string[1:]), method, phiEqpsi)
if (int(symmetry_string[1:])%2 == 0):
qt = 360.0/2/int(symmetry_string[1:])
else:
qt = 180.0/2/int(symmetry_string[1:])
n = len(angles)
for i in xrange(n):
t = n-i-1
if(angles[t][1] == 90.0):
if(angles[t][0] >= qt): del angles[t]
else:
angles = even_angles_cd(delta, theta1, theta2, phi1, phi2, method, phiEqpsi)
elif(symmetry_string[0] == "s"):
#if symetry is "s", deltphi=delta, theata intial=theta1, theta end=90, delttheta=theta2
# for helical, theta1 cannot be 0.0
if theta1 > 90.0:
ERROR('theta1 must be less than 90.0 for helical symmetry', 'even_angles', 1)
if theta1 == 0.0: theta1 =90.0
theta_number = int((90.0 - theta1)/theta2)
#for helical, symmetry = s or scn
cn = int(symmetry_string[2:])
for j in xrange(theta_number,-1, -1):
if( j == 0):
if (symmetry_string[1] =="c"):
if cn%2 == 0:
k=int(359.99/cn/delta)
else:
k=int(359.99/2/cn/delta)
elif (symmetry_string[1] =="d"):
if cn%2 == 0:
k=int(359.99/2/cn/delta)
else:
k=int(359.99/4/cn/delta)
else:
ERROR("For helical strucutre, we only support scn and sdn symmetry","even_angles",1)
else:
if (symmetry_string[1] =="c"):
k=int(359.99/cn/delta)
elif (symmetry_string[1] =="d"):
k=int(359.99/2/cn/delta)
for i in xrange(k+1):
angles.append([i*delta,90.0-j*theta2,90.0])
else : # This is very close to the Saff even_angles routine on the asymmetric unit;
# the only parameters used are symmetry and delta
# The formulae are given in the Transform Class Paper
# The symmetric unit nVec=[]; # x,y,z triples
# is defined by three points b,c, v of Fig 2 of the paper
# b is (0,0,1)
# c is (sin(thetac),0,cos(thetac))
# a is (sin(thetac)cos(Omega),sin(thetac)cos(Omega),cos(thetac))
# f is the normalized sum of all 3
# The possible symmetries are in list_syms
# The symmetry determines thetac and Omega
# The spherical area is Omega - pi/3;
# should be equal to 4 *pi/(3*# Faces)
#
# symmetry ='tet'; delta = 6;
scrunch = 0.9 # closeness factor to eliminate oversampling corners
#nVec=[] # x,y,z triples
piOver = pi/180.0
Count=0 # used to count the number of angles
if (symmetryLower[0:3] =="tet"): m=3.0; fudge=0.9 # fudge is a factor used to adjust phi steps
elif (symmetryLower[0:3] =="oct"): m=4.0; fudge=0.8
elif (symmetryLower[0:3] =="ico"): m=5.0; fudge=0.95
else: ERROR("allowable symmetries are cn, dn, tet, oct, icos","even_angles",1)
n=3.0
OmegaR = 2.0*pi/m; cosOmega= cos(OmegaR)
Edges = 2.0*m*n/(2.0*(m+n)-m*n)
Faces = 2*Edges/n
Area = 4*pi/Faces/3.0; # also equals 2*pi/3 + Omega
costhetac = cosOmega/(1-cosOmega)
deltaRad= delta*pi/180
NumPoints = int(Area/(deltaRad*deltaRad))
fheight = 1/sqrt(3)/ (tan(OmegaR/2.0))
z0 = costhetac # initialize loop
z = z0
phi = 0
Deltaz = (1-costhetac)/(NumPoints-1)
#[1, phi,180.0*acos(z)/pi,0.]
anglesLast = [phi,180.0*acos(z)/pi,0.]
angles.append(anglesLast)
nLast= [ sin(acos(z))*cos(phi*piOver) , sin(acos(z))*sin(phi*piOver) , z]
nVec = []
nVec.append(nLast)
Count +=1
for k in xrange(1,(NumPoints-1)):
z=z0 + Deltaz*k # Is it higher than fhat or lower
r= sqrt(1-z*z)
if (z > fheight): phiRmax= OmegaR/2.0
if (z<= fheight):
thetaR = acos(z);
cosStuff = (cos(thetaR)/sin(thetaR))*sqrt(1. - 2 *cosOmega);
phiMax = 180.0*( OmegaR - acos(cosStuff))/pi
angleJump = fudge* delta/r
phi = (phi + angleJump)%(phiMax)
anglesNew = [phi,180.0*acos(z)/pi,0.];
nNew = [ sin(acos(z))*cos(phi*piOver) , sin(acos(z))*sin(phi*piOver) , z]
diffangleVec = [acos(nNew[0]*nVec[k][0] + nNew[1]*nVec[k][1] + nNew[2]*nVec[k][2] ) for k in xrange(Count)]
diffMin = min(diffangleVec)
if (diffMin>angleJump*piOver *scrunch):
Count +=1
angles.append(anglesNew)
nVec.append(nNew)
#[Count, phi,180*acos(z)/pi,0.]
anglesLast = anglesNew
nLast=nNew
angles.append( [0.0, 0.0, 0.0] )
nLast= [ 0., 0. , 1.]
nVec.append(nLast)
if(theta2 == 180.0): angles.append( [0.0, 180.0, 0.0] )
angles.reverse()
if(phiEqpsi == "Minus"):
for i in xrange(len(angles)): angles[i][2] = (720.0-angles[i][0])%360.0
#print(Count,NumPoints)
# look at the distribution
# Count =len(angles); piOver= pi/180.0;
# phiVec = [ angles[k][0] for k in range(Count)] ;
# thetaVec = [ angles[k][1] for k in range(Count)] ;
# xVec = [sin(piOver * angles[k][1]) * cos(piOver * angles[k][0]) for k in range(Count) ]
# yVec = [sin(piOver * angles[k][1])* sin(piOver * angles[k][0]) for k in range(Count) ]
# zVec = [cos(piOver * angles[k][1]) for k in range(Count) ]
# pylab.plot(yVec,zVec,'.'); pylab.show()
return angles
def even_angles_cd(delta, theta1=0.0, theta2=90.0, phi1=0.0, phi2=359.99, method = 'P', phiEQpsi='Minus'):
	"""Create a list of Euler angles [phi, theta, psi] suitable for projections.

	   delta          - angular step in degrees
	   theta1, theta2 - theta range in degrees
	   phi1, phi2     - phi range in degrees
	   method is either 'S' - for Saff algorithm
	                  or 'P' - for Penczek '94 algorithm
	         'S' assumes phi1<phi2 and phi2-phi1>> delta ;
	   phiEQpsi - set this to 'Minus', if you want psi=-phi;
	"""
	from math import pi, sqrt, cos, acos
	angles = []
	if (method == 'P'):
		# Penczek '94: delegate to the C++ implementation, which returns a flat
		# list [phi0, theta0, psi0, phi1, ...]; regroup into triples here
		temp = Util.even_angles(delta, theta1, theta2, phi1, phi2)
		#		                                              phi, theta, psi
		for i in xrange(len(temp)/3): angles.append([temp[3*i],temp[3*i+1],temp[3*i+2]]);
	else:              #elif (method == 'S'):
		# Saff: step z = cos(theta) uniformly and advance phi by delta/r so that
		# samples are spread approximately uniformly over the spherical wedge
		Deltaz = cos(theta2*pi/180.0)-cos(theta1*pi/180.0)
		s = delta*pi/180.0
		NFactor = 3.6/s
		wedgeFactor = abs(Deltaz*(phi2-phi1)/720.0)
		NumPoints = int(NFactor*NFactor*wedgeFactor)
		angles.append([phi1, theta1, 0.0])
		z1 = cos(theta1*pi/180.0); 	phi=phi1            # initialize loop
		for k in xrange(1,(NumPoints-1)):
			z=z1 + Deltaz*k/(NumPoints-1)
			r= sqrt(1-z*z)
			# wrap phi into the requested [phi1, phi2) window
			phi = phi1+(phi + delta/r -phi1)%(abs(phi2-phi1))
			#[k, phi,180*acos(z)/pi, 0]
			angles.append([phi, 180*acos(z)/pi, 0.0])
	#angles.append([p2,t2,0])  # This is incorrect, as the last angle is really the border, not the element we need. PAP 01/15/07
	if (phiEQpsi == 'Minus'):
		# psi = -phi, kept in [0, 360)
		for k in xrange(len(angles)): angles[k][2] = (720.0 - angles[k][0])%360.0
	if( theta2 == 180.0 ):  angles.append( [0.0, 180.0, 0.0] )
	return angles
def eigen_images_get(stack, eigenstack, mask, num, avg):
	"""
	Perform PCA on a stack file (images multiplied by mask first) and
	write up to `num` eigenimages to `eigenstack`.

	NOTE(review): the avg==1 path operates on the *analyzer* object `a`
	(`s = a`, `s += a`, `a -= s/nima`) rather than on the image `e`, and the
	initial `s = EMData()` is then discarded.  This looks suspicious --
	presumably the intent was to subtract the average image before analysis.
	Verify against the Analyzers API before relying on this branch.
	"""
	from utilities import get_image
	a = Analyzers.get('pca_large')
	e = EMData()
	if(avg == 1): s = EMData()
	nima = EMUtil.get_image_count(stack)
	for im in xrange(nima):
		e.read_image(stack,im)
		e *= mask
		a.insert_image(e)
		if( avg==1):
			if(im==0): s = a
			else: s += a
	if(avg == 1): a -= s/nima
	eigenimg = a.analyze()
	# never request more eigenimages than the analyzer actually produced
	if(num>= EMUtil.get_image_count(eigenimg)):
		num=EMUtil.get_image_count(eigenimg)
	for i in xrange(num): eigenimg.write_image(eigenstack,i)
def find_inplane_to_match(phiA,thetaA,phiB,thetaB,psiA=0,psiB=0):
	"""Find the z rotation such that
	    ZA RA is as close as possible to RB
	        this maximizes trace of ( RB^T ZA RA) = trace(ZA RA RB^T)

	Angles are in degrees, SPIDER convention.
	Returns (zrotation, theta_of_RA_RB^T).
	"""
	#from math import pi, sqrt, cos, acos, sin
	RA   = Transform({'type': 'spider', 'phi': phiA, 'theta': thetaA, 'psi': psiA})
	RB   = Transform({'type': 'spider', 'phi': phiB, 'theta': thetaB, 'psi': psiB})
	RBT  = RB.transpose()
	# decompose RA RB^T back into SPIDER Euler angles
	RABT = RA * RBT
	RABTeuler = RABT.get_rotation('spider')
	RABTphi   = RABTeuler['phi']
	RABTtheta = RABTeuler['theta']
	RABTpsi   = RABTeuler['psi']
	#deg_to_rad = pi/180.0
	#thetaAR = thetaA*deg_to_rad
	#thetaBR = thetaB*deg_to_rad
	#phiAR = phiA*deg_to_rad
	#phiBR = phiB *deg_to_rad
	#d12=cos(thetaAR)*cos(thetaBR) + sin(thetaAR)*sin(thetaBR)*cos(phiAR-phiBR)
	# the in-plane z rotation is -(psi+phi) of the composed rotation
	return (-RABTpsi-RABTphi),RABTtheta # 180.0*acos(d12)/pi;
def find(vv, cmp_str, n):
	"""Return the indices j for which vv[j] compares to n via cmp_str.

	cmp_str is one of 'lt', 'le', 'eq', 'ge', 'gt' (less-than, less-or-equal,
	equal, greater-or-equal, greater-than).  An unrecognized cmp_str yields
	an empty list.
	"""
	hits = []
	for idx, val in enumerate(vv):
		if ((cmp_str == 'lt' and val <  n) or
		    (cmp_str == 'le' and val <= n) or
		    (cmp_str == 'eq' and val == n) or
		    (cmp_str == 'ge' and val >= n) or
		    (cmp_str == 'gt' and val >  n)):
			hits.append(idx)
	return hits
def gauss_edge(sharp_edge_image, kernel_size = 7, gauss_standard_dev =3):
	"""
	Smooth sharp_edge_image with a Gaussian function:
	1. The sharp-edge image is convoluted with a Gaussian kernel
	2. The kernel is normalized (so that it sums to 1) before convolution

	kernel_size        - edge length of the (cubic/square/linear) kernel
	gauss_standard_dev - sigma of the Gaussian, in pixels
	Returns the smoothed image.
	"""
	from utilities import model_gauss
	from EMAN2 import rsconvolution
	# build a kernel of the same dimensionality as the input (1D/2D/3D)
	nz = sharp_edge_image.get_ndim()
	if(nz == 3):   kern = model_gauss(gauss_standard_dev, kernel_size , kernel_size, kernel_size)
	elif(nz == 2):  kern = model_gauss(gauss_standard_dev, kernel_size , kernel_size)
	else:          kern = model_gauss(gauss_standard_dev, kernel_size)
	aves = Util.infomask(kern, None, False)
	nx = kern.get_xsize()
	ny = kern.get_ysize()
	nz = kern.get_zsize()
	# mean * number_of_voxels == sum of the kernel; dividing normalizes it to unit sum
	kern /= (aves[0]*nx*ny*nz)
	return  rsconvolution(sharp_edge_image, kern)
def get_image(imagename, nx = 0, ny = 1, nz = 1, im = 0):
	"""Read an image from the disk or pass an existing image object through.

	imagename - either a file path (image number `im` is read from it),
	            a false value (a new, optionally sized, blank EMData is made),
	            or an existing image object (returned unchanged).
	"""
	if type(imagename) == type(""):
		# a file name: load image number im from disk
		img = EMData()
		img.read_image(imagename, im)
		return img
	if not imagename:
		# None (or other false value): create a fresh image, sized if requested
		img = EMData()
		if nx > 0:
			img.set_size(nx, ny, nz)
		return img
	# already an image object: hand it back as-is
	return imagename
def get_im(stackname, im = 0):
	"""Return image number `im`, read from a disk stack if `stackname` is a
	path, or copied out of an in-memory list of images otherwise.
	"""
	if type(stackname) != type(""):
		# in-memory list: return a copy so the caller cannot mutate the original
		return stackname[im].copy()
	img = EMData()
	img.read_image(stackname, im)
	return img
def get_image_data(img):
	"""
	Return a NumPy array containing the image data.
	Note: The NumPy array and the image data share the same memory,
	so if the NumPy array is altered then the image is altered
	as well (and vice versa).
	"""
	from EMAN2 import EMNumPy
	# zero-copy wrapper provided by the EMAN2 bindings
	return EMNumPy.em2numpy(img)
def get_inplane_angle(ima,ref, iring=1, fring=-1, ringstep=1, xtransSearch=0, ytransSearch=0, stp=1, center=1):
	"""
	Get the in_plane angle from two images
	and output the crosss correlation value
	The function won't destroy input two images
	This is the angle that rotates the first image, ima, into the second image, ref.
	The sense of the rotation is clockwise.
	center=1 means image is first centered, then rotation angle is found

	Returns (angle, sx, sy, mirror, peak) from the polar alignment.
	"""
	from alignment import Numrinit, ringwe, Applyws, ormq
	from filter import fshift
	first_ring=int(iring); last_ring=int(fring); rstep=int(ringstep); xrng=int(xtransSearch); yrng=int(ytransSearch); step=int(stp)
	nx=ima.get_xsize()
	if(last_ring == -1): last_ring=int(nx/2)-2
	# polar-coordinate origin (image center, 1-based convention)
	cnx = int(nx/2)+1
	cny = cnx
	mode = "F"
	#precalculate rings
	numr = Numrinit(first_ring, last_ring, rstep, mode)
	wr = ringwe(numr, mode)
	if(center==1):
		# center both images on their phase centers of gravity before aligning
		cs = [0.0]*2  # additio
		cs = ref.phase_cog()
		ref1 = fshift(ref, -cs[0], -cs[1])
		cimage=Util.Polar2Dm(ref1, cnx, cny, numr, mode)
		cs = ima.phase_cog()
		ima1 = fshift(ima, -cs[0], -cs[1])
	else:
		# work on a copy so the caller's image is left untouched
		ima1=ima.copy()
		cimage=Util.Polar2Dm(ref, cnx, cny, numr, mode)
	Util.Frngs(cimage, numr)
	Applyws(cimage, numr, wr)
	[angt, sxst, syst, mirrort, peakt]=ormq(ima1, cimage, xrng, yrng, step, mode, numr, cnx, cny)
	return angt,sxst, syst, mirrort, peakt
def get_sym(symmetry):
	"""Return a list of [phi, theta, psi] triples (SPIDER convention, degrees),
	one for every rotation of the given symmetry group.
	"""
	tr = Transform()
	angs = []
	for isym in range(tr.get_nsym(symmetry)):
		rot = tr.get_sym(symmetry, isym).get_rotation('spider')
		angs.append([rot['phi'], rot['theta'], rot['psi']])
	return angs
def get_textimage(fname):
	"""
	Return an image created from a text file.  The first line of
	the image should contain "nx ny nz" (separated by whitespace)
	All subsequent lines contain "ix iy iz val", where ix, iy,
	and iz are the integer x, y, and z coordinates of the point
	and val is the floating point value of that point.  All points
	not explicitly listed are set to zero.
	"""
	# int()/float() replace string.atoi/string.atof, which are deprecated
	# in Python 2 and removed in Python 3; "with" guarantees the file closes
	with open(fname) as infile:
		lines = infile.readlines()
	nx, ny, nz = [int(tok) for tok in lines[0].split()]
	e = EMData()
	e.set_size(nx, ny, nz)
	e.to_zero()
	for line in lines[1:]:
		tokens = line.split()
		ix = int(tokens[0])
		iy = int(tokens[1])
		iz = int(tokens[2])
		e[ix, iy, iz] = float(tokens[3])
	return e
def get_input_from_string(str_input):
	"""
	Extract the whitespace-separated numbers in str_input as a list of floats.

	str_input - string such as "1 2.5 -3"
	Returns a (possibly empty) list of floats.
	"""
	# str.split() replaces string.split(), which was removed in Python 3
	return [float(token) for token in str_input.split()]
def hist_func(args, data):
	# Objective function for histogram matching; args = [PA, PB] are the trial
	# parameters, and data presumably packs
	# [(ref_h_diff, ref_h_min), img_ptr, ref_freq_bin, mask_ptr, size_img, hist_len]
	# in the order consumed below -- verify against the callers.
	#Util.hist_comp_freq(float PA,float PB,int size_img, int hist_len, float *img_ptr, float *ref_freq_bin, float *mask_ptr, float ref_h_diff, float ref_h_min)
	return Util.hist_comp_freq(args[0],args[1],data[4],data[5],data[1],data[2],data[3],data[0][0],data[0][1])
def info(image, mask=None, Comment=""):
	"""Calculate and print the descriptive statistics of an image.
	Usage: [mean, sigma, xmin, xmax, nx, ny, nz =] info(image object)
	       or
	       [mean, sigma, xmin, xmax, nx, ny, nz =] info("path/image")
	Purpose: calculate basic statistical characteristics of an image.

	mask    - optional mask restricting the statistics to a region
	Comment - optional label printed before the statistics
	"""
	if(Comment):  print  " ***  ", Comment
	e = get_image(image)
	# Util.infomask returns [mean, sigma, min, max] under the (optional) mask
	[mean, sigma, imin, imax] = Util.infomask(e, mask, True)
	nx = e.get_xsize()
	ny = e.get_ysize()
	nz = e.get_zsize()
	if (e.is_complex()):
		s = ""
		if e.is_shuffled():
			s = " (shuffled)"
		if (e.is_fftodd()):
			print "Complex odd image%s: nx = %i, ny = %i, nz = %i" % (s, nx, ny, nz)
		else:
			print "Complex even image%s: nx = %i, ny = %i, nz = %i" % (s, nx, ny, nz)
	else:
		print "Real image: nx = %i, ny = %i, nz = %i" % (nx, ny, nz)
	print "avg = %g, std dev = %g, min = %g, max = %g" % (mean, sigma, imin, imax)
	return mean, sigma, imin, imax, nx, ny, nz
def image_decimate(img, decimation=2, fit_to_fft=1,frequency_low=0, frequency_high=0):
	from filter import filt_btwl
	from fundamentals import smallprime, window2d
	from utilities import get_image
	"""
	Window image to FFT-friendly size, apply Butterworth low pass filter,
	and decimate 2D image

	img            - image object or path to one
	decimation     - integer reduction factor (> 1)
	fit_to_fft     - if true, window to a small-prime-friendly size first
	frequency_low/high - Butterworth band edges; derived from the decimation
	                     factor when frequency_low is not given
	"""
	if type(img)     == str:	img=get_image(img)
	if decimation    <= 1  :  	ERROR("Improper decimation ratio", "image_decimation", 1)
	if frequency_low <= 0  :	
		# default band edges straddle the new Nyquist frequency 0.5/decimation
		frequency_low  = .5/decimation- .05
		if frequency_low <= 0: ERROR("Butterworth passband frequency is too low", "image_decimation", 1)
		frequency_high = .5/decimation+ .05
	if fit_to_fft:
		# window to sizes whose prime factors are small, so FFTs stay fast
		nx_d = (img.get_xsize())/int(decimation)
		ny_d = (img.get_ysize())/int(decimation)
		nx_fft_d = smallprime(int(nx_d))
		ny_fft_d = smallprime(int(ny_d))
		nx_fft_m = nx_fft_d*int(decimation)
		ny_fft_m = ny_fft_d*int(decimation)
		e = window2d(img, nx_fft_m, ny_fft_m, "l")
		e1 = filt_btwl(e, frequency_low, frequency_high)
		img = Util.decimate(e1, int(decimation), int(decimation), 1)
	else:
		e1 = filt_btwl(img, frequency_low, frequency_high)
		img = Util.decimate(e1, int(decimation), int(decimation), 1)
	return 	img
def inverse_transform2(alpha, tx = 0.0, ty = 0.0, mirror = 0):
	"""Return the inverse of a 2D rotation/shift/mirror transform.
	Usage: nalpha, ntx, nty, mirror = inverse_transform2(alpha, tx, ty, mirror)
	Angle in degrees.
	"""
	fwd = Transform({"type":"2D","alpha":alpha,"tx":tx,"ty":ty,"mirror":mirror,"scale":1.0})
	inv = fwd.inverse().get_params("2D")
	return inv["alpha"], inv["tx"], inv["ty"], inv["mirror"]
def inverse_transform3(phi, theta=0.0, psi=0.0, tx=0.0, ty=0.0, tz=0.0, mirror = 0, scale=1.0):
	"""Return the inverse of a 3D rotation/shift/mirror/scale transform.
	Usage: nphi,ntheta,npsi,ntx,nty,ntz,nmirror,nscale = inverse_transform3(...)
	Angles in degrees, SPIDER convention.
	"""
	fwd = Transform({'type': 'spider', 'phi': phi, 'theta': theta, 'psi': psi, 'tx': tx, 'ty': ty, 'tz': tz, "mirror":mirror,"scale":scale})
	inv = fwd.inverse().get_params("spider")
	return inv["phi"], inv["theta"], inv["psi"], inv["tx"], inv["ty"], inv["tz"], inv["mirror"], inv["scale"]
def list_syms():
	"""Return the list of symmetry-type identifiers supported by the code."""
	return ["CSYM", "DSYM", "TET_SYM", "OCT_SYM", "ICOS_SYM", "ISYM"]
#### -----M--------
def model_circle(r, nx, ny, nz=1):
	"""
	Create an nx x ny x nz image containing a centered disc (2D) or
	sphere (3D) of radius r, filled with 1 (0 elsewhere).
	"""
	e = EMData()
	e.set_size(nx, ny, nz)
	e.process_inplace("testimage.circlesphere", {"radius":r, "fill":1})
	return e
def model_square(d, nx, ny, nz=1):
	"""
	Create an nx x ny x nz image containing a centered square (2D) or
	cube (3D) with edge length d, filled with 1 (0 elsewhere).
	"""
	e = EMData()
	e.set_size(nx, ny, nz)
	e.process_inplace("testimage.squarecube", {"edge_length":d, "fill":1})
	return e
def model_gauss(xsigma, nx, ny=1, nz=1, ysigma=None, zsigma=None, xcenter=None, ycenter=None, zcenter=None):
	"""
	Create an nx x ny x nz image containing a Gaussian centered at
	(xcenter, ycenter, zcenter) -- defaulting to the image center -- with
	standard deviations (xsigma, ysigma, zsigma); ysigma and zsigma
	default to xsigma.
	"""
	e = EMData()
	e.set_size(nx, ny, nz)
	# "is None" is the correct identity test for the None singleton
	# (behavior is identical to the previous "== None" comparisons)
	if ysigma  is None: ysigma  = xsigma
	if zsigma  is None: zsigma  = xsigma
	if xcenter is None: xcenter = nx//2
	if ycenter is None: ycenter = ny//2
	if zcenter is None: zcenter = nz//2
	e.process_inplace("testimage.puregaussian", {"x_sigma":xsigma,"y_sigma":ysigma,"z_sigma":zsigma,"x_center":xcenter,"y_center":ycenter,"z_center":zcenter} )
	return e
def model_cylinder(radius, nx, ny, nz):
	"""
	Create an nx x ny x nz image containing a cylinder of the given radius
	with its axis along z.
	"""
	e = EMData()
	e.set_size(nx, ny, nz)
	e.process_inplace("testimage.cylinder", {"radius":radius})
	return  e
def model_gauss_noise(sigma, nx, ny=1, nz=1):
	"""
	Create an nx x ny x nz image of Gaussian noise with standard deviation
	"sigma" and average 0.
	"""
	e = EMData()
	e.set_size(nx, ny, nz)
	e.process_inplace("testimage.noise.gauss", {"sigma":sigma})
	return e
def model_blank(nx, ny=1, nz=1, bckg = 0.0):
	"""
	Create an nx x ny x nz image uniformly filled with the value bckg
	(zero by default).
	"""
	e = EMData()
	e.set_size(nx, ny, nz)
	e.to_zero()
	# only shift away from zero when a non-zero background is requested
	if( bckg != 0.0):  e+=bckg
	return e
def set_seed(sde):
	# Seed Python's `random` module, then run the EMAN2 Gaussian-noise
	# processor once on a throwaway 1x1x1 image with the same seed --
	# presumably this seeds the processor's internal RNG globally (verify
	# against the EMAN2 processor documentation).
	from random import seed
	seed(int(sde))
	e = EMData()
	e.set_size(1,1,1)
	e.process_inplace("testimage.noise.gauss", {"sigma":1.0, "seed":int(sde)})
###----P-------
def parse_spider_fname(mystr, *fieldvals):
	"""
	Parse a Spider filename string and insert parameters.

	Example input: "foo{***}/img{****}.mrc"
	This string has two fields that should be replaced by integers,
	and the number of '*'s determines how "wide" that field should be.
	So, if the parameters to be inserted are 10 and 3, then the resulting
	filename should be "foo010/img0003.mrc".

	Note: If the image is a stack file, the last character in the string
	must be a '@' (except for possible extraneous whitespace, which is
	ignored).  This stack symbol will be stripped in the output filename.

	Example:
	   In [1]: mystr = "foo{***}/img{****}.mrc"
	   In [2]: parse_spider_fname(mystr, 10, 3)
	   Out[2]: 'foo010/img0003.mrc'

	@param mystr Spider filename string to be parsed
	@param fieldvals Integer values to be placed into the fields
	@return Parsed filename
	@raise ValueError on a misplaced '@', a malformed field, or a field /
	       value count mismatch
	"""
	# helper functions and classes
	def rm_stack_char(mystr):
		"Helper function to remove a stack character if it exists"
		stackloc = mystr.find("@")
		if stackloc != -1:
			# there's an '@' somewhere
			if len(mystr) - 1 == stackloc:
				# It's at the end of the string
				return mystr[:-1]
			# '@' not at the end, so it's an error
			# (ValueError(...) call form is valid in both Python 2 and 3,
			# unlike the old "raise ValueError, msg" statement)
			raise ValueError("Invalid format: misplaced '@'.")
		# no '@' at all
		return mystr
	class Fieldloc:
		"Helper class to store description of a field"
		def __init__(self, begin, end):
			self.begin = begin
			self.end = end
		def count(self):
			"Size of the field (including braces)"
			return self.end - self.begin + 1
	def find_fields(mystr):
		"Helper function to identify and validate fields in a string"
		fields = []
		loc = 0
		while True:
			begin = mystr.find('{', loc)
			if begin == -1: break
			end = mystr.find('}', begin)
			# check validity: braces must enclose asterisks only
			asterisks = mystr[begin+1:end]
			if asterisks.strip("*") != "":
				raise ValueError("Malformed {*...*} field: %s" %
					mystr[begin:end+1])
			fields.append(Fieldloc(begin, end))
			loc = end
		return fields
	# remove surrounding whitespace
	# BUG FIX: the result of strip() was previously discarded (strings are
	# immutable), so stray whitespace was kept and broke '@' detection
	mystr = mystr.strip()
	# remove stack character (if it exists)
	mystr = rm_stack_char(mystr)
	# locate fields to replace
	fields = find_fields(mystr)
	if len(fields) != len(fieldvals):
		# wrong number of fields?
		raise ValueError("Number of field values provided differs from "
			"the number of {*...*} fields.")
	newstrfrags = []
	loc = 0
	for i, field in enumerate(fields):
		# text before the field
		newstrfrags.append(mystr[loc:field.begin])
		# replace the field with the zero-padded field value
		fieldsize = field.count() - 2
		fielddesc = "%0" + str(fieldsize) + "d"
		newstrfrags.append(fielddesc % fieldvals[i])
		loc = field.end + 1
	newstrfrags.append(mystr[loc:])
	return "".join(newstrfrags)
def peak_search(e, npeak = 1, invert = 1, print_screen = 0):
peaks = e.peak_search(npeak, invert)
ndim = peaks[0]
nlist = int((len(peaks)-1)/((ndim+1)*2))
if(nlist > 0):
outpeaks = []
if(print_screen):
if ndim == 1 :
print '%10s%10s%10s%10s%10s'%("Index "," Peak_value","X ", "Peak/P_max", "X-NX/2")
print_list_format(peaks[1:], 4)
elif ndim == 2 :
print '%10s%10s%10s%10s%10s%10s%10s'%("Index ", "Peak_value","X ","Y ", "Peak/P_max", "X-NX/2", "Y-NY/2")
print_list_format(peaks[1:], 6)
elif ndim == 3 :
print '%10s%10s%10s%10s%10s%10s%10s%10s%10s'%("Index ", "Peak_value","X ","Y ","Z ", "Peak/P_max", "X-NX/2", "Y-NY/2", "Z-NZ/2")
print_list_format(peaks[1:], 8)
else: ERROR("Image dimension extracted in peak_search is wrong", "Util.peak_search", 1)
for i in xrange(nlist):
k=int((ndim+1)*i*2)
if ndim == 1 : p=[peaks[k+1], peaks[k+2], peaks[k+3], peaks[k+4]]
elif ndim == 2 : p=[peaks[k+1], peaks[k+2], peaks[k+3], peaks[k+4], peaks[k+5], peaks[k+6]]
elif ndim == 3 : p=[peaks[k+1], peaks[k+2], peaks[k+3], peaks[k+4], peaks[k+5], peaks[k+6], peaks[k+7], peaks[k+8]]
outpeaks.append(p)
else:
ndim = e.get_ndim()
#ERROR("peak search fails to find any peaks, returns image center as a default peak position","peak_search",0)
if ndim == 1 :
nx = e.get_xsize()
outpeaks = [[1.0, float(nx/2), 1.0, 0.0]]
elif ndim == 2 :
nx = e.get_xsize()
ny = e.get_ysize()
outpeaks = [[1.0, float(nx/2), float(ny/2), 1.0, 0.0, 0.0]]
elif ndim == 3 :
nx = e.get_xsize()
ny = e.get_ysize()
nz = e.get_ysize()
outpeaks = [[1.0, float(nx/2), float(ny/2), float(nz/2), 1.0, 0.0, 0.0, 0.0]]
return outpeaks
####--------------------------------------------------------------------------------------------------#########
def print_row(input, ix=0, iz=0):
	"""Print the data in slice iz, row ix of an image to standard out.
	Usage: print_row(image, ix, iz)
	   or
	       print_row("path/to/image", ix, iz)
	"""
	image=get_image(input)
	nx = image.get_xsize()
	ny = image.get_ysize()
	nz = image.get_zsize()
	print "(z = %d slice, x = %d row)" % (iz, ix)
	line = []
	# emit five values per output line
	for iy in xrange(ny):
		line.append("%12.5g  " % (image.get_value_at(ix,iy,iz)))
		if ((iy + 1) % 5 == 0): line.append("\n   ")
	line.append("\n")
	print "".join(line)
def print_col(input, iy=0, iz=0):
	"""Print the data in slice iz, column iy of an image to standard out.
	Usage: print_col(image, iy, iz)
	   or
	       print_col("path/to/image", iy, iz)
	"""
	image=get_image(input)
	nx = image.get_xsize()
	ny = image.get_ysize()
	nz = image.get_zsize()
	print "(z = %d slice, y = %d col)" % (iz, iy)
	line = []
	# emit five values per output line
	for ix in xrange(nx):
		line.append("%12.5g  " % (image.get_value_at(ix,iy,iz)))
		if ((ix + 1) % 5 == 0): line.append("\n   ")
	line.append("\n")
	print "".join(line)
def print_slice(input, iz=0):
	"""Print the data in slice iz of an image to standard out.
	Usage: print_image(image, int)
	   or
	       print_image("path/to/image", int)
	"""
	image=get_image(input)
	nx = image.get_xsize()
	ny = image.get_ysize()
	nz = image.get_zsize()
	print "(z = %d slice)" % (iz)
	line = []
	# one labelled "Row" per y, five values per printed line
	for iy in xrange(ny):
		line.append("Row ")
		line.append("%4i " % iy)
		for ix in xrange(nx):
			line.append("%12.5g " % (image.get_value_at(ix,iy,iz)))
			if ((ix + 1) % 5 == 0):
				line.append("\n   ")
				line.append("      ")
		line.append("\n")
		if(nx%5 != 0): line.append("\n")
	print "".join(line)
def print_image(input):
	"""Print the data in an image to standard out.
	Usage: print_image(image)
	   or
	       print_image("path/to/image")
	"""
	image=get_image(input)
	nz = image.get_zsize()
	# print every z slice in turn
	for iz in xrange(nz): print_slice(input, iz)
def print_image_col(input, ix=0, iz=0):
	"""Print the values along y at fixed x = ix in slice iz.
	Usage: print_image_col(image, ix, iz)
	   or
	       print_image_col("path/to/image", ix, iz)
	NOTE(review): the printed header calls this a "row" while the function is
	named "col" (and print_image_row prints "col") -- possibly a deliberate
	SPIDER row/column convention; confirm before renaming.
	"""
	image=get_image(input)
	nx = image.get_xsize()
	ny = image.get_ysize()
	nz = image.get_zsize()
	print "(z = %d slice, x = %d row)" % (iz, ix)
	line = []
	# emit five values per output line
	for iy in xrange(ny):
		line.append("%12.5g  " % (image.get_value_at(ix,iy,iz)))
		if ((iy + 1) % 5 == 0): line.append("\n   ")
	line.append("\n")
	print "".join(line)
def print_image_row(input, iy=0, iz=0):
	"""Print the values along x at fixed y = iy in slice iz.
	Usage: print_image_row(image, iy, iz)
	   or
	       print_image_row("path/to/image", iy, iz)
	NOTE(review): the printed header calls this a "col" while the function is
	named "row" -- possibly a deliberate SPIDER convention; confirm.
	"""
	image=get_image(input)
	nx = image.get_xsize()
	ny = image.get_ysize()
	nz = image.get_zsize()
	print "(z = %d slice, y = %d col)" % (iz, iy)
	line = []
	# emit five values per output line
	for ix in xrange(nx):
		line.append("%12.5g  " % (image.get_value_at(ix,iy,iz)))
		if ((ix + 1) % 5 == 0): line.append("\n   ")
	line.append("\n")
	print "".join(line)
def print_image_slice(input, iz=0):
	"""Print the data in slice iz of an image to standard out in a format that agrees with v2
	Usage: print_image_slice(image, int)
	   or
	       print_image_slice("path/to/image", int)
	"""
	image=get_image(input)
	nx = image.get_xsize()
	ny = image.get_ysize()
	nz = image.get_zsize()
	print "(z = %d slice)" % (iz)
	line = []
	# rows printed top-down (highest y first), matching the v2 convention
	for iy in xrange(ny-1,-1,-1):
		line.append("Row ")
		line.append("%4i " % iy)
		for ix in xrange(nx):
			line.append("%12.5g " % (image.get_value_at(ix,iy,iz)))
			if ((ix + 1) % 5 == 0):
				line.append("\n   ")
				line.append("      ")
		line.append("\n")
		if(nx%5 != 0): line.append("\n")
	print "".join(line)
def print_image_slice_3d(input, num=0,direction="z"):
	"""Print one slice of a 3D image to standard out in a format that agrees with v2.
	Usage: print_image_slice_3d(image, num, direction)
	   or
	       print_image_slice_3d("path/to/image", num, direction)

	num       - index of the slice to print
	direction - "x", "y", or "z": the axis perpendicular to the printed slice
	"""
	#print "print slice at 3 directions"
	image=get_image(input)
	nx = image.get_xsize()
	ny = image.get_ysize()
	nz = image.get_zsize()
	if(direction=="x"):
		#print "xxxxx"
		ix=num
		print "(x = %d slice)" % (ix)
		line = []
		# z printed top-down, five values per printed line
		for iz in xrange(nz-1,-1,-1):
			line.append("Z ")
			line.append("%4i " % iz)
			for iy in xrange(ny):
				line.append("%12.5g " % (image.get_value_at(ix,iy,iz)))
				if ((iy + 1) % 5 == 0):
					line.append("\n   ")
					line.append("      ")
			line.append("\n")
			if(ny%5 != 0): line.append("\n")
		print "".join(line)
	elif(direction=="y"):
		#print "yyy"
		iy=num
		print "(y = %d slice)" % (iy)
		line = []
		for iz in xrange(nz-1,-1,-1):
			line.append("Z ")
			line.append("%4i " % iz)
			for ix in xrange(nx):
				line.append("%12.5g " % (image.get_value_at(ix,iy,iz)))
				if ((ix + 1) % 5 == 0):
					line.append("\n   ")
					line.append("      ")
			line.append("\n")
			if(nx%5 != 0): line.append("\n")
		print "".join(line)
	else:
		#print "zzzz"
		iz=num
		print "(z = %d slice)" % (iz)
		line = []
		for iy in xrange(ny-1,-1,-1):
			line.append("Row ")
			line.append("%4i " % iy)
			for ix in xrange(nx):
				line.append("%12.5g " % (image.get_value_at(ix,iy,iz)))
				if ((ix + 1) % 5 == 0):
					line.append("\n   ")
					line.append("      ")
			line.append("\n")
			if(nx%5 != 0): line.append("\n")
		print "".join(line)
def print_list_format(m, narray = 0):
	from string import split
	from math import sqrt
	import string
	import types
	"""
	Print formated elements in a list to screen
	The screen output is in the form of narray*int(len(m)/narray)
	Or when narray is zero, int(sqrt(len(m)))*int(sqrt(len(m)))
	"""
	# format each element according to its type, fixed width 10
	flist = []
	for i in xrange(len(m)):
		if   type(m[i])  is types.FloatType: flist.append('%10.3g'%(m[i]))
		elif type(m[i])  is types.IntType :  flist.append(  '%10d'%(m[i]))
		else				   : flist.append(  '%10s'%(m[i]))
	if(narray > len(m)):
		# cannot have more columns than elements: fall back to the square layout
		narray = 0
		ERROR("improper input narray number, use default value", "print_list_foramt",0)
	if(narray == 0 ):
		# default: roughly square layout
		num = int(sqrt(len(m)))
		if( len(m) % num != 0): lnum = int(len(m)/num) + 1
		else: 			lnum = int(len(m)/num)
	else:
		num = narray
		if( len(m) % num == 0): lnum = int(len(m)/num)
		else: 			lnum = int(len(m)/num) + 1
	ncount = -1
	plist  = []
	# assemble each printed row from `num` consecutive formatted elements
	for i in xrange(lnum):
		qlist = ""
		for j in xrange(num):
			ncount += 1
			if ncount <= len(m) - 1: qlist=qlist+flist[ncount]
			else:			 break
		plist.append(qlist)
	for i in xrange(lnum):
		print '%6d '%(i+1),plist[i]
def pad(image_to_be_padded, new_nx, new_ny = 1, new_nz = 1, background = "average", off_center_nx = 0, off_center_ny = 0, off_center_nz = 0):
	"""Pad an image to (new_nx, new_ny, new_nz), optionally off-center.

	background may be "average", "circumference", or any value convertible to
	a string (e.g. a number), which is handed to Util.pad as-is.
	"""
	import types
	if type(background) != types.StringType: background = str(background)
	# "average" and "circumference" are simply passed through verbatim, so a
	# single Util.pad call covers every background choice
	return Util.pad(image_to_be_padded, new_nx, new_ny, new_nz, off_center_nx, off_center_ny, off_center_nz, background)
def read_spider_doc(fnam):
	from string import atof, atoi
	"""
	Read a SPIDER doc file into a list of rows of floats.

	spider doc file format:
	key nrec record ...
	5     2    12    ...(when key <=99999)
	6     2    12    ...(when key >99999)

	The first line is assumed to be a comment and skipped.  Two fixed-column
	layouts are distinguished by probing characters 8-12 of each line; the
	column offsets below (13/15, +3, +6, width 12/13) are magic numbers tied
	to the SPIDER fixed-width format -- verify against sample doc files
	before changing them.
	"""
	inf = file(fnam, "r")
	comment_line=inf.readline() # assume there is only one comment line
	docf_in = []
	data    = []
	line    = inf.readline()
	while len(line) > 0:
		line_data=[]
		if(line[11:12]==" " and line[8:10] != "  "): # new data format
			start= 13
			end  = 15
			#line_data.append(atoi(line[start:end])) # 03/21/12 Anna: according to the previous version of this function this value was omitted
			start= end+3
			end  = start+6
			line_data.append(atof(line[start:end]))
			colNo = (len(line)-end)/12 - 1
			for i in xrange(colNo):
				start= end+6
				end  = start+7
				line_data.append(atof(line[start:end]))
			data.append(line_data)
			line = inf.readline()
		else:                                        # old data format
			# the key column is one character wider for keys > 99999
			if(line[5:6] == " "): ibeg = 6
			else:                 ibeg = 7
			for irec in xrange(atoi(line[ibeg:ibeg+2])):
				start= ibeg+2+irec*12
				end  = ibeg+2+(irec+1)*12
				line_data.append(atof(line[start:end]))
			data.append(line_data)
			line = inf.readline()
	return data
def read_text_row(fnam, format="", skip=";"):
	"""
	Read a column-listed text file into a list of rows of floats.

	fnam   - name of the text file
	format - "s" enables a SPIDER-doc fallback: when the record count in
	         column 2 disagrees with the number of whitespace tokens,
	         the line is re-sliced by fixed column widths
	skip   - lines containing this character anywhere are treated as
	         comments and ignored
	Returns a list of rows, each row a list of floats.
	"""
	inf  = open(fnam, "r")
	data = []
	strg = inf.readline()
	while len(strg) > 0:
		# a line is a comment if any of its characters equals the skip marker
		if not any(c == skip for c in strg):
			word = strg.split()
			if format == "s":
				# SPIDER doc: column 2 holds the per-line record count; on a
				# mismatch fall back to fixed-width slicing (13-char columns)
				key = int(word[1])
				if key != len(word) - 2:
					word = [strg[0 : 5], strg[6 : 7]]
					for k in range(key):
						k_start = 7 + k*13
						word.append(strg[k_start : k_start + 13])
			data.append([float(w) for w in word])
		strg = inf.readline()
	# BUG FIX: was "inf.close" (a bare attribute access) -- the file handle
	# was never actually closed
	inf.close()
	return data
def write_text_row(data, file_name):
	"""
	Write a list (or list of lists) of numbers to an ASCII file.

	file_name - name of the text file
	data      - list of lists of floats ([ [first list], [second list], ...]);
	            each inner list is written as one line.  If a single flat
	            list is given, the file contains one line.
	"""
	# isinstance(x, list) replaces types.ListType, which no longer exists in
	# Python 3 (they are equivalent for list instances)
	outf = open(file_name, "w")
	if isinstance(data[0], list):
		# list of lists: one line per inner list
		for row in data:
			for v in row:
				outf.write("  %12.5g" % v)
			outf.write("\n")
	else:
		# single flat list: one line
		for v in data:
			outf.write("  %12.5g" % v)
		outf.write("  \n")
	outf.close()
def read_text_file(file_name, ncol = 0):
	"""
	Read numeric data from a text file.

	ncol = -1 : read all columns; returns a list of columns, each a list of floats
	ncol >= 0 : read only the (ncol+1)-th column; returns a flat list of floats
	"""
	inf  = open(file_name, "r")
	data = []
	line = inf.readline()
	while len(line) > 0:
		if ncol == -1:
			tokens = line.split()
			if data == []:
				# first line fixes the number of columns
				data = [[float(t)] for t in tokens]
			else:
				for i, t in enumerate(tokens):
					data[i].append(float(t))
		else:
			data.append(float(line.split()[ncol]))
		line = inf.readline()
	# BUG FIX: the file handle was previously never closed (resource leak)
	inf.close()
	return data
def write_text_file(data, file_name):
	"""
	Write a list (or list of lists) of numbers to an ASCII file, column-wise.

	file_name - name of the text file
	data      - list of lists ([ [first list], [second list], ...]); the
	            first list is written as the first column, the second as the
	            second, and so on.  A single flat list yields one column.
	            Integers are written as %12d, everything else as %12.5g.
	"""
	# isinstance(x, list) replaces types.ListType, which no longer exists in
	# Python 3; the exact-type int check is kept so that e.g. bools and
	# floats keep their original formatting
	outf = open(file_name, "w")
	if isinstance(data[0], list):
		# list of lists: row i of the file takes element i of every column
		for i in range(len(data[0])):
			for col in data:
				if type(col[i]) == int:
					outf.write("  %12d" % col[i])
				else:
					outf.write("  %12.5g" % col[i])
			outf.write("\n")
	else:
		# single flat list: one value per line
		for v in data:
			if type(v) == int:
				outf.write("  %12d\n" % v)
			else:
				outf.write("  %12.5g\n" % v)
	outf.close()
def reconstitute_mask(image_mask_applied_file, new_mask_file, save_file_on_disk = True, saved_file_name = "image_in_reconstituted_mask.hdf"):
	import types
	"""
	Substitute the masked-out area of an image (or stack) using
	Util.reconstitute_image_mask with a new mask.

	image_mask_applied_file - stack/image file name, a list of images, or a
	                          single image object
	new_mask_file           - mask image file name, or a number giving the
	                          radius of a circular/spherical mask
	save_file_on_disk       - if True, write results to saved_file_name;
	                          otherwise return them
	"""
	nima = 1  # default: a single image object was passed in directly
	if type(image_mask_applied_file) == types.StringType:
		nima = EMUtil.get_image_count(image_mask_applied_file)
		if (nima > 1):
			image_mask_applied = []
			for ima in xrange(nima):
				e = EMData()
				e.read_image(image_mask_applied_file, ima)
				image_mask_applied.append(e)
		else:
			image_mask_applied = get_im(image_mask_applied_file)
	elif type(image_mask_applied_file) == types.ListType:
		# BUG FIX: the list was previously measured before being assigned,
		# which raised NameError
		image_mask_applied = image_mask_applied_file
		nima = len(image_mask_applied)
	else:
		image_mask_applied = image_mask_applied_file
	if type(new_mask_file) == types.StringType:
		new_mask = get_im(new_mask_file)
	# BUG FIX: was types.floatType (lowercase f), an AttributeError
	elif type(new_mask_file) == types.IntType or type(new_mask_file) == types.FloatType:
		# numeric mask specification: build a circular/spherical mask of
		# that radius matching the image size
		if nima > 1:
			e = image_mask_applied[0]
			nx = e.get_xsize()
			ny = e.get_ysize()
			nz = e.get_zsize()
		else:
			nx = image_mask_applied.get_xsize()
			ny = image_mask_applied.get_ysize()
			nz = image_mask_applied.get_zsize()
		new_mask = model_circle(new_mask_file, nx, ny, nz)
	if nima > 1:
		image_in_reconstituted_mask = []
		for i in xrange(nima):
			tmp_image = Util.reconstitute_image_mask(image_mask_applied[i], new_mask)
			image_in_reconstituted_mask.append(tmp_image)
			if (save_file_on_disk): image_in_reconstituted_mask[i].write_image(saved_file_name, i)
		if (not save_file_on_disk): return image_in_reconstituted_mask
	else:
		if (save_file_on_disk):
			image_in_reconstituted_mask = Util.reconstitute_image_mask(image_mask_applied, new_mask)
			image_in_reconstituted_mask.write_image(saved_file_name)
		else: return Util.reconstitute_image_mask(image_mask_applied, new_mask)
def rotate_about_center(alpha, cx, cy):
	"""Compose a 2D rotation by alpha (degrees) about the point (cx, cy),
	built as translate(-cx,-cy) -> rotate(alpha) -> translate(cx,cy).
	Returns (angle, tx, ty, scale) of the composed transform.
	"""
	shifted  = compose_transform2(0, -cx, -cy, 1, alpha, 0, 0, 1)
	composed = compose_transform2(shifted[0], shifted[1], shifted[2], shifted[3], 0, cx, cy, 1)
	# (angle, tx, ty, scale)
	return composed[0], composed[1], composed[2], composed[3]
def reshape_1d(input_object, length_current=0, length_interpolated=0, Pixel_size_current = 0., Pixel_size_interpolated = 0.):
	"""
	linearly interpolate a 1D power spectrum to required length with required Pixel size
	input_object - a 1D list with a 1D curve to be interpolated
	length_current - half size of the image size (in case of power spectrum, it can be different from the length of the input_object)
	length_interpolated - length of the interpolated 1D curve
	Pixel_size_current - pixel size of the input 1D list
	Pixel_size_interpolated - pixel size of the target 1D list
	One can either input the two lengths or two respective pixel sizes
	"""
	interpolated = []
	if length_current == 0: length_current = len(input_object)
	# last index usable as the left node of a linear segment (ix+1 must stay in range)
	lt = len(input_object) - 2
	if length_interpolated == 0:
		if( Pixel_size_interpolated != Pixel_size_current):
			length_interpolated = int(length_current*Pixel_size_current/Pixel_size_interpolated + 0.5)
		else:
			ERROR("Incorrect input parameters","reshape_1d",1)
			return []
	if Pixel_size_current == 0.:
		# no pixel sizes given - derive the sampling ratio from the two lengths
		Pixel_size_current = 1.
		Pixel_size_interpolated = Pixel_size_current*float(length_current)/float(length_interpolated)
	# sampling step of the output curve expressed on the input grid
	qt =Pixel_size_interpolated/Pixel_size_current
	for i in xrange(length_interpolated):
		xi = float(i)*qt
		ix = min(int(xi),lt)
		df = xi -ix
		# linear interpolation between the two neighboring input samples
		xval = (1.0-df)*input_object[ix] + df*input_object[ix+1]
		interpolated.append(xval)
	return interpolated
def rops_dir(indir, output_dir = "1dpw2_dir"):
	"""
	Calculate 1D rotationally averaged power spectra from
	image stack listed in a directory

	For every file in indir, the rotationally averaged power spectra of all
	its images are summed and written as a one-column text file
	"1dpw2_<name>.txt" into output_dir (created if necessary).
	"""
	from EMAN2 import periodogram
	import os
	flist = os.listdir(indir)
	print flist
	if os.path.exists(output_dir) is False: os.mkdir(output_dir)
	for i, v in enumerate(flist):
		(filename, filextension) = os.path.splitext(v)
		nima = EMUtil.get_image_count(os.path.join(indir,v))
		print nima
		for im in xrange(nima):
			e = EMData()
			file_name = os.path.join(indir,v)
			e.read_image(file_name, im)
			tmp1 = periodogram(e)
			tmp = tmp1.rotavg()
			if im == 0:
				# first image of the stack - allocate the accumulator
				sum_ima = model_blank(tmp.get_xsize())
				sum_ima += tmp
			else : sum_ima += tmp
		table = []
		nr = sum_ima.get_xsize()
		for ir in xrange(nr): table.append([sum_ima.get_value_at(ir)])
		drop_spider_doc(os.path.join(output_dir, "1dpw2_"+filename+".txt"), table)
def estimate_3D_center(data):
	"""
	Least-squares estimate of the 3D center of a structure from the 2D shifts
	stored in the projection headers ("xform.projection") of the images in data.

	Returns (cx, cy, cz, tx, ty) as floats: the common 3D shift plus a common
	residual 2D offset.
	"""
	from math import cos, sin, pi
	from numpy import matrix
	from numpy import linalg
	ali_params = []
	for im in data:
		phi, theta, psi, s2x, s2y = get_params_proj(im)
		ali_params.append([phi, theta, psi, s2x, s2y])
	N = len(ali_params)
	A = []
	b = []
	for i in xrange(N):
		phi_rad = ali_params[i][0]/180*pi
		theta_rad = ali_params[i][1]/180*pi
		psi_rad = ali_params[i][2]/180*pi
		# two rows per projection: the unknown 3D shift projected onto the
		# in-plane x- and y-axes of this view; the trailing (1,0)/(0,1)
		# columns absorb a common 2D offset
		A.append([cos(psi_rad)*cos(theta_rad)*cos(phi_rad)-sin(psi_rad)*sin(phi_rad),
			cos(psi_rad)*cos(theta_rad)*sin(phi_rad)+sin(psi_rad)*cos(phi_rad), -cos(psi_rad)*sin(theta_rad), 1, 0])
		A.append([-sin(psi_rad)*cos(theta_rad)*cos(phi_rad)-cos(psi_rad)*sin(phi_rad),
			-sin(psi_rad)*cos(theta_rad)*sin(phi_rad)+cos(psi_rad)*cos(phi_rad), sin(psi_rad)*sin(theta_rad), 0, 1])
		b.append([ali_params[i][3]])
		b.append([ali_params[i][4]])
	A_matrix = matrix(A)
	b_matrix = matrix(b)
	# solve the normal equations  (A^T A) K = A^T b
	K = linalg.solve(A_matrix.T*A_matrix, A_matrix.T*b_matrix)
	return float(K[0][0]), float(K[1][0]), float(K[2][0]), float(K[3][0]), float(K[4][0])
def estimate_3D_center_MPI(data, nima, myid, number_of_proc, main_node):
	"""
	MPI version of estimate_3D_center: each node sends the projection
	parameters of its local images to main_node, which solves the
	least-squares system for the 3D center.

	Returns (cx, cy, cz, tx, ty) on main_node and (0.0, 0.0, 0.0, 0.0, 0.0)
	on all other nodes.
	"""
	from math import cos, sin, pi
	from numpy import matrix
	from numpy import linalg
	from mpi import MPI_COMM_WORLD
	from mpi import mpi_recv, mpi_send, MPI_FLOAT
	from applications import MPI_start_end
	# flatten the local parameters into a series of 5 floats per image
	ali_params_series = []
	for im in data:
		phi, theta, psi, s2x, s2y = get_params_proj(im)
		ali_params_series.append(phi)
		ali_params_series.append(theta)
		ali_params_series.append(psi)
		ali_params_series.append(s2x)
		ali_params_series.append(s2y)
	if myid == main_node:
		# collect the flattened parameter series from all other nodes
		for proc in xrange(number_of_proc):
			if proc != main_node:
				image_start_proc, image_end_proc = MPI_start_end(nima, number_of_proc, proc)
				n_params = (image_end_proc - image_start_proc)*5
				temp = mpi_recv(n_params, MPI_FLOAT, proc, proc, MPI_COMM_WORLD)
				for nn in xrange(n_params): ali_params_series.append(float(temp[nn]))
		# unflatten into one [phi, theta, psi, s2x, s2y] list per image
		ali_params = []
		N = len(ali_params_series)/5
		for im in xrange(N):
			ali_params.append([ali_params_series[im*5], ali_params_series[im*5+1], ali_params_series[im*5+2], ali_params_series[im*5+3], ali_params_series[im*5+4]])
		A = []
		b = []
		DEG_to_RAD = pi/180.0
		for i in xrange(N):
			phi_rad = ali_params[i][0]*DEG_to_RAD
			theta_rad = ali_params[i][1]*DEG_to_RAD
			psi_rad = ali_params[i][2]*DEG_to_RAD
			# two rows per projection, same system as estimate_3D_center
			A.append([cos(psi_rad)*cos(theta_rad)*cos(phi_rad)-sin(psi_rad)*sin(phi_rad),
				cos(psi_rad)*cos(theta_rad)*sin(phi_rad)+sin(psi_rad)*cos(phi_rad), -cos(psi_rad)*sin(theta_rad), 1, 0])
			A.append([-sin(psi_rad)*cos(theta_rad)*cos(phi_rad)-cos(psi_rad)*sin(phi_rad),
				-sin(psi_rad)*cos(theta_rad)*sin(phi_rad)+cos(psi_rad)*cos(phi_rad), sin(psi_rad)*sin(theta_rad), 0, 1])
			b.append([ali_params[i][3]])
			b.append([ali_params[i][4]])
		A_matrix = matrix(A)
		b_matrix = matrix(b)
		# solve the normal equations  (A^T A) K = A^T b
		K = linalg.solve(A_matrix.T*A_matrix, A_matrix.T*b_matrix)
		return float(K[0][0]), float(K[1][0]), float(K[2][0]), float(K[3][0]), float(K[4][0])
	else:
		image_start_proc, image_end_proc = MPI_start_end(nima, number_of_proc, myid)
		n_params = (image_end_proc - image_start_proc)*5
		mpi_send(ali_params_series, n_params, MPI_FLOAT, main_node, myid, MPI_COMM_WORLD)
		return 0.0, 0.0, 0.0, 0.0, 0.0
def rotate_3D_shift(data, shift3d):
	"""Apply a 3D origin shift to the projection transform of every image in data.

	shift3d - [sx, sy, sz]; each image's "xform.projection" header transform is
	post-multiplied by a pure translation by (-sx, -sy, -sz).
	"""
	shift = Transform({"type":"spider","phi":0.0,"theta":0.0,"psi":0.0,"tx":-shift3d[0],"ty":-shift3d[1],"tz":-shift3d[2],"mirror":0,"scale":1.0})
	for image in data:
		image.set_attr('xform.projection', image.get_attr('xform.projection')*shift)
def sym_vol(image, symmetry="c1"):
	"""Return a symmetrized copy of a volume; "c1" means no symmetrization (plain copy)."""
	if symmetry == "c1":
		return image.copy()
	return image.symvol(symmetry)
##----------------------------------HDF headers related code --------------------------
def set_arb_params(img, params, par_str):
	"""
	Store arbitrary header values in img: attribute par_str[i] is set to params[i].
	"""
	for position, key in enumerate(par_str):
		img.set_attr_dict({key: params[position]})
def get_arb_params(img, par_str):
	"""
	Read arbitrary header values from img, returned in the order given by par_str.
	"""
	return [img.get_attr(key) for key in par_str]
###------------------------------------------------------------------------------------------
def start_time():
	"""Return the current wall-clock time (seconds since the epoch) as a float."""
	from time import time
	return time()
def finish_time(start_time):
	# Print the elapsed time since start_time and return the current time.
	# NOTE(review): "print (...), value" is the Python 2 print-statement form;
	# it prints "Running time is" followed by the elapsed seconds.
	import time
	finish_time = time.time()
	print ("Running time is"), finish_time-start_time
	return finish_time
def ttime():
	"""Return the current local time formatted as an asctime string."""
	import time
	return time.asctime(time.localtime(time.time()))
def running_time(start_time):
	"""Print (via print_msg) the elapsed wall-clock time since start_time as h/min/s."""
	from utilities import print_msg
	from time import time
	time_run = int(time() - start_time)
	time_h = time_run / 3600                # whole hours (Python 2 integer division)
	time_m = (time_run % 3600) / 60         # whole minutes
	time_s = (time_run % 3600) % 60         # remaining seconds
	print_msg('\nRunning time is: %s h %s min %s s\n\n' % (str(time_h).rjust(2, '0'), str(time_m).rjust(2, '0'), str(time_s).rjust(2, '0')))
def running_time_txt(start_time):
	"""Return the elapsed wall-clock time since start_time formatted as h/min/s."""
	from time import time
	elapsed = int(time() - start_time)
	hours, remainder = divmod(elapsed, 3600)
	minutes, seconds = divmod(remainder, 60)
	return 'Running time is: %s h %s min %s s' % (str(hours).rjust(2, '0'), str(minutes).rjust(2, '0'), str(seconds).rjust(2, '0'))
'''
def reduce_array_to_root(data, myid, main_node = 0, comm = -1):
from numpy import array, shape, reshape
from mpi import MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD, mpi_reduce, mpi_barrier
if comm == -1: comm = MPI_COMM_WORLD
n = shape(data)
ntot = 1
for i in xrange(len(n)): ntot *= n[i]
count = 500000
array1d = reshape(data, (ntot,))
ntime = (ntot-1) /count + 1
for i in xrange(ntime):
block_begin = i*count
block_end = i*count + count
if block_end > ntot: block_end = ntot
block_size = block_end - block_begin
tmpsum = mpi_reduce(array1d[block_begin], block_size, MPI_FLOAT, MPI_SUM, main_node, comm)
mpi_barrier(comm)
if myid == main_node:
array1d[block_begin:block_end] = tmpsum[0:block_size]
'''
def reduce_EMData_to_root(data, myid, main_node = 0, comm = -1):
	"""
	Element-wise MPI sum-reduction of an EMData object onto main_node.
	On main_node the pixel buffer of `data` is overwritten in place (through
	the numpy view) with the sum over all processes; other nodes keep theirs.
	"""
	from numpy import array, shape, reshape
	from mpi import mpi_reduce, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD, mpi_barrier
	from utilities import get_image_data
	if comm == -1: comm = MPI_COMM_WORLD
	# numpy view into the image buffer (no copy), flattened below
	array = get_image_data(data)
	n = shape(array)
	ntot = 1
	for i in n: ntot *= i
	# reduce in chunks to stay below MPI message-size limits
	# (chunk size presumably chosen to match a typical volume -- TODO confirm)
	count = (75*4+2)*(75*4)**2
	array1d = reshape( array, (ntot,))
	ntime = (ntot-1) /count + 1
	for i in xrange(ntime):
		block_begin = i*count
		block_end = min(block_begin + count, ntot)
		block_size = block_end - block_begin
		tmpsum = mpi_reduce(array1d[block_begin:block_begin+block_size], block_size, MPI_FLOAT, MPI_SUM, main_node, comm)
		mpi_barrier(comm)
		if myid == main_node:
			# write the reduced chunk back into the image buffer
			array1d[block_begin:block_end] = tmpsum[0:block_size]
def bcast_EMData_to_all(tavg, myid, source_node = 0, comm = -1):
	"""
	MPI-broadcast the pixel data of EMData object tavg from source_node to all
	processes; on receiving nodes the image buffer is overwritten in place.
	The image must already exist with the same size on every node.
	"""
	from EMAN2 import EMNumPy
	from numpy import array, shape, reshape
	from mpi import mpi_bcast, MPI_FLOAT, MPI_COMM_WORLD
	if comm == -1: comm = MPI_COMM_WORLD
	# numpy view into the image buffer (no copy)
	tavg_data = EMNumPy.em2numpy(tavg)
	n = shape(tavg_data)
	ntot = 1
	for i in n: ntot *= i
	tavg_tmp = mpi_bcast(tavg_data, ntot, MPI_FLOAT, source_node, comm)
	if(myid != source_node):
		# copy the received values back through the flattened view
		tavg_data1d = reshape(tavg_data,(ntot,))
		tavg_data1d[0:ntot] = tavg_tmp[0:ntot]
'''
def bcast_EMData_to_all(img, myid, main_node = 0, comm = -1):
# Comment by Zhengfan Yang on 01/05/10
#
# Notice:
# (1) one should use this new version of broadcasting EMData in the following way:
# img = bcast_EMData_to_all(img, myid, main_node, comm)
# instead of
# bcast_EMData_to_all(img, myid, main_node, comm)
# The latter is inconsistent with mpi_bcast() and difficult to implement efficiently
#
# (2) To be consistent with send_EMData() and recv_EMData(), we assume that the node
# other than the broadcasting node know nothing about the EMData(). Therefore, one
# need to broadcast the size of EMData() and two attributes: is_complex and is_ri.
# For all other attributes, you are on your own.
from numpy import reshape
from mpi import mpi_bcast, MPI_INT, MPI_FLOAT, MPI_COMM_WORLD
if comm == -1: comm = MPI_COMM_WORLD
img_head = []
if myid == main_node:
img_head.append(img.get_xsize())
img_head.append(img.get_ysize())
img_head.append(img.get_zsize())
img_head.append(img.is_complex())
img_head.append(img.is_ri())
img_head = mpi_bcast(img_head, 5, MPI_INT, main_node, comm)
nx = int(img_head[0])
ny = int(img_head[1])
nz = int(img_head[2])
is_complex = int(img_head[3])
is_ri = int(img_head[4])
ntot = nx*ny*nz
img_data = EMNumPy.em2numpy(img)
img_data = mpi_bcast(img_data, ntot, MPI_FLOAT, main_node, comm)
if nz != 1:
img_data = reshape(img_data, (nz, ny, nx)) # For some reason, the order should be like this -- Zhengfan Yang
elif ny != 1:
img_data = reshape(img_data, (ny, nx))
else:
pass
img = EMNumPy.numpy2em(img_data)
img.set_complex(is_complex)
img.set_ri(is_ri)
return img
def reduce_EMData_to_root(img, myid, main_node = 0, comm = -1):
# Comment by Zhengfan Yang on 01/05/10
#
# Notice:
# (1) one should use this new version of reducing EMData in the following way:
# img = reduce_EMData_to_root(img, myid, main_node, comm)
# instead of
# reduce_EMData_to_root(img, myid, main_node, comm)
# The latter is inconsistent with mpi_bcast() and difficult to implement efficiently
from numpy import reshape
from mpi import mpi_reduce, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD
if comm == -1: comm = MPI_COMM_WORLD
nx = img.get_xsize()
ny = img.get_ysize()
nz = img.get_zsize()
is_complex = img.is_complex()
is_ri = img.is_ri()
ntot = nx*ny*nz
img_data = EMNumPy.em2numpy(img)
img_data = mpi_reduce(img_data, ntot, MPI_FLOAT, MPI_SUM, main_node, comm)
if myid == main_node:
if nz!=1:
img_data = reshape(img_data, (nz, ny, nx))
elif ny!=1:
img_data = reshape(img_data, (ny, nx))
else:
pass
img = EMNumPy.numpy2em(img_data)
img.set_complex(is_complex)
img.set_ri(is_ri)
return img
else:
return img
'''
def send_EMData(img, dst, tag, comm=-1):
	"""
	Point-to-point send of an EMData object to rank dst; pairs with recv_EMData.
	Two messages are sent: a 5-int header (nx, ny, nz, is_complex, is_ri) on
	tag 2*tag, then the pixel data on tag 2*tag+1.
	"""
	from mpi import mpi_send, MPI_INT, MPI_FLOAT, MPI_COMM_WORLD
	if comm == -1: comm = MPI_COMM_WORLD
	img_head = []
	img_head.append(img.get_xsize())
	img_head.append(img.get_ysize())
	img_head.append(img.get_zsize())
	img_head.append(img.is_complex())
	img_head.append(img.is_ri())
	head_tag = 2*tag
	mpi_send(img_head, 5, MPI_INT, dst, head_tag, comm)
	img_data = get_image_data(img)
	data_tag = 2*tag+1
	ntot = img_head[0]*img_head[1]*img_head[2]
	mpi_send(img_data, ntot, MPI_FLOAT, dst, data_tag, comm)
'''
count = 100000
data1d = reshape(img_data, (ntot,))
ntime = (ntot-1) /count + 1
for i in xrange(ntime):
block_begin = i*count
block_end = i*count + count
if block_end > ntot:
block_end = ntot
block_size = block_end - block_begin
mpi_send(data1d[block_begin], block_size, MPI_FLOAT, dst, data_tag*ntime+i, comm)
'''
def recv_EMData(src, tag, comm=-1):
	"""
	Receive an EMData object sent from rank src by send_EMData (same tag).
	Rebuilds the image from the header message (tag 2*tag) and the data
	message (tag 2*tag+1) and restores the complex/ri flags.
	"""
	from mpi import mpi_recv, MPI_INT, MPI_FLOAT, MPI_COMM_WORLD
	from numpy import reshape
	from EMAN2 import EMNumPy
	if comm==-1: comm = MPI_COMM_WORLD
	head_tag = 2*tag
	img_head = mpi_recv(5, MPI_INT, src, head_tag, comm)
	nx = int(img_head[0])
	ny = int(img_head[1])
	nz = int(img_head[2])
	is_complex = int(img_head[3])
	is_ri = int(img_head[4])
	data_tag = 2*tag+1
	ntot = nx*ny*nz
	img_data = mpi_recv(ntot, MPI_FLOAT, src, data_tag, comm)
	# (nz, ny, nx) axis order is what EMNumPy.numpy2em expects
	if nz != 1:
		img_data = reshape(img_data, (nz, ny, nx))
	elif ny != 1:
		img_data = reshape(img_data, (ny, nx))
	else:
		pass
	img = EMNumPy.numpy2em(img_data)
	img.set_complex(is_complex)
	img.set_ri(is_ri)
	return img
'''
#construct a EMData by taking the ownership of numpy array, no memory copying --Grant Tang
#recv_data_numeric = mpi_recv(ntot, MPI_FLOAT, src, data_tag, comm)
#recv_data_numpy = numpy.array(recv_data_numeric)
#numpy_data = recv_data.reshape(recv_data, (nz,ny,nx))
#img = EMNumPy.numpy2em(numpy_data)
#comment out Wei's original code, which makes memory copy to construct EMData from numpy array --Grant Tang
img = EMData()
img.set_size(nx, ny, nz)
if( complex > 0 ):
img.set_complex(True)
else:
img.set_complex(False)
data1d = reshape( get_image_data(img), (ntot,) )
tmp_data = mpi_recv(ntot, MPI_FLOAT, src, data_tag, comm)
data1d[0:ntot] = tmp_data[0:ntot]
count = 100000
ntime = (ntot-1)/count + 1
for i in xrange(ntime):
block_begin = i*count
block_end = i*count + count
if block_end > ntot:
block_end = ntot
block_size = block_end - block_begin
tmp_data = mpi_recv(block_size, MPI_FLOAT, src, data_tag*ntime+i, comm)
data1d[block_begin:block_end] = tmp_data[0:block_size]
return img
'''
def gather_EMData(data, number_of_proc, myid, main_node):
	"""
	Gather a list of EMData objects from all nodes onto the main node; we assume
	the list has the same length on each node.
	The 'members' attribute of each image is transferred separately, since it is
	not carried by the send_EMData/recv_EMData messages.
	Returns the combined list on main_node, an empty list on all other nodes.
	"""
	from mpi import MPI_COMM_WORLD, MPI_INT, MPI_TAG_UB
	from mpi import mpi_send, mpi_recv
	l = len(data)
	gathered_data = []
	inc = 1 # A temp measure
	if myid == main_node:
		for i in xrange(0, number_of_proc*inc, inc):
			if i == main_node:
				# local images - no communication needed
				for k in xrange(l):
					gathered_data.append(data[k])
			else:
				for k in xrange(l):
					# tag i*l+k matches the sender's myid*l+k below
					im = recv_EMData(i, i*l+k)
					mem_len = mpi_recv(1, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
					members = mpi_recv(int(mem_len[0]), MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)
					members = map(int, members)
					im.set_attr('members', members)
					gathered_data.append(im)
	else:
		for k in xrange(l):
			send_EMData(data[k], main_node, myid*l+k)
			mem = data[k].get_attr('members')
			mpi_send(len(mem), 1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
			mpi_send(mem, len(mem), MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)
	return gathered_data
def bcast_string_to_all(str_to_send, source_node = 0):
	# Broadcast a string from source_node, character by character, as MPI ints.
	# NOTE(review): every rank passes len() of its *local* str_to_send, so all
	# ranks must already hold a string of the same length -- verify at call sites.
	from mpi import mpi_bcast, MPI_INT, MPI_COMM_WORLD
	str_tmp = ""
	str_TMP = mpi_bcast(str_to_send, len(str_to_send), MPI_INT, source_node, MPI_COMM_WORLD)
	for i in xrange(len(str_to_send)): str_tmp += chr(str_TMP[i])
	return str_tmp
def bcast_number_to_all(number_to_send, source_node = 0):
	"""
	number_to_send has to be pre-defined in each node

	Broadcast a single int or float from source_node; returns the received
	value cast back to the sender's type.  For any other type an error message
	is printed and None is returned implicitly.
	"""
	from mpi import mpi_bcast, MPI_INT, MPI_COMM_WORLD, MPI_FLOAT
	import types
	if type(number_to_send) is types.IntType:
		TMP = mpi_bcast(number_to_send, 1, MPI_INT, source_node, MPI_COMM_WORLD)
		return int(TMP[0])
	elif type(number_to_send) is types.FloatType:
		TMP = mpi_bcast(number_to_send, 1, MPI_FLOAT, source_node, MPI_COMM_WORLD)
		return float(TMP[0])
	else:
		print " ERROR in bcast_number_to_all"
def bcast_list_to_all(list_to_send, source_node = 0):
	# Broadcast a list of numbers as MPI floats; entries that are ints in the
	# *local* list are cast back to int after the broadcast, so all ranks
	# should hold lists with matching length and element types.
	from mpi import mpi_bcast, MPI_COMM_WORLD, MPI_FLOAT
	import types
	list_tmp = mpi_bcast(list_to_send, len(list_to_send), MPI_FLOAT, source_node, MPI_COMM_WORLD)
	list_to_bcast = []
	for i in xrange(len(list_to_send)):
		if (type(list_to_send[i]) == types.IntType ): list_to_bcast.append( int( list_tmp[i] ) )
		else: list_to_bcast.append( float( list_tmp[i] ) )
	return list_to_bcast
def recv_attr_dict(main_node, stack, data, list_params, image_start, image_end, number_of_proc, comm = -1):
	"""
	hdf version!  Executed on the main node: receive the header attributes
	listed in list_params from every other node (sent by send_attr_dict) and
	write them into the headers of the hdf file `stack`.  Attributes may be
	int, float, or Transform (serialized as a 12-float matrix).
	For images held locally ([image_start, image_end)), headers are written
	directly from `data`.
	"""
	import types
	from utilities import get_arb_params, set_arb_params
	from mpi import mpi_recv
	from mpi import MPI_FLOAT, MPI_INT, MPI_TAG_UB, MPI_COMM_WORLD
	# This is done on the main node, so for images from the main node, simply write headers
	if comm == -1: comm = MPI_COMM_WORLD
	TransType = type(Transform())
	# prepare keys for float/int: ink[i] encodes the type of list_params[i]
	# (1 = int, 0 = float, 2 = Transform = 12 floats) and len_list is the
	# number of floats transferred per image
	value = get_arb_params(data[0], list_params)
	ink = []
	len_list = 0
	for il in xrange(len(list_params)):
		if type(value[il]) is types.IntType:
			ink.append(1)
			len_list += 1
		elif type(value[il]) is types.FloatType:
			ink.append(0)
			len_list += 1
		elif type(value[il]) is TransType:
			ink.append(2)
			len_list += 12
	# receive, from each node, its [image_start, image_end) range and the
	# flattened parameter block for those images
	ldis = []
	headers = []
	for n in xrange(number_of_proc):
		if n != main_node:
			dis = mpi_recv(2, MPI_INT, n, MPI_TAG_UB, comm)
			value = mpi_recv(len_list*(dis[1]-dis[0]), MPI_FLOAT, n, MPI_TAG_UB, comm)
			ldis.append([dis[0], dis[1]])
			headers.append(value)
			del dis
	del value
	# local images: write headers straight from data
	for im in xrange(image_start, image_end):
		data[im-image_start].write_image(stack, data[im-image_start].get_attr_default('ID', im), EMUtil.ImageType.IMAGE_HDF, True)
	# bug fix: 'ID' position is looked up once, before the loops; the old code
	# used list_params.count('ID') (the number of occurrences) as an *index*
	# into nvalue, which is wrong unless 'ID' happens to sit at index 1
	if 'ID' in list_params: ISID = list_params.index('ID')
	else:                   ISID = -1
	for n in xrange(len(ldis)):
		img_begin = ldis[n][0]
		img_end = ldis[n][1]
		for im in xrange(img_begin, img_end):
			par_begin = (im-img_begin)*len_list
			nvalue = []
			header = headers[n]
			ilis = 0
			# decode the flattened block back into typed values
			for il in xrange(len(list_params)):
				if(ink[il] == 1):
					nvalue.append(int(header[par_begin+ilis]))
					ilis += 1
				elif ink[il]==0:
					nvalue.append(float(header[par_begin+ilis]))
					ilis += 1
				else:
					assert ink[il]==2
					t = Transform()
					tmp = []
					for iii in xrange(par_begin+ilis, par_begin+ilis+12):
						tmp.append(float(header[iii]))
					t.set_matrix(tmp)
					ilis += 12
					nvalue.append(t)
			if(ISID == -1):
				imm = im
			else:
				imm = nvalue[ISID]
			# read head, set params, and write it
			dummy = EMData()
			dummy.read_image(stack, imm, True)
			set_arb_params(dummy, nvalue, list_params)
			dummy.write_image(stack, dummy.get_attr_default('ID', im), EMUtil.ImageType.IMAGE_HDF, True)
def send_attr_dict(main_node, data, list_params, image_start, image_end, comm = -1):
	"""
	Worker-side counterpart of recv_attr_dict/recv_attr_dict_bdb: send the
	header attributes listed in list_params for the local images
	[image_start, image_end) to main_node as one flattened float block
	(Transform values are serialized as their 12-element matrix).
	"""
	import types
	from utilities import get_arb_params
	from mpi import mpi_send
	from mpi import MPI_FLOAT, MPI_INT, MPI_TAG_UB, MPI_COMM_WORLD
	# This function is called from a node other than the main node
	if comm == -1: comm = MPI_COMM_WORLD
	TransType = type(Transform())
	# first tell the main node which image range this block covers
	mpi_send([image_start, image_end], 2, MPI_INT, main_node, MPI_TAG_UB, comm)
	nvalue = []
	for im in xrange(image_start, image_end):
		value = get_arb_params(data[im-image_start], list_params)
		for il in xrange(len(value)):
			if type(value[il]) is types.IntType: nvalue.append(float(value[il]))
			elif type(value[il]) is types.FloatType: nvalue.append(value[il])
			elif type(value[il]) is TransType:
				# Transform is flattened into its 12 matrix elements
				m = value[il].get_matrix()
				assert (len(m)==12)
				for f in m: nvalue.append(f)
	mpi_send(nvalue, len(nvalue), MPI_FLOAT, main_node, MPI_TAG_UB, comm)
def recv_attr_dict_bdb(main_node, stack, data, list_params, image_start, image_end, number_of_proc, comm = -1):
	"""
	bdb version!  Executed on the main node: receive the header attributes
	listed in list_params from every other node (sent by send_attr_dict) and
	store them in the bdb stack `stack`.  Attributes may be int, float, or
	Transform (serialized as a 12-float matrix).  For images held locally
	([image_start, image_end)), attributes are written directly from `data`.
	"""
	import types
	from utilities import get_arb_params, set_arb_params
	from mpi import mpi_recv
	from mpi import MPI_FLOAT, MPI_INT, MPI_TAG_UB, MPI_COMM_WORLD
	from EMAN2db import db_open_dict
	# This is done on the main node, so for images from the main node, simply write headers
	if comm == -1: comm = MPI_COMM_WORLD
	DB = db_open_dict(stack)
	TransType = type(Transform())
	# prepare keys for float/int: ink[i] encodes the type of list_params[i]
	# (1 = int, 0 = float, 2 = Transform = 12 floats); ISID is the position of
	# the 'ID' attribute, or -1 if not requested
	value = get_arb_params(data[0], list_params)
	ink = []
	len_list = 0
	ISID = -1
	for il in xrange(len(list_params)):
		if(list_params[il] == 'ID'): ISID = il
		if type(value[il]) is types.IntType:
			ink.append(1)
			len_list += 1
		elif type(value[il]) is types.FloatType:
			ink.append(0)
			len_list += 1
		elif type(value[il]) is TransType:
			ink.append(2)
			len_list += 12
	ldis = []
	headers = []
	for n in xrange(number_of_proc):
		if n != main_node:
			# receive the node's image range and its flattened parameter block
			dis = mpi_recv(2, MPI_INT, n, MPI_TAG_UB, comm)
			img_begin = int(dis[0])
			img_end = int(dis[1])
			header = mpi_recv(len_list*(img_end-img_begin), MPI_FLOAT, n, MPI_TAG_UB, comm)
			for im in xrange(img_begin, img_end):
				par_begin = (im-img_begin)*len_list
				nvalue = []
				ilis = 0
				# decode the flattened block back into typed values
				for il in xrange(len(list_params)):
					if(ink[il] == 1):
						nvalue.append(int(header[par_begin+ilis]))
						ilis += 1
					elif ink[il]==0:
						nvalue.append(float(header[par_begin+ilis]))
						ilis += 1
					else:
						assert ink[il]==2
						t = Transform()
						tmp = []
						for iii in xrange(par_begin+ilis, par_begin+ilis+12):
							tmp.append(float(header[iii]))
						t.set_matrix(tmp)
						ilis += 12
						nvalue.append(t)
				if(ISID == -1):
					imm = im
				else:
					imm = nvalue[ISID]
				for i in xrange(len(list_params)):
					if(list_params[i] != "ID"): DB.set_attr(imm, list_params[i], nvalue[i])
		else:
			# attributes of the local images are written directly
			for n in xrange(image_start, image_end):
				ID = data[n-image_start].get_attr_default('ID', n)
				for param in list_params:
					if(param != "ID"): DB.set_attr(ID, param, data[n-image_start].get_attr(param))
	DB.close()
def check_attr(ima, num, params, default_value, action="Warning"):
	# Verify that image ima (image number num, used for reporting only) has
	# the header attribute `params`.  Returns True if present; otherwise
	# either sets it to default_value (action=="Warning") and returns False,
	# or terminates the program (action=="Error").
	from sys import exit
	attr_list = ima.get_attr_dict()
	if attr_list.has_key(params) == False:
		if action=="Warning":
			print "WARNING: In image %i, cannot find attribute \'%s\' in the header, set it to the default value" %(num, params), default_value
			ima.set_attr_dict({params:default_value})
		elif action=="Error":
			print "ERROR: In image %i, cannot find attribute \'%s\' in the header, the program has to terminate" %(num, params)
			exit()
		return False
	else: return True
def print_begin_msg(program_name, onscreen=False):
	"""Print a centered, starred banner with a timestamp announcing the start
	of program_name, either on screen or via print_msg to the log file."""
	from time import localtime, strftime
	t = 100   # banner width in characters
	stars = '*'*t
	string = "Beginning of the program " + program_name + ": " + strftime("%a, %d %b %Y %H:%M:%S", localtime())
	s = (t-len(string))/2
	spacing = ' '*s
	if onscreen:
		print stars
		print spacing+string
		print stars
	else:
		print_msg(stars+"\n")
		print_msg(spacing+string+"\n")
		print_msg(stars+"\n")
def print_end_msg(program_name, onscreen=False):
	"""Print a centered, starred banner with a timestamp announcing the end
	of program_name, either on screen or via print_msg to the log file."""
	from time import localtime, strftime
	t = 100   # banner width in characters
	stars = '*'*t
	string = "End of the program " + program_name + ": " + strftime("%a, %d %b %Y %H:%M:%S", localtime())
	s = (t-len(string))/2
	spacing = ' '*s
	if onscreen:
		print stars
		print spacing+string
		print stars
	else:
		print_msg(stars+"\n")
		print_msg(spacing+string+"\n")
		print_msg(stars+"\n")
def print_msg(msg):
	# Write msg to the log file (opened lazily on first call) and, when not
	# running in batch mode, echo it to stdout as well.
	# NOTE(review): LOGFILE is presumably a module-level name coming from
	# global_def imported at the top of the file -- confirm.
	import sys
	import global_def
	if (global_def.IS_LOGFILE_OPEN == False):
		global_def.LOGFILE_HANDLE = open(LOGFILE,"w")
		global_def.IS_LOGFILE_OPEN = True
	if (global_def.BATCH):
		global_def.LOGFILE_HANDLE.write(msg)
	else:
		sys.stdout.write(msg)
		global_def.LOGFILE_HANDLE.write(msg)
	global_def.LOGFILE_HANDLE.flush()
def read_fsc( filename ):
	"""
	Read a whitespace-delimited, multi-column text file (e.g. an FSC curve).

	Returns a list of columns, each a list of floats; the first data line
	determines the number of columns.  Returns None for an empty file.
	"""
	fscc = None
	f = open( filename, 'r' )
	try:
		for line in f:
			# str.split/float replace the deprecated string.split/string.atof
			items = line.split()
			if fscc is None:
				# first data line determines the number of columns
				fscc = [[] for _ in items]
			for i, item in enumerate(items):
				fscc[i].append(float(item))
	finally:
		# bug fix: the file handle was never closed
		f.close()
	return fscc
"""
# This would not work on windows
def memory_usage():
import os
from string import split
return 0
file = "/proc/%d/status" % os.getpid()
f = open(file, 'r')
line = f.readline()
while len(line) > 0 :
items = split( line )
if items[0]=='VmSize:':
return items[1]+items[2]
line = f.readline()
"""
def circumference( img, inner = -1, outer = -1):
	"""
	Replace everything outside radius `inner` with the mean of img computed
	over the ring (shell) between radii `inner` and `outer`; the interior is
	kept unchanged.  Defaults: inner = nx//2 - 2, outer = inner + 1.
	"""
	nx = img.get_xsize()
	ny = img.get_ysize()
	nz = img.get_zsize()
	if( inner == -1):
		inner = nx//2 -2
		if( outer <= inner ):  outer = inner + 1
	else:
		if( outer <= inner ):  outer = inner + 1
	inner_sphere = model_circle(inner, nx, ny, nz)
	# mean over the ring between inner and outer
	[mean_a,sigma,imin,imax] = Util.infomask(img, model_circle(outer, nx, ny, nz) - inner_sphere, True)
	inner_rest = model_blank(nx, ny, nz, 1.0) - inner_sphere
	# keep original values inside, fill the outside with the ring mean
	Util.mul_img(inner_sphere, img)
	return Util.addn_img(inner_sphere, Util.mult_scalar(inner_rest, mean_a ) )
def copy_attr( pin, name, pot ):
	"""Copy the header attribute `name` from image pin to image pot."""
	value = pin.get_attr( name )
	pot.set_attr( name, value )
def write_headers(filename, data, lima):
	"""
	write headers from files in data into a disk file called filename.
	The filename has to be either hdf or bdb.
	lima - list with positions in the disk files into which headers will be written,
	i.e., header from data[k] will be written into file number lima[k]
	WARNING: this function will open and close DB library!
	"""
	from utilities import file_type
	from EMAN2db import db_open_dict
	ftp = file_type(filename)
	if ftp == "bdb":
		# For unknown reasons this does not work on Linux, but works on Mac ??? Really?
		DB = db_open_dict(filename)
		for i in range(len(lima)):
			DB.set_header(lima[i], data[i])
		DB.close()
		#for i in range(len(lima)):
		#	data[i].write_image(filename, lima[i])
	elif ftp == "hdf":
		for i in range(len(lima)):
			# the True flag writes the header only, not the image data
			data[i].write_image(filename, lima[i], EMUtil.ImageType.IMAGE_HDF, True)
	else:
		ERROR("Unacceptable file format","write_headers",1)
def write_header(filename, data, lima):
	"""
	write header from a single file data into a disk file called filename.
	The filename has to be either hdf or bdb.
	lima - position in the disk files into which header will be written,
	i.e., header from data will be written into file number lima
	WARNING: this function assumes the DB library is opened and will NOT close it!
	"""
	from utilities import file_type
	from EMAN2db import db_open_dict
	ftp = file_type(filename)
	if ftp == "bdb":
		DB = db_open_dict(filename)
		DB.set_header(lima, data)
	elif ftp == "hdf":
		# the True flag writes the header only, not the image data
		data.write_image(filename, lima, EMUtil.ImageType.IMAGE_HDF, True)
	else:
		# bug fix: the error used to report "write_headers" (the sibling function)
		ERROR("Unacceptable file format","write_header",1)
def file_type(name):
	"""Return the stack type of file name: "bdb" for "bdb:..." names, otherwise
	the three-letter extension; raises ERROR for anything else."""
	if len(name) > 4:
		if name.startswith("bdb:"):
			return "bdb"
		if name[-4] == ".":
			return name[-3:]
	ERROR("Unacceptable file format","file_type",1)
def get_params2D(ima, xform = "xform.align2d"):
	"""
	Read 2D alignment parameters from the image header.

	Returns (alpha, tx, ty, mirror, scale).
	"""
	params = ima.get_attr(xform).get_params("2D")
	return params["alpha"], params["tx"], params["ty"], params["mirror"], params["scale"]
def set_params2D(ima, p, xform = "xform.align2d"):
	"""
	Store 2D alignment parameters p = [alpha, tx, ty, mirror, scale] in the image header.
	"""
	alpha, tx, ty, mirror, scale = p[0], p[1], p[2], p[3], p[4]
	ima.set_attr(xform, Transform({"type":"2D","alpha":alpha,"tx":tx,"ty":ty,"mirror":mirror,"scale":scale}))
def get_params3D(ima, xform = "xform.align3d"):
	"""
	Read 3D alignment parameters from the image header.

	Returns (phi, theta, psi, tx, ty, tz, mirror, scale).
	"""
	params = ima.get_attr(xform).get_params("spider")
	return params["phi"], params["theta"], params["psi"], params["tx"], params["ty"], params["tz"], params["mirror"], params["scale"]
def set_params3D(ima, p, xform = "xform.align3d"):
	"""
	Store 3D alignment parameters p = [phi, theta, psi, tx, ty, tz, mirror, scale]
	in the image header.
	"""
	phi, theta, psi = p[0], p[1], p[2]
	tx, ty, tz = p[3], p[4], p[5]
	ima.set_attr(xform, Transform({"type":"spider","phi":phi,"theta":theta,"psi":psi,"tx":tx,"ty":ty,"tz":tz,"mirror":p[6],"scale":p[7]}))
def get_params_proj(ima, xform = "xform.projection"):
	"""
	Read projection alignment parameters from the image header.

	Returns (phi, theta, psi, s2x, s2y); the shifts are the negated
	transform translations.
	"""
	params = ima.get_attr(xform).get_params("spider")
	return params["phi"], params["theta"], params["psi"], -params["tx"], -params["ty"]
def set_params_proj(ima, p, xform = "xform.projection"):
	"""
	Store projection alignment parameters p = [phi, theta, psi, s2x, s2y]
	in the image header; the shifts are stored as negated translations.
	"""
	from EMAN2 import Vec2f
	phi, theta, psi, s2x, s2y = p[0], p[1], p[2], p[3], p[4]
	t = Transform({"type":"spider","phi":phi,"theta":theta,"psi":psi})
	t.set_trans(Vec2f(-s2x, -s2y))
	ima.set_attr(xform, t)
def get_ctf(ima):
	"""
	recover numerical values of CTF parameters from EMAN2 CTF object stored in a header of the input image
	order of returned parameters:
	[defocus, cs, voltage, apix, bfactor, ampcont, dfdiff, dfang]
	"""
	from EMAN2 import EMAN2Ctf
	ctf_params = ima.get_attr("ctf")
	return ctf_params.defocus, ctf_params.cs, ctf_params.voltage, ctf_params.apix, ctf_params.bfactor, ctf_params.ampcont, ctf_params.dfdiff, ctf_params.dfang
def generate_ctf(p):
	"""
	generate EMAN2 CTF object using values of CTF parameters given in the list p
	order of parameters:
	[defocus, cs, voltage, apix, bfactor, ampcont, astigmatism_amplitude, astigmatism_angle]
	[ microns, mm, kV, Angstroms, A^2, microns, radians]
	The two astigmatism entries are optional (p may have 6 or 8 elements).
	"""
	from EMAN2 import EMAN2Ctf
	defocus = p[0]
	cs = p[1]
	voltage = p[2]
	pixel_size = p[3]
	bfactor = p[4]
	amp_contrast = p[5]
	if defocus > 100:  # which means it is very likely in Angstrom, therefore we are using the old convention
		defocus *= 1e-4
	if amp_contrast < 1.0:
		# convert amplitude contrast from the old fractional convention to the
		# percent value EMAN2Ctf expects -- presumably; verify against EMAN2Ctf docs
		from math import sqrt
		amp_contrast = amp_contrast*100/sqrt(2*amp_contrast**2-2*amp_contrast+1)
	ctf = EMAN2Ctf()
	if(len(p) == 6):
		ctf.from_dict({"defocus":defocus, "cs":cs, "voltage":voltage, "apix":pixel_size, "bfactor":bfactor, "ampcont":amp_contrast})
	else:
		# 8 parameters - astigmatism amplitude and angle were supplied as well
		ctf.from_dict({"defocus":defocus, "cs":cs, "voltage":voltage, "apix":pixel_size, "bfactor":bfactor, "ampcont":amp_contrast,'dfdiff':p[6],'dfang':p[7]})
	return ctf
def set_ctf(ima, p):
	"""
	Build an EMAN2 CTF object from the parameter list p
	([defocus, cs, voltage, apix, bfactor, ampcont, ...]) and store it in the
	image header under the "ctf" key.
	"""
	from utilities import generate_ctf
	ima.set_attr( "ctf", generate_ctf( p ) )
def delete_bdb(name):
	"""
	Delete bdb stack
	"""
	from EMAN2db import db_open_dict, db_remove_dict
	# NOTE(review): the dict is opened right before removal -- presumably
	# required so EMAN2db has the entry in its cache; confirm before simplifying.
	a = db_open_dict(name)
	db_remove_dict(name)
# parse user function parses the --function option. this option
# can be either a single function name (i.e. --function=ali3d_e)
# or a list of names, specifying the location of a user-defined
# function; this will have the form --function=/path/module/function
def parse_user_function(opt_string):
	"""
	Parse the value of the --function option.

	Accepts either a plain function name ("ali3d_e"), returned as-is, or a
	bracketed list: "[file,function]" -> [file, function] and
	"[path,file,function]" -> [file, function, path].  Returns None for any
	non-string or malformed value, which makes callers fall back to their
	default user function.
	"""
	if type(opt_string) is not str:
		return None
	if not (opt_string.startswith("[") and opt_string.endswith("]")):
		# plain name, defined (and referenced) in user_functions
		return opt_string
	fields = opt_string[1:-1].split(",")
	if len(fields) == 2:
		# [file, function]
		return [fields[0], fields[1]]
	if len(fields) == 3:
		# [path, file, function] -> reorder to [file, function, path]
		return [fields[1], fields[2], fields[0]]
	# neither form - treat as an error and fall back to the default
	return None
def getvec( phi, tht ):
	"""
	Convert spherical angles phi, tht (degrees) to a unit vector (x, y, z).

	The direction is first folded into the upper hemisphere (tht <= 90), so
	antipodal directions map to the same vector.
	"""
	from math import pi,cos,sin
	if tht > 180.0:
		tht -= 180.0
		phi += 180.0
	if tht > 90.0:
		tht = 180.0 - tht
		phi += 180.0
	assert tht <= 90.0
	deg_to_rad = pi/180.0
	tht_rad = tht*deg_to_rad
	phi_rad = phi*deg_to_rad
	sin_tht = sin(tht_rad)
	return (sin_tht*cos(phi_rad), sin_tht*sin(phi_rad), cos(tht_rad))
def nearest_ang( vecs, phi, tht ) :
	"""
	Return the index of the direction in vecs closest to the direction (phi, tht).

	Closeness is measured by |dot product| with the unit vector of (phi, tht),
	so a direction and its mirror are treated as equivalent.  Returns -1 for an
	empty list.
	"""
	from utilities import getvec
	target = getvec( phi, tht )
	best_score = -1.0
	best_index = -1
	for index, v in enumerate(vecs):
		score = abs(v[0]*target[0] + v[1]*target[1] + v[2]*target[2])
		if score > best_score:
			best_score = score
			best_index = index
	return best_index
def closest_ang( vecs, vec) :
	# Return the index of the vector in vecs with the largest |dot product|
	# with vec, i.e. the closest direction treating v and -v as equivalent.
	# Returns -1 for an empty list.
	best_s = -1.0
	best_i = -1
	for i in xrange( len(vecs) ):
		s = abs(vecs[i][0]*vec[0] + vecs[i][1]*vec[1] + vecs[i][2]*vec[2])
		if s > best_s:
			best_s = s
			best_i = i
	return best_i
# This is in python, it is very slow, we keep it just for comparison
def assign_projangles_slow(projangles, refangles):
	"""
	Assign each projection direction to its nearest reference direction
	(pure-Python reference implementation, kept only for comparison).

	Returns a list of len(refangles) lists; entry k holds the indices of all
	projections whose direction is closest to refangles[k].
	"""
	ref_vectors = [getvec(ref[0], ref[1]) for ref in refangles]
	assignments = [[] for _ in refangles]
	for index, proj in enumerate(projangles):
		assignments[nearest_ang(ref_vectors, proj[0], proj[1])].append(index)
	return assignments
def nearestk_projangles(projangles, whichone = 0, howmany = 1):
	# Return the indices of the howmany projection directions closest to
	# projangles[whichone]; the reference projection itself is excluded.
	# NOTE: relies on Python 2 range() returning a list (elements are deleted below).
	lookup = range(len(projangles))
	refnormal = [None]*len(projangles)
	for i in xrange(len(projangles)):
		refnormal[i] = getvec(projangles[i][0], projangles[i][1])
	# remove the reference projection from the list
	ref = refnormal[whichone]
	del refnormal[whichone], lookup[whichone]
	assignments = [-1]*howmany
	for i in xrange(howmany):
		# pick the closest remaining direction, record its original index, drop it
		k = closest_ang(refnormal, ref)
		assignments[i] = lookup[k]
		del refnormal[k], lookup[k]
	return assignments
def nearestk_to_refdir(refnormal, refdir, howmany = 1):
    """Return indices of the ``howmany`` vectors in ``refnormal`` closest to
    ``refdir``.

    NOTE: ``refnormal`` is consumed destructively -- the selected entries are
    deleted from the caller's list.
    """
    lookup = list(range(len(refnormal)))
    picked = [-1] * howmany
    for i in range(howmany):
        k = closest_ang(refnormal, refdir)
        picked[i] = lookup[k]
        del refnormal[k]
        del lookup[k]
    return picked
'''
def assign_projangles(projangles, refangles, return_asg = False):
if len(refangles) > 10000:
if len(refangles) > 100000:
coarse_refangles = even_angles(1.5) # 9453 angles
else:
coarse_refangles = even_angles(5.0) # 849 angles
coarse_asg = assign_projangles(projangles, coarse_refangles, True)
ref_asg = assign_projangles(refangles, coarse_refangles, True)
else:
coarse_refangles = []
coarse_asg = []
ref_asg = []
nproj = len(projangles)
nref = len(refangles)
proj_ang = [0.0]*(nproj*2)
ref_ang = [0.0]*(nref*2)
for i in xrange(nproj):
proj_ang[i*2] = projangles[i][0]
proj_ang[i*2+1] = projangles[i][1]
for i in xrange(nref):
ref_ang[i*2] = refangles[i][0]
ref_ang[i*2+1] = refangles[i][1]
asg = Util.assign_projangles(proj_ang, ref_ang, coarse_asg, ref_asg, len(coarse_refangles))
if return_asg: return asg
assignments = [[] for i in xrange(nref)]
for i in xrange(nproj):
assignments[asg[i]].append(i)
return assignments
'''
def assign_projangles(projangles, refangles, return_asg = False):
    """Assign each projection direction to its nearest reference direction.

    Flattens both angle lists into [phi0, theta0, phi1, theta1, ...] and calls
    the compiled helper ``Util.assign_projangles``.

    projangles : list of [phi, theta, ...] per projection
    refangles  : list of [phi, theta, ...] per reference direction
    return_asg : when True, return the raw per-projection assignment list
                 instead of grouping projection indices by reference.
                 BUG FIX: ``return_asg`` was referenced below but never
                 defined, so the old code raised NameError at run time; it is
                 restored here as a backward-compatible keyword parameter
                 (matching the commented-out variant above).
    """
    nproj = len(projangles)
    nref = len(refangles)
    proj_ang = [0.0] * (nproj * 2)
    ref_ang = [0.0] * (nref * 2)
    for i in range(nproj):
        proj_ang[i * 2] = projangles[i][0]
        proj_ang[i * 2 + 1] = projangles[i][1]
    for i in range(nref):
        ref_ang[i * 2] = refangles[i][0]
        ref_ang[i * 2 + 1] = refangles[i][1]
    asg = Util.assign_projangles(proj_ang, ref_ang)
    if return_asg: return asg
    # Invert the assignment: one bucket of projection indices per reference.
    assignments = [[] for _ in range(nref)]
    for i in range(nproj):
        assignments[asg[i]].append(i)
    return assignments
def cone_ang( projangles, phi, tht, ant ) :
    """Return the indices of projections whose direction lies within ``ant``
    degrees of the axis defined by (phi, tht), treating antipodal directions
    as equivalent (absolute dot product)."""
    from utilities import getvec
    from math import cos, pi
    # Axis vector and the cosine threshold are loop-invariant.
    # BUG FIX: the old code recomputed getvec(phi, tht) on every iteration.
    vec = getvec( phi, tht )
    cone = cos(ant*pi/180.0)
    la = []
    for i in range( len(projangles) ):
        vecs = getvec( projangles[i][0], projangles[i][1] )
        s = abs(vecs[0]*vec[0] + vecs[1]*vec[1] + vecs[2]*vec[2])
        if s >= cone:
            la.append(i)
    return la
def findall(lo,val):
    """
    Find all occurrences of val in list lo.
    Returns a list of the indices at which val occurs in lo.
    """
    u = []
    i = -1
    while True:
        try:
            i = lo.index(val, i + 1)
        except ValueError:
            # BUG FIX: the old code used a bare ``except`` (which also hides
            # unrelated errors) and then kept incrementing ``i`` one step at a
            # time until the end of the list, even though no further match can
            # exist; stop as soon as index() reports no more occurrences.
            break
        u.append(i)
    return u
def disable_bdb_cache():
    """Globally switch off the Berkeley-DB image cache used by EMAN2."""
    import EMAN2db as _eman2db
    _eman2db.BDB_CACHE_DISABLE = True
def enable_bdb_cache():
    """Globally switch the Berkeley-DB image cache used by EMAN2 back on."""
    import EMAN2db as _eman2db
    _eman2db.BDB_CACHE_DISABLE = False
def helical_consistency(p2i, p1):
    """
    Find overall phi angle and z shift difference between two sets of projection parameters for helical structure.
    The two sets have to be of the same length and it is assumed that the k'th element on the first
    list corresponds to the k'th element on the second list.
    Input: two lists [ [phi2, theta2, psi2, sx2, sy2], [phi1, theta1, psi1, sx1, sy1], ...]. Second list is considered reference.
           parameters for helical symmetry -- dp, pixel_size, dphi
    Output: adjusted second set, per-projection errors, list of agreeing indices,
            the global phi offset, and the two matched phi lists.
    Note: all angles have to be in spider convention.
    """
    from pixel_error import angle_diff
    from math import cos,pi
    from utilities import getvec
    from pixel_error import angle_error
    from EMAN2 import Vec2f
    n =len(p1[0])
    print n
    qtm = -1.0e10
    # Try both the unflipped (lf == 0) and theta-flipped (lf == 180) versions
    # of the second set, and keep whichever scores better below.
    for lf in xrange(0,181,180):
        p2 = []
        p2.extend(p2i)
        if( lf == 180):
            # Compose every orientation of set 2 with a theta = 180 flip.
            tflip = Transform({"type":"spider","theta":180.0})
            for j in xrange(n):
                t2 = Transform({"type":"spider","phi":p2[0][j],"theta":p2[1][j],"psi":p2[2][j]})
                t2.set_trans( Vec2f( -p2[3][j], -p2[4][j] ) )
                t2 = t2*tflip
                d = t2.get_params("spider")
                p2[0][j] = d["phi"]
                p2[1][j] = d["theta"]
                p2[2][j] = d["psi"]
                p2[3][j] = -d["tx"]
                p2[4][j] = -d["ty"]
        # For each projection decide whether the direct (tt1) or the mirrored
        # (tt2) direction of set 2 agrees better with the reference direction.
        tt1 = [0.0]*n
        tt2 = [0.0]*n
        mirror = [False]*n
        ln = 0
        for j in xrange( n ):
            t1 = getvec(p1[0][j],p1[1][j])
            t2 = getvec(p2[0][j],p2[1][j])
            tm = getvec(180.0+p2[0][j],180.0-p2[1][j])
            tt1[j] = t1[0]*t2[0]+t1[1]*t2[1]+t1[2]*t2[2]
            tt2[j] = t1[0]*tm[0]+t1[1]*tm[1]+t1[2]*tm[2]
            # Clamp numerical noise near zero before comparing.
            if(abs(tt1[j])<1.0e-7): tt1[j] = 0.0
            if(abs(tt2[j])<1.0e-7): tt2[j] = 0.0
            if(tt1[j]>tt2[j]):
                mirror[j] = True
                ln+=1
        print " FLIP ",lf
        # If fewer than half agree directly, mirror the whole second set.
        if(ln < n//2):
            print "mirror ",ln
            for j in xrange( n ):
                p2[0][j] += 180.0
                p2[1][j] = 180.0-p2[1][j]
                p2[2][j] = -p2[2][j]
                p2[4][j] = -p2[4][j]
                mirror[j] = not(mirror[j])
        else:
            print " straight", ln
        # Closed-form estimate of the global phi offset from the agreeing pairs.
        phi1 = []
        phi2 = []
        agree = []
        for j in xrange(n):
            if(mirror[j]):
                phi1.append(p1[0][j])
                phi2.append(p2[0][j])
                agree.append(j)
        print len(phi1)
        delta_phi = angle_diff( phi2, phi1 )
        print "close form diff===", delta_phi
        # Apply the offset and measure the residual per-projection phi error.
        phi1 = []
        phi2 = []
        errorm = []
        for j in xrange( len( p1[0]) ):
            p2[0][j] = (p2[0][j] + delta_phi + 360)%360.0
            if(mirror[j]):
                phi1.append(p1[0][j])
                phi2.append(p2[0][j])
                errorm.append(angle_error( [ p2[0][j] ], [ p1[0][j] ]))
        qt = sum(errorm)/len(errorm)
        print len(errorm),qt
        # Keep the flip variant with the larger score.
        # NOTE(review): this keeps the LARGER mean of angle_error -- confirm
        # that angle_error returns a similarity rather than an error here.
        if(qt > qtm):
            qtm = qt
            p2o = []
            p2o.extend(p2)
            errormo = []
            phi1o = []
            phi2o = []
            errormo.extend(errorm)
            phi1o.extend(phi1)
            phi2o.extend(phi2)
    return p2o, errormo, agree, delta_phi, phi1o, phi2o
# according two lists of orientation or marker (phi, theta, psi for each one)
# return the global rotation (dphi, dtheta, dpsi) between the two systems
def rotation_between_anglesets(agls1, agls2):
    """
    Find overall 3D rotation (phi theta psi) between two sets of Eulerian angles.
    The two sets have to be of the same length and it is assumed that the k'th element on the first
    list corresponds to the k'th element on the second list.
    Input: two lists [[phi1, theta1, psi1], [phi2, theta2, psi2], ...]. Second list is considered reference.
    Output: overall rotation phi, theta, psi that has to be applied to the first list (agls1) so resulting
            angles will agree with the second list.
    Note: all angles have to be in spider convention.
    For details see: Appendix in Penczek, P., Marko, M., Buttle, K. and Frank, J.: Double-tilt electron tomography. Ultramicroscopy 60:393-410, 1995.
    """
    from math import sin, cos, pi, sqrt, atan2, acos, atan
    from numpy import array, linalg, matrix
    import types
    deg2rad = pi/180.0
    def ori2xyz(ori):
        # Convert one orientation (a [phi, theta, psi] list or a Transform
        # object) into a unit direction vector on the sphere.
        if(type(ori) == types.ListType):
            phi, theta, psi = ori[:3]
        else:
            # it has to be Transformation object
            d = ori.get_params("spider")
            phi = d["phi"]
            theta = d["theta"]
            psi = d["psi"]
        """
        # This makes no sense here! PAP 09/2011
        if theta > 90.0:
            phi += 180.0
            theta = 180.0-theta
        """
        phi *= deg2rad
        theta *= deg2rad
        x = sin(theta) * sin(phi)
        y = sin(theta) * cos(phi)
        z = cos(theta)
        return [x, y, z]
    N = len(agls1)
    # Both lists must pair up element-by-element; two points minimum to
    # determine a rotation.
    if N != len(agls2):
        print 'Both lists must have the same length'
        return -1
    if N < 2:
        print 'At least two orientations are required in each list'
        return -1
    U1, U2 = [], []
    for n in xrange(N):
        p1 = ori2xyz(agls1[n])
        p2 = ori2xyz(agls2[n])
        U1.append(p1)
        U2.append(p2)
    # compute all Suv with uv = {xx, xy, xz, yx, ..., zz}
    Suv = [0] * 9
    c = 0
    nbori = len(U1)
    for i in xrange(3):
        for j in xrange(3):
            for s in xrange(nbori):
                Suv[c] += (U2[s][i] * U1[s][j])
            c += 1
    # create matrix N (symmetric 4x4 of the quaternion least-squares method)
    N = array([[Suv[0]+Suv[4]+Suv[8], Suv[5]-Suv[7], Suv[6]-Suv[2], Suv[1]-Suv[3]],
               [Suv[5]-Suv[7], Suv[0]-Suv[4]-Suv[8], Suv[1]+Suv[3], Suv[6]+Suv[2]],
               [Suv[6]-Suv[2], Suv[1]+Suv[3], -Suv[0]+Suv[4]-Suv[8], Suv[5]+Suv[7]],
               [Suv[1]-Suv[3], Suv[6]+Suv[2], Suv[5]+Suv[7], -Suv[0]-Suv[4]+Suv[8]]])
    # eigenvector corresponding to the most positive eigenvalue
    val, vec = linalg.eig(N)
    q0, qx, qy, qz = vec[:, val.argmax()]
    # create quaternion Rot matrix (row-major 3x4 as expected by Transform)
    r = [q0*q0-qx*qx+qy*qy-qz*qz, 2*(qy*qx+q0*qz), 2*(qy*qz-q0*qx), 0.0,
         2*(qx*qy-q0*qz), q0*q0+qx*qx-qy*qy-qz*qz, 2*(qx*qz+q0*qy), 0.0,
         2*(qz*qy+q0*qx), 2*(qz*qx-q0*qy), q0*q0-qx*qx-qy*qy+qz*qz, 0.0]
    R = Transform(r)
    dictR = R.get_rotation('SPIDER')
    return dictR['phi'], dictR['theta'], dictR['psi']
def get_pixel_size(img):
    """
    Retrieve pixel size from the header.

    We check attribute apix_x and also the pixel size stored in the ctf
    object, if it exists. If neither is set, a warning is printed and -1.0 is
    returned; if both are set but disagree, a warning is printed and the
    larger value is returned.
    """
    p1 = img.get_attr_default("apix_x", -1.0)
    cc = img.get_attr_default("ctf", None)
    # Round the same way set_pixel_size stores it, so the comparison is fair.
    p2 = -1.0 if cc is None else round(cc.apix, 3)
    if p1 == -1.0 and p2 == -1.0:
        ERROR("Pixel size not set", "get_pixel_size", 0)
        return -1.0
    if p1 > -1.0 and p2 > -1.0 and abs(p1 - p2) >= 0.001:
        ERROR("Conflict between pixel size in attribute and in ctf object", "get_pixel_size", 0)
    # A real pixel size is positive, so max() ignores any -1.0 placeholder.
    # (Was `cc == None` plus two duplicated return branches.)
    return max(p1, p2)
def set_pixel_size(img, pixel_size):
    """
    Set pixel size in the header.

    Writes apix_x/apix_y/apix_z (rounded to 3 decimals) and, when a ctf
    object is present, stores the unrounded pixel size in it as well.
    """
    # Round once instead of three times; also drop the unused `nz` local the
    # old code obtained from img.get_zsize().
    rounded = round(pixel_size, 3)
    img.set_attr("apix_x", rounded)
    img.set_attr("apix_y", rounded)
    img.set_attr("apix_z", rounded)
    cc = img.get_attr_default("ctf", None)
    if(cc):
        cc.apix = pixel_size
        img.set_attr("ctf", cc)
def group_proj_by_phitheta_slow(proj_ang, symmetry = "c1", img_per_grp = 100, verbose = False):
    """Greedily partition projection directions into groups of ``img_per_grp``
    mutually close directions (pure-Python reference implementation; see
    group_proj_by_phitheta for the fast version).

    proj_ang is modified in place: bookkeeping fields (original index, an
    "unassigned" flag, and the precomputed direction vector) are appended to
    each entry.  Returns (proj_list, angles_list) where each proj_list entry
    is a list of member indices and each angles_list entry is
    [phi, theta, radius] of the group centre.
    """
    from time import time
    from math import exp, pi
    def get_ref_ang_list(delta, sym):
        # Flatten even_angles(delta) into [phi0, theta0, phi1, theta1, ...].
        ref_ang = even_angles(delta, symmetry=sym)
        ref_ang_list = [0.0]*(len(ref_ang)*2)
        for i in xrange(len(ref_ang)):
            ref_ang_list[2*i] = ref_ang[i][0]
            ref_ang_list[2*i+1] = ref_ang[i][1]
        return ref_ang_list, len(ref_ang)
    def gv(phi, theta):
        # Unit vector for (phi, theta); no hemisphere folding here.
        from math import pi, cos, sin
        angle_to_rad = pi/180.0
        theta *= angle_to_rad
        phi *= angle_to_rad
        x = sin(theta)*cos(phi)
        y = sin(theta)*sin(phi)
        z = cos(theta)
        return (x, y, z)
    def ang_diff(v1, v2):
        # The first return value is the angle between two vectors
        # The second return value is whether we need to mirror one of them (0 - no need, 1 - need)
        from math import acos, pi
        v = v1[0]*v2[0]+v1[1]*v2[1]+v1[2]*v2[2]
        if v > 1: v = 1
        if v < -1: v = -1
        if v >= 0: return acos(v)*180/pi, 0
        else: return acos(-v)*180/pi, 1
    t0 = time()
    proj_list = []
    angles_list = []
    N = len(proj_ang)
    if len(proj_ang[0]) == 3: # determine whether it has shifts provided, make the program more robust
        for i in xrange(N):
            proj_ang[i].append(i)
            proj_ang[i].append(True)
            vec = gv(proj_ang[i][0], proj_ang[i][1]) # pre-calculate the vector for each projection angles
            proj_ang[i].append(vec)
    else:
        for i in xrange(N):
            proj_ang[i][3] = i
            proj_ang[i][4] = True
            vec = gv(proj_ang[i][0], proj_ang[i][1]) # pre-calculate the vector for each projection angles
            proj_ang[i].append(vec)
    # Reference grids of decreasing coarseness, used to restrict the O(Nn^2)
    # distance table to the most crowded zone when many images remain.
    ref_ang_list1, nref1 = get_ref_ang_list(20.0, sym = symmetry)
    ref_ang_list2, nref2 = get_ref_ang_list(10.0, sym = symmetry)
    ref_ang_list3, nref3 = get_ref_ang_list(5.0, sym = symmetry)
    ref_ang_list4, nref4 = get_ref_ang_list(2.5, sym = symmetry)
    c = 100  # Gaussian sharpness for the density weighting below.
    L = max(100, img_per_grp)
    # This is to record whether we are considering the same group as before
    # If we are, we are only going to read the table and avoid calculating the distance again.
    previous_group = -1
    previous_zone = 5
    for grp in xrange(N/img_per_grp):
        print grp,
        N_remain = N-grp*img_per_grp
        # The idea here is that if each group has more than 100 images in average,
        # we consider it crowded enough to just consider the most crowded group.
        if N_remain >= nref4*L:
            ref_ang_list = ref_ang_list4
            nref = nref4
            if previous_zone > 4:
                previous_group = -1
                previous_zone = 4
        elif N_remain >= nref3*L:
            ref_ang_list = ref_ang_list3
            nref = nref3
            if previous_zone > 3:
                previous_group = -1
                previous_zone = 3
        elif N_remain >= nref2*L:
            ref_ang_list = ref_ang_list2
            nref = nref2
            if previous_zone > 2:
                previous_group = -1
                previous_zone = 2
        elif N_remain >= nref1*L:
            ref_ang_list = ref_ang_list1
            nref = nref1
            if previous_zone > 1:
                previous_group = -1
                previous_zone = 1
        else:
            if previous_zone > 0:
                previous_group = -1
                previous_zone = 0
        t1 = time()
        v = []
        index = []
        if N_remain >= nref1*L:
            # In this case, assign all projection to groups and only consider the most crowded group.
            proj_ang_list = [0.0]*(N_remain*2)
            nn = 0
            remain_index = [0]*N_remain
            for i in xrange(N):
                if proj_ang[i][4]:
                    proj_ang_list[nn*2] = proj_ang[i][0]
                    proj_ang_list[nn*2+1] = proj_ang[i][1]
                    remain_index[nn] = i
                    nn += 1
            asg = Util.assign_projangles(proj_ang_list, ref_ang_list)
            assignments = [[] for i in xrange(nref)]
            for i in xrange(N_remain):
                assignments[asg[i]].append(i)
            # find the largest group and record the group size and group number
            max_group_size = 0
            max_group = -1
            for i in xrange(nref):
                if len(assignments[i]) > max_group_size:
                    max_group_size = len(assignments[i])
                    max_group = i
            print max_group_size, max_group, previous_group,
            for i in xrange(len(assignments[max_group])):
                ind = remain_index[assignments[max_group][i]]
                v.append(proj_ang[ind][5])
                index.append(ind)
        else:
            # In this case, use all the projections available
            for i in xrange(N):
                if proj_ang[i][4]:
                    v.append(proj_ang[i][5])
                    index.append(i)
            max_group = 0
        t2 = time()
        Nn = len(index)
        density = [[0.0, 0] for i in xrange(Nn)]
        if max_group != previous_group:
            # Recompute the pairwise Gaussian-weighted distance table.
            diff_table = [[0.0 for i in xrange(Nn)] for j in xrange(Nn)]
            for i in xrange(Nn-1):
                for j in xrange(i+1, Nn):
                    diff = ang_diff(v[i], v[j])
                    q = exp(-c*(diff[0]/180.0*pi)**2)
                    diff_table[i][j] = q
                    diff_table[j][i] = q
            diff_table_index = dict()
            for i in xrange(Nn): diff_table_index[index[i]] = i
            print Nn, True,
        else:
            print Nn, False,
        t21 = time()
        # Density of each point = sum of its Gaussian weights to all others.
        for i in xrange(Nn):
            density[i][0] = sum(diff_table[diff_table_index[index[i]]])
            density[i][1] = i
        t22 = time()
        density.sort(reverse=True)
        t3 = time()
        # Rank everything by angular distance to the densest point.
        dang = [[0.0, 0] for i in xrange(Nn)]
        most_dense_point = density[0][1]
        for i in xrange(Nn):
            diff = ang_diff(v[i], v[most_dense_point])
            dang[i][0] = diff[0]
            dang[i][1] = i
        dang[most_dense_point][0] = -1.
        dang.sort()
        t4 = time()
        # Take the img_per_grp closest points as a group; zero their rows in
        # the table so the next iteration can reuse it.
        members = [0]*img_per_grp
        for i in xrange(img_per_grp):
            idd = index[dang[i][1]]
            for j in xrange(len(diff_table)):
                diff_table[diff_table_index[idd]][j] = 0.0
                diff_table[j][diff_table_index[idd]] = 0.0
            members[i] = idd
            proj_ang[members[i]][4] = False
        proj_list.append(members)
        center_i = index[dang[0][1]]
        angles_list.append([proj_ang[center_i][0], proj_ang[center_i][1], dang[img_per_grp-1][0]])
        previous_group = max_group
        print t2-t1, t3-t2, t22-t21, t3-t22, t4-t3
    # Leftovers: make one extra group if there are enough, otherwise append
    # them to the last group.
    if N%img_per_grp*3 >= 2*img_per_grp:
        members = []
        for i in xrange(N):
            if proj_ang[i][4]:
                members.append(i)
        proj_list.append(members)
        angles_list.append([proj_ang[members[0]][0], proj_ang[members[0]][1], 90.0])
    elif N%img_per_grp != 0:
        for i in xrange(N):
            if proj_ang[i][4]:
                proj_list[-1].append(i)
    print "Total time used = ", time()-t0
    return proj_list, angles_list
def group_proj_by_phitheta(proj_ang, symmetry = "c1", img_per_grp = 100, verbose = False):
    """Partition projection directions into groups of ``img_per_grp`` using the
    compiled helper ``Util.group_proj_by_phitheta``.

    Returns (proj_list_new, angles_list, mirror_list): per group the member
    indices, [phi, theta, radius] of the first member, and per-member flags
    that are True when the direction was used unmirrored (the C helper encodes
    mirroring as a negative index).
    """
    from math import exp, pi
    def gv(phi, theta):
        # Unit vector for (phi, theta); no hemisphere folding here.
        from math import pi, cos, sin
        angle_to_rad = pi/180.0
        theta *= angle_to_rad
        phi *= angle_to_rad
        x = sin(theta)*cos(phi)
        y = sin(theta)*sin(phi)
        z = cos(theta)
        return (x, y, z)
    def ang_diff(v1, v2):
        # The first return value is the angle between two vectors
        # The second return value is whether we need to mirror one of them (0 - no need, 1 - need)
        from math import acos, pi
        v = v1[0]*v2[0]+v1[1]*v2[1]+v1[2]*v2[2]
        if v > 1: v = 1
        if v < -1: v = -1
        if v >= 0: return acos(v)*180/pi, 0
        else: return acos(-v)*180/pi, 1
    def get_ref_ang_list(delta, sym):
        # Flatten even_angles(delta) into [phi0, theta0, phi1, theta1, ...].
        ref_ang = even_angles(delta, symmetry=sym)
        ref_ang_list = [0.0]*(len(ref_ang)*2)
        for i in xrange(len(ref_ang)):
            ref_ang_list[2*i] = ref_ang[i][0]
            ref_ang_list[2*i+1] = ref_ang[i][1]
        return ref_ang_list, len(ref_ang)
    N = len(proj_ang)
    proj_ang_list = [0]*(N*2)
    for i in xrange(N):
        proj_ang_list[i*2] = proj_ang[i][0]
        proj_ang_list[i*2+1] = proj_ang[i][1]
    # Four reference grids (coarse to fine) packed one after another, with
    # their sizes appended at the end -- the layout the C helper expects.
    ref_ang_list1, nref1 = get_ref_ang_list(20.0, sym = symmetry)
    ref_ang_list2, nref2 = get_ref_ang_list(10.0, sym = symmetry)
    ref_ang_list3, nref3 = get_ref_ang_list(5.0, sym = symmetry)
    ref_ang_list4, nref4 = get_ref_ang_list(2.5, sym = symmetry)
    ref_ang_list = []
    ref_ang_list.extend(ref_ang_list1)
    ref_ang_list.extend(ref_ang_list2)
    ref_ang_list.extend(ref_ang_list3)
    ref_ang_list.extend(ref_ang_list4)
    ref_ang_list.append(nref1)
    ref_ang_list.append(nref2)
    ref_ang_list.append(nref3)
    ref_ang_list.append(nref4)
    proj_list = Util.group_proj_by_phitheta(proj_ang_list, ref_ang_list, img_per_grp)
    # Sanity check: the (sign-stripped) result must be a permutation of 0..N-1.
    proj_list2 = proj_list[:]
    for i in xrange(len(proj_list2)): proj_list2[i] = abs(proj_list2[i])
    proj_list2.sort()
    assert N == len(proj_list2)
    for i in xrange(N): assert i == proj_list2[i]
    Ng = N/img_per_grp
    proj_list_new = [[] for i in xrange(Ng)]
    mirror_list = [[] for i in xrange(Ng)]
    angles_list = []
    for i in xrange(Ng):
        for j in xrange(img_per_grp):
            proj_list_new[i].append(abs(proj_list[i*img_per_grp+j]));
            mirror_list[i].append(proj_list[i*img_per_grp+j] >= 0)
        # Group radius = angle between the first and the last (farthest) member.
        phi1 = proj_ang[proj_list_new[i][0]][0];
        theta1 = proj_ang[proj_list_new[i][0]][1];
        phi2 = proj_ang[proj_list_new[i][-1]][0];
        theta2 = proj_ang[proj_list_new[i][-1]][1];
        angles_list.append([phi1, theta1, ang_diff(gv(phi1, theta1), gv(phi2, theta2))[0]]);
    # Leftovers: an extra group if there are enough, otherwise appended to
    # the last group.
    if N%img_per_grp*3 >= 2*img_per_grp:
        proj_list_new.append([])
        mirror_list.append([])
        for i in xrange(Ng*img_per_grp, N):
            proj_list_new[-1].append(abs(proj_list[i]));
            mirror_list[-1].append(proj_list[i] >= 0)
        phi1 = proj_ang[proj_list_new[Ng][0]][0];
        theta1 = proj_ang[proj_list_new[Ng][0]][1];
        phi2 = proj_ang[proj_list_new[Ng][-1]][0];
        theta2 = proj_ang[proj_list_new[Ng][-1]][1];
        angles_list.append([phi1, theta1, ang_diff(gv(phi1, theta1), gv(phi2, theta2))[0]]);
    elif N%img_per_grp != 0:
        for i in xrange(Ng*img_per_grp, N):
            proj_list_new[-1].append(abs(proj_list[i]))
            mirror_list[-1].append(proj_list[i] >= 0)
    return proj_list_new, angles_list, mirror_list
def nearest_proj(proj_ang, img_per_grp=100, List=[]):
    """For every projection in ``List``, find its ``img_per_grp`` angularly
    nearest neighbours among all projections (antipodal directions count as
    mirrored matches).

    Uses sorted phi/theta tables and an expanding search window so the full
    O(N) distance scan is only done over a small candidate set.
    Returns (proj_list, mirror_list): for each query, the neighbour indices in
    order of increasing angular distance, and per-neighbour mirror flags.
    NOTE: mutable default ``List=[]`` is only read here, never mutated.
    """
    from math import exp, pi
    from sets import Set
    from time import time
    from random import randint
    def gv(phi, theta):
        # Unit vector for (phi, theta); no hemisphere folding here.
        from math import pi, cos, sin
        angle_to_rad = pi/180.0
        theta *= angle_to_rad
        phi *= angle_to_rad
        x = sin(theta)*cos(phi)
        y = sin(theta)*sin(phi)
        z = cos(theta)
        return (x, y, z)
    def ang_diff(v1, v2):
        # The first return value is the angle between two vectors
        # The second return value is whether we need to mirror one of them (0 - no need, 1 - need)
        from math import acos, pi
        v = v1[0]*v2[0]+v1[1]*v2[1]+v1[2]*v2[2]
        if v > 1: v = 1
        if v < -1: v = -1
        if v >= 0: return acos(v)*180/pi, 0
        else: return acos(-v)*180/pi, 1
    def get_ref_ang_list(delta, sym):
        ref_ang = even_angles(delta, symmetry=sym)
        ref_ang_list = [0.0]*(len(ref_ang)*2)
        for i in xrange(len(ref_ang)):
            ref_ang_list[2*i] = ref_ang[i][0]
            ref_ang_list[2*i+1] = ref_ang[i][1]
        return ref_ang_list, len(ref_ang)
    def binary_search(a, x):
        # Standard binary search; -1 when x is absent.
        N = len(a)
        begin = 0
        end = N-1
        while begin <= end:
            mid = (begin+end)/2
            if a[mid] == x: return mid
            if a[mid] < x: begin = mid+1
            else: end = mid-1
        return -1
    def binary_search_l(a, x):
        # This function returns an index i such that i is the smallest number
        # such that when t >= i, a[t] >= x
        N = len(a)
        t = binary_search(a, x)
        if t != -1:
            while t-1 >= 0 and a[t-1] == a[t]: t -= 1
            return t
        else:
            if x > a[N-1]: return -1;
            if x < a[0]: return 0;
            begin = 0
            end = N-2
            while end >= begin:
                mid = (begin+end)/2
                if x > a[mid] and x < a[mid+1]: break;
                if x < a[mid]: end = mid-1
                else: begin = mid+1
            return mid+1
    def binary_search_r(a, x):
        # This function returns an index i such that i is the largest number
        # such that when t <= i, a[t] <= x
        N = len(a)
        t = binary_search(a, x)
        if t != -1:
            while t+1 <= N-1 and a[t+1] == a[t]: t += 1
            return t
        else:
            if x > a[N-1]: return N-1;
            if x < a[0]: return -1;
            begin = 0
            end = N-2
            while end >= begin:
                mid = (begin+end)/2
                if x > a[mid] and x < a[mid+1]: break;
                if x < a[mid]: end = mid-1
                else: begin = mid+1
            return mid
    N = len(proj_ang)
    if len(List) == 0: List = range(N)
    if N < img_per_grp:
        print "Error: image per group larger than the number of particles!"
        exit()
    # Build sorted (value, original-index) tables of folded phi and theta.
    phi_list = [[0.0, 0] for i in xrange(N)]
    theta_list = [[0.0, 0] for i in xrange(N)]
    vec = [None]*N
    for i in xrange(N):
        phi = proj_ang[i][0]
        theta = proj_ang[i][1]
        vec[i] = gv(phi, theta)
        # Fold into the upper hemisphere for the lookup tables only.
        if theta > 90.0:
            theta = 180.0-theta
            phi += 180.0
        phi = phi%360.0
        phi_list[i][0] = phi
        phi_list[i][1] = i
        theta_list[i][0] = theta
        theta_list[i][1] = i
    theta_list.sort()
    phi_list.sort()
    theta_list_l = [0.0]*N
    phi_list_l = [0.0]*N
    for i in xrange(N):
        theta_list_l[i] = theta_list[i][0]
        phi_list_l[i] = phi_list[i][0]
    g = [[360.0, 0, 0] for i in xrange(N)]
    proj_list = []
    mirror_list = []
    neighbor = [0]*img_per_grp
    #neighbor2 = [0]*img_per_grp
    dis = [0.0]*img_per_grp
    #dis2 = [0.0]*img_per_grp
    mirror = [0]*img_per_grp
    S = Set()
    T = Set()
    #tt1 = time()
    for i in xrange(len(List)):
        k = List[i]
        #print "\nCase #%3d: Testing projection %6d"%(i, k)
        #t1 = time()
        phi = proj_ang[k][0]
        theta = proj_ang[k][1]
        if theta > 90.0:
            theta = 180.0-theta
            phi += 180.0
        phi = phi%360.0
        # Grow the (phi, theta) search window until enough candidates fall in.
        delta = 0.01
        while True:
            min_theta = max(0.0, theta-delta)
            max_theta = min(90.0, theta+delta)
            if min_theta == 0.0:
                # Near the pole every phi is close; search the full circle.
                min_phi = 0.0
                max_phi = 360.0
            else:
                dphi = min(delta/(2*min_theta)*180.0, 180.0)
                min_phi = phi - dphi
                max_phi = phi + dphi
                if min_phi < 0.0: min_phi += 360.0
                if max_phi > 360.0: max_phi -= 360.0
                if theta+delta > 90.0:
                    # Window spills over the equator: also search around the
                    # mirrored phi.
                    phi_mir = (phi+180.0)%360.0
                    min_phi_mir = phi_mir - dphi
                    max_phi_mir = phi_mir + dphi
                    if min_phi_mir < 0.0: min_phi_mir += 360.0
                    if max_phi_mir > 360.0: max_phi_mir -= 360.0
            phi_left_bound = binary_search_l(phi_list_l, min_phi)
            phi_right_bound = binary_search_r(phi_list_l, max_phi)
            theta_left_bound = binary_search_l(theta_list_l, min_theta)
            theta_right_bound = binary_search_r(theta_list_l, max_theta)
            if theta+delta > 90.0:
                phi_mir_left_bound = binary_search_l(phi_list_l, min_phi_mir)
                phi_mir_right_bound = binary_search_r(phi_list_l, max_phi_mir)
            #print delta
            #print min_phi, max_phi, min_theta, max_theta
            #print phi_left_bound, phi_right_bound, theta_left_bound, theta_right_bound
            # Collect candidates whose phi lies in the (possibly wrapped) window.
            if phi_left_bound < phi_right_bound:
                for j in xrange(phi_left_bound, phi_right_bound+1):
                    S.add(phi_list[j][1])
            else:
                for j in xrange(phi_right_bound+1):
                    S.add(phi_list[j][1])
                for j in xrange(phi_left_bound, N):
                    S.add(phi_list[j][1])
            if theta+delta > 90.0:
                if phi_mir_left_bound < phi_mir_right_bound:
                    for j in xrange(phi_mir_left_bound, phi_mir_right_bound+1):
                        S.add(phi_list[j][1])
                else:
                    for j in xrange(phi_mir_right_bound+1):
                        S.add(phi_list[j][1])
                    for j in xrange(phi_mir_left_bound, N):
                        S.add(phi_list[j][1])
            for j in xrange(theta_left_bound, theta_right_bound+1):
                T.add(theta_list[j][1])
            # Candidates must satisfy both the phi and the theta window.
            v = list(T.intersection(S))
            S.clear()
            T.clear()
            if len(v) >= min(1.5*img_per_grp, N): break
            delta *= 2
            del v
        # Exact angular distances for the surviving candidates only.
        for j in xrange(len(v)):
            d = ang_diff(vec[v[j]], vec[k])
            g[j][0] = d[0]
            if v[j] == k: g[j][0] = -1. # To ensure the image itself is always included in the group
            g[j][1] = d[1]
            g[j][2] = v[j]
        g[:len(v)] = sorted(g[:len(v)])
        for j in xrange(img_per_grp):
            neighbor[j] = g[j][2]
            dis[j] = g[j][0]
            mirror[j] = (g[j][1] == 1)
        proj_list.append(neighbor[:])
        mirror_list.append(mirror[:])
        #t2 = time()
        '''
        for j in xrange(N):
            d = ang_diff(vec[j], vec[k])
            g[j][0] = d[0]
            g[j][1] = d[1]
            g[j][2] = j
        g.sort()
        for j in xrange(img_per_grp):
            neighbor2[j] = g[j][2]
            dis2[j] = g[j][0]
        t3 = time()
        print "Members in common = %3d   extra delta = %6.3f   time1 = %5.2f   time2 = %5.2f"%(len(Set(neighbor).intersection(Set(neighbor2))),
            dis[-1]-dis2[-1], t2-t1, t3-t2)
        '''
    #tt2 = time()
    #print tt2-tt1
    return proj_list, mirror_list
import numpy as np
import pandas as pd
from flask import Flask, request, render_template
import joblib
# Flask application plus the pre-trained loan-approval model (a scikit-learn
# estimator serialized with joblib, loaded once at import time).
app = Flask(__name__)
model = joblib.load('Bank Loan Prediction.pkl')
@app.route('/')
def home():
    """Render the loan-application form."""
    return render_template('index_bank.html')
@app.route('/predict', methods=['POST', 'GET'])
def predict():
    """Handle the loan form: run the model on POST, re-render the form on GET.

    The model encodes approval as class 0, so a 0 prediction yields the
    approval message.
    """
    if request.method == 'POST':
        # NOTE(review): this relies on the form fields arriving in the same
        # order as feature_names below -- safer to read each field by name.
        input_features = [int(x) for x in request.form.values()]
        feature_values = [np.array(input_features)]
        feature_names = ['age', 'experience', 'income', 'family', 'education']
        df = pd.DataFrame(feature_values, columns=feature_names)
        out = model.predict(df)
        # BUG FIX: compare the scalar prediction, not the returned ndarray
        # (``out == 0`` yields an array, and truth-testing it is deprecated).
        if out[0] == 0:
            res = 'Congratulations, Loan Approved!!'
        else:
            res = 'Sorry, Loan Denied'
        return render_template('index_bank.html', prediction_text=res)
    return render_template('index_bank.html')
# Start the Flask development server (debug mode) when executed directly.
if __name__ == '__main__':
    app.run(debug=True)
from flask import Flask, g
from flask_restful import Api, Resource, reqparse, fields, abort
from flask_sqlalchemy import SQLAlchemy
import random, string, json, re
from passlib.apps import custom_app_context as pwd_context
from model_helpers import make_jsonifiable, update_model, format_features
from models import User, ProductArea, Client, FeatureRequest, InvalidatedAuthTokens
from backend import auth, app, jwt, db
from request_parsers import set_feature_reqparse, set_register_reqparse,\
set_login_reqparse, set_google_reqparse, set_client_reqparse,\
set_product_area_reqparse, set_logout_reqparse
@auth.verify_token
def verify_token(token):
    """Token verification callback for flask-httpauth.

    Rejects tokens that were explicitly invalidated (logout), then decodes
    the signed token and loads the matching user into ``g.user``.  Aborts
    with 401 when the token cannot be decoded or carries no user id.
    """
    g.user = None
    invalidToken = InvalidatedAuthTokens.query.filter_by(invalid_token=token).first()
    if invalidToken is not None:
        return False
    try:
        data = jwt.loads(token)
    except Exception:
        # BUG FIX: was a bare ``except:``, which would also swallow
        # SystemExit/KeyboardInterrupt; catch ordinary decode errors only.
        abort(401, authorized=False)
    if 'user' in data:
        social_id = data['user']
        g.user = User.query.filter_by(social_id=social_id).first()
        return True
    abort(401, authorized=False)
class RegisterAPI(Resource):
    """POST /register -- create a local (non-social) account and return a token."""

    def __init__(self):
        set_register_reqparse(self)
        super(RegisterAPI, self).__init__()

    def post(self):
        args = self.reqparse.parse_args()
        fullname = args['fullname']
        email = args['email']
        password = args['password']
        unique_social_id = False
        # Local accounts get a synthetic 'non_social...' id; retry until one
        # not yet present in the User table is found.
        social_id = 'non_social' + str(int(random.random()*1000000000))
        while unique_social_id is False:
            user = User.query.filter_by(social_id=social_id).first()
            if user is not None:
                social_id = 'non_social' + str(random.random()*random.random()*1000)
            else:
                unique_social_id = True
        user = User(social_id=social_id, fullname=fullname, email=email)
        # Only the password hash is persisted (see User.hash_password).
        user.hash_password(password)
        db.session.add(user)
        db.session.commit()
        # NOTE(review): duplicate emails are not checked here -- presumably
        # the DB constraint raises; confirm the intended behaviour.
        token = jwt.dumps({'user':user.social_id})
        return {'token': token}, 201
class LoginAPI(Resource):
    """POST /login -- verify email/password, return a token plus the username."""

    def __init__(self):
        set_login_reqparse(self)
        super(LoginAPI, self).__init__()

    def post(self):
        args = self.reqparse.parse_args()
        email = args['email']
        password = args['password']
        user = User.query.filter_by(email=email).first()
        # BUG FIX: an unknown email used to raise AttributeError because
        # ``user`` was None; treat it the same as a wrong password so the
        # caller always gets the generic 400 response.
        if user is None or not user.verify_password(password):
            return {'message': {'login_error': 'Invalid email or password'}}, 400
        token = jwt.dumps({'user':user.social_id})
        return {'token': token, 'username': user.fullname}
class GoogleLogin(Resource):
    """POST /login/google -- find-or-create a user from Google profile data
    and return a signed token."""

    def __init__(self):
        set_google_reqparse(self)
        super(GoogleLogin, self).__init__()

    def post(self):
        args = self.reqparse.parse_args()
        user = User.query.filter_by(social_id=args['social_id']).first()
        if not user:
            # First login through Google: provision the account.
            user = User(social_id=args['social_id'],
                        fullname=args['fullname'],
                        email=args['email'])
            db.session.add(user)
            db.session.commit()
        return {'token': jwt.dumps({'user': user.social_id})}
class VerifyAuthAPI(Resource):
    """GET /auth/verify -- succeeds (200) only when the bearer token is valid."""

    @auth.login_required
    def get(self):
        # login_required has already aborted with 401 for invalid tokens.
        return {'authorized': True}
class FeatureRequestAPI(Resource):
    """CRUD endpoints for feature requests (/feature and /feature/<id>)."""

    def __init__(self):
        set_feature_reqparse(self)
        super(FeatureRequestAPI, self).__init__()

    @auth.login_required
    def get(self, feature_id):
        # Build, per client, the priority-ordered list of all OTHER features
        # plus the lookup data (clients, product areas) the edit form needs.
        clients = Client.query.all()
        formatted_clients = make_jsonifiable(Client, clients)
        product_areas = ProductArea.query.all()
        formatted_product_areas = make_jsonifiable(ProductArea, product_areas)
        client_list = {}
        for client in clients:
            client_features = client.client_features.order_by(
                FeatureRequest.priority).filter(FeatureRequest.id != feature_id).all()
            for feature in client_features:
                # Dates are not JSON-serializable; stringify in place.
                feature.target_date = str(feature.target_date)
            formatted_client_list = make_jsonifiable(FeatureRequest, client_features)
            client_list[client.name]= formatted_client_list
        feature = FeatureRequest.query.filter_by(id=feature_id).first()
        if feature is not None:
            feature.target_date = str(feature.target_date)
            formatted_feature = make_jsonifiable(FeatureRequest, feature)
        # NOTE(review): when feature_id does not exist, formatted_feature is
        # unbound and the return below raises NameError -- confirm whether a
        # 404 was intended instead.
        return {'clients_features':client_list, 'clients': formatted_clients,
                'product_areas': formatted_product_areas, 'feature': formatted_feature}

    @auth.login_required
    def post(self):
        args = self.reqparse.parse_args()
        # The client submits a JSON-encoded list describing the new ordering
        # of existing features; apply it before inserting the new one.
        features_to_reorder = json.loads(args['submitted_feature_list'])
        try:
            FeatureRequest.reorder_features(features_to_reorder)
        except Exception:
            return {'message': {'error': 'Unable to update feature request at this time'}}, 400
        try:
            feature = FeatureRequest()
            feature.set_feature_fields(g.user, args)
            db.session.add(feature)
            db.session.commit()
            return {'message': 'created'}
        except Exception:
            return {'message': {'error': 'Error saving Feature'}}, 400

    @auth.login_required
    def put(self, feature_id):
        args = self.reqparse.parse_args()
        features_to_reorder = json.loads(args['submitted_feature_list'])
        try:
            FeatureRequest.reorder_features(features_to_reorder)
        except Exception:
            return {'message': {'error': 'Error updating client features'}}, 400
        update_feature = FeatureRequest.query.filter_by(id=feature_id).first()
        try:
            user = g.user
            update_feature.set_feature_fields(user, args)
            db.session.add(update_feature)
            db.session.commit()
            return {'message': 'updated'}
        except Exception:
            return {'message': {'error': 'Error saving Feature'}}, 400

    @auth.login_required
    def delete(self, feature_id):
        feature = FeatureRequest.query.filter_by(id=feature_id).first()
        if feature is None:
            return {'message' :{'error':'Feature does not exist'}}, 400
        try:
            FeatureRequest.query.filter_by(id=feature_id).delete()
            db.session.commit()
            return {'success': 'Feature deleted'}
        except Exception:
            return {'message': {'error': 'Unable to delete feature at this time'}}, 400
class RetrieveFeatures(Resource):
    """GET / -- all features plus the current user's own, ordered by priority."""

    @auth.login_required
    def get(self):
        try:
            features = FeatureRequest.query.all()
            user_features = FeatureRequest.query.join(User).filter(
                FeatureRequest.user_id == g.user.id
            ).order_by(
                FeatureRequest.priority
            ).all()
            formatted_features = format_features(FeatureRequest, features)
            for feature in user_features:
                # Dates are not JSON-serializable; stringify in place.
                feature.target_date = str(feature.target_date)
            formatted_user_features = make_jsonifiable(FeatureRequest, user_features);
            return {'features': formatted_features, 'user_features': formatted_user_features,
                    'user_name': g.user.fullname}
        except Exception:
            return {'message': {'error': 'Server error'}}, 400
class ClientAPI(Resource):
    """POST /client -- create a new client record."""

    def __init__(self):
        set_client_reqparse(self)
        super(ClientAPI, self).__init__()

    @auth.login_required
    def post(self):
        args = self.reqparse.parse_args()
        try:
            db.session.add(Client(name=args['name']))
            db.session.commit()
        except Exception:
            return {'message': {'error': 'Unable to save client at this time'}}, 400
        return {'message': 'Success'}
class ProductAreaAPI(Resource):
    """POST /product_area -- create a new product area."""

    def __init__(self):
        set_product_area_reqparse(self)
        super(ProductAreaAPI, self).__init__()

    @auth.login_required
    def post(self):
        args = self.reqparse.parse_args()
        try:
            area = ProductArea(name=args['name'],
                               description=args['description'])
            db.session.add(area)
            db.session.commit()
        except Exception:
            return {'message': {'error':'Unable to save product area at this time'}}, 400
        return {'message': 'Success'}
class RetrieveFeatureInfo(Resource):
    """GET /feature-priorities -- per-client feature lists plus lookup data."""

    @auth.login_required
    def get(self):
        clients = Client.query.all()
        formatted_clients = make_jsonifiable(Client, clients)
        product_areas = ProductArea.query.all()
        formatted_product_areas = make_jsonifiable(ProductArea, product_areas)
        client_list = {}
        for client in clients:
            client_features = client.client_features.order_by(FeatureRequest.priority).all()
            for feature in client_features:
                # Dates are not JSON-serializable; stringify in place.
                feature.target_date = str(feature.target_date)
            formatted_client_list = make_jsonifiable(FeatureRequest, client_features)
            client_list[client.name]= formatted_client_list
        return {'clients_features':client_list, 'clients': formatted_clients,
                'product_areas': formatted_product_areas}
class LogoutAPI(Resource):
    """GET /logout -- blacklist the caller's bearer token."""

    def __init__(self):
        set_logout_reqparse(self)
        super(LogoutAPI, self).__init__()

    @auth.login_required
    def get(self):
        args = self.reqparse.parse_args()
        try:
            # Strip the "Bearer " scheme prefix and persist the raw token so
            # verify_token rejects it from now on.
            raw_token = re.sub('Bearer ', '', args['Authorization'])
            db.session.add(InvalidatedAuthTokens(invalid_token=raw_token))
            db.session.commit()
        except Exception:
            return {'message': {'error': 'Unable to invalidate User token'}}, 400
        return {'success': 'logged out'}
# Route registration: wire each resource class to its public endpoint.
api = Api(app)
api.add_resource(RegisterAPI, '/register', endpoint='register')
api.add_resource(VerifyAuthAPI, '/auth/verify', endpoint='auth')
api.add_resource(LoginAPI, '/login', endpoint='login')
api.add_resource(GoogleLogin, '/login/google', endpoint='google_login')
api.add_resource(RetrieveFeatures, '/', endpoint='home')
api.add_resource(RetrieveFeatureInfo, '/feature-priorities', endpoint='feature_priority')
api.add_resource(FeatureRequestAPI, '/feature', endpoint='feature')
# Same resource again for the id-qualified routes (GET/PUT/DELETE by id).
api.add_resource(FeatureRequestAPI, '/feature/<string:feature_id>')
api.add_resource(ClientAPI, '/client', endpoint='client')
api.add_resource(ProductAreaAPI, '/product_area', endpoint='product_area')
api.add_resource(LogoutAPI, '/logout');
OVERNIGHT_ALGORITHM_NAME = 'overnight-us-stocks'
# Names of the scheduled cloud functions driving the evening-buy and
# morning-sell legs of the overnight strategy.
EVENING_BUY_FUNCTION_NAME = 'evening-buy-us-stocks-scheduled-on-kirill-windows-machine'
MORNING_SELL_FUNCTION_NAME = 'morning-sell-us-stocks-scheduled-on-kirill-windows-machine'
"""
High Yield Low Vol system
Attempts to replicate http://imarketsignals.com/2016/trading-the-high-yield-low-volatility-stocks-of-the-sp500-with-the-im-hid-lov-7-system/
"""
from quantopian.algorithm import attach_pipeline, pipeline_output
from quantopian.pipeline import Pipeline
from quantopian.pipeline import CustomFactor
from quantopian.pipeline.filters import Q500US
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.pipeline.data import morningstar
import numpy as np
import pandas as pd
# Volatility factor
class Volatility(CustomFactor):
    """Inverse standard deviation of daily log returns over the factor window.

    Inverted so that a *larger* value means *lower* volatility, which lets the
    caller rank with "largest is best".
    """
    inputs = [USEquityPricing.close]

    def compute(self, today, assets, out, close):
        prices = pd.DataFrame(data=close, columns=assets)
        log_returns = np.log(prices).diff()
        out[:] = 1 / log_returns.std()
# Yield Factor
class Yield(CustomFactor):
inputs = [morningstar.valuation_ratios.total_yield]
window_length = 1
def compute(self, today, assets, out, syield):
out[:] = syield[-1]
def initialize(context):
    """Build the low-vol/high-yield pipeline over an S&P 500 proxy and schedule trading."""
    # how many days to look back volatility and returns
    context.lookback=3*252 # 3 years
    # Fully invested, no leverage.
    context.long_leverage = 1.0
    #set_benchmark(sid(41382)) #SPLV
    pipe = Pipeline()
    attach_pipeline(pipe, 'lvhy')
    # This is an approximation of the S&P 500
    top_500=Q500US() # Q1500US()
    volatility=Volatility(window_length=context.lookback)
    pipe.add(volatility, 'volatility')
    # Rank factor 1 (inverse volatility) and add the rank to our pipeline
    volatility_rank = volatility.rank(mask=top_500)
    pipe.add(volatility_rank, 'volatility_rank')
    syield = Yield()
    pipe.add(syield, 'yield')
    # Rank factor 2 (total yield) and add the rank to our pipeline
    yield_rank = syield.rank(mask=top_500)
    pipe.add(yield_rank, 'yield_rank')
    # Take the average of the two factor rankings, add this to the pipeline
    combo_raw = (volatility_rank + yield_rank)/2
    pipe.add(combo_raw, 'combo_raw')
    # Rank the combo_raw and add that to the pipeline
    pipe.add(combo_raw.rank(mask=top_500), 'combo_rank')
    # Screen down to the S&P 500 proxy universe.
    pipe.set_screen(top_500 )
    # Schedule the rebalance function: first trading day of each month,
    # 30 minutes after the open.
    schedule_function(func=rebalance,
        date_rule=date_rules.month_start(days_offset=0),
        time_rule=time_rules.market_open(hours=0,minutes=30),
        half_days=True)
    # Schedule the daily plotting function at the close.
    schedule_function(func=record_vars,
        date_rule=date_rules.every_day(),
        time_rule=time_rules.market_close(),
        half_days=True)
def before_trading_start(context, data):
    """Refresh the pipeline output and pick the 7 best combo-ranked stocks."""
    # Call pipeline_output to get the latest factor table.
    context.output = pipeline_output('lvhy')
    # Load the long list: top 7 by combined (low-vol + high-yield) rank.
    context.long_list = context.output.sort_values(by='combo_rank', ascending=False).iloc[:7]
def record_vars(context, data):
    """Plot account leverage and the number of open positions each day."""
    record(leverage = context.account.leverage, long_count=len(context.portfolio.positions))
# This rebalancing is called according to our schedule_function settings.
def rebalance(context, data):
    """Equal-weight the selected longs, then close anything not on the list."""
    if len(context.long_list):
        long_weight = context.long_leverage / float(len(context.long_list))
        log.info("\n" + str(context.long_list.sort_values(by='combo_rank', ascending=False)))
        for long_stock in context.long_list.index:
            order_target_percent(long_stock, long_weight)
    # Exit positions that dropped out of the target list.
    # Fixed: dict.iterkeys() is Python-2-only; plain iteration is equivalent
    # and works on both Python 2 and 3.
    for stock in context.portfolio.positions:
        if stock not in context.long_list.index:
            order_target(stock, 0)
|
21,825 | e6623c769215adc7d747a1e073beadad613f46a3 | # Copyright (c) 2020 DDN. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
from collections import defaultdict
from chroma_core.services.job_scheduler.job_scheduler_client import JobSchedulerClient
from django.db.models import Q
from django.core.exceptions import ObjectDoesNotExist
from chroma_core.models import ManagedFilesystem, ManagedTarget
from chroma_core.models import ManagedOst, ManagedMdt, ManagedMgs
from chroma_core.models import Volume, VolumeNode
from chroma_core.models import Command, OstPool
from chroma_core.models.filesystem import HSM_CONTROL_KEY, HSM_CONTROL_PARAMS
import tastypie.http as http
from tastypie import fields
from tastypie.exceptions import NotFound
from tastypie.validation import Validation
from chroma_api.authentication import AnonymousAuthentication, PatchedDjangoAuthorization
from chroma_api.chroma_model_resource import ChromaModelResource
from chroma_api.utils import custom_response, ConfParamResource, dehydrate_command
from chroma_api.validation_utils import validate
from chroma_core.lib import conf_param
class FilesystemValidation(Validation):
    """Validation for the filesystem resource.

    PUT may only change conf_params (and only on managed filesystems);
    POST must describe a complete, well-formed filesystem (MGT + MDTs + OSTs).
    """

    def _validate_put(self, bundle, request):
        """Allow conf_params changes only on managed (non-immutable) filesystems."""
        errors = defaultdict(list)
        if "conf_params" in bundle.data and bundle.data["conf_params"] is not None:
            try:
                fs = ManagedFilesystem.objects.get(pk=bundle.data["id"])
            except ManagedFilesystem.DoesNotExist:
                errors["id"] = "Filesystem with id %s not found" % bundle.data["id"]
            except KeyError:
                errors["id"] = "Field is mandatory"
            else:
                if fs.immutable_state:
                    # Monitored-only filesystems: params may be re-sent unchanged,
                    # but not modified.
                    if not conf_param.compare(bundle.data["conf_params"], conf_param.get_conf_params(fs)):
                        errors["conf_params"].append("Cannot modify conf_params on immutable_state objects")
                else:
                    conf_param_errors = conf_param.validate_conf_params(ManagedFilesystem, bundle.data["conf_params"])
                    if conf_param_errors:
                        errors["conf_params"] = conf_param_errors
        return errors

    def _validate_post(self, bundle, request):
        """Validate a filesystem-creation payload; returns a (possibly empty) errors dict."""
        errors = defaultdict(list)
        targets = defaultdict(list)

        # Check 'mgt', 'mdts', 'osts' are present and compose
        # a record of targets which will be formatted
        try:
            # Check that client hasn't specified an existing MGT
            # *and* a volume to format.
            if "id" in bundle.data["mgt"] and "volume_id" in bundle.data["mgt"]:
                errors["mgt"].append("id and volume_id are mutually exclusive")
            mgt = bundle.data["mgt"]
            if "volume_id" in mgt:
                targets["mgt"].append(mgt)
        except KeyError:
            errors["mgt"].append("This field is mandatory")
        try:
            targets["mdts"].extend(bundle.data["mdts"])
        except KeyError:
            errors["mdts"].append("This field is mandatory")
        try:
            targets["osts"].extend(bundle.data["osts"])
        except KeyError:
            errors["osts"].append("This field is mandatory")
        if "conf_params" not in bundle.data:
            errors["conf_params"].append("This field is mandatory")
        if "name" not in bundle.data:
            errors["name"].append("This field is mandatory")

        # Return if some of the things we're going to validate in detail are absent
        if len(errors):
            return errors

        # As all fields are present we can be more specific about the errors.
        errors["mgt"] = defaultdict(list)
        errors["mdts"] = defaultdict(list)
        errors["osts"] = defaultdict(list)

        # Validate filesystem name
        if len(bundle.data["name"]) > 8:
            errors["name"].append("Name '%s' too long (max 8 characters)" % bundle.data["name"])
        if len(bundle.data["name"]) < 1:
            errors["name"].append("Name '%s' too short (min 1 character)" % bundle.data["name"])
        if bundle.data["name"].find(" ") != -1:
            errors["name"].append("Name may not contain spaces")

        # Check volume IDs are present and correct
        used_volume_ids = set()

        def check_volume(field, volume_id):
            """Return an error string for a duplicate/missing/in-use volume, else None."""
            # Check we haven't tried to use the same volume twice
            if volume_id in used_volume_ids:
                return "Volume ID %s specified for multiple targets!" % volume_id
            try:
                # Check the volume exists
                volume = Volume.objects.get(id=volume_id)
                try:
                    # Check the volume isn't in use
                    target = ManagedTarget.objects.get(volume=volume)
                    return "Volume with ID %s is already in use by target %s" % (volume_id, target)
                except ManagedTarget.DoesNotExist:
                    pass
            except Volume.DoesNotExist:
                return "Volume with ID %s not found" % volume_id
            used_volume_ids.add(volume_id)

        try:
            mgt_volume_id = bundle.data["mgt"]["volume_id"]
            error = check_volume("mgt", mgt_volume_id)
            if error:
                errors["mgt"]["volume_id"].append(error)
        except KeyError:
            # No volume to format: the client must be referencing an existing MGT.
            mgt_volume_id = None
            try:
                mgt = ManagedMgs.objects.get(id=bundle.data["mgt"]["id"])
                if mgt.immutable_state:
                    errors["mgt"]["id"].append("MGT is unmanaged")
                try:
                    ManagedFilesystem.objects.get(name=bundle.data["name"], mgs=mgt)
                    errors["mgt"]["name"].append(
                        "A file system with name '%s' already exists for this MGT" % bundle.data["name"]
                    )
                except ManagedFilesystem.DoesNotExist:
                    pass
            except KeyError:
                errors["mgt"]["id"].append("One of id or volume_id must be set")
            except ManagedMgs.DoesNotExist:
                errors["mgt"]["id"].append("MGT with ID %s not found" % (bundle.data["mgt"]["id"]))

        for mdt in bundle.data["mdts"]:
            try:
                mdt_volume_id = mdt["volume_id"]
                # Fixed: check_volume()'s result was previously discarded here, so
                # duplicate/busy/missing MDT volumes silently passed validation.
                error = check_volume("mdts", mdt_volume_id)
                if error:
                    errors["mdts"]["volume_id"].append(error)
            except KeyError:
                # Fixed: the message had no %s placeholder, making the % operation
                # itself raise TypeError instead of reporting the error.
                errors["mdts"]["volume_id"].append("volume_id attribute is mandatory for mdt %s" % mdt["id"])
        for ost in bundle.data["osts"]:
            try:
                volume_id = ost["volume_id"]
                # Fixed: record the check result (see MDT loop above).
                error = check_volume("osts", volume_id)
                if error:
                    errors["osts"]["volume_id"].append(error)
            except KeyError:
                # Fixed: added the missing %s placeholder (see MDT loop above).
                errors["osts"]["volume_id"].append("volume_id attribute is mandatory for ost %s" % ost["id"])

        # If formatting an MGS, check its not on a host already used as an MGS
        # If this is an MGS, there may not be another MGS on
        # this host
        if mgt_volume_id:
            mgt_volume = Volume.objects.get(id=mgt_volume_id)
            hosts = [vn.host for vn in VolumeNode.objects.filter(volume=mgt_volume, use=True)]
            conflicting_mgs_count = ManagedTarget.objects.filter(
                ~Q(managedmgs=None), managedtargetmount__host__in=hosts
            ).count()
            if conflicting_mgs_count > 0:
                errors["mgt"]["volume_id"].append(
                    "Volume %s cannot be used for MGS (only one MGS is allowed per server)" % mgt_volume.label
                )

        def validate_target(klass, target):
            """Per-target checks: conf params, inode settings, and device capacity."""
            target_errors = defaultdict(list)

            volume = Volume.objects.get(id=target["volume_id"])

            if "inode_count" in target and "bytes_per_inode" in target:
                target_errors["inode_count"].append("inode_count and bytes_per_inode are mutually exclusive")

            if "conf_params" in target:
                conf_param_errors = conf_param.validate_conf_params(klass, target["conf_params"])
                if conf_param_errors:
                    # FIXME: not really representing target-specific validations cleanly,
                    # will sort out while fixing HYD-1077.
                    target_errors["conf_params"] = conf_param_errors

            for setting in ["inode_count", "inode_size", "bytes_per_inode"]:
                if setting in target:
                    if target[setting] is not None and not isinstance(target[setting], int):
                        target_errors[setting].append("Must be an integer")

            # If they specify and inode size and a bytes_per_inode, check the inode fits
            # within the ratio
            try:
                inode_size = target["inode_size"]
                bytes_per_inode = target["bytes_per_inode"]
                if inode_size >= bytes_per_inode:
                    target_errors["inode_size"].append("inode_size must be less than bytes_per_inode")
            except KeyError:
                pass

            # If they specify an inode count, check it will fit on the device
            try:
                inode_count = target["inode_count"]
            except KeyError:
                # If no inode_count is specified, no need to check it against inode_size
                pass
            else:
                try:
                    inode_size = target["inode_size"]
                except KeyError:
                    # Default inode sizes applied at format time, per target type.
                    inode_size = {ManagedMgs: 128, ManagedMdt: 512, ManagedOst: 256}[klass]
                if inode_size is not None and inode_count is not None:
                    if inode_count * inode_size > volume.size:
                        target_errors["inode_count"].append(
                            "%d %d-byte inodes too large for %s-byte device" % (inode_count, inode_size, volume.size)
                        )

            return target_errors

        # Validate generic target settings.
        # (Loop variable renamed: it previously shadowed the `targets` dict.)
        for attr, target_list in targets.items():
            for target in target_list:
                klass = ManagedTarget.managed_target_of_type(
                    attr[0:3]
                )  # We get osts, mdts, mgs so just take the first 3 letters.
                target_errors = validate_target(klass, target)
                if target_errors:
                    errors[attr].update(target_errors)

        conf_param_errors = conf_param.validate_conf_params(ManagedFilesystem, bundle.data["conf_params"])
        if conf_param_errors:
            errors["conf_params"] = conf_param_errors

        def recursive_count(o):
            """Count the number of non-empty dicts/lists or other objects"""
            if isinstance(o, dict):
                c = 0
                for v in o.values():
                    c += recursive_count(v)
                return c
            elif isinstance(o, list):
                c = 0
                for v in o:
                    c += recursive_count(v)
                return c
            else:
                return 1

        # Collapse to an empty dict when nothing real was recorded, since the
        # per-section sub-dicts were pre-created above and would read as errors.
        if not recursive_count(errors):
            errors = {}

        return errors

    def is_valid(self, bundle, request=None):
        """Dispatch to the verb-specific validator; other verbs always pass."""
        if request.method == "POST":
            return self._validate_post(bundle, request)
        elif request.method == "PUT":
            return self._validate_put(bundle, request)
        else:
            return {}
class FilesystemResource(ConfParamResource):
    """
    A Lustre file system, associated with exactly one MGT and consisting of
    one or more MDTs and one or more OSTs.
    When using POST to create a file system, specify volumes to use like this:
    ::
    {osts: [{volume_id: 22}],
    mdt: {volume_id: 23},
    mgt: {volume_id: 24}}
    To create a file system using an existing MGT instead of creating a new
    MGT, set the `id` attribute instead of the `volume_id` attribute for
    that target (i.e. `mgt: {id: 123}`).
    Note: A Lustre file system is owned by an MGT, and the ``name`` of the file system
    is unique within that MGT. Do not use ``name`` as a globally unique identifier
    for a file system in your application.
    """

    # Read-only convenience strings derived from the filesystem object.
    mount_command = fields.CharField(
        null=True,
        help_text='Example command for\
 mounting this file system on a Lustre client, e.g. "mount -t lustre 192.168.0.1:/testfs /mnt/testfs"',
    )
    mount_path = fields.CharField(
        null=True,
        help_text='Path for mounting the file system\
 on a Lustre client, e.g. "192.168.0.1:/testfs"',
    )
    # Related targets, exposed via the target resource.
    osts = fields.ToManyField(
        "chroma_api.target.TargetResource",
        null=True,
        attribute=lambda bundle: ManagedOst.objects.filter(filesystem=bundle.obj),
        help_text="List of OSTs which belong to this file system",
    )
    mdts = fields.ToManyField(
        "chroma_api.target.TargetResource",
        null=True,
        full=True,
        attribute=lambda bundle: ManagedMdt.objects.filter(filesystem=bundle.obj),
        help_text="List of MDTs in this file system, should be at least 1 unless the "
        "file system is in the process of being deleted",
    )
    mgt = fields.ToOneField(
        "chroma_api.target.TargetResource",
        attribute="mgs",
        full=True,
        help_text="The MGT on which this file system is registered",
    )

    def dehydrate_mount_path(self, bundle):
        # Client-visible mount path, e.g. "192.168.0.1:/testfs".
        return bundle.obj.mount_path()

    def dehydrate_mount_command(self, bundle):
        # Example client mount command; None when the path is not yet known.
        path = self.dehydrate_mount_path(bundle)
        if path:
            return "mount -t lustre %s /mnt/%s" % (path, bundle.obj.name)
        else:
            return None

    def get_hsm_control_params(self, mdt, bundle):
        """Build the list of HSM control actions available from the current state."""
        all_params = set(HSM_CONTROL_PARAMS.keys())
        # Exclude the state we are already in.
        available_params = all_params - set([bundle.data["cdt_status"]])
        bundle_params = []
        # Strip the mdt down for brevity of transport and also to
        # avoid problems with the PUT.
        (resource, id) = mdt.data["resource_uri"].split("/")[-3:-1]
        safe_mdt = dict(kind=mdt.data["kind"], resource=resource, id=id, conf_params=mdt.data["conf_params"])
        for param in available_params:
            bundle_params.append(
                dict(
                    mdt=safe_mdt,
                    param_key=HSM_CONTROL_KEY,
                    param_value=param,
                    verb=HSM_CONTROL_PARAMS[param]["verb"],
                    long_description=HSM_CONTROL_PARAMS[param]["long_description"],
                )
            )
        return bundle_params

    def dehydrate(self, bundle):
        """Attach HSM coordinator status/actions derived from the controlling MDT."""
        # Have to do this here because we can't guarantee ordering during
        # full_dehydrate to ensure that the mdt bundles are available.
        try:
            mdt = next(m for m in bundle.data["mdts"] if "mdt.hsm_control" in m.data["conf_params"])
            bundle.data["cdt_status"] = mdt.data["conf_params"]["mdt.hsm_control"]
            bundle.data["cdt_mdt"] = mdt.data["resource_uri"]
            bundle.data["hsm_control_params"] = self.get_hsm_control_params(mdt, bundle)
        except StopIteration:
            # No MDT carries the HSM control param; leave the bundle unchanged.
            pass

        return bundle

    class Meta:
        queryset = ManagedFilesystem.objects.all()
        resource_name = "filesystem"
        authorization = PatchedDjangoAuthorization()
        authentication = AnonymousAuthentication()
        excludes = ["not_deleted", "ost_next_index", "mdt_next_index"]
        ordering = ["name"]
        filtering = {"id": ["exact", "in"], "name": ["exact"]}
        list_allowed_methods = ["get", "post"]
        detail_allowed_methods = ["get", "delete", "put"]
        readonly = [
            "mount_command",
            "mount_path",
        ]
        validation = FilesystemValidation()
        always_return_data = True

    @validate
    def obj_create(self, bundle, **kwargs):
        """Delegate creation to the job scheduler; respond 202 with command + filesystem."""
        request = bundle.request
        filesystem_id, command_id = JobSchedulerClient.create_filesystem(bundle.data)
        filesystem = ManagedFilesystem.objects.get(pk=filesystem_id)
        command = Command.objects.get(pk=command_id)
        fs_bundle = self.full_dehydrate(self.build_bundle(obj=filesystem))
        filesystem_data = self.alter_detail_data_to_serialize(request, fs_bundle).data
        # custom_response raises, short-circuiting tastypie's normal flow.
        raise custom_response(
            self, request, http.HttpAccepted, {"command": dehydrate_command(command), "filesystem": filesystem_data}
        )
class OstPoolResource(ChromaModelResource):
    """REST resource for OST pools; every mutation is routed through the job scheduler
    and answered with 202 Accepted plus the command(s) to poll."""

    osts = fields.ToManyField(
        "chroma_api.target.TargetResource", "osts", null=True, help_text="List of OSTs in this Pool",
    )
    filesystem = fields.ToOneField("chroma_api.filesystem.FilesystemResource", "filesystem")

    class Meta:
        queryset = OstPool.objects.all()
        resource_name = "ostpool"
        authentication = AnonymousAuthentication()
        authorization = PatchedDjangoAuthorization()
        excludes = ["not_deleted"]
        ordering = ["filesystem", "name"]
        list_allowed_methods = ["get", "delete", "put", "post"]
        detail_allowed_methods = ["get", "put", "delete"]
        filtering = {"filesystem": ["exact"], "name": ["exact"], "id": ["exact"]}

    # POST handler
    @validate
    def obj_create(self, bundle, **kwargs):
        # Asynchronous create: the caller polls the returned command.
        request = bundle.request
        ostpool_id, command_id = JobSchedulerClient.create_ostpool(bundle.data)
        command = Command.objects.get(pk=command_id)
        raise custom_response(self, request, http.HttpAccepted, {"command": dehydrate_command(command)})

    # PUT handler
    @validate
    def obj_update(self, bundle, **kwargs):
        # Verify the object exists before dispatching the update job.
        try:
            obj = self.obj_get(bundle, **kwargs)
        except ObjectDoesNotExist:
            raise NotFound("A model instance matching the provided arguments could not be found.")
        command_id = JobSchedulerClient.update_ostpool(bundle.data)
        command = Command.objects.get(pk=command_id)
        raise custom_response(self, bundle.request, http.HttpAccepted, {"command": dehydrate_command(command)})

    # DELETE handlers
    def _pool_delete(self, request, obj_list):
        # Issue one delete job per pool, then report all commands at once.
        commands = []
        for obj in obj_list:
            command_id = JobSchedulerClient.delete_ostpool(obj.id)
            command = Command.objects.get(pk=command_id)
            commands.append(dehydrate_command(command))
        raise custom_response(self, request, http.HttpAccepted, {"commands": commands})

    def obj_delete(self, bundle, **kwargs):
        # Single-object DELETE.
        try:
            obj = self.obj_get(bundle, **kwargs)
        except ObjectDoesNotExist:
            raise NotFound("A model instance matching the provided arguments could not be found.")

        self._pool_delete(bundle.request, [obj])

    def obj_delete_list(self, bundle, **kwargs):
        # Filtered list DELETE.
        try:
            obj_list = self.obj_get_list(bundle, **kwargs)
        except ObjectDoesNotExist:
            raise NotFound("A model instance matching the provided arguments could not be found.")

        self._pool_delete(bundle.request, obj_list)
|
21,826 | c6b2180c3d5fb9fa7ffbf616a57652bf651da42c | import matplotlib.pyplot as plt
import numpy as np
SIGMA = 1000.0
def my_func(x, sigma=None):
    """Gaussian probability density with mean 0.

    Uses the module-level SIGMA unless an explicit `sigma` is supplied
    (generalization; default behavior is unchanged).
    """
    s = SIGMA if sigma is None else sigma
    return 1 / (s * np.sqrt(2 * np.pi)) * np.exp(-x**2 / 2 / s**2)
# Sample the pdf on [-5, 5] and draw it (figure is saved below).
x = np.linspace(-5.0, 5.0, 1000)
plt.clf()
plt.plot(x, my_func(x))
plt.title("SIGMA = %f" % SIGMA)
plt.grid(True)
plt.savefig("IMG_gaussian.pdf") |
21,827 | 2a4f993e501468bd84a9def2cc35a20481226a7a | from colored_traceback import add_hook
add_hook(colors=16)
|
21,828 | 7cb4c14e81cb5e1a4a7e95b9fd9fc66e6ac73383 | from django.test import TestCase
import conf
import manage
from cacke import wsgi
from cacke.settings.celery import debug_task
from cacke.settings import pro
class Cacke(TestCase):
    """Smoke tests: key project modules import and expose the expected objects."""

    def test_config_sphinx(self):
        # Sphinx config should carry the expected author string.
        self.assertEqual(conf.author, 'ah8ad3')

    def test_wsgi_module(self):
        self.assertIsNotNone(wsgi.application, 'check wsgi is not nil')

    def test_manage_module(self):
        self.assertIsNotNone(manage, 'check main manage is not nil')

    def test_celery_app_task(self):
        # apply() executes the task eagerly in-process and returns a result object.
        self.assertIsNotNone(debug_task.apply(), 'check task not empty')

    def test_pro_settings(self):
        self.assertIsNotNone(pro, 'pro is not none')
|
21,829 | 420d341adb716e96619234ba64b91f4082c1d753 | #!/usr/bin/env python
import sys
import termios
import tty
def getchar():
    ''' Returns a single character from standard input. '''
    # NOTE(review): this reads up to 3 bytes so that arrow keys (3-byte escape
    # sequences such as '\x1b[A') arrive in one call; a single key like 'q'
    # will block until two more bytes are typed -- confirm this is intended.
    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)
    try:
        # Raw mode: bytes are delivered immediately, without echo or line buffering.
        tty.setraw(sys.stdin.fileno())
        ch = sys.stdin.read(3)
    finally:
        # Always restore the terminal settings, even if the read raises.
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def main():
    """Read keys until 'q' is pressed; print the direction for arrow keys."""
    while True:
        ch = getchar()
        # Fixed: a leftover debug print + unconditional `break` made every
        # branch below unreachable; removed so the key handling actually runs.
        if ch == 'q':
            sys.exit(0)
        elif ch == '\x1b[A':
            print('up')
        elif ch == '\x1b[B':
            print('down')
        elif ch == '\x1b[C':
            print('right')
        elif ch == '\x1b[D':
            print('left')
if __name__ == '__main__':
while True: main()
|
21,830 | 5932acb78bd970d4166cec9a8204159fe2b1afc5 | # encoding: utf-8
'''
Created on 2018年9月13日
@author: Administrator
'''
from matplotlib import pyplot
# Plot y = x**2 for x in [1, 1000] as a colour-mapped scatter and save it.
x_values = list(range(1, 1001))
y_values = [x ** 2 for x in x_values]
pyplot.scatter(x_values, y_values, c=y_values, edgecolors="none", s=40, cmap=pyplot.cm.Blues)
pyplot.title("Square Numbers", fontsize=14)
pyplot.xlabel("Value")
pyplot.ylabel("Squares of Numbers")
# Fix the axes so the full data range is visible with a margin.
pyplot.axis([0, 1100, 0, 1100000])
# pyplot.show()
pyplot.savefig("squares_ploy.png",bbox_inches="tight")
|
21,831 | 4fe6e8979e6580361b3e1dfd93680c5735de4646 | import urllib
import json
# Python 2 script: fetch a JSON document of comments and sum their counts.
location = raw_input('Enter location: ')
# hardcode location for testing
# location = "http://python-data.dr-chuck.net/comments_271857.json"
count = 0
total = 0
print 'Retrieving', location
uh = urllib.urlopen(location)
data = uh.read()
print 'Retrieved ', len(data), 'characters'
# Expected document shape: {"comments": [{"name": ..., "count": ...}, ...]}
info = json.loads(data)
comments = info['comments']
comment_count = 0
total = 0
for item in comments:
    name = item['name']
    count = item['count']
    comment_count += 1
    total += count
print 'Count', comment_count
print 'Total', total
|
21,832 | 81348f903b134502551c966a7f93085e5cef28c4 | import asyncio
import json
import logging
from unittest.mock import patch, AsyncMock, MagicMock
import aiohttp
import pytest
import pytest_asyncio
from aiohttp import ClientTimeout
from aiohttp.hdrs import METH_GET, METH_POST
from aiohttp_retry import ExponentialRetry
from asyncio_throttle import Throttler
from aioetherscan.exceptions import EtherscanClientContentTypeError, EtherscanClientError, EtherscanClientApiError, \
EtherscanClientProxyError
from aioetherscan.network import Network
from aioetherscan.url_builder import UrlBuilder
class SessionMock(AsyncMock):
    """aiohttp-session stand-in whose get() yields an async context manager."""

    # noinspection PyUnusedLocal
    @pytest.mark.asyncio
    async def get(self, url, params, data):
        # NOTE(review): the asyncio mark on this non-test coroutine is a no-op
        # -- confirm it can be dropped.
        return AsyncCtxMgrMock()
class AsyncCtxMgrMock(MagicMock):
    """MagicMock usable with `async with`; __aenter__ yields the .aenter attribute."""

    @pytest.mark.asyncio
    async def __aenter__(self):
        return self.aenter

    @pytest.mark.asyncio
    async def __aexit__(self, *args):
        pass
def get_loop():
    """Return the current (or a freshly created) asyncio event loop for fixtures."""
    return asyncio.get_event_loop()
@pytest_asyncio.fixture
async def ub():
    """UrlBuilder configured with a dummy API key for the eth mainnet."""
    ub = UrlBuilder('test_api_key', 'eth', 'main')
    yield ub
@pytest_asyncio.fixture
async def nw(ub):
    """Network under test; closed after the test to release any session."""
    nw = Network(ub, get_loop(), None, None, None, None)
    yield nw
    await nw.close()
def test_init(ub):
    """The constructor stores every dependency unchanged; retry client is lazy."""
    myloop = get_loop()
    proxy = 'qwe'
    timeout = ClientTimeout(5)
    throttler = Throttler(1)
    retry_options = ExponentialRetry()
    n = Network(ub, myloop, timeout, proxy, throttler, retry_options)
    assert n._url_builder is ub
    assert n._loop == myloop
    assert n._timeout is timeout
    assert n._proxy is proxy
    assert n._throttler is throttler
    assert n._retry_options is retry_options
    # Created lazily on first request; logger created eagerly.
    assert n._retry_client is None
    assert isinstance(n._logger, logging.Logger)
@pytest.mark.asyncio
async def test_get(nw):
    """get() delegates to _request with the API key injected into params."""
    with patch('aioetherscan.network.Network._request', new=AsyncMock()) as mock:
        await nw.get()
        mock.assert_called_once_with(METH_GET, params={'apikey': nw._url_builder._API_KEY})
@pytest.mark.asyncio
async def test_post(nw):
    """post() injects the API key and drops None-valued fields from the payload."""
    with patch('aioetherscan.network.Network._request', new=AsyncMock()) as mock:
        await nw.post()
        mock.assert_called_once_with(METH_POST, data={'apikey': nw._url_builder._API_KEY})

    with patch('aioetherscan.network.Network._request', new=AsyncMock()) as mock:
        await nw.post({'some': 'data'})
        mock.assert_called_once_with(METH_POST, data={'apikey': nw._url_builder._API_KEY, 'some': 'data'})

    with patch('aioetherscan.network.Network._request', new=AsyncMock()) as mock:
        # None values must be stripped before sending.
        await nw.post({'some': 'data', 'null': None})
        mock.assert_called_once_with(METH_POST, data={'apikey': nw._url_builder._API_KEY, 'some': 'data'})
@pytest.mark.asyncio
async def test_request(nw):
class MagicMockContext(MagicMock):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
type(self).__aenter__ = AsyncMock(return_value=MagicMock())
type(self).__aexit__ = AsyncMock(return_value=MagicMock())
nw._retry_client = AsyncMock()
throttler_mock = AsyncMock()
nw._throttler = AsyncMock()
nw._throttler.__aenter__ = throttler_mock
get_mock = MagicMockContext()
nw._retry_client.get = get_mock
with patch('aioetherscan.network.Network._handle_response', new=AsyncMock()) as h:
await nw._request(METH_GET)
throttler_mock.assert_awaited_once()
get_mock.assert_called_once_with('https://api.etherscan.io/api', params=None, data=None, proxy=None)
h.assert_called_once()
post_mock = MagicMockContext()
nw._retry_client.post = post_mock
with patch('aioetherscan.network.Network._handle_response', new=AsyncMock()) as h:
await nw._request(METH_POST)
throttler_mock.assert_awaited()
post_mock.assert_called_once_with('https://api.etherscan.io/api', params=None, data=None, proxy=None)
h.assert_called_once()
assert throttler_mock.call_count == 2
# noinspection PyTypeChecker
@pytest.mark.asyncio
async def test_handle_response(nw):
class MockResponse:
def __init__(self, data, raise_exc=None):
self.data = data
self.raise_exc = raise_exc
@property
def status(self):
return 200
# noinspection PyMethodMayBeStatic
async def text(self):
return 'some text'
async def json(self):
if self.raise_exc:
raise self.raise_exc
return json.loads(self.data)
with pytest.raises(EtherscanClientContentTypeError) as e:
await nw._handle_response(MockResponse('some', aiohttp.ContentTypeError('info', 'hist')))
assert e.value.status == 200
assert e.value.content == 'some text'
with pytest.raises(EtherscanClientError, match='some exc'):
await nw._handle_response(MockResponse('some', Exception('some exception')))
with pytest.raises(EtherscanClientApiError) as e:
await nw._handle_response(MockResponse('{"status": "0", "message": "NOTOK", "result": "res"}'))
assert e.value.message == 'NOTOK'
assert e.value.result == 'res'
with pytest.raises(EtherscanClientProxyError) as e:
await nw._handle_response(MockResponse('{"error": {"code": "100", "message": "msg"}}'))
assert e.value.code == '100'
assert e.value.message == 'msg'
assert await nw._handle_response(MockResponse('{"result": "some_result"}')) == 'some_result'
@pytest.mark.asyncio
async def test_close_session(nw):
    """close() is a no-op before the retry client exists, then closes it exactly once."""
    with patch('aiohttp.ClientSession.close', new_callable=AsyncMock) as m:
        await nw.close()
        m: AsyncMock
        m.assert_not_called()

    nw._retry_client = MagicMock()
    nw._retry_client.close = AsyncMock()
    await nw.close()
    nw._retry_client.close.assert_called_once()
|
21,833 | 1d7124a9ae64e5701256eea9470df01f1ab70af0 | from abc import ABCMeta
from typing import Any, Tuple, Type, TypeVar
import torch.nn as nn
from torch import Tensor
from rllib.environment import AbstractEnvironment
T = TypeVar("T", bound="AbstractQFunction")
class AbstractQFunction(nn.Module, metaclass=ABCMeta):
    """Type stub for Q-function modules (state/action value approximators)."""

    # Action-space description.
    dim_action: Tuple
    num_actions: int
    discrete_action: bool
    # State-space description.
    dim_state: Tuple
    num_states: int
    # tau: presumably the target-network soft-update rate -- confirm in implementation.
    tau: float
    discrete_state: bool

    def __init__(
        self,
        dim_state: Tuple,
        dim_action: Tuple,
        num_states: int = ...,
        num_actions: int = ...,
        tau: float = ...,
        *args: Any,
        **kwargs: Any,
    ) -> None: ...
    def forward(self, *args: Tensor, **kwargs: Any) -> Tensor: ...
    @classmethod
    def default(
        cls: Type[T], environment: AbstractEnvironment, *args: Any, **kwargs: Any
    ) -> T: ...
class AbstractValueFunction(AbstractQFunction):
    """Type stub for value-function modules; shares the Q-function interface."""

    def __init__(self, *args: Any, **kwargs: Any) -> None: ...
|
21,834 | 0426550c163598d89e831c6af4446735b349c9c2 | from bs4 import BeautifulSoup as bs
import location as loc1
#import test2 as main
from PIL import Image
from io import BytesIO
from mapview import *
#import geopy
import ui, io, random, json, requests, copy
view = None
resultBox = None
scrollObj = None
markerColors = ['blue', 'green', 'red', 'yellow', 'purple', 'gray']
resultsCount = 5
mapObj = None
_tAddresses, _tPhones, _tNames = ([] for i in range(3))
def getTestingSites(state):
    """Scrape COVID testing sites for `state` into the module-level
    _tNames/_tAddresses/_tPhones lists, then refresh the landing UI."""
    # Fixed: the global statement previously declared the misspelled name
    # `_tAddress` (missing the trailing 's').
    global _tAddresses, _tPhones, _tNames
    url = 'https://my.castlighthealth.com/corona-virus-testing-sites/data/result.php?county=All&state='+state+'&v=03042020813'
    request = requests.get(url)
    page = bs(request.content, 'html.parser')
    headings = page.findAll('h2')
    for idx, master in enumerate(page.findAll('div', class_='dont-break-out')):
        name = headings[idx].get_text()
        ps = master.findAll('p')
        address = ps[0].get_text()
        phone = ps[1].get_text()
        # Strip labels and whitespace noise from the scraped text.
        address = address.replace('\n', '').replace(' ', '').replace('Address:', '')
        phone = phone.replace('\n', '').replace(' ', '').replace('Phone:', '')
        print(name)
        _tNames.append(name)
        _tAddresses.append(address)
        _tPhones.append(phone)
        #print(address + str(idx))
    updateLanding()
    #print(_tNames)
    #print(_tAddresses)
    #print(_tPhones)
def updateLanding():
    """Copy the first `resultsCount` scraped site names into the result boxes."""
    print('size of results: ' + str(len(_tPhones)))
    for idx in range(resultsCount):
        box = scrollObj.subviews[idx]
        box['label_r'].text = _tNames[idx]
def setLanding():
    """Build the landing UI: style the first result card, clone it four times,
    and show the initial map."""
    global scrollObj
    global resultBox
    # Grab the widgets defined in the .pyui layout.
    car = view['content']['car_r']
    car.image = ui.Image.named('assets/car.png')
    phone = view['content']['phone_r']
    horiz_r = view['content']['horizontal_r']
    vert_r = view['content']['vertical_r']
    case_r = view['content']['case_r']
    marker_r = view['content']['siteMarker']
    scrollObj = view['content']['innerScroll']
    label_r = view['content']['label_r']
    view['content']['copyright'].image = ui.Image.named('assets/copyright.png')
    phoneBounds = phone.frame
    marker_r.tint_color = 'blue'
    scrollObj.add_subview(case_r)
    scrollObj.content_size = (375, 900)
    #scrollObj.content_mode = ui.CONTENT_TOP_LEFT
    markerPos = (marker_r.x, marker_r.y)
    case_r.add_subview(phone)
    # Hand-position the card and its child widgets.
    case_r.x = 14
    case_r.y = 20
    marker_r.x = 20
    marker_r.y = 40
    vert_r.x = 90
    vert_r.y = 12
    horiz_r.x = 100
    horiz_r.y = 80
    label_r.x = 110
    label_r.y = 10
    phone.x = 150
    phone.y = 90
    car.x = 220
    car.y = 90
    #case_r.frame = scrollObj.bounds
    phone.bring_to_front()
    case_r.add_subview(marker_r)
    case_r.add_subview(car)
    case_r.add_subview(horiz_r)
    case_r.add_subview(label_r)
    case_r.add_subview(vert_r)
    vert_r.bring_to_front()
    resultBox = case_r
    # Serialize the styled card and load four more copies stacked below it,
    # each with the next marker color.
    boxSave = ui.dump_view(case_r)
    for idx in range(4):
        _idx = idx + 1
        box = ui.load_view_str(boxSave)
        box.x = 14
        box.y = 180 + (180 * idx)
        box['siteMarker'].tint_color = markerColors[_idx]
        scrollObj.add_subview(box)
    # Default map center (Washington, DC area).
    showMap([38.926640,-77.006981], None)
    #view.add_subview(resultBox)
    #scrollObj.add_subview(case_r)
def showMap(center, pois):
    """Create the map view centered on `center` ([lat, lng]); `pois` is unused here."""
    global mapObj
    v = MapView(frame=(20, 570, 325, 270))
    # NOTE(review): `scroll_action` is not defined in this module -- confirm it
    # is provided by the mapview import or defined elsewhere.
    v.scroll_action = scroll_action
    v.name = 'map'
    view.add_subview(v)
    v.set_region(center[0],center[1], 0.1, 0.1, animated=True)
    mapObj = v
def updateMap(center, pois):
    """Re-center the existing map and drop a colored pin for each (lat, lng) in `pois`."""
    global mapObj
    v = mapObj
    v.set_region(center[0], center[1], 0.1, 0.1, animated=True)
    for idx, poi in enumerate(pois):
        v.add_pin(poi[0], poi[1], markerColors[idx], 'a')
# Thin wrappers around the `location` module (imported above as loc1).
def getLoc():
    """Current device location."""
    return loc1.get_location()

def revGeo(loc):
    """Reverse-geocode a (lat, lng) location to an address."""
    return loc1.reverse_geocode(loc)

def geo(loc):
    """Geocode an address dict to coordinates."""
    return loc1.geocode(loc)

def locAuthorized():
    """Whether the app has location permission."""
    return loc1.is_authorized()

def startLoc():
    # Begin location updates (must be stopped with stopLoc()).
    loc1.start_updates()

def stopLoc():
    loc1.stop_updates()

def locObj():
    """Expose the underlying location module."""
    return loc1
def testingOnMap(numShow):
    """Geocode `numShow` randomly chosen scraped site addresses and plot them
    as colored pins on the map, centered on their average coordinate."""
    showList = ''
    siteCoords = []
    agLat, agLng = 0, 0
    for idx in range(numShow):
        thisAddr = _tAddresses[random.randint(0, len(_tAddresses) - 1)]
        print(thisAddr)
        splitStr = thisAddr.split(',')
        # Collapse 4-part addresses (street, suite, city, state) to 3 parts.
        if len(splitStr) > 3:
            splitStr[0] += (splitStr[1])
            del splitStr[1]
        splitStr[2] = 'USA'
        _address_dict = {'Street': splitStr[0], 'City': splitStr[1], 'Country': splitStr[2]}
        print(_address_dict)
        # Fixed: these calls previously used the bare name `location`, which is
        # imported in this module as `loc1`, raising NameError at runtime.
        print(loc1.get_location())
        obscure = loc1.geocode(_address_dict)
        print(obscure)
        lat, lng = obscure[0]['latitude'], obscure[0]['longitude']
        thisList = (str(lat) + ',' + str(lng) + '|marker-' + markerColors[idx] + '||')
        print(thisList)
        showList += thisList
        agLat += lat
        agLng += lng
        siteCoords.append((lat, lng))
    # Center the map on the average of the plotted coordinates.
    mapCenter = (agLat / float(numShow), agLng / float(numShow))
    updateMap(mapCenter, siteCoords)
    # Trim the trailing '||' separator from the static-map location list.
    showList = showList[:-2]
    print(showList)
def initScene():
    """Build the scrolling root view, load the .pyui layout into it, then
    fetch and plot 5 random testing sites.

    NOTE(review): setLanding and getTestingSites are defined elsewhere in
    this file -- presumed to populate _tAddresses used by testingOnMap.
    """
    global view
    view = ui.ScrollView()
    _view = ui.load_view('testing.pyui')
    _view.name = 'content'
    # Negative top inset pulls content under the title bar area.
    view.content_inset = (-50, 0, 0, 0)
    view.content_size = (_view.width*.9, _view.height*.85)
    view.add_subview(_view)
    setLanding()
    getTestingSites('DC')
    testingOnMap(5)
#initScene()
#currView = ui.NavigationView(view, navigation_bar_hidden=False)
#currView.objc_instance.navigationController().navigationBar().hidden = True
#view.present('fullscreen', hide_title_bar=True)
|
21,835 | 9b4dbf7fb7444dadfa89f98e8e41650d9bdbd910 | #!/usr/bin/python
from pwn import *
from hashlib import *
import string
poss = string.printable
host = '146.185.131.214'
port = 52709
def start(mode, want):
    """Brute-force a 4-char string s over `poss` with mode(s).hexdigest()[-6:] == want.

    mode: a hashlib constructor (md5, sha1, ...).
    want: the required 6-hex-char digest suffix.
    Returns (s, suffix) on success, or None when no candidate matches.

    NOTE(review): passes a str to the hash constructor (Python 2 semantics);
    under Python 3 the candidate would need .encode() -- confirm target.
    """
    from itertools import product  # local import: keeps the module header untouched
    # product(poss, repeat=4) enumerates candidates in the same order as the
    # original four nested loops (last position varies fastest).
    for combo in product(poss, repeat=4):
        s = ''.join(combo)
        suffix = mode(s).hexdigest()[-6:]
        if suffix == want:
            return s, suffix
    return None
# Connect and parse the proof-of-work prompt:
#   "Submit a printable string X, such that <algo>(X)[-6:] = <hex>"
r = remote(host, port)
r.recvuntil('Submit a printable string X, such that ')
mode = r.recvuntil('(X)')[:-3]
r.recvuntil('[-6:] = ')
want = r.recvuntil('\n').strip()
# Dispatch on the algorithm name embedded in the prompt.
# NOTE(review): str args to recvuntil/hashlib imply Python 2 pwntools;
# under Python 3 these would need bytes.
if 'md5' in mode:
    ret, ret2 = start(md5, want)
elif 'sha512' in mode:
    ret, ret2 = start(sha512, want)
elif 'sha256' in mode:
    ret, ret2 = start(sha256, want)
elif 'sha384' in mode:
    ret, ret2 = start(sha384, want)
elif 'sha224' in mode:
    ret, ret2 = start(sha224, want)
elif 'sha1' in mode:
    ret, ret2 = start(sha1, want)
print ('ret : ', ret)
print ('mode : ', mode)
print ('want : ', want)
print ('result : ', ret2)
# BUG FIX: the original called r.close() here, before sendline() and the
# later r.interactive() -- sending on a closed connection fails. The
# connection must stay open until the interactive session ends.
r.sendline(ret)
def laxt(expr, num):
    """Validate a calculator expression: every non-operator character of
    *expr* must come from the character set *num*, and *expr* must evaluate.

    Returns (ok, value); value is None when eval failed.
    """
    ops = ' %&()*+-/<>^|~'
    # NOTE(review): str.translate(None, ops) is the Python 2 deletechars API;
    # under Python 3 this raises TypeError -- confirm target interpreter.
    nude = expr.translate(None, ops)
    try:
        # SECURITY: eval() on remote-controlled input -- tolerable only in a
        # CTF exploit script, never in production code.
        flag, val = True, eval(expr)
    except:
        flag, val = False, None
    return set(nude) == set(num) and flag, val
r.interactive()
|
21,836 | 2f1c6a28d5957c5f3fffa30cad2d23dab1c626ed | # create a class and call its method
# class MyClass:
# def SayHello():
# print("Hello there!")
# MyClass.SayHello()
# create an instance of a class and call its method -- note you have to use self
# class Person:
# def greet(self):
# print("Hello")
# me = Person()
# me.greet()
# working with constructors. needed if you have variables
# class MyClass:
# def __init__(self):
# print("Upon Initialization: Hello!")
# def instance_method(self):
# print("hello instance")
# def class_method():
# print("Hello class method!")
# test = MyClass()
# test.instance_method()
# MyClass.class_method()
# test.class_method()
|
# Redo challenge 051: read the first term and the common difference of an
# arithmetic progression and show its first 10 terms, using a WHILE loop.
from time import sleep
print('10 PRIMEIROS TERMOS DE PROGRESSÃO ARITMÉTICA !')
print('-='*25)
primeiro = int(input('Primeiro termo: '))
razao = int(input('Razão: '))
print('-='*25)
print('Calculando...')
sleep(1)
termo = primeiro
cont = 1
# Emit 10 terms: term, term+razao, term+2*razao, ...
while cont <= 10:
    print('{} '.format(termo), end= '-> ')
    termo += razao
    cont += 1
print('FIM')
|
21,838 | 015f56269609c40263374a840adf8c8295b75c59 | import sys
sys.path.append("..")
from tensorflow.keras.utils import to_categorical
from models import utils
from models.CNNSpectMFCC import CNNSpectMFCC
import numpy as np
n_frames_utterance = 21
# 10 fake utterances, 21 frames each, 50 features per frame.
fake_features, fake_targets = utils.generate_fake_data(n_utterances=10,
                                                       n_frames_utterance=n_frames_utterance, n_features=50)
fake_targets_categ = to_categorical(fake_targets)
# Add a trailing channels dimension: (batch, frames, features, 1).
fake_features = fake_features.reshape(-1, 21, 50, 1)
input_dims = [(21, 50, 1), (21, 50, 1)]
pool_size_2d = 5
divide_pool_size_1d = 4
filters_size = [5, 5]
n_filters = [128, 256]
model = CNNSpectMFCC(input_dims, n_filters, filters_size, pool_size_2d, divide_pool_size_1d, n_output_nodes=2)
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# NOTE(review): np.random.randint(0, 100, 10) draws indices up to 99 but
# fake_targets_categ has only 10 rows, so this can raise IndexError --
# likely meant randint(0, 10, 10) or simply fake_targets_categ. Confirm.
model.fit([fake_features, fake_features], fake_targets_categ[np.random.randint(0, 100, 10)], epochs=10, batch_size=10)
#model.summary() |
from django.http import (HttpResponse, Http404,
                         HttpResponseRedirect, HttpRequest)
from django.shortcuts import get_object_or_404, render
from django.template import loader
from django.views.decorators.http import require_GET
from django.views.generic import CreateView, DetailView, FormView, ListView

from .forms import QuestionForm
from .models import Question, Choice
class IndexView(ListView):
    """List all questions ordered by question text, descending."""
    template_name = 'polls/index.html' # model = Question
    context_object_name = 'object_list'
    queryset = Question.objects.order_by('-question_text')
class DetailsView(DetailView):
    """Question detail page; injects an empty QuestionForm into the context."""
    template_name = 'polls/details.html'
    model = Question
    context_object_name = 'question'
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["form"] = QuestionForm()
        return context
class ChoiceCreate(CreateView):
    """Create a Choice from the detail template.

    NOTE(review): requires `CreateView` from django.views.generic in the
    module imports.
    """
    template_name = 'polls/detail.html'
    model = Choice
    fields = ['question','choice_text']
@require_GET
def index(request: HttpRequest):
    """Render the five most recent questions (GET only).

    The original function contained a second, unreachable copy of this
    logic after the return statement; it has been removed.
    """
    return render(request, 'polls/index.html', {
        "lastest_question_list": Question.objects.order_by('-pub_date')[:5]
    })
def detail(request, question_id):
    """Render the detail page; 404 when the question id is unknown."""
    try:
        return render(request, 'polls/details.html',
                      {"question": Question.objects.get(id=question_id)})
    except Question.DoesNotExist:
        # BUG FIX: the original message was truncated ("Question does").
        raise Http404('Question does not exist')
def result(request, question_id):
    """Render the results page for one question (404 on unknown id)."""
    question = get_object_or_404(Question,pk=question_id)
    return render(request,'polls/result.html',{"question":question})
def vote(request, question_id):
    """Record one vote for the selected choice, then redirect to results.

    NOTE(review): when form.is_valid() is False this function returns None
    (a 500 in Django) -- confirm whether an error redirect is intended.
    """
    question = get_object_or_404(Question,id=question_id)
    form = QuestionForm(request.POST)
    if form.is_valid():
        try:
            selected_choice = question.choice_set.get(id=request.POST['choice'])
        except (KeyError, Choice.DoesNotExist):
            # Missing/unknown choice: fall through and redirect anyway.
            pass
        else:
            selected_choice.votes +=1
            selected_choice.save()
        # Relative redirect, resolved against the current URL path.
        return HttpResponseRedirect('{}/result'.format(question_id))
class ChoiceFormView(FormView):
    """Form-based alternative to `vote` (FormView handles GET and POST).

    NOTE(review): requires `FormView` in the module imports; ChoiceModelForm
    is expected from .forms.
    """
    template_name = 'polls/detail.html'
    form_class = ChoiceModelForm
    # BUG FIX: FormView reads `success_url`; the original attribute was
    # misspelled `succsess_url` and silently ignored.
    success_url = '/polls'
21,840 | 7271877c5dc89005176702580001b0460de2997b | from pytest_socket import disable_socket
pytest_plugins = [
"tests.fixtures",
]
def pytest_runtest_setup():
    # Block all real network access for every test (pytest-socket plugin).
    disable_socket()
|
21,841 | 3c62c872075eb3afe5fad203f3c0789138ee18b0 | from django.http import HttpResponse
from .models import Student, Teacher, Score, Course
from django.db import transaction, connection
from django.db.models import Avg, Count, Sum, F, Q, Max, Min
import random
def index(request):
return HttpResponse("Index")
@transaction.atomic
def createData(request):
    """Seed students, teachers, courses and random scores; call exactly once."""
    # gender flag: 0 / 1 (presumably male / female -- confirm with the model).
    Student.objects.bulk_create([
        Student(name = "希特勒", gender = 0),
        Student(name = "丘吉尔", gender = 0),
        Student(name = "张伯伦", gender = 0),
        Student(name = "罗斯福", gender = 0),
        Student(name = "奥黛丽赫本", gender = 1),
        Student(name = "玛莲娜梦露", gender = 1)
    ])
    # Index comments are relied upon by __initCourseList's special cases.
    Teacher.objects.bulk_create([
        Teacher(name = "语文老师"), # 0
        Teacher(name = "英语老师"), # 1
        Teacher(name = "数学老师"), # 2
        Teacher(name = "物理老师"), # 3
        Teacher(name = "化学老师"), # 4
        Teacher(name = "生物老师"), # 5
        Teacher(name = "历史老师"), # 6
        Teacher(name = "政治老师"), # 7
        Teacher(name = "地理老师"), # 8
        Teacher(name = "体育老师") # 9
    ])
    Course.objects.bulk_create(__initCourseList())
    __initScoreList()
    return HttpResponse("CreateData")
def __initSportTeacher(courseList, teacherList, teacherSport):
    """Give the PE teacher one course for every teacher's subject name."""
    for teacher in teacherList:
        # name[0:2] takes the subject prefix of names like "语文老师".
        courseList.append(Course(name = teacher.name[0: 2], teacher = teacherSport))
def __initCourseList():
    """Build the Course list: each teacher teaches their named subject, a few
    teachers (by index) get one extra cross-subject course, and the PE
    teacher (index 9) teaches every subject instead."""
    teacherList = list(Teacher.objects.all())
    courseList = []
    for index in range(len(teacherList)):
        teacher = teacherList[index]
        if(index == 0):
            courseList.append(Course(name = "物理", teacher = teacher))
        elif(index == 3):
            courseList.append(Course(name = "生物", teacher = teacher))
        elif(index == 5):
            courseList.append(Course(name = "语文", teacher = teacher))
        elif(index == 7):
            courseList.append(Course(name = "英语", teacher = teacher))
        elif(index == 9):
            __initSportTeacher(courseList, teacherList, teacherSport = teacher)
            continue
        # Default: the subject prefix of the teacher's own name.
        courseList.append(Course(name = teacher.name[0: 2], teacher = teacher))
    return courseList
def __initScoreList():
    """Insert up to 50 random scores (1-150), skipping duplicate
    (student, course) pairs."""
    courseSet = Course.objects.all()
    studentSet = Student.objects.all()
    courseLen, studentLen = (len(courseSet), len(studentSet))
    for index in range(50):
        scoreNum = random.randint(1, 150)
        course = courseSet[random.randint(0, courseLen - 1)]
        student = studentSet[random.randint(0, studentLen - 1)]
        print("%s -- %s : %d" % (student.name, course.name, scoreNum))
        # Only one score per (student, course) pair.
        exists = Score.objects.filter(course_id = course.id, student_id = student.id).exists()
        if(not exists):
            score = Score(student = student, course = course, number = scoreNum)
            score.save()
def question1(request):
students = Student.objects \
.annotate(scoreAvg = Avg("score__number")) \
.filter(scoreAvg__gt = 60)
for student in students:
print("%s : %d" % (student.name, student.scoreAvg))
__printSql()
return HttpResponse("查询平均成绩大于60分的同学id和平均成绩")
def question2(request):
students = Student.objects \
.annotate(courseCount = Count("score__course_id"), scoreSum = Sum("score__number")) \
.defer("gender")
for student in students:
print(student.__dict__)
__printSql()
return HttpResponse("查询所有同学id、姓名、选课的数、总成绩")
def question3(request):
teachers = Teacher.objects.filter(name__startswith = "数学")
for teacher in teachers:
print("%d : %s" % (teacher.id, teacher.name))
__printSql()
return HttpResponse("查询性数学老师的个数")
def question4(rquest):
students = Student.objects \
.exclude(score__course__teacher__name__startswith = "数学") \
.defer("gender")
for student in students:
print(student.__dict__)
__printSql()
return HttpResponse("查询没学习过数学老师课的同学的id、姓名")
def question5(request):
students = Student.objects \
.filter(score__course_id__in = (1, 2)) \
.defer("gender").distinct()
for student in students:
print("%d : %s" % (student.id, student.name))
__printSql()
return HttpResponse("查询学过id为1和2的所有同学的学号、姓名")
def question6(request):
students = Student.objects \
.filter(score__course__teacher__name = "历史老师")\
.defer("gender")
for student in students:
print("%d : %s" % (student.id, student.name))
__printSql()
return HttpResponse("查询学过历史老师所教的所有课的同学的学号、姓名")
def question7(reuqest):
studnets = Student.objects \
.exclude(score__number__gte = 120) \
.defer("gender").distinct()
for student in studnets:
print("%d : %s" % (student.id, student.name))
__printSql()
return HttpResponse("查询所有课程成绩均小于120分的同学的id和姓名")
def question8(reuqest):
# students = Student.objects.annotate(courseName = F("score__course__name")) \
# .defer("gender").annotate(count = Count("courseName"))
# finishStudent = {}
# for student in students:
# count = finishStudent.get(student.name, 0)
# finishStudent[student.name] = count + 1
# print(finishStudent)
# Sql函数有distinct属性
students = Student.objects \
.annotate(count = Count("score__course__name", distinct = True)) \
.defer("gender")
for student in students:
print("%s : %d" % (student.name, student.count))
__printSql()
return HttpResponse("查询没有完全所有课的同学的id,姓名(完全课代表 语数英物化生历地政 全部有上)")
def question9(request):
students = Student.objects \
.annotate(scoreAvg = Avg("score__number")) \
.order_by("-scoreAvg")
for student in students:
print("%s : %d" % (student.name, student.scoreAvg))
__printSql()
return HttpResponse("查询所有学生的姓名、平均分,并且按照平均分从高到低排序")
def question10(request):
courses = Course.objects \
.annotate(maxScore = Max("score__number"), minScore = Min("score__number"), teacherName = F("teacher__name")) \
.defer("teacher_id")
for course in courses:
print("%s, %s, %s, %s, %s" % (course.id, course.name, course.teacherName, course.maxScore, course.minScore))
__printSql()
return HttpResponse("查询各科升级的最高分和最低分,以如下形式显示:课程ID,课程名称,最高分,最低分")
def question11(request):
courses = Course.objects \
.annotate(scoreAvg = Avg("score__number"), teacherName = F("teacher__name")) \
.defer("teacher_id").filter(scoreAvg__isnull = False).order_by("scoreAvg")
for course in courses:
print("%s, %s, %s, %s" % (course.id, course.name, course.teacherName, course.scoreAvg))
__printSql()
return HttpResponse("查询每门课程的平均成绩,按照平均成绩进行排序")
def question12(request):
students = Student.objects \
.aggregate(maleNum = Count("gender", filter = Q(gender = 0)), famaleNum = Count("gender", filter = Q(gender = 1)))
print(students)
__printSql()
return HttpResponse("统计有多少女生,多少男生")
def question13(request):
    """List the English teacher's scores.

    NOTE(review): the bulk update is commented out, and it adds 10 while the
    response text announces +5 -- confirm the intended increment before
    re-enabling it.
    """
    scores = Score.objects \
        .filter(course__teacher__name = "英语老师") \
        .annotate(teacherName = F("course__teacher__name"), courseName = F("course__name"), studentName = F("student__name")) \
        .defer("course_id", "student_id") \
        # .update(number = F("number") + 10)
    for score in scores:
        print("%s, %s, %s : %d" % (score.studentName, score.courseName, score.teacherName, score.number))
    __printSql()
    return HttpResponse("将英语老师的每一门课程都在原来的基础上加5分")
def question14(reuqest):
students = Student.objects \
.annotate(badCount = Count("id", filter = Q(score__number__lte = 90))) \
.filter(badCount__gte = 2) \
.defer("gender")
for student in students:
print("%s, %s, %d" % (student.id, student.name, student.badCount))
__printSql()
return HttpResponse("查询两门以上不及格的同学的id、姓名、以及不及格课程数")
def question15(reuqest):
courses = Course.objects \
.annotate(personNum = Count("score__student_id"), teacherName = F("teacher__name")) \
.defer("teacher_id")
for course in courses:
print("%s, %s, %s授课 , 选课人数:%d" % (course.id, course.name, course.teacherName, course.personNum))
__printSql()
return HttpResponse("查询每门课的选课人数")
def __printSql():
    """Debug helper: dump every SQL statement Django ran so far
    (connection.queries is populated only when DEBUG is on)."""
    print("运行的全部Sql语句:")
    for sql in connection.queries:
        print(sql)
|
# soaplib with mod_wsgi/cherrypy
# NOTE(review): fragment only -- `cherrypy` and the `Root` application class
# are not defined here; requires `import cherrypy` and a Root class to run.
cherrypy.quickstart(Root(), "/")
|
# Brazilian league (Brasileirão) team tuple; show:
#   - the first five, the last four, alphabetical order,
#   - and Chapecoense's position.
brasileirao = ('Palmeiras','Flamengo','Internacional',
'São Paulo','Grêmio','Atlético-MG','Santos','Fluminense',
'Cruzeiro','Atlético-PR','Corinthians','Bahia','Botafogo','Vitória','América-MG',
'Vasco','Chapecoense','Ceará','Sport','Paraná')
print('Os Cinco primeiros')
# Slice + enumerate instead of indexing by a hard-coded range(0, 5).
for c, time in enumerate(brasileirao[:5]):
    print(f'Na Posição {c+1}: {time}')
print('\n')
print('Os Quatro últimos')
# c runs -4..-1; position derived from the tuple length instead of a
# hard-coded 21, so the code survives roster changes.
for c in range(-4, 0):
    print(f'Na Posiçao {c+len(brasileirao)+1}: {brasileirao[c]}')
print('\n')
ordem = sorted(brasileirao)
print('Times em ordem alfabética:')
for time in ordem:
    print(time)
print('\n')
n = brasileirao.index('Chapecoense')
print(f'Posição do Chapecoense: {n+1}')
|
21,844 | df14b0c6b16629d0d604062a525e378630b27b87 | #!/usr/bin/env python
# -*- coding: cp936 -*-
import easygui as g
import win32com.client as win32com
import os
import os.path
import csv
import re
import codecs,sys
import tkinter
import win32timezone
import win32timezone
import datetime
import time
now_1 = str(time.strftime('%Y'))
def ph_QC_Chart(address, files):
    """Append pH QC 'CC' rows from user-selected CSV exports into the Excel
    QC chart workbook *files* (row 1: date, row 2: value, sheet "Data 1").
    """
    import tkinter.filedialog  # BUG FIX: submodule is not loaded by a bare `import tkinter`
    root = tkinter.Tk()
    paths = tkinter.filedialog.askopenfilenames(
        initialdir='Z:/Data/%s/%s' % (now_1, address), title="Select files",
        filetypes=[("csvfile", "*.csv")])
    root.destroy()
    excel = win32com.gencache.EnsureDispatch('Excel.Application')
    excel.Visible = True
    excel.Application.DisplayAlerts = True
    workbook = excel.Workbooks.Open(os.path.join(os.getcwd(), r'./%s' % files))
    # Find the first empty column in row 2 of "Data 1": new points go there.
    maxcolumn = 1
    while excel.Sheets("Data 1").Cells(2, maxcolumn).Value is not None:
        maxcolumn += 1
    k = 0
    # BUG FIX: askopenfilenames returns a TUPLE of paths; the original
    # stringified the whole tuple into open() and failed. Process each file.
    for path in paths:
        ph_date = csv.reader(open('%s' % path, 'r'))
        print(ph_date)
        for each in ph_date:
            if ('CC' in each):
                # Row date lives in column 0 as part of a longer string.
                now = re.findall('\d{4}-\d{2}-\d{2}', each[0])
                excel.Sheets("Data 1").Cells(1, maxcolumn + k).Value = now[0]
                excel.Sheets("Data 1").Cells(1, maxcolumn + k).NumberFormat = "yyyy/m/d"
                excel.Sheets("Data 1").Cells(2, maxcolumn + k).Value = each[5]
                excel.Sheets("Data 1").Cells(2, maxcolumn + k).NumberFormat = "0.000"
                k += 1
# ccbox shows two buttons; the first ('2014') returns True (== 1).
option=g.ccbox('which test item',title='pH QC data input', choices=('2014','2018'))
if option==1:
    print ('2014')
    ph_QC_Chart('66-01-2014-015 pH','QC Chart _pH_66-01-2014-015.xlsx')
else:
    print ('2018')
    ph_QC_Chart('66-01-2018-006 pH','QC Chart _pH_66-01-2018-006.xlsx')
g.msgbox('End ')
|
21,845 | 4025c4c450df45460478fbd605a9b4d2ea0124b3 | """knut_festival URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
import app.views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', app.views.home, name="home"),
path('map_traffic/', app.views.map_traffic, name="map_traffic"),
path('aboutus/', app.views.aboutus, name="aboutus"),
# path('1/', app.views.comment_write, name="comment_write"),
# path('2/', app.views.login, name="login"),
# path('accounts/', include('allauth.urls')),
path('board/', app.views.board, name="board"),
path('board/<int:board_id>/', app.views.detail, name="detail"),
path('board/new/', app.views.new, name="new"),
path('board/create/', app.views.create, name="create"),
path('board/<int:board_id>/delete', app.views.delete, name="delete"),
path('board/<int:board_id>/deleteConfirm', app.views.deleteConfirm, name="deleteConfirm"),
path('board/<int:board_id>/edit', app.views.edit, name="edit"),
path('board/<int:board_id>/editConfirm', app.views.editConfirm, name="editConfirm"),
path('board/editFailed/', app.views.editFailed, name="editFailed"),
path('board/deleteFailed/', app.views.deleteFailed, name="deleteFailed"),
path('board/deleteSuccess/', app.views.deleteSuccess, name="deleteSuccess"),
path('board/friendsEditFailed', app.views.friendsEditFailed, name="friendsEditFailed"),
path('board/friendsDeleteFailed', app.views.friendsDeleteFailed, name="friendsDeleteFailed"),
path('board/friendsDeleteSuccess', app.views.friendsDeleteSuccess, name="friendsDeleteSuccess"),
path('kakao/', app.views.kakao, name="kakao"),
path('oauth/', app.views.oauth, name="oauth"),
# path('kakaoLogout/', app.views.kakaoLogout, name="kakaoLogout"),
# ################# 삭제 #################
# path('board/boothPromotion', app.views.boothPromotion, name="boothPromotion"),
# path('board/boothPromotionNew', app.views.boothPromotionNew, name="boothPromotionNew"),
# path('board/boothPromotionCreate', app.views.boothPromotionCreate, name="boothPromotionCreate"),
# path('board/boothPromotion/<int:board_id>/', app.views.boothPromotionDetail, name="boothPromotionDetail"),
# ################# 삭제 #################
path('board/friends', app.views.friends, name="friends"),
path('board/friendsNew', app.views.friendsNew, name="friendsNew"),
path('board/friendsCreate', app.views.friendsCreate, name="friendsCreate"),
path('board/friends/<int:board_id>/', app.views.friendsDetail, name="friendsDetail"),
path('board/friends/<int:board_id>/delete', app.views.friendsDelete, name="friendsDelete"),
path('board/friends/<int:board_id>/deleteConfirm', app.views.friendsDeleteConfirm, name="friendsDeleteConfirm"),
path('board/friends/<int:board_id>/edit', app.views.friendsEdit, name="friendsEdit"),
path('board/friends/<int:board_id>/editConfirm', app.views.friendsEditConfirm, name="friendsEditConfirm"),
# ################# 삭제 #################
# path('board/free', app.views.free, name="free"),
# path('board/freeNew', app.views.freeNew, name="freeNew"),
# path('board/freeCreate', app.views.freeCreate, name="freeCreate"),
# path('board/free/<int:board_id>/', app.views.freeDetail, name="freeDetail"),
# ################# 삭제 #################
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
21,846 | 50913e69790b1f39dfb4288eedbc4eae00b28d30 | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# --- Energy production by source, per coal-capacity scenario ----------------
# The seven sensitivity files differ only in the coal-capacity suffix, so
# read them in a loop instead of seven copy-pasted read_csv calls.
capacities = [0, 50, 100, 150, 200, 250, 300]
x = ['{}MW'.format(c) for c in capacities]
hourly = [pd.read_csv('Sensitivity/Capacity_max/Operational_coal_max_hourly_HPC_{}.csv'.format(c))
          for c in capacities]
y1 = np.array([df['solar'].sum() for df in hourly])
y2 = np.array([df['wind'].sum() for df in hourly])
y3 = np.array([df['coal'].sum() for df in hourly])
y4 = np.array([df['battery'].sum() for df in hourly])
# Stacked bars: solar at the bottom, then wind, coal, battery.
plt.bar(x, y1, color='orangered')
plt.bar(x, y2, bottom=y1, color='dodgerblue')
plt.bar(x, y3, bottom=y1 + y2, color='darkgrey')
plt.bar(x, y4, bottom=y1 + y2 + y3, color='seagreen')
plt.xlabel("Coal capacity")
plt.ylabel("Energy [MWh]")
plt.legend(['Solar', 'Wind', 'Coal', 'Battery'])
plt.show()

# --- Non-delivered energy per scenario --------------------------------------
yearly = [pd.read_csv('Sensitivity/Capacity_max/Operational_coal_max_yearly_HPC_{}.csv'.format(c))
          for c in capacities]
y1 = np.array([df['nonDel_yearly'].sum() for df in yearly])
plt.bar(x, y1, color='steelblue')
plt.xlabel("Coal capacity")
plt.ylabel("Energy [MWh]")
plt.show()

# --- Installed capacity per technology --------------------------------------
# BUG FIX: the original issued three bar() calls whose 1-element height
# arrays did not match the three x categories (shape mismatch in modern
# matplotlib). One call with matched heights/colors draws the intended chart.
x = ['solar', 'wind', 'coal']
plt.bar(x, [60, 345, 206], color=['orangered', 'dodgerblue', 'darkgrey'])
plt.ylabel("Installed capacity [MW]")
plt.show()
21,847 | 572afa18c5ebd676fc2e347dbd24ca15abbc5c32 | from sklearn.base import TransformerMixin, BaseEstimator
import numpy as np
import pandas as pd
class EMATransformer(BaseEstimator, TransformerMixin):
    """Sklearn transformer that emits exponential-moving-average columns
    ('ema_<w>') of one input feature.

    Windows are given in days; rows are treated as 5-minute bars, so each
    window is multiplied by 288 internally.
    """
    def transform(self, X, windows = [3, 5, 10, 20, 50], feature = 'close' ,**transform_params):
        Xdum = pd.DataFrame(index=X.index)
        for window in windows:
            Xdum[('ema_%d' % window)] = EMATransformer.exp_moving_average(self, X, window, feature)
            # Replace any zero EMA values (the padded head) with the raw feature.
            Xdum[('ema_%d' % window)] = np.where(Xdum[('ema_%d' % window)] == 0, X[feature], Xdum[('ema_%d' % window)])
        return Xdum
    def fit(self, X, y=None, **fit_params):
        # Stateless transformer: nothing to learn.
        return self
    def exp_moving_average(self, df, window, feature):
        """Exponentially weighted moving average of df[feature].

        NOTE(review): `a[:window] = a[window]` assumes len(df) > window*288;
        shorter series raise IndexError -- confirm input lengths.
        """
        window *= 288 # 5 min periods -> 1 day
        values = np.array(df[feature])
        weights = np.exp(np.linspace(-1.,0.,window))
        weights /= weights.sum()
        a = np.convolve(values, weights)[:len(values)]
        a[:window] = a[window]
        return a
|
# Read n and print n! computed iteratively.
k = 1
fat = 1
n = int(input('n:'))
while k <= n:
    # BUG FIX: the original assigned to `far` (typo), so `fat` stayed 1
    # and the script always printed fat(n) = 1.
    fat = fat * k
    k = k + 1
print(f'fat(n) = {fat}')
|
21,849 | ebb68eb7a7a6ee58af8dc419ffc176e3de12ff1e | x=5
def func(a):
    """Return a + 1."""
    return a + 1
y=int(input("enter no"))
z=y+func(x)
print(z) |
# Scenario: this text was scraped from the web with messy whitespace.
# Goal:
#   1. strip every whitespace character from the string
#   2. re-join the pieces with " " into one tidy string
poem_str="登鹳雀楼\t王之涣\t白日依山尽\t\n黄河入海流\n欲穷千里目\n\t更上一层楼"
print(poem_str)
# 1. split: no-argument split() breaks on any run of whitespace
poem_list=poem_str.split()
print(poem_list)
# 2. join the pieces back together
result=" ".join(poem_list)
print(result)
# Note: any separator string (" ", "\n", "\t", ...) works with join
21,851 | b2c8dd2a964b990854b7820d760e63e51e0aa3d1 | import util_funcs as uf
import numpy as np
from sympy import Rational
from sympy.matrices import Matrix, eye, zeros;
from sympy import nsimplify
import sympy as spy
def check_int_mat(T, tol1):
    """True when every entry of T is within tol1 of an integer.

    Accepts a numpy array or a sympy Matrix (converted to float first).
    """
    arr = np.array(T, dtype='double') if isinstance(T, Matrix) else T
    deviation = np.abs(arr - np.around(arr))
    return np.max(deviation) < tol1
def rat_approx(Tmat, tol1=0.01):
    """Elementwise rational approximation of Tmat.

    Returns (Nmat, Dmat) int64 arrays, same shape as Tmat, with
    Nmat/Dmat ~= Tmat and denominators capped at 1/tol1 (via sympy
    Rational.limit_denominator).
    """
    input1 = Tmat.flatten()
    nshape = np.shape(Tmat)
    denum_max = 1/tol1
    Sz = input1.shape
    Nmat = np.zeros(np.shape(input1), dtype='int64')
    Dmat = np.zeros(np.shape(input1), dtype='int64')
    for ct1 in range(Sz[0]):
        # Best rational p/q with q <= denum_max for this entry.
        num1 = (Rational(input1[ct1]).limit_denominator(denum_max))
        Nmat[ct1] = num1.p
        Dmat[ct1] = num1.q
    Nmat1 = np.reshape(Nmat, nshape)
    Dmat1 = np.reshape(Dmat, nshape)
    Nmat1 = np.array(Nmat1, dtype='int64')
    Dmat1 = np.array(Dmat1, dtype='int64')
    return Nmat1, Dmat1;
def gcd_arr(int_mat):
    """Return the non-negative GCD of all entries of an integer array
    (0 for an all-zero array).

    Uses math.gcd folded over the flattened array instead of per-element
    sympy.gcd: identical results for integer input (math.gcd handles
    negative arguments and gcd(0, x) == |x|) without symbolic overhead.
    """
    from functools import reduce
    from math import gcd
    flat = int_mat.flatten()
    return int(reduce(gcd, (int(v) for v in flat), 0))
def lcm_arr(Dmat):
    """Return the LCM of all entries of an integer array.

    Equivalent to folding sympy.lcm over the flattened array, computed via
    the identity lcm(a, b) = |a*b| // gcd(a, b); any zero entry makes the
    result 0, matching sympy.lcm. Python ints avoid int64 overflow.
    """
    from functools import reduce
    from math import gcd

    def _lcm(a, b):
        return abs(a * b) // gcd(a, b) if a and b else 0

    flat = Dmat.flatten()
    return int(reduce(_lcm, (int(v) for v in flat), 1))
def int_approx(Tmat, tol1=0.01):
    """Scale Tmat to a coprime integer matrix.

    Returns (int_mat, mult) with Tmat ~= int_mat / mult to within tol1.
    Two candidate scalings are tried -- mid-range-based and max-based --
    and the one with the smaller error (smaller norm on a tie) wins.

    Raises Exception when clearing denominators does not yield integers.
    """
    tct1 = np.max(np.abs(Tmat))
    tct2 = np.min(np.abs(Tmat))
    # Candidate 1: inverse of the mid-range; candidate 2: inverse of the max.
    mult1 = 1/((tct1 + tct2)/2)
    mult2 = 1/np.max(np.abs(Tmat))
    Tmat1 = Tmat*mult1
    Tmat2 = Tmat*mult2
    N1, D1 = rat_approx(Tmat1, tol1)
    N2, D2 = rat_approx(Tmat2, tol1)
    lcm1 = lcm_arr(D1)
    lcm2 = lcm_arr(D2)
    # Clearing denominators should give (near-)integer matrices.
    int_mat1 = np.array((N1/D1)*lcm1, dtype='double')
    int_mat2 = np.array((N2/D2)*lcm2, dtype='double')
    cond1 = check_int_mat(int_mat1, tol1*0.01)
    if cond1:
        int_mat1 = np.around(int_mat1)
        int_mat1 = np.array(int_mat1, dtype='int64')
    else:
        raise Exception("int_mat1 is not an integer matrix")
    cond2 = check_int_mat(int_mat2, tol1*0.01)
    if cond2:
        int_mat2 = np.around(int_mat2)
        int_mat2 = np.array(int_mat2, dtype='int64')
    else:
        raise Exception("int_mat2 is not an integer matrix")
    # Reduce both candidates to coprime form.
    gcd1 = gcd_arr(int_mat1)
    gcd2 = gcd_arr(int_mat2)
    int_mat1 = int_mat1/gcd1
    int_mat2 = int_mat2/gcd2
    int_mat1 = np.array(int_mat1, dtype='int64')
    int_mat2 = np.array(int_mat2, dtype='int64')
    t1_mult = mult1*lcm1/gcd1
    t2_mult = mult2*lcm2/gcd2
    # Pick the candidate reproducing Tmat more accurately.
    err1 = np.max(np.abs(Tmat - int_mat1/t1_mult))
    err2 = np.max(np.abs(Tmat - int_mat2/t2_mult))
    if err1 == err2:
        # Tie: prefer the candidate with the smaller Frobenius norm.
        tnorm1 = np.linalg.norm(int_mat1)
        tnorm2 = np.linalg.norm(int_mat2)
        if (tnorm1 > tnorm2):
            return int_mat2, t2_mult
        else:
            return int_mat1, t1_mult
    else:
        if err1 > err2:
            return int_mat2, t2_mult
        else:
            return int_mat1, t1_mult
|
# Print a five-row centered star pyramid (Python 2 print statement syntax).
for i in range(5):
    print " "*(4-i), "*"*((i+1)*2-1)
21,853 | 1e39b2d01984d52997e0ec4cafc73d1538a20dc4 | """
Description:
This script is used to create the file `TF_intervals.txt` from the file
`sorted_deepsea_data.bed` that was created by running
`process_chromatin_profiles.py`. `TF_intervals.txt` is used as input
to a `selene_sdk.samplers.IntervalsSampler` and contains only the
regions in `sorted_deepsea_data.bed` annotated to at least 1 TF.
Usage:
create_TF_intervals_file.py <features> <data-bed> <output-txt>
create_TF_intervals_file.py -h | --help
Options:
-h --help Show this screen
<features> Path to the list of genomic features in our dataset
<data-bed> Path to the DeepSEA data .bed file
<output-txt> Path to the output file
"""
from docopt import docopt
if __name__ == "__main__":
    arguments = docopt(
        __doc__,
        version="1.0")
    features_file = arguments["<features>"]
    data_file = arguments["<data-bed>"]
    output_file = arguments["<output-txt>"]
    # One feature name per line.
    features = []
    with open(features_file, 'r') as fh:
        for f in fh:
            features.append(f.strip())
    # Keep only transcription-factor features: drop DNase (accessibility)
    # tracks and histone marks (target names like "H3K4me3": 'H' + digit).
    only_TF_features = []
    for f in features:
        if "DNase" in f:
            continue
        # Feature names are '|'-separated; the target is the second field.
        target = f.split('|')[1]
        if 'H' == target[0] and str.isdigit(target[1]):
            continue
        only_TF_features.append(f)
    # PERF: set membership is O(1) per BED line; the original tested
    # against a list, O(len(features)) per line.
    tf_set = set(only_TF_features)
    with open(data_file, 'r') as read_fh, \
            open(output_file, 'w+') as write_fh:
        for line in read_fh:
            cols = line.strip().split('\t')
            if cols[-1] not in tf_set:
                continue
            # Emit chrom, start, end only.
            write_fh.write("{0}\t{1}\t{2}\n".format(
                cols[0], cols[1], cols[2]))
|
21,854 | 5904b3c2b29237661a2a03c32369b2b72deb1854 | shoplist = ['яблоки',
'манго',
'морковь',
'бананы']
name = 'swaroop'
# Операция индексирования
print('Элемент 0 - ', shoplist[0]) # apple
print('Элемент 1 - ', shoplist[1]) # mango
print('Элемент 2 - ', shoplist[2]) # morkov
print('Элемент 3 - ', shoplist[3]) # bananas
print('Элемент -1 - ', shoplist[-1]) # bananas
print('Элемент -2 - ', shoplist[-2]) # morkov
print('Символ 0 - ', name[0]) # s
# Вырезка из списка
print('Элементы с 1 по 3: ', shoplist[1:3]) # mango, morkov
print('Элементы с 2 до конца: ', shoplist[2:]) # morkov, bananas
print('Элементы с 1 по -1: ', shoplist[1:-1]) # mango, morkov
print('Элементы от начала до конца: ', shoplist[:]) # apple mango morkov bananas
# Вырезка из строки
print('Символы с 1 по 3: ', name[1:3]) # wa
print('Символы с 2 до конца: ', name[2:]) # aroop
print('Символы с 1 по -1: ', name[1:-1]) # waroo
print('Символы от начала до конца: ', name[:]) # swaroop
# Вырезка с шагом
print('', shoplist[::1]) # all
print('', shoplist[::2]) # 0,2
print('', shoplist[::3]) # 0,3
print('', shoplist[::-1]) # all(but reverse) |
21,855 | 6873c72d13190e7db8b221e1f0e9bb1ae0f43567 | # -*- coding: utf-8 -*-
def PrintAllTheRoute (graph,s,d): #s = start vertex, d = destination vertex
    """Print every simple path from s to d using iterative DFS with backtracking.

    graph: adjacency dict {vertex: [neighbours]}. Returns 0 in all cases.
    NOTE(review): the i==0 early exit below triggers on the FIRST backtrack
    if no path has been printed yet, so a graph whose first DFS branch
    dead-ends may wrongly report "no path" -- confirm intended behavior.
    """
    stack = [s] #DFS stack holding the current path
    HasBeenConsideredByItsFather = {s:set([])} #per-node set of children already tried
    i=0
    while stack: #empty stack means we are done
        OldLength = len(stack)
        for u in graph[stack[-1]]:
            if not (u in stack):
                if not (u in HasBeenConsideredByItsFather[stack[-1]]): #not yet tried and not on the path: push it
                    stack.append(u)
                    HasBeenConsideredByItsFather[u] = set([])
                    if stack[-1] == d: #top of stack is the destination: print this path
                        i = i+1
                        print('第',i,'种路径')
                        print(stack)
                    else:
                        pass
                    break #push at most one neighbour per outer iteration
                else:
                    continue
            else:
                continue
        if len(stack) == OldLength: #nothing was pushed: backtrack by popping
            if i==0:#interpreted as: start and destination are disconnected
                print("没有找到路径!")
                return 0
            PointOut = stack.pop()
            HasBeenConsideredByItsFather.pop(PointOut)
            if stack:
                HasBeenConsideredByItsFather[stack[-1]].add(PointOut)#mark the popped node as tried for the new top
            else:
                pass
    return 0
'''
graph = {'A':['B','G'],
'B':['A','G','C'],
'C':['B','F','D','E'],
'D':['C','E'],
'E':['D','C','F'],
'F':['C','E','G'],
'G':['A','B','F']}#用于验证,图见文件夹
PrintAllTheRoute(graph, 'A', 'E')
''' |
21,856 | a407b3cc2aa4bb0e364bfec18e3c40601c270a5f | # 把一个类作为一个迭代器使用需要在类中实现两个方法 __iter__() 与 __next__() 。
#
# 如果你已经了解的面向对象编程,就知道类都有一个构造函数,Python 的构造函数为 __init__(), 它会在对象初始化的时候执行。
#
# 更多内容查阅:Python3 面向对象
#
# __iter__() 方法返回一个特殊的迭代器对象, 这个迭代器对象实现了 __next__() 方法并通过 StopIteration 异常标识迭代的完成。
#
# __next__() 方法(Python 2 里是 next())会返回下一个迭代器对象。
#
# 创建一个返回数字的迭代器,初始值为 1,逐步递增 1:
class MyNumbers:
    """Iterator yielding 1, 2, 3, ... without end."""

    def __iter__(self):
        # (Re)start the count at 1 whenever iteration begins.
        self.a = 1
        return self

    def __next__(self):
        current = self.a
        self.a = current + 1
        return current
myclass = MyNumbers()
myiter = iter(myclass)
print(next(myiter))
print(next(myiter))
print(next(myiter))
print(next(myiter))
print(next(myiter))
print("------------")
# StopIteration
# StopIteration 异常用于标识迭代的完成,防止出现无限循环的情况,在 __next__() 方法中我们可以设置在完成指定循环次数后触发 StopIteration 异常来结束迭代。
#
# 在 20 次迭代后停止执行:
class MyNumbers2:
    """Iterator yielding 1 through 20, then raising StopIteration."""

    def __iter__(self):
        self.a = 1
        return self

    def __next__(self):
        if self.a > 20:
            # Signals normal end of iteration to for-loops and list().
            raise StopIteration
        current = self.a
        self.a += 1
        return current
myclass = MyNumbers2()
myiter = iter(myclass)
for x in myiter:
print(x)
|
21,857 | 40f28f7792078eda9b14595ca2d298ef07ac2d85 | # This Python script Merges 2 pairs by adding NNNNNNs
# This script uses ideas for parsing FASTQ from here http://news.open-bio.org/news/2009/12/interleaving-paired-fastq-files-with-biopython/
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import sys
# CLI: script.py <forward.fastq> <reverse.fastq> <merged_output.fasta>
ForwardName = str(sys.argv[1]) # forward reads file
ReverseName = str(sys.argv[2]) # reverse reads file
mergedName = str(sys.argv[3]) # output file
print ("Forward Name is " + ForwardName)
print ("Reverse Name is " + ReverseName)
print ("Merged output is " + mergedName)
insert = "N" * 300 # assumed insert size TODO pass as argument Crazy TODO add random DNA instead of NNNN
def merge(iter1, iter2, gap="N" * 300):
    """Yield one pseudo-contig SeqRecord per read pair: forward + gap + revcomp(reverse).

    iter1/iter2 -- iterators of Bio.SeqRecord (forward / reverse reads,
        in the same order).
    gap -- spacer string inserted between the mates. Defaults to 300 Ns
        (the assumed insert size), matching the previous module-level
        constant; parameterized so callers can supply their own spacer.
    Raises AssertionError if a pair's ids disagree (files out of sync).
    """
    for (forward, reverse) in zip(iter1, iter2):
        # Paired files must be sorted identically; ids must match.
        assert forward.id == reverse.id
        allSeq = str(forward.seq + gap + reverse.seq.reverse_complement())
        # The forward id becomes the FASTA header of the merged record.
        yield SeqRecord(Seq(allSeq), id=forward.id)
# Stream both FASTQ files, merge pair-by-pair, and write FASTA output.
# NOTE(review): open mode "rU" was removed in Python 3.11 -- use "r".
records_f = SeqIO.parse(open(ForwardName,"rU"), 'fastq') # Input format can be changed here TODO add argument
records_r = SeqIO.parse(open(ReverseName,"rU"), 'fastq') # Input format can be changed here TODO add argument
handle = open(mergedName, "w")
count = SeqIO.write(merge(records_f, records_r), handle, 'fasta') #Fastq won't work because the quality data is lost
handle.close()
print ("Wrote %i entries to %s" % (count, mergedName))
|
21,858 | 724fb97ec523345dcc638d78a6f64d0b703b3959 | from matplotlib import pyplot as plt
from src.models.learn_rate import find_lr
from pathlib import Path
from pyedflib import highlevel
import numpy as np
def data_length(folder):
    """Print file counts and total recording length (frames/seconds/hours)
    for the seizure and normal EDF files under `folder`.

    Assumes a 256 Hz sampling rate and the layout folder/seizures/*.edf
    and folder/normal/*.edf.
    """
    seizure_files = [(str(seizure)) for seizure in sorted(Path(folder).glob('seizures/*.edf'))]
    normal_files = [(str(normal)) for normal in sorted(Path(folder).glob('normal/*.edf'))]
    print("Number of files with seizures: " + str(len(seizure_files)))
    print("Number of files without siezures: " + str(len(normal_files)))
    # NOTE(review): debug leftover -- hard-coded file 20 / channel 10 will
    # raise IndexError when fewer files/channels exist; confirm removal.
    signals, _, _ = highlevel.read_edf(str(seizure_files[20]))
    print(signals[10])
    length_seizures = []
    length_normal = []
    # Length of channel 0 (in samples) is taken as the recording length.
    for seizure_file in seizure_files:
        signals, _, _ = highlevel.read_edf(str(seizure_file))
        length_seizures.append(len(signals[0]))
    for normal_file in normal_files:
        signals, _, _ = highlevel.read_edf(str(normal_file))
        length_normal.append(len(signals[0]))
    dis = np.sum(np.array(length_seizures)) # data in seizure
    din = np.sum(np.array(length_normal)) # data in normal
    # 256 samples/s assumed; report frames, seconds, and hours.
    print("Seizure: " + str(dis) + "fr/ " + str(dis/256) + "s/ " + str(dis/256/3600) + "h")
    print("Normal: " + str(din) + "fr/ " + str(din/256) + "s/ " + str(din/256/3600) + "h")
# data_length("/home/jmsvanrijn/Documents/Afstuderen/Code/low-power-epilepsy-detection/data/processed/") |
21,859 | 3139d57e31dd8e71a18f72c19066ab285bdd852c |
import networkx as nx
from matplotlib import pyplot as plt
from common import remove_segments_with_no_points, build_road_network_from_shapefile_with_no_middle_nodes
file_points = 'data/gps_data/gps_points_07-11.csv'
data = []
X = []
Y = []
with open(file_points, 'r') as f:
for line in f:
x = line.split('\t')
data.append((float(x[0]), float(x[1])))
X.append(float(x[0]))
Y.append(float(x[1]))
# 2. Create directed graph from OSM:
shapefile = 'data/shapefiles/relevant_osm_part_doha_roads.shp'
gt_rn = build_road_network_from_shapefile_with_no_middle_nodes(shape_file=shapefile)
nseg = gt_rn.number_of_edges()
nx.write_gpickle(gt_rn, 'data/doha_roads.gpickle')
for s, t in gt_rn.edges():
plt.plot([s[0], t[0]], [s[1], t[1]], color='blue')
clean_rn = remove_segments_with_no_points(rn=gt_rn, data=data, distance_threshold=50)
nx.write_gpickle(gt_rn, 'data/clean_doha_roads.gpickle')
print 'segments in initial network:%s, segments in new network:%s' % (nseg, clean_rn.number_of_edges())
plt.scatter(X,Y, color='black')
for s, t in clean_rn.edges():
plt.plot([s[0], t[0]], [s[1], t[1]], color='red')
plt.show() |
21,860 | e0b5f729067411de0beb15e9bb6c6a4f3ac7f21c | __all__ = [
'test_concierge_courier.py'
] |
21,861 | ff145ce0a9ec08eae3bd52ebc6057688da8310f4 | # coding: utf-8
import os
import click
@click.command(
    "fit",
    help="""
Train a model.

INPUT should be a directory or list of directories. Subdirectories of INPUT directories are class labels and
subdirectory contents are image data as NPY arrays.
"""
)
@click.argument(
    "input",
    nargs=-1,
    required=True,
    type=click.Path(exists=True)
)
@click.option(
    "--batch-size",
    default=32,
    help="Number of samples per gradient update.",
    type=click.INT
)
@click.option(
    "--directory",
    default=None,
    help="Output directory for model checkpoints, metrics, and metadata.",
    type=click.Path(exists=True)
)
@click.option(
    "--epochs",
    default=128,
    help="Number of iterations over training data.",
    type=click.INT
)
@click.option(
    "--name",
    default=None,
    help="A unique identifier for referencing this model.",
    type=click.STRING
)
@click.option(
    "--validation-split",
    default=0.2,
    help="Fraction of training data withheld for validation.",
    type=click.FLOAT
)
@click.option(
    "--verbose",
    is_flag=True
)
def command(input, batch_size, directory, epochs, name, validation_split, verbose):
    """CLI entry point: load labeled NPY data from the INPUT directories and train a model."""
    # Deferred import keeps CLI startup fast.
    import deepometry.utils

    # NOTE(review): the comprehension variable `directory` shadows the
    # --directory option name; harmless on Python 3 (comprehension scope),
    # but would clobber the option value on Python 2 -- confirm Python 3 only.
    directories = [os.path.realpath(directory) for directory in input]

    # `units` is the number of distinct class labels found in the data.
    x, y, units = deepometry.utils.load(directories, sample=True)

    _fit(x, y, units, batch_size, directory, epochs, name, validation_split, verbose)
def _fit(x, y, units, batch_size, directory, epochs, name, validation_split, verbose):
    """Build, compile, and train a deepometry Model on (x, y).

    units -- number of output classes; directory/name locate checkpoints
    and metadata. verbose toggles Keras-style progress output (1/0).
    """
    # Deferred import keeps CLI startup fast.
    import deepometry.model

    model = deepometry.model.Model(
        directory=directory,
        name=name,
        shape=x.shape[1:],
        units=units
    )

    model.compile()

    model.fit(
        x,
        y,
        batch_size=batch_size,
        epochs=epochs,
        validation_split=validation_split,
        verbose=1 if verbose else 0
    )
21,862 | 944e2e463d5416513d96adce91bb53c3adf93d27 | # Generated by Django 2.0.6 on 2020-03-09 21:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds the More and Trainers models
    (both with an optional FK to main.Sport). Do not hand-edit applied
    migrations."""

    dependencies = [
        ('main', '0022_auto_20200309_2102'),
    ]

    operations = [
        migrations.CreateModel(
            name='More',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=250)),
                ('video', models.TextField(blank=True, default='', verbose_name='Видео')),
                ('description', models.TextField(blank=True, default='', verbose_name='Расписание')),
                ('sort', models.DecimalField(blank=True, decimal_places=0, max_digits=5, null=True)),
                ('sport', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='main.Sport')),
            ],
            options={
                'verbose_name': 'Подробнее',
                'verbose_name_plural': 'Подробнее',
                'ordering': ['id'],
            },
        ),
        migrations.CreateModel(
            name='Trainers',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=250)),
                ('image', models.ImageField(blank=True, default='', null=True, upload_to='main/images', verbose_name='Изображение')),
                ('sort', models.DecimalField(blank=True, decimal_places=0, max_digits=5, null=True)),
                ('more', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='main.More')),
                ('sport', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='main.Sport')),
            ],
            options={
                'verbose_name': 'Тренер',
                'verbose_name_plural': 'Тренеры',
                'ordering': ['id'],
            },
        ),
    ]
21,863 | aba1fc1ff8f0650b01afe9d6f10fa57bce6c340c |
# coding: utf-8
# In[13]:
# Exported Jupyter notebook: logistic-regression classifier on the Pima
# Indians diabetes dataset, evaluated with a confusion-matrix heatmap.
import pandas as pd

# load dataset
pima = pd.read_csv("diabetes.csv")
pima.head()

# In[6]:

#split dataset in features and target variable
feature_cols = ['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness','Insulin','BMI','DiabetesPedigreeFunction', 'Age']
X = pima[feature_cols] # Features
y = pima.Outcome # Target variable
#print (X)
#print(y)

# In[7]:

# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# modern versions need sklearn.model_selection -- confirm pinned version.
from sklearn.cross_validation import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.25,random_state=0)

# In[8]:

# import the class
from sklearn.linear_model import LogisticRegression

# instantiate the model (using the default parameters)
logreg = LogisticRegression()

# fit the model with data
logreg.fit(X_train,y_train)

#
y_pred=logreg.predict(X_test)

# In[9]:

# import the metrics class
from sklearn import metrics
cnf_matrix = metrics.confusion_matrix(y_test, y_pred)
cnf_matrix

# In[10]:

# import required modules
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')

class_names=[0,1] # name of classes
fig, ax = plt.subplots()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names)
plt.yticks(tick_marks, class_names)
# create heatmap
sns.heatmap(pd.DataFrame(cnf_matrix), annot=True, cmap="YlGnBu" ,fmt='g')
ax.xaxis.set_label_position("top")
plt.tight_layout()
plt.title('Confusion matrix', y=1.1)
plt.ylabel('Actual label')
plt.xlabel('Predicted label')

# In[11]:

print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
print("Precision:",metrics.precision_score(y_test, y_pred))
print("Recall:",metrics.recall_score(y_test, y_pred))
def GaussianElimination(N, mat):
    """Solve an N x N linear system by Gaussian elimination with partial pivoting.

    mat -- N rows of N+1 numbers (augmented matrix [A | b]); modified in place.
    Returns the solution vector of length N.
    """
    # Forward elimination with partial pivoting.
    for col in range(N - 1):
        # Pick the row with the largest pivot magnitude in this column.
        pivot = col
        for row in range(col + 1, N):
            if abs(mat[row][col]) > abs(mat[pivot][col]):
                pivot = row
        # Swap the pivot row into place (only the relevant tail of each row).
        for k in range(col, N + 1):
            mat[col][k], mat[pivot][k] = mat[pivot][k], mat[col][k]
        # Eliminate below the pivot. k runs right-to-left so the multiplier
        # mat[row][col] keeps its original value until it is zeroed last.
        for row in range(col + 1, N):
            for k in range(N, col - 1, -1):
                mat[row][k] -= mat[col][k] * mat[row][col] / mat[col][col]
    # Back substitution.
    solution = [0] * N
    for row in range(N - 1, -1, -1):
        acc = 0.0
        for k in range(row + 1, N):
            acc += mat[row][k] * solution[k]
        solution[row] = (mat[row][N] - acc) / mat[row][row]
    return solution
def main():
    """Demo: solve the classic 3x3 system and print the solution."""
    augmented = [
        [1, 1, 2, 9],
        [2, 4, -3, 1],
        [3, 6, -5, 0],
    ]
    X = GaussianElimination(3, augmented)
    print('X = %.1lf, Y = %.1lf, Z = %.1lf' % (X[0], X[1], X[2]))

main()
|
21,865 | 438b393e9811cffefb7225b71e4c3f13e0a26844 | from fileinput import filename
from platform import node
from turtle import color, stamp, title
from unicodedata import name
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import numpy as np
import rospkg
import pandas as pd
import os
from slugify import slugify
import urllib.parse
import chart_studio.plotly as py
from plotly.offline import iplot
import itertools
from plotly.graph_objs import *
# Plot per-device-range heatmaps of the correlation, between agents, of the
# instantaneous mean graph idleness recorded during MRPP simulations.
graph_name = 'pipeline2'
dirname = rospkg.RosPack().get_path('mrpp_sumo')
no_agents = 6
deploy_tag = 'graph'
device_ranges = [100, 240, 1000]
device_names = ['Zigbee', 'BLE', 'LoRa']
# available_comparisons = ['Idleness', 'Worst Idleness']
# comparison_parameter_index = 0
# scater_nodes_algo_index = 2# putting scatter for only one algo is better otherwise mess put -1 if don't require node scatter
row_size = 1
col_size = 3
color_list = [
    '#1f77b4', # muted blue
    '#ff7f0e', # safety orange
    '#2ca02c', # cooked asparagus green
    '#d62728', # brick red
    '#9467bd', # muted purple
    '#8c564b', # chestnut brown
    '#e377c2', # raspberry yogurt pink
    '#7f7f7f', # middle gray
    '#bcbd22', # curry yellow-green
    '#17becf' # blue-teal
]
colorscale_list = ['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
                   'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
                   'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
                   'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
                   'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
                   'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
                   'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
                   'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
                   'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
                   'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
                   'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
                   'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
                   'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
                   'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
                   'ylorrd']
# names = algo_list
# names = names.extend(['Range vs Deviation'])
fig = make_subplots(rows=row_size, cols=col_size, subplot_titles=device_names)
fig.update_layout(title='Correlation of Instantaneous Graph Idle between agents ({})'.format(
    graph_name), title_x=0.5)
# One subplot per communication range (Zigbee / BLE / LoRa).
for idx, device_range in enumerate(device_ranges):
    path = '{}/post_process/{}/on_{}/{}m_range'.format(
        dirname, graph_name, deploy_tag, device_range)
    # NOTE(review): the loop variable `filename` below shadows the
    # `from fileinput import filename` at the top of the file -- confirm
    # that import is actually needed.
    n = [int(filename.split('_')[0]) for filename in os.listdir(path)]
    # NOTE(review): `df` and `stamps` are assigned but never used here.
    df = pd.DataFrame()
    results_path = '{}/{}_base_stations/{}_agents/run_0'.format(
        path, min(n), no_agents)
    agent_masterdata = np.load(
        '{}/agent_masterdata_final.npz'.format(results_path))['arr_0']
    print(agent_masterdata.shape)
    stamps = np.load('{}/stamps_final.npz'.format(results_path))['arr_0']
    # Mean over nodes (axis=2) -> per-agent graph idleness; transpose so rows
    # are agents and columns are timestamps.
    agent_masterdata_graph_idle = np.transpose(
        np.mean(agent_masterdata, axis=2))
    # print(agent_masterdata_graph_idle.shape)
    # Agent-by-agent correlation matrix of the idleness time series.
    corr = np.corrcoef(agent_masterdata_graph_idle)
    # NOTE(review): `showlegend=(False if idx == 0 else False)` is always
    # False -- confirm intent; colorbar shown only for the first subplot.
    fig.add_trace(go.Heatmap(z=corr,text=np.around(corr, 2), texttemplate="%{text}", showlegend=(False if idx == 0 else False), showscale=(
        True if idx == 0 else False), zmax=1, zmin=0), row=int(idx/col_size)+1, col=idx % col_size+1)
# NOTE(review): `layout` is built but never applied to the figure.
layout = Layout(
    paper_bgcolor='rgba(0,0,0,0)',
    plot_bgcolor='rgba(0,0,0,0)'
)
fig.update_layout(paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)')
# fig.update_xaxes(scaleanchor = "y",scaleratio = 1,showgrid=False)
# fig.update_yaxes(scaleanchor = "x",scaleratio = 1,showgrid=False)
# fig.update_layout(yaxis1 = dict(range=[0, 6]),yaxis2 = dict(range=[0, 6]),yaxis3 = dict(range=[0, 6]))
fig.update_layout(height=700, width=2100)
# fig.update_layout(xaxis_title = dict(text=))
# Label every axis tick with the agent number.
fig.update_layout(
    xaxis1=dict(
        tickmode='array',
        tickvals=[i for i in range(no_agents)],
        ticktext=['Agent_{}'.format(i+1) for i in range(no_agents)]
    ),
    xaxis2=dict(
        tickmode='array',
        tickvals=[i for i in range(no_agents)],
        ticktext=['Agent_{}'.format(i+1) for i in range(no_agents)]
    ),
    xaxis3=dict(
        tickmode='array',
        tickvals=[i for i in range(no_agents)],
        ticktext=['Agent_{}'.format(i+1) for i in range(no_agents)]
    ),
    yaxis1=dict(
        tickmode='array',
        tickvals=[i for i in range(no_agents)],
        ticktext=['Agent_{}'.format(i+1) for i in range(no_agents)]
    ),
    yaxis2=dict(
        tickmode='array',
        tickvals=[i for i in range(no_agents)],
        ticktext=['Agent_{}'.format(i+1) for i in range(no_agents)]
    ),
    yaxis3=dict(
        tickmode='array',
        tickvals=[i for i in range(no_agents)],
        ticktext=['Agent_{}'.format(i+1) for i in range(no_agents)]
    )
)
fig.update_xaxes(showline=True, linewidth=1, linecolor='black', mirror=True)
fig.update_yaxes(showline=True, linewidth=1, linecolor='black', mirror=True)
iplot(fig)
|
21,866 | d5b6db6b5d840052001d9a86340b337245e5026a | from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import QTableWidget,QTableWidgetItem
import sys
from m_win import *
import fb_parser
class MainWindow(QtWidgets.QMainWindow):
    """Main application window: hosts the generated Ui_MainWindow and wires
    the 'change_book' button to the FB2 parser."""
    def __init__(self, parent=None):
        # NOTE(review): calls QWidget.__init__ rather than
        # QMainWindow.__init__ / super().__init__ -- confirm intended.
        QtWidgets.QWidget.__init__(self, parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.prs = fb_parser.Parser()
        # Button click triggers parsing of the selected book.
        self.ui.change_book.clicked.connect(self.prs.parse_res)
# Standard Qt bootstrap: create the application, show the window, run the loop.
if __name__=="__main__":
    app = QtWidgets.QApplication(sys.argv)
    myapp = MainWindow()
    myapp.show()
    sys.exit(app.exec_())
|
class CachedRecordManager(object):
    """
    Client-side record filtering for APIs whose search is inadequate.

    Example:

    cached_records = CachedRecordsManager(TDPerson.objects.all())
    users = cached_records.find({'AuthenticationUserName': 'x'})
    """

    def __init__(self, records):
        self.records = records

    def find(self, match_dict, match_all=True):
        """Return the records matching match_dict.

        match_all=True  -- every key/value pair must match (AND).
        match_all=False -- at least one pair must match (OR).
        """
        found_records = []
        for record in self.records:
            if self._matches(record, match_dict, match_all):
                found_records.append(record)
        return found_records

    def _matches(self, record, match_dict, match_all):
        assert len(match_dict) > 0
        for (match_key, match_val) in match_dict.items():
            if record.get(match_key) == match_val:
                if match_all is False:
                    # OR mode: a single hit is enough.
                    return True
            else:
                if match_all:
                    # AND mode: a single miss is fatal.
                    return False
                # BUGFIX: previously ANY miss returned False regardless of
                # match_all, so match_all=False effectively only tested the
                # first key in the dict. In OR mode keep checking.
        # AND mode reaching here: every pair matched -> True.
        # OR mode reaching here: no pair matched -> False.
        return match_all
class KeyMatcher(object):
    """
    Lets you define keys that should be tracked. You then add()
    matches. You can then match() against the keys you defined.

    The reason this exists is to support a "hierarchy" of matches. For
    example, you may want to match on key A--and if there's a match,
    you're done. Then if there's no match on key A, try key B. &c.
    """

    def __init__(self, keys_to_track):
        """
        keys_to_track -- order is important! Matches will be tested in
        this order.
        """
        self.keys_to_track = keys_to_track
        # One value->obj index per tracked key.
        self.tracker = {}
        for key_to_track in self.keys_to_track:
            self.tracker[key_to_track] = {}

    def add(self, obj, match_dict):
        """
        Add obj as a match for match_dict values.

        Checks to make sure match_dict keys are valid.

        Note: match_dict values will be ignored if they do not exist,
        are None, or are ''.
        """
        for match_key in match_dict.keys():
            assert match_key in self.keys_to_track
        for key_to_track in self.keys_to_track:
            # BUGFIX: dict.has_key() was removed in Python 3; the `in`
            # operator is equivalent and works on Python 2 as well.
            if key_to_track in match_dict:
                match_val = match_dict[key_to_track]
                if match_val is None or match_val == '':
                    # Empty values are not indexable -- skip them.
                    pass
                else:
                    self.tracker[key_to_track][match_val] = obj

    def match(self, match_dict):
        """
        Find a match using match_dict. Returns None if there is no match.

        Checks to make sure match_dict keys are valid.
        """
        for match_key in match_dict.keys():
            assert match_key in self.keys_to_track
        # Keys are tried in declaration order: first hit wins (hierarchy).
        for key_to_track in self.keys_to_track:
            if key_to_track in match_dict:
                match_val = match_dict[key_to_track]
                if match_val in self.tracker[key_to_track]:
                    return self.tracker[key_to_track][match_val]
        return None

    def keys(self):
        return self.keys_to_track
class KeyMatchingCachedRecordManager(CachedRecordManager):
    """
    Cached record manager with an automatic per-key index.

    Subclasses define a `KEYS_TO_TRACK` property and an _add_matches()
    method; records are indexed under each tracked key at construction.
    """

    def __init__(self, *args, **kwargs):
        super(KeyMatchingCachedRecordManager, self).__init__(*args, **kwargs)
        self.key_matcher = KeyMatcher(self.KEYS_TO_TRACK)
        self._add_matches()

    def _add_matches(self):
        """
        Utility function to populate key_matcher from self.records.
        """
        tracked_keys = self.key_matcher.keys()
        for record in self.records:
            self.key_matcher.add(
                obj=record,
                match_dict={key: record.get(key) for key in tracked_keys})

    def match(self, **match_dict):
        # Delegate to the key matcher's hierarchical lookup.
        return self.key_matcher.match(match_dict)
21,868 | c7a7e9e629344b9d62ccc77227800b8e0a05a0c7 | # key = input("Enter the actress'name: ")
# value = input("Enter the actress's age: ")
# film = {
# "name": "Stranger things",
# "seasons": 2,
# }
# film[key] = value
# print(film)
pokemon = {
    "name": "pikachu",
    "owner": "Ash",
}
# Read one "key,value" pair from the user and add it to the dict.
text = input("Enter new items: ")
pair = text.split(",")
# NOTE(review): raises IndexError when the input contains no comma, and the
# value keeps any whitespace after the comma -- confirm intended.
key = pair[0]
value = pair[1]
pokemon[key] = value
print(pokemon)
21,869 | 4468136da1c14c85913bf685aecf8e210c1b92a7 | from typing import List, Dict, Sequence, Optional, Any, Callable
from abc import ABC, abstractmethod, abstractproperty
import logging
import enum
class ModuleStatus(enum.Enum):
    """Lifecycle state of a module."""
    NOT_LOADED = 0x00  # initial state, before load() runs
    ERROR = 0xFF       # load() failed
    LOADED = 0x01      # load() succeeded
class ModuleIntegrity(enum.Enum):
    """Module classification: core vs plugin.

    Presumably CRT = critical and MNR = minor -- confirm the intended
    meaning of the suffixes.
    """
    CORE_MNR = 0x01
    CORE_CRT = 0x08
    PLUGIN_CRT = 0x11
    PLUGIN_MNR = 0x18
class ModuleLoadPriority(enum.Enum):
    """When a module is loaded relative to the default batch."""
    DEFAULT = 0x00
    PRE = 0x01
    # BUGFIX: POST was also 0x01, which made it an enum *alias* of PRE
    # (ModuleLoadPriority.POST is ModuleLoadPriority.PRE), so PRE and POST
    # modules were indistinguishable. Give POST its own value.
    POST = 0x02
class DuplicateModuleError(ValueError):
    """Raised when a second module registers under an already-used NAME."""
    def __init__(self, mod):
        assert isinstance(mod, Module)
        message = "Duplicate module: %s" % mod.NAME
        super().__init__(message)
class SystemDelegate:
    """Facade over a System instance handed to modules.

    Exposes the system's module registry, logger, settings, and
    event-loop helpers without giving modules the System object itself.
    """
    def __init__(self, system, logger):
        # NOTE(review): `logger` is validated but never stored; the `logger`
        # property reads `system._logger` instead -- confirm the parameter
        # is still needed.
        assert isinstance(logger, logging.Logger)
        self._status = ModuleStatus.NOT_LOADED
        self._system = system

    def modules_by_type(self, type_name: str) -> List['Module']:
        """Return all modules registered under `type_name` ([] if none)."""
        return self._system._types_to_modules.get(type_name, [])

    @property
    def logger(self) -> logging.Logger:
        # System-wide logger.
        return self._system._logger

    @property
    def modules(self) -> Dict[str, 'Module']:
        # Mapping of module name -> module instance.
        return self._system._modules

    @property
    def module_types(self) -> Dict[str, List['Module']]:
        # Mapping of module type name -> modules of that type.
        return self._system._types_to_modules

    @property
    def status(self) -> 'ModuleStatus':
        # The delegate's own status, set at construction time.
        return self._status

    @property
    def sets(self) -> Any:
        # System settings object.
        return self._system.sets

    def get_event_loop(self, another_thread: bool = False, run_pre: Callable[[Any], None] = None):
        """Forward to System.get_event_loop (see System for semantics)."""
        return self._system.get_event_loop(another_thread=another_thread, run_pre=run_pre)

    def push_to_thread(self, func):
        """Forward `func` to the system's worker thread."""
        return self._system.push_to_thread(func=func)
class Module(ABC):
    """Abstract base class for system modules.

    Subclasses override the class-level defaults below and implement
    load(); status reflects the module's lifecycle.
    """
    # default values - override if needed
    DEPENDENCIES = [] # type: List[str]
    NAME = None # type: str
    TYPE = None # type: Optional[str]
    INTEGRITY = ModuleIntegrity.CORE_CRT # type: ModuleIntegrity
    LOAD_PRIORITY = ModuleLoadPriority.DEFAULT # type: ModuleLoadPriority

    def __init__(self):
        self._status = ModuleStatus.NOT_LOADED
        self._system = None # type: SystemDelegate

    @classmethod
    def ModuleName(cls) -> str:
        """The module's unique name (NAME)."""
        return cls.NAME

    @classmethod
    def ModuleDependencies(cls) -> List[str]:
        """Names of modules this module depends on."""
        return cls.DEPENDENCIES

    # NOTE(review): the next two classmethods shadow the module-level enum
    # classes of the same names within this class's namespace -- confirm
    # that is intended.
    @classmethod
    def ModuleLoadPriority(cls) -> ModuleLoadPriority:
        return cls.LOAD_PRIORITY

    @classmethod
    def ModuleIntegrity(cls) -> ModuleIntegrity:
        return cls.INTEGRITY

    @abstractmethod
    def load(self, system: 'SystemDelegate', **deps: 'Module') -> ModuleStatus:
        """Load the module; `deps` holds resolved dependency modules keyed by name. Returns the resulting status."""
        raise NotImplementedError()

    @property
    def status(self) -> ModuleStatus:
        # Current lifecycle status.
        return self._status
class RunnableModule(Module):
    """A module with a run() entry point in addition to load()."""
    # Names of modules that must run before this one.
    RUN_AFTER = []
    # Whether this module's dependencies also run first.
    RUN_AFTER_DEPENDENCIES = True

    @abstractmethod
    def run(self, **kwargs) -> None:
        """
        Perform a 'run' action on a module. Module should have status LOADED
        for a System to actually call this method.

        :param kwargs: additional arguments. Module mustn't make assumptions on
        the contents of this dictionary.

        To have consistent configuration, module should use System's "sets" property
        or an ApplicationContext class
        """
        raise NotImplementedError()
|
21,870 | 3eb6857cf8a5001b1660d9d97c713ba245c4bf37 | #coding: utf-8
# Inverte Triplas
# (C) 2016, Yovany Cunha/UFCG, Programaçao I
def inverte3a3(palavra):
    """Return `palavra` with its 3-character groups in reverse order.

    Assumes len(palavra) is a multiple of 3; leftover leading characters
    are dropped (matching the original behaviour).
    """
    invertida = ''
    # Walk the triple start positions from the end back to the beginning.
    for inicio in range(len(palavra) - 3, -1, -3):
        invertida += palavra[inicio:inicio + 3]
    return invertida
def soma_interna(num):
    """Return the sum of the decimal digits of non-negative integer `num`.

    BUGFIX: the original used `num /= 10`, which under Python 3 is float
    division -- the loop then iterates on fractional values and produces a
    wrong sum. Floor division keeps `num` an int (identical on Python 2).
    """
    soma = 0
    while num != 0:
        soma += (num % 10)
        num //= 10
    return soma
# Smoke test: the triples of "paisimtio" reversed give "tiosimpai".
assert inverte3a3("paisimtio") == "tiosimpai"
|
21,871 | ad1cd124714f0140db464d074ff615964faec17d | # -*- coding: utf-8 -*-
# Package metadata.
__author__ = 'Jay Choo'
__email__ = 'open-frequency@outlook.com'
__version__ = '0.1.0'
|
21,872 | d5b49bd806ec941e1820b3cc715ee9eac632575f | from django.conf.urls import url
from . import views
# URL namespace for reverse() lookups, e.g. 'PiB:curriculum'.
app_name = 'PiB'
# Routes map numeric primary keys to the corresponding class-based views.
urlpatterns = [
    url(r'^$', views.Curriculum.as_view(), name='curriculum'),
    url(r'^modules/(?P<pk>[0-9]+)/$', views.ModulePage.as_view(), name='module'),
    url(r'^lessons/(?P<pk>[0-9]+)/$', views.LessonPage.as_view(), name='lesson'),
    url(r'^problems/(?P<pk>[0-9]+)/$', views.MultipleChoice.as_view(), name='problem'),
    url(r'^DrawVector/(?P<pk>[0-9]+)/$', views.DrawVector.as_view(), name='drawvector'),
]
21,873 | 35abcf2542a48d22940d05071c520e963207e8bb | from server.settings import STORAGE, STORAGE_DIR
# Remote storage backend selection, driven by server.settings.STORAGE.
if STORAGE == 'SFTP':
    # Connection to the remote SFTP server.
    SFTP_STORAGE_HOST = '127.0.0.1'
    SFTP_STORAGE_ROOT = '/media/' + STORAGE_DIR
    # NOTE(security/review): placeholder credentials committed in source --
    # real values should come from environment variables / a secrets store.
    SFTP_STORAGE_PARAMS = {
        'username': 'username',
        'password': 'password',
        'allow_agent': False,
        'look_for_keys': False,
    }
    SFTP_STORAGE_INTERACTIVE = False

elif STORAGE == 'WEBDAV':
    # Connection to the remote WebDAV server.
    WEBDAV_URL = 'https://username:password@webdav.yandex.ru'
    WEBDAV_PUBLIC_URL = "/media/"
21,874 | 8be3895bb3f00b95520e7e9b50d6214f60cc2bb9 | '''
public interface ViewController {
void show();
void hide();
}
'''
from abc import ABC, abstractmethod


class ViewController(ABC):
    """Abstract interface for view controllers (Python port of the Java
    interface quoted in the module docstring above).

    BUGFIX: @abstractmethod only takes effect on classes whose metaclass is
    ABCMeta; without inheriting ABC, the original class was freely
    instantiable with no-op methods. Subclasses must implement show/hide.
    """

    @abstractmethod
    def show(self):
        """Make the view visible."""

    @abstractmethod
    def hide(self):
        """Hide the view."""
|
21,875 | 68319bc5c90095d74391e13adebb3777058c93e5 | # Generated by Django 3.1.1 on 2020-09-09 17:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: replaces the Favorite/Hide join
    models with direct user FKs on Drink. Do not hand-edit applied
    migrations."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('kib_app', '0003_drink_ingredients'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='hide',
            name='user',
        ),
        migrations.AlterField(
            model_name='drink',
            name='favorite',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='favorites', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='drink',
            name='hide',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hidden', to=settings.AUTH_USER_MODEL),
        ),
        migrations.DeleteModel(
            name='Favorite',
        ),
        migrations.DeleteModel(
            name='Hide',
        ),
    ]
21,876 | 3a4c9b9ff44fad2a76c82a1936a9b2bd26821b70 | from inmemory import InMemoryImpl
# Interactive menu loop: dispatch each choice to the in-memory contact store.
# NOTE(review): non-numeric input makes int(input()) raise ValueError and
# exits with a traceback -- confirm whether that is acceptable.
while True:
    print("*"*80)
    print("1.Add Contact 2.Delete Contact 3.View All 4.Search Contact 5.Update Contact 6.Exit")
    print("*"*80)
    ch = int(input("Enter your choice:"))
    if ch == 1:
        InMemoryImpl.add_contact()
    elif ch == 2:
        InMemoryImpl.delete_contact()
    elif ch == 3:
        InMemoryImpl.view_contacts()
    elif ch == 4:
        InMemoryImpl.search()
    elif ch == 5:
        InMemoryImpl.update_contact()
    else:
        # Any other choice (including 6) exits the loop.
        break
21,877 | 5696f3592f2e24e6d69794adc922ad970d9129ba | #!/usr/bin/env python3
import pandas as pd
import numpy as np
import statsmodels.formula.api as sm
import sqlite3
import sys
def relative_standard_error(fit):
    """Relative standard error of a fit: sqrt(sum((residual/fitted)^2) / df_resid)."""
    frame = pd.DataFrame({'residual': fit.resid, 'fitted': fit.predict()})
    # Per-observation relative deviation of the residual from the fitted value.
    ratios = frame['residual'] / frame['fitted']
    return np.sqrt((ratios ** 2).sum() / fit.df_resid)
def get_calibration(data):
    """Fit a weighted (1/concentration^2) calibration line to one group of
    known-concentration injections.

    Returns a pd.Series with slope/intercept/fit statistics, or None when
    fewer than two usable points remain.
    """
    data = data.copy()
    try:
        # Weight each point by 1/concentration^2 (standard calibration weighting).
        data['weight'] = data.known_concentration ** -2
    except ZeroDivisionError:
        # NOTE(review): a pandas float column yields inf for 0 ** -2 rather
        # than raising, so this handler likely never fires for float dtype;
        # the inf weights are dropped by the replace/dropna below instead.
        data['weight'] = np.nan
    data = data.replace([np.inf, -np.inf], np.nan).dropna(subset=['weight', 'area'])
    if not len(data) > 1:
        # Not enough points to fit a line.
        return
    # Deal with presence/absence of an intercept term according to calibration_config
    _intercept = data.intercept.unique()
    assert len(_intercept) == 1
    intercept = _intercept[0]
    try:
        if intercept == 0:
            # "- 1" removes the intercept term from the formula.
            fit = sm.wls('area ~ known_concentration - 1', data=data, weights=data.weight).fit()
        else:
            fit = sm.wls('area ~ known_concentration', data=data, weights=data.weight).fit()
    except ValueError as err:
        # Dump the offending group for debugging before re-raising.
        print(data, file=sys.stderr)
        raise err
    out = {}
    if hasattr(fit.params, 'Intercept'):
        out['intercept'] = fit.params.Intercept
        out['slope'] = fit.params[1]
    else:
        # No-intercept model: only the slope was estimated.
        out['intercept'] = 0
        out['slope'] = fit.params[0]
    out['limit_of_detection'] = np.nan # TODO
    out['observations'] = fit.nobs
    out['relative_standard_error'] = relative_standard_error(fit)
    out['rsquared'] = fit.rsquared
    return pd.Series(out)
def main():
    """CLI: argv[1] = sqlite DB of injection metadata, argv[2] = TSV of peaks.

    Joins peaks to metadata, fits one calibration per
    (molecule, channel, calibration group), and writes TSV to stdout.
    """
    con = sqlite3.connect(sys.argv[1])
    meta_query = """
    SELECT *
    FROM known_injection_metadata
    JOIN calibration_group
      USING (injection_id)
    JOIN calibration_config
      USING (molecule_id)
                 """
    meta = pd.read_sql(meta_query, con=con)
    peak = pd.read_table(sys.argv[2])
    data = peak.merge(meta, on=['injection_id', 'molecule_id', 'channel'])
    # One fitted calibration per (molecule, channel, group).
    calibration = (data.groupby(['molecule_id', 'channel', 'calibration_group'])
                       .apply(get_calibration))
    calibration.to_csv(sys.stdout, sep='\t')

if __name__ == '__main__':
    main()
|
21,878 | b8438f38394e0a151d4cd975210aa3a6026b2948 | def main():
    points, tmp, result, lines = list(), list(), list(), dict()
    # Read n segments; record both endpoints of each.
    n = int(input())
    for num in range(n):
        l, r = map(int, input().split())
        points.append([l, 1, num]) # coordinate, kind (1 - left end, 2 - right end), segment index
        points.append([r, 2, num])
        lines.update({num: 0}) # segment index -> coverage flag (0 - uncovered, 1 - covered)
    # Sweep left to right; at equal coordinates, left ends come before right ends.
    points.sort(key=lambda x: [x[0], x[1]])
    for point in points:
        if point[1] == 1:
            # Segment opens: remember it as a candidate for the next point.
            tmp.append(point[2])
        else:
            if lines[point[2]] == 1:
                continue
            else:
                # Greedy: place a point at the right end of the first
                # uncovered segment; it covers every segment opened so far
                # (they all start before and end at or after this coordinate).
                result.append(point[0])
                for item in tmp:
                    lines[item] = 1
                tmp.clear()
    print("{0}\n{1}".format(len(result), " ".join(map(str, result))))

if __name__ == "__main__":
    main()
|
21,879 | fbefe8641e0f5c143316c5352f8fc65cee256d21 | __author__ = 'yaSh'
# Shared crawler state. (Python 2 module: urllib2 / Queue / BeautifulSoup 3.)
url_list = []   # collected article feature dicts
corpus = {}     # article title -> article text
import re
import urllib2
from BeautifulSoup import BeautifulSoup
from Queue import Queue, Empty
from threading import Thread
visited = set() # hrefs already seen, shared across worker threads
queue = Queue() # frontier of URLs to fetch
def get_text(url):
    """Return (plain_text, html) extracted from `url` via boilerpipe, or ("", "") on any failure."""
    from boilerpipe.extract import Extractor
    try :
        extractor = Extractor(extractor='DefaultExtractor', url=url)
        return extractor.getText(), extractor.getHTML()
    except:
        # Best-effort: any extraction/network error yields empty results.
        return "",""
def get_title(html):
    """Extract the article title from an <H1 class="title"> tag, or '' if absent."""
    try:
        # Non-greedy match of the first title heading in the page.
        found = re.search(r"<H1 class=\"title\">(.+?)</H1>", html)
        title = found.group(1)
    except:
        # No match (or non-string input) -> empty title.
        title = ""
    return title
def get_topic(html):
    """Extract the vertical/topic label from the page, defaulting to 'Other'."""
    try:
        # Non-greedy match of the first vertical-brief label in the page.
        found = re.search(r"<LABEL for=\"vertical-brief\">(.+?)</LABEL>", html)
        topic = found.group(1)
    except:
        # No match (or non-string input) -> generic bucket.
        topic = "Other"
    return topic
import json
def get_num_likes(url):
    """Return the Facebook share count for `url` via the Graph API, or 0 on any failure."""
    try:
        share_link = "http://graph.facebook.com/?id="+url
        response = urllib2.urlopen(share_link)
        data = json.loads(response.read())
        shares = dict(data)['shares']
    except:
        # Best-effort: missing 'shares' key, network or JSON errors all map to 0.
        shares = 0
    return shares
def link_crawl(root, article_limit):
    """Return a worker function that crawls dated article links under `root`.

    The returned `parse` closure pulls URLs from the shared `queue`, follows
    article links (paths containing YYYY/MM/DD), extracts text and metadata,
    and appends feature dicts to the shared `url_list` until `article_limit`
    articles are collected. (Python 2 module: urllib2 / Queue.)
    """
    # Site name, e.g. 'huffingtonpost' from 'http://www.huffingtonpost.ca/'.
    name = root.rsplit(".")[1]
    def parse():
        try:
            while True:
                if len(url_list) > article_limit: break
                # Raises Queue.Empty (caught below) when the frontier runs dry.
                url = queue.get_nowait()
                try:
                    request = urllib2.Request(url)
                    response = urllib2.urlopen(request)
                    soup = BeautifulSoup(response)
                except UnicodeDecodeError:
                    continue
                # Article links carry a YYYY/MM/DD path component.
                for link in soup.findAll('a', href=re.compile(r'\d{4}/\d{2}/\d{2}/\w+')):
                    try:
                        href = link['href']
                    except KeyError:
                        continue
                    if href not in visited:
                        visited.add(href)
                        # BUGFIX: was bitwise `&` on booleans; result was the
                        # same but `and` is the intended logical operator.
                        if (name in href) and (str(href).endswith('.html')) and ('linkedin' not in href):
                            queue.put(href)
                            date = re.search(r'(\d+/\d+/\d+)', href).group(1)
                            text, html = get_text(href)
                            title = get_title(html)
                            topic = get_topic(html)
                            likes = get_num_likes(href)
                            # BUGFIX: was `len(title) & len(text)` -- a bitwise
                            # AND of two lengths, which is 0 for many pairs of
                            # non-zero lengths (e.g. 4 & 3 == 0) and silently
                            # dropped valid articles. Use logical `and`.
                            if len(title) and len(text):
                                feature_vector = {'url':href, 'title':title, 'content': text, 'date':date, 'topic':topic, 'likes':likes}
                                corpus[title] = text
                                print (title, date, topic, likes)
                                if date:
                                    url_list.append(feature_vector)
        except Empty:
            # Frontier exhausted: this worker is done.
            pass
    return parse
def get_data(limit):
    """Crawl up to `limit` articles with 5 worker threads; return the shared url_list.

    Python 2 function: uses the `print` statement below.
    """
    # NOTE(review): this local `corpus` is never used (the module-level
    # `corpus` dict is what link_crawl fills) -- confirm it can be removed.
    corpus = []
    import time
    start = time.time()
    root = "http://www.huffingtonpost.ca/"
    parser = link_crawl(root, article_limit=limit)
    # Seed the frontier, then fan out 5 workers running the same closure.
    queue.put(root)
    workers = []
    for i in range(5):
        worker = Thread(target=parser)
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
    end = time.time()
    print
    print "Time Taken to Extract Data : " + (str((end - start)/60.0)) + " minutes"
    print
    return url_list
21,880 | a0ff9559c89835813a410c7bd7e8291839e37481 | # # nuisances
# #FIXME: TO BE UPDATED FOR 2018!
# # name of samples here must match keys in samples.py
# MC sample keys; must match keys in samples.py.
# FIX: "top" was listed twice; the duplicate only behaved harmlessly because
# downstream dict comprehensions dedup keys, but it is a latent bug for any
# plain iteration over `mc`.
mc = ["DY", "top", "VV", "VVV", "VBF-V", "VBS", "Wjets"]

# Analysis phase spaces: {res,boost} x {sig,wjetcr,topcr} x dnn{high,low},
# then split per lepton flavour (_ele/_mu).
phase_spaces_boost = []
phase_spaces_res = []
for d in ["high", "low"]:
    for cat in ["sig", "wjetcr", "topcr"]:
        phase_spaces_boost.append("boost_{}_dnn{}".format(cat, d))
        phase_spaces_res.append("res_{}_dnn{}".format(cat, d))

phase_spaces_res_ele = [ph + "_ele" for ph in phase_spaces_res]
phase_spaces_res_mu = [ph + "_mu" for ph in phase_spaces_res]
phase_spaces_boost_ele = [ph + "_ele" for ph in phase_spaces_boost]
phase_spaces_boost_mu = [ph + "_mu" for ph in phase_spaces_boost]

phase_spaces_tot_ele = phase_spaces_res_ele + phase_spaces_boost_ele
phase_spaces_tot_mu = phase_spaces_res_mu + phase_spaces_boost_mu
phase_spaces_tot_res = phase_spaces_res_ele + phase_spaces_res_mu
phase_spaces_tot_boost = phase_spaces_boost_ele + phase_spaces_boost_mu
phase_spaces_dict = {"boost": phase_spaces_boost, "res": phase_spaces_res}
phase_spaces_tot = phase_spaces_tot_ele + phase_spaces_tot_mu
# ################################ EXPERIMENTAL UNCERTAINTIES #################################
# #### Luminosity
# Luminosity uncertainty, split into its correlated/uncorrelated components.
# All are log-normal rate uncertainties applied to every MC sample except
# 'top' and 'Wjets' (those are normalised in-situ via rateParams below).
nuisances['lumi_Uncorrelated'] = {
    'name': 'lumi_13TeV_2018',
    'type': 'lnN',
    'samples': dict((skey, '1.015') for skey in mc if skey not in ['top', 'Wjets'])
}
nuisances['lumi_XYFact'] = {
    'name': 'lumi_13TeV_XYFact',
    'type': 'lnN',
    'samples': dict((skey, '1.02') for skey in mc if skey not in ['top','Wjets'])
}
nuisances['lumi_LScale'] = {
    'name': 'lumi_13TeV_LSCale',
    'type': 'lnN',
    'samples': dict((skey, '1.002') for skey in mc if skey not in ['top','Wjets'])
}
nuisances['lumi_CurrCalib'] = {
    'name': 'lumi_13TeV_CurrCalib',
    'type': 'lnN',
    'samples': dict((skey, '1.002') for skey in mc if skey not in ['top','Wjets'])
}
# #### FAKES
# if Nlep == '2' :
# # already divided by central values in formulas !
# fakeW_EleUp = fakeW+'_EleUp'
# fakeW_EleDown = fakeW+'_EleDown'
# fakeW_MuUp = fakeW+'_MuUp'
# fakeW_MuDown = fakeW+'_MuDown'
# fakeW_statEleUp = fakeW+'_statEleUp'
# fakeW_statEleDown = fakeW+'_statEleDown'
# fakeW_statMuUp = fakeW+'_statMuUp'
# fakeW_statMuDown = fakeW+'_statMuDown'
# else:
# fakeW_EleUp = '( fakeW_ele_'+eleWP+'_mu_'+muWP+'_'+Nlep+'lElUp / fakeW_ele_'+eleWP+'_mu_'+muWP+'_'+Nlep+'l )'
# fakeW_EleDown = '( fakeW_ele_'+eleWP+'_mu_'+muWP+'_'+Nlep+'lElDown / fakeW_ele_'+eleWP+'_mu_'+muWP+'_'+Nlep+'l )'
# fakeW_MuUp = '( fakeW_ele_'+eleWP+'_mu_'+muWP+'_'+Nlep+'lMuUp / fakeW_ele_'+eleWP+'_mu_'+muWP+'_'+Nlep+'l )'
# fakeW_MuDown = '( fakeW_ele_'+eleWP+'_mu_'+muWP+'_'+Nlep+'lMuDown / fakeW_ele_'+eleWP+'_mu_'+muWP+'_'+Nlep+'l )'
# fakeW_statEleUp = '( fakeW_ele_'+eleWP+'_mu_'+muWP+'_'+Nlep+'lstatElUp / fakeW_ele_'+eleWP+'_mu_'+muWP+'_'+Nlep+'l )'
# fakeW_statEleDown = '( fakeW_ele_'+eleWP+'_mu_'+muWP+'_'+Nlep+'lstatElDown / fakeW_ele_'+eleWP+'_mu_'+muWP+'_'+Nlep+'l )'
# fakeW_statMuUp = '( fakeW_ele_'+eleWP+'_mu_'+muWP+'_'+Nlep+'lstatMuUp / fakeW_ele_'+eleWP+'_mu_'+muWP+'_'+Nlep+'l )'
# fakeW_statMuDown = '( fakeW_ele_'+eleWP+'_mu_'+muWP+'_'+Nlep+'lstatMuDown / fakeW_ele_'+eleWP+'_mu_'+muWP+'_'+Nlep+'l )'
nuisances['fake_syst'] = {
'name' : 'CMS_fake_syst',
'type' : 'lnN',
'samples' : {
'Fake' : '1.30',
},
}
# nuisances['fake_ele'] = {
# 'name' : 'hww_fake_ele_2018',
# 'kind' : 'weight',
# 'type' : 'shape',
# 'samples' : {
# 'Fake' : [ fakeW_EleUp , fakeW_EleDown ],
# },
# }
# nuisances['fake_ele_stat'] = {
# 'name' : 'hww_fake_ele_stat_2018',
# 'kind' : 'weight',
# 'type' : 'shape',
# 'samples' : {
# 'Fake' : [ fakeW_statEleUp , fakeW_statEleDown ],
# },
# }
# nuisances['fake_mu'] = {
# 'name' : 'hww_fake_mu_2018',
# 'kind' : 'weight',
# 'type' : 'shape',
# 'samples' : {
# 'Fake' : [ fakeW_MuUp , fakeW_MuDown ],
# },
# }
# nuisances['fake_mu_stat'] = {
# 'name' : 'hww_fake_mu_stat_2018',
# 'kind' : 'weight',
# 'type' : 'shape',
# 'samples' : {
# 'Fake' : [ fakeW_statMuUp , fakeW_statMuDown ],
# },
# }
##### Btag nuisances
for shift in ['jes', 'lf', 'hf', 'hfstats1', 'hfstats2', 'lfstats1', 'lfstats2', 'cferr1', 'cferr2']:
btag_syst = ['(btagSF%sup)/(btagSF)' % shift, '(btagSF%sdown)/(btagSF)' % shift]
name = 'CMS_btag_%s' % shift
if 'stats' in shift:
name += '_2018'
nuisances['btag_shape_%s' % shift] = {
'name': name,
'kind': 'weight',
'type': 'shape',
'samples': dict((skey, btag_syst) for skey in mc),
}
# ##### Trigger Efficiency
trig_syst = ['((TriggerEffWeight_'+Nlep+'l_u)/(TriggerEffWeight_'+Nlep+'l))*(TriggerEffWeight_'+Nlep+'l>0.02) + (TriggerEffWeight_'+Nlep+'l<=0.02)', '(TriggerEffWeight_'+Nlep+'l_d)/(TriggerEffWeight_'+Nlep+'l)']
nuisances['trigg'] = {
'name' : 'CMS_eff_trigger_2018',
'kind' : 'weight',
'type' : 'shape',
'samples' : dict((skey, trig_syst) for skey in mc)
}
# ##### Electron Efficiency and energy scale
ele_id_syst_up = '(abs(Lepton_pdgId[0]) == 11)*(Lepton_tightElectron_'+eleWP+'_TotSF'+'_Up[0])/\
(Lepton_tightElectron_'+eleWP+'_TotSF[0]) + (abs(Lepton_pdgId[0]) == 13)'
ele_id_syst_do = '(abs(Lepton_pdgId[0]) == 11)*(Lepton_tightElectron_'+eleWP+'_TotSF'+'_Down[0])/\
(Lepton_tightElectron_'+eleWP+'_TotSF[0]) + (abs(Lepton_pdgId[0]) == 13)'
mu_id_syst_up = '(abs(Lepton_pdgId[0]) == 13)*(Lepton_tightMuon_'+muWP+'_TotSF'+'_Up[0])/\
(Lepton_tightMuon_'+muWP+'_TotSF[0]) + (abs(Lepton_pdgId[0]) == 11)'
mu_id_syst_do = '(abs(Lepton_pdgId[0]) == 13)*(Lepton_tightMuon_'+muWP+'_TotSF'+'_Down[0])/\
(Lepton_tightMuon_'+muWP+'_TotSF[0]) + (abs(Lepton_pdgId[0]) == 11)'
id_syst_ele = [ ele_id_syst_up, ele_id_syst_do ]
id_syst_mu = [ mu_id_syst_up, mu_id_syst_do ]
nuisances['eff_e'] = {
'name' : 'CMS_eff_e_2018',
'kind' : 'weight',
'type' : 'shape',
'samples' : dict((skey, id_syst_ele) for skey in mc),
'cuts': phase_spaces_tot_ele
}
nuisances['electronpt'] = {
'name' : 'CMS_scale_e_2018',
'kind' : 'tree',
'type' : 'shape',
'samples' : dict((skey, ['1', '1']) for skey in mc),
'folderUp' : directory_bkg +"_ElepTup",
'folderDown' : directory_bkg +"_ElepTdo",
'cuts': phase_spaces_tot_ele
}
# ##### Muon Efficiency and energy scale
nuisances['eff_m'] = {
'name' : 'CMS_eff_m_2018',
'kind' : 'weight',
'type' : 'shape',
'samples' : dict((skey, id_syst_mu) for skey in mc),
'cuts': phase_spaces_tot_mu
}
nuisances['muonpt'] = {
'name' : 'CMS_scale_m_2018',
'kind' : 'tree',
'type' : 'shape',
'samples' : dict((skey, ['1', '1']) for skey in mc),
'folderUp' : directory_bkg +"_MupTup",
'folderDown' : directory_bkg +"_MupTdo",
'cuts': phase_spaces_tot_mu
}
##### Jet energy scale
nuisances['jes'] = {
'name' : 'CMS_scale_j_2018',
'kind' : 'tree',
'type' : 'shape',
'samples' : dict((skey, ['1', '1']) for skey in mc),
'folderUp' : directory_bkg +"_JESup",
'folderDown' : directory_bkg +"_JESdo",
}
# nuisances['fatjet_jes'] = {
# 'name' : 'CMS_scale_fatj_2018',
# 'kind' : 'tree',
# 'type' : 'shape',
# 'samples' : dict((skey, ['1', '1']) for skey in mc),
# 'folderUp' : directory_bkg +"_fatjet_JESup",
# 'folderDown' : directory_bkg +"_fatjet_JESdo",
# }
nuisances['fatjet_jms'] = {
'name' : 'CMS_mass_fatj_2018',
'kind' : 'tree',
'type' : 'shape',
'samples' : dict((skey, ['1', '1']) for skey in mc),
'folderUp' : directory_bkg +"_fatjet_JMSup",
'folderDown' : directory_bkg +"_fatjet_JMSdo",
}
# ##### MET energy scale
nuisances['met'] = {
'name' : 'CMS_scale_met_2018',
'kind' : 'tree',
'type' : 'shape',
'samples' : dict((skey, ['1', '1']) for skey in mc),
'folderUp' : directory_bkg +"_METup",
'folderDown' : directory_bkg +"_METdo",
}
######################
# Theory nuisance
nuisances['QCD_scale_wjets'] = {
'name' : 'QCDscale_wjets',
'kind' : 'weight',
'type' : 'shape',
'samples' : {
"Wjets" : ["LHEScaleWeight[0]", "LHEScaleWeight[8]"],
}
}
nuisances['QCD_scale_top'] = {
'name' : 'QCDscale_top',
'kind' : 'weight',
'type' : 'shape',
'samples' : {
"top" : ["LHEScaleWeight[0]", "LHEScaleWeight[8]"],
}
}
##################################
#### Custom nuisances
# if useEmbeddedDY: del nuisances['prefire']['samples']['DY']
# #
# # PS and UE
# #
# nuisances['PS'] = {
# 'name' : 'PS',
# 'skipCMS' : 1,
# 'kind' : 'weight',
# 'type' : 'shape',
# 'samples' : {
# 'WW' : ['PSWeight[0]', 'PSWeight[3]'],
# },
# }
# nuisances['UE'] = {
# 'name' : 'UE',
# 'skipCMS' : 1,
# 'kind' : 'tree',
# 'type' : 'shape',
# 'samples' : {
# # 'WW' : ['1.12720771849', '1.13963144574'],
# 'ggH_hww' : ['1.00211385568', '0.994966378288'],
# 'qqH_hww' : ['1.00367895901', '0.994831373195']
# },
# 'folderUp' : treeBaseDir+'Fall2018_nAOD_v1_Full2018v2/MCl1loose2018v2__MCCorr2018__btagPerEvent__l2loose__l2tightOR2018__UEup',
# 'folderDown' : treeBaseDir+'Fall2018_nAOD_v1_Full2018v2/MCl1loose2018v2__MCCorr2018__btagPerEvent__l2loose__l2tightOR2018__UEdo',
# 'AsLnN' : '1',
# }
# nuisances['PU'] = {
# 'name' : 'CMS_PU_2018',
# 'kind' : 'weight',
# 'type' : 'shape',
# 'samples' : {
# 'DY': ['0.993259983266*(puWeightUp/puWeight)', '0.997656381501*(puWeightDown/puWeight)'],
# 'top': ['1.00331969187*(puWeightUp/puWeight)', '0.999199609528*(puWeightDown/puWeight)'],
# 'WW': ['1.0033022059*(puWeightUp/puWeight)', '0.997085330608*(puWeightDown/puWeight)'],
# 'ggH_hww': ['1.0036768006*(puWeightUp/puWeight)', '0.995996570285*(puWeightDown/puWeight)'],
# 'qqH_hww': ['1.00374694528*(puWeightUp/puWeight)', '0.995878596852*(puWeightDown/puWeight)'],
# },
# 'AsLnN' : '1',
# }
## Top pT reweighting uncertainty
nuisances['singleTopToTTbar'] = {
'name': 'singleTopToTTbar',
'skipCMS': 1,
'kind': 'weight',
'type': 'shape',
'samples': {
'top': [
'isSingleTop * 1.0816 + isTTbar',
'isSingleTop * 0.9184 + isTTbar']
}
}
## Top pT reweighting uncertainty
# nuisances['TopPtRew'] = {
# 'name': 'CMS_topPtRew', # Theory uncertainty
# 'kind': 'weight',
# 'type': 'shape',
# 'samples': {'top': ["1.", "1./Top_pTrw"]},
# 'symmetrize': True
# }
#################
## Samples normalizations
nuisances['Top_norm'] = {
'name' : 'CMS_Top_norm_2018',
'samples' : {
'top' : '1.00',
},
'type' : 'rateParam',
'cuts' : phase_spaces_tot
}
# for wjbin in Wjets_lptbins:
# for fl in ["ele", "mu"]:
# for phs in ["res", "boost"]:
# nuisances["{}_norm_{}_{}_2018".format(wjbin, fl, phs )] = {
# 'name' : 'CMS{}_norm_{}_{}_2018'.format(wjbin, fl, phs),
# 'samples' : { wjbin: '1.00' },
# 'type' : 'rateParam',
# 'cuts' : [f+"_"+fl for f in phase_spaces_dict[phs]]
# }
## Use the following if you want to apply the automatic combine MC stat nuisances.
nuisances['stat'] = {
'type' : 'auto',
'maxPoiss' : '10',
'includeSignal' : '1',
# nuisance ['maxPoiss'] = Number of threshold events for Poisson modelling
# nuisance ['includeSignal'] = Include MC stat nuisances on signal processes (1=True, 0=False)
'samples' : {}
}
# Mark every nuisance as non-CMS-standard naming, then print the configured
# nuisance names (minus lumi/stat) for bookkeeping. Python 2 syntax
# (print statement, dict.iteritems) — consistent with the rest of this file.
for n in nuisances.values():
    n['skipCMS'] = 1
print ' '.join(nuis['name'] for nname, nuis in nuisances.iteritems() if nname not in ('lumi', 'stat'))
|
21,881 | e414010a8d0f8bd7f91c5941500440b667a810ef | # -*- coding: utf8 -*-
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
import itertools
import random
import numpy as np
from problems.base import BaseProblem
from consts import Spaces, ParamsTypes
import utils
CASE_TYPES = utils.enum(
# get 5x more negative reward when crossing this region
Water='W',
# get 2x more negative reward when crossing this region
Sand='S',
# cannot cross this region
Wall='X',
# only get -1 reward when crossing this region
Open='.',
# terminate the episode, and get `successReward`
Termination='T',
# get min(reward) = -maxSteps when crossing this region and
# terminate the episode
Trap='!')
CASE_COLORS = {
CASE_TYPES.Water: (0, 0, 0.7),
CASE_TYPES.Sand: (0.7, 0.7, 0),
CASE_TYPES.Wall: (0, 0, 0),
CASE_TYPES.Open: (1, 1, 1),
CASE_TYPES.Termination: (0, 1, 0),
CASE_TYPES.Trap: (1, 0, 0)
}
# Grids are indexed by their
PREDEFINED_GRIDS = {
'random': None,
'simple': """....T
.....
.XXXX
.....
.....""",
'complex': """..........!.T
..SS......!..
.XWWX.....!..
..SSXXXXX.!..
........X.W..
...!....X.W..
...!....X.S..
...!....X....
........XXXXX
.............""",
'complex2': """
........SWSWS.......WW............!...WT
..SS.....S!S.......WWWW...........!S.WWW
.XWWX.....!.........WW............!..WWW
..SSXXXXX.!XXX!XXXX!XXXXWXXX!XXXXX!.SWWW
........X.W.........X..WWW.X...........W
...!....X.W.........X...W..X........!!!!
...!....X.S.........X......X............
...!....X.....XXXXXXX......X............
........XXXXXXX............X............
...........X.....XXXXXXXXXXX............
...........X.XXXXX......................
...........X.X.WWW.XXXX.................
...........X.X........X.................
...........X.X..XXXXWWXXXXXXXXXXXXXXXXXX
...........X.X..............!...........
...........X.X..............!.!!!!.XXXX.
...........X................SSWWW!.X....
...........XXXXXXXXXXXXXXXXXXXWWWXX!!.!!
...........X...............X..WWW.......
...........X...............X.......!!!..
...........X.......!.......X..!!!..!!!..
...........X...............X..!!!..!!!..
...........X...............X..!!!.......
...........X...............X............
.......XXX.X...............XXXXXXXX!.!.!
.........!.................X......X.....
...X.!.SSSX................X.XXXX.X.!.!.
...X....WXX................X...!X.X.....
...X...XXXX................XXXXXX.X!.!.!
...X..............!........X............
WWWW.......................X............
.XXXXXXXXXXXX..............X..XXXXXXXXXX
...........WWWWWWWWWWWWWWWWXWWWWWW......
SSSSSSSSSSSS......W........X...!........
SSSSSSSSSSSS......W........XXXX!........
.................WWWW......X...!........
SXXXXX.XXXXXX......XX......X.!.!........
WWWWWWWWWWWWXXXXXX!XX......X.!..........
........XXXXX......XX......X.!..........
..................!..........!..........
"""
}
class GridWorld(BaseProblem):
    """
    Custom GridWorld problem implementation.

    The grid is a 2D array of ord() values of CASE_TYPES characters,
    indexed [x, y] with y = 0 at the bottom. States are (x, y) pairs,
    actions are the four cardinal moves. Subclasses provide the grid
    via `_setupGrid`.
    """
    GYM_ENVIRONMENT_NAME = None

    PARAMS = utils.extends(
        {},
        startPosX=ParamsTypes.Number,
        startPosY=ParamsTypes.Number,
        successReward=ParamsTypes.Number,
        stepReward=ParamsTypes.Number,
        sandReward=ParamsTypes.Number,
        waterReward=ParamsTypes.Number,
        failureReward=ParamsTypes.Number,
        trapReward=ParamsTypes.Number,
        **BaseProblem.PARAMS)

    PARAMS_DOMAIN = utils.extends(
        {},
        startPosX={
            'values': (0, 'random', 'center', 'episodeRandom'),
            # startPosX < width will only be checked at runtime
            'range': (0, 1000)
        },
        startPosY={
            'values': (0, 'random', 'center', 'episodeRandom'),
            # startPosY < height will only be checked at runtime
            'range': (0, 1000)
        },
        successReward={
            'values': (0, 100),
            'range': (0, 10000)
        },
        stepReward={
            'values': (-1, 0, 1),
            'range': (-100, 100)
        },
        sandReward={
            'values': (-5, -1, 0),
            'range': (-1000, 1000)
        },
        waterReward={
            'values': (-50, -10, -2, 0),
            'range': (-1000, 1000)
        },
        failureReward={
            'values': (-50, -10, -2, 0),
            'range': (-1000, 1000)
        },
        trapReward={
            'values': (-50, -10, -2, 0),
            'range': (-1000, 1000)
        },
        **BaseProblem.PARAMS_DOMAIN)

    PARAMS_DEFAULT = utils.extends(
        {},
        startPosX='episodeRandom',
        startPosY='episodeRandom',
        successReward=0,
        stepReward=-1,
        sandReward=-5,
        waterReward=-10,
        failureReward=0,
        trapReward=-50,
        **BaseProblem.PARAMS_DEFAULT)

    PARAMS_DESCRIPTION = utils.extends(
        {},
        startPosX="Starting position for the agent along the X axis.",
        startPosY="Starting position for the agent along the Y axis.",
        successReward="Success reward upon reaching a termination state.",
        stepReward="Reward received at each step into an 'Open' state.",
        sandReward="Reward received when stepping into a 'Sand' state.",
        waterReward="Reward received when stepping into a 'Water' state.",
        failureReward="Reward received when failing to reach a termination \
state after the configured number of steps.",
        trapReward="Reward received when stepping into a 'Trap' state.\
This cummulates to the negative reward already got that simulate that \
all the steps just got consumed.",
        **BaseProblem.PARAMS_DESCRIPTION)

    DOMAIN = {
        'action': Spaces.Discrete,
        'state': Spaces.Discrete
    }

    ACTION_NAMES = ['up', 'right', 'down', 'left']
    STATE_DIMENSION_NAMES = ['X', 'Y']

    def __init__(self, **kwargs):
        super(GridWorld, self).__init__(**kwargs)
        self._grid = None
        self._currentPos = None
        self._initState = None
        self._viewer = None
        self._trajectory = []
        # for rendering
        self._agentTrans = None
        self._ystep = 0
        self._xstep = 0
        self._width = 0  # setup during `setupGrid` step
        self._height = 0  # setup during `setupGrid` step
        self._nbSteps = 0

    def _setupGrid(self):
        """Populate `_grid`, `_width` and `_height`. Subclass responsibility."""
        raise NotImplementedError()

    def setup(self):
        logger.info("[%s] Problem setup" % self.__class__.__name__)
        self._setupGrid()
        # only during setup will the 'random' init state be randomized
        # use 'episodeRandom' to randomize init state at each episode
        self._currentPos = self.reset(setup=True)
        self._initState = self._currentPos

    def getStatesList(self):
        """Return every (x, y) grid cell as a tuple of floats."""
        return [tuple(float(x) for x in v) for v in itertools.product(
            range(self._width), range(self._height))]

    def getStatesDim(self):
        """
        Return the number of dimension of the state space
        """
        return 2

    def getStatesBounds(self):
        """
        Returns the max and min values each dimension can take.
        These are returned as two tuples, `low` and `high`, where both
        are a list of as many elements as there is dimension to the state space.
        """
        return (0, 0), (self._width - 1, self._height - 1)

    def getActionsList(self):
        return range(len(self.ACTION_NAMES))

    def _move(self, action, x, y):
        """Return the (x, y) reached from the given cell after `action`,
        clamped to the grid; moves into walls are reverted.

        FIX: the x/y parameters were previously ignored and re-read from
        self._currentPos. Callers pass *self._currentPos, so using the
        parameters is equivalent and honors the signature.
        """
        if action == 0:  # up
            y += 1
        if action == 1:  # right
            x += 1
        if action == 2:  # down
            y -= 1
        if action == 3:  # left
            x -= 1
        # clamp to the grid boundaries
        x = min(max(x, 0), self._width - 1)
        y = min(max(y, 0), self._height - 1)
        if chr(self._grid[x, y]) == CASE_TYPES.Wall:
            x, y = self._currentPos  # revert move
        return x, y

    def step(self, action):
        """
        The agent take the given action and receives back the new state, reward,
        whether the episode is terminated and some nothingness.
        """
        x, y = self._move(action, *self._currentPos)
        if chr(self._grid[x, y]) == CASE_TYPES.Wall:
            # error - previous state was already a wall
            self._done = True
            self._trajectory.append(self._currentPos)
            return self._currentPos, -1, self._done, {}
        reward = {
            CASE_TYPES.Water: self.waterReward,
            CASE_TYPES.Sand: self.sandReward,
            CASE_TYPES.Open: self.stepReward,
            CASE_TYPES.Termination: self.successReward,
            # a trap consumes all remaining steps' reward plus penalties
            CASE_TYPES.Trap: (
                -(self.maxSteps - len(self._trajectory)) + self.failureReward +
                self.trapReward)
        }[chr(self._grid[x, y])]

        # termination state
        if chr(self._grid[x, y]) in [CASE_TYPES.Termination, CASE_TYPES.Trap]:
            self._done = True

        self._currentPos = (x, y)
        self._trajectory.append(self._currentPos)

        self._nbSteps += 1
        if self._nbSteps >= self.maxSteps and not self._done:
            reward += self.failureReward

        return self._currentPos, reward, self._done, {}

    def reset(self, setup=False):
        """
        Reset the state of the evironment for a new episode
        `setup` is used to let the reset function know when we're calling it
        from `setup`. If we don't, the 'random' init scheme should reset
        to the randomly choosen position instead of picking a new random one.
        """
        self._done = False
        self._nbSteps = 0

        if (self.startPosX == 'random' and setup) or (
                self.startPosX == 'episodeRandom'):
            x = random.randint(0, self._width - 1)
        elif self.startPosX == 'random' and not setup:
            x = self._initState[0]
        elif self.startPosX == 'center':
            # BUG FIX: was `self._width - 1` (right edge), not the center
            x = (self._width - 1) // 2
        else:
            x = int(self.startPosX)

        # BUG FIX: this whole branch previously tested and used startPosX,
        # so the Y start position silently followed the X configuration.
        if (self.startPosY == 'random' and setup) or (
                self.startPosY == 'episodeRandom'):
            y = random.randint(0, self._height - 1)
        elif self.startPosY == 'random' and not setup:
            y = self._initState[1]
        elif self.startPosY == 'center':
            y = (self._height - 1) // 2
        else:
            y = int(self.startPosY)

        self._currentPos = (x, y)
        self._trajectory = [(x, y)]
        return (x, y)

    def _renderTrajectory(self):
        """Draw the episode trajectory as a one-frame polyline overlay."""
        from gym.envs.classic_control import rendering
        points = [(
            x * self._xstep + self._xstep / 2,
            y * self._ystep + self._ystep / 2) for x, y in self._trajectory]
        trajectory = rendering.make_polyline(points)
        trajectory.set_color(0.3, 0.3, 0.3)
        trajectory.set_linewidth(3)
        self._viewer.add_onetime(trajectory)

    def render(self, mode="human", close=False):
        """
        Render the environment server-side
        """
        if close:
            # BUG FIX: the guard was `close and self._viewer is None`, which
            # made the inner `is not None` close call unreachable and leaked
            # the viewer window.
            if self._viewer is not None:
                self._viewer.close()
                self._viewer = None
            return

        screen_width = 600
        screen_height = 600

        if self._viewer is None:
            from gym.envs.classic_control import rendering
            self._viewer = rendering.Viewer(screen_width, screen_height)
            # generate the grid
            xs, self._xstep = np.linspace(
                0, screen_width, self._width + 1, retstep=True)
            ys, self._ystep = np.linspace(
                0, screen_height, self._height + 1, retstep=True)
            # render the grid
            for x in xrange(self._width):
                for y in xrange(self._height):
                    l, r, t, b = (0, self._xstep, self._ystep, 0)
                    tile = rendering.FilledPolygon([
                        (l, b), (l, t), (r, t), (r, b)])
                    tile.add_attr(rendering.Transform(translation=(
                        x * self._xstep, y * self._ystep)))
                    tile.set_color(*CASE_COLORS[chr(self._grid[x, y])])
                    self._viewer.add_geom(tile)
            # render starting point
            l, r, t, b = (0, self._xstep, self._ystep, 0)
            tile = rendering.FilledPolygon([
                (l, b), (l, t), (r, t), (r, b)])
            tile.add_attr(rendering.Transform(translation=(
                self._trajectory[0][0] * self._xstep,
                self._trajectory[0][1] * self._ystep)))
            tile.set_color(0, 1.0, 1.0)
            self._viewer.add_geom(tile)
            # render grid lines
            for x in xs[1:len(xs) - 1]:
                # not including the first and last one
                line = rendering.Line((x, 0), (x, screen_height))
                self._viewer.add_geom(line)
            for y in ys[1: len(ys) - 1]:
                line = rendering.Line((0, y), (screen_width, y))
                self._viewer.add_geom(line)
            agent = rendering.make_circle(
                radius=min(
                    screen_width / (self._width + 1) / 3,
                    screen_height / (self._height + 1) / 3),
                res=30)
            self._agentTrans = rendering.Transform(translation=(
                self._currentPos[0] * self._xstep + (self._xstep / 2),
                self._currentPos[1] * self._ystep + (self._ystep / 2)))
            agent.add_attr(self._agentTrans)
            self._viewer.add_geom(agent)

        self._renderTrajectory()
        self._agentTrans.set_translation(
            self._currentPos[0] * self._xstep + (self._xstep / 2),
            self._currentPos[1] * self._ystep + (self._ystep / 2))
        # return the render result so mode='rgb_array' callers get the frame
        return self._viewer.render(return_rgb_array=(mode == 'rgb_array'))

    def release(self):
        if self._viewer is not None:
            self._viewer.close()
            self._viewer = None
class PresetGridWorld(GridWorld):
    """
    A gridworld implementation that offers a set of predefined grids for
    your agent to train on.
    """
    PARAMS = utils.extends(
        {},
        predefinedGrid=ParamsTypes.String,
        **GridWorld.PARAMS)

    PARAMS_DOMAIN = utils.extends(
        {},
        predefinedGrid={
            'values': PREDEFINED_GRIDS.keys()
        },
        **GridWorld.PARAMS_DOMAIN)

    PARAMS_DEFAULT = utils.extends(
        {},
        predefinedGrid='complex2',
        **GridWorld.PARAMS_DEFAULT)

    PARAMS_DESCRIPTION = utils.extends(
        {},
        predefinedGrid="Pick a predefined grid",
        **GridWorld.PARAMS_DESCRIPTION)

    def _setupGrid(self):
        """Parse the selected ASCII grid from PREDEFINED_GRIDS into `_grid`.

        Each character maps to a CASE_TYPES value stored as its ord().
        The first text line becomes the TOP row (y is flipped so that
        y = 0 is the bottom of the grid).
        NOTE(review): assumes every non-empty line has the same length as
        the first one — a ragged grid would raise IndexError here.
        """
        rep = PREDEFINED_GRIDS[self.predefinedGrid]
        lines = [l.strip() for l in rep.split('\n') if len(l.strip()) > 0]
        self._height = len(lines)
        self._width = len(lines[0])  # all lines should have the same length
        self._grid = np.zeros([self._width, self._height], dtype=int)
        for x in xrange(self._width):
            for y in xrange(self._height):
                self._grid[x, self._height - y - 1] = ord(lines[y][x])
class RandomGridWorld(GridWorld):
    """
    A gridworld implementation that generates a random grid problem
    """
    PARAMS = utils.extends(
        {},
        width=ParamsTypes.Number,
        height=ParamsTypes.Number,
        nbTermStates=ParamsTypes.Number,
        nbTraps=ParamsTypes.Number,
        **GridWorld.PARAMS)

    PARAMS_DOMAIN = utils.extends(
        {},
        width={
            'values': (10, 20, 100),
            'range': (5, 1000)
        },
        height={
            'values': (10, 20, 100),
            'range': (5, 1000)
        },
        nbTermStates={
            'values': (0, 1, 2, 5, 10),
            'range': (0, 10000)
        },
        nbTraps={
            'values': (0, 10, 100),
            'range': (0, 10000)
        },
        **GridWorld.PARAMS_DOMAIN)

    PARAMS_DEFAULT = utils.extends(
        {},
        width=20,
        height=20,
        nbTermStates=1,
        nbTraps=100,
        **GridWorld.PARAMS_DEFAULT)

    PARAMS_DESCRIPTION = utils.extends(
        {},
        width="Controls the generated grid's width.",
        height="Controls the generated grid's height",
        nbTermStates="""Controls the number of termination states.""",
        nbTraps="""Controls the number of traps to generate.""",
        **GridWorld.PARAMS_DESCRIPTION)

    def _setupGrid(self):
        """Generate an all-Open grid, then scatter traps and termination
        states at uniformly random cells.

        NOTE(review): positions are sampled with replacement, so fewer than
        nbTraps/nbTermStates distinct cells may result; termination states
        are written last and therefore win any collision with a trap.
        """
        self._width = self.width
        self._height = self.height
        self._grid = np.zeros([self._width, self._height], dtype=int)
        for x in xrange(self._width):
            for y in xrange(self._height):
                self._grid[x, self._height - y - 1] = ord(CASE_TYPES.Open)
        traps = [(
            random.randint(0, self.width - 1),
            random.randint(0, self.height - 1))
            for x in xrange(self.nbTraps)
        ]
        for term in traps:
            self._grid[term[0], term[1]] = ord(CASE_TYPES.Trap)
        termStates = [(
            random.randint(0, self.width - 1),
            random.randint(0, self.height - 1))
            for x in xrange(self.nbTermStates)
        ]
        for term in termStates:
            self._grid[term[0], term[1]] = ord(CASE_TYPES.Termination)
|
21,882 | 55cf762ec389e011eb9b1dfd660c106664e13fb8 | from scene_nn import *
import pickle
import pandas as pd
import matplotlib.pyplot as plt
# ------------------------------
# plot loss over different size of dataset
# ------------------------------
def datasize_vs_loss(overide=False):
    '''
    Sweep the training-set size in 1/20 increments and record train/CV loss.

    If `overide` is True, load and return previously pickled results
    instead of retraining. Otherwise returns (and pickles) a list of
    [fraction_used, train_loss, cv_loss] rows.

    NOTE(review): relies on the module-level `final_data` global being set
    by the __main__ block before this is called — confirm call order.
    '''
    unit = 20
    if overide:
        tt_acu = pickle.load(open("DATA/datasize_loss_stats_0526.p", "rb"))
        print('finish load datasize_vs_loss data from pickle!')
        return tt_acu
    else:
        # train & test accuracy
        tt_acu = []
        # final_data = contextualize(None, True)
        print('Begin training for datasize_vs_loss!')
        for train_size in reversed(range(unit)):
            print('epoch', unit -train_size)
            # fraction of data to DISCARD (Python 3 true division)
            train_size = train_size / unit
            print('train_size:',1-train_size)
            # train_size in split data means the part that need to throw away
            train_data, cv_data, _ = split_data(final_data, train_size=train_size, overide=False)
            train_loss_, cv_loss_ = train(train_data,cv_data)
            tt_acu.append([1-train_size, train_loss_, cv_loss_])
            print('result of cross validation:')
            print(tt_acu)
        pickle.dump(tt_acu, open("DATA/datasize_loss_stats_0526.p", "wb+"))
        print('-success dump datasize_vs_loss data-')
        return tt_acu
# ------------------------------
# plot loss of different dropout
# ------------------------------
def drop_out_vs_loss(overide=False):
    '''
    Sweep the dropout keep-rate in 1/20 increments and record train/CV loss.

    If `overide` is True, load and return previously pickled results.
    Returns a list of [dropout_rate, train_loss, cv_loss] rows.

    NOTE(review): uses the module-level `train_data`/`cv_data` globals set
    by the __main__ block — confirm they exist before calling.
    '''
    unit = 20
    if overide:
        tt_acu = pickle.load(open("DATA/drop_out_loss_stats_0526.p", "rb"))
        print('finish load drop_out_plot_stats data from pickle!')
        return tt_acu
    else:
        # train & test accuracy
        tt_acu = []
        # final_data = contextualize(None, True)
        print('Begin training for drop_out_vs_loss!')
        for drop_out in reversed(range(1,unit+1)):
            print('epoch', unit - drop_out)
            drop_out = drop_out/unit
            print('drop out:',1-drop_out)
            train_loss_,cv_loss_ = train(train_data,cv_data, drop_out = drop_out)
            tt_acu.append([1-drop_out,train_loss_,cv_loss_])
            print('result of cross validation:')
            print(tt_acu)
        pickle.dump(tt_acu, open("DATA/drop_out_loss_stats_0526.p", "wb+"))
        print('-success dump drop_out_plot_stats data-')
        return tt_acu
# ------------------------------
# plot loss of different L2 regulariziation term
# ------------------------------
def l2_vs_loss(overide=False):
    '''
    Sweep the L2 regularization strength (l2_term = i/3 for i in 1..90)
    and record train/CV loss.

    If `overide` is True, load and return previously pickled results.
    Returns a list of [l2_term, train_loss, cv_loss] rows.

    NOTE(review): uses the module-level `train_data`/`cv_data` globals set
    by the __main__ block — confirm they exist before calling.
    '''
    unit = 3
    split = 90
    if overide:
        tt_acu = pickle.load(open("DATA/l2_loss_stats_0526.p", "rb"))
        print('finish load l2_plot_stats data from pickle!')
        return tt_acu
    else:
        # train & test accuracy
        tt_acu = []
        # final_data = contextualize(None, True)
        print('Begin training for ls_vs_loss!')
        for l2_term in range(1,split+1):
            print('epoch', l2_term)
            l2_term = l2_term/unit
            print('l2_term:',l2_term)
            train_loss_,cv_loss_ = train(train_data,cv_data, beta_l2 = l2_term)
            tt_acu.append([l2_term,train_loss_,cv_loss_])
            print('result of cross validation:')
            print(tt_acu)
        pickle.dump(tt_acu, open("DATA/l2_loss_stats_0526.p", "wb+"))
        print('-success dump l2_plot_stats data-')
        return tt_acu
# ------------------------------
# plot
# ------------------------------
def plot(tt_acu,xlabel,plot_path='default_save.png'):
    '''
    Plot train vs cross-validation loss curves and save the figure.

    :param tt_acu: list of [x_value, train_loss, cv_loss] rows, as produced
        by the *_vs_loss sweep functions above
    :param xlabel: label for the x axis (the swept hyper-parameter)
    :param plot_path: output path for the saved PNG
    '''
    # rows -> DataFrame indexed by the swept value, one column per curve
    df = pd.DataFrame.from_records(data = [(i[1],i[2]) for i in tt_acu],index =list(zip(*tt_acu))[0], columns=['train','cv'])
    plt.figure();
    df.plot(legend=True)
    plt.xlabel(xlabel)
    plt.ylabel("Loss")
    plt.savefig(plot_path)
    plt.show()
if __name__ == '__main__':
'''
with original 12k data
'''
data_file = 'sms_clean_0524.txt'
vocab_str, vocab_vec = loadZhW2v(None, True)
s = np.vstack(vocab_vec)
Gvar = np.var(s, 0) # distributional parameter for Glove, for later generating random embedding for UNK
Gmean = np.mean(s, 0)
raw_data = read_data(None, True)
final_data = contextualize(raw_data=None,overide=True)
# # plot datasize_vs_loss
# tt_acu = datasize_vs_loss(True)
# plot(tt_acu,"size of dataset","PLOT/datasize_loss_stats.png")
# # plot drop_out_vs_loss
# train_data, cv_data, _ = split_data(final_data, 0)
# tt_acu = drop_out_vs_loss()
# plot(tt_acu,"Dropout","PLOT/dropout_loss_stats.png")
# plot L2 regulariziation term _vs_loss
train_data, cv_data, _ = split_data(final_data, 0)
tt_acu = l2_vs_loss()
plot(tt_acu,"L2 regularization term","PLOT/l2_loss_stats.png")
'''
use clustered data
'''
# data_file = 'clustered_sms_message.txt'
#
# vocab_str, vocab_vec = loadZhW2v(None, True)
# s = np.vstack(vocab_vec)
#
# Gvar = np.var(s, 0) # distributional parameter for Glove, for later generating random embedding for UNK
# Gmean = np.mean(s, 0)
# raw_data = read_data(data_file)
#
# final_data = contextualize(raw_data,vocab_str, vocab_vec, Gmean, Gvar)
# # plot datasize_vs_loss
# tt_acu = datasize_vs_loss()
# plot(tt_acu,"size of dataset","PLOT/datasize_loss_stats_clustered.png")
#
#
# # plot L2 regulariziation term _vs_loss
# train_data, cv_data, _ = split_data(final_data, 0)
# tt_acu = l2_vs_loss()
# plot(tt_acu,"L2 regularization term","PLOT/l2_loss_stats_clustered.png") |
21,883 | 2683acaf8edde5acd6a787bc67e8ad98e9346470 | import pytesseract as ocr
from PIL import Image
import PIL
import pytesseract
# Point pytesseract at the Windows Tesseract binary (raw string for the path)
pytesseract.pytesseract.tesseract_cmd=r'C:\Program Files\Tesseract-OCR\tesseract.exe'
print(pytesseract.image_to_string(Image.open('at01.jpeg'))) |
21,884 | 88762bfa1ae4acf4472986fc178836e896f1711d | # -*- coding: utf-8 -*-
from restapi.flask_ext.flask_celery import CeleryExt
from utilities.logs import get_logger
log = get_logger(__name__)
# celery_app = current_app.extensions.get('celery').celery_app
celery_app = CeleryExt.celery_app
@celery_app.task(bind=True)
def test_task(self, num):
    """Demo Celery task: iterate `num` - 1 times, reporting PROGRESS state.

    Logs start/end and updates task state with current/total counters so a
    client can poll progress. Returns a human-readable completion string.
    NOTE(review): the loop computes (count*count)**2 and discards it — it
    exists only to burn CPU; range(1, int(num)) runs num-1 iterations.
    """
    with celery_app.app.app_context():
        log.info("I'm %s" % self.request.id)
        log.debug("Starting task to calculate %s squares!" % num)
        for count in range(1, int(num)):
            x = count * count
            x = x * x
            self.update_state(state='PROGRESS',
                              meta={'current': count, 'total': num})
        log.info("Task completed, calculated up to %s squares" % num)
        return "WOW, i calculated %s squares!!" % num
|
21,885 | eef4585b1f925e86dda65b32c84356571ad9c543 | # -*- coding: utf-8 -*-
from model.group import Group
# Fixture groups used when adding/creating groups in tests
testdata = [
    Group(name="Group name 1", header="Header 1", footer="Footer 1"),
    Group(name="Group name 2", header="Header 2", footer="Footer 2")
]

# Fixture groups for modification tests: one full update, one name-only update
modgroupdata = [Group(name="12 DEC group", header="MODIFIED 12", footer="MODIFIED 12"),
                Group(name="12 DEC NAME only group")]
|
21,886 | 4e026d34400a0dbc6ba5ddb58587b9dcd4bfdd06 | import hashmap
# Demo driver for the custom `hashmap` module (Python 2 script):
# populate a map, list it, then look one key up.
provinces = hashmap.new()
# Dutch provinces; key is province name, value is its largest city
hashmap.set(provinces, 'North Holland', 'Amsterdam')
hashmap.set(provinces, 'South Holland', 'Rotterdam')
hashmap.set(provinces, 'Utrecht', 'Utrecht')
hashmap.set(provinces, 'Zeeland', 'Middelburg')
hashmap.set(provinces, 'Groningen', 'Groningen')
hashmap.set(provinces, 'Limburg', 'Maastricht')
hashmap.set(provinces, 'Flevoland', 'Almere')
hashmap.set(provinces, 'North Brabant', 'Den Bosch')
hashmap.set(provinces, 'Friesland', 'Leeuwarden')
hashmap.set(provinces, 'Drenthe', 'Assen')
hashmap.set(provinces, 'Gelderland', 'Nijmegen')
hashmap.set(provinces, 'Overijssel', 'Enschede')
# dump all entries, then a separator, then a single lookup
hashmap.list(provinces)
print '-' * 10
print hashmap.get(provinces, 'Drenthe') |
21,887 | 5be54fc54ecf35f60c4d01462e30b7391fbffc45 | from nose.tools import *
from gothonweb.planisphere import *
def test_room():
    """A Room keeps the name and description it was constructed with."""
    treasure = Room('Gold Room', 'A room full of gold.', None, None, None, None)
    assert_equal(treasure.name, 'Gold Room')
    assert_equal(treasure.description, 'A room full of gold.')
def test_paths():
    """add_paths() wires direction strings to destination rooms; go() follows them."""
    hub = Room('center', 'A room at the center of the map', None, None, None, None)
    arena_room = Room('arena', 'A room for gladiators', None, None, None, None)
    wakanda_room = Room('wakanda', 'A room for elites', None, None, None, None)
    hub.add_paths({'go up': arena_room, 'go down': wakanda_room})
    assert_equal(hub.go('go up'), arena_room)
    assert_equal(hub.go('go down'), wakanda_room)
def test_load_room_object():
    """load_room_object resolves the START name to the central corridor room."""
    start_room = load_room_object(START)
    assert_equal(START, 'central_corridor')
    assert_equal(start_room, central_corridor)
def test_get_room_name():
    """get_room_name maps a room object back to its registry key."""
    start_room = load_room_object(START)
    room_key = get_room_name(start_room)
    assert_equal(START, 'central_corridor')
    assert_equal(start_room, central_corridor)
    assert_equal(room_key, 'central_corridor')
def test_game_map():
    """Walk the whole happy path of the game map; wrong inputs yield None."""
    first_room = load_room_object(START)
    assert_equal(first_room.go('shoot gothon'), None)
    assert_equal(first_room.go('tell joke'), laser_weapon_armory)
    assert_equal(laser_weapon_armory.go('000'), the_bridge)
    assert_equal(laser_weapon_armory.go('567'), None)
    assert_equal(the_bridge.go('slowly place the bomb'), None)
    assert_equal(the_bridge.go('use haki'), escape_pod)
    assert_equal(escape_pod.go('2'), the_end_winner)
    assert_equal(escape_pod.go(''), None)
|
21,888 | 57d21e4ce4c87b070355f217fa4378203f85612b | from pylogix import PLC
import time
import os
# Polling configuration: each dict describes one PLC tag to sample.
# Keys: see the inline comments of the first entry; 'nextread'/'lastcount'/
# 'lastread' are internal scheduler state and must start at 0.
tag_list = [
    {
        #type = counter|value
        'type': 'counter',
        # tag is the PLC tag to read
        'tag': 'Program:Production.ProductionData.DailyCounts.DailyTotal',
        # Machine is written into the machine colum on the database
        'Machine': '1533',
        # used internally
        'nextread': 0,
        'lastcount': 0,
        'lastread': 0,
        # how often to try to read the tag in seconds
        'frequency': .5,
        # database table to write to
        'table': 'GFxPRoduction',
        # tag containing what part type is currently running
        'Part_Type_Tag': 'Stn010.PartType',
        # map values in above to a string to write in the part type db colum
        'Part_Type_Map': {'0': '50-9341', '1': '50-0455'}
    }
]
# Second configuration set: the four OP30 stations plus one raw 'value' probe.
# NOTE(review): this list is defined but never passed to loop() below — confirm
# whether it is meant to be polled too.
tag_frequency_op30 = [
    {
        'type': 'counter',
        'tag': 'OP30_4_COUNT.SYSTEM[0].GOOD',
        'Machine': '1605',
        'nextread': 0,
        'lastcount': 0,
        'frequency': .5,
        'table': 'GFxPRoduction',
        'Part_Type_Tag': 'ROBOT_R30_4.O.DI37',
        'Part_Type_Map': {'False': '50-5081', 'True': '50-4865'},
    },
    {
        'type': 'counter',
        'tag': 'OP30_1_COUNT.SYSTEM[0].GOOD',
        'Machine': '1606',
        'frequency': .5,
        'nextread': 0,
        'lastcount': 0,
        'table': 'GFxPRoduction',
        'Part_Type_Tag': 'ROBOT_R30_1.O.DI37',
        'Part_Type_Map': {'False': '50-5081', 'True': '50-4865'},
    },
    {
        'type': 'counter',
        'tag': 'OP30_2_COUNT.SYSTEM[0].GOOD',
        'Machine': '1607',
        'frequency': .5,
        'nextread': 0,
        'lastcount': 0,
        'table': 'GFxPRoduction',
        'Part_Type_Tag': 'ROBOT_R30_2.O.DI37',
        'Part_Type_Map': {'False': '50-5081', 'True': '50-4865'},
    },
    {
        'type': 'counter',
        'tag': 'OP30_3_COUNT.SYSTEM[0].GOOD',
        'Machine': '1608',
        'frequency': .5,
        'nextread': 0,
        'lastcount': 0,
        'table': 'GFxPRoduction',
        'Part_Type_Tag': 'ROBOT_R30_3.O.DI37',
        'Part_Type_Map': {'False': '50-5081', 'True': '50-4865'},
    },
    {
        'type': 'value',
        'tag': 'OP30_3_COUNT.SYSTEM[0].GOOD',
        'nextread': 0,
        'frequency': 5,
        'table': 'DataTable',
        'name': 'random value'
    }
]
def loop(taglist, ip, slot=0, minimum_cycle=.5):
    """Run one polling pass: open a PLC connection and dispatch every tag
    entry that is due to its reader ('counter' or 'value').

    taglist : list of tag-config dicts (see tag_list above).
    ip : PLC IP address; slot : processor slot.
    minimum_cycle : floor (seconds) applied to each entry's 'frequency'.
    """
    with PLC() as comm:
        comm.IPAddress = ip
        comm.ProcessorSlot = slot
        for entry in taglist:
            now = time.time()
            # never poll faster than the configured minimum cycle
            cycle = max(entry['frequency'], minimum_cycle)
            if entry['nextread'] == 0:
                # first pass: schedule the entry as due immediately
                entry['nextread'] = now
            if entry['nextread'] > now:
                continue  # not due yet
            if entry['type'] == 'counter':
                entry['lastread'] = now
                read_counter(entry, comm)
                entry['nextread'] += cycle
            if entry['type'] == 'value':
                entry['lastread'] = now
                read_value(entry, comm)
                entry['nextread'] += cycle
def read_value(value_entry, comm):
    """Read one PLC tag described by *value_entry* and print the result.

    Parameters
    ----------
    value_entry : dict
        Tag configuration; only the 'tag' key is used here.
    comm : PLC
        An open pylogix PLC connection.
    """
    # BUG FIX: the original referenced the undefined name `entry`, raising
    # NameError on every call; use the actual parameter name.
    print(time.time(), ':', comm.Read(value_entry['tag']))
def read_counter(counter_entry, comm):
    """Read a production counter and its part-type tag, then record the
    reading via part_count_entry().

    Skips the write when either PLC read fails, or when the counter is 0
    while the last saved count was also 0 (roll-over while idle).
    A reading is recorded when the count rolled over to 0 or increased.
    """
    # read the tag
    part_count = comm.Read(counter_entry['tag'])
    if part_count.Status != 'Success':
        print('failed to read ', part_count)
        return
    part_type = comm.Read(counter_entry['Part_Type_Tag'])
    if part_type.Status != 'Success':
        print('failed to read ', part_type)
        return
    if (part_count.Value == 0) and (counter_entry['lastcount'] == 0):
        return  # machine count rolled over while not running
    if (part_count.Value == 0) or (part_count.Value > counter_entry['lastcount']):
        # save this reading
        counter_entry['lastcount'] = part_count.Value
        # post this reading; part type value is mapped to a part-number string
        part_count_entry(
            table=counter_entry['table'],
            timestamp=counter_entry['lastread'],
            count=part_count.Value,
            machine=counter_entry['Machine'],
            parttype=counter_entry['Part_Type_Map'][str(part_type.Value)]
        )
def part_count_entry(table, timestamp, count, machine, parttype):
    """Append one INSERT statement to a per-second .sql spool file under
    /var/local/SQL/ (file name is the integer timestamp).

    NOTE(review): the SQL is built with str.format, not parameterized —
    acceptable only if all inputs come from the trusted config/PLC; confirm.
    """
    print('{} made a {} ({})'.format(machine, parttype, count))
    file_path = '/var/local/SQL/{}.sql'.format(
        str(int(timestamp)))
    with open(file_path, "a+") as file:
        sql = ('INSERT INTO {} '
               '(Machine, Part, PerpetualCount, Timestamp) '
               'VALUES ("{}", "{}" ,{} ,{});\n'.format(
                   table, machine, parttype, count, timestamp))
        file.write(sql)
if __name__ == "__main__":
    # Poll forever; loop() itself sleeps implicitly only via scheduling,
    # so this spins — each pass re-opens the PLC connection.
    while True:
        loop(tag_list, ip='192.168.1.2', slot=3, minimum_cycle=.5)
|
21,889 | a2b8fefbda430814bfb006edf57f5548162de32a | import pandas as pd
from dask import dataframe as dd
# Experiment: compare pandas diff()/groupby-diff() with their dask equivalents.
df1 = pd.DataFrame({
    "a": [1,2,3,3,4,4,5,5,4,3],
    "x": [1,2,3,4,5,6,7,8,9,10],
})
# plain row-wise diff works the same in pandas and dask
df1["diff"] = df1["a"].diff(periods=1)
print(df1)
print("="*40)
df = dd.from_pandas(df1, npartitions=2)
df["diff2"] = df["a"].diff(periods=1)
print(df.compute())
# per-group diff in pandas
df1["grouped"] = df1.groupby("a")["x"].diff(periods=1)
print(df1)
# NOTE(review): groups of groupby("a") are keyed by the *values* of column
# "a" (1..5), so get_group("a") looks like it would raise KeyError — verify.
ret = df.groupby("a")["x"].get_group("a").repartition(npartitions=1)
df["test"] = ret.diff(periods=1).compute()
print(df)
#df1["grouped"] = df.groupby("a")["x"].get_group("a").diff(periods=1)
21,890 | e9f8f5a7c1125e5a8ec10e1f21eb43bfad7bbea7 | """Add snapshot model
Revision ID: 2191c871434
Revises: 19168fe64c41
Create Date: 2014-07-17 17:21:42.915797
"""
# revision identifiers, used by Alembic.
revision = '2191c871434'
down_revision = '19168fe64c41'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the `snapshot` table: belongs to a project (required, cascade
    delete) and optionally references a build; `status` defaults to 0."""
    # NOTE(review): sa.GUID / bare sa.Enum() are project-specific extensions
    # of the sqlalchemy namespace — confirm they are patched in at import time.
    op.create_table(
        'snapshot',
        sa.Column('id', sa.GUID(), nullable=False),
        sa.Column('project_id', sa.GUID(), nullable=False),
        sa.Column('build_id', sa.GUID(), nullable=True),
        sa.Column('status', sa.Enum(), server_default='0', nullable=False),
        sa.ForeignKeyConstraint(['build_id'], ['build.id'], ),
        sa.ForeignKeyConstraint(['project_id'], ['project.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id')
    )
def downgrade():
    """Revert this migration by dropping the `snapshot` table."""
    op.drop_table('snapshot')
|
21,891 | 6a6c7722d9f54636e4afb55e38574ffa7d434840 | import random
# Shared state for the demo below.
random_number_list = []  # collected random ints; filled up to 20 entries
run = True  # main-loop flag; cleared by generate_Num() when the list is full
def generate_Num():
    """Append one random int in [0, 49] to the shared list; once it holds 20
    entries, stop the main loop and print the squared values."""
    global random_number_list
    global run
    if len(random_number_list) >= 20:
        run = False
        square_the_num()
    else:
        random_number_list.append(random.randint(0, 49))
        print(random_number_list)
def square_the_num():
    """Print the square of every collected random number."""
    global random_number_list
    print(list(value * value for value in random_number_list))
# Keep drawing numbers until generate_Num() flips `run` off (at 20 entries).
while run:
    generate_Num()
21,892 | 938b29b320fa681e9fe20febf380f733cbab95fd | from rest_framework import serializers
from .models import Location, Images, Tricks, Ratings, LocationComments, SkateObjects
from users.serializers import UserSerializer
from django.contrib.auth import get_user_model
from django.conf import settings
class LocationSerializer(serializers.ModelSerializer):
    """Compact Location representation: id, location and name only."""
    class Meta:
        model=Location
        fields = ('id', 'location', 'name')
class CommentViewSerializer(serializers.ModelSerializer):
    """Read serializer for location comments; embeds the full owner user."""
    owner = UserSerializer()
    class Meta:
        model = LocationComments
        fields = "__all__"
class CommentWriteSerializer(serializers.ModelSerializer):
    """Write serializer for comments: clients supply only parent and content."""
    class Meta:
        model = LocationComments
        fields = ('parent', 'content', )
class ImageViewSerializer(serializers.ModelSerializer):
    """Read serializer for images; embeds the full owner user."""
    owner = UserSerializer()
    class Meta:
        model = Images
        fields = "__all__"
class ImageSerializer(serializers.ModelSerializer):
    """Write serializer for images: only the image field is accepted."""
    class Meta:
        model = Images
        fields = ('image', )
class TricksViewSerializer(serializers.ModelSerializer):
    """Read serializer for tricks; embeds the full owner user."""
    owner = UserSerializer()
    class Meta:
        model = Tricks
        fields = "__all__"
class SkateObjectSerializer(serializers.ModelSerializer):
    """Minimal skate-object representation: name only."""
    class Meta:
        model = SkateObjects
        fields = ('name', )
# class RatingsViewSerializer(serializers.ModelSerializer):
# average =
# count =
# class Meta:
# model = Ratings
# fields = ('average', 'count')
class LocationDetailSerializer(serializers.ModelSerializer):
    """Full location detail: nests comments, images, tricks and skate objects.
    Ratings are commented out pending RatingsViewSerializer (see above)."""
    comments = CommentViewSerializer(many=True)
    images = ImageViewSerializer(many=True)
    tricks = TricksViewSerializer(many=True)
    skateobjects = SkateObjectSerializer(many=True)
    # ratings = RatingsViewSerializer(many=True)
    class Meta:
        model = Location
        fields = ('location', 'name', 'comments', 'tricks', 'images', 'skateobjects')
|
21,893 | 28ce8c015b6cb6c9e451f5fc2ba46696c2d35fb1 | from __future__ import print_function, division, absolute_import, with_statement
import os
import shutil
import re as regex
import textwrap
from collections import namedtuple
from array import array
from PIL import ImageDraw
Batch = namedtuple('Batch', ['chr', 'fastas', 'output_folder'])
class LayoutLevel:
    """One nesting level of a hierarchical layout.

    Either constructed explicitly (chunk_size given) or derived from the
    previous levels: chunk_size/padding/thickness are then computed from the
    last child level (`levels[-1]`) and the last parallel level (`levels[-2]`).
    Setting `padding` later keeps thickness consistent.
    """
    def __init__(self, name, modulo, chunk_size=None, padding=None, thickness=1, levels=None):
        # FIX: `name` was previously accepted but never stored.
        self.name = name
        self.modulo = modulo
        if chunk_size is not None:
            self.chunk_size = chunk_size
            self._padding = padding
            self.thickness = thickness
        else:
            child = levels[-1]
            self.chunk_size = child.modulo * child.chunk_size
            self._padding = padding or child.padding * 3  # 6 * int(3 ** (len(levels) - 2)) # third level (count=2) should be 6, then 18
            last_parallel = levels[-2]
            self.thickness = last_parallel.modulo * last_parallel.thickness + self.padding

    @property
    def padding(self):
        return self._padding

    @padding.setter
    def padding(self, value):
        # preserve the non-padding part of thickness when padding changes
        original_thickness = self.thickness - self._padding
        self._padding = value
        self.thickness = original_thickness + value
class Contig:
    """Record for one FASTA contig plus its layout bookkeeping.

    `reset_padding`, `title_padding` and `tail_padding` are layout spacing
    values; `title_index` is the position where the title starts in the
    rendered nucleotide stream and `title_length` its length.
    """
    def __init__(self, name, seq, reset_padding, title_padding, tail_padding, title_index, title_length):
        self.name = name
        self.seq = seq
        self.reset_padding = reset_padding
        self.title_padding = title_padding
        self.tail_padding = tail_padding
        # sequence begins immediately after the rendered title
        self.nuc_title_start = title_index
        self.nuc_seq_start = title_index + title_length
# DNA base pairing table; N (unknown) and X (masked) map to themselves.
comp = dict(zip('AGTCNX', 'TCAGNX'))


def complement(plus_strand):
    """Complement of a single base character."""
    return comp[plus_strand]


def rev_comp(plus_strand):
    """Reverse complement of a sequence string."""
    return ''.join(comp[base] for base in reversed(plus_strand))
class ReverseComplement:
    def __init__(self, seq, annotation=False):
        """Lazy generator for being able to pull out small reverse complement
        sections out of large chromosomes.

        seq : forward-strand sequence (indexable, sliceable).
        annotation : when True items are only reversed, not complemented.
        """
        self.seq = seq
        self.length = len(seq)
        self.annotation = annotation

    def __getitem__(self, key):
        # Map a position on the reversed strand back onto the forward sequence.
        if isinstance(key, slice):
            end = self.length - key.start
            begin = self.length - key.stop
            if end < 0 or begin < 0 or end > self.length:
                raise IndexError("%i %i vs. length %i" % (end, begin, self.length))
            piece = self.seq[begin: end]
            return rev_comp(piece) if not self.annotation else ''.join(reversed(piece))
        letter = self.seq[self.length - key - 1]
        return complement(letter) if not self.annotation else letter

    def __len__(self):
        # FIX: previously returned the constant 0, which made len() useless
        # and every truthiness check on an instance evaluate False.
        return self.length
def multi_line_height(font, multi_line_title, txt):
    """Pixel height of a multi-line title: summed line spacing plus the
    font's descender (difference between 'y' and 'A' glyph heights)."""
    drawer = ImageDraw.Draw(txt)
    total_line_height = drawer.multiline_textsize(multi_line_title, font)[1]
    descender_px = font.getsize('y')[1] - font.getsize('A')[1]
    return total_line_height + descender_px
def pretty_contig_name(contig, title_width, title_lines):
    """Turn a raw contig name into a wrapped, human-readable title.

    textwrap.wrap only breaks on whitespace, so separators ('_', '|') are
    first converted to spaces and a space is forced after each ':'.
    Narrow titles (< 20 chars wide) are hard-split across two lines instead.
    """
    readable = contig.name.replace('_', ' ').replace('|', ' ').replace('chromosome chromosome', 'chromosome')
    for _ in range(2):  # applied twice on purpose (handles adjacent colons)
        readable = regex.sub(r'([^:]*\S):(\S[^:]*)', r'\1: \2', readable)
    too_long_for_small_space = title_width < 20 and len(readable) > title_width * 1.5
    if too_long_for_small_space:
        # cram every last character onto exactly two lines
        return readable[:title_width] + '\n' + readable[title_width:title_width * 2]
    # normal case: wrap to width, keep at most title_lines lines
    return '\n'.join(textwrap.wrap(readable, title_width)[:title_lines])
def copytree(src, dst, symlinks=False, ignore=None):
    """Recursively copy `src` into `dst`, creating `dst` if needed.

    A destination file is overwritten only when the source is more than one
    second newer — a cheap incremental sync. `symlinks`/`ignore` are carried
    through the recursion but otherwise unused here.
    """
    if not os.path.exists(dst):
        os.makedirs(dst, exist_ok=True)
    for entry in os.listdir(src):
        src_path = os.path.join(src, entry)
        dst_path = os.path.join(dst, entry)
        if os.path.isdir(src_path):
            copytree(src_path, dst_path, symlinks, ignore)
            continue
        stale = (not os.path.exists(dst_path)
                 or os.stat(src_path).st_mtime - os.stat(dst_path).st_mtime > 1)
        if stale:
            shutil.copy2(src_path, dst_path)
def create_deepzoom_stack(input_image, output_dzi):
    """Convert `input_image` into a DeepZoom (.dzi) tile pyramid at `output_dzi`."""
    import deepzoom
    settings = {'tile_size': 256,
                'tile_overlap': 1,
                'tile_format': "png",
                'resize_filter': "antialias"}  # cubic bilinear bicubic nearest antialias
    creator = deepzoom.ImageCreator(**settings)
    creator.create(input_image, output_dzi)
def just_the_name(path):
    """Return the file name of `path` without its directory or extension."""
    base = os.path.basename(path)
    name, _ = os.path.splitext(base)
    return name
def chunks(seq, size):
    """Yield consecutive slices of `seq`, each at most `size` long."""
    for start in range(0, len(seq), size):
        yield seq[start:start + size]
def pluck_contig(chromosome_name, genome_source):
    """Scan a genome FASTA for the contig whose header equals
    '>chromosome_name' (case-insensitive) and return its sequence as one
    upper-case string. Raises IOError when the contig is absent."""
    target = ('>' + chromosome_name).upper()
    print("Searching for", '>' + chromosome_name)
    collected = []
    in_target = False
    with open(genome_source, 'r') as genome:
        for line in genome:
            if line.startswith('>'):
                header = line.rstrip()
                if header.upper() == target:
                    in_target = True
                    print("Found", header)
                elif in_target:
                    # next contig reached: all target sequence collected
                    break
            elif in_target:
                # upper-case so later equality checks work
                collected.append(line.rstrip().upper())
    if not collected:
        raise IOError("Contig not found." + '>' + chromosome_name + " inside " + genome_source)
    return ''.join(collected)
def first_word(string):
    """First alphanumeric token of `string`; a Windows-style path is first
    stripped to the part after its last backslash."""
    import re
    tail = string.rsplit('\\', 1)[-1]
    return re.split(r'[\W_]+', tail)[0]
def make_output_dir_with_suffix(base_path, suffix):
    """Create (if needed) and return the directory `base_path + suffix`."""
    output_dir = base_path + suffix
    print("Creating Chromosome Output Directory...", os.path.basename(output_dir))
    os.makedirs(output_dir, exist_ok=True)
    return output_dir
def __do_write(filestream, seq, header=None):
    """Write one optional FASTA header plus its sequence to `filestream`,
    wrapping the sequence at 70 characters; headers are left untouched."""
    if header is not None:
        filestream.write(header + '\n')  # double check newlines
    try:
        filestream.writelines(line + '\n' for line in chunks(seq, 70))
    except Exception as e:
        print(e)
def _write_fasta_lines(filestream, seq):
    """Split `seq` (full file text, one header or sequence per line) into
    header/sequence pairs and write each via __do_write.

    Two consecutive '>' lines mean the first header has no sequence; it is
    reported as orphaned but still written with the next line as content.
    """
    import _io
    assert isinstance(filestream, _io.TextIOWrapper)  # I'm actually given a file name and have to open it myself
    contigs = seq.split('\n')
    index = 0
    while index < len(contigs):
        if len(contigs) > index + 1 and contigs[index].startswith('>') and contigs[index+1].startswith('>'):
            print("Warning: Orphaned header:", contigs[index])
        if contigs[index].startswith('>'):
            header, contents = contigs[index], contigs[index + 1]
            index += 2
        else:
            # sequence line with no preceding header
            header, contents = None, contigs[index]
            index += 1
        __do_write(filestream, contents, header)
def write_complete_fasta(file_path, seq_content_array, header=None):
    """This function ensures that all FASTA files start with a >header\n line.

    seq_content_array may be a list of characters/lines or an array('u', ...);
    when its first element is not '>', a header (derived from the file name
    unless given) is prepended using the matching container type.
    """
    with open(file_path, 'w') as filestream:
        if seq_content_array[0] != '>':  # start with a header
            temp_content = seq_content_array
            if header is None:
                header = '>%s\n' % just_the_name(file_path)
            if isinstance(temp_content, list):
                seq_content_array = [header]
            else:
                # unicode array: rebuild with the header characters first
                seq_content_array = array('u', header)
            seq_content_array.extend(temp_content)
        _write_fasta_lines(filestream, ''.join(seq_content_array))
def write_contigs_to_file(out_filename, contigs, verbose=True):
    """Write every contig (objects with .name and .seq) to `out_filename`
    in FASTA format; optionally print a summary."""
    with open(out_filename, 'w') as stream:
        for record in contigs:
            __do_write(stream, header='>' + record.name, seq=record.seq)
    if verbose:
        total_bp = sum(len(record.seq) for record in contigs)
        print("Done writing ", len(contigs), "contigs and {:,}bp".format(total_bp))
class BlankIterator:
    """Pretend-sequence that returns its filler for any integer index and
    `filler * span` for any slice — an endless run of one character."""
    def __init__(self, filler):
        self.filler = filler

    def __getitem__(self, index):
        try:
            span = index.stop - index.start
        except AttributeError:
            # plain integer index
            return self.filler
        return self.filler * span
|
21,894 | f0c5fd5eda2e2628891e88cb68b32428fc61e50b | from selenium import webdriver
# Selenium smoke test: open facebook.com, screenshot, type into the email
# box and click login. NOTE(review): chromedriver path is machine-specific.
driver=webdriver.Chrome("C:\\Users\\CHINNU FINCY\\Downloads\\chromedriver_win32\\chromedriver.exe")
driver.set_page_load_timeout(30)
driver.get("https://www.facebook.com/")
assert "Facebook" in driver.title
#driver.maximize_window()
# requires ./screenshots to exist
driver.get_screenshot_as_file("./screenshots/fb1.png")
driver.find_element_by_id("email").send_keys("hi")
driver.find_element_by_id("loginbutton").click()
driver.implicitly_wait(20)
driver.quit()
21,895 | dc9ad6902cc84590e453618122c4e6f8d18fb4b7 | from nltk.corpus import stopwords
from collections import Counter
from collections import defaultdict
|
21,896 | b83791dbf5a18045100694d1fb92e62807a38729 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
import os
import joblib
import itertools
from scipy.spatial import Voronoi
from sklearn.neighbors import BallTree
from skimage import morphology, feature, measure, segmentation, filters, color
from scipy import ndimage as ndi
from scipy.sparse import csr_matrix
import cv2 as cv
import napari
import dask
from dask.distributed import Client, LocalCluster
def make_simple_coords():
    """
    Makes really simple coordinates to illustrate network construction methods.

    Returns
    -------
    coords : ndarray
        Array with 1st and 2nd column corresponding to x and y coordinates.
    """
    points = [(144, 0), (124, 3), (97, 21), (165, 28), (114, 34), (60, 38),
              (165, 51), (0, 54), (76, 58), (50, 56), (147, 61)]
    return np.array(points)
def make_random_nodes(size=100, ndim=2, expand=True):
    """
    Make a random set of nodes.

    Parameters
    ----------
    size : int, optional
        Number of nodes. The default is 100.
    ndim : int, optional
        Number of dimensions. The default is 2.
    expand : bool, optional
        If True, positions are multiplied by size**(1/ndim) in order to have a
        consistent spacing across various `size` and `ndim` values.
        The default is True.

    Returns
    -------
    coords : ndarray
        Coordinates of the set of nodes, shape (size, ndim).
    """
    coords = np.random.random((size, ndim))
    if expand:
        coords = coords * size ** (1 / ndim)
    return coords
def make_random_tiles(sx=500, sy=500, sz=0, nb=50, noise_sigma=None,
                      regular=True, double_pattern_y=False, double_pattern_z=False,
                      assym_y=True, assym_z=True, return_image=False):
    """
    Build contacting areas similar to cell segmentation in tissues.

    Seeds are placed on a (possibly jittered) lattice or at random, then a
    watershed on the inverted seed image produces the contacting regions.

    Parameters
    ----------
    sx : int, optional
        Size of the image on the x axis. The default is 500.
    sy : int, optional
        Size of the image on the y axis. The default is 500.
    sz : int, optional
        Size of the image on the z axis. The default is 0, which
        implies generating a 2D image.
    nb : int, optional
        Related to the number of points, but not equal. The default is 50.
    noise_sigma : None or float, optional
        If float, a gaussian noise is added to seeds positions.
        Required (> 0) for 3D simulations; forced to 1 when missing.
    regular : bool, optional
        If True points are on a regular lattice, else they are randomly located.
        The default is True.
    double_pattern_y, double_pattern_z : bool, optional
        If True the regular lattice has more points on that axis. Default False.
    assym_y, assym_z : bool, optional
        If True the frequency of seeds is doubled on that axis. Default True.
    return_image : bool, optional
        If True the image of seed points is also returned. The default is False.

    Returns
    -------
    coords : ndarray
        Coordinates of the set of nodes.
    masks : ndarray
        Detected areas coded by a unique integer.

    Examples
    --------
    >>> coords, masks, image = make_random_tiles(double_pattern_y=True, return_image=True)
    >>> showim(image)
    >>> label_cmap = mpl.cm.get_cmap('Set2')(range(8))
    >>> showim(color.label2rgb(masks, bg_label=0, colors=label_cmap), origin='lower')
    """
    if sz == 0:
        # ---- 2D simulation ----
        image = np.zeros((sy, sx))
        # to overcome an issue with odd nb:
        nb = int(np.ceil(nb / 2) * 2)
        if regular:
            x = np.linspace(start=0, stop=sx-1, num=nb, dtype=int)
            # interleave even/odd positions to stagger the lattice
            x = np.hstack((x[::2], x[1::2]))
            if assym_y:
                nb = nb*2
            y = np.linspace(start=0, stop=sy-1, num=nb, dtype=int)
            if double_pattern_y:
                y = np.hstack((y[::2], y[1::2]))
            x_id = np.tile(x, y.size//2)
            y_id = np.repeat(y, x.size//2)
        else:
            x_id = np.random.randint(sx, size=nb)
            y_id = np.random.randint(sy, size=nb)
        if noise_sigma is not None:
            # jitter seed positions, then clip back into the image bounds
            x_id = x_id + np.random.normal(loc=0.0, scale=noise_sigma, size=x_id.size)
            x_id[x_id<0] = 0
            x_id[x_id>sx-1] = sx-1
            x_id = np.round(x_id).astype(int)
            y_id = y_id + np.random.normal(loc=0.0, scale=noise_sigma, size=y_id.size)
            y_id[y_id<0] = 0
            y_id[y_id>sy-1] = sy-1
            y_id = np.round(y_id).astype(int)
        coords = np.vstack((x_id, y_id)).T
        image[y_id, x_id] = 1
        # watershed on the inverted seed image yields contacting regions
        masks = segmentation.watershed(-image)
    else:
        # ---- 3D simulation ----
        image = np.zeros((sz, sy, sx))
        # to overcome an issue with odd nb:
        nb = int(np.ceil(nb / 2) * 2)
        if regular:
            x = np.linspace(start=0, stop=sx-1, num=nb, dtype=int)
            x = np.hstack((x[::2], x[1::2]))
            if assym_y:
                nb_y = nb*2
            y = np.linspace(start=0, stop=sy-1, num=nb_y, dtype=int)
            if assym_z:
                nb_z = nb*2
            z = np.linspace(start=0, stop=sz-1, num=nb_z, dtype=int)
            if double_pattern_y:
                y = np.hstack((y[::2], y[1::2]))
            if double_pattern_z:
                z = np.hstack((z[::2], z[1::2]))
            x_id = np.tile(x, y.size//2)
            y_id = np.repeat(y, x.size//2)
            z_id = np.repeat(z, x.size//2)
        else:
            x_id = np.random.randint(sx, size=nb)
            y_id = np.random.randint(sy, size=nb)
            z_id = np.random.randint(sz, size=nb)
        if noise_sigma is None:
            print("For 3D simulations noise_sigma needs to be > 0")
            print("Setting noise_sigma to 1")
            noise_sigma = 1
        # x
        x_id = x_id + np.random.normal(loc=0.0, scale=noise_sigma, size=x_id.size)
        x_id[x_id<0] = 0
        x_id[x_id>sx-1] = sx-1
        x_id = np.round(x_id).astype(int)
        # y
        y_id = y_id + np.random.normal(loc=0.0, scale=noise_sigma, size=y_id.size)
        y_id[y_id<0] = 0
        y_id[y_id>sy-1] = sy-1
        y_id = np.round(y_id).astype(int)
        # z
        z_id = z_id + np.random.normal(loc=0.0, scale=noise_sigma, size=z_id.size)
        z_id[z_id<0] = 0
        z_id[z_id>sz-1] = sz-1
        z_id = np.round(z_id).astype(int)
        coords = np.vstack((x_id, y_id, z_id)).T
        image[z_id, y_id, x_id] = 1
        masks = segmentation.watershed(-image)
    if return_image:
        return coords, masks, image
    else:
        return coords, masks
def remove_duplicate_pairs(pairs):
    """
    Remove redundant rows in a 2D array of index pairs, treating [a, b]
    and [b, a] as the same pair.

    Parameters
    ----------
    pairs : ndarray
        The (n_pairs x 2) array of neighbors indices.

    Returns
    -------
    uniq_pairs : ndarray
        Array of unique pairs, the content of each row is sorted.

    Example
    -------
    >>> remove_duplicate_pairs(np.array([[4, 3], [1, 2], [3, 4], [2, 1]]))
    array([[1, 2],
           [3, 4]])
    """
    row_sorted = np.sort(pairs, axis=1)
    return np.unique(row_sorted, axis=0)
def distance_neighbors(coords, pairs):
    """
    Compute all Euclidean distances between neighbors in a network.

    Parameters
    ----------
    coords : ndarray
        Coordinates of points, one row per node, one column per axis.
    pairs : ndarray
        The (n_pairs x 2) array of neighbors indices.

    Returns
    -------
    distances : array
        Distances between each pair of neighbors.
    """
    # vector difference between source and target node coordinates
    deltas = coords[pairs[:, 0]] - coords[pairs[:, 1]]
    return np.sqrt((deltas ** 2).sum(axis=1))
def find_trim_dist(dist, method='percentile_size', nb_nodes=None, perc=99):
    """
    Find the distance threshold to eliminate reconstructed edges in a network.

    Parameters
    ----------
    dist : array
        Distances between pairs of nodes.
    method : str, optional
        Method used to compute the threshold. The default is 'percentile_size',
        which defines an optimal percentile of distances above which edges are
        discarded. 'percentile' uses `perc` directly.
    nb_nodes : int, optional
        The number of nodes in the network used by the 'percentile_size' method.
    perc : int or float, optional
        The percentile of distances used as the threshold. The default is 99.

    Returns
    -------
    dist_thresh : float
        Threshold distance.

    Raises
    ------
    ValueError
        If `method` is unknown. (FIX: previously an unknown method fell
        through and raised UnboundLocalError on return.)
    """
    if method == 'percentile_size':
        # scale the cut so roughly 2 edges per node survive on average
        prop_edges = 4 / nb_nodes**(0.5)
        perc = 100 * (1 - prop_edges * 0.5)
        dist_thresh = np.percentile(dist, perc)
    elif method == 'percentile':
        dist_thresh = np.percentile(dist, perc)
    else:
        raise ValueError("unknown trimming method: %r" % (method,))
    return dist_thresh
def build_delaunay(coords, trim_dist='percentile_size', perc=99, return_dist=False):
    """
    Reconstruct edges between nodes by Delaunay triangulation
    (via the ridge points of the Voronoi diagram).

    Parameters
    ----------
    coords : ndarray
        Coordinates of points where each column corresponds to an axis (x, y, ...)
    trim_dist : str or float or False, optional
        Method name or absolute distance used to delete long reconstructed
        edges; False disables trimming. The default is 'percentile_size'.
    perc : int or float, optional
        The percentile of distances used as the threshold. The default is 99.
    return_dist : bool, optional
        Accepted for API compatibility; currently unused in this function body.

    Returns
    -------
    pairs : ndarray
        The (n_pairs x 2) array of neighbors indices.
    """
    edge_pairs = Voronoi(coords).ridge_points
    if trim_dist is False:
        return edge_pairs
    edge_lengths = distance_neighbors(coords, edge_pairs)
    threshold = trim_dist
    if not isinstance(threshold, (int, float)):
        # trim_dist is a method name: derive the numeric threshold from it
        threshold = find_trim_dist(dist=edge_lengths, method=trim_dist,
                                   nb_nodes=coords.shape[0], perc=perc)
    return edge_pairs[edge_lengths < threshold, :]
def pairs_from_knn(ind):
    """
    Convert a matrix of nearest-neighbor indices into a matrix of unique
    pairs of neighbors.

    Parameters
    ----------
    ind : ndarray
        The (n_objects x n_neighbors) matrix of neighbors indices, where
        column 0 is each object itself.

    Returns
    -------
    pairs : ndarray
        The (n_pairs x 2) matrix of unique, row-sorted neighbor pairs.
    """
    n_neigh = ind.shape[1] - 1
    sources = np.repeat(ind[:, 0], n_neigh).reshape(-1, 1)
    targets = ind[:, 1:].reshape(-1, 1)
    stacked = np.hstack((sources, targets))
    # deduplicate, treating [a, b] and [b, a] as the same edge
    return np.unique(np.sort(stacked, axis=1), axis=0)
def build_knn(coords, k=6, **kwargs):
    """
    Reconstruct edges between nodes by the k-nearest-neighbors (knn) method:
    an edge is drawn between each node and its k nearest neighbors.

    Parameters
    ----------
    coords : ndarray
        Coordinates of points where each column corresponds to an axis (x, y, ...)
    k : int, optional
        Number of nearest neighbors. The default is 6.

    Returns
    -------
    pairs : ndarray
        The (n_pairs x 2) matrix of neighbors indices.
    """
    tree = BallTree(coords, **kwargs)
    # query k+1 neighbors because the closest hit is the node itself
    _, indices = tree.query(coords, k=k + 1)
    return pairs_from_knn(indices)
def build_rdn(coords, r, **kwargs):
    """
    Reconstruct edges between nodes by the radial-distance-neighbors (rdn)
    method: an edge is drawn between each node and every node within
    radius `r` of it.

    Parameters
    ----------
    coords : ndarray
        Coordinates of points where each column corresponds to an axis (x, y, ...)
    r : float
        Radius in which nodes are connected.

    Returns
    -------
    pairs : ndarray
        The (n_pairs x 2) matrix of neighbors indices.
    """
    tree = BallTree(coords, **kwargs)
    neighborhoods = tree.query_radius(coords, r=r)
    # drop self-references while flattening the per-node neighbor lists
    sources = []
    targets = []
    for node, hood in enumerate(neighborhoods):
        others = hood[hood != node]
        sources.append([node] * others.size)
        targets.append(others)
    sources = np.fromiter(itertools.chain.from_iterable(sources), int).reshape(-1, 1)
    targets = np.fromiter(itertools.chain.from_iterable(targets), int).reshape(-1, 1)
    return remove_duplicate_pairs(np.hstack((sources, targets)))
def hyperdiagonal(coords):
    """
    Maximum possible distance within a set of coordinates: the diagonal of
    the (multidimensional) bounding box they occupy.

    Parameters
    ----------
    coords : ndarray
        Coordinates of points where each column corresponds to an axis (x, y, ...)

    Returns
    -------
    dist : float
        Maximum possible distance.
    """
    extent = coords.max(axis=0) - coords.min(axis=0)
    return np.sqrt((extent ** 2).sum())
def find_neighbors(masks, i, r=1):
    """
    Find the neighbors of a given mask by dilating it and looking at which
    other labels fall inside the dilated border.

    Parameters
    ----------
    masks : array_like
        2D array of integers defining the identity of masks;
        0 is background (no object detected).
    i : int
        The mask for which we look for the neighbors.
    r : int
        Radius of search (dilation disk radius).

    Returns
    -------
    neighbors : ndarray
        Label values of the masks touching (or within `r` of) mask `i`,
        excluding `i` itself and the background.
    """
    mask = np.uint8(masks == i)
    # create the border in which we'll look at other masks
    kernel = morphology.disk(r)
    dilated = cv.dilate(mask, kernel, iterations=1)
    # FIX: `np.bool` was removed in NumPy 1.24 (AttributeError); use builtin bool.
    dilated = dilated.astype(bool)
    # detect potential touching masks
    neighbors = np.unique(masks[dilated])
    # discard the initial cell id of interest
    neighbors = neighbors[neighbors != i]
    # discard the background value
    return neighbors[neighbors != 0]
def build_contacting(masks, r=1):
    """
    Build a network from segmented regions that contact each other or are
    within a given distance from each other.

    Parameters
    ----------
    masks : array_like
        2D array of integers defining the identity of masks;
        0 is background (no object detected).
    r : int
        Radius of search.

    Returns
    -------
    pairs : ndarray
        Pairs of neighbors given by the first and second element of each row;
        values correspond to values in masks, which are different from index
        values of nodes.
    """
    sources = []
    targets = []
    # labels start at 1 (0 is background)
    for label in range(1, masks.max() + 1):
        neighbors = find_neighbors(masks, label, r=r)
        sources.append([label] * neighbors.size)
        targets.append(neighbors)
    # flatten arrays of arrays
    sources = np.fromiter(itertools.chain.from_iterable(sources), int).reshape(-1, 1)
    targets = np.fromiter(itertools.chain.from_iterable(targets), int).reshape(-1, 1)
    # remove duplicate pairs
    return remove_duplicate_pairs(np.hstack((sources, targets)))
def mask_val_coord(masks):
    """
    Compute the mapping between mask regions and their centroid coordinates.

    Parameters
    ----------
    masks : array_like
        2D (or 3D) array of integers defining the identity of masks;
        0 is background (no object detected).

    Returns
    -------
    coords : dataframe
        Centroid coordinates with columns 'x', 'y' (and 'z' in 3D),
        indexed by mask label.
    """
    props = measure.regionprops_table(masks, properties=('label', 'centroid'))
    coords = pd.DataFrame.from_dict(props)
    n_cols = coords.shape[1]
    # regionprops centroid axes come in (row, col) order: map them to x/y/z
    if n_cols == 3:
        coords.rename(columns={'centroid-1': 'x', 'centroid-0': 'y'}, inplace=True)
    elif n_cols == 4:
        coords.rename(columns={'centroid-2': 'x', 'centroid-1': 'y', 'centroid-0': 'z'},
                      inplace=True)
    else:
        print('More than 3 detected spatial dimensions, check output column names.')
    coords.index = coords['label']
    coords.drop(columns='label', inplace=True)
    return coords
def refactor_coords_pairs(coords, pairs):
    """
    Transforms coordinates and pairs of nodes data from segmented areas into
    the formats used by the other functions for network analysis and
    visualization.

    Parameters
    ----------
    coords : dataframe
        Coordinates of points with columns corresponding to axes ('x', 'y', ...),
        indexed by mask label.
    pairs : ndarray
        Pairs of neighbors given by the first and second element of each row;
        values correspond to values in masks, which are different from index
        values of nodes.

    Returns
    -------
    coords : ndarray
        Coordinates of points where each column corresponds to an axis (x, y, ...).
    pairs : ndarray
        Pairs of neighbors given by the first and second element of each row,
        expressed as positional node indices.
    """
    # map mask labels (dataframe index) to positional node ids
    label_to_id = dict(zip(coords.index, np.arange(coords.shape[0])))
    edges = pd.DataFrame({'source': pairs[:, 0], 'target': pairs[:, 1]})
    for col in ('source', 'target'):
        edges[col] = edges[col].map(label_to_id)
    new_coords = coords.loc[:, ['x', 'y']].values
    new_pairs = edges.loc[:, ['source', 'target']].values
    return new_coords, new_pairs
def link_solitaries(coords, pairs, method='knn', k=1, v=1):
    """
    Detect nodes that are not connected and link them to other nodes.

    Parameters
    ----------
    coords : ndarray
        Coordinates of points where each column corresponds to an axis (x, y, ...)
    pairs : ndarray
        The (n_pairs x 2) matrix of neighbors indices.
    method : string, optional
        Method used to connect solitary nodes to their neighbors.
        The default is 'knn', solitary nodes will be connected to their
        'k' closest neighbors.
    k : int, optional
        Number of neighbors of the knn method. Default is 1.
    v : int, optional
        Verbosity, if different from 0 some messages are displayed.
        Default is 1.

    Returns
    -------
    pairs : ndarray
        The (n_pairs x 2) matrix of neighbors indices, with additional
        edges (rows in array).

    Example
    -------
    >>> coords = np.array([[0, 0], [1, 0], [2, 0], [3.1, 0], [4, 0]])
    >>> pairs = np.array([[0, 1], [1, 2]])
    >>> link_solitaries(coords, pairs, method='knn', k=1)
    array([[0, 1],
           [1, 2],
           [3, 4]])
    """
    # detect nodes that appear in no edge at all
    uniq_nodes = set(range(coords.shape[0]))
    uniq_pairs = set(np.unique(pairs))
    solitaries = uniq_nodes.difference(uniq_pairs)
    if solitaries == set():
        # bug fix: this message was printed regardless of the verbosity flag,
        # unlike the message in the other branch; both are now gated on `v`
        if v != 0:
            print("all nodes have at least one edge")
    else:
        if v != 0:
            print(f"there are {len(solitaries)}/{coords.shape[0]} nodes with no edges")
        if method == 'knn':
            nn_pairs = build_knn(coords, k=k)
            # for each solitary node, add its edges with the knn neighbors
            for i in solitaries:
                select = np.logical_or(nn_pairs[:, 0] == i, nn_pairs[:, 1] == i)
                pairs = np.vstack([pairs, nn_pairs[select, :]])
        # added edges can duplicate existing ones
        pairs = remove_duplicate_pairs(pairs)
    return pairs
def build_contacting_nn(masks, r=1, k=3):
    """
    Build a network from segmented regions as a mix between
    the contacting areas method, that can output some nodes without
    edges, and the nearest neighbors method that will link
    these nodes to their neighbors.

    Parameters
    ----------
    masks : array_like
        2D array of integers defining the identity of masks,
        0 is background (no object detected).
    r : int
        Radius of search for the contacting areas method. The default is 1.
    k : int, optional
        Number of nearest neighbors. The default is 3.

    Returns
    -------
    coords : ndarray
        Coordinates of points where each column corresponds to an axis (x, y, ...)
    pairs : ndarray
        Pairs of neighbors given by the first and second element of each row.
    """
    pairs = build_contacting(masks, r=r)
    # reencode the coordinates to match node positions with their respective areas
    coords = mask_val_coord(masks)
    coords, pairs = refactor_coords_pairs(coords, pairs)
    # bug fix: `k` was accepted but never forwarded, so solitary nodes were
    # always linked to a single nearest neighbor regardless of `k`
    pairs = link_solitaries(coords, pairs, method='knn', k=k)
    return coords, pairs
# ------ Parallelized version of build_contacting ------
def choose_optimal_image_split(im, method='im_size', min_tile_size=360000):
    """
    Compute the optimal number of splits of an image
    to run a function in parallel on each core.

    Parameters
    ----------
    im : array_like
        2D array of integers defining the identity of segmented objects,
        0 is background (no object detected).
    method : str, optional
        The method used to define the optimal number of splits.
        The default is 'im_size'.
    min_tile_size : int
        Minimum number of bytes of tiles. The default is 360000.

    Returns
    -------
    n_splits : int
        The optimal number of splits; with method 'im_size' it is a power
        of 2 bounded by the number of CPU cores.

    Example
    -------
    >>> im = np.zeros((1024, 1024), dtype=np.int32)
    >>> n_splits = choose_optimal_image_split(im)

    Notes
    -----
    One would ideally consider the number of cores, the size of the image
    and the number of detected objects.
    The number of splits shouldn't be superior to the number of cores,
    otherwise some cores will wait for the last tiles to be processed
    by other cores, while increasing the inter-process communication.
    Ideally n_splits should be a power of 2 in order to split the image
    easily.
    """
    n_cores = os.cpu_count()
    # perf fix: a count of segmented objects was computed here with
    # np.unique(im) but never used; dropped to avoid a costly full-image scan
    if method == 'im_size':
        # avoid too many splits if image is not so big
        im_size = im.nbytes  # slightly different from sys.getsizeof(im)
        # largest power of 2 to consider, bounded by the number of cores
        max_i = int(np.log2(n_cores)) + 1
        n_splits = 1
        for i in range(1, max_i):
            new_split = 2**i
            # stop as soon as tiles would become smaller than the minimum size
            if im_size / new_split >= min_tile_size:
                n_splits = new_split
            else:
                break
    elif method == 'naive':
        n_splits = n_cores
    return n_splits
def split_range(r, n):
    """
    Computes the indices of segments after splitting a range of r values
    into n segments.

    Parameters
    ----------
    r : int
        Size of the range vector.
    n : int
        The number of splits.

    Returns
    -------
    segments : list
        The list of lists of first and last indices of segments.

    Example
    -------
    >>> split_range(8, 2)
    [[0, 4], [4, 8]]
    """
    step = r // n
    segments = [[step * i, step * (i + 1)] for i in range(n)]
    # the truncated step may leave a gap at the end: extend the last segment
    segments[-1][-1] = r
    return segments
def extend_indices(segments, margin):
    """
    Decrease and increase the values of the first and last elements
    respectively in each list of segments by a given margin.
    The first indice of the first segment and the last indice of the
    last segment are not modified.  Segments are modified in place
    and also returned.

    Parameters
    ----------
    segments : list
        The list of lists of first and last indices of segments.
    margin : int
        The extra extend to add on each side of segments.

    Example
    -------
    >>> segments = split_range(16, 4)
    >>> extend_indices(segments, margin=1)
    [[0, 5], [3, 9], [7, 13], [11, 16]]
    """
    n_seg = len(segments)
    if n_seg == 1:
        return segments
    for idx, seg in enumerate(segments):
        # extend the left side, except for the very first segment
        if idx > 0:
            seg[0] -= margin
        # extend the right side, except for the very last segment
        if idx < n_seg - 1:
            seg[-1] += margin
    return segments
def make_tiles_limits(im, n_splits, margin=0):
    """
    Compute the indices in an image to split it into several tiles.

    Parameters
    ----------
    im : array_like
        2D array of integers defining the identity of segmented objects,
        0 is background (no object detected).
    n_splits : int
        The number of splits.
    margin : int
        The extra space to include at the border of tiles.
        The default is 0.

    Returns
    -------
    tiles_indices : list
        The list of limits [xmin, xmax, ymin, ymax] for each tile, where
        x spans im.shape[1] and y spans im.shape[0].

    Example
    -------
    >>> im = np.arange(16 * 8).reshape(16, 8)
    >>> make_tiles_limits(im, 4, margin=0)
    [[0, 4, 0, 8], [0, 4, 8, 16], [4, 8, 0, 8], [4, 8, 8, 16]]
    >>> make_tiles_limits(im, 4, margin=1)
    [[0, 5, 0, 9], [0, 5, 7, 16], [3, 8, 0, 9], [3, 8, 7, 16]]
    """
    if n_splits == 1:
        # bug fix: previously a flat list was returned for a single split,
        # whereas callers iterate over a list of limit lists; wrap it so the
        # whole image is one tile
        return [[0, im.shape[1], 0, im.shape[0]]]
    # number of splits per axis
    # NOTE(review): this yields n_splits tiles only when n_splits is an even
    # power of 2 (4, 16, ...); e.g. n_splits=2 gives a single tile — confirm
    # intended behavior
    ax_splits = int(np.log2(n_splits))
    x_segments = split_range(im.shape[1], ax_splits)
    y_segments = split_range(im.shape[0], ax_splits)
    if margin > 0:
        x_segments = extend_indices(x_segments, margin=margin)
        y_segments = extend_indices(y_segments, margin=margin)
    # make combinations of [xmin, xmax, ymin, ymax] indices of tiles
    tiles_indices = []
    for xlim in x_segments:
        for ylim in y_segments:
            tiles_indices.append(xlim + ylim)
    return tiles_indices
def extract_tile(im, limits):
    """
    Extract a tile from an image given its [xmin, xmax, ymin, ymax]
    limit indices, as produced by `make_tiles_limits`.

    Parameters
    ----------
    im : array_like
        2D array of integers defining the identity of segmented objects,
        0 is background (no object detected).
    limits : list
        The list of limit indices [xmin, xmax, ymin, ymax], where x spans
        image columns (axis 1) and y spans image rows (axis 0).

    Returns
    -------
    tile : array_like
        The extracted tile (a view on `im`).

    Example
    -------
    >>> im = np.arange(8 * 8).reshape(8, 8)
    >>> tiles_indices = make_tiles_limits(im, 4, margin=0)
    >>> extract_tile(im, tiles_indices[-1])
    array([[36, 37, 38, 39],
           [44, 45, 46, 47],
           [52, 53, 54, 55],
           [60, 61, 62, 63]])
    """
    # bug fix: `make_tiles_limits` builds x ranges from im.shape[1] (columns)
    # and y ranges from im.shape[0] (rows), but the previous slicing applied
    # the x range to rows, producing wrong (possibly empty) tiles for
    # non-square images; rows must be indexed by the y range
    tile = im[limits[2]: limits[3], limits[0]: limits[1]]
    return tile
def merge_pairs(lpairs):
    """
    Merge a list of Nx2 arrays into a single N'x2 array of unique rows.

    Parameters
    ----------
    lpairs : list
        The list of detected edges as 2D arrays.

    Returns
    -------
    pairs : array_like
        The merged detected edges, duplicates removed and rows sorted.

    Example
    -------
    >>> a = np.arange(4).reshape(-1, 2)
    >>> b = a + 2
    >>> merge_pairs([a, b])
    array([[0, 1],
           [2, 3],
           [4, 5]])
    """
    stacked = np.vstack(lpairs)
    # tiles overlap, so the same edge can be detected several times
    pairs = np.unique(stacked, axis=0)
    return pairs
def build_contacting_parallel(im, r=1, split_method='im_size', min_tile_size=360000):
    """
    Build a network from segmented regions that contact each other or are
    within a given distance from each other, processing image tiles in
    parallel with dask.

    Parameters
    ----------
    im : array_like
        2D array of integers defining the identity of masks,
        0 is background (no object detected).
    r : int
        Radius of search.
    split_method : str, optional
        The method used to define the optimal number of splits.
        The default is 'im_size'.
    min_tile_size : int
        Minimum number of bytes of tiles.
        The default is 360000.

    Returns
    -------
    pairs : ndarray
        Pairs of neighbors given by the first and second element of each row;
        values correspond to values in masks, which are different from index
        values of nodes.
    """
    n_splits = choose_optimal_image_split(im, method=split_method, min_tile_size=min_tile_size)
    # margin r so contacts across tile borders are not missed
    segments = make_tiles_limits(im, n_splits, margin=r)
    # bug fix: the worker count was hard-coded to 16 regardless of the number
    # of tiles or available cores; use one worker per tile, bounded by cores
    cluster = LocalCluster(
        n_workers=min(n_splits, os.cpu_count()),
        threads_per_worker=1,
    )
    client = Client(cluster)
    try:
        # list of pairs computed for each tile
        lpairs = []
        for limits in segments:
            tile = dask.delayed(extract_tile)(im, limits)
            pairs = dask.delayed(build_contacting)(tile, r=r)
            lpairs.append(pairs)
        # merge all pairs
        pairs = dask.delayed(merge_pairs)(lpairs)
        pairs = pairs.compute()
    finally:
        # bug fix: the cluster and client were never closed, leaking
        # worker processes on repeated calls
        client.close()
        cluster.close()
    return pairs
# ------ end of parallel build_contacting ------
def rescale(data, perc_mini=1, perc_maxi=99,
            out_mini=0, out_maxi=1,
            cutoff_mini=True, cutoff_maxi=True,
            return_extrema=False):
    """
    Normalize the intensities of a planar 2D image.

    Parameters
    ----------
    data : numpy array
        The matrix to process.
    perc_mini : float
        The low input percentile mapped to the low output level.
    perc_maxi : float
        The high input percentile mapped to the high output level.
    out_mini : int or float
        The low output level; defaults to the low input percentile if None.
    out_maxi : int or float
        The high output level; defaults to the high input percentile if None.
    cutoff_mini : bool
        If True, clip final values below the low output level.
    cutoff_maxi : bool
        If True, clip final values above the high output level.
    return_extrema : bool
        If True, the minimum and maximum percentiles of the original data
        are also returned.

    Returns
    -------
    data_out : numpy array
        The output image (and the input percentiles if `return_extrema`).
    """
    mini, maxi = np.percentile(data, [perc_mini, perc_maxi])
    # default output levels to the input percentiles
    if out_mini is None:
        out_mini = mini
    if out_maxi is None:
        out_maxi = maxi
    # linear mapping [mini, maxi] -> [out_mini, out_maxi]
    data_out = (data - mini) * (out_maxi - out_mini) / (maxi - mini) + out_mini
    if cutoff_mini:
        data_out = np.where(data_out < out_mini, out_mini, data_out)
    if cutoff_maxi:
        data_out = np.where(data_out > out_maxi, out_maxi, data_out)
    if return_extrema:
        return data_out, mini, maxi
    return data_out
def plot_network(coords, pairs, disp_id=False, labels=None,
                 color_mapper=None, legend=True, legend_opt=None,
                 col_nodes=None, cmap_nodes=None, marker=None,
                 size_nodes=None, col_edges='k', alpha_edges=0.5,
                 linewidth=None,
                 ax=None, figsize=(15, 15), aspect='equal', **kwargs):
    """
    Plot a network.

    Parameters
    ----------
    coords : ndarray
        Coordinates of points where each column corresponds to an axis (x, y, ...)
    pairs : ndarray
        The (n_pairs x 2) array of neighbors indices.
    disp_id: bool
        If True nodes' indices are displayed.
    labels: panda series
        The nodes' labels from which they are colored.
    legend_opt : dict or None
        Optional parameters for the legend
        like {'loc': 'upper right', 'bbox_to_anchor': (0.5, 0.5)}
    color_mapper: dict
        Maps each label to its color. Computed if not provided.
    figsize : (float, float), default: :rc:`figure.figsize`
        Width, height in inches. The default is (15, 15).
    col_nodes : str of matplotlib compatible color, optional
        Color of nodes. The default is None.
    cmap_nodes: list
        List of hexadecimal colors for nodes attributes.
    marker : str, optional
        Marker used to display nodes. The default is None.
    size_nodes : int, optional
        Size of nodes. The default is None.
    col_edges : str or matplotlib compatible color, optional
        Color of edges. The default is 'k'.
    alpha_edges : float, optional
        Transparency of edges. The default is 0.5.
    linewidth : float, optional
        Width of edges. The default is None.
    ax : matplotlib ax object, optional
        If provided, the plot is displayed in ax. The default is None.
    aspect : str, optional
        Control aspect ratio of the figure. The default is 'equal'.
    **kwargs : dict
        Optional parameters to display nodes.

    Returns
    -------
    (fig, ax) if `ax` was not provided in parameters, else None.
    """
    if ax is None:
        ax_none = True
        fig, ax = plt.subplots(figsize=figsize)
    else:
        ax_none = False
    # plot nodes
    if labels is not None:
        # normalize the labels container into an array of unique values
        if isinstance(labels, np.ndarray):
            uniq = np.unique(labels)
        elif isinstance(labels, pd.Series):
            uniq = labels.unique()
        else:
            uniq = np.unique(np.array(labels))
        # color nodes with manual colors
        if color_mapper is None:
            if cmap_nodes is None:
                cmap_nodes = sns.color_palette('muted').as_hex()
            # make a dictionnary attribute:color, with cycling over cmap
            n_colors = len(cmap_nodes)
            color_mapper = {x: cmap_nodes[i % n_colors] for i, x in enumerate(uniq)}
        # one scatter call per class so that each class gets a legend entry
        for label in uniq:
            select = labels == label
            color = color_mapper[label]
            ax.scatter(coords[select,0], coords[select,1], c=color, label=label,
                       marker=marker, s=size_nodes, zorder=10, **kwargs)
        if legend:
            # NOTE(review): plt.legend targets the *current* axes, which may
            # differ from `ax` when an axes object is passed in — confirm
            if legend_opt is None:
                plt.legend()
            else:
                plt.legend(**legend_opt)
    else:
        # no labels: a single scatter, optionally colored by col_nodes
        ax.scatter(coords[:,0], coords[:,1], c=col_nodes, cmap=cmap_nodes,
                   marker=marker, s=size_nodes, zorder=10, **kwargs)
    # plot edges (zorder 5: below nodes, above background)
    for pair in pairs[:,:]:
        [x0, y0], [x1, y1] = coords[pair]
        ax.plot([x0, x1], [y0, y1], c=col_edges, zorder=5, alpha=alpha_edges, linewidth=linewidth)
    if disp_id:
        # small offset so the id text does not sit exactly on the node
        offset=0.02
        for i, (x,y) in enumerate(coords):
            plt.text(x-offset, y-offset, str(i), zorder=15)
    if aspect is not None:
        ax.set_aspect(aspect)
    if ax_none:
        return fig, ax
def plot_network_distances(coords, pairs, distances, labels=None,
                           color_mapper=None, legend=True, legend_opt=None,
                           col_nodes=None, cmap_nodes=None, marker=None, size_nodes=None,
                           cmap_edges='viridis', alpha_edges=0.7, linewidth=None,
                           figsize=(15, 15), ax=None, aspect='equal', **kwargs):
    """
    Plot a network with edges colored by their length.

    Parameters
    ----------
    coords : ndarray
        Coordinates of points where each column corresponds to an axis (x, y, ...)
    pairs : ndarray
        The (n_pairs x 2) array of neighbors indices.
    distances : array
        Distances between each pair of neighbors.
    labels: panda series
        The nodes' labels from which they are colored.
    legend_opt : dict or None
        Optional parameters for the legend
        like {'loc': 'upper right', 'bbox_to_anchor': (0.5, 0.5)}
    color_mapper: dict
        Maps each label to its color. Computed if not provided.
    col_nodes : str of matplotlib compatible color, optional
        Color of nodes. The default is None.
    cmap_nodes: list
        List of hexadecimal colors for nodes attributes.
    marker : str, optional
        Marker used to display nodes. The default is None.
    size_nodes : float, optional
        Size of nodes. The default is None.
    cmap_edges : str of matplotlib.colormap, optional
        Colormap of edges. The default is 'viridis'.
    alpha_edges : float, optional
        Transparency of edges. The default is 0.7.
    linewidth : float, optional
        Width of edges. The default is None.
    figsize : (float, float), default: :rc:`figure.figsize`
        Width, height in inches. The default is (15, 15).
    ax : matplotlib ax object, optional
        If provided, the plot is displayed in ax. The default is None.
    aspect : str, optional
        Proportions of the figure. The default is 'equal'.
    **kwargs : dict
        Optional parameters to display nodes.

    Returns
    -------
    (fig, ax) if `ax` was not provided in parameters, else None.
    """
    if ax is None:
        ax_none = True
        fig, ax = plt.subplots(figsize=figsize)
    else:
        ax_none = False
        # bug fix: `fig` is needed below for the colorbar but was left
        # undefined (NameError) whenever an existing axes was passed in
        fig = ax.get_figure()
    # plot nodes
    if labels is not None:
        if isinstance(labels, np.ndarray):
            uniq = np.unique(labels)
        elif isinstance(labels, pd.Series):
            uniq = labels.unique()
        else:
            uniq = np.unique(np.array(labels))
        # color nodes with manual colors
        if color_mapper is None:
            if cmap_nodes is None:
                cmap_nodes = sns.color_palette('muted').as_hex()
            # make a dictionnary attribute:color, with cycling over cmap
            n_colors = len(cmap_nodes)
            color_mapper = {x: cmap_nodes[i % n_colors] for i, x in enumerate(uniq)}
        for label in uniq:
            select = labels == label
            color = color_mapper[label]
            ax.scatter(coords[select,0], coords[select,1], c=color, label=label,
                       marker=marker, s=size_nodes, zorder=10, **kwargs)
        if legend:
            if legend_opt is None:
                plt.legend()
            else:
                plt.legend(**legend_opt)
    else:
        ax.scatter(coords[:,0], coords[:,1], c=col_nodes, cmap=cmap_nodes,
                   marker=marker, s=size_nodes, zorder=10, **kwargs)
    # plot edges, colored by their rescaled length
    scaled_dist, min_dist, max_dist = rescale(distances, return_extrema=True)
    # NOTE(review): `cmap_edges` is currently ignored and viridis is
    # hard-coded here — confirm before exposing other colormaps
    cmap = mpl.cm.viridis
    norm = mpl.colors.Normalize(vmin=min_dist, vmax=max_dist)
    for pair, dist in zip(pairs[:,:], scaled_dist):
        [x0, y0], [x1, y1] = coords[pair]
        ax.plot([x0, x1], [y0, y1], c=cmap(dist), zorder=0, alpha=alpha_edges, linewidth=linewidth)
    # anchor the colorbar on `ax` so matplotlib knows where to steal space
    fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), ax=ax,
                 orientation='vertical', label='Distance')
    # TODO: plot many lines more efficiently check
    # from https://stackoverflow.com/a/50029441
    # https://matplotlib.org/gallery/shapes_and_collections/line_collection.html#sphx-glr-gallery-shapes-and-collections-line-collection-py
    if aspect is not None:
        ax.set_aspect(aspect)
    if ax_none:
        return fig, ax
def showim(image, figsize=(9,9), ax=None, **kwargs):
    """
    Displays an image with tight layout and without axes.

    Parameters
    ----------
    image : ndarray
        A 1 or 3 channels image.
    figsize : (int, int), optional
        Size of the figure. The default is (9,9).
    ax : matplotlib ax object, optional
        If provided, the plot is displayed in ax. The default is None.
    **kwargs : dic
        Other options for plt.imshow().

    Returns
    -------
    (fig, ax) if `ax` was not provided in parameters, else None.
    """
    if ax is None:
        # we created the figure ourselves, so we return it to the caller
        return_ax = True
        fig, ax = plt.subplots(figsize=figsize)
    else:
        return_ax = False
    ax.imshow(image, **kwargs)
    ax.axis('off')
    ax.figure.tight_layout()
    if return_ax:
        return fig, ax
def categorical_to_integer(l):
    """
    Map categorical labels to integer codes.

    Parameters
    ----------
    l : iterable
        Sequence of hashable category labels.

    Returns
    -------
    converted : list
        Integer codes, one per element of `l`, assigned in order of
        first occurrence.

    Notes
    -----
    Bug fix: the mapping was previously built from a `set`, whose iteration
    order is arbitrary (and varies across runs for strings), so the codes
    were not reproducible.  Categories are now coded deterministically by
    first occurrence.

    Example
    -------
    >>> categorical_to_integer(['b', 'a', 'b'])
    [0, 1, 0]
    """
    # dict preserves insertion order: first occurrence wins
    uniq = dict.fromkeys(l)
    mapping = {x: i for i, x in enumerate(uniq)}
    converted = [mapping[x] for x in l]
    return converted
def flatten_categories(nodes, att):
    """
    Convert one-hot encoded attribute columns back into a single
    categorical series: for each row, the name of the column holding
    the maximum value.

    The reverse operation is
    nodes = nodes.join(pd.get_dummies(nodes['nodes_class'])).
    """
    one_hot = nodes.loc[:, att]
    return one_hot.idxmax(axis=1)
def coords_to_df(coords, columns=None):
    """
    Convert an array of coordinates of nodes into a dataframe.

    Parameters
    ----------
    coords : ndarray
        Coordinates of points with columns corresponding to axes ('x', 'y', ...)
    columns : Index or array-like
        Column labels to use for the resulting frame. Defaults to
        ['x', 'y'(, 'z')] for 2 or 3 dimensions and ['x0', 'x1', ..., 'xn']
        otherwise.

    Returns
    -------
    nodes : dataframe
        Coordinates of nodes indicated by 'x', 'y' or other if required.
    """
    if columns is None:
        nb_dim = coords.shape[1]
        if nb_dim == 2:
            columns = ['x', 'y']
        elif nb_dim == 3:
            columns = ['x', 'y', 'z']
        else:
            # generic axis names beyond 3 dimensions
            columns = ['x' + str(i) for i in range(nb_dim)]
    return pd.DataFrame(data=coords, columns=columns)
def pairs_to_df(pairs, columns=['source', 'target']):
    """
    Convert an array of pairs of nodes into a dataframe.

    Parameters
    ----------
    pairs : ndarray
        The (n_pairs x 2) array of neighbors indices.
    columns : Index or array-like
        Column labels to use for the resulting frame.
        Default is ['source', 'target'].

    Returns
    -------
    edges : dataframe
        Edges indicated by the nodes 'source' and 'target' they link.
    """
    # NOTE: the mutable default is kept for interface compatibility; it is
    # never modified here
    return pd.DataFrame(data=pairs, columns=columns)
def double_sort(data, last_var=0):
    """
    Sort twice an array, first within each row (axis 1), then preserve
    whole rows and sort them by one column (axis 0).
    Useful to compare pairs of nodes obtained with different methods.

    Parameters
    ----------
    data : 2D array
        Data to sort.
    last_var : int, optional. The default is 0.
        Column by which intact rows are sorted.

    Returns
    -------
    data : 2D array
        Sorted data.

    Examples
    --------
    >>> pairs = np.array([[4,3],
                          [5,6],
                          [2,1]])
    >>> double_sort(pairs)
    array([[1, 2],
           [3, 4],
           [5, 6]])
    """
    # doing simply np.sort(np.sort(pairs, axis=1), axis=0)
    # would uncouple first and second elements of pairs
    # during the second sorting (axis=0)
    data = np.sort(data, axis=1)
    # bug fix: `last_var` was accepted but ignored (column 0 was always
    # used); the default (0) preserves the previous behavior
    x_sort = np.argsort(data[:, last_var])
    data = data[x_sort]
    return data
def confusion_stats(set_true, set_test):
    """
    Count the true positives, false positives and false
    negatives in a test set with respect to a "true" set.
    True negatives are not counted.

    Returns
    -------
    (true_pos, false_pos, false_neg) : tuple of int
    """
    true_pos = len(set_true & set_test)
    false_pos = len(set_test - set_true)
    false_neg = len(set_true - set_test)
    return true_pos, false_pos, false_neg
def score_method(pairs_true, pairs_test):
    """
    Compute a performance score from the counts of
    true positives, false positives and false negatives
    of predicted pairs of nodes that are "double sorted".

    Returns
    -------
    (true_pos_rate, false_pos_rate, false_neg_rate) : tuple of float
        Each count divided by the total of the three counts.

    Examples
    --------
    >>> pairs_true = np.array([[3,4],
                               [5,6],
                               [7,8]])
    >>> pairs_test = np.array([[1,2],
                               [3,4],
                               [5,6]])
    >>> score_method(pairs_true, pairs_test)
    (0.5, 0.25, 0.25)
    """
    # compare pairs as sets of (source, target) tuples
    set_true = {tuple(e) for e in pairs_true}
    set_test = {tuple(e) for e in pairs_test}
    true_pos, false_pos, false_neg = confusion_stats(set_true, set_test)
    # normalize by all counted outcomes (true negatives are not counted)
    total = true_pos + false_pos + false_neg
    true_pos_rate = true_pos / total
    false_pos_rate = false_pos / total
    false_neg_rate = false_neg / total
    return true_pos_rate, false_pos_rate, false_neg_rate
def to_NetworkX(nodes, edges, attributes=None):
    """
    Convert tysserand network representation to a NetworkX network object.

    Parameters
    ----------
    nodes : ndarray or dataframe
        Coordinates of points with columns corresponding to axes ('x', 'y', ...)
    edges : ndarray or dataframe
        The pairs of nodes given by their indices.
    attributes : dataframe
        Attributes of nodes to be added in NetworkX. Default is None.

    Returns
    -------
    G : NetworkX object
        The converted network.
    """
    import networkx as nx
    # convert to dataframe if numpy array
    if isinstance(nodes, np.ndarray):
        nodes = coords_to_df(nodes)
    if isinstance(edges, np.ndarray):
        edges = pairs_to_df(edges)
    # NOTE(review): from_pandas_edgelist only creates nodes that appear in at
    # least one edge; isolated nodes from `nodes` are not added — confirm
    # whether that is intended
    G = nx.from_pandas_edgelist(edges)
    if attributes is not None:
        for col in attributes.columns:
            # only for glm extension file:
            # nx.set_node_attributes(G, attributes[col].to_dict(), col.replace('+','AND'))
            nx.set_node_attributes(G, attributes[col].to_dict(), col)
    return G
def to_iGraph(nodes, edges, attributes=None):
    """
    Convert tysserand network representation to an iGraph network object.

    Parameters
    ----------
    nodes : ndarray or dataframe
        Coordinates of points with columns corresponding to axes ('x', 'y', ...)
    edges : ndarray or dataframe
        The pairs of nodes given by their indices.
    attributes : dataframe
        Attributes of nodes to be added in iGraph. Default is None.

    Returns
    -------
    G : iGraph object
        The converted network.
    """
    import igraph as ig
    # convert to dataframe if numpy array
    if isinstance(nodes, np.ndarray):
        nodes = coords_to_df(nodes)
    if isinstance(edges, np.ndarray):
        edges = pairs_to_df(edges)
    # initialize empty graph
    G = ig.Graph()
    # add all the vertices
    G.add_vertices(nodes.shape[0])
    # add all the edges
    G.add_edges(edges.values)
    # add attributes
    if attributes is not None:
        for col in attributes.columns:
            att = attributes[col].values
            # iGraph attributes are numeric: encode string categories first
            # NOTE(review): categorical_to_integer's coding order is not
            # stable across runs in the original implementation — verify
            if isinstance(att[0], str):
                att = categorical_to_integer(att)
            G.vs[col] = att
    return G
def add_to_AnnData(coords, pairs, adata):
    """
    Convert tysserand network representation to sparse matrices
    and add them to an AnnData (Scanpy) object.

    Parameters
    ----------
    coords : ndarray
        Coordinates of points with columns corresponding to axes ('x', 'y', ...)
    pairs : ndarray
        The pairs of nodes given by their indices.
    adata : AnnData object
        An object dedicated to single-cell data analysis; modified in place:
        obsp['connectivities'], obsp['distances'] and uns['neighbors'] are set.
    """
    # convert arrays to sparse matrices
    n_cells = adata.shape[0]
    connect = np.ones(pairs.shape[0], dtype=np.int8)
    sparse_connect = csr_matrix((connect, (pairs[:,0], pairs[:,1])), shape=(n_cells, n_cells), dtype=np.int8)
    distances = distance_neighbors(coords, pairs)
    # bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is equivalent (float64)
    sparse_dist = csr_matrix((distances, (pairs[:,0], pairs[:,1])), shape=(n_cells, n_cells), dtype=float)
    # add to AnnData object
    adata.obsp['connectivities'] = sparse_connect
    adata.obsp['distances'] = sparse_dist
    adata.uns['neighbors'] = {'connectivities_key': 'connectivities',
                              'distances_key': 'distances',
                              'params': {'method': 'delaunay',
                                         'metric': 'euclidean',
                                         'edge_trimming': 'percentile 99'}}
# --------------------------------------------------------------------
# ------------- Interactive visualization and annotation -------------
# --------------------------------------------------------------------
def visualize(viewer, img, colormaps=None):
    """
    Create a napari viewer instance with image splitted into
    separate channels.

    Parameters
    ----------
    viewer : napari Viewer
        Viewer the image layers are added to.
    img : ndarray
        Multichannel image; channels are expected on the last axis.
    colormaps : 'rgb' or list of str or None, optional
        Colormap names applied per channel; 'rgb' expands to
        ['red', 'green', 'blue']; channels beyond the list get 'gray'.
    """
    if colormaps == 'rgb':
        colormaps = [
            'red',
            'green',
            'blue',
        ]
    # add successively all channels
    for i in range(img.shape[-1]):
        # avoid the alpha channel of RGB images
        if i == 3 and np.all(img[:, :, i] == 1):
            pass
        else:
            # fall back to gray when no colormap is provided for this channel
            if colormaps is not None and i < len(colormaps):
                colormap = colormaps[i]
            else:
                colormap = 'gray'
            viewer.add_image(img[:, :, i], name='ch' + str(i), colormap=colormap, blending='additive')
    return
def get_annotation_names(viewer):
    """
    Detect the names of nodes and edges layers.

    Returns the name of the first Points layer and the first Shapes layer
    found in the viewer; either may be None if no such layer exists.
    """
    layer_nodes_name = None
    layer_edges_name = None
    for layer in viewer.layers:
        # nodes are stored in a Points layer, edges in a Shapes layer
        if isinstance(layer, napari.layers.points.points.Points):
            layer_nodes_name = layer.name
        elif isinstance(layer, napari.layers.shapes.shapes.Shapes):
            layer_edges_name = layer.name
        # stop as soon as both layer kinds have been found
        if layer_nodes_name is not None and layer_edges_name is not None:
            break
    return layer_nodes_name, layer_edges_name
def convert_nodes_tys_to_nap(coords):
    """
    Convert nodes coordinates from the tysserand (x, y) column order to
    the napari order by reversing the columns.
    """
    return coords[:, ::-1]
def convert_edges_tys_to_nap(coords, pairs):
    """
    Convert tysserand edges into the napari format: a list with one
    (2 x ndim) array per edge, holding the coordinates of its two
    extremities.
    """
    return [np.array(coords[pair]) for pair in pairs]
def make_annotation_dict(coords, pairs=None,
                         nodes_class=None,
                         nodes_class_color_mapper=None,
                         ):
    """
    Create a dictionary of annotations from tysserand network objects.

    Keys produced: 'nodes_coords', and optionally 'nodes_class',
    'nodes_class_color_mapper' and 'edges_coords'.
    """
    # napari uses the reversed axes order
    nap_nodes = convert_nodes_tys_to_nap(coords)
    annotations = {'nodes_coords': nap_nodes}
    if nodes_class is not None:
        annotations['nodes_class'] = nodes_class
    if nodes_class_color_mapper is not None:
        annotations['nodes_class_color_mapper'] = nodes_class_color_mapper
    if pairs is not None:
        annotations['edges_coords'] = convert_edges_tys_to_nap(nap_nodes, pairs)
    return annotations
def get_annotation_dict(viewer, layer_nodes_name, layer_edges_name):
    """
    Create a dictionary of annotations from layers in a napari viewer.

    Parameters
    ----------
    viewer : napari Viewer
        Viewer holding the annotation layers.
    layer_nodes_name : str or None
        Name of the Points layer holding nodes; skipped if None.
    layer_edges_name : str or None
        Name of the Shapes layer holding edges; skipped if None.

    Returns
    -------
    annotations : dict
        Keys among 'nodes_coords', 'nodes_size', 'nodes_class',
        'nodes_class_color_mapper', 'edges_coords', 'edges_edge_width'.
    """
    annotations = {}
    if layer_nodes_name is not None:
        annotations['nodes_coords'] = viewer.layers[layer_nodes_name].data
        # pick a unique value instead of saving a 2D array of duplicates
        annotations['nodes_size'] = np.median(viewer.layers[layer_nodes_name].size)
        # ------ convert colors arrays into unique nodes classes ------
        colors = viewer.layers[layer_nodes_name].face_color
        color_set = {tuple(e) for e in colors}
        # mapper to convert nodes classes into color tuples
        # NOTE(review): class ids come from iterating a set, so their order
        # is not stable across runs — confirm this is acceptable
        nodes_class_color_mapper = dict(zip(range(len(color_set)), color_set))
        # mapper to convert color tuples into nodes classes
        nodes_color_class_mapper = {val: key for key, val in nodes_class_color_mapper.items()}
        nodes_class = np.array([nodes_color_class_mapper[tuple(key)] for key in colors])
        annotations['nodes_class'] = nodes_class
        annotations['nodes_class_color_mapper'] = nodes_class_color_mapper
    if layer_edges_name is not None:
        annotations['edges_coords'] = viewer.layers[layer_edges_name].data
        # single representative width instead of one value per edge
        annotations['edges_edge_width'] = np.median(viewer.layers[layer_edges_name].edge_width)
        # TODO (maybe): implement edge color mapper
        # annotations['edges_edge_colors'] = viewer.layers[layer_edges_name].edge_color
    return annotations
def save_annotations(path, viewer=None, annotations=None, layer_names=None):
    """
    Create and save annotations from the layers of a napari viewer.

    Parameters
    ----------
    path : str
        Destination file; written with joblib.dump.
    viewer : napari Viewer, optional
        Used to extract annotations when `annotations` is not given.
    annotations : dict, optional
        Pre-built annotations; if None they are extracted from `viewer`.
    layer_names : (str, str), optional
        Names of the nodes and edges layers; detected from `viewer` if None.
    """
    if annotations is None:
        # build the annotations dictionary from the viewer's layers
        if layer_names is not None:
            layer_nodes_name, layer_edges_name = layer_names
        else:
            layer_nodes_name, layer_edges_name = get_annotation_names(viewer)
        annotations = get_annotation_dict(viewer, layer_nodes_name, layer_edges_name)
    joblib.dump(annotations, path);
    return
def load_annotations(path):
    """
    Load annotations for the layers of a napari viewer.

    Parameters
    ----------
    path : str
        File previously written by `save_annotations` (joblib format).

    Returns
    -------
    annotations : dict
        The annotations dictionary.
    """
    annotations = joblib.load(path);
    return annotations
def add_nodes(
    viewer,
    annotations,
    name='nodes',
    ):
    """
    Add nodes annotations in a napari viewer.

    Parameters
    ----------
    viewer : napari Viewer
        Viewer the Points layer is added to.
    annotations : dict
        Must hold 'nodes_coords'; 'nodes_class', 'nodes_class_color_mapper'
        and 'nodes_size' are used when present.
    name : str, optional
        Name of the created layer. The default is 'nodes'.
    """
    if 'nodes_class_color_mapper' in annotations.keys() and 'nodes_class' in annotations.keys():
        # rebuild one face color per node from its class
        face_color = np.array([annotations['nodes_class_color_mapper'][key] for key in annotations['nodes_class']])
    else:
        # default matplotlib blue
        face_color = '#1f77b4'
    if 'nodes_size' in annotations.keys():
        size = annotations['nodes_size']
    else:
        size = 10
    viewer.add_points(
        annotations['nodes_coords'],
        # reconstruct the colors array
        face_color=face_color,
        size=size,
        name=name,
        )
    return
def add_edges(
    viewer,
    annotations,
    edge_color='white',
    name='edges',
    ):
    """
    Add edges annotations in a napari viewer.

    Parameters
    ----------
    viewer : napari Viewer
        Viewer the Shapes layer is added to.
    annotations : dict
        Must hold 'edges_coords'; the edge width is read from
        'edges_edge_width' (or legacy 'edge_width') when present.
    edge_color : str, optional
        Color of edges. The default is 'white'.
    name : str, optional
        Name of the created layer. The default is 'edges'.
    """
    # bug fix: get_annotation_dict saves the width under 'edges_edge_width',
    # but only 'edge_width' was looked up here, so the saved width was
    # silently ignored; both keys are now accepted for compatibility
    if 'edges_edge_width' in annotations.keys():
        edge_width = annotations['edges_edge_width']
    elif 'edge_width' in annotations.keys():
        edge_width = annotations['edge_width']
    else:
        edge_width = 1
    viewer.add_shapes(
        annotations['edges_coords'],
        shape_type='line',
        edge_width=edge_width,
        edge_color=edge_color,
        name=name,
        )
    return
def add_annotations(
    viewer,
    annotations,
    layer_nodes_name='nodes',
    layer_edges_name='edges',
    edge_color='white',
    ):
    """
    Add nodes and edges annotations in a napari viewer.

    Nodes and edges layers are only created for the annotation keys
    actually present in `annotations`.
    """
    has_nodes = 'nodes_coords' in annotations.keys()
    has_edges = 'edges_coords' in annotations.keys()
    if has_nodes:
        add_nodes(viewer, annotations, name=layer_nodes_name)
    if has_edges:
        add_edges(viewer, annotations, edge_color=edge_color, name=layer_edges_name)
    return
def assign_nodes_to_edges(nodes, edges):
    """
    Link edges extremities to their closest nodes and compute the matrix
    of pairs of node indices.

    Parameters
    ----------
    nodes : ndarray
        Coordinates of nodes.
    edges : list
        List of (2 x ndim) arrays of edge extremity coordinates.

    Returns
    -------
    new_edges : list
        Edge extremities snapped onto node coordinates.
    pairs : ndarray
        The (n_edges x 2) array of node indices per edge.
    """
    from scipy.spatial import cKDTree
    extremities = np.vstack(edges)
    tree = cKDTree(nodes)
    # index of the closest node for each extremity ('_' discards distances)
    _, flat_ids = tree.query(x=extremities, k=1)
    # successive (start, end) ids are refactored into one row per edge
    pairs = np.vstack((flat_ids[::2], flat_ids[1::2])).T
    new_edges = [np.array(nodes[pair]) for pair in pairs]
    return new_edges, pairs
def update_edges(
    viewer,
    annotations,
    edge_color='white',
    name='edges',
    ):
    """
    Replace edges annotations with new ones in a napari viewer.

    Parameters
    ----------
    viewer : napari Viewer
        Viewer holding the edges layer.
    annotations : dict
        Annotations holding the new 'edges_coords'.
    edge_color : str, optional
        Color of edges. The default is 'white'.
    name : str, optional
        Name of the layer to replace. The default is 'edges'.
    """
    # drop the stale layer first so the new one can take its name
    try:
        del viewer.layers[name]
    except ValueError:
        # edges layer was already deleted
        pass
    add_edges(viewer, annotations, edge_color=edge_color, name=name)
|
21,897 | c63f1c40ad378aedd36c89b578560d0b1b7f6ff6 | import M3Objects
import M3Types
import RTSTypes
import M3Predefined
import ConjTypesInt as CT
import TimerInt as Timer
import M3TypeLib
import M3ProcLib
import CapsuleMap
from Statistics import M3incStat
# Internalise this capsule module's type descriptions once at import time.
M3TL=M3TypeLib.internaliseTypes(r'm3lib/ConjC2CapMod')
class ConjC2(RTSTypes.M3CapsuleRuntimeType):
    """Runtime type for the ConjC2 capsule.

    Stores the capsule's runtime name and level, then performs the three
    generated initialisation phases: connections, the capsule BEGIN..END
    body, and the parameter/port converter tables.
    """

    def __init__(self, level, runtimeName):
        self.runtimeName = runtimeName
        self.__level = level
        RTSTypes.M3CapsuleRuntimeType.__init__(self, level)
        # three-phase initialisation, in the order the generator emits it
        self.__init_capsule_connect()
        self.__init_capsule_begin_end()
        self.__init_param_converters()

    def __init_capsule_connect(self):
        # this capsule declares no connections
        pass

    def __init_capsule_begin_end(self):
        # capsule BEGIN .. END body is empty
        pass

    def __init_param_converters(self):
        # empty converter tuples: parameter values pass through unconverted
        self.M3param_converters = {
            'p1.b': (),
            'p1.a': (),
        }
        # port name -> fully qualified parameter name
        self.M3port_converters = {'a': 'p1.a', 'b': 'p1.b'}
def runcap():
    """Build the top-level ConjC2 capsule and hand it to the simulator."""
    import Simulator
    top_capsule = createCapsule(1, 'top')
    Simulator.run(top_capsule)
def createCapsule(level, hName):
    """Factory used by the runtime to instantiate this capsule module."""
    capsule = ConjC2(level, hName)
    return capsule
# Allow running this generated capsule module directly as a simulation.
if __name__ == '__main__': runcap()
|
21,898 | dd48d582c44060083bd8b789b5e1fc5e4b55c297 | # -*- Python -*-
#
# {project.authors}
# {project.affiliations}
# (c) {project.span} all rights reserved
#
"""
WSGI config for {project.name}
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
# adjust the python path
# NOTE(review): '{project.live.root}' / '{project.name}' look like template
# placeholders expanded at deploy time — this file is a generation template,
# so the exact placeholder strings must be preserved verbatim.
import sys
sys.path = ['{project.live.root}/packages'] + sys.path
# set the environment variable django uses to hunt down application settings
# (setdefault keeps any value already supplied by the server environment)
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{project.name}.settings.live")
# build the application hook
# the WSGI server discovers the module-level name ``application``
import django.core.wsgi
application = django.core.wsgi.get_wsgi_application()
# end of file
|
21,899 | 809d4849e141b89b902a0a2c88e00c865f9e517e |
def open_text_and_replace(path, old_str, new_str):
    """
    Replace every occurrence of ``old_str`` with ``new_str`` in the UTF-8
    text file at ``path``, rewriting the file in place.

    The updated content is printed before writing, and "write ok" is
    printed after a successful write.  I/O errors on either phase are
    caught and printed rather than raised.

    Parameters
    ----------
    path : str
        Path of the text file to rewrite.
    old_str : str
        Substring to search for.
    new_str : str
        Replacement substring.
    """
    try:
        with open(path, 'r', encoding="utf-8") as f:
            new_content = f.read().replace(old_str, new_str)
        print(new_content)
    except IOError as e:
        print(e)
        # Bug fix: stop here on a failed read.  The original fell through
        # and opened ``path`` in 'w' mode, which truncates an unreadable
        # file — and even *creates* an empty file when ``path`` is missing.
        return
    try:
        with open(path, 'w', encoding="utf-8") as f:
            f.write(new_content)
        print("write ok")
    except IOError as e:
        print(e)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.