id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
159452 | <filename>ui_utils.py
#!/usr/bin/env python
from PySide import QtGui
import sys
def catch_error(f):
    """Decorator that traps any unhandled exception raised by *f*.

    On error the traceback is printed to stderr and shown to the user in a
    modal Qt message box instead of letting the application crash.  The
    wrapped function returns None when an exception occurred.
    """
    import functools
    @functools.wraps(f)
    def catch_unhandled_exceptions(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception as e:
            import traceback
            traceback.print_exc()
            # flush so the traceback is visible before the dialog blocks the event loop
            sys.stderr.flush()
            dialog = QtGui.QMessageBox()
            dialog.setWindowTitle('MiasMod')
            dialog.setIcon(QtGui.QMessageBox.Critical)
            dialog.setText(QtGui.QApplication.translate('Errors', 'Unhandled Exception', None, QtGui.QApplication.UnicodeUTF8))
            dialog.setInformativeText('%s: %s' % (e.__class__.__name__, str(e)))
            # full traceback goes into the expandable "details" section
            dialog.setDetailedText(traceback.format_exc())
            dialog.exec_()
            return
    return catch_unhandled_exceptions
# vi:noexpandtab:sw=8:ts=8
| StarcoderdataPython |
99932 | from pyimagesearch import datasets
from pyimagesearch import models
from sklearn.model_selection import train_test_split
from keras.layers.core import Dense
from keras.models import Model
from keras.optimizers import Adam
from keras.layers import concatenate
import tensorflow as tf
from tensorflow import feature_column
import numpy as np
import argparse
import locale
import os
import cv2
#%%
#grab ROI for as input of predict model
import glob
import os
from os import walk
# Output file names for the 12 wrist shots per subject: wavelength
# (770/850/940 nm), hand side (L/R) and shot number.
# NOTE(review): indentation of this script was reconstructed; verify block
# nesting against the original source.
names = ["770L01.png","770L02.png","770R01.png","770R02.png","850L01.png","850L02.png","850R01.png","850R02.png","940L01.png","940L02.png","940R01.png","940R02.png"]
col = 2  # Excel column cursor (NOTE(review): worksheet `ws` used below is never defined in this file — confirm it is created elsewhere)
r = 2    # Excel row cursor
for sub in os.listdir(r"demo\data"):
    path = r"demo\data"
    save_path = r"demo\wrist"  # a path for saving image
    path = os.path.join(path, sub)
    save_path = os.path.join(save_path, sub)
    # NOTE(review): isfile() is False for existing directories too, so
    # makedirs may raise if save_path already exists — probably meant exists()
    if not os.path.isfile(save_path):
        os.makedirs(save_path)
    # print(path)
    cv_img = []
    i = 0
    a = 0  # cycles 0..11 through the `names` list
    for img in os.listdir(path):
        if os.path.join(path, img).endswith(".png"):
            img = cv2.imread(os.path.join(path, img))
            cv_img.append(img)
            # Do histogram equalization
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # turn RGB into GRAY
            hist,bins = np.histogram(gray.flatten(),256,[0,256])
            cdf = hist.cumsum()
            cdf_normalized = cdf * hist.max()/ cdf.max()
            cdf_m = np.ma.masked_equal(cdf,0)  # mask out the zero entries of the CDF
            cdf_m = (cdf_m - cdf_m.min())*255/(cdf_m.max()-cdf_m.min())
            cdf = np.ma.filled(cdf_m,0).astype('uint8')  # fill masked-out entries back with 0
            img2 = cdf[gray.astype(np.uint8)]
            # blur_gray = cv2.GaussianBlur(img2, (101, 101), 0) # Gaussian filter, the kernel must be an odd number
            ret,thresh1 = cv2.threshold(img2,200,255,cv2.THRESH_BINARY)
            _, contours, hierarchy = cv2.findContours(thresh1,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
            try: hierarchy = hierarchy[0]
            except: hierarchy = []
            height, width = thresh1.shape
            min_x, min_y = width, height
            max_x = max_y = 0
            # computes the bounding box for the contour, and draws it on the frame,
            for contour, hier in zip(contours, hierarchy):
                (x,y,w,h) = cv2.boundingRect(contour)
                min_x, max_x = min(x, min_x), max(x+w, max_x)
                min_y, max_y = min(y, min_y), max(y+h, max_y)
            if max_x - min_x > 0 and max_y - min_y > 0:
                cv2.rectangle(img, (int(min_x*1.1), int(min_y*1.0)), (int(max_x*0.95), int(max_y*0.9)), (255, 0, 0), 2)  # draw the adjusted ROI
            x_range = int(max_x*0.95) - int(min_x*1.1)
            # keep the ROI roughly square-ish; shrink the vertical extent when
            # the remaining strip would be too thin
            if int(max_y*0.9) - (int(min_y) + x_range) < abs(int(min_x*1.1) - int(max_x*0.95))/5:
                add = int(max_y*0.9) - int(min_y) - abs(int(min_x*1.1) - int(max_x*0.95))/3
                rect =img2 [(int(min_y) + int(add)):int(max_y*0.9), int(min_x*1.1):int(max_x*0.95)]
            else:
                rect =img2 [(int(min_y) + x_range):int(max_y*0.9), int(min_x*1.1):int(max_x*0.95)]
            cv2.imwrite(os.path.join(save_path, "{}".format(names[a])),rect)
            a += 1
            if a == 12:
                a = 0
            # record the ROI mean intensity in an Excel sheet, 6 values per row
            if col <= 7 :
                ws.cell (row = r, column = col).value = rect.mean()
                col += 1
            else :
                col = 2
                r += 1
                ws.cell (row = r, column = col).value = rect.mean()
                col += 1
#%%
# Load the subject spreadsheet, drop rows without a Name, then discard the
# Name column itself (it is only used for matching image folders).
df = datasets.load_data(r'C:\Users\User\Desktop\Peter\Bone_density\demo\demo.xlsx')
df.dropna(subset = ["Name"], inplace=True)
df = df.reset_index(drop=True)
df.pop("Name")
images_Left = datasets.load_wrist_images(df,r'C:\Users\User\Desktop\Peter\Bone_density\demo\wrist',left_right = "Left")
#%%
# Build the tabular feature columns fed to the MLP branch of the model.
feature_columns = []
feature_layer_inputs = {}
# Sex: one-hot encoded categorical
sex = feature_column.categorical_column_with_vocabulary_list(
    'Sex', ['male', 'female'])
sex_one_hot = feature_column.indicator_column(sex)
feature_columns.append(sex_one_hot)
feature_layer_inputs['Sex'] = tf.keras.Input(shape=(1,), name='Sex', dtype=tf.string)
# Age: bucketized into decade-ish bins
age = feature_column.numeric_column("Age")
age_buckets = feature_column.bucketized_column(age, boundaries=[20, 30, 40, 50, 60, 70])
feature_columns.append(age_buckets)
# demo(age_buckets)
# Menopause: learned embedding over three states
Menopause = feature_column.categorical_column_with_vocabulary_list(
    'Menopause', ['not suit', 'yes', 'no'])
Menopause_embedding = feature_column.embedding_column(Menopause, dimension=6)
feature_columns.append(Menopause_embedding)
feature_layer_inputs['Menopause'] = tf.keras.Input(shape=(1,), name='Menopause', dtype=tf.string)
# demo(Menopause_embedding)
# Bone_injured: one-hot yes/no
Bone_injured = feature_column.categorical_column_with_vocabulary_list(
    'Bone_injured', ['yes', 'no'])
Bone_injured_one_hot = feature_column.indicator_column(Bone_injured)
feature_columns.append(Bone_injured_one_hot)
# Bone_injured_embedding = feature_column.embedding_column(Bone_injured, dimension=8)
feature_layer_inputs['Bone_injured'] = tf.keras.Input(shape=(1,), name='Bone_injured', dtype=tf.string)
# demo(Bone_injured_one_hot)
#%%
# Materialize every feature column into one dense numpy matrix by
# concatenating the per-column DenseFeatures outputs side by side.
test_data = []
first = True
for feature in feature_columns:
    feature_layer = tf.keras.layers.DenseFeatures(feature)
    feature_array = feature_layer(dict(df)).numpy()
    if first:
        test_data=feature_array
        first = False
        continue
    test_data = np.concatenate((test_data, feature_array), axis=1)
    print(feature_layer(dict(df)).numpy())
#%%
import keras
from keras.layers import LeakyReLU
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
# Two-branch model: an MLP over the tabular features and a CNN over the
# left-wrist images, merged into a small regression head.
mlp = models.create_mlp(np.asarray(test_data).shape[1], regress=True)
cnn_left = models.create_cnn(256, 128, 6, regress=False)
# cnn_right = models.create_cnn(256, 128, 6, regress=False)
# create the input to our final set of layers as the *output* of both
# the MLP and CNN
# combinedInput = concatenate([mlp.output, cnn_left.output, cnn_right.output])
combinedInput = concatenate([mlp.output, cnn_left.output])
# our final FC layer head will have two dense layers, the final one
# being our regression head
x = Dense(8, activation=LeakyReLU(alpha=0.2))(combinedInput)
x = Dense(4, activation=LeakyReLU(alpha=0.2))(x)
x = Dense(1)(x)
# our final model will accept categorical/numerical data on the MLP
# model = Model(inputs=[mlp.input, cnn_left.input, cnn_right.input], outputs=x)
my_model = Model(inputs=[mlp.input, cnn_left.input], outputs=x)
my_model.load_weights('Radius_UD_L.h5')
my_model.summary()
#%%
# Run inference and print the predicted bone-density values.
predictions = my_model.predict([test_data, images_Left])
print(predictions)
| StarcoderdataPython |
1722243 | import csv
import logging
import fret
import numpy as np
import torchtext as tt
logger = logging.getLogger(__name__)
class Cutter:
    """Tokenizer that converts a string into a list of word ids.

    Unknown tokens are mapped to id 0.  When ``max_len`` is set, the
    string is split on ``split`` and truncated to ``max_len`` tokens;
    otherwise it is split on arbitrary whitespace (note that ``split``
    is ignored in that case — this mirrors the original behaviour).
    """

    def __init__(self, words, max_len=None, split=' '):
        self.words = words      # id lookup object exposing .get(token)
        self.max_len = max_len  # optional truncation length
        self.split = split      # separator used when max_len is set

    def __call__(self, s):
        if self.max_len:
            tokens = s.split(self.split)[:self.max_len]
        else:
            tokens = s.split()
        ids = []
        for token in tokens:
            # .get() may return None for unknown tokens -> coerce to 0
            ids.append(self.words.get(token) or 0)
        return ids
class Vocab:
    """Bidirectional token <-> integer-id mapping.

    Accepts either an in-memory list of tokens or a path to a
    newline-separated vocabulary file.  The pseudo-token ``'<unk>'`` is
    always present and maps to -1.
    """

    def __init__(self, vocab_list):
        if isinstance(vocab_list, str):
            # argument is a file path: one token per line
            with open(vocab_list) as fh:
                self.itos = fh.read().strip().split('\n')
        else:
            self.itos = vocab_list
        self.stoi = {token: idx for idx, token in enumerate(self.itos)}
        self.stoi['<unk>'] = -1

    def __getitem__(self, item):
        # int -> token, anything else -> id (KeyError on unknown token)
        return self.itos[item] if isinstance(item, int) else self.stoi[item]

    def get(self, item):
        """Return the id for *item*, or None when it is unknown."""
        if item in self.stoi:
            return self.stoi[item]
        return None

    def __len__(self):
        return len(self.itos)
class Questions:
    """Question bank for one dataset: text tokens, knowledge tags, difficulty.

    Loads three files declared in ``fret.app['datasets'][dataset]``
    (question text TSV, question knowledge TSV, question difficulty TSV)
    and keeps only questions present in all three.
    """
    def __init__(self, dataset, maxlen=400):
        self.dataset = dataset
        cfg = fret.app['datasets'][dataset]
        # word and knowledge-tag vocabularies, loaded from file paths
        self._word = Vocab(cfg['word_list'])
        self._know = Vocab(cfg['knowledge_list'])
        self.n_words = len(self._word)
        self.n_knowledge = len(self._know)
        # question text: id + space-separated content, truncated to maxlen tokens
        text_field = tt.data.Field(
            tokenize=Cutter(self._word, maxlen),
            use_vocab=False)
        self._ques_text = tt.data.TabularDataset(
            cfg['question_text_file'],
            format='tsv',
            fields=[('id', tt.data.Field(sequential=False)),
                    ('content', text_field)],
            skip_header=True,
            csv_reader_params={'quoting': csv.QUOTE_NONE})
        # map question id -> row index in the text dataset
        self._ques_text_ind = {item.id: i
                               for i, item in enumerate(self._ques_text)}
        # knowledge tags: id + comma-separated tag list
        knowledge_field = tt.data.Field(
            tokenize=Cutter(self._know, split=','),
            use_vocab=False)
        self._ques_know = tt.data.TabularDataset(
            cfg['question_knowledge_file'],
            format='tsv',
            fields=[('id', tt.data.Field(sequential=False)),
                    ('knowledge', knowledge_field)],
            skip_header=True,
            csv_reader_params={'quoting': csv.QUOTE_NONE})
        # collapse the dataset into a plain dict: question id -> tag ids
        self._ques_know = {item.id: item.knowledge for item in self._ques_know}
        # difficulty file: header line then "<qid>\t<difficulty>" rows
        self._ques_diff = {}
        diff_f = open(cfg['question_difficulty_file'])
        next(diff_f)
        for line in diff_f:
            qid, diff = line.strip().split('\t')
            diff = float(diff)
            self._ques_diff[qid] = diff
        # keep only questions that appear in all three sources
        self._ques_set = set(self._ques_text_ind) & \
            set(self._ques_know) & set(self._ques_diff)
        self.vocab = Vocab(list(sorted(self._ques_set)))
        self.stoi = self.vocab.stoi
        self.itos = self.vocab.itos
        self.n_questions = len(self.vocab)

    def __getitem__(self, index):
        """Return a question record by int index or by question id string.

        The record is a dict with keys 'id', 'text', 'knowledge'
        (multi-hot numpy vector) and 'difficulty', or None for unknown ids.
        """
        if isinstance(index, int):
            qid = self.vocab.itos[index]
        else:
            qid = index
        if qid in self._ques_set:
            # multi-hot knowledge vector
            know = np.zeros((self.n_knowledge,))
            know[self._ques_know[qid]] = 1
            text = self._ques_text[self._ques_text_ind[qid]].content
            if self.dataset == 'poj':
                # poj texts are long; keep only the first 50 tokens
                text = text[:50]
            return {
                'id': qid,
                'text': text,
                'knowledge': know,
                'difficulty': self._ques_diff[qid]
            }
        else:
            return None

    def __iter__(self):
        for i in range(len(self)):
            yield self[i]

    def __len__(self):
        return len(self.vocab)

    @property
    def knowledge(self):
        # knowledge-tag vocabulary
        return self._know

    @property
    def word(self):
        # word vocabulary
        return self._word
def load_embedding(emb_file):
    """Load word embeddings from a text file in word2vec format.

    The first line holds ``"<word_count> <embedding_size>"``; every
    following line is a word followed by its space-separated vector
    components.

    Parameters
    ----------
    emb_file : str
        Path to the UTF-8 embedding file.

    Returns
    -------
    numpy.ndarray
        Array of shape (word_count, embedding_size) with one embedding
        per row, in file order.  The words themselves are not returned.
    """
    # use a context manager so the file handle is always closed
    # (the previous version leaked it)
    with open(emb_file, 'r', encoding='utf-8') as f:
        wcnt, emb_size = next(f).strip().split(' ')
        wcnt, emb_size = int(wcnt), int(emb_size)
        embs = []
        for line in f:
            fields = line.strip().split(' ')
            # fields[0] is the word itself; only the vector is kept
            embs.append(np.array([float(x) for x in fields[1:]]))
    return np.asarray(embs)
class QidField:
    """Maps a raw record token of the form ``"<qid>,<score>"`` to a qid.

    Tokens whose question id is not in the known set collapse to the
    out-of-vocabulary marker ``'<unk>'``.
    """

    def __init__(self, set):
        self._set = set  # collection supporting `in` with known question ids

    def get(self, item):
        qid = item.split(',')[0]
        return qid if qid in self._set else '<unk>'
class ScoreField:
    """Extracts the numeric score from a ``"<qid>,<score>"`` token."""

    def get(self, item):
        parts = item.split(',')
        return float(parts[1])
def load_record(rec_file, q_field):
    """Load a student answer-record TSV into a torchtext ``Dataset``.

    Each row carries a sequence of ``"<qid>,<score>"`` tokens; the
    question ids are mapped through *q_field*'s vocabulary and the
    scores parsed as floats.

    NOTE(review): uses the legacy ``torchtext.data`` API
    (``Example.fromCSV`` etc.), removed in modern torchtext — pin the
    version.  Also ``field_to_index`` maps both fields to 0; verify this
    matches how ``fromCSV`` indexes the row.
    """
    question = tt.data.Field(tokenize=Cutter(QidField(q_field.stoi)))
    # reuse the caller-supplied vocab instead of building one
    question.vocab = q_field
    score = tt.data.Field(tokenize=Cutter(ScoreField()),
                          use_vocab=False)
    fields = {'question': ('question', question), 'score': ('score', score)}
    reader = csv.reader(open(rec_file), quoting=csv.QUOTE_NONE,
                        delimiter='\t')
    field_to_index = {'question': 0, 'score': 0}
    examples = [tt.data.Example.fromCSV(line, fields, field_to_index)
                for line in reader]
    # flatten the fields mapping into the list form Dataset expects
    field_list = []
    for field in fields.values():
        if isinstance(field, list):
            field_list.extend(field)
        else:
            field_list.append(field)
    field_list = field_list
    records = tt.data.Dataset(examples, field_list)
    return records
| StarcoderdataPython |
# Functions such as min(), max() and sorting are only supported for elements
# of the same (mutually comparable) type.
l = [0,1,2,3,4,5]
print("Min ", min(l))
print("Max", max(l))
4815913 | from modelserver.base.base_predictor import BasePredictor
from visatt.model import ModelSpatial
from visatt.utils import imutils, evaluation
from visatt.config import *
from PIL import Image
import torch
from torchvision import datasets, transforms
import numpy as np
from skimage.transform import resize
def _get_transform():
    """Build the image preprocessing pipeline for the gaze model.

    Resizes to ``input_resolution`` (star-imported from visatt.config),
    converts to a tensor and normalizes with the standard ImageNet
    channel mean/std.
    """
    transform_list = []
    # input_resolution=224
    transform_list.append(transforms.Resize((input_resolution,
                                             input_resolution)))
    transform_list.append(transforms.ToTensor())
    transform_list.append(transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                               std=[0.229, 0.224, 0.225]))
    return transforms.Compose(transform_list)

# Shared transform applied to both the full frame and the cropped head.
test_transforms = _get_transform()
class GazeFollower(BasePredictor):
    """Predicts where a person in an image is looking, given their face box.

    Wraps the ``ModelSpatial`` visual-attention network; weights are
    loaded from *weight_path* and the model is put in eval mode.
    """
    def __init__(self, weight_path='/workspace/modelserver/models/weights/visatt.pt'):
        # Visual attention
        self.model = ModelSpatial()
        state_dict = self.model.state_dict()
        if torch.cuda.is_available():
            pretrained_state_dict = torch.load(weight_path)['model']
        else:
            # fall back to CPU deserialization when no GPU is present
            pretrained_state_dict = torch.load(weight_path,
                                               map_location=torch.device('cpu'))['model']
        state_dict.update(pretrained_state_dict)
        self.model.load_state_dict(state_dict)
        # inference only
        self.model.train(False)
        if torch.cuda.is_available():
            self.model.cuda()

    def predict_gaze(self, image, face_bbox):
        """Return the (x, y) image point with the strongest predicted gaze.

        *image* must be a PIL image; *face_bbox* is (left, top, right,
        bottom) in image coordinates.
        """
        width, height = image.size
        head = image.crop(face_bbox)
        head = test_transforms(head)
        frame = test_transforms(image)
        head_channel = imutils.get_head_box_channel(face_bbox[0], face_bbox[1],
                                                    face_bbox[2], face_bbox[3], width, height,
                                                    resolution=input_resolution).unsqueeze(0)
        # NOTE(review): head_channel is unsqueezed here and again below,
        # giving two leading singleton dims — confirm the model expects that.
        if torch.cuda.is_available():
            head = head.unsqueeze(0).cuda()
            frame = frame.unsqueeze(0).cuda()
            head_channel = head_channel.unsqueeze(0).cuda()
        else:
            head = head.unsqueeze(0)
            frame = frame.unsqueeze(0)
            head_channel = head_channel.unsqueeze(0)
        # 3 inputs to model
        raw_hm, _, inout = self.model(frame, head_channel, head)
        # heatmap modulation
        # detach from computational graph
        raw_hm = raw_hm.cpu().detach().numpy() * 255
        raw_hm = raw_hm.squeeze()
        inout = inout.cpu().detach().numpy()
        # sigmoid, then invert and scale so high values mean "in frame"
        inout = 1 / (1 + np.exp(-inout))
        inout = (1 - inout) * 255
        # upscale the heatmap to image size and subtract the out-of-frame score
        norm_map = resize(raw_hm, (height, width)) - inout
        point = evaluation.argmax_pts(norm_map)
        return point
        # return norm_map, raw_hm, inout

    def strongest_pixel(self, width, height, raw_hm):
        """ Returns a single pixel where the focus is predicted to be strongest.
        """
        pred_x, pred_y = evaluation.argmax_pts(raw_hm)
        # rescale from heatmap resolution to image coordinates
        pred_x, pred_y = [pred_x/output_resolution * width,
                          pred_y/output_resolution * height]
        return pred_x, pred_y
| StarcoderdataPython |
141408 | """
Test suite module for ``XGBoost``.
Credits
-------
::
Authors:
- Diptesh
- Madhu
Date: Sep 27, 2021
"""
# pylint: disable=invalid-name
# pylint: disable=wrong-import-position
import unittest
import warnings
import re
import sys
from inspect import getsourcefile
from os.path import abspath
import pandas as pd
from sklearn.model_selection import train_test_split as split
from sklearn import metrics as sk_metrics
# Set base path
path = abspath(getsourcefile(lambda: 0))
path = re.sub(r"(.+)(\/tests.*)", "\\1", path)
sys.path.insert(0, path)
from mllib.lib.tree import RandomForest # noqa: F841
from mllib.lib.tree import XGBoost # noqa: F841
# =============================================================================
# --- DO NOT CHANGE ANYTHING FROM HERE
# =============================================================================
path = path + "/data/input/"
# =============================================================================
# --- User defined functions
# =============================================================================
def ignore_warnings(test_func):
    """Decorator that runs *test_func* with all warnings suppressed.

    Intended for unittest methods whose dependencies emit noisy warnings.

    Parameters
    ----------
    test_func : callable
        A (test) method taking ``(self, *args, **kwargs)``.

    Returns
    -------
    callable
        A wrapper that executes *test_func* inside a
        ``warnings.catch_warnings`` block with the ``"ignore"`` filter.
        ``functools.wraps`` preserves the test's name/docstring so test
        reports stay readable, and the wrapped function's return value
        is propagated (the previous version discarded it).
    """
    import functools

    @functools.wraps(test_func)
    def do_test(self, *args, **kwargs):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            return test_func(self, *args, **kwargs)
    return do_test
class Test_RandomForest(unittest.TestCase):
    """Test suite for module ``RandomForest``.

    Exercises classification, regression and time-series modes against
    fixture data in ``data/input`` and checks accuracy/MSE/R-sq/MAPE
    against empirically chosen thresholds.
    """
    def setUp(self):
        """Set up for module ``RandomForest``."""

    def test_rf_class(self):
        """RandomForest: Test for classification."""
        x_var = ["x1", "x2", "x3", "x4"]
        y_var = "y"
        df_ip = pd.read_csv(path + "iris.csv")
        df_ip = df_ip[[y_var] + x_var]
        # stratified split keeps class balance in the 20% hold-out
        df_train, df_test = split(df_ip,
                                  stratify=df_ip[y_var],
                                  test_size=0.2,
                                  random_state=42)
        mod = RandomForest(df_train, y_var, x_var, method="classify")
        y_hat = mod.predict(df_test[x_var])[y_var].tolist()
        y = df_test[y_var].values.tolist()
        acc = round(sk_metrics.accuracy_score(y, y_hat), 2)
        self.assertGreaterEqual(acc, 0.93)

    def test_rf_reg(self):
        """RandomForest: Test for regression."""
        x_var = ["x1", "x2", "x3", "x4"]
        y_var = "y"
        df_ip = pd.read_csv(path + "iris.csv")
        df_ip = df_ip[[y_var] + x_var]
        df_train, df_test = split(df_ip,
                                  stratify=df_ip[y_var],
                                  test_size=0.2,
                                  random_state=42)
        mod = RandomForest(df_train, y_var, x_var, method="regression")
        y_hat = mod.predict(df_test[x_var])[y_var].tolist()
        y = df_test[y_var].values.tolist()
        mse = round(sk_metrics.mean_squared_error(y, y_hat), 2)
        self.assertLessEqual(mse, 0.1)

    def test_rf_ts_exog(self):
        """RandomForest: Test for time series with exogenous variables"""
        x_var = ["cost"]
        y_var = "y"
        test_perc = 0.2
        df_ip = pd.read_excel(path + "test_time_series.xlsx",
                              sheet_name="exog")
        df_ip = df_ip.set_index("ts")
        # chronological split: first 80% to train, last 20% to test
        df_train = df_ip.iloc[0:int(len(df_ip) * (1-test_perc)), :]
        df_test = df_ip.iloc[int(len(df_ip) * (1-test_perc)): len(df_ip), :]
        df_test = df_test[x_var]
        mod = RandomForest(df_train, y_var, x_var, method="timeseries")
        mod.predict(df_test)
        metrics = mod.model_summary
        self.assertGreaterEqual(metrics["rsq"], 0.8)
        self.assertLessEqual(metrics["mape"], 0.5)

    def test_rf_ts_endog(self):
        """RandomForest: Test for time series with endogenous variable"""
        y_var = "y"
        df_ip = pd.read_excel(path + "test_time_series.xlsx",
                              sheet_name="exog")
        df_ip = df_ip.set_index("ts")
        mod = RandomForest(df_ip, y_var, method="timeseries")
        mod.predict()
        metrics = mod.model_summary
        self.assertGreaterEqual(metrics["rsq"], 0.7)
        self.assertLessEqual(metrics["mape"], 0.7)
class Test_XGBoost(unittest.TestCase):
    """Test suite for module ``XGBoost``.

    Mirrors the RandomForest suite: classification, regression and
    time-series modes against the same fixture data.
    """
    def setUp(self):
        """Set up for module ``XGBoost``."""

    def test_xgboost_class(self):
        """XGBoost: Test for classification."""
        x_var = ["x1", "x2"]
        y_var = "y"
        df_ip = pd.read_csv(path + "iris.csv")
        df_ip = df_ip[[y_var] + x_var]
        df_train, df_test = split(df_ip,
                                  stratify=df_ip[y_var],
                                  test_size=0.2,
                                  random_state=1)
        mod = XGBoost(df_train, y_var, x_var, method="classify")
        y_hat = mod.predict(df_test[x_var])[y_var].tolist()
        y = df_test[y_var].values.tolist()
        acc = round(sk_metrics.accuracy_score(y, y_hat), 2)
        self.assertGreaterEqual(acc, 0.93)

    def test_xgboost_reg(self):
        """XGBoost: Test for regression."""
        x_var = ["x1", "x2", "x3", "x4"]
        y_var = "y"
        df_ip = pd.read_csv(path + "iris.csv")
        df_ip = df_ip[[y_var] + x_var]
        df_train, df_test = split(df_ip,
                                  stratify=df_ip[y_var],
                                  test_size=0.2,
                                  random_state=1)
        mod = XGBoost(df_train, y_var, x_var, method="regression")
        y_hat = mod.predict(df_test[x_var])[y_var].tolist()
        y = df_test[y_var].values.tolist()
        mse = round(sk_metrics.mean_squared_error(y, y_hat), 2)
        self.assertLessEqual(mse, 0.5)

    def test_xgboost_ts_exog(self):
        """XGBoost: Test for time series with exogenous variables"""
        x_var = ["cost"]
        y_var = "y"
        test_perc = 0.2
        df_ip = pd.read_excel(path + "test_time_series.xlsx",
                              sheet_name="exog")
        df_ip = df_ip.set_index("ts")
        # chronological split: first 80% to train, last 20% to test
        df_train = df_ip.iloc[0:int(len(df_ip) * (1-test_perc)), :]
        df_test = df_ip.iloc[int(len(df_ip) * (1-test_perc)): len(df_ip), :]
        df_test = df_test[x_var]
        mod = XGBoost(df_train, y_var, x_var, method="timeseries")
        mod.predict(df_test)
        metrics = mod.model_summary
        self.assertAlmostEqual(1.0, metrics["rsq"], places=1)
        self.assertLessEqual(metrics["mape"], 0.1)

    def test_xgboost_ts_endog(self):
        """XGBoost: Test for time series with endogenous variable"""
        y_var = "y"
        df_ip = pd.read_excel(path + "test_time_series.xlsx",
                              sheet_name="exog")
        df_ip = df_ip.set_index("ts")
        mod = XGBoost(df_ip, y_var, method="timeseries")
        mod.predict()
        metrics = mod.model_summary
        self.assertAlmostEqual(1.0, metrics["rsq"], places=1)
        self.assertLessEqual(metrics["mape"], 0.1)
# =============================================================================
# --- Main
# =============================================================================
if __name__ == '__main__':
    # Run the full suite when executed as a script.
    unittest.main()
| StarcoderdataPython |
48097 | """
This module is used to generate correlation (R) and regression (b)
coefficients for relationships between the 2015 Census,
2018 Yale Climate Opinion Maps (YCOM) and land area datasets,
as well as p values for these relationships.
"""
import numpy as np
import pandas as pd
from scipy.stats import linregress
def calculate_stats_outputs(n_ycom, n_census, ycom_county, census):
    """Fit a simple linear regression for every (YCOM, census) variable pair.

    Rows where the census value is missing are dropped, then the census
    variable is regressed on the YCOM variable (YCOM as x, census as y).

    Parameters
    ----------
    n_ycom : list of str
        Names of the YCOM columns to analyse.
    n_census : list of str
        Names of the census columns to analyse.
    ycom_county, census : pandas.DataFrame
        Row-aligned data frames containing the columns above.

    Returns
    -------
    numpy.ndarray
        Shape (len(n_ycom), len(n_census), 5); the last axis holds
        (slope, intercept, r-value, p-value, std-err) from
        ``scipy.stats.linregress``.
    """
    results = np.zeros((len(n_ycom), len(n_census), 5))
    for row, ycom_name in enumerate(n_ycom):
        for col, census_name in enumerate(n_census):
            # keep only rows with a non-null census value
            mask = census[census_name].notnull()
            x_vals = ycom_county[ycom_name][mask]
            y_vals = census[census_name][mask]
            results[row, col, :] = linregress(x_vals, y_vals)
    return results
def calculate_stats_outputs_standard(n_ycom, n_census, ycom_county, census):
    """Like :func:`calculate_stats_outputs`, but on standardized census data.

    Each census column is standardized as
    ``(column - mean(column)) / std(column)`` before the regression, so
    slopes are comparable across census variables.

    Parameters
    ----------
    n_ycom : list of str
        Names of the YCOM columns to analyse.
    n_census : list of str
        Names of the census columns to analyse.
    ycom_county, census : pandas.DataFrame
        Row-aligned data frames containing the columns above.

    Returns
    -------
    numpy.ndarray
        Shape (len(n_ycom), len(n_census), 5); last axis is
        (slope, intercept, r-value, p-value, std-err).
    """
    results = np.zeros((len(n_ycom), len(n_census), 5))
    for row, ycom_name in enumerate(n_ycom):
        for col, census_name in enumerate(n_census):
            mask = census[census_name].notnull()
            x_vals = ycom_county[ycom_name][mask]
            y_vals = census[census_name][mask]
            # standardize the census variable (population std, as np.std does)
            y_standard = (y_vals - np.mean(y_vals)) / np.std(y_vals)
            results[row, col, :] = linregress(x_vals, y_standard)
    return results
def get_regs_df(stats_outputs_standard, n_census, n_ycom):
    """Return the slope (regression-coefficient) matrix as a labelled DataFrame.

    When fed the standardized outputs, each entry is the change in an
    opinion variable per one-standard-deviation change in a census
    variable.  Rows are YCOM variables, columns are census variables.
    """
    slopes = stats_outputs_standard[:, :, 0]
    return pd.DataFrame(slopes, index=n_ycom, columns=n_census)
def get_cors_df(stats_outputs, n_census, n_ycom):
    """Return the correlation-coefficient (r-value) matrix as a DataFrame.

    Rows are YCOM variables, columns are census variables; values come
    from slice index 2 of the linregress output (the r-value).
    """
    r_values = stats_outputs[:, :, 2]
    return pd.DataFrame(r_values, index=n_ycom, columns=n_census)
def get_pvalues_df(stats_outputs, n_census, n_ycom):
    """Return the p-value matrix as a labelled DataFrame.

    Rows are YCOM variables, columns are census variables; values come
    from slice index 3 of the linregress output (the p-value).
    """
    p_values = stats_outputs[:, :, 3]
    return pd.DataFrame(p_values, index=n_ycom, columns=n_census)
| StarcoderdataPython |
3366013 | <gh_stars>0
from sqlalchemy import Column
from serialchemy import ModelSerializer
def _get_identity(cls):
args = getattr(cls, '__mapper_args__', None)
return args.get('polymorphic_identity') if args is not None else None
def _get_identity_key(cls):
    """Return the mapped attribute name of the polymorphic discriminator.

    Resolves the ``polymorphic_on`` Column of *cls* back to the ORM
    attribute name (which may differ from the database column name).

    Raises
    ------
    AttributeError
        If no mapped attribute corresponds to the discriminator column.
    """
    identityColumn = cls.__mapper_args__['polymorphic_on']
    # only Column-based discriminators are supported here
    assert isinstance(identityColumn, Column)
    column_db_name = identityColumn.key
    for attribute_name, attribute in cls.__mapper__.c.items():
        if attribute.key == column_db_name:
            return attribute_name
    raise AttributeError(f"'polymorphic_on' attribute set incorrectly, are you sure it should be {column_db_name}?")
def is_sqlalchemy_polymorphic(cls):
    """Tell whether *cls* is the root of a SQLAlchemy polymorphic hierarchy.

    True only when the class declares ``__mapper_args__`` with a
    non-None ``'polymorphic_on'`` entry.
    """
    if not hasattr(cls, '__mapper_args__'):
        return False
    return cls.__mapper_args__.get('polymorphic_on') is not None
class PolymorphicModelSerializer(ModelSerializer):
    """
    Serializer for models that have a common base class. Can be used as
    serializer for endpoints that have objects from different classes
    (but have a common base).

    Dispatches load/dump to a per-subclass serializer chosen by the
    SQLAlchemy polymorphic identity found in the payload/model.
    """
    def __init__(self, declarative_class):
        super().__init__(declarative_class)
        if is_sqlalchemy_polymorphic(declarative_class):
            self.is_polymorphic = True
            # identity value -> serializer for the matching subclass
            self.sub_serializers = self._get_sub_serializers(declarative_class)
            # attribute name carrying the discriminator in serialized dicts
            self.identity_key = _get_identity_key(declarative_class)
        else:
            self.is_polymorphic = False

    @classmethod
    def _get_sub_serializers(cls, declarative_class):
        """Build one serializer per model subclass.

        If a PolymorphicModelSerializer subclass declares the same
        identity (via ``__model_class__``), it is used; otherwise this
        class itself serves as the serializer for that subclass.
        """
        serializers_sub_class_map = {
            sub_cls.get_identity(): sub_cls for sub_cls in cls.__subclasses__() if sub_cls.get_identity()
        }
        return {
            _get_identity(sub_cls): serializers_sub_class_map.get(_get_identity(sub_cls), cls)(sub_cls)
            for sub_cls in declarative_class.__subclasses__()
        }

    @classmethod
    def get_identity(cls):
        # identity of the model class this serializer was declared for, if any
        return _get_identity(cls.__model_class__) if hasattr(cls, '__model_class__') else None

    def load(self, serialized, existing_model=None, session=None):
        """Deserialize, delegating to the subclass serializer when the
        payload carries a known polymorphic identity."""
        if self.is_polymorphic:
            model_identity = serialized.get(self.identity_key)
            if model_identity and self.sub_serializers.get(model_identity):
                return self.sub_serializers[model_identity].load(serialized, existing_model, session)
        return super().load(serialized, existing_model, session)

    def dump(self, model):
        """Serialize, delegating to the subclass serializer matching the
        model instance's polymorphic identity when one is registered."""
        if self.is_polymorphic:
            model_identity = _get_identity(model)
            if model_identity in self.sub_serializers:
                return self.sub_serializers[model_identity].dump(model)
        return super().dump(model)
| StarcoderdataPython |
3387464 | # todo: write tests, y'know, when you have time (-: | StarcoderdataPython |
4823487 | <reponame>italogsfernandes/emg-moviments-classifier<filename>python-hand-movements-classifier/convert_database_to_new_format.py<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Coding at 6:48 and listening to:
Rock is Dead - <NAME>
Dance D'Amour - The 69 Eyes
Wake Up - Rage Against the Machine
Clubbed to Death - <NAME>.
@author: italo
"""
#%% Importing the libraries
import pandas as pd # reading files
import numpy as np # handling numerical data
import matplotlib.pyplot as plt # Plotting
from scipy import signal
###############################
#%% Adding the path to datasets
###############################
# Description of the analysed movements:
# Movement Number - Movement Name
# 1 -> Supinar
# 2 -> Pronar
# 3 -> Pinçar
# 4 -> Fechar
# 5 -> Estender
# 6 -> Flexionar
# This should be the output of the classifier. It should classify each moviment
# in one of this classes.
#########################
#%% Importing the dataset
#########################
# The file name refering to the folder where this script is located
# - emg-movements-classifier
# - datasets
# - coletas
# - Eber
# - LH
# - Miguel... etc
# - python-hand_moviments-classifier
# - app_procedimentos
# - app_procedures.py
# Opening a file and reading it to a dataFrame object
# sep means separator, the files have no headers
# After reading it, we add the names of each column in the dataset.
# At end, we select the 4 channels as a numpy vector and we save it in
# emg_channels.
# The trigger is saved in emg_trigger.
# Load the eight recording files of one volunteer and concatenate them.
# Each file: 6 semicolon-separated columns (CH1..CH4, Trigger, unused).
volunteer_id = '<NAME>'
volunteer_id_number = 1
print("Opening files of volunteer %d - %s" % (volunteer_id_number, volunteer_id))
print("Opening part 1.1...")
dataset_pt11 = pd.read_table('datasets/coletas/'+volunteer_id+'11-Final.txt', sep=';', header=None)
dataset_pt11.columns = 'CH1 CH2 CH3 CH4 Trigger None'.split()
print("Opening part 1.2...")
dataset_pt12 = pd.read_table('datasets/coletas/'+volunteer_id+'12-Final.txt', sep=';', header=None)
dataset_pt12.columns = 'CH1 CH2 CH3 CH4 Trigger None'.split()
print("Opening part 1.3...")
dataset_pt13 = pd.read_table('datasets/coletas/'+volunteer_id+'13-Final.txt', sep=';', header=None)
dataset_pt13.columns = 'CH1 CH2 CH3 CH4 Trigger None'.split()
print("Opening part 1.4...")
dataset_pt14 = pd.read_table('datasets/coletas/'+volunteer_id+'14-Final.txt', sep=';', header=None)
dataset_pt14.columns = 'CH1 CH2 CH3 CH4 Trigger None'.split()
print("Opening part 2.1...")
dataset_pt21 = pd.read_table('datasets/coletas/'+volunteer_id+'21-Final.txt', sep=';', header=None)
dataset_pt21.columns = 'CH1 CH2 CH3 CH4 Trigger None'.split()
print("Opening part 2.2...")
dataset_pt22 = pd.read_table('datasets/coletas/'+volunteer_id+'22-Final.txt', sep=';', header=None)
dataset_pt22.columns = 'CH1 CH2 CH3 CH4 Trigger None'.split()
print("Opening part 2.3...")
dataset_pt23 = pd.read_table('datasets/coletas/'+volunteer_id+'23-Final.txt', sep=';', header=None)
dataset_pt23.columns = 'CH1 CH2 CH3 CH4 Trigger None'.split()
print("Opening part 2.4...")
dataset_pt24 = pd.read_table('datasets/coletas/'+volunteer_id+'24-Final.txt', sep=';', header=None)
dataset_pt24.columns = 'CH1 CH2 CH3 CH4 Trigger None'.split()
print('*'*30)
dt_frames = [dataset_pt11, dataset_pt12, dataset_pt13, dataset_pt14,
             dataset_pt21, dataset_pt22, dataset_pt23, dataset_pt24]
dataset = pd.concat(dt_frames)
# split into raw channels (all but last two cols) and the trigger column
emg_channels = dataset.iloc[:, :-2].values
emg_trigger = dataset.iloc[:, -2].values
# free the per-part frames to keep memory down
dataset_pt11 = None
dataset_pt12 = None
dataset_pt13 = None
dataset_pt14 = None
dataset_pt21 = None
dataset_pt22 = None
dataset_pt23 = None
dataset_pt24 = None
dt_frames = None
dataset = None
# Here we do the same for obtaining a numpy vector with the movements
# executed in each peak of the trigger: one movement id (1..6) per trigger pulse.
print("Reading targets...")
targets_pt11 = pd.read_table('datasets/coletas/'+volunteer_id+'11-Resposta.txt', header=None)
targets_pt12 = pd.read_table('datasets/coletas/'+volunteer_id+'12-Resposta.txt', header=None)
targets_pt13 = pd.read_table('datasets/coletas/'+volunteer_id+'13-Resposta.txt', header=None)
targets_pt14 = pd.read_table('datasets/coletas/'+volunteer_id+'14-Resposta.txt', header=None)
targets_pt21 = pd.read_table('datasets/coletas/'+volunteer_id+'21-Resposta.txt', header=None)
targets_pt22 = pd.read_table('datasets/coletas/'+volunteer_id+'22-Resposta.txt', header=None)
targets_pt23 = pd.read_table('datasets/coletas/'+volunteer_id+'23-Resposta.txt', header=None)
targets_pt24 = pd.read_table('datasets/coletas/'+volunteer_id+'24-Resposta.txt', header=None)
targets_frames = [targets_pt11, targets_pt12, targets_pt13, targets_pt14,
                  targets_pt21, targets_pt22, targets_pt23, targets_pt24]
targets = pd.concat(targets_frames)
targets_pt11 = None
targets_pt12 = None
targets_pt13 = None
targets_pt14 = None
targets_pt21 = None
targets_pt22 = None
targets_pt23 = None
targets_pt24 = None
targets_frames = None
# flatten to a 1-D vector of movement ids
targets = targets.iloc[:, :].values.ravel()
print('*'*30)
#####################
#%% Signal constants
#####################
# The empirical delay time between the signal saying to execute a movement and
# the start of some movement by the volunteer.
# We guess a time of 250ms, this means 500 data points at a sampling frequency
# of 2 kHz.
# This is a delay applied to the TRIGGER to sync it with the SIGNAL.
delay_trigger = 500 # amount of points to delay
fs = 2000 # Sampling frequency in Hz
#########################
#%% Correcting the trigger
#########################
# Representation of why the signals need syncing
# Before correction:
# emg signal:     __________.  .||||||||-.._____________
#                           ''||||||||-''
# trigger signal:       ________________
#                  _____|                |_____________
#
# After Correction:
# emg signal:     __________.  .||||||||-.._____________
#                           ''||||||||-''
# trigger signal:           ________________
#                  _________|                |_____________
#
# Prepend `delay_trigger` zeros to shift the trigger right, and drop the
# same number of trailing points so the length stays unchanged.
print("Correcting Trigger...")
emg_trigger_corrected = np.append(arr = np.zeros(delay_trigger),
                                  values = emg_trigger[:-delay_trigger])
print('*'*30)
###########################
#%% downsampling to 1000Hz
###########################
# Keep every other sample (2 kHz -> 1 kHz) for all three signals.
print("downsampling...")
emg_channels = emg_channels[range(0,len(emg_channels),2),:]
emg_trigger = emg_trigger[range(0,len(emg_trigger),2)]
emg_trigger_corrected = emg_trigger_corrected[range(0,len(emg_trigger_corrected),2)]
print('*'*30)
###########################
#%% Normalizing
###########################
# Scale channels to [-1, 1] by the global absolute maximum, and binarize
# the trigger with a 0.7 threshold.
print("Normalizing")
maximum = max([abs(emg_channels.max()), abs(emg_channels.min())])
emg_channels = emg_channels / maximum
emg_trigger = np.array(emg_trigger > 0.7, dtype=np.uint8)
emg_trigger_corrected = np.array(emg_trigger_corrected > 0.7, dtype=np.uint8)
print('*'*30)
#####################
#%% Contraction sites
#####################
# Find the rising/falling edges of the binarized trigger: diff() is nonzero
# at transitions, so even-indexed edges are onsets and odd-indexed offsets.
print("Calculating Contraction Sites")
s3= np.array(emg_trigger_corrected, dtype=np.int8)
s3[s3==0] = -1 # replace zeros with -1
s4=np.where(np.diff(s3))[0]+1
contractions_onsets = s4[np.arange(0,len(s4),2)]
contractions_offsets = s4[np.arange(1,len(s4),2)]
s3 = None
s4 = None
print('*'*30)
###############################
#%% OUTPUT SIGNAL
###############################
# Replace each trigger pulse with its movement id (1..6), yielding a
# per-sample label vector (0 = rest).
print("Generating output signal...")
output_signal = emg_trigger_corrected
contractions_lenght = contractions_offsets - contractions_onsets
for n in range(len(contractions_onsets)):
    cont_index = np.arange(contractions_onsets[n],contractions_offsets[n])
    cont_values = targets[n] * np.ones(contractions_lenght[n])
    output_signal[cont_index] = cont_values
print('*'*30)
###############################
#%% creating new file
###############################
# Bundle channels + label into one CSV per volunteer.
print("Creating new dataframe...")
output_data_frame = pd.DataFrame(columns=['CH1', 'CH2', 'CH3', 'CH4', 'OUTPUT'])
output_data_frame['CH1'] = emg_channels[:,0]
output_data_frame['CH2'] = emg_channels[:,1]
output_data_frame['CH3'] = emg_channels[:,2]
output_data_frame['CH4'] = emg_channels[:,3]
output_data_frame['OUTPUT'] = output_signal
print('*'*30)
print("Writing new dataframe to file..")
file_name_output = 'datasets/volunteer_'+str(volunteer_id_number)+'.csv'
output_data_frame.to_csv(path_or_buf=file_name_output,header=True)
print('*'*30)
# TODO: add SQLAlchemy support
###############################
#%% Optional: Plotting the data
###############################
# Plot a window of all four channels with the (scaled) label overlaid,
# to visually verify trigger/label alignment.
print("Done!")
print("Now plotting!")
print(":)")
fig = plt.figure()
fig.suptitle("Voluntario: " + str(volunteer_id_number))
axes = [None for i in range(4)]
for i in range(4):
    axes[i] = plt.subplot(4,1,i+1)
    plt.plot(emg_channels[12000:80000,i])
    plt.plot(output_signal[12000:80000]/10.0)
    plt.title('Ch ' + str(i+1))
    plt.ylim((-1,1))
    plt.grid()
# share axes across the four subplots and hide redundant x tick labels
axes[0].get_shared_x_axes().join(axes[0],axes[1],axes[2],axes[3])
axes[0].get_shared_y_axes().join(axes[0],axes[1],axes[2],axes[3])
axes[0].set_xticklabels([])
axes[1].set_xticklabels([])
axes[2].set_xticklabels([])
plt.show()
| StarcoderdataPython |
3274114 | <reponame>DariaMinieieva/sudoku_project<gh_stars>1-10
"""This module visualizes the solved sudoku."""
from copy import deepcopy
import tkinter as tk
import tkinter.font as tkFont
from board import Board
class SudokuDrawer:
    """Solve a sudoku puzzle read from a file and display the result with Tkinter."""

    def __init__(self, board_file: str) -> None:
        """Create a drawer for the puzzle stored in ``board_file``."""
        self.board_file = board_file
        self.board = self.read_board()
        # Keep a deep copy so the solver never mutates self.board.
        self.sudoku = Board(deepcopy(self.board))

    def read_board(self) -> list:
        """Parse the comma-separated board file into a list of rows of ints."""
        with open(self.board_file, encoding='utf-8') as board_f:
            return [[int(cell) for cell in row.strip().split(',')]
                    for row in board_f]

    def draw_matrix(self, matrix):
        """Render ``matrix`` in a 500x500 window; solver-filled cells are green."""
        root = tk.Tk()
        root.geometry("500x500")
        font_style = tkFont.Font(family="Lucida Grande", size=20)
        cell = 500 / 9
        for row in range(9):
            for col in range(9):
                # Cells that differ from the original board were filled by the solver.
                solver_filled = self.board[row][col] != matrix[row][col]
                text = tk.Text(root, fg='black',
                               bg='green' if solver_filled else 'white',
                               font=font_style)
                text.place(x=col * cell, y=row * cell)
                text.insert(tk.END, matrix[row][col])
        # Draw the two vertical and two horizontal 3x3-box separators.
        for k in (1, 2):
            vert = tk.Frame(root, highlightbackground="black",
                            highlightthickness=1, width=2, height=500)
            horiz = tk.Frame(root, highlightbackground="black",
                             highlightthickness=1, width=500, height=2)
            vert.place(x=500 * k / 3, y=0)
            horiz.place(x=0, y=500 * k / 3)
        root.mainloop()

    @staticmethod
    def draw_no_solution():
        """Show a window telling the user the puzzle has no solution."""
        root = tk.Tk()
        root.geometry("400x100")
        message = "There is no possible solution to solve sudoku"
        label = tk.Label(root, text=message, justify='center',
                         font=("Lucida Grande", 14))
        label.pack(pady=20)
        root.mainloop()

    def solve_sudoku(self):
        """Run the backtracking solver and display the outcome."""
        solution = self.sudoku.backtracking()
        if not solution:
            self.draw_no_solution()
        else:
            self.draw_matrix(solution)
if __name__ == '__main__':
    # Entry point: solve the bundled puzzle and show the result.
    SudokuDrawer('board.txt').solve_sudoku()
| StarcoderdataPython |
132141 | <reponame>zagm/resolwe
# pylint: disable=missing-docstring
from versionfield import VersionField
from django.db import models
from resolwe.flow.models.fields import ResolweSlugField
class TestModel(models.Model):
    """Minimal model exercising ResolweSlugField: the slug is auto-populated
    from ``name`` and must be unique together with ``version``."""
    name = models.CharField(max_length=30)
    # Slug derived from `name`; uniqueness is scoped per `version`.
    slug = ResolweSlugField(populate_from='name', unique_with='version')
    version = VersionField(default='0.0.0')
| StarcoderdataPython |
def f(x):
    # Mutually recursive with g(); there is no base case, so any call
    # raises RecursionError. Presumably a deliberate recursion fixture.
    return g(x)
def g(x):
    # Mutually recursive with f(); no base case -- see note on f().
    return f(x)
# NOTE(review): this call never terminates normally -- f/g recurse into each
# other with no base case, so importing this module raises RecursionError.
y = f(1)
| StarcoderdataPython |
114943 | import random
import csv
from typing import Iterator, Union, List
from torch.utils.data.sampler import Sampler
import numpy as np
from ..datapaths import DATAPATHS_MAPPING
class LengthTrainSampler(Sampler):
    """Length-bucketed batch sampler for training (see __init__ for details)."""

    def __init__(
        self,
        source: str,
        field: str,
        max_len: float, # 16K * 320
        max_pool_difference: float, # 16K * 0.3
        min_pool_size: int = 512,
        num_batches: Union[int, None] = None,
    ):
        """
        This batch_sampler groups the source into sample pools of examples with similar length meeting criterias defined
        by 'max_pool_difference' and 'min_pool_size'. Batches of close to, but never more than, 'max_len', are
        constructed by first sampling a pool and then sampling each batch from from within that pool.

        Args:
            source (object): Dataset name (key of DATAPATHS_MAPPING) or CSV filepath for which the sampler will be used.
            field (str): The CSV column containing the relevant length information.
            max_len (float): The maximum total length of a batch (in samples).
            max_pool_difference (float): The maximum length difference between shortest and longest sample a pool.
            min_pool_size (float): The minimum number of examples in a pool. Overwrites max_pool_difference.
            num_batches (int or None): Samples num_batches (with replacement if necessary) instead of running a standard epoch.
        """
        self.source = source
        self.field = field
        self.max_len = max_len
        self.max_pool_difference = max_pool_difference
        self.min_pool_size = min_pool_size
        self.num_batches = num_batches
        self.buffer = [] # only used when num_batches is not None
        self.source_filepath = DATAPATHS_MAPPING[source] if source in DATAPATHS_MAPPING else source
        self.lengths = self.load_lengths(self.source_filepath)
        self.pools = self.create_sample_pools(max_pool_difference, min_pool_size)
        self.batches = self.sample_batches()
        # Every example must fit in a batch on its own, otherwise packing fails.
        assert self.lengths.max() < self.max_len, "One or more examples are longer than the maximum length."

    def load_lengths(self, source_filepath):
        """
        Loads the example lengths into an array with same order as the examples of the source dataset.
        """
        with open(source_filepath, newline='') as source_file_buffer:
            reader = csv.DictReader(source_file_buffer)
            lengths = [int(row[self.field]) for row in reader]
        return np.array(lengths)

    def create_sample_pools(self, max_diff, min_size):
        """Creates the sample pools. Can be used to change to the sampling criteria without creating a new sampler."""
        start, end = 0, 0
        # Work over indices sorted by length so each pool is a contiguous slice.
        sorted_idxs = np.argsort(self.lengths)
        sorted_lens = self.lengths[sorted_idxs]
        pools = []
        while end != len(self.lengths):
            base_len = sorted_lens[start]
            deltas = sorted_lens - base_len
            # Number of examples within max_diff of the pool's shortest example.
            pool_size = np.logical_and(0 <= deltas, deltas < max_diff).sum()
            end = min(max(start + min_size, start + pool_size), len(self.lengths))
            # Fold a too-small trailing remainder into the last pool.
            if (len(self.lengths) - end) < min_size:
                end = len(self.lengths)
            pools.append(sorted_idxs[start:end].tolist())
            start = end
        return pools

    def sample_batches(self):
        """Sample batches from the pools."""
        if self.num_batches is not None:
            # Serve batches from the buffer; refill (below) when it runs dry.
            if len(self.buffer) >= self.num_batches:
                batches = self.buffer[:self.num_batches]
                self.buffer = self.buffer[self.num_batches:]
                return batches
        ordered_idxs = np.concatenate([random.sample(p, k=len(p)) for p in self.pools]) # shuffle each pool internally
        # Greedily pack consecutive indices until adding one would exceed max_len.
        batch, batches, batch_len = [], [], 0
        for idx in ordered_idxs:
            l = self.lengths[idx]
            if batch_len + l <= self.max_len:
                batch_len += l
                batch.append(idx)
            else:
                batches.append(batch)
                batch = [idx]
                batch_len = l
        # NOTE(review): the final, partially-filled `batch` is discarded when the
        # loop ends, so the last few examples of each pass are never trained on.
        # If drop-last is not intended, append it before shuffling -- confirm.
        # batch_idxs = (self.lengths[ordered_idxs].cumsum() // self.max_len).astype(int)
        # split_points = np.bincount(batch_idxs).cumsum()[:-1] # the last split is implicit
        # batches = np.array_split(ordered_idxs, split_points)
        # batches = list(map(lambda x: x.tolist(), batches))
        random.shuffle(batches) # shuffle the order of batches
        if self.num_batches is not None:
            # Keep refilling the buffer until it can serve num_batches, then recurse
            # to take them from the buffer.
            self.buffer += batches
            return self.sample_batches()
        return batches

    def __iter__(self) -> Iterator[List[int]]:
        try:
            for batch in self.batches:
                yield batch
        finally:
            self.batches = self.sample_batches() # to ensure batches are resampled if interrupted

    def __len__(self):
        return len(self.batches)
class LengthEvalSampler(Sampler):
    def __init__(
        self,
        source: str,
        field: str,
        max_len: float
    ):
        """
        Batch sampler for evaluation: sorts the examples by length and greedily
        packs consecutive indices into batches whose summed length never
        exceeds 'max_len'. Deterministic -- no shuffling, no pools.

        Args:
            source (str): Dataset name (key of DATAPATHS_MAPPING) or a CSV filepath.
            field (str): The CSV column containing the example lengths.
            max_len (float): The maximum total length of a batch.
        """
        self.source = source
        self.field = field
        self.max_len = max_len

        self.source_filepath = DATAPATHS_MAPPING[source] if source in DATAPATHS_MAPPING else source
        self.lengths = self.load_lengths(self.source_filepath)
        self.batches = self.sample_batches()

    def load_lengths(self, source_filepath):
        """Loads the example lengths into an array with same order as the examples of the source dataset."""
        with open(source_filepath, newline='') as source_file_buffer:
            reader = csv.DictReader(source_file_buffer)
            lengths = [int(row[self.field]) for row in reader]
        return np.array(lengths)

    def sample_batches(self):
        """Greedily packs length-sorted example indices into batches of at most 'max_len'."""
        sorted_idxs = np.argsort(self.lengths)
        batch, batches, batch_len = [], [], 0
        for idx in sorted_idxs:
            l = self.lengths[idx]
            if batch_len + l <= self.max_len:
                batch_len += l
                batch.append(idx)
            else:
                # Guard against emitting an empty batch when a single example
                # exceeds max_len on its own.
                if batch:
                    batches.append(batch)
                batch = [idx]
                batch_len = l
        # Bug fix: the trailing batch used to be dropped here, so the longest
        # examples of the dataset were silently excluded from evaluation.
        if batch:
            batches.append(batch)
        return batches

    def __iter__(self) -> Iterator[List[int]]:
        for batch in self.batches:
            yield batch

    def __len__(self):
        return len(self.batches)
4836974 | from django.urls import path, include
from demo_app import calculator_api
# URL routes for the demo app: requests to 'add/' are handled by the
# calculator API's `add` view.
urlpatterns = [
    path('add/', calculator_api.add),
]
| StarcoderdataPython |
1784717 | <reponame>Yoann-Vie/esgi-hearthstone<filename>tests/test_runner_apps/tagged/tests.py
from unittest import TestCase
from django.test import tag
@tag('slow')
class TaggedTestCase(TestCase):
    """Fixture exercising Django's test tagging: the class carries the 'slow'
    tag and each method adds its own tag(s) via the @tag decorator."""

    @tag('fast')
    def test_single_tag(self):
        # Trivial assertion -- only the tags matter for this fixture.
        self.assertEqual(1, 1)

    @tag('fast', 'core')
    def test_multiple_tags(self):
        # Trivial assertion -- only the tags matter for this fixture.
        self.assertEqual(1, 1)
| StarcoderdataPython |
3371293 | import sys
sys.path.append("Mask_RCNN")
from mrcnn.model import MaskRCNN
from waldo_config import Waldoconfig
import numpy as np
import skimage.draw
from PIL import Image
if __name__ == '__main__':
    # Run Mask R-CNN inference on one image and highlight the detected
    # "Waldo" regions in colour over a greyscale copy of the image.
    config = Waldoconfig(predict=True)
    config.display()

    model = MaskRCNN(mode="inference", config=config,
                     model_dir=config.MODEL_DIR)

    # argv[1]: path to trained weights; argv[2]: path to the input image.
    weights_path = sys.argv[1]
    print("weights_path: ", weights_path)
    model.load_weights(weights_path, by_name=True)

    image = skimage.io.imread(sys.argv[2])
    # masks: boolean array with one channel per detected instance
    # (empty last axis when nothing is detected).
    masks = model.detect([image], verbose=1)[0]["masks"]
    print("Masks:", masks)

    gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255
    # True wherever at least one instance mask covers the pixel.
    mask_filter = (np.sum(masks, -1, keepdims=True) >= 1)
    # Bug fix: the original tested mask_filter.shape[0] (the image height,
    # always > 0), making the "not found" branch unreachable. Test whether
    # any pixel is actually masked instead.
    if mask_filter.any():
        waldo = np.where(mask_filter, image, gray).astype(np.uint8)
        img = Image.fromarray(waldo, 'RGB')
        img.show()
    else:
        print("Can't find Waldo. Hmm..")
| StarcoderdataPython |
159194 | <filename>utils/plot_metrics.py
""" A script to plot validation metrics produced by the model.
"""
from collections import namedtuple
import numpy as np
import pandas as pd
import plotly.graph_objects as go
# Number of training steps between successive metric rows in the input file.
_STEPS_PER_ITERATION = 5000
# One plotted line: the data series, its legend label, and its colour.
TraceSettings = namedtuple("TraceSettings", ["y", "name", "color"])
def plot_metrics(filename):
    """Read a tab-separated metrics file and return a Plotly figure with one
    line per metric (recall, precision, accuracy, F1, loss, ROC AUC)."""
    df = pd.read_csv(filename, sep="\t")
    # F1 is the harmonic mean of precision and recall.
    df["f1"] = 2 * (df.precision * df.recall) / (df.precision + df.recall)

    traces = [
        TraceSettings(y=df.recall, name="recall", color="red"),
        TraceSettings(y=df.precision, name="precision", color="blue"),
        TraceSettings(y=df.accuracy, name="accuracy", color="green"),
        TraceSettings(y=df.f1, name="f1", color="black"),
        TraceSettings(y=df.loss, name="loss", color="orange"),
        TraceSettings(y=df.roc_auc, name="roc_auc", color="darkcyan"),
    ]

    # Convert row index to training-step count for the x axis.
    steps = np.arange(len(df)) * _STEPS_PER_ITERATION
    fig = go.Figure()
    for trace in traces:
        fig.add_trace(
            go.Scatter(
                x=steps,
                y=trace.y,
                mode="lines",
                name=trace.name,
                line_color=trace.color,
            )
        )
    fig.update_layout(title=filename, xaxis_title="iteration")
    return fig
| StarcoderdataPython |
3248948 | from typing import List
from ..parser.ast import AST, Node
from ..scanner.tokens import Tokens
from ..semantic import COMPUTATION_NODES, CONSTANT_NODES
class CodeGenerator:
    """
    Generates code for the dc language from an AST of the ac language.
    """
    def __init__(self, ast: AST):
        # `generated` accumulates emitted dc code fragments in order.
        self.ast = ast
        self.generated = list()

    def visit_assignment(self, node: Node) -> None:
        """
        Visits an assignment node and emits dc code for that assignment
        :param node: the assignment node to visit and emit dc code for
        """
        self.codegen(node.right())
        self.emit("s")
        self.emit(f"{node.left().value}") # emit ID
        # Reset precision after the store.
        self.emit("0 k")

    def visit_computation(self, node: Node) -> None:
        """
        Visits a computation node and emits dc code for that computation
        :param node: the computation node to visit and emit dc code for
        """
        # Emit both operands (postfix order), then the operator.
        self.codegen(node.left())
        self.codegen(node.right())
        self.emit("+" if node.type == Tokens.PLUS else "-")

    def visit_reference(self, node: Node) -> None:
        """
        Visits a reference node and emits the ID of that node
        :param node: the reference node to visit and emit dc code for
        """
        # "l<id>" loads the register named after the identifier.
        self.emit("l")
        self.emit(node.value)

    def visit_print(self, node: Node) -> None:
        """
        Visits a print node and emits the value of the symbol referenced in that node
        :param node: the print node to visit and emit dc code for
        """
        self.emit("l")
        self.emit(node.value)
        self.emit("p")
        # Pop the printed value into the throwaway register "i".
        self.emit("si")

    def visit_convert(self, node: Node) -> None:
        """
        Visits a convert node, emit the value of child, and emit dc code to change the
        precision level to five decimal places.
        :param node: the convert node to visit and emit dc code for
        """
        self.emit(node.child().value)
        self.emit("5 k")

    def visit_constant(self, node: Node) -> None:
        """
        Visits a constant node and emits its value in dc code.
        :param node: the constant node to visit and emit dc code for
        """
        self.emit(node.value)

    def codegen(self, node: Node) -> None:
        """
        Generates dc code by calling the relevant visitor method for the given node.
        :param node: the node to generate dc code for.
        """
        # NOTE(review): children are traversed here AND the assignment /
        # computation visitors call codegen() on node.left()/node.right()
        # themselves. If left()/right() are members of node.children, operand
        # code is emitted twice -- confirm against the Node API in ..parser.ast.
        for child in node.children:
            self.codegen(child)
        if node.type == Tokens.ASSIGN:
            self.visit_assignment(node)
        elif node.type in COMPUTATION_NODES:
            self.visit_computation(node)
        elif node.type == Tokens.ID:
            self.visit_reference(node)
        elif node.type == Tokens.PRINT:
            self.visit_print(node)
        elif node.type == Tokens.CONVERT:
            self.visit_convert(node)
        elif node.type in CONSTANT_NODES:
            self.visit_constant(node)

    def emit(self, code: str) -> None:
        """
        Append generated code to the list of produced code.
        :param code: the code string to append to the list of generated code
        """
        self.generated.append(code)

    def generate(self) -> List[str]:
        """
        Generate dc code from the AST produced by the parser
        :return: the list of generated dc code statements
        """
        self.codegen(self.ast.root)
        return self.generated
| StarcoderdataPython |
37381 | #!/usr/bin/env python
# =============================================================================
# MODULE DOCSTRING
# =============================================================================
"""
Test pipeline functions in pipeline.py.
"""
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
from yank.pipeline import *
# =============================================================================
# TESTS
# =============================================================================
def test_compute_min_dist():
    """Test computation of minimum distance between two molecules"""
    # np.float was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # float is the type it aliased.
    mol1_pos = np.array([[-1, -1, -1], [1, 1, 1]], float)
    mol2_pos = np.array([[3, 3, 3], [3, 4, 5]], float)
    mol3_pos = np.array([[2, 2, 2], [2, 4, 5]], float)
    # Closest pair is mol1's (1,1,1) and mol3's (2,2,2): distance sqrt(3).
    assert compute_min_dist(mol1_pos, mol2_pos, mol3_pos) == np.sqrt(3)
def test_compute_min_max_dist():
    """Test compute_min_max_dist() function."""
    mol1_pos = np.array([[-1, -1, -1], [1, 1, 1]])
    mol2_pos = np.array([[2, 2, 2], [2, 4, 5]])  # fixes the minimum distance
    mol3_pos = np.array([[3, 3, 3], [3, 4, 5]])  # fixes the maximum distance
    min_dist, max_dist = compute_min_max_dist(mol1_pos, mol2_pos, mol3_pos)
    expected_min = np.linalg.norm(mol1_pos[1] - mol2_pos[0])
    expected_max = np.linalg.norm(mol1_pos[1] - mol3_pos[1])
    assert min_dist == expected_min
    assert max_dist == expected_max
# ==============================================================================
# SETUP PIPELINE UTILITY FUNCTIONS
# ==============================================================================
def test_remove_overlap():
    """Test function remove_overlap()."""
    # np.float was removed in NumPy 1.24; use the builtin float it aliased.
    mol1_pos = np.array([[-1, -1, -1], [1, 1, 1]], float)
    mol2_pos = np.array([[1, 1, 1], [3, 4, 5]], float)
    mol3_pos = np.array([[2, 2, 2], [2, 4, 5]], float)
    # mol1 initially overlaps mol2 exactly at (1,1,1).
    assert compute_min_dist(mol1_pos, mol2_pos, mol3_pos) < 0.1
    mol1_pos = remove_overlap(mol1_pos, mol2_pos, mol3_pos, min_distance=0.1, sigma=2.0)
    assert compute_min_dist(mol1_pos, mol2_pos, mol3_pos) >= 0.1
def test_pull_close():
    """Test function pull_close()."""
    # np.float was removed in NumPy 1.24; use the builtin float it aliased.
    mol1_pos = np.array([[-1, -1, -1], [1, 1, 1]], float)
    mol2_pos = np.array([[-1, -1, -1], [1, 1, 1]], float)
    mol3_pos = np.array([[10, 10, 10], [13, 14, 15]], float)
    translation2 = pull_close(mol1_pos, mol2_pos, 1.5, 5)
    translation3 = pull_close(mol1_pos, mol3_pos, 1.5, 5)
    assert isinstance(translation2, np.ndarray)
    # After translation both molecules must lie within the [1.5, 5] band.
    assert 1.5 <= compute_min_dist(mol1_pos, mol2_pos + translation2) <= 5
    assert 1.5 <= compute_min_dist(mol1_pos, mol3_pos + translation3) <= 5
def test_pack_transformation():
    """Test function pack_transformation()."""
    BOX_SIZE = 5
    CLASH_DIST = 1

    # np.float was removed in NumPy 1.24; use the builtin float it aliased.
    mol1 = np.array([[-1, -1, -1], [1, 1, 1]], float)
    mols = [np.copy(mol1),  # distance = 0
            mol1 + 2 * BOX_SIZE]  # distance > box

    # Affine (homogeneous) coordinates: append a column of ones.
    mols_affine = [np.append(mol, np.ones((2, 1)), axis=1) for mol in mols]

    transformations = [pack_transformation(mol1, mol2, CLASH_DIST, BOX_SIZE) for mol2 in mols]
    for mol, transf in zip(mols_affine, transformations):
        assert isinstance(transf, np.ndarray)
        mol2 = mol.dot(transf.T)[:, :3]  # transform and "de-affine"
        min_dist, max_dist = compute_min_max_dist(mol1, mol2)
        # Transformed molecule must neither clash nor leave the box.
        assert CLASH_DIST <= min_dist and max_dist <= BOX_SIZE
| StarcoderdataPython |
196563 | <filename>cv_drawing_stuff.py
# @Author: <NAME> <varoon>
# @Date: 15-08-2017
# @Filename: cv_drawing_stuff.py
# @Last modified by: varoon
# @Last modified time: 15-08-2017
import numpy
import cv2
# Demo of OpenCV's drawing primitives on a blank 512x512 BGR canvas.
image = numpy.zeros((512,512,3),numpy.uint8) # black image to draw on
cv2.line(image, (384,0),(511,511), (255,0,0),5) # image, start point, end point, BGR color, thickness
cv2.rectangle(image, (384,0), (510,128),(0,255,0),3) # image, top-left, bottom-right, color, thickness
cv2.circle(image, (477,63),63,(0,0,255),-1) # image, center, radius, color, thickness (-1 = filled)
cv2.ellipse(image, (256,256),(100,50),0,0,180,255,-1) # image, center, (major, minor) half-axes, rotation angle, start/end angle in degrees, color, thickness
# Draw a closed polygon from an Nx1x2 int32 point array.
points = numpy.array([[10,5],[20,30],[70,20],[50,10]], numpy.int32)
points = points.reshape((-1,1,2))
cv2.polylines(image ,[points],True,(0,255,255)) # True gives a closed shape; False joins the points without closing
# Note: polylines can also draw many independent line segments in one call.
cv2.putText(image, 'OpenCV',(10,500),cv2.FONT_HERSHEY_SIMPLEX,4,(255,255,255),2,cv2.LINE_AA) # image, text, bottom-left origin, font, scale, color, thickness, line type
cv2.imshow('Drawing In OpenCV Example', image)
cv2.waitKey(0) # block until a key is pressed
| StarcoderdataPython |
57337 | <reponame>stanfordnlp/spinn<gh_stars>100-1000
"""From the project root directory (containing data files), this can be run with:
Boolean logic evaluation:
python -m spinn.models.fat_classifier --training_data_path ../bl-data/pbl_train.tsv \
--eval_data_path ../bl-data/pbl_dev.tsv
SST sentiment (Demo only, model needs a full GloVe embeddings file to do well):
python -m spinn.models.fat_classifier --data_type sst --training_data_path sst-data/train.txt \
--eval_data_path sst-data/dev.txt --embedding_data_path spinn/tests/test_embedding_matrix.5d.txt \
--model_dim 10 --word_embedding_dim 5
SNLI entailment (Demo only, model needs a full GloVe embeddings file to do well):
python -m spinn.models.fat_classifier --data_type snli --training_data_path snli_1.0/snli_1.0_dev.jsonl \
--eval_data_path snli_1.0/snli_1.0_dev.jsonl --embedding_data_path spinn/tests/test_embedding_matrix.5d.txt \
--model_dim 10 --word_embedding_dim 5
Note: If you get an error starting with "TypeError: ('Wrong number of dimensions..." during development,
there may already be a saved checkpoint in ckpt_path that matches the name of the model you're developing.
Move or delete it as appropriate.
"""
from functools import partial
import os
import pprint
import sys
import gflags
from theano import tensor as T
import theano
import numpy as np
from spinn import afs_safe_logger
from spinn import util
from spinn.data.boolean import load_boolean_data
from spinn.data.sst import load_sst_data
from spinn.data.snli import load_snli_data
import spinn.fat_stack
import spinn.plain_rnn
import spinn.cbow
# Module-wide gflags registry; the flag definitions themselves live elsewhere
# in the spinn package / command-line setup.
FLAGS = gflags.FLAGS
def build_sentence_model(cls, vocab_size, seq_length, tokens, transitions,
                         num_classes, training_mode, ground_truth_transitions_visible, vs,
                         initial_embeddings=None, project_embeddings=False, ss_mask_gen=None, ss_prob=0.0):
    """
    Construct a classifier which makes use of some hard-stack model.

    Args:
      cls: Hard stack class to use (from e.g. `spinn.fat_stack`)
      vocab_size:
      seq_length: Length of each sequence provided to the stack model
      tokens: Theano batch (integer matrix), `batch_size * seq_length`
      transitions: Theano batch (integer matrix), `batch_size * seq_length`
      num_classes: Number of output classes
      training_mode: A Theano scalar indicating whether to act as a training model
        with dropout (1.0) or to act as an eval model with rescaling (0.0).
      ground_truth_transitions_visible: A Theano scalar. If set (1.0), allow the model access
        to ground truth transitions. This can be disabled at evaluation time to force Model 1
        (or 2S) to evaluate in the Model 2 style with predicted transitions. Has no effect on Model 0.
      vs: Variable store.
      initial_embeddings: Optional pretrained embedding matrix passed to the stack model.
      project_embeddings: If True, learn a linear projection from word embedding
        space to model space; otherwise the two dims must match.
      ss_mask_gen: RNG used for scheduled-sampling masks.
      ss_prob: Scheduled-sampling probability of using ground-truth transitions.

    Returns:
      (predicted transitions, classification logits) as Theano expressions.
    """
    # Prepare layer which performs stack element composition.
    if cls is spinn.plain_rnn.RNN:
        # Plain RNN baseline: sequential GRU/LSTM instead of tree composition.
        if FLAGS.use_gru:
            compose_network = partial(util.GRULayer,
                                      initializer=util.HeKaimingInitializer())
        else:
            compose_network = partial(util.LSTMLayer,
                                      initializer=util.HeKaimingInitializer())
        embedding_projection_network = None
    elif cls is spinn.cbow.CBOW:
        # Bag-of-words baseline: no composition at all.
        compose_network = None
        embedding_projection_network = None
    else:
        if FLAGS.lstm_composition:
            if FLAGS.use_gru:
                compose_network = partial(util.TreeGRULayer,
                                          initializer=util.HeKaimingInitializer())
            else:
                compose_network = partial(util.TreeLSTMLayer,
                                          initializer=util.HeKaimingInitializer())
        else:
            assert not FLAGS.connect_tracking_comp, "Can only connect tracking and composition unit while using TreeLSTM"
            compose_network = partial(util.ReLULayer,
                                      initializer=util.HeKaimingInitializer())
        if project_embeddings:
            embedding_projection_network = util.Linear
        else:
            assert FLAGS.word_embedding_dim == FLAGS.model_dim, \
                "word_embedding_dim must equal model_dim unless a projection layer is used."
            embedding_projection_network = util.IdentityLayer
    # Build hard stack which scans over input sequence.
    sentence_model = cls(
        FLAGS.model_dim, FLAGS.word_embedding_dim, vocab_size, seq_length,
        compose_network, embedding_projection_network, training_mode, ground_truth_transitions_visible, vs,
        predict_use_cell=FLAGS.predict_use_cell,
        use_tracking_lstm=FLAGS.use_tracking_lstm,
        tracking_lstm_hidden_dim=FLAGS.tracking_lstm_hidden_dim,
        X=tokens,
        transitions=transitions,
        initial_embeddings=initial_embeddings,
        embedding_dropout_keep_rate=FLAGS.embedding_keep_rate,
        ss_mask_gen=ss_mask_gen,
        ss_prob=ss_prob,
        connect_tracking_comp=FLAGS.connect_tracking_comp,
        context_sensitive_shift=FLAGS.context_sensitive_shift,
        context_sensitive_use_relu=FLAGS.context_sensitive_use_relu,
        use_input_batch_norm=False)
    # Extract top element of final stack timestep.
    # NOTE(review): for (Tree)LSTM outputs only the h half of the [h; c]
    # representation is kept, hence model_dim / 2. This code presumably runs
    # under Python 2, where `/` on ints is integer division -- under Python 3
    # this slice index would be a float; confirm target interpreter.
    if FLAGS.lstm_composition or cls is spinn.plain_rnn.RNN:
        sentence_vector = sentence_model.final_representations[:,:FLAGS.model_dim / 2].reshape((-1, FLAGS.model_dim / 2))
        sentence_vector_dim = FLAGS.model_dim / 2
    else:
        sentence_vector = sentence_model.final_representations.reshape((-1, FLAGS.model_dim))
        sentence_vector_dim = FLAGS.model_dim
    sentence_vector = util.BatchNorm(sentence_vector, sentence_vector_dim, vs, "sentence_vector", training_mode)
    sentence_vector = util.Dropout(sentence_vector, FLAGS.semantic_classifier_keep_rate, training_mode)
    # Feed forward through a single output layer
    logits = util.Linear(
        sentence_vector, sentence_vector_dim, num_classes, vs,
        name="semantic_classifier", use_bias=True)
    return sentence_model.transitions_pred, logits
def build_sentence_pair_model(cls, vocab_size, seq_length, tokens, transitions,
                              num_classes, training_mode, ground_truth_transitions_visible, vs,
                              initial_embeddings=None, project_embeddings=False, ss_mask_gen=None, ss_prob=0.0):
    """
    Construct a classifier which makes use of some hard-stack model.

    Builds two stack models (premise and hypothesis), combines their final
    representations into pair features, and classifies with an MLP / ResNet /
    Highway head selected by FLAGS.classifier_type.

    Args:
      cls: Hard stack class to use (from e.g. `spinn.fat_stack`)
      vocab_size:
      seq_length: Length of each sequence provided to the stack model
      tokens: Theano batch (integer matrix), `batch_size * seq_length * 2`
        (last axis: premise, hypothesis)
      transitions: Theano batch (integer matrix), `batch_size * seq_length * 2`
      num_classes: Number of output classes
      training_mode: A Theano scalar indicating whether to act as a training model
        with dropout (1.0) or to act as an eval model with rescaling (0.0).
      ground_truth_transitions_visible: A Theano scalar. If set (1.0), allow the model access
        to ground truth transitions. This can be disabled at evaluation time to force Model 1
        (or 2S) to evaluate in the Model 2 style with predicted transitions. Has no effect on Model 0.
      vs: Variable store.
      initial_embeddings: Optional pretrained embedding matrix passed to both stacks.
      project_embeddings: If True, learn a linear projection to model space.
      ss_mask_gen: RNG used for scheduled-sampling masks.
      ss_prob: Scheduled-sampling probability of using ground-truth transitions.

    Returns:
      (premise transition predictions, hypothesis transition predictions, logits).
    """
    # Prepare layer which performs stack element composition.
    if cls is spinn.plain_rnn.RNN:
        if FLAGS.use_gru:
            compose_network = partial(util.GRULayer,
                                      initializer=util.HeKaimingInitializer())
        else:
            compose_network = partial(util.LSTMLayer,
                                      initializer=util.HeKaimingInitializer())
        embedding_projection_network = None
    elif cls is spinn.cbow.CBOW:
        compose_network = None
        embedding_projection_network = None
    else:
        if FLAGS.lstm_composition:
            if FLAGS.use_gru:
                compose_network = partial(util.TreeGRULayer,
                                          initializer=util.HeKaimingInitializer())
            else:
                compose_network = partial(util.TreeLSTMLayer,
                                          initializer=util.HeKaimingInitializer())
        else:
            assert not FLAGS.connect_tracking_comp, "Can only connect tracking and composition unit while using TreeLSTM"
            compose_network = partial(util.ReLULayer,
                                      initializer=util.HeKaimingInitializer())
        if project_embeddings:
            embedding_projection_network = util.Linear
        else:
            assert FLAGS.word_embedding_dim == FLAGS.model_dim, \
                "word_embedding_dim must equal model_dim unless a projection layer is used."
            embedding_projection_network = util.IdentityLayer
    # Split the two sentences
    premise_tokens = tokens[:, :, 0]
    hypothesis_tokens = tokens[:, :, 1]
    premise_transitions = transitions[:, :, 0]
    hypothesis_transitions = transitions[:, :, 1]
    # Build two hard stack models which scan over input sequences.
    premise_model = cls(
        FLAGS.model_dim, FLAGS.word_embedding_dim, vocab_size, seq_length,
        compose_network, embedding_projection_network, training_mode, ground_truth_transitions_visible, vs,
        predict_use_cell=FLAGS.predict_use_cell,
        use_tracking_lstm=FLAGS.use_tracking_lstm,
        tracking_lstm_hidden_dim=FLAGS.tracking_lstm_hidden_dim,
        X=premise_tokens,
        transitions=premise_transitions,
        initial_embeddings=initial_embeddings,
        embedding_dropout_keep_rate=FLAGS.embedding_keep_rate,
        ss_mask_gen=ss_mask_gen,
        ss_prob=ss_prob,
        connect_tracking_comp=FLAGS.connect_tracking_comp,
        context_sensitive_shift=FLAGS.context_sensitive_shift,
        context_sensitive_use_relu=FLAGS.context_sensitive_use_relu,
        use_attention=FLAGS.use_attention,
        initialize_hyp_tracking_state=FLAGS.initialize_hyp_tracking_state)
    # The hypothesis stack can attend over the premise's stack tops, and can
    # inherit the premise's final tracking cell state.
    premise_stack_tops = premise_model.stack_tops if FLAGS.use_attention != "None" else None
    premise_tracking_c_state_final = premise_model.tracking_c_state_final if cls not in [spinn.plain_rnn.RNN,
                                                                                         spinn.cbow.CBOW] else None
    hypothesis_model = cls(
        FLAGS.model_dim, FLAGS.word_embedding_dim, vocab_size, seq_length,
        compose_network, embedding_projection_network, training_mode, ground_truth_transitions_visible, vs,
        predict_use_cell=FLAGS.predict_use_cell,
        use_tracking_lstm=FLAGS.use_tracking_lstm,
        tracking_lstm_hidden_dim=FLAGS.tracking_lstm_hidden_dim,
        X=hypothesis_tokens,
        transitions=hypothesis_transitions,
        initial_embeddings=initial_embeddings,
        embedding_dropout_keep_rate=FLAGS.embedding_keep_rate,
        ss_mask_gen=ss_mask_gen,
        ss_prob=ss_prob,
        connect_tracking_comp=FLAGS.connect_tracking_comp,
        context_sensitive_shift=FLAGS.context_sensitive_shift,
        context_sensitive_use_relu=FLAGS.context_sensitive_use_relu,
        use_attention=FLAGS.use_attention,
        premise_stack_tops=premise_stack_tops,
        is_hypothesis=True,
        initialize_hyp_tracking_state=FLAGS.initialize_hyp_tracking_state,
        premise_tracking_c_state_final=premise_tracking_c_state_final)
    # Extract top element of final stack timestep.
    if FLAGS.use_attention == "None" or FLAGS.use_difference_feature or FLAGS.use_product_feature:
        premise_vector = premise_model.final_representations
        hypothesis_vector = hypothesis_model.final_representations
        # NOTE(review): as in build_sentence_model, only the h half of [h; c]
        # is kept and `/ 2` presumably relies on Python 2 integer division.
        if (FLAGS.lstm_composition and cls is not spinn.cbow.CBOW) or cls is spinn.plain_rnn.RNN:
            premise_vector = premise_vector[:,:FLAGS.model_dim / 2].reshape((-1, FLAGS.model_dim / 2))
            hypothesis_vector = hypothesis_vector[:,:FLAGS.model_dim / 2].reshape((-1, FLAGS.model_dim / 2))
            sentence_vector_dim = FLAGS.model_dim / 2
        else:
            premise_vector = premise_vector.reshape((-1, FLAGS.model_dim))
            hypothesis_vector = hypothesis_vector.reshape((-1, FLAGS.model_dim))
            sentence_vector_dim = FLAGS.model_dim
    if FLAGS.use_attention != "None":
        # Use the attention weighted representation
        h_dim = FLAGS.model_dim / 2
        mlp_input = hypothesis_model.final_weighed_representation.reshape((-1, h_dim))
        mlp_input_dim = h_dim
    else:
        # Create standard MLP features
        mlp_input = T.concatenate([premise_vector, hypothesis_vector], axis=1)
        mlp_input_dim = 2 * sentence_vector_dim
    # Optional extra pair features: elementwise difference and product.
    if FLAGS.use_difference_feature:
        mlp_input = T.concatenate([mlp_input, premise_vector - hypothesis_vector], axis=1)
        mlp_input_dim += sentence_vector_dim
    if FLAGS.use_product_feature:
        mlp_input = T.concatenate([mlp_input, premise_vector * hypothesis_vector], axis=1)
        mlp_input_dim += sentence_vector_dim
    mlp_input = util.BatchNorm(mlp_input, mlp_input_dim, vs, "sentence_vectors", training_mode)
    mlp_input = util.Dropout(mlp_input, FLAGS.semantic_classifier_keep_rate, training_mode)
    # Classifier head: residual, highway, or plain MLP stack over the pair features.
    if FLAGS.classifier_type == "ResNet":
        features = util.Linear(
            mlp_input, mlp_input_dim, FLAGS.sentence_pair_combination_layer_dim, vs,
            name="resnet/linear", use_bias=True)
        features_dim = FLAGS.sentence_pair_combination_layer_dim
        for layer in range(FLAGS.num_sentence_pair_combination_layers):
            features = util.HeKaimingResidualLayerSet(features, features_dim, vs, training_mode, name="resnet/" + str(layer),
                dropout_keep_rate=FLAGS.semantic_classifier_keep_rate, depth=FLAGS.resnet_unit_depth,
                initializer=util.HeKaimingInitializer())
            features = util.BatchNorm(features, features_dim, vs, "combining_mlp/" + str(layer), training_mode)
            features = util.Dropout(features, FLAGS.semantic_classifier_keep_rate, training_mode)
    elif FLAGS.classifier_type == "Highway":
        features = util.Linear(
            mlp_input, mlp_input_dim, FLAGS.sentence_pair_combination_layer_dim, vs,
            name="resnet/linear", use_bias=True)
        features_dim = FLAGS.sentence_pair_combination_layer_dim
        for layer in range(FLAGS.num_sentence_pair_combination_layers):
            features = util.HighwayLayer(features, features_dim, vs, training_mode, name="highway/" + str(layer),
                dropout_keep_rate=FLAGS.semantic_classifier_keep_rate,
                initializer=util.HeKaimingInitializer())
            features = util.BatchNorm(features, features_dim, vs, "combining_mlp/" + str(layer), training_mode)
            features = util.Dropout(features, FLAGS.semantic_classifier_keep_rate, training_mode)
    else:
        # Apply a combining MLP
        features = mlp_input
        features_dim = mlp_input_dim
        for layer in range(FLAGS.num_sentence_pair_combination_layers):
            features = util.ReLULayer(features, features_dim, FLAGS.sentence_pair_combination_layer_dim, vs,
                name="combining_mlp/" + str(layer),
                initializer=util.HeKaimingInitializer())
            features_dim = FLAGS.sentence_pair_combination_layer_dim
            features = util.BatchNorm(features, features_dim, vs, "combining_mlp/" + str(layer), training_mode)
            features = util.Dropout(features, FLAGS.semantic_classifier_keep_rate, training_mode)
    # Feed forward through a single output layer
    logits = util.Linear(
        features, features_dim, num_classes, vs,
        name="semantic_classifier", use_bias=True)
    return premise_model.transitions_pred, hypothesis_model.transitions_pred, logits
def build_cost(logits, targets):
    """
    Build the mean cross-entropy cost and the accuracy for a batch of
    classification logits against integer class targets.
    """
    # Bound the magnitude of gradients flowing back from the cost.
    clip = FLAGS.clipping_max_value
    clipped_logits = theano.gradient.grad_clip(logits, -1. * clip, clip)

    class_probs = T.nnet.softmax(clipped_logits)
    cost = T.nnet.categorical_crossentropy(class_probs, targets).mean()

    predictions = T.argmax(clipped_logits, axis=1)
    errors = T.cast(T.neq(predictions, targets), theano.config.floatX)
    acc = 1. - T.mean(errors)

    return cost, acc
def build_transition_cost(logits, targets, num_transitions):
    """
    Build a parse action prediction cost function.

    Args:
      logits: per-timestep transition logits, `batch * seq_length * n_actions`.
      targets: gold transition ids, `batch * seq_length`.
      num_transitions: per-example count of real (non-padding) transitions.

    Returns:
      (mean cross-entropy cost over all timesteps, accuracy over real transitions).
    """
    # swap seq_length dimension to front so that we can scan per timestep
    logits = T.swapaxes(logits, 0, 1)
    targets = targets.T
    def cost_t(logits, tgt, num_transitions):
        # TODO(jongauthier): Taper down xent cost as we proceed through
        # sequence?
        predicted_dist = T.nnet.softmax(logits)
        cost = T.nnet.categorical_crossentropy(predicted_dist, tgt)
        pred = T.argmax(logits, axis=1)
        error = T.neq(pred, tgt)
        return cost, error
    results, _ = theano.scan(cost_t, [logits, targets], non_sequences=[num_transitions])
    costs, errors = results
    # Create a mask that selects only transitions that involve real data.
    # Sequences are front-padded, so timestep t (1-based) is real when
    # t > (unrolling_length - num_transitions) for that example.
    unrolling_length = T.shape(costs)[0]
    padding = unrolling_length - num_transitions
    padding = T.reshape(padding, (1, -1))
    rng = T.arange(unrolling_length) + 1
    rng = T.reshape(rng, (-1, 1))
    mask = T.gt(rng, padding)
    # Compute acc using the mask
    acc = 1.0 - (T.sum(errors * mask, dtype=theano.config.floatX)
                 / T.sum(num_transitions, dtype=theano.config.floatX))
    # Compute cost directly, since we *do* want a cost incentive to get the padding
    # transitions right.
    cost = T.mean(costs)
    return cost, acc
def evaluate(eval_fn, eval_set, logger, step):
    """Run `eval_fn` over every batch of an eval set and log mean accuracies.

    Args:
        eval_fn: Compiled theano evaluation function.
        eval_set: Pair of (name, batch iterator).
        logger: Logger with a `.Log(str)` method.
        step: Current training step, for the log line.

    Returns:
        Mean classification accuracy over all batches.
    """
    total_acc = 0.0
    total_action_acc = 0.0
    num_batches = 0.0
    for X_batch, transitions_batch, y_batch, num_transitions_batch in eval_set[1]:
        batch_acc, batch_action_acc = eval_fn(
            X_batch, transitions_batch, y_batch, num_transitions_batch,
            0.0,  # Eval mode: Don't apply dropout.
            int(FLAGS.allow_gt_transitions_in_eval),    # Allow GT transitions per flag.
            float(FLAGS.allow_gt_transitions_in_eval))  # If flag not set, scheduled-sampling
                                                        # p(ground truth) = 0.0, else 1.0.
        total_acc += batch_acc
        total_action_acc += batch_action_acc
        num_batches += 1.0
    mean_acc = total_acc / num_batches
    logger.Log("Step: %i\tEval acc: %f\t %f\t%s" %
               (step, mean_acc, total_action_acc / num_batches, eval_set[0]))
    return mean_acc
def evaluate_expanded(eval_fn, eval_set, eval_path, logger, step, sentence_pair_data, ind_to_word, predict_transitions):
    """
    Write the gold parses and predicted parses in the files <eval_out_path>.gld and <eval_out_path>.tst
    respectively. These files can be given as inputs to Evalb to evaluate parsing performance -

    evalb -p evalb_spinn.prm <eval_out_path>.gld <eval_out_path>.tst

    When FLAGS.write_predicted_label is set, also writes one line per example to
    <eval_out_path>.lbl: correctness flag, gold class, predicted class, and the
    softmax class probabilities (tab-separated, "%.8f" each).

    Args:
        eval_fn: Compiled forward-pass function returning (acc, action_acc,
            sem_logits, predicted transition logits...).
        eval_set: Pair of (name, batch iterator).
        eval_path: Basename for the .gld/.tst/.lbl output files.
        logger: Logger with a `.Log(str)` method.
        step: Training step the loaded checkpoint corresponds to (for logging).
        sentence_pair_data: Whether examples are (premise, hypothesis) pairs.
        ind_to_word: Inverse vocabulary (token id -> word string).
        predict_transitions: Whether the model emits transition predictions.

    TODO(SB): Set up for RNN and Model0 on non-sentence-pair data; port support to classifier.py.
    """
    # TODO: Prune out redundant code, make usable on Model0 as well.
    acc_accum = 0.0
    action_acc_accum = 0.0
    eval_batches = 0.0
    eval_gold_path = eval_path + ".gld"
    eval_out_path = eval_path + ".tst"
    eval_lbl_path = eval_path + ".lbl"
    with open(eval_gold_path, "w") as eval_gold, open(eval_out_path, "w") as eval_out:
        if FLAGS.write_predicted_label:
            # NOTE(review): label_out is only closed on the success path below;
            # an exception mid-loop leaks the handle. Left as-is to avoid
            # restructuring; consider an ExitStack if this becomes a problem.
            label_out = open(eval_lbl_path, "w")
        if sentence_pair_data:
            for (eval_X_batch, eval_transitions_batch, eval_y_batch,
                 eval_num_transitions_batch) in eval_set[1]:
                acc_value, action_acc_value, sem_logit_values, logits_pred_hyp, logits_pred_prem = eval_fn(
                    eval_X_batch, eval_transitions_batch, eval_y_batch, eval_num_transitions_batch,
                    0.0,  # Eval mode: Don't apply dropout.
                    int(FLAGS.allow_gt_transitions_in_eval),  # Allow GT transitions to be used according to flag.
                    float(FLAGS.allow_gt_transitions_in_eval))  # adjust visibility of GT
                acc_accum += acc_value
                action_acc_accum += action_acc_value
                eval_batches += 1.0

                # write each predicted transition to file
                for orig_transitions, pred_logit_hyp, pred_logit_prem, tokens, true_class, example_sem_logits \
                        in zip(eval_transitions_batch, logits_pred_hyp,
                               logits_pred_prem, eval_X_batch, eval_y_batch, sem_logit_values):
                    if predict_transitions:
                        # Gold transitions are stacked (hyp, prem) column-wise.
                        orig_hyp_transitions, orig_prem_transitions = orig_transitions.T
                        pred_hyp_transitions = pred_logit_hyp.argmax(axis=1)
                        pred_prem_transitions = pred_logit_prem.argmax(axis=1)
                    else:
                        orig_hyp_transitions = orig_prem_transitions = pred_hyp_transitions = pred_prem_transitions = None

                    hyp_tokens, prem_tokens = tokens.T
                    hyp_words = [ind_to_word[t] for t in hyp_tokens]
                    prem_words = [ind_to_word[t] for t in prem_tokens]
                    eval_gold.write(util.TransitionsToParse(orig_hyp_transitions, hyp_words) + "\n")
                    eval_out.write(util.TransitionsToParse(pred_hyp_transitions, hyp_words) + "\n")
                    eval_gold.write(util.TransitionsToParse(orig_prem_transitions, prem_words) + "\n")
                    eval_out.write(util.TransitionsToParse(pred_prem_transitions, prem_words) + "\n")

                    predicted_class = np.argmax(example_sem_logits)
                    # Softmax over the example's logits for a readable probability row.
                    exp_logit_values = np.exp(example_sem_logits)
                    class_probs = exp_logit_values / np.sum(exp_logit_values)
                    class_probs_repr = "\t".join(map(lambda p: "%.8f" % (p,), class_probs))
                    if FLAGS.write_predicted_label:
                        label_out.write(str(true_class == predicted_class) + "\t" + str(true_class)
                                        + "\t" + str(predicted_class) + "\t" + class_probs_repr + "\n")
        else:
            for (eval_X_batch, eval_transitions_batch, eval_y_batch,
                 eval_num_transitions_batch) in eval_set[1]:
                acc_value, action_acc_value, sem_logit_values, logits_pred = eval_fn(
                    eval_X_batch, eval_transitions_batch, eval_y_batch, eval_num_transitions_batch,
                    0.0,  # Eval mode: Don't apply dropout.
                    int(FLAGS.allow_gt_transitions_in_eval),  # Allow GT transitions to be used according to flag.
                    float(FLAGS.allow_gt_transitions_in_eval))  # adjust visibility of GT
                acc_accum += acc_value
                action_acc_accum += action_acc_value
                eval_batches += 1.0

                # write each predicted transition to file
                for orig_transitions, pred_logit, tokens, true_class, example_sem_logits \
                        in zip(eval_transitions_batch, logits_pred, eval_X_batch, eval_y_batch, sem_logit_values):
                    words = [ind_to_word[t] for t in tokens]
                    eval_gold.write(util.TransitionsToParse(orig_transitions, words) + "\n")
                    eval_out.write(util.TransitionsToParse(pred_logit.argmax(axis=1), words) + "\n")

                    predicted_class = np.argmax(example_sem_logits)
                    exp_logit_values = np.exp(example_sem_logits)
                    class_probs = exp_logit_values / np.sum(exp_logit_values)
                    # Fixed: was "%.3f" here but "%.8f" in the sentence-pair branch;
                    # use the same (higher) precision in both so .lbl files are uniform.
                    class_probs_repr = "\t".join(map(lambda p: "%.8f" % (p,), class_probs))
                    if FLAGS.write_predicted_label:
                        label_out.write(str(true_class == predicted_class) + "\t" + str(true_class)
                                        + "\t" + str(predicted_class) + "\t" + class_probs_repr + "\n")

    logger.Log("Written gold parses in %s" % (eval_gold_path))
    logger.Log("Written predicted parses in %s" % (eval_out_path))
    if FLAGS.write_predicted_label:
        logger.Log("Written predicted labels in %s" % (eval_lbl_path))
        label_out.close()

    logger.Log("Step: %i\tEval acc: %f\t %f\t%s" %
               (step, acc_accum / eval_batches, action_acc_accum / eval_batches, eval_set[0]))
def run(only_forward=False):
    """Entry point: build the model graph, then train or run an eval-only pass.

    When ``only_forward`` is True, a checkpoint must exist; it is loaded and
    predicted parses (and optionally labels) are written to disk via
    ``evaluate_expanded``. Otherwise the RMSprop training loop runs with
    periodic evaluation and checkpointing.

    NOTE: ``vocabulary.iteritems()`` and ``training_data_iter.next()`` below
    are Python 2 APIs — this module targets Python 2.
    """
    logger = afs_safe_logger.Logger(os.path.join(FLAGS.log_path, FLAGS.experiment_name) + ".log")

    # Select the dataset-specific loader.
    if FLAGS.data_type == "bl":
        data_manager = load_boolean_data
    elif FLAGS.data_type == "sst":
        data_manager = load_sst_data
    elif FLAGS.data_type == "snli":
        data_manager = load_snli_data
    else:
        logger.Log("Bad data type.")
        return

    pp = pprint.PrettyPrinter(indent=4)
    logger.Log("Flag values:\n" + pp.pformat(FLAGS.FlagValuesDict()))

    # Load the data.
    raw_training_data, vocabulary = data_manager.load_data(
        FLAGS.training_data_path)

    # Load the eval data.
    raw_eval_sets = []
    if FLAGS.eval_data_path:
        for eval_filename in FLAGS.eval_data_path.split(":"):
            eval_data, _ = data_manager.load_data(eval_filename)
            raw_eval_sets.append((eval_filename, eval_data))

    # Prepare the vocabulary.
    # A loader that returns no vocabulary signals "open vocabulary" mode:
    # build one from the data + embedding file, and freeze the embeddings.
    if not vocabulary:
        logger.Log("In open vocabulary mode. Using loaded embeddings without fine-tuning.")
        train_embeddings = False
        vocabulary = util.BuildVocabulary(
            raw_training_data, raw_eval_sets, FLAGS.embedding_data_path, logger=logger,
            sentence_pair_data=data_manager.SENTENCE_PAIR_DATA)
    else:
        logger.Log("In fixed vocabulary mode. Training embeddings.")
        train_embeddings = True

    # Load pretrained embeddings.
    if FLAGS.embedding_data_path:
        logger.Log("Loading vocabulary with " + str(len(vocabulary))
                   + " words from " + FLAGS.embedding_data_path)
        initial_embeddings = util.LoadEmbeddingsFromASCII(
            vocabulary, FLAGS.word_embedding_dim, FLAGS.embedding_data_path)
    else:
        initial_embeddings = None

    # Trim dataset, convert token sequences to integer sequences, crop, and
    # pad.
    logger.Log("Preprocessing training data.")
    training_data = util.PreprocessDataset(
        raw_training_data, vocabulary, FLAGS.seq_length, data_manager, eval_mode=False, logger=logger,
        sentence_pair_data=data_manager.SENTENCE_PAIR_DATA,
        for_rnn=FLAGS.model_type == "RNN" or FLAGS.model_type == "CBOW")
    training_data_iter = util.MakeTrainingIterator(
        training_data, FLAGS.batch_size)

    eval_iterators = []
    for filename, raw_eval_set in raw_eval_sets:
        logger.Log("Preprocessing eval data: " + filename)
        e_X, e_transitions, e_y, e_num_transitions = util.PreprocessDataset(
            raw_eval_set, vocabulary, FLAGS.seq_length, data_manager, eval_mode=True, logger=logger,
            sentence_pair_data=data_manager.SENTENCE_PAIR_DATA,
            for_rnn=FLAGS.model_type == "RNN" or FLAGS.model_type == "CBOW")
        eval_iterators.append((filename,
            util.MakeEvalIterator((e_X, e_transitions, e_y, e_num_transitions), FLAGS.batch_size)))

    # Set up the placeholders.
    y = T.vector("y", dtype="int32")
    lr = T.scalar("lr")
    training_mode = T.scalar("training_mode")  # 1: Training with dropout, 0: Eval
    ground_truth_transitions_visible = T.scalar("ground_truth_transitions_visible", dtype="int32")

    logger.Log("Building model.")
    vs = util.VariableStore(
        default_initializer=util.UniformInitializer(FLAGS.init_range), logger=logger)

    # Pick the model class by flag; Model0/1/2/2S live in spinn.fat_stack.
    if FLAGS.model_type == "CBOW":
        model_cls = spinn.cbow.CBOW
    elif FLAGS.model_type == "RNN":
        model_cls = spinn.plain_rnn.RNN
    else:
        model_cls = getattr(spinn.fat_stack, FLAGS.model_type)

    # Generator of mask for scheduled sampling
    numpy_random = np.random.RandomState(1234)
    ss_mask_gen = T.shared_randomstreams.RandomStreams(numpy_random.randint(999999))

    # Training step number
    ss_prob = T.scalar("ss_prob")

    if data_manager.SENTENCE_PAIR_DATA:
        # Pair data: a third tensor dimension stacks (premise, hypothesis).
        X = T.itensor3("X")
        transitions = T.itensor3("transitions")
        num_transitions = T.imatrix("num_transitions")

        predicted_premise_transitions, predicted_hypothesis_transitions, logits = build_sentence_pair_model(
            model_cls, len(vocabulary), FLAGS.seq_length,
            X, transitions, len(data_manager.LABEL_MAP), training_mode, ground_truth_transitions_visible, vs,
            initial_embeddings=initial_embeddings, project_embeddings=(not train_embeddings),
            ss_mask_gen=ss_mask_gen,
            ss_prob=ss_prob)
    else:
        X = T.matrix("X", dtype="int32")
        transitions = T.imatrix("transitions")
        num_transitions = T.vector("num_transitions", dtype="int32")

        predicted_transitions, logits = build_sentence_model(
            model_cls, len(vocabulary), FLAGS.seq_length,
            X, transitions, len(data_manager.LABEL_MAP), training_mode, ground_truth_transitions_visible, vs,
            initial_embeddings=initial_embeddings, project_embeddings=(not train_embeddings),
            ss_mask_gen=ss_mask_gen,
            ss_prob=ss_prob)

    xent_cost, acc = build_cost(logits, y)

    # Set up L2 regularization.
    l2_cost = 0.0
    for var in vs.trainable_vars:
        l2_cost += FLAGS.l2_lambda * T.sum(T.sqr(vs.vars[var]))

    # Compute cross-entropy cost on action predictions.
    # Model0/RNN/CBOW do not predict transitions, so their transition cost is 0.
    if (not data_manager.SENTENCE_PAIR_DATA) and FLAGS.model_type not in ["Model0", "RNN", "CBOW"]:
        transition_cost, action_acc = build_transition_cost(predicted_transitions, transitions, num_transitions)
    elif data_manager.SENTENCE_PAIR_DATA and FLAGS.model_type not in ["Model0", "RNN", "CBOW"]:
        p_transition_cost, p_action_acc = build_transition_cost(predicted_premise_transitions, transitions[:, :, 0],
                                                                num_transitions[:, 0])
        h_transition_cost, h_action_acc = build_transition_cost(predicted_hypothesis_transitions, transitions[:, :, 1],
                                                                num_transitions[:, 1])
        transition_cost = p_transition_cost + h_transition_cost
        action_acc = (p_action_acc + h_action_acc) / 2.0  # TODO(SB): Average over transitions, not words.
    else:
        transition_cost = T.constant(0.0)
        action_acc = T.constant(0.0)
    transition_cost = transition_cost * FLAGS.transition_cost_scale

    total_cost = xent_cost + l2_cost + transition_cost

    # Resolve the checkpoint path: an explicit .ckpt file, or a directory
    # where the experiment name becomes the filename.
    if ".ckpt" in FLAGS.ckpt_path:
        checkpoint_path = FLAGS.ckpt_path
    else:
        checkpoint_path = os.path.join(FLAGS.ckpt_path, FLAGS.experiment_name + ".ckpt")
    if os.path.isfile(checkpoint_path):
        logger.Log("Found checkpoint, restoring.")
        step, best_dev_error = vs.load_checkpoint(checkpoint_path, num_extra_vars=2,
                                                  skip_saved_unsavables=FLAGS.skip_saved_unsavables)
    else:
        assert not only_forward, "Can't run an eval-only run without a checkpoint. Supply a checkpoint."
        step = 0
        best_dev_error = 1.0

    # Do an evaluation-only run.
    if only_forward:
        if FLAGS.eval_output_paths:
            eval_output_paths = FLAGS.eval_output_paths.strip().split(":")
            assert len(eval_output_paths) == len(eval_iterators), "Invalid no. of output paths."
        else:
            eval_output_paths = [FLAGS.experiment_name + "-" + os.path.split(eval_set[0])[1] + "-parse"
                                 for eval_set in eval_iterators]

        # Load model from checkpoint.
        logger.Log("Checkpointed model was trained for %d steps." % (step,))

        # Generate function for forward pass.
        logger.Log("Building forward pass.")
        if data_manager.SENTENCE_PAIR_DATA:
            eval_fn = theano.function(
                [X, transitions, y, num_transitions, training_mode, ground_truth_transitions_visible, ss_prob],
                [acc, action_acc, logits, predicted_hypothesis_transitions, predicted_premise_transitions],
                on_unused_input='ignore',
                allow_input_downcast=True)
        else:
            eval_fn = theano.function(
                [X, transitions, y, num_transitions, training_mode, ground_truth_transitions_visible, ss_prob],
                [acc, action_acc, logits, predicted_transitions],
                on_unused_input='ignore',
                allow_input_downcast=True)

        # Generate the inverse vocabulary lookup table.
        ind_to_word = {v : k for k, v in vocabulary.iteritems()}

        # Do a forward pass and write the output to disk.
        for eval_set, eval_out_path in zip(eval_iterators, eval_output_paths):
            logger.Log("Writing eval output for %s." % (eval_set[0],))
            evaluate_expanded(eval_fn, eval_set, eval_out_path, logger, step,
                              data_manager.SENTENCE_PAIR_DATA, ind_to_word, FLAGS.model_type not in ["Model0", "RNN", "CBOW"])
    else:
        # Train
        new_values = util.RMSprop(total_cost, vs.trainable_vars.values(), lr)
        new_values += [(key, vs.nongradient_updates[key]) for key in vs.nongradient_updates]
        # Training open-vocabulary embeddings is a questionable idea right now. Disabled:
        # new_values.append(
        #     util.embedding_SGD(total_cost, embedding_params, embedding_lr))

        # Create training and eval functions.
        # Unused variable warnings are supressed so that num_transitions can be passed in when training Model 0,
        # which ignores it. This yields more readable code that is very slightly slower.
        logger.Log("Building update function.")
        update_fn = theano.function(
            [X, transitions, y, num_transitions, lr, training_mode, ground_truth_transitions_visible, ss_prob],
            [total_cost, xent_cost, transition_cost, action_acc, l2_cost, acc],
            updates=new_values,
            on_unused_input='ignore',
            allow_input_downcast=True)
        logger.Log("Building eval function.")
        eval_fn = theano.function([X, transitions, y, num_transitions, training_mode, ground_truth_transitions_visible, ss_prob], [acc, action_acc],
            on_unused_input='ignore',
            allow_input_downcast=True)
        logger.Log("Training.")

        # Main training loop.
        for step in range(step, FLAGS.training_steps):
            # Periodic evaluation; the first eval set (the dev set) drives
            # "best" checkpointing after a 1000-step warm-up.
            if step % FLAGS.eval_interval_steps == 0:
                for index, eval_set in enumerate(eval_iterators):
                    acc = evaluate(eval_fn, eval_set, logger, step)
                    if FLAGS.ckpt_on_best_dev_error and index == 0 and (1 - acc) < 0.99 * best_dev_error and step > 1000:
                        best_dev_error = 1 - acc
                        logger.Log("Checkpointing with new best dev accuracy of %f" % acc)
                        vs.save_checkpoint(checkpoint_path + "_best", extra_vars=[step, best_dev_error])

            X_batch, transitions_batch, y_batch, num_transitions_batch = training_data_iter.next()
            # Exponential learning-rate decay per 10k steps.
            learning_rate = FLAGS.learning_rate * (FLAGS.learning_rate_decay_per_10k_steps ** (step / 10000.0))
            ret = update_fn(X_batch, transitions_batch, y_batch, num_transitions_batch,
                            learning_rate, 1.0, 1.0, np.exp(step*np.log(FLAGS.scheduled_sampling_exponent_base)))
            total_cost_val, xent_cost_val, transition_cost_val, action_acc_val, l2_cost_val, acc_val = ret

            if step % FLAGS.statistics_interval_steps == 0:
                logger.Log(
                    "Step: %i\tAcc: %f\t%f\tCost: %5f %5f %5f %5f"
                    % (step, acc_val, action_acc_val, total_cost_val, xent_cost_val, transition_cost_val,
                       l2_cost_val))

            if step % FLAGS.ckpt_interval_steps == 0 and step > 0:
                vs.save_checkpoint(checkpoint_path, extra_vars=[step, best_dev_error])
if __name__ == '__main__':
    # Command-line flag definitions (python-gflags), grouped by theme.
    # FLAGS is populated from sys.argv at the bottom, then run() is invoked.

    # Experiment naming.
    gflags.DEFINE_string("experiment_name", "experiment", "")

    # Data types.
    gflags.DEFINE_enum("data_type", "bl", ["bl", "sst", "snli"],
                       "Which data handler and classifier to use.")

    # Where to store checkpoints
    gflags.DEFINE_string("ckpt_path", ".", "Where to save/load checkpoints. Can be either "
                         "a filename or a directory. In the latter case, the experiment name serves as the "
                         "base for the filename.")
    gflags.DEFINE_string("log_path", ".", "A directory in which to write logs.")

    # Data settings.
    gflags.DEFINE_string("training_data_path", None, "")
    gflags.DEFINE_string("eval_data_path", None, "Can contain multiple file paths, separated "
                         "using ':' tokens. The first file should be the dev set, and is used for determining "
                         "when to save the early stopping 'best' checkpoints.")
    gflags.DEFINE_integer("seq_length", 30, "")
    gflags.DEFINE_integer("eval_seq_length", 30, "")
    gflags.DEFINE_string("embedding_data_path", None,
                         "If set, load GloVe-formatted embeddings from here.")

    # Model architecture settings.
    gflags.DEFINE_enum("model_type", "Model0",
                       ["CBOW", "RNN", "Model0", "Model1", "Model2", "Model2S"],
                       "")
    gflags.DEFINE_boolean("allow_gt_transitions_in_eval", False,
                          "Whether to use ground truth transitions in evaluation when appropriate "
                          "(i.e., in Model 1 and Model 2S.)")
    gflags.DEFINE_integer("model_dim", 8, "")
    gflags.DEFINE_integer("word_embedding_dim", 8, "")
    gflags.DEFINE_integer("tracking_lstm_hidden_dim", 4, "")
    gflags.DEFINE_boolean("use_tracking_lstm", True,
                          "Whether to use LSTM in the tracking unit")
    gflags.DEFINE_boolean("predict_use_cell", False,
                          "For models which predict parser actions, use "
                          "both the tracking LSTM hidden and cell values as "
                          "input to the prediction layer")
    gflags.DEFINE_enum("use_attention", "None",
                       ["None", "Rocktaschel", "WangJiang", "Thang", "TreeWangJiang", "TreeThang"],
                       "")
    gflags.DEFINE_boolean("context_sensitive_shift", False,
                          "Use LSTM hidden state and word embedding to determine the vector to be pushed")
    gflags.DEFINE_boolean("context_sensitive_use_relu", False,
                          "Use ReLU Layer to combine embedding and tracking unit hidden state")
    gflags.DEFINE_float("semantic_classifier_keep_rate", 0.5,
                        "Used for dropout in the semantic task classifier.")
    gflags.DEFINE_float("embedding_keep_rate", 0.5,
                        "Used for dropout on transformed embeddings.")
    gflags.DEFINE_boolean("lstm_composition", True, "")
    gflags.DEFINE_enum("classifier_type", "MLP", ["MLP", "Highway", "ResNet"], "")
    gflags.DEFINE_integer("resnet_unit_depth", 2, "")
    # gflags.DEFINE_integer("num_composition_layers", 1, "")
    gflags.DEFINE_integer("num_sentence_pair_combination_layers", 2, "")
    gflags.DEFINE_integer("sentence_pair_combination_layer_dim", 1024, "")
    gflags.DEFINE_float("scheduled_sampling_exponent_base", 0.99,
                        "Used for scheduled sampling, with probability of Model 1 over Model 2 being base^#training_steps")
    gflags.DEFINE_boolean("use_difference_feature", True,
                          "Supply the sentence pair classifier with sentence difference features.")
    gflags.DEFINE_boolean("use_product_feature", True,
                          "Supply the sentence pair classifier with sentence product features.")
    gflags.DEFINE_boolean("connect_tracking_comp", True,
                          "Connect tracking unit and composition unit. Can only be true if using LSTM in both units.")
    gflags.DEFINE_boolean("initialize_hyp_tracking_state", False,
                          "Initialize the c state of the tracking unit of hypothesis model with the final"
                          "tracking unit c state of the premise model.")
    gflags.DEFINE_boolean("use_gru", False,
                          "Use GRU units instead of LSTM units.")

    # Optimization settings.
    gflags.DEFINE_integer("training_steps", 500000, "Stop training after this point.")
    gflags.DEFINE_integer("batch_size", 32, "SGD minibatch size.")
    gflags.DEFINE_float("learning_rate", 0.001, "Used in RMSProp.")
    gflags.DEFINE_float("learning_rate_decay_per_10k_steps", 0.75, "Used in RMSProp.")
    gflags.DEFINE_float("clipping_max_value", 5.0, "")
    gflags.DEFINE_float("l2_lambda", 1e-5, "")
    gflags.DEFINE_float("init_range", 0.005, "Mainly used for softmax parameters. Range for uniform random init.")
    gflags.DEFINE_float("transition_cost_scale", 1.0, "Multiplied by the transition cost.")

    # Display settings.
    gflags.DEFINE_integer("statistics_interval_steps", 100, "Print training set results at this interval.")
    gflags.DEFINE_integer("eval_interval_steps", 100, "Evaluate at this interval.")
    gflags.DEFINE_integer("ckpt_interval_steps", 5000, "Update the checkpoint on disk at this interval.")
    gflags.DEFINE_boolean("ckpt_on_best_dev_error", True, "If error on the first eval set (the dev set) is "
                          "at most 0.99 of error at the previous checkpoint, save a special 'best' checkpoint.")

    # Evaluation settings
    gflags.DEFINE_boolean("expanded_eval_only_mode", False,
                          "If set, a checkpoint is loaded and a forward pass is done to get the predicted "
                          "transitions. The inferred parses are written to the supplied file(s) along with example-"
                          "by-example accuracy information. Requirements: Must specify checkpoint path.")
    gflags.DEFINE_string("eval_output_paths", None,
                         "Used when expanded_eval_only_mode is set. The number of supplied paths should be same"
                         "as the number of eval sets.")
    gflags.DEFINE_boolean("write_predicted_label", False,
                          "Write the predicted labels in a <eval_output_name>.lbl file.")
    gflags.DEFINE_boolean("skip_saved_unsavables", False,
                          "Assume that variables marked as not savable will appear in checkpoints anyway, and "
                          "skip them when loading. This should be used only when loading old checkpoints.")

    # Parse command line flags.
    FLAGS(sys.argv)

    run(only_forward=FLAGS.expanded_eval_only_mode)
| StarcoderdataPython |
148068 | <reponame>mikkohei13/Loxpyt
import time
import datetime
import json
class report():
    """Incrementally builds an HTML report of detected audio segments.

    The constructor writes the HTML preamble to ``<directoryPath>_report.html``;
    the ``add*`` methods append fragments one at a time, and ``finalize``
    appends summary statistics and closes the document.
    """

    def __init__(self, directoryPath):
        # Report file lives next to the data directory: "<dir>_report.html".
        self._filePath = directoryPath + "_report.html"
        self._timeStart = int(time.time())  # wall-clock start, epoch seconds
        self._counterPositive = 0  # segments added via addPositiveSegment
        self._counterNegative = 0  # segments added via addNegativeSegment
        html = """<!DOCTYPE html>
<html lang="fi" class="no-js">
<head>
<meta charset="UTF-8">
<title>Loxpyt Report</title>
<link rel='stylesheet' href='../styles.css' media='all' />
</head>
<body>
"""
        # Create the file (truncating any previous report) and write the preamble.
        # Fixed: previously opened without a context manager and with a comment
        # incorrectly describing "w+" as append mode.
        with open(self._filePath, "w+") as f:
            f.write(html)

    def appendLine(self, html):
        """Append one line of HTML (plus a newline) to the report file."""
        with open(self._filePath, "a") as file:
            file.write(html + "\n")

    def addFile(self, fileData):
        """Write a heading for one source recording.

        Expects ``fileData`` to carry 'fileName' (str) and
        'recordDateStartUTC' (datetime).
        """
        print(fileData)  # Debug
        html = "<h3>"
        html += fileData['fileName'] + ", start UTC " + fileData['recordDateStartUTC'].strftime("%Y-%m-%d %H:%M:%S")
        html += "</h3>"
        self.appendLine(html)

    def addPositiveSegment(self, segmentMeta, score):
        """Append spectrogram, audio player and caption for one positive detection."""
        score = str(score)
        # Derive a CSS class from the leading "0.x" of the score, e.g. 0.87 -> "s08".
        scoreClass = score[0:3].replace(".", "")
        self.appendLine("<div class='segment s" + scoreClass + "'>")
        self.addSpectro(segmentMeta["spectroFilename"])
        self.addAudio(segmentMeta["finalAudioFilename"])
        segmentStartUTCstr = segmentMeta["segmentStartUTC"].strftime("%H:%M:%S")
        self.appendLine("<p class='caption'><em>UTC " + segmentStartUTCstr + ",</em> <strong>score " + score + "</strong></p> \n</div>")
        self._counterPositive += 1

    def addAudio(self, segment):
        """Append an HTML5 audio player for the given audio file path."""
        html = """
<figure>
    <figcaption>""" + segment + """</figcaption>
    <audio
        controls
        src='""" + segment + """'>
    </audio>
</figure>
"""
        self.appendLine(html)

    def addSpectro(self, segment):
        """Append an <img> tag pointing at the spectrogram image."""
        html = "<img src='" + segment + "'>\n"
        self.appendLine(html)

    def addHtml(self, html):
        """Append an arbitrary HTML fragment verbatim."""
        self.appendLine(html)

    def addNegativeSegment(self, score):
        """Record a below-threshold segment; only its score is written."""
        self.appendLine(str(score) + " / ")
        self._counterNegative += 1

    def finalize(self):
        """Append summary statistics and the closing HTML tags."""
        timeEnd = int(time.time())
        timeElapsed = timeEnd - self._timeStart
        now = datetime.datetime.now()
        date = now.strftime("%Y-%m-%d %H:%M:%S")
        total = self._counterPositive + self._counterNegative
        # Guard against ZeroDivisionError when no segments were ever added.
        timeElapsedPerSegment = timeElapsed / total if total else 0.0
        html = ""
        html += "<p>" + str(self._counterPositive) + " positives, " + str(self._counterNegative) + " negatives, total " + str(total) + "</p>\n"
        html += "<p>End of report / " + date + " UTC / Elapsed " + str(timeElapsed) + " s, " + str(timeElapsedPerSegment) + " s per segment</p>\n"
        html += "</body></html>"
        self.appendLine(html)
3318019 | <gh_stars>1-10
"""
Utility Functions (:mod:`skdh.utility`)
=======================================
.. currentmodule:: skdh.utility
Binary State Fragmentation Endpoints
------------------------------------
.. autosummary::
:toctree: generated/
fragmentation_endpoints.average_duration
fragmentation_endpoints.state_transition_probability
fragmentation_endpoints.gini_index
fragmentation_endpoints.average_hazard
fragmentation_endpoints.state_power_law_distribution
Misc. Math Functions
--------------------
.. autosummary::
:toctree: generated/
math.moving_mean
math.moving_sd
math.moving_skewness
math.moving_kurtosis
math.moving_median
Orientation Functions
---------------------
.. autosummary::
:toctree: generated/
orientation.correct_accelerometer_orientation
Windowing Functions
-------------------
.. autosummary::
:toctree: generated/
windowing.compute_window_samples
windowing.get_windowed_view
"""
from skdh.utility.fragmentation_endpoints import *
from skdh.utility import fragmentation_endpoints
from skdh.utility.math import *
from skdh.utility import math
from skdh.utility.orientation import correct_accelerometer_orientation
from skdh.utility import orientation
from skdh.utility.windowing import compute_window_samples, get_windowed_view
from skdh.utility import windowing
__all__ = (
["math", "windowing", "orientation", "fragmentation_endpoints"]
+ fragmentation_endpoints.__all__
+ math.__all__
+ windowing.__all__
+ orientation.__all__
)
| StarcoderdataPython |
# part 1
def check_numbers(a, b):
    """Print the sum of the two given numbers."""
    total = a + b
    print(total)


check_numbers(2, 6)
# part 2
def check_numbers_list(a, b):
    """Print the element-wise sums of two equal-length lists.

    If the lists differ in length, print a message (kept verbatim) instead.
    """
    if len(a) != len(b):
        print("lists ki len barabar nahi hai")
        return
    for left, right in zip(a, b):
        check_numbers(left, right)


check_numbers_list([10, 30, 40], [40, 20, 21])
150813 | <reponame>irom-lab/PAC-BUS
import torch
import numpy as np
from data_generators.omniglot_data import OmniglotNShot
import argparse
import learn2learn as l2l
from models.omniglot_models import SOmniglotModel, OmniglotModel, OmniglotModel1
from learners.reptile_learner import ReptileLearner
# Command-line interface and global experiment configuration.
argparser = argparse.ArgumentParser()
argparser.add_argument('--method', type=str, help="[\'maml\', \'pac_bus_h\', \'mr_maml_w\', \'fli_online\']")
argparser.add_argument('--nme', help="Whether samples will be not-mutually-exclusive", default='False')
argparser.add_argument('--n_way', type=int, default=20)
argparser.add_argument('--k_spt', type=int, help='k shot for support set', default=1)
argparser.add_argument('--batch', type=int, help='batch size', default=16)
argparser.add_argument('--k_qry', type=int, help='k shot for query set', default=5)
argparser.add_argument('--gpu', type=int, help='which GPU', default=0)
argparser.add_argument('--lrm', type=float, help='meta learning rate', default=0.001)
argparser.add_argument('--lrb', type=float, help='base learners learning rate', default=0.1)
argparser.add_argument('--n_filt', type=int, default=64)
argparser.add_argument('--seed', type=int, default=-1)
argparser.add_argument('--regscale', type=float, default=1e-6)
argparser.add_argument('--regscale2', type=float, default=1)
argparser.add_argument('--epochsb', type=int, default=5)
argparser.add_argument('--epochsm', type=int, default=100000)
args = argparser.parse_args()

# --gpu -1 selects the CPU; otherwise the given CUDA device index is used.
if args.gpu == -1:
    device = torch.device('cpu')
else:
    device = torch.device('cuda:'+str(args.gpu))

# --nme is a string flag: any value other than the literal 'False' enables it.
nme = args.nme != 'False'
method = args.method
if method not in ['maml', 'pac_bus_h', 'mr_maml_w', 'fli_online']:
    print("invalid options, select one of: [\'maml\', \'pac_bus_h\', \'mr_maml_w\', \'fli_online\']")
    exit()

# Seed all RNGs for reproducibility when a non-negative seed is given.
if args.seed >= 0:
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)

init_as_base = True  # when True, call init_as_base() on each cloned learner before adaptation
partition = 500      # print score and run a small test eval every `partition` meta-steps
score_part = 2000    # run a 100-trial test evaluation at this interval
results_part = 10000  # run the full 500-trial evaluation at this interval
print(args)
# Build the base model appropriate for the selected meta-learning method.
if method == 'maml':
    bmodel = OmniglotModel(n_way=args.n_way).to(device)
elif method == 'fli_online':
    # Reptile-style learner manages its own inner/outer updates internally.
    bmodel = ReptileLearner(OmniglotModel1, (args.n_way, args.n_filt), n_way=args.n_way, k_shot=args.k_spt, meta_batchsz=args.batch, beta=args.lrm, num_updates=args.epochsb).to(device)
elif method == 'mr_maml_w':
    bmodel = SOmniglotModel(n_way=args.n_way, n_filt=args.n_filt).to(device)
    bmodel.init_logvar(-6, -6)
else: # method == 'pac_bus_h':
    bmodel = SOmniglotModel(n_way=args.n_way, n_filt=args.n_filt, ELU=True).to(device)
    bmodel.init_logvar(-6, -6)

# Constants used by the regularization terms in the training loop below.
# NOTE(review): delta/T/m/c feed the PAC-Bayes-style bound for pac_bus_h —
# confirm their intended roles against the PAC-BUS paper.
reg1_scale = torch.tensor(args.regscale, dtype=torch.float).to(device)
reg2_scale = torch.tensor(args.regscale2, dtype=torch.float).to(device) # only for pac_bus_h
delta = torch.tensor(0.01, dtype=torch.float).to(device)
T = torch.tensor(args.epochsm, dtype=torch.float).to(device)
m = torch.tensor(args.epochsb, dtype=torch.float).to(device)
c = T * args.lrb

# All methods except fli_online wrap the base model in learn2learn's MAML
# (second-order) and keep a clone of the initialization as the "prior".
if method != 'fli_online':
    model = l2l.algorithms.MAML(bmodel, lr=args.lrb, first_order=False, allow_nograd=True).to(device)
    prior = model.clone().to(device)
else:
    model = bmodel
    prior = None

optimizer = torch.optim.Adam(model.parameters(), lr=args.lrm)
criterion = torch.nn.CrossEntropyLoss().to(device)
db_train = OmniglotNShot('./data/omniglot', batchsz=args.batch, n_way=args.n_way, k_shot=args.k_spt, k_query=args.k_qry, nme=nme, device=None)
# Main meta-training loop: one meta-batch of tasks per step.
for step in range(1, args.epochsm+1):
    x_spt, y_spt, x_qry, y_qry = db_train.next('train')
    x_spt, y_spt, x_qry, y_qry = torch.from_numpy(x_spt).to(device), torch.from_numpy(y_spt).to(device), torch.from_numpy(x_qry).to(device), torch.from_numpy(y_qry).to(device)
    num_tasks = len(x_spt)
    score = 0
    if method != 'fli_online':
        # MAML-style outer step: adapt a clone per task on the support set,
        # accumulate query-set loss, then backprop through the adaptation.
        meta_error = torch.tensor(0.).to(device)
        optimizer.zero_grad()
        learners = []
        for i in range(num_tasks):
            learners.append(model.clone())
            if init_as_base:
                learners[i].init_as_base()
            # Inner loop: args.epochsb adaptation steps on the support set.
            for be in range(args.epochsb):
                loss = criterion(learners[i](x_spt[i]), y_spt[i])
                learners[i].adapt(loss)
            pred_q = torch.nn.functional.softmax(learners[i](x_qry[i]), dim=1).argmax(dim=1)
            score += torch.eq(pred_q, y_qry[i]).sum().item() / len(y_qry[0, :])
            meta_error += criterion(learners[i](x_qry[i]), y_qry[i])
        if method == 'pac_bus_h':
            # PAC-BUS regularizers: reg1 is the KL-based PAC-Bayes term,
            # reg2 the uniform-stability term.
            kl_div = model.calc_kl_div(prior, device)
            reg1 = torch.sqrt(kl_div + (torch.log(2*torch.sqrt(T) / delta)) / (2 * T))
            avg_model = model.clone()
            avg_model.init_as_base()
            L, S = avg_model.calc_ls_constants(device)
            # NOTE(review): `1 / S*c` parses as (1/S)*c — confirm against the
            # PAC-BUS bound, which may intend 1/(S*c).
            p1 = (1 + 1 / S*c) / (m - 1)
            p2 = (2 * c * L**2) ** (1 / (S*c + 1))
            p3 = T**(S*c / (S*c + 1))
            reg2 = p1 * p2 * p3
            meta_error += reg1_scale*reg1*num_tasks + reg2_scale*reg2*num_tasks
        if method == 'mr_maml_w':
            kl_div = model.calc_kl_div(prior, device)
            meta_error += reg1_scale*kl_div*num_tasks # equation 5 from MLWM paper
        meta_error /= num_tasks
        meta_error.backward(retain_graph=True)
        optimizer.step()
    else:
        # fli_online (Reptile) performs its own update inside the forward call.
        accs = model(x_spt, y_spt, x_qry, y_qry)
        score += np.array(accs).sum()
    if step % partition == 0:
        # Periodic test-set evaluation; trial count grows at the larger intervals.
        print('step:', step, '\t score:', score / num_tasks)
        scores = []
        num_test_trials = 10
        if step % results_part == 0:
            print("Results:")
            num_test_trials = 500
        elif step % score_part == 0:
            print("Score many more test trials:")
            num_test_trials = 100
        for _ in range(num_test_trials):
            x_spt, y_spt, x_qry, y_qry = db_train.next('test')
            x_spt, y_spt, x_qry, y_qry = torch.from_numpy(x_spt).to(device), torch.from_numpy(y_spt).to(device), torch.from_numpy(x_qry).to(device), torch.from_numpy(y_qry).to(device)
            num_tasks = len(x_spt)
            if method != 'fli_online':
                accs = []
                for i in range(num_tasks):
                    learner = model.clone()
                    if init_as_base:
                        learner.init_as_base()
                    for be in range(args.epochsb):
                        loss = criterion(learner(x_spt[i]), y_spt[i])
                        learner.adapt(loss)
                    pred_q = torch.nn.functional.softmax(learner(x_qry[i]), dim=1).argmax(dim=1)
                    accs.append(torch.eq(pred_q, y_qry[i]).sum().item()/len(y_qry[0, :]))
                scores.append(np.mean(accs))
            else:
                acc = model.pred(x_spt, y_spt, x_qry, y_qry)
                scores.append(acc)
        print("test accs", np.round(np.mean(scores),5))
    else:
        print('step:', step, '\t score:', score / num_tasks, end="\r")
print()
| StarcoderdataPython |
3264380 | from django.contrib import admin
from .models import Restaurant, Food, Comment, FoodLike, RestaurantUser
class FoodAdmin(admin.ModelAdmin):
    """Admin options for Food: list the name/restaurant/image columns and
    allow searching by food name or by the related restaurant's name."""
    list_display = ('name', 'restaurant', 'image')
    search_fields = ('name', 'restaurant__name')
# Expose the app's models in the Django admin site; Food gets the
# customized FoodAdmin above, the others use the default ModelAdmin.
admin.site.register(Restaurant)
admin.site.register(Food, FoodAdmin)
admin.site.register(Comment)
admin.site.register(FoodLike)
admin.site.register(RestaurantUser)
1609012 | <reponame>escaped/cookiecutter-jupyter<gh_stars>1-10
import os
from pathlib import Path
if __name__ == '__main__':
    # Post-generation hook skeleton.  project_root points at the current
    # working directory (presumably the freshly generated project, since
    # cookiecutter runs hooks from there — TODO confirm); it is currently
    # computed but unused.
    project_root = Path(os.path.curdir)
121871 |
def ben_update():
    """Placeholder update hook; performs no work and returns None."""
    return None
def ben_random_tick():
    """Placeholder random-tick hook; performs no work and returns None."""
    return None
| StarcoderdataPython |
8927 | <filename>python/testData/resolve/AssignmentExpressionsAndOuterVar.py<gh_stars>1-10
total = 0
partial_sums = [total := total + v for v in values]
print("Total:", total)
<ref> | StarcoderdataPython |
1644863 | # coding: utf-8
"""
cloudFPGA Resource Manager API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class DebugApi(object):
    """Debug-related endpoints of the cloudFPGA Resource Manager REST API.

    NOTE: This class was auto generated by the swagger code generator
    program (https://github.com/swagger-api/swagger-codegen).  The
    refactoring below collapses the six copies of identical per-endpoint
    boilerplate (unexpected-kwarg rejection, required-parameter checks,
    path/query marshalling, dispatch to ``ApiClient.call_api``) into one
    private helper.  Every public method keeps its original signature,
    error messages and observable behavior.
    """

    # Control keywords accepted by every endpoint in addition to its own
    # parameters; they are forwarded to ApiClient.call_api().
    _CONTROL_PARAMS = ('async_req', '_return_http_data_only',
                       '_preload_content', '_request_timeout')

    def __init__(self, api_client=None):
        # Fall back to a default-configured client when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def _request(self, method_name, resource_path, http_method, required,
                 kwargs, path_param_names=(), optional_names=(),
                 response_type=None, accept_json=False):
        """Validate parameters and dispatch one REST call.

        :param str method_name: public API method name (used verbatim in the
            TypeError/ValueError messages, matching the generated code)
        :param str resource_path: URL template, e.g.
            '/debug/ila_connection/{instance_id}'
        :param str http_method: 'GET', 'DELETE', ...
        :param dict required: ordered mapping of required parameter
            name -> value (insertion order defines query-parameter order)
        :param dict kwargs: caller-supplied keyword arguments
        :param tuple path_param_names: names in ``required`` that are path
            (not query) parameters
        :param tuple optional_names: optional query parameters accepted via
            ``kwargs``
        :param response_type: deserialization target for ApiClient, or None
        :param bool accept_json: send an 'Accept: application/json' header
        :return: whatever ``ApiClient.call_api`` returns (data, tuple or
            thread depending on the control kwargs)
        :raises TypeError: on an unexpected keyword argument
        :raises ValueError: when a required parameter is missing/None
        """
        all_params = list(required) + list(optional_names)
        params = {}
        # Reject unknown keyword arguments exactly like the generated code.
        for key, val in six.iteritems(kwargs):
            if key not in all_params and key not in self._CONTROL_PARAMS:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method %s" % (key, method_name)
                )
            params[key] = val
        # Required parameters must be present and not None.
        for name, value in six.iteritems(required):
            if value is None:
                raise ValueError("Missing the required parameter `%s` when calling `%s`" % (name, method_name))  # noqa: E501
            params[name] = value

        path_params = {}
        for name in path_param_names:
            path_params[name] = params[name]
        # Query parameters: required ones first (in declaration order),
        # then any optional ones that were actually supplied.
        query_params = []
        for name in required:
            if name not in path_param_names:
                query_params.append((name, params[name]))
        for name in optional_names:
            if name in params:
                query_params.append((name, params[name]))

        header_params = {}
        if accept_json:
            header_params['Accept'] = self.api_client.select_header_accept(
                ['application/json'])

        # No endpoint of this API uses a request body, form data, file
        # uploads or authentication settings.
        return self.api_client.call_api(
            resource_path, http_method,
            path_params,
            query_params,
            header_params,
            body=None,
            post_params=[],
            files={},
            response_type=response_type,
            auth_settings=[],
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats={})

    def cf_manager_rest_api_delete_debug_connection(self, username, password, instance_id, **kwargs):  # noqa: E501
        """Deletes an existing connection to the `hw_server` of this instance.

        This deletes the *connection to the* `hw_server`; the `hw_server`
        itself is only stopped if there is no other open debug connection
        to the connected JTAG probe (some probes share a JTAG chain).
        Pass ``async_req=True`` to get a request thread instead of the
        result.

        :param str username: OpenStack username (required)
        :param str password: OpenStack password (required)
        :param str instance_id: ROLE instance unique identifier (required)
        :return: None
        """
        kwargs['_return_http_data_only'] = True
        return self.cf_manager_rest_api_delete_debug_connection_with_http_info(
            username, password, instance_id, **kwargs)

    def cf_manager_rest_api_delete_debug_connection_with_http_info(self, username, password, instance_id, **kwargs):  # noqa: E501
        """Variant of :meth:`cf_manager_rest_api_delete_debug_connection`
        that can also return HTTP status/headers (control kwargs decide)."""
        return self._request(
            'cf_manager_rest_api_delete_debug_connection',
            '/debug/ila_connection/{instance_id}', 'DELETE',
            required={'username': username, 'password': password,
                      'instance_id': instance_id},
            kwargs=kwargs,
            path_param_names=('instance_id',))

    def cf_manager_rest_api_get_all_debug_connections(self, username, password, **kwargs):  # noqa: E501
        """Requests a list of running `hw_server`s on all instances (admin only).

        Pass ``async_req=True`` to get a request thread instead of the
        result.

        :param str username: OpenStack username (required)
        :param str password: OpenStack password (required)
        :return: list[InlineResponse2003]
        """
        kwargs['_return_http_data_only'] = True
        return self.cf_manager_rest_api_get_all_debug_connections_with_http_info(
            username, password, **kwargs)

    def cf_manager_rest_api_get_all_debug_connections_with_http_info(self, username, password, **kwargs):  # noqa: E501
        """Variant of :meth:`cf_manager_rest_api_get_all_debug_connections`
        that can also return HTTP status/headers (control kwargs decide)."""
        return self._request(
            'cf_manager_rest_api_get_all_debug_connections',
            '/debug/open_ila_connections', 'GET',
            required={'username': username, 'password': password},
            kwargs=kwargs,
            response_type='list[InlineResponse2003]',
            accept_json=True)

    def cf_manager_rest_api_get_all_debug_connections_of_user(self, username, password, **kwargs):  # noqa: E501
        """Returns all open `hw_server` of a user.

        Pass ``async_req=True`` to get a request thread instead of the
        result.

        :param str username: OpenStack username (required)
        :param str password: OpenStack password (required)
        :return: list[InlineResponse2003]
        """
        kwargs['_return_http_data_only'] = True
        return self.cf_manager_rest_api_get_all_debug_connections_of_user_with_http_info(
            username, password, **kwargs)

    def cf_manager_rest_api_get_all_debug_connections_of_user_with_http_info(self, username, password, **kwargs):  # noqa: E501
        """Variant of :meth:`cf_manager_rest_api_get_all_debug_connections_of_user`
        that can also return HTTP status/headers (control kwargs decide)."""
        return self._request(
            'cf_manager_rest_api_get_all_debug_connections_of_user',
            '/debug/ila_connections', 'GET',
            required={'username': username, 'password': password},
            kwargs=kwargs,
            response_type='list[InlineResponse2003]',
            accept_json=True)

    def cf_manager_rest_api_get_debug_connection(self, username, password, instance_id, **kwargs):  # noqa: E501
        """Requests a connection to the `hw_server` of this instance.

        Returns an IP address and TCP port for the *remote debugging
        dialog* in `vivado_lab`.  Only the requesting IP (or the one given
        via ``ip_address``) is allowed to connect; if a debug connection to
        the instance is already open, its existing data is returned.  You
        may only interact with the device named in the response's
        ``device`` field (probes can be shared via a JTAG chain).
        Pass ``async_req=True`` to get a request thread instead of the
        result.

        :param str username: OpenStack username (required)
        :param str password: OpenStack password (required)
        :param str instance_id: ROLE instance unique identifier (required)
        :param str ip_address: IPv4 address of the debugging client, if
            different from the requesting client
        :return: InlineResponse2002
        """
        kwargs['_return_http_data_only'] = True
        return self.cf_manager_rest_api_get_debug_connection_with_http_info(
            username, password, instance_id, **kwargs)

    def cf_manager_rest_api_get_debug_connection_with_http_info(self, username, password, instance_id, **kwargs):  # noqa: E501
        """Variant of :meth:`cf_manager_rest_api_get_debug_connection`
        that can also return HTTP status/headers (control kwargs decide)."""
        return self._request(
            'cf_manager_rest_api_get_debug_connection',
            '/debug/ila_connection/{instance_id}', 'GET',
            required={'username': username, 'password': password,
                      'instance_id': instance_id},
            kwargs=kwargs,
            path_param_names=('instance_id',),
            optional_names=('ip_address',),
            response_type='InlineResponse2002',
            accept_json=True)

    def cf_manager_rest_api_get_flight_recorder_cluster(self, username, password, cluster_id, **kwargs):  # noqa: E501
        """Requests network runtime information of all instances in a cluster.

        Returns the status of the Network Routing Core of every instance in
        this cluster.  `Attention:` counters may lag the processed packets
        by a few seconds.  Pass ``async_req=True`` to get a request thread
        instead of the result.

        :param str username: OpenStack username (required)
        :param str password: OpenStack password (required)
        :param int cluster_id: ID of a cluster (required)
        :return: None
        """
        kwargs['_return_http_data_only'] = True
        return self.cf_manager_rest_api_get_flight_recorder_cluster_with_http_info(
            username, password, cluster_id, **kwargs)

    def cf_manager_rest_api_get_flight_recorder_cluster_with_http_info(self, username, password, cluster_id, **kwargs):  # noqa: E501
        """Variant of :meth:`cf_manager_rest_api_get_flight_recorder_cluster`
        that can also return HTTP status/headers (control kwargs decide)."""
        return self._request(
            'cf_manager_rest_api_get_flight_recorder_cluster',
            '/clusters/{cluster_id}/flight_recorder_data', 'GET',
            required={'username': username, 'password': password,
                      'cluster_id': cluster_id},
            kwargs=kwargs,
            path_param_names=('cluster_id',))

    def cf_manager_rest_api_get_flight_recorder_instance(self, username, password, instance_id, **kwargs):  # noqa: E501
        """Requests network runtime information of one FPGA instance.

        Returns the status of this instance's Network Routing Core.
        `Attention:` counters may lag the processed packets by a few
        seconds.  Pass ``async_req=True`` to get a request thread instead
        of the result.

        :param str username: OpenStack username (required)
        :param str password: OpenStack password (required)
        :param str instance_id: ROLE instance unique identifier (required)
        :return: None
        """
        kwargs['_return_http_data_only'] = True
        return self.cf_manager_rest_api_get_flight_recorder_instance_with_http_info(
            username, password, instance_id, **kwargs)

    def cf_manager_rest_api_get_flight_recorder_instance_with_http_info(self, username, password, instance_id, **kwargs):  # noqa: E501
        """Variant of :meth:`cf_manager_rest_api_get_flight_recorder_instance`
        that can also return HTTP status/headers (control kwargs decide)."""
        return self._request(
            'cf_manager_rest_api_get_flight_recorder_instance',
            '/instances/{instance_id}/flight_recorder_data', 'GET',
            required={'username': username, 'password': password,
                      'instance_id': instance_id},
            kwargs=kwargs,
            path_param_names=('instance_id',))
| StarcoderdataPython |
3253339 | # * -- utf-8 -- * # python3
# Author: Tang Time:2018/4/17
import math
for i in range(100000):
x = int(math.sqrt(i+100))
y = int(math.sqrt(i+268))
if (x*x == i+100) and (y*y ==i+268):
print(i)
'''简述:一个整数,它加上100和加上268后都是一个完全平方数 提问:请问该数是多少?''' | StarcoderdataPython |
3359362 | from django.core.exceptions import ObjectDoesNotExist
from rest_framework import serializers
from integrations.slack.models import SlackConfiguration, SlackEnvironment
from .exceptions import SlackChannelJoinError
from .slack import SlackWrapper
class SlackEnvironmentSerializer(serializers.ModelSerializer):
    """Serializer for per-environment Slack integration settings."""
    class Meta:
        model = SlackEnvironment
        fields = ("id", "channel_id", "enabled")
    def save(self, **kwargs):
        """Persist the environment's Slack settings.

        Looks up the project's SlackConfiguration (the OAuth api token),
        joins the configured channel, then delegates to the parent
        ``save()``.  Raises ``serializers.ValidationError`` when no token
        exists for the project or when joining the channel fails.
        """
        try:
            # The view is expected to pass `environment=...` into save();
            # the Slack api token is stored per-project.
            slack_configuration = SlackConfiguration.objects.get(
                project=kwargs.get("environment").project
            )
        except ObjectDoesNotExist as e:
            raise serializers.ValidationError(
                "Slack api token not found. Please generate the token using oauth"
            ) from e
        kwargs.update(slack_configuration=slack_configuration)
        try:
            # Join the channel up front so a bad channel id fails the save
            # instead of producing a broken integration later.
            SlackWrapper(
                api_token=slack_configuration.api_token,
                channel_id=self.validated_data.get("channel_id"),
            ).join_channel()
        except SlackChannelJoinError as e:
            raise serializers.ValidationError(e) from e
        return super().save(**kwargs)
class SlackChannelSerializer(serializers.Serializer):
    """One Slack channel: its human-readable name plus its Slack id."""
    channel_name = serializers.CharField()
    channel_id = serializers.CharField()
class SlackChannelListSerializer(serializers.Serializer):
    """A page of Slack channels plus the cursor for fetching the next page."""
    cursor = serializers.CharField()
    channels = serializers.ListField(child=SlackChannelSerializer())
class SlackOauthInitQueryParamSerializer(serializers.Serializer):
    """Query params for starting the Slack OAuth flow; a non-blank URL is required."""
    redirect_url = serializers.URLField(allow_blank=False)
class SlackChannelListQueryParamSerializer(serializers.Serializer):
    """Pagination query params for listing channels (limit 1-1000, default 20)."""
    limit = serializers.IntegerField(default=20, max_value=1000, min_value=1)
    cursor = serializers.CharField(required=False)
| StarcoderdataPython |
1762912 | <filename>noogle/gcal.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module holds the interface to Google calendar.
"""
# Imports #####################################################################
import arrow
from apiclient import discovery
from googleapiclient.errors import HttpError
from .db import session
from .google_auth import get_credentials
from .helpers import print_log
from .models import Event
from .settings import settings
# Metadata ####################################################################
__author__ = "<NAME>"
__creationDate__ = "08-JUN-2017"
def setup():
    """
    Exercise the Google Calendar connection and print upcoming events.

    Fetching events via get_next_gcal_events() goes through
    get_credentials(), so running this once sets up / verifies your
    calendar credentials; the matching upcoming events are then printed
    as a smoke test.  Events are wrapped with ``commit=False``, so
    nothing is persisted to the database.
    """
    events = get_next_gcal_events()
    if not events:
        print_log("No upcoming events found.")
        return
    for event in events:
        # Wrap the raw gcal event dict in our Event model purely for display.
        e = Event.create_from_gcal(event, commit=False)
        print_log(
            "{:<19s}({:^9}) {}".format(
                e.scheduled_date.format("YYYY-MM-DD h:mmA"), e.state, e.name
            )
        )
def get_next_gcal_events(max_results=10, q_filter="nest", since=None):
    """
    Return upcoming Google Calendar events matching ``q_filter``.

    :param int max_results: The maximum number of events to return
    :param str q_filter: Google's "advanced search syntax" free-text filter
    :param since: Only return events starting after this time. Expected to be
        an ``arrow``-style object (supports ``.to("UTC")``) -- TODO confirm.
        When omitted, defaults to midnight ``settings.calendar.lookback``
        days in the past.
    :raises ValueError: if the configured calendar cannot be found (HTTP 404)
    """
    calendar_id = settings.calendar.name
    credentials = get_credentials(
        name="calendar", oauth_token=settings.calendar.token_file
    )
    service = discovery.build("calendar", "v3", credentials=credentials)

    if since:
        since = since.to("UTC").isoformat()
    else:
        # Default window start: local midnight, `lookback` days ago.
        lookback = settings.calendar.lookback
        since = (
            arrow.now()
            .replace(hour=0, minute=0, second=0, microsecond=0)
            .shift(days=-1 * lookback)
            .isoformat()
        )

    try:
        events_result = (
            service.events()
            .list(
                calendarId=calendar_id,
                timeMin=since,
                maxResults=max_results,
                singleEvents=True,
                orderBy="startTime",
                q=q_filter,
            )
            .execute()
        )
    except HttpError as e:
        if e.resp["status"] == "404":
            # Chain the HttpError so the underlying API failure stays visible
            # in the traceback.
            raise ValueError(
                'Could not find your calendar: "%s". Please check your settings!'
                % calendar_id
            ) from e
        raise

    return events_result.get("items", [])
def purge_db():
    """Delete every cached calendar ``Event`` row from the database.

    NOTE(review): the previous docstring claimed this removed "Structures and
    Thermostats", but only Event rows are touched here.
    """
    session.query(Event).delete()
| StarcoderdataPython |
3200470 |
import os
import sys
import os.path
import logging
import time
import pprint
import shutil
import traceback
import settings
import pArch
import dbPhashApi as dbApi
import scanner.fileHasher
# Maximum hamming distance between two phashes for them to count as duplicates.
PHASH_DISTANCE_THRESHOLD = 2

BAD_PHASHES = [
    # Phash value of '0' is commonly a result of an image where there is no
    # content, such as a blank page. There are 79 THOUSAND of these in my
    # collection. As a result, the existence check is prohibitively slow, so
    # we just short-circuit and ignore it.
    0,
    # All-bits-set variant of the same degenerate flat-image hash.
    -9223372036854775808,  # 0x8000000000000000
]
class ArchiveProcessorException(Exception):
    """Base class for all errors raised while deduplicating an archive."""
    pass
class DatabaseDesynchronizedError(ArchiveProcessorException):
    """Database/filesystem mismatch error (defined for callers; no raise sites in this file)."""
    pass
class InvalidArchiveContentsException(ArchiveProcessorException):
    """The archive's contents fail internal-consistency sanity checks."""
    pass
class InvalidArchivePhashContentsException(InvalidArchiveContentsException):
    """Too many phash-identical (visually duplicate) images inside one archive."""
    pass
class InvalidArchiveMd5ContentsException(InvalidArchiveContentsException):
    """Too many byte-identical files inside one archive."""
    pass
class ProxyDbBase(object):
    """Thin indirection layer over the phash database connection.

    Exists so unit tests can subclass and override ``getDbConnection()`` to
    point at a testing database instead of the production one.
    """

    def __init__(self):
        self.db = self.getDbConnection()

    # Overridden in child classes so the unit tests can redirect
    # db access to the testing database by returning a different DB
    # connection object.
    def getDbConnection(self):  # pragma: no cover
        return dbApi.PhashDbApi()
class ArchChecker(ProxyDbBase):
    '''
    Class to encapsulate the required object to check if `archPath` is unique.
    '''

    # Hash-worker class; a class attribute so tests can substitute a stub.
    hasher = scanner.fileHasher.HashThread

    def __init__(self, archPath, pathNegativeFilter=None, pathPositiveFilter=None, negativeKeywords=None):
        '''
        Params:
            archPath (str): Path to the archive to check for uniqueness.
            pathNegativeFilter (list): default = ``[]``
                List of path prefixes to exclude from matching.
                By default, an empty list, leading to all possible paths being used.
            pathPositiveFilter (list): default = ``[]``
                When non-empty, only matches under these path prefixes count.
            negativeKeywords (list): default = ``[]``
                Matches whose path contains any of these substrings are ignored.
        '''
        super().__init__()
        self.negativeMaskedPaths = pathNegativeFilter or []
        self.positiveMaskedPaths = pathPositiveFilter or []
        self.negativeKeywords = negativeKeywords or []

        self.archPath = archPath
        # Parses the archive and exposes per-file hash/phash information.
        self.arch = pArch.PhashArchive(archPath)

        self.log = logging.getLogger("Main.Deduper")
        self.log.info("ArchChecker Instantiated on '%s'", archPath)
# If getMatchingArchives returns something, it means we're /not/ unique,
# because getMatchingArchives returns matching files
def isBinaryUnique(self):
'''
Is the archive this class was instantiated on binary unique (e.g. there
is a byte-for-byte complete duplicate of it) elsewhere on the filesystem,
that still exists.
Returns:
Boolean: True if unique, False if not.
'''
ret = self.getMatchingArchives()
if len(ret):
return False
return True
def isPhashUnique(self, searchDistance=None):
'''
Is the archive this class was instantiated on phash unique, where the
duplicating files elsewhere on the filesystem still exist.
Phash-unique means there are matches for each of the files in the archive,
including searching by phash within distance `searchDistance` for any files
that are images.
Returns:
Boolean: True if unique, False if not.
'''
if searchDistance is None:
searchDistance=PHASH_DISTANCE_THRESHOLD
ret = self.getPhashMatchingArchives(searchDistance, getAllCommon=False)
if len(ret):
return False
return True
def getBestBinaryMatch(self):
'''
Get the filesystem path of the "best" matching archive.
"Best" is a somewhat fuzzy concept. In this case, it's assumed to be
the archive with the largest number of images in common.
If two archives share the same number of matching images, the larger
of the two matching archives is selected. If they're the same size,
the chosen archive will be chosen arbitrarily.
Returns:
String: Path to archive on the local filesystem. Path is verified to
exist at time of return.
If the current archive contains unique files, this will return a empty string.
'''
ret = self.getMatchingArchives()
return self._getBestMatchingArchive(ret)
def getBestPhashMatch(self, distance=None):
'''
Get the filesystem path of the "best" matching archive.
"Best" is a somewhat fuzzy concept. In this case, it's assumed to be
the archive with the largest number of images in common.
If two archives share the same number of matching images, the larger
of the two matching archives is selected. If they're the same size,
the chosen archive will be chosen arbitrarily.
Identical to `getBestBinaryMatch()`, except including phash-matches
in the search for matches.
Returns:
String: Path to archive on the local filesystem. Path is verified to
exist at time of return.
'''
if distance is None:
distance=PHASH_DISTANCE_THRESHOLD
ret = self.getPhashMatchingArchives(distance, getAllCommon=False)
return self._getBestMatchingArchive(ret)
def getSignificantlySimilarArches(self, searchDistance=None):
'''
This function returns a dict of lists containing archives with files in common with
the current archive. It only operates using phash similarity metrics (as phash searches
are intrinsically a superset of binary match similarity metrics).
The dict keys are the number of files in common, and the list is a number of filesystem-
paths to the intersecting archives.
'''
if searchDistance is None:
searchDistance=PHASH_DISTANCE_THRESHOLD
common = self.getPhashMatchingArchives(getAllCommon=True, searchDistance=searchDistance)
ret = self._processMatchesIntoRet(common)
# Now, we truncate the return common set to every item which has >
# the mean number of items in common
# This is a preventative measure against things like scanlators which
# have a credit page they put EVERYWHERE, and we therefor want to
# disregard.
# print("Common:")
# pprint.pprint(common)
# print("Ret:")
# pprint.pprint(ret)
keys = list(ret.keys())
if not keys:
return ret
mean = (sum(keys) / len(keys))
for key in [key for key in keys if key < mean]:
ret.pop(key)
# And pop all items which have only one item in common
if 1 in ret:
ret.pop(1)
# Sort the return, to make it deterministic
for item in ret.values():
item.sort()
return ret
def _processMatchesIntoRet(self, matches):
'''
This takes a dict of items where each key is a filesystem path, and the value
is a set of internal-paths that are matched in the archive at the key filesystem path.
It transforms that dict into another dict where the key is the number of matches
that a filesystem path has, and the value is a list of filesystem paths that
had `key` matches.
'''
ret = {}
for key in matches.keys():
ret.setdefault(len(matches[key]), []).append(key)
# Make the return ordering deterministic
for key in ret.keys():
ret[key].sort()
return ret
def _shouldSkipFile(self, fileN, fileType):
'''
Internal method call. Is used to filter out files that are considered
"garbage" from evaluation in the matching search. This includes things
like the windows "Thumbs.db" file, some of the information notes generated
by the automated ad-removal system in MangaCMS, and `__MACOSX` resource
fork files&directory that Crapple loves to spew all over any non-HFS
volumes.
Returns:
Boolean: True if the file is garbage, False if it is not.
'''
thumbs_file_types = [
# So debian wheezy is so out of date their libmagick
# doesn't appear to have the mimetype parameter.
'Composite Document File V2 Document, No summary info',
'application/CDFV2-corrupt',
'application/CDFV2',
]
if fileN.endswith("Thumbs.db") and fileType in thumbs_file_types:
self.log.info("Windows thumbnail database. Ignoring")
return True
# We have to match both 'ASCII text', and the occational 'ASCII text, with no line terminators'
if fileN.endswith("deleted.txt") and fileType =='text/plain':
self.log.info("Found removed advert note. Ignoring")
return True
if '__MACOSX/' in fileN:
self.log.info("Mac OS X cache dir. Ignoring")
return True
return False
def _getBestMatchingArchive(self, ret):
'''
Internal function that drives `getBestBinaryMatch()` and `getBestPhashMatch()`.
"Best" match is kind of a fuzzy term here. I define it as the archive with the
most files in common with the current archive.
If there are multiple archives with identical numbers of items in common,
the "best" is then the largest of those files
(I assume that the largest is probably either a 1. volume archive, or
2. higher quality)
'''
# Short circuit for no matches
if not len(ret):
return None
tmp = {}
for key in ret.keys():
tmp.setdefault(len(ret[key]), []).append(key)
maxKey = max(tmp.keys())
# If there is only one file with the most items, return that.
if len(tmp[maxKey]) == 1:
return tmp[maxKey].pop()
items = [(os.path.getsize(item), item) for item in tmp[maxKey]]
items.sort()
# TODO: The fitness of the match should additionally consider the number of files in each dir.
# e.g. if the current file has 100 files, with 10 in common with another file with
# 100 files, that's not really a good match. On the other hand, if the current
# file has 100 files, with 10 in common with another file which has a total of
# 10 files in it, it's an excellent match since the current file is a complete
# superset of the other file.
# Finally, sort by size, return the biggest one of them
return items.pop()[-1]
def _getBinaryMatchesForHash(self, hexHash):
'''
Params:
hexHash (String): The hash to match against.
Returns:
dict of sets. Dict keys are filesystem paths, and the set contains
the internal path of each item in the key that has the query key
This function searches for all items with a binary hash of `hexHash`, masks out
any paths in `self.negativeMaskedPaths`, and then checks for file existence. If the file exists,
it's inserted into a local dictionary with the key being the filesystem path,
and the value being a set into which the internal path is inserted.
'''
matches = {}
dupsIn = self.db.getByHash(hexHash, wantCols=['fsPath', 'internalPath'])
for fsPath, internalPath in dupsIn:
# Mask out items on the same path.
if fsPath == self.archPath:
continue
# Do negative path masking
if any([fsPath.startswith(badpath) for badpath in self.negativeMaskedPaths]):
continue
# And positive masking
if self.positiveMaskedPaths and not any([fsPath.startswith(badpath) for badpath in self.positiveMaskedPaths]):
continue
if self.negativeKeywords and any([tmp in fsPath for tmp in self.negativeKeywords]):
continue
exists = os.path.exists(fsPath)
if exists:
matches.setdefault(fsPath, set()).add(internalPath)
elif not exists:
self.log.warning("Item '%s' no longer exists!", fsPath)
self.db.deleteDbRows(fspath=fsPath)
return matches
def getMatchingArchives(self):
'''
This function does two things.
1. It iterates over all the files in an archive, checking each file for binary uniqueness
via MD5sums.
2. Accumulates a list of each file with any files in common with the archive
this class was instantiated on.
The return value can be two things:
If the instantiated archive contains unique items, the return value is
an empty set (`{}`).
If the target archive does not contain unique files, the return value is a
dict of sets, where the key is the filesystem path of each archive containing
matching items, and the value is a set containing the items that the
filesystem-path-key has in common with the target archive.
'''
self.log.info("Checking if %s contains any binary unique files.", self.archPath)
matches = {}
for fileN, infoDict in self.arch.iterHashes():
if self._shouldSkipFile(fileN, infoDict['type']):
continue
# get a dict->set of the matching items
matchDict = self._getBinaryMatchesForHash(infoDict['hexHash'])
if matchDict:
# If we have matching items, merge them into the matches dict->set
for key in matchDict.keys():
matches.setdefault(key, set()).update(matchDict[key])
else:
# Short circuit on unique item, since we are only checking if ANY item is unique
self.log.info("It contains at least one unique file(s).")
return {}
self.log.info("It does not contain any binary unique file(s).")
return matches
def _loadFileContents(self):
ret = []
for fileN, infoDict in self.arch.iterHashes():
if self._shouldSkipFile(fileN, infoDict['type']):
continue
ret.append((fileN, infoDict))
return ret
def _doRowLookup(self, matchids, resolution):
keys = ["dbid", "fspath", "internalpath", "itemhash", "phash", "itemkind", "imgx", "imgy"]
# self.log.info("Row lookup for %s (%s)", matchids, resolution)
ret_rows = []
for matchid in matchids:
row = self.db.getItem(dbId=matchid)
# Sometimes a row has been deleted without being removed from the tree.
# If this has happened, getItem() will return an empty list.
# Don't return that, if it happens
if not row:
self.log.info("Row deleted without updating tree")
continue
row = dict(zip(keys, row))
# Mask out items on the same path.
if row['fspath'] == self.archPath:
continue
# Mask with the masked-paths array
if any([row['fspath'].startswith(badpath) for badpath in self.negativeMaskedPaths]):
print("(negativeMaskedPaths) MaskedPath: ", row['fspath'], " in ", self.negativeMaskedPaths)
continue
# And positive masking
if self.positiveMaskedPaths and not any([row['fspath'].startswith(badpath) for badpath in self.positiveMaskedPaths]):
print("(positiveMaskedPaths) MaskedPath: ", row['fspath'], " in ", self.negativeMaskedPaths)
continue
# I genuinely cannot see how this line would get hit, but whatever.
if row['phash'] is None and resolution: #pragma: no cover
raise ValueError("Line is missing phash, yet in phash database? DbId = '%s'", row['dbid'])
if (not row['imgx'] or not row['imgy']) and resolution:
self.log.warning("Image with no resolution stats! Wat?.")
self.log.warning("Image: '%s', '%s'", row['fspath'], row['internalpath'])
continue
if resolution and len(resolution) == 2:
res_x, res_y = resolution
if res_x > row['imgx'] or res_y > row['imgy']:
# self.log.info("Filtering phash match due to lower resolution.")
continue
# else:
# self.log.info("Image not resolution filtered: (%s x %s) - (%s x %s).", res_x, res_y, row['imgx'], row['imgy'])
if not os.path.exists(row['fspath']):
self.log.info("File deleted without updating tree")
continue
ret_rows.append(row)
# Pack returned row tuples into nice dicts for easy access
return ret_rows
    def _isBadPee(self, phash):
        # True for degenerate phash values (blank/flat images) that are far
        # too common to be useful as match evidence; see BAD_PHASHES.
        return phash in BAD_PHASHES
    def _doHashSearches(self, filelist, searchDistance, resolutionFilter):
        '''
        Annotate each ``(fileN, infoDict)`` entry of ``filelist`` IN PLACE
        with its match candidates, then resolve the candidates to db rows.

        Keys added to each infoDict: 'fileN', 'binMatchIds', and (for files
        with a phash) 'pMatchIds'. 'bMatches'/'pMatches' row lists are only
        added when the phash isn't degenerate and the candidate set is small
        enough (<= 100) to be worth existence-checking.

        Returns ``filelist`` (the same, mutated, list).
        '''
        for fileN, infoDict in filelist:
            infoDict["fileN"] = fileN

        # Do the normal binary lookup.
        for dummy_fileN, infoDict in filelist:
            # get a dict->set of the matching items
            infoDict['binMatchIds'] = [tmp for tmp, in self.db.getByHash(infoDict['hexHash'], wantCols=['dbid'])]

        # Then, atomically do the phash searches in one batched call.
        matches = self.db.searchPhashSet([infoDict['pHash'] for fileN, infoDict in filelist if infoDict['pHash'] is not None], searchDistance)
        for fileN, infoDict in filelist:
            if infoDict['pHash'] is not None:
                infoDict['pMatchIds'] = matches[infoDict['pHash']]

        # Finally, resolve the candidate IDs from both searches to db rows.
        for fileN, infoDict in filelist:
            if resolutionFilter:
                imgDims = (infoDict['imX'], infoDict['imY'])
            else:
                imgDims = None

            if 'pMatchIds' in infoDict:
                if self._isBadPee(infoDict['pHash']):
                    self.log.warning("Skipping any checks for hash value of '%s', as it's uselessly common.", infoDict['pHash'])
                elif len(infoDict['pMatchIds']) > 100:
                    self.log.info("Skipping existence check due to quantity of candidate matches.")
                else:
                    infoDict['pMatches'] = self._doRowLookup(infoDict['pMatchIds'], imgDims)

            if 'binMatchIds' in infoDict:
                if self._isBadPee(infoDict['pHash']):
                    self.log.warning("Skipping any checks for hash value of '%s', as it's uselessly common.", infoDict['pHash'])
                elif len(infoDict['binMatchIds']) > 100:
                    self.log.info("Skipping existence check due to quantity of candidate matches.")
                else:
                    # Resolution filtering is pointless here, since we matched
                    # on the MD5s, rather than image hashes.
                    infoDict['bMatches'] = self._doRowLookup(infoDict['binMatchIds'], False)

        return filelist
    def _checkHashesOk(self, fileContent, searchDistance):
        '''
        Integrity-check the loaded archive contents; primarily catches
        archives whose files were mis-hashed due to library issues.

        A single archive should be mostly unique internally; if an archive
        has 10 images and only 5 are unique even within itself, something is
        probably wrong somewhere.

        Raises:
            InvalidArchivePhashContentsException: fewer than 50% of the
                images are phash-unique within the archive.
            InvalidArchiveMd5ContentsException: md5-only (non-image) files
                are <= 60% unique, when there are at least as many of them
                as there are images.
        '''
        # Files with no phash (non-images): md5 uniqueness ratio.
        md5s = [filed['hexHash'] for filen, filed in fileContent if not filed['pHash']]
        muniqueratio = len(set(md5s)) / max(1, len(md5s))

        # Images: count those phash-unique w.r.t. the images seen before them.
        phashes = [filed['pHash'] for filen, filed in fileContent if filed['pHash']]
        so_far = []
        unique = 0
        for phash in phashes:
            similarity = [dbApi.hammingDistance(phash, other) for other in so_far]
            coincides = [tmp for tmp in similarity if tmp <= searchDistance]
            so_far.append(phash)
            if not coincides:
                unique += 1

        puniqratio = unique / max(1, len(phashes))
        hashratio = len(phashes) / max(1, len(md5s))

        if len(phashes) and puniqratio < 0.5:
            raise InvalidArchivePhashContentsException("Too many identical images (phash-search) in the archive!")

        # If there are any md5-only files, check they're sufficiently unique.
        # Only do this if there are more md5s than images.
        if len(md5s) and muniqueratio <= 0.6 and hashratio <= 1:
            raise InvalidArchiveMd5ContentsException("Too many identical files in the archive!")
    # This really, /really/ feels like it should be several smaller functions, but I cannot see any nice ways to break it up.
    # It's basically like 3 loops rolled together to reduce processing time and lookups, and there isn't much I can do about that.
    def getPhashMatchingArchives(self, searchDistance=None, getAllCommon=False, resolutionFilter=True):
        '''
        Mirror of `getMatchingArchives()`, except that phash-duplicates are
        used to identify matches as well as simple binary equality.

        The `getAllCommon` parameter overrides the early-return behaviour if
        one of the scanned items is unique: when True, every item in the
        archive is phash-searched even if some are unique, and resolution
        filtering of match results is disabled. This is necessary for finding
        commonalities between archives, which is intended to return archives
        that the current archive has potentially superseded.

        Returns:
            {} when the archive has at least one unique file (unless
            `getAllCommon`); otherwise {fsPath: {(containerFile, fileName):
            True}} keyed on the matching archive's path.
        '''
        if searchDistance is None:
            searchDistance = PHASH_DISTANCE_THRESHOLD

        self.log.info("Scanning for phash duplicates.")
        matches = {}

        fc = self._loadFileContents()
        # Bail out early on internally-degenerate archives.
        self._checkHashesOk(fc, searchDistance)
        hashMatches = self._doHashSearches(fc, searchDistance, resolutionFilter)

        for container_filen, infoDict in hashMatches:
            fileN = infoDict['fileN']
            if self._shouldSkipFile(fileN, infoDict['type']):
                continue

            # Handle cases where an internal file is not an image: fall back
            # to binary (MD5) duplicate checking for it.
            if infoDict['pHash'] is None:
                self.log.warning("No phash for file '%s'! Wat?", fileN)
                self.log.warning("Returned pHash: '%s'", infoDict['pHash'])
                self.log.warning("Guessed file type: '%s'", infoDict['type'])
                self.log.warning("Should skip: '%s'", self._shouldSkipFile(fileN, infoDict['type']))
                self.log.warning("Using binary dup checking for file!")

                # If candidate IDs exist but 'bMatches' was never filled in,
                # the row lookup was skipped because of candidate quantity.
                # As such, just continue on.
                if 'binMatchIds' in infoDict and not 'bMatches' in infoDict:
                    continue

                matchList = infoDict['bMatches']
                if matchList:
                    for matchDict in matchList:
                        # Merge matching items into the matches dict->dict.
                        matches.setdefault(matchDict['fspath'], {})[(container_filen, fileN)] = True
                elif not getAllCommon:
                    # Short circuit on unique item, since we are only checking if ANY item is unique
                    self.log.info("It contains at least one unique file(s).")
                    return {}

            # Any non-none phashes get the normal (perceptual) lookup behaviour.
            else:
                # If we have a phash, and yet pMatches is not present, the
                # duper skipped loading the matching files because of
                # quantity. As such, just continue on.
                if 'pHash' in infoDict and 'pMatchIds' in infoDict and not 'pMatches' in infoDict:
                    continue

                matchList = infoDict['pMatches']
                if matchList:
                    for matchDict in matchList:
                        # If we have matching items, merge them into the matches dict->set
                        # These are stored with the key being the item in the /original archive/ they
                        # match to. This way, if one file in the current archive matches
                        # to many images another archive, it will only get counted as a single
                        # match.
                        # This is because there are some archives with many, many white pages in them.
                        # Therefore, if a deduplicated archive has a single white page, it was
                        # resulting in an errant high similarity rating with the archive containing
                        # many duplicate files, which produces a mis-link in the post-deduplication
                        # relinking.
                        matches.setdefault(matchDict['fspath'], {})[(container_filen, fileN)] = True
                elif not getAllCommon:
                    # Short circuit on unique item, since we are only checking if ANY item is unique
                    self.log.info("It contains at least one unique file(s).")
                    self.log.info("Archive contains at least one unique phash(es).")
                    self.log.info("First unique file: '%s'", fileN)
                    return {}

        self.log.info("Archive does not contain any unique phashes.")
        return matches
    def deleteArch(self, moveToPath=False):
        '''
        Delete the target archive (database rows are removed in both cases).

        If ``moveToPath`` is specified, the archive will be moved there instead, as an option
        for deferred deletion.

        When ``moveToPath`` is specified, the current path is prepended to the filename, by
        replacing all directory delimiters (``/``) with semicolons (``;``). This allows the
        moved archive to be returned to its original fs location in (almost) all cases.
        '''
        self.db.deleteDbRows(fspath=self.archPath)
        if not moveToPath:
            self.log.warning("Deleting archive '%s'", self.archPath)
            os.remove(self.archPath)
        else:
            dst = self.archPath.replace("/", ";")
            dst = os.path.join(moveToPath, dst)
            self.log.info("Moving item from '%s'", self.archPath)
            self.log.info(" to '%s'", dst)
            try:
                # Retry the move a few times; transient filesystem errors
                # occasionally make the first attempt fail.
                for x in range(3):
                    try:
                        shutil.move(self.archPath, dst)
                        break
                    except OSError:
                        self.log.error("Failure moving file?")
                        time.sleep(0.1)
                        # Out of retries: re-raise into the outer handler.
                        if x == 2:
                            raise
            except KeyboardInterrupt:  # pragma: no cover (Can't really test keyboard interrupts)
                raise
            except (OSError, FileNotFoundError):
                # NOTE(review): FileNotFoundError is a subclass of OSError,
                # so listing both is redundant but harmless.
                self.log.error("ERROR - Could not move file!")
                self.log.error(traceback.format_exc())
    def deleteArchFromDb(self):
        '''Remove every database row keyed on this archive's filesystem path.'''
        self.db.deleteDbRows(fspath=self.archPath)
def addArch(self):
'''
Add the hash values from the target archive to the database, with the current
archive FS path as it's location.
'''
self.log.info("Adding archive to database. Hashing file: %s", self.archPath)
# Delete any existing hashes that collide
self.deleteArchFromDb()
# And tell the hasher to process the new archive.
hasher = self.hasher(inputQueue=None, outputQueue=None, runMgr=None)
hasher.processArchive(self.archPath)
    # Proxy through to the archChecker from UniversalArchiveInterface
    @staticmethod
    def isArchive(archPath):
        '''
        Simple staticmethod boolean check. Used to determine if the item
        at the passed path (``archPath``) is actually an archive, by
        looking at it with ``libmagic``.
        '''
        return pArch.PhashArchive.isArchive(archPath)
def getSignificantlySimilarArches(filePath, distance=4):
    """
    Module-level wrapper: build an ArchChecker for ``filePath`` (applying the
    globally configured negative path masks) and return its
    significantly-similar-archives map.

    On any failure the traceback is logged line-by-line and the string
    sentinel ``"error!"`` is returned instead of a dict — callers must check
    the return type.
    """
    log = logging.getLogger("Main.DedupServer")
    try:
        ck = ArchChecker(filePath, pathNegativeFilter=settings.masked_path_prefixes)
        return ck.getSignificantlySimilarArches(searchDistance=distance)
    except Exception:
        log.critical("Exception when processing item!")
        for line in traceback.format_exc().split("\n"):
            log.critical(line)
        return "error!"
def processDownload(filePath, pathNegativeFilter=None, distance=None, moveToPath=None, checkClass=ArchChecker, cross_match=True, pathPositiveFilter=None, negativeKeywords=None):
    '''
    Process the file `filePath`. If it's a phash or binary duplicate, it is
    deleted (or moved to ``moveToPath`` for deferred deletion).

    The `checkClass` param allows the checking class to be overridden for testing.

    Returns:
        (tag, bestMatch, common) tuple.
        `tag` is a string of space-separated state tags: `deleted`,
        `was-duplicate`, `phash-duplicate` on the duplicate paths;
        `warning phash-conflict` / `damaged` on failures; empty string when
        the archive was unique and added.
        `bestMatch` is the fspath of the best-matching other archive on the
        duplicate early-return paths (NOTE(review): on all other paths the
        local `bestMatch` is never reassigned and None is returned).
        `common` is the significantly-similar-archives map (None when
        ``cross_match`` is False; {} when the checker failed before it ran).
    '''
    log = logging.getLogger("Main.DedupServer")
    status = ''
    bestMatch = None
    common = {}

    # Hackyness to work around some strange behaviour in the
    # netref objects from rpyc: copy the sequences element-by-element into
    # plain local lists.
    pathNegativeFilter_local = []
    pathPositiveFilter_local = []
    if isinstance(pathNegativeFilter, (list, tuple)):
        for item in pathNegativeFilter:
            pathNegativeFilter_local.append(item)
    if isinstance(pathPositiveFilter, (list, tuple)):
        for item in pathPositiveFilter:
            pathPositiveFilter_local.append(item)

    # Globally configured masks always apply on top of the caller's.
    pathNegativeFilter_local.extend(settings.masked_path_prefixes)

    try:
        ck = checkClass(filePath, pathNegativeFilter=pathNegativeFilter_local, pathPositiveFilter=pathPositiveFilter_local, negativeKeywords=negativeKeywords)

        if cross_match:
            common = ck.getSignificantlySimilarArches(searchDistance=distance)
        else:
            common = None

        # Binary duplicates take precedence over phash duplicates.
        binMatch = ck.getBestBinaryMatch()
        if binMatch:
            ck.deleteArch(moveToPath=moveToPath)
            return 'deleted was-duplicate', binMatch, common

        pMatch = ck.getBestPhashMatch(distance=distance)
        if pMatch:
            ck.deleteArch(moveToPath=moveToPath)
            return 'deleted was-duplicate phash-duplicate', pMatch, common

        # Unique archive: record its hashes in the database.
        ck.addArch()

    except InvalidArchivePhashContentsException:
        log.critical("Excessive duplicates when processing item!")
        for line in traceback.format_exc().split("\n"):
            log.critical(line)
        status += " warning phash-conflict"
    except Exception:
        log.critical("Exception when processing item!")
        for line in traceback.format_exc().split("\n"):
            log.critical(line)
        status += " damaged"

    status = status.strip()
    log.info("Returning status '%s' for archive '%s'. Best Match: '%s'", status, filePath, bestMatch)
    return status, bestMatch, common
def commandLineReloadTree(scanConf):
    """
    CLI handler: ask the local dedup server (rpyc on port 12345) to reload
    its phash search tree. Search functionality blocks server-side until the
    load is complete. ``scanConf`` is unused but kept for a uniform
    CLI-handler signature.
    """
    import rpyc
    remote = rpyc.connect("localhost", 12345)
    remote.root.reloadTree()
def commandLineProcess(scanConf):
    """
    CLI handler: submit a single archive to the local dedup server (rpyc on
    port 12345) for duplicate processing.

    Returns the server's (status, bestMatch, intersections) tuple, or None
    (implicitly) when the source path is missing or not a file.
    """
    import scanner.logSetup
    import rpyc

    scanner.logSetup.initLogging()

    if not os.path.exists(scanConf.sourcePath):
        # print("ERROR: Source file does not exist!")
        return
    if not os.path.isfile(scanConf.sourcePath):
        # print("ERROR: Source is not a file!")
        return

    # Unless --noContext is set, mask out the archive's own directory so it
    # isn't matched against its neighbours.
    if scanConf.noContext:
        scanContext = None
    else:
        scanContext = [os.path.split(scanConf.sourcePath)]

    remote = rpyc.connect("localhost", 12345)

    # NOTE(review): `locked` is not a parameter of the local processDownload()
    # in this file; presumably the remote service exposes an extended
    # signature -- confirm.
    status, bestMatch, intersections = remote.root.processDownload(
        scanConf.sourcePath,
        pathNegativeFilter=scanContext,
        distance=6,
        moveToPath=None,
        locked=True)

    # print("Processed archive. Return status '%s'", status)
    if bestMatch:
        # Fixed: was `print("Matching archive '%s'", bestMatch)`, which
        # printed the raw format string and the path as two separate values.
        print("Matching archive '%s'" % bestMatch)

    return status, bestMatch, intersections
| StarcoderdataPython |
1751685 | # coding: utf8
"""Conll training algorithm"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
    """Mention-ranking coreference scorer with two feed-forward "top" stacks.

    ``single_top`` scores a mention on its own; ``pair_top`` scores an
    (antecedent, anaphor) mention pair. Both stacks end in two stacked
    1->1 Linear layers; the Linear-layer count and order must stay in sync
    with the numpy checkpoints consumed by ``load_weights``.
    """

    def __init__(self, vocab_size, embedding_dim, H1, H2, H3, D_pair_in, D_single_in, dropout=0.5):
        super(Model, self).__init__()
        self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
        self.drop = nn.Dropout(dropout)
        # Pair scorer: three hidden ReLU+dropout layers, then two 1->1 linears.
        self.pair_top = nn.Sequential(nn.Linear(D_pair_in, H1), nn.ReLU(), nn.Dropout(dropout),
                                      nn.Linear(H1, H2), nn.ReLU(), nn.Dropout(dropout),
                                      nn.Linear(H2, H3), nn.ReLU(), nn.Dropout(dropout),
                                      nn.Linear(H3, 1),
                                      nn.Linear(1, 1))
        # Single-mention scorer: same shape as pair_top, separate weights.
        self.single_top = nn.Sequential(nn.Linear(D_single_in, H1), nn.ReLU(), nn.Dropout(dropout),
                                        nn.Linear(H1, H2), nn.ReLU(), nn.Dropout(dropout),
                                        nn.Linear(H2, H3), nn.ReLU(), nn.Dropout(dropout),
                                        nn.Linear(H3, 1),
                                        nn.Linear(1, 1))
        self.init_weights()
def init_weights(self):
w = (param.data for name, param in self.named_parameters() if 'weight' in name)
b = (param.data for name, param in self.named_parameters() if 'bias' in name)
nn.init.uniform_(self.word_embeds.weight.data, a=-0.5, b=0.5)
for t in w:
nn.init.xavier_uniform_(t)
for t in b:
nn.init.constant_(t, 0)
    def load_embeddings(self, preloaded_weights):
        """Replace the embedding table with pretrained weights.

        ``preloaded_weights`` is presumably a FloatTensor of shape
        (vocab_size, embedding_dim) -- TODO confirm against the caller.
        """
        self.word_embeds.weight = nn.Parameter(preloaded_weights)
def load_weights(self, weights_path):
print("Loading weights")
single_layers_weights, single_layers_biases = [], []
for f in sorted(os.listdir(weights_path)):
if f.startswith("single_mention_weights"):
single_layers_weights.append(np.load(os.path.join(weights_path, f)))
if f.startswith("single_mention_bias"):
single_layers_biases.append(np.load(os.path.join(weights_path, f)))
top_single_linear = (layer for layer in self.single_top if isinstance(layer, nn.Linear))
for w, b, layer in zip(single_layers_weights, single_layers_biases, top_single_linear):
layer.weight = nn.Parameter(torch.from_numpy(w).float())
layer.bias = nn.Parameter(torch.from_numpy(b).float().squeeze())
pair_layers_weights, pair_layers_biases = [], []
for f in sorted(os.listdir(weights_path)):
if f.startswith("pair_mentions_weights"):
pair_layers_weights.append(np.load(os.path.join(weights_path, f)))
if f.startswith("pair_mentions_bias"):
pair_layers_biases.append(np.load(os.path.join(weights_path, f)))
top_pair_linear = (layer for layer in self.pair_top if isinstance(layer, nn.Linear))
for w, b, layer in zip(pair_layers_weights, pair_layers_biases, top_pair_linear):
layer.weight = nn.Parameter(torch.from_numpy(w).float())
layer.bias = nn.Parameter(torch.from_numpy(b).float().squeeze())
    def forward(self, inputs, concat_axis=1):
        """
        Score mentions (and, in pair mode, mention pairs).

        ``inputs`` is either a 3-tuple (single-mention mode) or an 8-tuple
        (pair mode); the mode is detected purely by ``len(inputs) == 8``.
        Returns single-mention scores, or pair and single scores concatenated
        along ``concat_axis`` when in pair mode.

        Assumed layouts (TODO confirm against the data loader): ``words`` /
        ``ant_words`` / ``ana_words`` are integer token-id tensors; spans and
        feature tensors are float.
        """
        pairs = (len(inputs) == 8)
        if pairs:
            spans, words, single_features, ant_spans, ant_words, ana_spans, ana_words, pair_features = inputs
        else:
            spans, words, single_features = inputs

        # Embedding lookup requires integer (Long) indices.
        words = words.type(torch.LongTensor)
        if torch.cuda.is_available():
            words = words.cuda()
        # Flatten each mention's word embeddings into one feature row.
        embed_words = self.drop(self.word_embeds(words).view(words.size()[0], -1))
        single_input = torch.cat([spans, embed_words, single_features], 1)
        single_scores = self.single_top(single_input)

        if pairs:
            batchsize, pairs_num, _ = ana_spans.size()
            ant_words_long = ant_words.view(batchsize, -1).type(torch.LongTensor)
            ana_words_long = ana_words.view(batchsize, -1).type(torch.LongTensor)
            if torch.cuda.is_available():
                ant_words_long = ant_words_long.cuda()
                ana_words_long = ana_words_long.cuda()
            ant_embed_words = self.drop(self.word_embeds(ant_words_long).view(batchsize, pairs_num, -1))
            ana_embed_words = self.drop(self.word_embeds(ana_words_long).view(batchsize, pairs_num, -1))
            # Per-pair features: concatenate along the last (feature) dim.
            pair_input = torch.cat([ant_spans, ant_embed_words, ana_spans, ana_embed_words, pair_features], 2)
            # Drop the trailing score dim: (batch, pairs, 1) -> (batch, pairs).
            pair_scores = self.pair_top(pair_input).squeeze(dim=2)
            total_scores = torch.cat([pair_scores, single_scores], concat_axis)
        return total_scores if pairs else single_scores
| StarcoderdataPython |
3246407 | #!/usr/bin/env python
import codecs, httplib, json, os, urllib, shutil, subprocess, sys, argparse
# Location of the upstream Catapult tooling and where to check it out locally.
upstream_git = 'https://github.com/catapult-project/catapult.git'
script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
catapult_src_dir = os.path.join(script_dir, 'catapult-upstream')
# NOTE: this is a Python 2 script (print statements below) — do not run under py3.
parser = argparse.ArgumentParser()
parser.add_argument('trace_file_or_dir',
                    help='Path to trace file or directory of trace files.')
parser.add_argument('--output_file', dest='outfile', default=os.path.join(os.getcwd(), 'mapper_output.json'),
                    help='Path to output file to store results.')
parser.add_argument('--mapper_func', dest='func', default='AvgDrawFrame',
                    help='Name of javascript mapper function in systrace_parser.html.')
args = parser.parse_args()
# Update the source if needed.
if not os.path.exists(catapult_src_dir):
  # Pull the latest source from the upstream git.
  git_args = ['git', 'clone', upstream_git, catapult_src_dir]
  p = subprocess.Popen(git_args, stdout=subprocess.PIPE, cwd=script_dir)
  p.communicate()
  if p.wait() != 0:
    print 'Failed to checkout source from upstream git.'
    sys.exit(1)
# Delegate the actual trace processing to Catapult's process_traces tool,
# pointing it at our local mapper function.
mapper_func_file = os.path.join(script_dir, 'systrace_parser.html')
path_to_process_traces = os.path.join(catapult_src_dir, 'trace_processor/bin/process_traces')
# NOTE(review): the command is assembled by string concatenation and run through
# a shell — paths containing spaces or shell metacharacters will break (or be
# interpreted). Consider subprocess with an argument list; confirm before changing.
run_command = path_to_process_traces + " --mapper_handle " + mapper_func_file + ":" + args.func + " --output_file " + args.outfile + " " + args.trace_file_or_dir
print run_command
# Propagate the tool's exit status as our own.
sys.exit(os.system(run_command))
| StarcoderdataPython |
3395397 | <filename>open_mlstat/google_sheets/sheet_element.py
"""
Element for requesting google sheet
Copyright 2019 <NAME>, <EMAIL>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
class SheetElement:
    """Accumulates batched Google Sheets requests for one spreadsheet tab.

    Structural requests go into ``self.requests`` and value writes into
    ``self.valueRanges``; both queues are flushed (and always cleared) by
    :meth:`run_prepared`.
    """

    def __init__(self, service, sheet_title, spreadsheet_id):
        # Authorized googleapiclient service used to issue batch updates.
        self.service = service
        self.sheetTitle = sheet_title
        self.spreadsheetId = spreadsheet_id
        # Pending structural requests / pending value writes.
        self.requests = []
        self.valueRanges = []

    def prepare_set_values(self, cellsRange, values, majorDimension="ROWS"):
        """Queue a value write for the given A1-style range on this tab."""
        entry = {
            "range": "{}!{}".format(self.sheetTitle, cellsRange),
            "majorDimension": majorDimension,
            "values": values,
        }
        self.valueRanges.append(entry)

    # Flushes via spreadsheets.batchUpdate and spreadsheets.values.batchUpdate.
    def run_prepared(self, valueInputOption="USER_ENTERED"):
        """Execute all queued requests; queues are cleared even on failure.

        Returns:
            tuple: (structural replies, value-write responses); empty lists
            when the corresponding queue was empty.
        """
        structural_result = {'replies': []}
        values_result = {'responses': []}
        try:
            if self.requests:
                structural_result = self.service.spreadsheets().batchUpdate(
                    spreadsheetId=self.spreadsheetId,
                    body={"requests": self.requests}).execute()
            if self.valueRanges:
                values_result = self.service.spreadsheets().values().batchUpdate(
                    spreadsheetId=self.spreadsheetId,
                    body={"valueInputOption": valueInputOption,
                          "data": self.valueRanges}).execute()
        finally:
            # Always reset so a failed flush cannot be re-sent accidentally.
            self.requests = []
            self.valueRanges = []
        return structural_result['replies'], values_result['responses']
93013 | <filename>utility/split_csv.py
''' Split large csv files of online events from a online retail store into
smaller csv files. Also "compressing" the time, which is determined by t_step.
For example, t_step=30 will be 30 times denser, ie. 30 days of data will be packed
into a single day.
'''
import time
from io import StringIO # python3; python2: BytesIO
import numpy as np
import pandas as pd
import boto3
def main(key='2020-Feb'):
    '''Split large csv files of online events from a online retail store into
    smaller csv files. Also "compressing" the time, which is determined by t_step.
    For example, t_step=30 will be 30 times denser, ie. 30 days of data will be packed
    into a single day.
    '''
    key += ".csv"
    ################################################################################
    # read csv file on s3 into spark dataframe
    bucket = 'maxwell-insight'
    s3file = f"s3a://{bucket}/{key}"
    print(s3file)
    # read csv file on s3 into spark dataframe
    df = pd.read_csv(s3file)
    print(f'{time.asctime( time.localtime(time.time()) )}\n{key} read into pandas dataframe!')
    ################################################################################
    # compress time, 30 second -> 1 second if t_step=30
    # unit in seconds, one second in the outputs will now contain t_step seconds
    t_step = 30
    # Convert event_time to Unix seconds, re-base onto the dataset epoch
    # (2019-10-01), integer-divide by t_step to compress, then convert back
    # to timestamps. The // floors every t_step-second window onto one second.
    df['event_time'] = ((pd.to_datetime(df['event_time']) -
                         pd.Timestamp("1970-01-01", tz='utc')) / pd.Timedelta('1s'))
    t_min = pd.Timestamp("2019-10-01", tz='utc').value // 10**9
    df['event_time'] = df['event_time'] - t_min
    df['event_time'] = df['event_time'] // t_step
    df['event_time'] = (df['event_time'] + t_min).astype(np.int64)
    df['event_time'] = pd.to_datetime(df['event_time'], unit='s')
    print(f'{time.asctime( time.localtime(time.time()) )}\nTime transformation complete\n')
    # Walk the compressed timeline in 60-second windows, writing 4 interleaved
    # shards per window back to S3 under minicsv/.
    t0 = df['event_time'].min()
    t1 = t0 + pd.Timedelta(seconds=60)
    t_end = df['event_time'].max()
    while t0 < t_end:
        # NOTE(review): strict > and < drop rows whose timestamp equals a
        # window boundary (t0 or t1) — confirm this loss is intentional.
        df_temp = df[(df['event_time'] > t0) & (df['event_time'] < t1)]
        for i in range(4):
            f_name = t0.strftime("%Y-%m-%d-%H-%M-%S") + '-' + str(i) + '.csv'
            print(f_name)
            # Shard i takes every 4th row starting at offset i.
            df_i = df_temp.iloc[i::4, :]
            csv_buffer = StringIO()
            df_i.to_csv(csv_buffer)
            # NOTE(review): a new boto3 resource is created per file; hoisting
            # it out of the loop would avoid repeated session setup.
            s3_resource = boto3.resource('s3')
            s3_resource.Object(bucket, "minicsv/" + f_name).put(Body=csv_buffer.getvalue())
        t0 += pd.Timedelta(seconds=60)
        t1 = t0 + pd.Timedelta(seconds=60)
if __name__ == "__main__":
    # Process each monthly dump in turn. "2019-Dec" is notably absent from the
    # list — TODO confirm that omission is intentional.
    for k in ["2019-Oct", "2019-Nov", "2020-Jan", "2020-Feb", "2020-Mar", "2020-Apr"]:
        main(k)
| StarcoderdataPython |
3252848 | """ This module contains the Video and Movie class and functions. """
import webbrowser
class Video:
    """Base record for any playable video: a title plus a running time."""

    def __init__(self, title, duration):
        self.title = title
        self.duration = duration


class Movie(Video):
    """A movie entry for the movie-trailer website."""

    # MPAA ratings shared by every Movie instance.
    valid_ratings = ["G", "PG", "PG-13", "R"]

    def __init__(self, title, duration, movie_storyline, poster_image_url,
                 trailer_youtube_url):
        super().__init__(title, duration)
        self.storyline = movie_storyline
        self.poster_image_url = poster_image_url
        self.trailer_youtube_url = trailer_youtube_url

    def show_trailer(self):
        """Open this movie's trailer in the default web browser."""
        webbrowser.open(self.trailer_youtube_url)


class Tvshow(Video):
    """A TV-show entry: episode coordinates plus the broadcasting station."""

    def __init__(self, title, duration, season, episode, tv_station):
        super().__init__(title, duration)
        self.season = season
        self.episode = episode
        self.tv_station = tv_station
| StarcoderdataPython |
4830233 | <reponame>smartcar/python-sdk<filename>smartcar/types.py
import datetime
from collections import namedtuple
from typing import List, NamedTuple
import re
import requests.structures as rs
# Return types for Smartcar API.
#
# 'generate_named_tuple' is used to generate an un-typed namedtuple from
# a dictionary. It will return a namedtuple that has attributes matching
# the dictionary's keys. Use this function as a catch all, or for data
# that does not have an explicit length (e.g. response headers, batch requests)
#
# Otherwise, use the explicitly defined NamedTuples for better type hints!
def generate_named_tuple(
dictionary: dict, name: str = "namedtuple", kebab_case=False
) -> namedtuple:
"""
Take a dictionary and map its keys to the attributes of a named tuple.
Args:
dictionary (dict): Any dictionary
name (str): The desired name of the returned namedtuple
kebab_case (bool): format kebab-cased keys, otherwise handle camelCased keys
Returns:
namedtuple: With attributes matching the keys of the inputted dictionary
"""
# Instantiate keys list, which will keep order consistent
keys = dictionary.keys()
if len(keys) == 0:
return None
attributes = []
for key in keys:
# Convert kebab to snake case, if relevant
if kebab_case:
formatted = key.replace("-", "_").lower()
# convert camel to snake case (using regex function)
else:
formatted = _camel_to_snake(key)
attributes.append(formatted)
gen = namedtuple(name, attributes)
return gen._make([dictionary[k] for k in keys])
def _camel_to_snake(camel_string: str) -> str:
"""
Use regex to change camelCased string to snake_case
Args:
camel_string(str)
Returns:
A snake_cased string
"""
result = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel_string)
result = re.sub("(.)([0-9]+)", r"\1_\2", result)
result = re.sub("([a-z0-9])([A-Z])", r"\1_\2", result)
return result.lower()
def build_meta(response_headers: rs.CaseInsensitiveDict) -> namedtuple:
smartcar_headers = {
"sc-data-age": "data_age",
"sc-unit-system": "unit_system",
"sc-request-id": "request_id",
}
meta_dict = {}
for key, value in smartcar_headers.items():
if key in response_headers:
meta_dict[value] = response_headers[key]
return generate_named_tuple(meta_dict, "Meta", True)
# ===========================================
# auth_client.py
# ===========================================
# Token-exchange payload returned by the Smartcar auth flow.
Access = NamedTuple(
    "Access",
    [
        ("access_token", str),
        ("token_type", str),
        ("expires_in", int),
        ("expiration", datetime.datetime),
        ("refresh_token", str),
        ("refresh_expiration", datetime.datetime),
    ],
)


def make_access_object(access: dict) -> Access:
    """Assemble an Access namedtuple from a token-exchange response dict.

    Keys absent from *access* yield None (dict.get semantics).
    """
    field_order = (
        "access_token",
        "token_type",
        "expires_in",
        "expiration",
        "refresh_token",
        "refresh_expiration",
    )
    return Access(*(access.get(field) for field in field_order))
# ===========================================
# smartcar.py
# ===========================================
# Typed return shapes for the Smartcar SDK. Every response-type carries a
# trailing ``meta`` namedtuple built by build_meta() from the response headers.
Paging = NamedTuple("Paging", [("count", int), ("offset", int)])
User = NamedTuple("User", [("id", str), ("meta", namedtuple)])
Vehicles = NamedTuple(
    "Vehicles",
    [("vehicles", List[str]), ("paging", Paging), ("meta", namedtuple)],
)
Compatibility = NamedTuple(
    "Compatibility", [("compatible", bool), ("meta", namedtuple)]
)
# ===========================================
# vehicle.py
# ===========================================
Vin = NamedTuple("Vin", [("vin", str), ("meta", namedtuple)])
Charge = NamedTuple(
    "Charge",
    [("is_plugged_in", bool), ("state", str), ("meta", namedtuple)],
)
Battery = NamedTuple(
    "Battery",
    [("percent_remaining", float), ("range", float), ("meta", namedtuple)],
)
BatteryCapacity = NamedTuple(
    "BatteryCapacity", [("capacity", float), ("meta", namedtuple)]
)
Fuel = NamedTuple(
    "Fuel",
    [
        ("range", float),
        ("percent_remaining", float),
        ("amount_remaining", float),
        ("meta", namedtuple),
    ],
)
TirePressure = NamedTuple(
    "TirePressure",
    [
        ("front_left", int),
        ("front_right", int),
        ("back_left", int),
        ("back_right", int),
        ("meta", namedtuple),
    ],
)
EngineOil = NamedTuple("EngineOil", [("life_remaining", float), ("meta", namedtuple)])
Odometer = NamedTuple("Odometer", [("distance", float), ("meta", namedtuple)])
Location = NamedTuple(
    "Location",
    [("latitude", float), ("longitude", float), ("meta", namedtuple)],
)
Attributes = NamedTuple(
    "Attributes",
    [
        ("id", str),
        ("make", str),
        ("model", str),
        ("year", str),
        ("meta", namedtuple),
    ],
)
# Action/Status wrap command endpoints (lock/unlock, charge start/stop, etc.).
Action = NamedTuple("Action", [("status", str), ("message", str), ("meta", namedtuple)])
Status = NamedTuple("Status", [("status", str), ("meta", namedtuple)])
Permissions = NamedTuple(
    "Permissions", [("permissions", list), ("paging", Paging), ("meta", namedtuple)]
)
Subscribe = NamedTuple(
    "Subscribe",
    [("webhook_id", str), ("vehicle_id", str), ("meta", namedtuple)],
)
# ===========================================
# Named Tuple Selector Function
# ===========================================
def select_named_tuple(path: str, response_or_dict) -> NamedTuple:
    """
    This function is used to select one of the pre-defined NamedTuples
    based on a path provided. Upon selection, the appropriate NamedTuple
    will be instantiated and returned. This function can take in a
    response from Smartcar API OR a dictionary with "body", "headers",
    "path", and "status".
    The only use case for the parsing of a dictionary (rather than a response)
    would be for "batch" calls to Smartcar API. Upon sending a batch request
    to SmartcarAPI, a single response is returned. The response data
    (i.e. response.json()) contains a list of dictionaries, each dictionary
    representing the result of each request in the batch. For this reason,
    this function needs to be able to parse a dictionary as well.
    Note that if a path is not dictated in one of the
    conditionals below, the raw data will be returned. This, in theory,
    shouldn't run because paths are defined by the contributing developer.
    In the case of "batch" requests, incorrect paths to batch will result in
    a SmartcarException before this function is called.
    Args:
        path (str): Smartcar API path
        response_or_dict: Smartcar response, or a dictionary after parsing
            the response to the "batch" endpoint
    Returns:
        NamedTuple: Appropriate to the path.
    """
    # Normalize the two accepted input forms into (headers-meta, body-data).
    # NOTE(review): ``type(...) == dict`` would conventionally be
    # ``isinstance(response_or_dict, dict)``.
    if type(response_or_dict) == dict:
        headers_dict = rs.CaseInsensitiveDict(response_or_dict["headers"])
        headers = build_meta(headers_dict)
        data = response_or_dict["body"]
    else:
        headers = build_meta(response_or_dict.headers)
        data = response_or_dict.json()
    # smartcar.py
    if path == "user":
        return User(data["id"], headers)
    elif path == "vehicles":
        return Vehicles(
            data["vehicles"],
            Paging(data["paging"]["count"], data["paging"]["offset"]),
            headers,
        )
    elif path == "compatibility":
        return Compatibility(data["compatible"], headers)
    # vehicle.py
    elif path == "vin":
        return Vin(data["vin"], headers)
    elif path == "charge":
        return Charge(data["isPluggedIn"], data["state"], headers)
    elif path == "battery":
        return Battery(data["percentRemaining"], data["range"], headers)
    elif path == "battery/capacity":
        return BatteryCapacity(data["capacity"], headers)
    elif path == "fuel":
        return Fuel(
            data["range"],
            data["percentRemaining"],
            data["amountRemaining"],
            headers,
        )
    elif path == "tires/pressure":
        return TirePressure(
            data["frontLeft"],
            data["frontRight"],
            data["backLeft"],
            data["backRight"],
            headers,
        )
    elif path == "engine/oil":
        return EngineOil(data["lifeRemaining"], headers)
    elif path == "odometer":
        return Odometer(data["distance"], headers)
    elif path == "location":
        return Location(data["latitude"], data["longitude"], headers)
    elif path == "permissions":
        return Permissions(
            data["permissions"],
            Paging(data["paging"]["count"], data["paging"]["offset"]),
            headers,
        )
    elif path == "subscribe":
        return Subscribe(data["webhookId"], data["vehicleId"], headers)
    # Command endpoints share the Action shape.
    elif (
        path == "lock"
        or path == "unlock"
        or path == "start_charge"
        or path == "stop_charge"
    ):
        return Action(data["status"], data["message"], headers)
    elif path == "disconnect" or path == "unsubscribe":
        return Status(data["status"], headers)
    # Empty path means the vehicle-attributes root endpoint.
    elif path == "":
        return Attributes(
            data["id"],
            data["make"],
            data["model"],
            data["year"],
            headers,
        )
    # Unknown path: fall back to a generic namedtuple (dict bodies) or raw data.
    elif type(data) == dict:
        return generate_named_tuple(data, "Data")
    else:
        return data
| StarcoderdataPython |
3289722 | <reponame>Sagu12/TextSummarizer
from flask import Flask, render_template,request
import requests
from bs4 import BeautifulSoup
import nltk
import pandas as pd
#https://www.youtube.com/watch?v=h8fE_G9a_Oo
# Flask application instance; the route below attaches to it via @app.route.
app = Flask(__name__)
def get_wiki_content(url):
    """Fetch *url* and return the concatenated text of all <p> elements.

    Args:
        url: Address of the page to scrape.

    Returns:
        str: Text of every paragraph, concatenated in document order.
    """
    req_obj = requests.get(url)
    text = req_obj.text
    # Name the parser explicitly: with no argument BeautifulSoup guesses
    # (emitting GuessedAtParserWarning) and the choice — hence the parse —
    # varies with which parsers happen to be installed locally.
    soup = BeautifulSoup(text, "html.parser")
    all_paras = soup.find_all("p")
    return "".join(para.text for para in all_paras)
def top10_sent(url):
    """Summarize the page at *url* by returning its 10 highest-scoring sentences.

    Sentences are scored by summing the normalized frequencies of their
    non-stopword tokens (classic frequency-based extractive summarization).

    Args:
        url: Address of the page to summarize.

    Returns:
        str: The selected sentences joined by single spaces, best-first.

    Raises:
        ValueError: If the page yields no countable words (empty ``max()``).
    """
    required_text = get_wiki_content(url)
    # A set gives O(1) stopword membership tests (the list was O(n) per word).
    stopwords = set(nltk.corpus.stopwords.words("english"))
    sentences = nltk.sent_tokenize(required_text)
    words = nltk.word_tokenize(required_text)
    # Raw counts of every non-stopword token.
    word_freq = {}
    for word in words:
        if word not in stopwords:
            word_freq[word] = word_freq.get(word, 0) + 1
    # Normalize counts into [0, 1] relative to the most frequent word.
    max_word_freq = max(word_freq.values())
    for key in word_freq:
        word_freq[key] /= max_word_freq
    # Score each sentence as the sum of its words' normalized frequencies.
    sentences_score = []
    for sent in sentences:
        curr_score = sum(word_freq.get(w, 0) for w in nltk.word_tokenize(sent))
        sentences_score.append(curr_score)
    sentences_data = pd.DataFrame({"sent": sentences, "score": sentences_score})
    # BUG FIX: the summary must keep the HIGHEST-scoring sentences, so sort
    # descending — the previous ascending=True sort returned the WORST ones.
    sorted_data = sentences_data.sort_values(by="score", ascending=False).reset_index()
    # BUG FIX: iloc[0:11] selected 11 rows; take exactly the top 10.
    top10_rows = sorted_data.iloc[0:10, :]
    return " ".join(list(top10_rows["sent"]))
@app.route("/", methods=["GET", "POST"])
def index():
    """Serve the URL-entry form on GET; on POST, summarize the submitted URL."""
    if request.method != "POST":
        return render_template("index.html")
    target = request.form.get("url")
    return top10_sent(target)
if __name__ == "__main__":
    # NOTE(review): debug=True enables the Werkzeug interactive debugger
    # (arbitrary code execution) — never deploy with this flag set.
    app.run(debug=True)
3269185 | <gh_stars>1-10
import __clrclasses__.System.Management.Instrumentation as Instrumentation
| StarcoderdataPython |
108247 | """
SQ数据集数据文件目录
文件获取举例:inner2['29'][2],
得到的是内圈2下29Hz的第2个文件(actually 3rd),
默认文件夹内排列顺序,每一个转速下共有3个文件[0~2]
train_dir109 1-故障程度 09-转速(Hz)
train 均使用第0个数据文件,test均使用第1个数据文件
"""
# home = r'G:\dataset\SQdata' # USB drive
home = r'F:\dataset\SQdata' # local F: drive
# Each dict maps rotation speed (Hz, as a string key) -> list of 3 recordings.
# inner1/2/3 and outer1/2/3 encode fault severity 1-3 on the inner/outer race.
inner1 = {'09': [home + r'\inner1\09\REC3585_ch2.txt', home + r'\inner1\09\REC3586_ch2.txt',
                 home + r'\inner1\09\REC3587_ch2.txt'],
          '19': [home + r'\inner1\19\REC3588_ch2.txt', home + r'\inner1\19\REC3589_ch2.txt',
                 home + r'\inner1\19\REC3590_ch2.txt'],
          '29': [home + r'\inner1\29\REC3591_ch2.txt', home + r'\inner1\29\REC3592_ch2.txt',
                 home + r'\inner1\29\REC3593_ch2.txt'],
          '39': [home + r'\inner1\39\REC3594_ch2.txt', home + r'\inner1\39\REC3595_ch2.txt',
                 home + r'\inner1\39\REC3596_ch2.txt']}
inner2 = {'09': [home + r'\inner2\09\REC3607_ch2.txt', home + r'\inner2\09\REC3608_ch2.txt',
                 home + r'\inner2\09\REC3609_ch2.txt'],
          '19': [home + r'\inner2\19\REC3610_ch2.txt', home + r'\inner2\19\REC3611_ch2.txt',
                 home + r'\inner2\19\REC3612_ch2.txt'],
          '29': [home + r'\inner2\29\REC3613_ch2.txt', home + r'\inner2\29\REC3614_ch2.txt',
                 home + r'\inner2\29\REC3615_ch2.txt'],
          '39': [home + r'\inner2\39\REC3616_ch2.txt', home + r'\inner2\39\REC3617_ch2.txt',
                 home + r'\inner2\39\REC3618_ch2.txt']}
inner3 = {'09': [home + r'\inner3\09\REC3520_ch2.txt', home + r'\inner3\09\REC3521_ch2.txt',
                 home + r'\inner3\09\REC3522_ch2.txt'],
          '19': [home + r'\inner3\19\REC3523_ch2.txt', home + r'\inner3\19\REC3524_ch2.txt',
                 home + r'\inner3\19\REC3525_ch2.txt'],
          '29': [home + r'\inner3\29\REC3526_ch2.txt', home + r'\inner3\29\REC3527_ch2.txt',
                 home + r'\inner3\29\REC3528_ch2.txt'],
          '39': [home + r'\inner3\39\REC3529_ch2.txt', home + r'\inner3\39\REC3530_ch2.txt',
                 home + r'\inner3\39\REC3531_ch2.txt']}
outer1 = {'09': [home + r'\outer1\09\REC3500_ch2.txt', home + r'\outer1\09\REC3501_ch2.txt',
                 home + r'\outer1\09\REC3502_ch2.txt'],
          '19': [home + r'\outer1\19\REC3503_ch2.txt', home + r'\outer1\19\REC3504_ch2.txt',
                 home + r'\outer1\19\REC3505_ch2.txt'],
          '29': [home + r'\outer1\29\REC3506_ch2.txt', home + r'\outer1\29\REC3507_ch2.txt',
                 home + r'\outer1\29\REC3508_ch2.txt'],
          '39': [home + r'\outer1\39\REC3510_ch2.txt', home + r'\outer1\39\REC3511_ch2.txt',
                 home + r'\outer1\39\REC3512_ch2.txt']}
outer2 = {'09': [home + r'\outer2\09\REC3482_ch2.txt', home + r'\outer2\09\REC3483_ch2.txt',
                 home + r'\outer2\09\REC3484_ch2.txt'],
          '19': [home + r'\outer2\19\REC3485_ch2.txt', home + r'\outer2\19\REC3486_ch2.txt',
                 home + r'\outer2\19\REC3487_ch2.txt'],
          '29': [home + r'\outer2\29\REC3488_ch2.txt', home + r'\outer2\29\REC3489_ch2.txt',
                 home + r'\outer2\29\REC3490_ch2.txt'],
          '39': [home + r'\outer2\39\REC3491_ch2.txt', home + r'\outer2\39\REC3492_ch2.txt',
                 home + r'\outer2\39\REC3493_ch2.txt']}
outer3 = {'09': [home + r'\outer3\09\REC3464_ch2.txt', home + r'\outer3\09\REC3465_ch2.txt',
                 home + r'\outer3\09\REC3466_ch2.txt'],
          '19': [home + r'\outer3\19\REC3467_ch2.txt', home + r'\outer3\19\REC3468_ch2.txt',
                 home + r'\outer3\19\REC3469_ch2.txt'],
          '29': [home + r'\outer3\29\REC3470_ch2.txt', home + r'\outer3\29\REC3471_ch2.txt',
                 home + r'\outer3\29\REC3472_ch2.txt'],
          '39': [home + r'\outer3\39\REC3473_ch2.txt', home + r'\outer3\39\REC3474_ch2.txt',
                 home + r'\outer3\39\REC3475_ch2.txt']}
normal = {'09': [home + r'\normal\09\REC3629_ch2.txt', home + r'\normal\09\REC3630_ch2.txt',
                 home + r'\normal\09\REC3631_ch2.txt'],
          '19': [home + r'\normal\19\REC3632_ch2.txt', home + r'\normal\19\REC3633_ch2.txt',
                 home + r'\normal\19\REC3634_ch2.txt'],
          '29': [home + r'\normal\29\REC3635_ch2.txt', home + r'\normal\29\REC3636_ch2.txt',
                 home + r'\normal\29\REC3637_ch2.txt'],
          '39': [home + r'\normal\39\REC3638_ch2.txt', home + r'\normal\39\REC3639_ch2.txt',
                 home + r'\normal\39\REC3640_ch2.txt']}
# -------------------------------------------------------------------------
# -----------------------------train_dir-----------------------------------
# train* lists use file index 0; test lists (suffix _ / _1) use file index 1.
train3_dir109 = [normal['09'][0], inner1['09'][0], outer1['09'][0]]
train3_dir139 = [normal['39'][0], inner1['39'][0], outer1['39'][0]]
train3_dir239 = [normal['39'][0], inner2['39'][0], outer2['39'][0]]
train3_dir309 = [normal['09'][0], inner3['09'][0], outer3['09'][0]]
# Distinguish fault class only (3-way: normal / inner / outer)
sq3_39_0 = [normal['39'][0], inner3['39'][0], outer3['39'][0]]
sq3_39_1 = [normal['39'][1], inner3['39'][1], outer3['39'][1]]
sq3_29_0 = [normal['29'][0], inner3['29'][0], outer3['29'][0]]
sq3_29_1 = [normal['29'][1], inner3['29'][1], outer3['29'][1]]
# Distinguish fault class AND fault severity (7-way)
sq7_39_0 = [normal['39'][0], inner1['39'][0], inner2['39'][0], inner3['39'][0],
            outer1['39'][0], outer2['39'][0], outer3['39'][0]]
sq7_39_1 = [normal['39'][1], inner1['39'][1], inner2['39'][1], inner3['39'][1],
            outer1['39'][1], outer2['39'][1], outer3['39'][1]]
sq7_29_0 = [normal['29'][0], inner1['29'][0], inner2['29'][0], inner3['29'][0],
            outer1['29'][0], outer2['29'][0], outer3['29'][0]]
sq7_29_1 = [normal['29'][1], inner1['29'][1], inner2['29'][1], inner3['29'][1],
            outer1['29'][1], outer2['29'][1], outer3['29'][1]]
# Multiple-rotation-speed scenarios
sq3_09 = [normal['09'][0], inner3['09'][0], outer3['09'][0]]
sq3_19 = [normal['19'][0], inner3['19'][0], outer3['19'][0]]
sq3_29 = [normal['29'][0], inner3['29'][0], outer3['29'][0]]
sq3_39 = [normal['39'][0], inner3['39'][0], outer3['39'][0]]
sq3_09_ = [normal['09'][1], inner3['09'][1], outer3['09'][1]]
sq3_19_ = [normal['19'][1], inner3['19'][1], outer3['19'][1]]
sq3_29_ = [normal['29'][1], inner3['29'][1], outer3['29'][1]]
sq3_39_ = [normal['39'][1], inner3['39'][1], outer3['39'][1]]
# sq_NC = [normal['09'][0], normal['19'][0], normal['29'][0], normal['39'][0]]
# sq_IF = [inner3['09'][0], inner3['19'][0], inner3['29'][0], inner3['39'][0]]
# sq_OF = [outer3['09'][0], outer3['19'][0], outer3['29'][0], outer3['39'][0]]
if __name__ == '__main__':
    pass
| StarcoderdataPython |
3256421 | <reponame>SunliangzeSmile/smilexls
#!/usr/bin/env python3
#-*-coding:utf-8-*-
# /* *
# @Author: sunliangzesmile
# @Date: 2019-09-28 18:28:16
# @Last Modified by: sunliangzesmile
# @Last Modified time: 2019-09-28 18:28:16
# @Description:
#* */
from config import app_config
from flask import Flask,request,url_for
from flask_wtf import CSRFProtect
from .views.home import home
from .views.blog import blog
# Build the application: load config, mount the blueprints, enable CSRF tokens.
app=Flask(__name__)
app.config.from_mapping(app_config)
app.register_blueprint(home)
app.register_blueprint(blog)
CSRFProtect(app)
| StarcoderdataPython |
1607620 | <gh_stars>0
# autostart localizer
from __future__ import print_function
import sys, os, time
import rospy
import rosnode
import tf
from nav_msgs.msg import Odometry
# ROS topic on which the simulator publishes the ground-truth robot pose
# (nav_msgs/Odometry).
TOPIC_GROUND_TRUTH = '/base_pose_ground_truth'
def get_ROS_nodes():
    """Return the names of currently registered ROS nodes, or None on failure.

    Failures (e.g. ROS master unreachable) are printed and swallowed so
    callers can degrade gracefully.
    """
    try:
        return rosnode.get_node_names()
    except Exception as e:
        print(e)
    return None
def noderunning(nodename):
    """Return True when *nodename* appears among the running ROS nodes.

    Note: raises TypeError if the node list could not be fetched (None),
    matching the original behavior.
    """
    return nodename in get_ROS_nodes()
# Latest ground-truth pose as [x, y, yaw]; None until the first callback fires.
gt_robot_pose = None
def groundtruth_cb(data):
    """Odometry callback: cache the robot's ground-truth (x, y, yaw)."""
    global gt_robot_pose
    # Lazily allocate on first message (also serves as a "pose received" flag).
    if (gt_robot_pose is None):
        gt_robot_pose = [0,0,0]
    gt_robot_pose[0] = data.pose.pose.position.x
    gt_robot_pose[1] = data.pose.pose.position.y
    # Convert the orientation quaternion to Euler angles; keep only yaw.
    o = data.pose.pose.orientation
    q = (o.x, o.y, o.z, o.w)
    euler = tf.transformations.euler_from_quaternion(q)
    gt_robot_pose[2] = euler[2] # yaw
### main ###
def main():
    """Fetch the robot's ground-truth pose and launch AMCL initialized there."""
    rospy.init_node('autostart_localizer')
    #check if stageros is running
    stagerun = noderunning('/stageros')
    print('stageros running: %r' %(stagerun))
    #check if map_server already running
    msrun = noderunning('/map_server')
    print('map_server running: %r' %(msrun))
    # get map name
    mapname = 'map' # default map name
    # NOTE(review): bare except silently falls back to the default map name;
    # consider catching rospy's KeyError explicitly.
    try:
        mapname = rospy.get_param('/mapname')
    except:
        pass
    print('map name: %s' %mapname)
    if not msrun:
        # check if mapname file exists - if not ERROR
        print("TODO: check if map file %s exists" %mapname)
    #get robot current pose from topic /base_pose_ground_truth
    # Subscribe briefly, give the callback ~1s to fire, then unsubscribe.
    gt_sub = rospy.Subscriber(TOPIC_GROUND_TRUTH, Odometry, groundtruth_cb)
    print("Waiting for ground truth pose...")
    rospy.sleep(1)
    gt_sub.unregister()
    # NOTE(review): if no message arrived within 1s, gt_robot_pose is still
    # None and the indexing below will raise — confirm this is acceptable.
    print("Current robot pose: %r" %(gt_robot_pose))
    #start localizer
    if (msrun):
        rstr = "roslaunch amcl.launch use_mapserver:=false "
    else:
        rstr = "roslaunch amcl.launch map_name:=%s " %mapname
    rstr += " initial_pose_x:=%.1f initial_pose_y:=%.1f initial_pose_a:=%.3f" \
            %(gt_robot_pose[0], gt_robot_pose[1], gt_robot_pose[2])
    print(rstr)
    # Blocks until roslaunch exits; command runs through the shell.
    os.system(rstr)
if __name__=='__main__':
    main()
| StarcoderdataPython |
1678592 | <filename>embeds/migrations/0001_initial.py<gh_stars>0
# Generated by Django 3.2.9 on 2021-11-10 11:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (makemigrations, Django 3.2.9).

    Creates the Embed and Modification tables; do not hand-edit field lists —
    regenerate instead so migration state stays consistent.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Embed',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(default='Evento Sin Nombre', max_length=200)),
                ('event_url', models.URLField()),
                ('creation_date', models.DateTimeField(auto_now_add=True)),
                ('last_modification', models.DateTimeField(auto_now=True)),
                ('platform', models.CharField(choices=[('YouTube', 'YouTube'), ('Vimeo', 'Vimeo')], max_length=20)),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='creator', to=settings.AUTH_USER_MODEL)),
                ('last_modification_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='last_editor', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Modification',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('new_title', models.CharField(default='Evento Sin Nombre', max_length=200)),
                ('previous_url', models.URLField()),
                ('new_url', models.URLField()),
                ('modification_date', models.DateTimeField(auto_now_add=True)),
                ('modified_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='editor', to=settings.AUTH_USER_MODEL)),
                ('previous_title', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='previous_name', to='embeds.embed')),
            ],
        ),
    ]
| StarcoderdataPython |
119717 | <gh_stars>10-100
from asyncio import create_task, sleep, Task
from collections import Counter
from datetime import datetime as dt, timedelta as td
from itertools import chain
from typing import Dict, List, Optional, Sequence, Tuple, TypeVar
from discord import abc, Embed, TextChannel, Message, Reaction, User
from .util.cdn import get_avatar
from .checks import all_checks, Reactions
# Assemble all the emoji we need via hexadecimal values.
# I have trust issues when it comes to eclectic characters in my source code, so
# this makes me feel slightly safer.
# Regional-indicator letters A-Z, used as the lettered option buttons.
letters: Tuple[str, ...] = tuple(map(chr, range(0x1F1E6, 0x1F200)))
cancel: str = chr(0x1F6AB)
confirm: str = chr(0x2611)
# Alternatives:
# Red X: 0x274C
# Green X: 0x274E
# Green Check: 0x2705
# "Do Not Enter": 0x26D4
# Zodiac icons because why not
astro: Tuple[str, ...] = tuple(chr(n) for n in range(0x2648, 0x2654))
info: str = chr(0x2139)  # [i]
okay: str = chr(0x1F197)  # [OK]
# Clock faces 1-12 o'clock, reordered so index 0 is 12 o'clock.
clock: Tuple[str, ...] = (chr(0x1F55C), *(chr(n) for n in range(0x1F550, 0x1F55B)))
# List of all long-term Menu Operations, as Tasks. Before Shutdown, these can be
# cancelled en masse to have the Messages get cleaned up.
live: List[Task] = []
T_ = TypeVar("T_")
def count_votes(allowed: Sequence[str], votes: Sequence[Reaction]) -> Counter:
    """Tally reaction votes restricted to the *allowed* emoji.

    Matching is casefolded; for reactions the bot itself placed (the button
    seeds), its own reaction is subtracted from the count.
    """
    wanted = {str(option).casefold() for option in allowed}
    tally = Counter()
    for vote in votes:
        emoji = str(vote.emoji)
        if emoji.casefold() not in wanted:
            continue
        tally[emoji] = vote.count - 1 if vote.me else vote.count
    return tally
class Menu:
    """A reaction-button menu rendered as a single Discord Embed message.

    Private prompts (get_one/get_multi/get_bool) honor reactions only from
    ``self.master``; public interfaces (get_poll/get_vote) count anyone's.
    """
    def __init__(
        self,
        client,
        channel: TextChannel,
        title: str,
        desc: str = None,
        user: User = None,
        colour: int = 0x_0A_CD_FF,
    ):
        # Discord client; needed to wait for reaction events.
        self.client = client
        self.channel: abc.Messageable = channel
        # The Embed that is posted/edited as the menu evolves.
        self.em: Embed = Embed(title=title, description=desc, colour=colour)
        if user:
            self.em.set_author(name=user.display_name, icon_url=get_avatar(user))
        # Message currently displaying the menu; None until first post().
        self.msg: Optional[Message] = None
        # The user whose reactions are honored in private prompts.
        self.master: User = user
def add_section(self, result: str, title: str = "Results", overwrite: int = None):
if overwrite is not None:
self.em.set_field_at(overwrite, name=title, value=str(result), inline=False)
else:
self.em.add_field(name=title, value=str(result), inline=False)
    async def clear(self):
        """Strip all reaction buttons from the posted menu message.

        Assumes ``self.msg`` is set, i.e. ``post()`` has run at least once.
        """
        await self.msg.clear_reactions()
    async def close(self, text=""):
        """Finalize the menu: optionally set closing *text* as the embed
        description, repost, and remove all reaction buttons."""
        if text:
            self.em.description = text
        await self.post()
        await self.clear()
async def post(self):
if self.msg and self.em not in self.msg.embeds:
await self.msg.edit(embed=self.em)
else:
self.msg: Message = await self.channel.send(embed=self.em)
    async def _add_buttons(self, selection: Sequence):
        """Replace the message's reactions with *selection*, in order.

        Intended to run as a background Task (see add_buttons): adding
        reactions one-by-one is slow and rate-limited by Discord.
        """
        await self.msg.clear_reactions()
        for opt in selection:
            await self.msg.add_reaction(opt)
    async def add_buttons(self, selection: Sequence) -> Task:
        """Post the menu if needed, then start adding *selection* as reaction
        buttons in the background; returns the Task so callers can await it."""
        if not self.msg:
            await self.post()
        return create_task(self._add_buttons(selection))
# ========---
# Begin methods for actually running the interface
# ========---
### PRIVATE interfaces; Only one person may respond.
    async def get_one(
        self, opts: Sequence[T_], time: float = 30, title: str = "Select One"
    ) -> Optional[T_]:
        """Ask the user to select ONE of a set of predefined options.

        Shows up to 26 lettered options plus a cancel button and waits up to
        *time* seconds for ``self.master`` to react. Returns the chosen option,
        or None on cancel/timeout/too many options.
        """
        # One regional-indicator letter per option; bail if it can't be mapped.
        if not 1 <= len(opts) <= len(letters):
            return None
        letopt = dict(zip(letters, opts))
        selection = [cancel, *letopt]
        self.add_section(
            "\n".join(f"{letter}: `{opt}`" for letter, opt in letopt.items()), title
        )
        await self.post()
        # Buttons are added concurrently while we already wait for a reaction.
        buttons: Task = await self.add_buttons(selection)
        choice = (
            await Reactions.waitfor(
                self.client,
                all_checks(
                    Reactions.by_user(self.master), Reactions.on_message(self.msg)
                ),
                timeout=time,
            )
        )[0]
        # Timeout yields no reaction; cancel yields None as well.
        if not choice or choice.emoji == cancel:
            result = None
        else:
            result = letopt.get(choice.emoji)
        # Let the button-adding task finish before wiping the reactions.
        await buttons
        await self.clear()
        return result
async def get_multi(
self,
opts: Sequence[T_],
time: float = 30,
prompt: str = "Select One or More and Confirm:",
title: str = "Multiple Choice",
) -> Tuple[T_, ...]:
"""Ask the user to select ONE OR MORE of a set of predefined options."""
if not 1 <= len(opts) <= len(letters):
return ()
letopt = dict(zip(letters, opts))
selection = [cancel, *letopt, confirm]
self.add_section(
"\n".join(
chain(
[prompt], (f"{letter}: `{opt}`" for letter, opt in letopt.items())
)
),
title,
)
await self.post()
buttons: Task = await self.add_buttons(selection)
ok = (str(cancel), str(confirm))
pre = all_checks(Reactions.by_user(self.master), Reactions.on_message(self.msg))
def check(react_: Reaction, user: User) -> bool:
return pre(react_, user) and str(react_.emoji) in ok
choice = (await Reactions.waitfor(self.client, check, timeout=time))[0]
if not choice or choice.emoji == cancel:
await self.clear()
return ()
try:
vm: Message = await self.channel.fetch_message(self.msg.id)
except:
await self.clear()
return ()
results: Tuple[T_, ...] = tuple(
[
letopt.get(react.emoji)
for react in vm.reactions
if (
react.emoji in selection[1:-1]
and self.master in await react.users().flatten()
)
]
)
await buttons
await self.clear()
return results
async def get_bool(
self,
time: float = 30,
# prompt: str = "Select Yes or No",
# title: str = "Boolean Choice",
) -> Optional[bool]:
"""Ask the user to click a simple YES or NO."""
selection = (confirm, cancel)
# self.em.description = prompt or "Select Yes or No"
# await self.post()
# adding = create_task(self.add_buttons(selection))
# self.add_section(prompt, title)
# await self.post()
adding: Task = await self.add_buttons(selection)
choice = (
await Reactions.waitfor(
self.client,
all_checks(
Reactions.by_user(self.master), Reactions.on_message(self.msg)
),
timeout=time,
)
)[0]
await adding
await self.clear()
if not choice:
return None
elif choice.emoji == confirm:
return True
elif choice.emoji == cancel:
return False
else:
return None
### PUBLIC interfaces; ANYONE may respond.
async def get_poll(
self,
opts: Sequence[T_],
time: int = 3600,
prompt: str = "Select One or More:",
title: str = "Poll",
) -> Dict[T_, int]:
"""Run a MULTIPLE CHOICE open poll that anyone can answer."""
if not 1 <= len(opts) <= len(letters):
return {}
letopt = dict(zip(letters, opts))
selection = list(letopt)
do_footer = not self.em.footer and not self.em.timestamp
self.add_section(
"\n".join(
chain(
[prompt], (f"{letter}: `{opt}`" for letter, opt in letopt.items())
)
),
title,
)
if do_footer:
self.em.set_footer(text="Poll Ends").timestamp = dt.utcnow() + td(
seconds=time
)
await self.post()
await (await self.add_buttons(selection))
await sleep(time)
try:
vm = await self.channel.fetch_message(self.msg.id)
except:
return {}
outcome = count_votes(selection, vm.reactions)
await self.clear()
self.add_section(
"\n".join("{}: **{}**".format(letopt.get(k), v) for k, v in outcome.items())
)
if do_footer:
self.em.set_footer(text="").timestamp = Embed.Empty
await self.post()
return outcome
async def get_vote(self, time: int = 3600) -> Dict[bool, int]:
"""Run a YES OR NO open vote that anyone can answer."""
selection = (confirm, cancel)
do_footer = not self.em.footer and not self.em.timestamp
if do_footer:
self.em.set_footer(text="Vote Ends").timestamp = dt.utcnow() + td(
seconds=time
)
await self.post()
await (await self.add_buttons(selection))
await sleep(time)
try:
vm = await self.channel.fetch_message(self.msg.id)
except:
return {}
outcome = count_votes(selection, vm.reactions)
await self.clear()
real = {True: outcome.get(confirm, 0), False: outcome.get(cancel, 0)}
self.add_section(
"\n".join(
"**{}**: **{}**".format("Yes" if k else "No", v)
for k, v in real.items()
)
)
if do_footer:
self.em.set_footer(text="").timestamp = Embed.Empty
await self.post()
return real
    async def confirm_action(
        client, src: Message, title: str, desc: str, timeout: int = 30
    ) -> Optional[bool]:
        """Build a one-off Menu on *src*'s channel and ask its author yes/no.

        NOTE(review): despite method-level indentation, the first parameter
        is ``client`` rather than ``self`` -- this reads like a module-level
        helper (or a method missing ``@staticmethod``); confirm against the
        enclosing definition.
        """
        author: User = src.author
        channel: TextChannel = src.channel
        m = Menu(client, channel, title, desc, author)
        return await m.get_bool(timeout)
| StarcoderdataPython |
4821283 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
# model file: example-models/ARM/Ch.4/kidscore_momwork.stan
import torch
import pyro
import pyro.distributions as dist
def init_vector(name, dims=None):
    """Sample an initial value for site *name* from Normal(0, 0.2) of shape *dims*."""
    return pyro.sample(name, dist.Normal(torch.zeros(dims), 0.2 * torch.ones(dims)).to_event(1))
def validate_data_def(data):
    """Assert that every variable the model needs is present in ``data``.

    Raises ``AssertionError`` naming the first missing key; returns ``None``
    when the mapping is complete.  (FIX: dropped the dead local assignments
    that followed the asserts in the original.)
    """
    assert 'N' in data, 'variable not found in data: key=N'
    assert 'kid_score' in data, 'variable not found in data: key=kid_score'
    assert 'mom_work' in data, 'variable not found in data: key=mom_work'
def transformed_data(data):
    """Derive dummy-coded work-category indicators and store them in *data*.

    For each category ``k`` in {2, 3, 4}, ``data["work{k}"]`` is set to the
    comparison ``mom_work == k`` (elementwise for tensor inputs).
    """
    # Read the inputs in the original order so missing keys fail the same way;
    # only mom_work actually feeds the computation.
    n_obs = data["N"]
    scores = data["kid_score"]
    mom_work = data["mom_work"]
    for category in (2, 3, 4):
        data["work%d" % category] = mom_work == category
def init_params(data):
    """Draw initial parameter values; ``beta`` holds the four regression coefficients."""
    params = {}
    # Note: ``(4)`` is the plain int 4 (not a 1-tuple); init_vector forwards
    # it to torch.zeros/ones, which accept an int size.
    params["beta"] = init_vector("beta", dims=(4)) # vector
    return params
def model(data, params):
    """Gaussian regression of kid_score on mom_work dummy indicators.

    mean = beta[0] + beta[1]*work2 + beta[2]*work3 + beta[3]*work4, with a
    HalfCauchy(2.5) prior on the observation noise ``sigma``.
    """
    # initialize data
    N = data["N"]
    kid_score = data["kid_score"]
    mom_work = data["mom_work"]
    # initialize transformed data (indicator tensors built by transformed_data)
    work2 = data["work2"].float()
    work3 = data["work3"].float()
    work4 = data["work4"].float()
    # init parameters
    beta = params["beta"]
    sigma = pyro.sample("sigma", dist.HalfCauchy(torch.tensor(2.5)))
    with pyro.plate("data", N):
        kid_score = pyro.sample('obs', dist.Normal(beta[0] + beta[1] * work2 + beta[2] * work3 + beta[3] * work4, sigma), obs=kid_score)
| StarcoderdataPython |
1672359 | <gh_stars>1-10
import setuptools
# Use the README verbatim as the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name='steam-review-scraper',
    version='0.1.0',
    author='<NAME>',
    author_email='<EMAIL>',
    description='A package to scrape game reviews from Steam.',
    keywords=['steam', 'review', 'scrape', 'crawl'],
    long_description=long_description,
    long_description_content_type="text/markdown",
    url='https://github.com/Zhihan-Zhu/steam-review-scraper',
    packages=setuptools.find_packages(),
    # Runtime dependencies: HTML parsing, HTTP, and tabular output.
    install_requires=[
        'beautifulsoup4',
        'requests',
        'pandas'
    ],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
    ],
    python_requires='>=3',
)
3229333 | <reponame>ipeterov/convenient-rpc<filename>task_server/test.py
import unittest
from lib.tasks import *
# NOTE(review): this module-level instance is shadowed by the one created
# inside the test; kept because other modules may import it.
manager = TaskManager()
class TestManager(unittest.TestCase):
    """Tests for TaskManager's runtime-estimation bookkeeping."""
    def test_estimate_runtime(self):
        """Answer 10 tasks with times 2..11, queue 9 more, check estimates."""
        # FIX: removed the unused ``import random`` and the unused ``task2``
        # fixture from the original test body.
        task = {
            'package': 1,
            'version': 2,
            'function': 3,
        }
        manager = TaskManager()
        for _ in range(10):
            manager.add_task(task)
        for i in range(10):
            id_, answer = manager.get_task()
            manager.add_answer(id_, 1, time=i+2)
        for _ in range(9):
            manager.add_task(task)
        # Times 2..11 average to 6.5; presumably estimate_time_left is the
        # 9 queued tasks x 6.5 = 58.5 -- matches the asserted values.
        self.assertEqual(manager.estimate_runtime(manager.hash_task(task)), 6.5)
        self.assertEqual(manager.estimate_time_left(), 58.5)
if __name__ == '__main__':
    unittest.main(verbosity=2)
171436 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""BigMLer - A Higher Level API to BigML's API
# Basic usage
python bigmler.py \
--train data/iris.csv \
--test data/test_iris.csv
--no-test-header
# Create an 10-model ensemble using bagging
python bigmler.py
--train train.csv \
--output submission.csv \
--objective 0 \
--types types.txt \
--name 'Iris Ensemble' \
--number_of_models 10 \
--sample_rate 0.75 \
--replacement \
--tag my_ensemble
# Make predictions using models tagged with my_ensemble
python bigmler.py \
--model_tag my_ensemble \
--test test.csv
--no-test-header
"""
from __future__ import absolute_import
import sys
from bigmler.dispatcher import main_dispatcher
from bigmler.analyze.dispatcher import analyze_dispatcher
from bigmler.cluster.dispatcher import cluster_dispatcher
from bigmler.anomaly.dispatcher import anomaly_dispatcher
from bigmler.delete.dispatcher import delete_dispatcher
from bigmler.parser import SUBCOMMANDS
from bigmler.utils import SYSTEM_ENCODING
def check_delete_option(args):
    """Translate the deprecated ``--delete`` flag into the ``delete`` subcommand.

    Mutates and returns *args*: when ``--delete`` is present, the first
    argument is rewritten to ``delete`` and the flag itself is removed;
    otherwise the list is returned untouched.
    """
    if '--delete' in args:
        flag_index = args.index('--delete')
        # Rewrite the subcommand slot first, then drop the flag -- this
        # ordering matters when the flag is itself at index 0.
        args[0] = "delete"
        del args[flag_index]
    return args
def main(args=sys.argv[1:]):
    """Main process

    Routes the CLI invocation to the matching subcommand dispatcher,
    prefixing ``main`` when the first token is not a known subcommand, and
    translating the deprecated ``--delete`` flag first.
    """
    if args:
        if not args[0].lower() in SUBCOMMANDS:
            new_args = ["main"]
            new_args.extend(args)
        else:
            new_args = args
        # checks if the old --delete syntax is used
        new_args = check_delete_option(new_args)
        # NOTE(review): ``str.decode`` only exists on Python 2 byte strings;
        # this line fails on Python 3 -- confirm the supported interpreter.
        new_args = [arg.decode(SYSTEM_ENCODING) for arg in new_args]
        if new_args[0] == "main":
            main_dispatcher(args=new_args)
        elif new_args[0] == "analyze":
            analyze_dispatcher(args=new_args)
        elif new_args[0] == "cluster":
            cluster_dispatcher(args=new_args)
        elif new_args[0] == "anomaly":
            anomaly_dispatcher(args=new_args)
        elif new_args[0] == "delete":
            delete_dispatcher(args=new_args)
    else:
        sys.exit("BigMLer used with no arguments. Check:\nbigmler --help\n\nor"
                 "\n\nbigmler analyze --help\n\n"
                 "\n\nbigmler cluster --help\n\n"
                 "\n\nbigmler anomaly --help\n\n"
                 "\n\nbigmler delete --help\n\n"
                 " for a list of options")
if __name__ == '__main__':
    main(sys.argv[1:])
| StarcoderdataPython |
43888 | from django.urls import include, path
from rest_framework.routers import DefaultRouter
from rooms import views
# Create a router and register our viewsets with it.
router = DefaultRouter()
# Per DefaultRouter conventions this yields /room/ (list/create) and
# /room/<pk>/ (detail) routes named after the "room" basename.
router.register(r"room", views.RoomViewSet, basename="room")
# The API URLs are now determined automatically by the router.
urlpatterns = [
    path("", include(router.urls)),
]
| StarcoderdataPython |
3351328 | <filename>conans/test/generators/json_test.py<gh_stars>0
import json
import unittest
import os
from conans.client.generators.json_generator import JsonGenerator
from conans.model.settings import Settings
from conans.model.conan_file import ConanFile
from conans.model.build_info import CppInfo
from conans.model.ref import ConanFileReference
from conans.test.utils.tools import TestClient
class JsonTest(unittest.TestCase):
    """Tests for Conan's JSON generator: in-memory output and on-disk files."""
    def variables_setup_test(self):
        """Build deps_cpp_info by hand and check the generated JSON payload."""
        conanfile = ConanFile(None, None, Settings({}), None)
        ref = ConanFileReference.loads("MyPkg/0.1@lasote/stables")
        cpp_info = CppInfo("dummy_root_folder1")
        cpp_info.defines = ["MYDEFINE1"]
        cpp_info.cflags.append("-Flag1=23")
        cpp_info.version = "1.3"
        cpp_info.description = "My cool description"
        conanfile.deps_cpp_info.update(cpp_info, ref.name)
        ref = ConanFileReference.loads("MyPkg2/0.1@lasote/stables")
        cpp_info = CppInfo("dummy_root_folder2")
        cpp_info.defines = ["MYDEFINE2"]
        cpp_info.version = "2.3"
        cpp_info.exelinkflags = ["-exelinkflag"]
        cpp_info.sharedlinkflags = ["-sharedlinkflag"]
        cpp_info.cppflags = ["-cppflag"]
        cpp_info.public_deps = ["MyPkg"]
        conanfile.deps_cpp_info.update(cpp_info, ref.name)
        generator = JsonGenerator(conanfile)
        json_out = generator.content
        parsed = json.loads(json_out)
        dependencies = parsed["dependencies"]
        self.assertEquals(len(dependencies), 2)
        # Dependencies keep insertion order, so MyPkg comes first.
        my_pkg = dependencies[0]
        self.assertEquals(my_pkg["name"], "MyPkg")
        self.assertEquals(my_pkg["description"], "My cool description")
        self.assertEquals(my_pkg["defines"], ["MYDEFINE1"])
    def generate_json_info_test(self):
        """Create a package, install with -g json, and verify conanbuildinfo.json."""
        conanfile_py = """from conans import ConanFile
class HelloConan(ConanFile):
    exports_sources = "*.h"
    def package(self):
        self.copy("*.h", dst="include")
    def package_info(self):
        self.env_info.MY_ENV_VAR = "foo"
        self.user_info.my_var = "my_value"
"""
        client = TestClient()
        client.save({"conanfile.py": conanfile_py,
                     "header.h": ""})
        client.run("create . Hello/0.1@lasote/testing")
        client.run("install Hello/0.1@lasote/testing -g json")
        conan_json = os.path.join(client.current_folder, "conanbuildinfo.json")
        with open(conan_json) as f:
            data = json.load(f)
        self.assertEquals(data["deps_env_info"]["MY_ENV_VAR"], "foo")
        self.assertEquals(data["deps_user_info"]["Hello"]["my_var"], "my_value")
        hello_data = data["dependencies"][0]
        self.assertTrue(os.path.exists(hello_data["rootpath"]))
        include_path = hello_data["include_paths"][0]
        self.assertTrue(os.path.isabs(include_path))
        self.assertTrue(os.path.exists(include_path))
    def generate_json_info_settings_test(self):
        """Same as above, but with explicit os/arch settings echoed in the JSON."""
        conanfile_py = """from conans import ConanFile
class HelloConan(ConanFile):
    exports_sources = "*.h"
    settings = "os", "arch"
    def package(self):
        self.copy("*.h", dst="include")
    def package_info(self):
        self.env_info.MY_ENV_VAR = "foo"
        self.user_info.my_var = "my_value"
"""
        client = TestClient()
        client.save({"conanfile.py": conanfile_py,
                     "header.h": ""})
        settings = "-sos=Linux -sarch=x86_64"
        client.run("create . Hello/0.1@lasote/testing " + settings)
        client.run("install Hello/0.1@lasote/testing -g json " + settings)
        conan_json = os.path.join(client.current_folder, "conanbuildinfo.json")
        with open(conan_json) as f:
            data = json.load(f)
        settings_data = data["settings"]
        self.assertEqual(settings_data["os"], "Linux")
        self.assertEqual(settings_data["arch"], "x86_64")
| StarcoderdataPython |
153801 | import pickle
from PIL import Image
import numpy as np
from dlib import cnn_face_detection_model_v1
from controller import Camera
from flockai.PyCatascopia.Metrics import *
from flockai.interfaces.flockai_ml import FlockAIClassifier
from flockai.models.probes.flockai_probe import FlockAIProbe, ProcessCpuUtilizationMetric, ProcessCpuTimeMetric, ProcessIOTimeMetric, \
ProcessAliveTimeMetric, ProbeAliveTimeMetric, ProcessMemoryMetric
from flockai.webots_controllers.mavic2dji import KeyboardMavic2DJI
from flockai.models.devices.device_enums import EnableableDevice, NonEnableableDevice, MotorDevice, AircraftAxis, \
Relative2DPosition, Devices
"""""""""""""""""""""
DECLARE DEVICES HERE
"""""""""""""""""""""
enableable_devices = [
(EnableableDevice.RECEIVER, "receiver"),
(EnableableDevice.CAMERA, "camera"),
(EnableableDevice.KEYBOARD, None),
(EnableableDevice.BATTERY_SENSOR, None),
(EnableableDevice.INERTIAL_UNIT, "inertial unit"),
(EnableableDevice.GPS, "gps"),
(EnableableDevice.COMPASS, "compass"),
(EnableableDevice.GYRO, "gyro")
]
non_enableable_devices = [
(NonEnableableDevice.EMITTER, "emitter"),
(NonEnableableDevice.LED, "front left led"),
(NonEnableableDevice.LED, "front right led"),
(NonEnableableDevice.DISTANCE_SENSOR, "ds0")
]
"""""""""""""""""""""
DECLARE MOTORS HERE
"""""""""""""""""""""
motor_devices = [
(MotorDevice.CAMERA, "camera roll", AircraftAxis.ROLL),
(MotorDevice.CAMERA, "camera pitch", AircraftAxis.PITCH),
(MotorDevice.CAMERA, "camera yaw", AircraftAxis.YAW),
(MotorDevice.PROPELLER, "front left propeller", Relative2DPosition(1, -1)),
(MotorDevice.PROPELLER, "front right propeller", Relative2DPosition(1, 1)),
(MotorDevice.PROPELLER, "rear left propeller", Relative2DPosition(-1, -1)),
(MotorDevice.PROPELLER, "rear right propeller", Relative2DPosition(-1, 1)),
]
devices = Devices(enableable_devices, non_enableable_devices, motor_devices)
"""""""""""""""""""""""""""
CREATE MONITORING PROBES
"""""""""""""""""""""""""""
metrics = [
ProcessCpuUtilizationMetric(name='cpu_pct', units='%', desc='process-level cpu utilization', minVal=0, higherIsBetter=False),
ProcessCpuTimeMetric('cpu_time', 's', 'process-level cpu time', minVal=0, higherIsBetter=False),
ProcessIOTimeMetric('io_time', 's', 'process-level io time (linux-only)', minVal=0, higherIsBetter=False),
ProcessAliveTimeMetric('alive_time', 's', 'time process is alive', minVal=0, higherIsBetter=False),
ProbeAliveTimeMetric('probe_alive_time', 's', 'time probe is alive', minVal=0, higherIsBetter=False),
ProcessMemoryMetric('mem_pct', '%', 'process-level memory utilization', minVal=0, higherIsBetter=False),
]
probe = FlockAIProbe(metrics, name='Example Probe', periodicity=1)
"""""""""""""""""""""""""""""
INITIALIZE THE CONTROLLER
"""""""""""""""""""""""""""""
controller = KeyboardMavic2DJI(devices=devices, probe=probe)
"""""""""""""""""""""""""""""""""""
IMPLEMENT THE FLOCKAI CLASSIFIER
"""""""""""""""""""""""""""""""""""
class FaceDetectionClassifier(FlockAIClassifier):
    """CNN face detector polled periodically by FlockAI during the flight."""
    def __init__(self):
        super().__init__()
        # REQUIRED ATTRIBUTES
        self.periodicity = 5  # defines the periodicity of the prediction
        self.onboard = True  # defines if the classifier is run on the drone, if False, the drone transmits the input data via its emitter device
        self._load_model()
    """ IMPLEMENT ABSTRACT METHODS"""
    def _load_model(self):
        """
        Load the pickled dlib CNN face-detection model from disk.
        :return:
        """
        filename = 'cnnFaceRecognition.bin'
        # FIX: use a context manager so the file handle is closed promptly;
        # the original passed an unclosed open() straight to pickle.load.
        with open(filename, 'rb') as model_file:
            self.model = pickle.load(model_file)
        self.cnn_face_detector = cnn_face_detection_model_v1(self.model)
    def _get_model_input(self):
        """
        Capture a frame from the controller's camera and save it to disk.
        :return: path of the saved image file
        """
        filename = f'logs/Images/image_{str(int(time.time()))}.jpg'
        camera: Camera = controller.devices['camera']['device']  # get access to controller devices
        camera.saveImage(filename, 20)
        return filename
    def predict(self):
        """
        Main pipeline method used by FlockAI during the simulation to make predictions.
        :return: list of (top, right, bottom, left) face boxes, or None on
            simulation ticks that fall outside the configured periodicity
        """
        if controller.getTime() % self.periodicity != 0.0:  # get access to controller functions
            return None
        image_filename = self._get_model_input()
        # return image_filename
        image = self._load_image_file(image_filename)
        return [self._trim_css_to_bounds(self._rect_to_css(face.rect), image.shape) for face in self.cnn_face_detector(image, 1)]
    """ IMPLEMENT CUSTOM METHODS """
    def _get_foo_unused_input(self):
        """
        Unused method showcasing a different input method that the user needs
        :return: nested [x][y][rgb] list built from the raw camera buffer
        """
        camera: Camera = controller.devices['camera']['device']
        image = camera.getImage()
        width = camera.getWidth()
        height = camera.getHeight()
        image_vector = [[[camera.imageGetRed(image, width, x, y),
                          camera.imageGetGreen(image, width, x, y),
                          camera.imageGetBlue(image, width, x, y)] for y in range(height)] for x in range(width)]
        return image_vector
    def _trim_css_to_bounds(self, css, image_shape):
        # Clamp a (top, right, bottom, left) box to the image bounds.
        return max(css[0], 0), min(css[1], image_shape[1]), min(css[2], image_shape[0]), max(css[3], 0)
    def _rect_to_css(self, rect):
        # Convert a dlib rectangle to a (top, right, bottom, left) tuple.
        return rect.top(), rect.right(), rect.bottom(), rect.left()
    def _load_image_file(self, file, mode='RGB'):
        # Read an image file into a numpy array, converting to *mode*.
        im = Image.open(file)
        if mode:
            im = im.convert(mode)
        return np.array(im)
"""""""""""""""""""""""""""""""""""""""""""""
SET THE ML MODEL ON THE CONTROLLER AND RUN IT
"""""""""""""""""""""""""""""""""""""""""""""
controller.model = FaceDetectionClassifier()
controller.run()
| StarcoderdataPython |
113176 | from ..gui.main_window import Ui_EditorMainWindow
from PySide.QtGui import QApplication, QMainWindow, QPixmap
from PySide import QtGui, QtCore
from PySide.QtCore import QObject
import sys
import numpy as np
from .. import util
from .brush_dialog import BrushDialog
from .about_dialog import AboutDialog
from .new_image_dialog import NewImageDialog
from .helper_threads import IFTThread
class EditorMainWindow(QMainWindow):
    """Main window: shows a spatial image beside its FFT and keeps both in sync."""
    def __init__(self, parent=None):
        super(EditorMainWindow, self).__init__(parent)
        self.ui = Ui_EditorMainWindow()
        self.ui.setupUi(self)
        # Menu actions.
        self.ui.action_open.triggered.connect(self.open_file)
        self.ui.action_save_spatial.triggered.connect(self.save_spatial)
        self.ui.action_new_image.triggered.connect(self.new_image)
        self.ui.action_linked_zoom.triggered.connect(self.link_zoom)
        self.ui.action_save_both.triggered.connect(self.save_both)
        self.ui.action_brush.triggered.connect(self.show_brush)
        self.ui.action_website.triggered.connect(self.show_website)
        self.ui.action_about.triggered.connect(self.show_about)
        self.ui.action_none.triggered.connect(self.remove_brush)
        # Zoom buttons for each pane.
        self.ui.image_zoom_in_btn.clicked.connect(self.image_zoom_in)
        self.ui.image_zoom_out_btn.clicked.connect(self.image_zoom_out)
        self.ui.freq_zoom_in_btn.clicked.connect(self.freq_zoom_in)
        self.ui.freq_zoom_out_btn.clicked.connect(self.freq_zoom_out)
        # Mouse events on both labels are routed through self.eventFilter.
        self.ui.image_label.installEventFilter(self)
        self.ui.freq_label.installEventFilter(self)
        self.ui.image_label.setMouseTracking(True)
        self.ui.freq_label.setMouseTracking(True)
        self.spatial_image = None
        # This will store the shifted frequency image
        self.frequency_array_magnitude = None
        self.frequency_array_angle = None
        # Cached pixmaps (raw and pre-scaled) for each pane.
        self.freq_pixmap = None
        self.scaled_freq_pixmap = None
        self.image_pixmap = None
        self.scaled_image_pixmap = None
        self.spatial_scale = 1.0
        self.frequency_scale = 1.0
        self.current_brush = None
        self.is_zoom_linked = False
    def open_file(self):
        """ Signal handler for the Open Menu """
        filters = "Image Files (*.png *.jpg *.bmp)"
        file_name = QtGui.QFileDialog.getOpenFileName(self, "Open File",
                                                      filter=filters)[0]
        if file_name:
            image = QtGui.QImage(file_name)
            # NOTE(review): this reassignment is dead; 'filters' was already
            # set above and is not used again.
            filters = "Image Files (*.png *.jpg *.bmp)"
            if image.isNull():
                QtGui.QMessageBox.information(self, "Image Viewer",
                                              "Cannot load %s." % file_name)
                return
            array = util.qimage_to_numpy(image)
            self.load_image_from_array(array)
    def load_image_from_array(self, array):
        """ Loads an array as spatial domain image.
        This function recomputes the fft and updates both the UIs. """
        image = util.rgb_to_yuv(array)
        garray = image[..., 0]
        # FFT of the luma channel only; fftshift centers the DC component.
        farray = np.fft.fft2(garray)
        farray = np.fft.fftshift(farray)
        self.set_yuv_image(image)
        self.set_freq_image_angle(np.angle(farray))
        self.set_freq_image_magnitude(np.absolute(farray))
    def set_freq_image_magnitude(self, fimg):
        """Set a numpy array as the frequency-domain image magnitude.

        Expects an appropriately shifted numpy array as input.  Except
        taking log, no manipulation to the values is done before rendering.
        Recomputes all internal intermediate values and re-renders the
        frequency UI.
        """
        self.frequency_array_magnitude = fimg
        qimage = util.fft_to_qimage(self.frequency_array_magnitude)
        pixmap = QPixmap.fromImage(qimage)
        self.set_freq_pixmap(pixmap)
        self.invalidate_freq_scale()
        self.render_freq()
    def set_freq_pixmap(self, pixmap):
        """Cache the pixmap to be shown for the frequency image.

        Only caches; no computation or UI updating is done.
        """
        self.freq_pixmap = pixmap
    def invalidate_freq_scale(self):
        """Recompute the cached scaled frequency pixmap after a change.

        Call when either ``self.freq_pixmap`` or ``self.frequency_scale``
        changes.  Merely caches the scaled pixmap; no UI updating is done.
        """
        w, h = self.freq_pixmap.width(), self.freq_pixmap.height()
        sw, sh = int(w*self.frequency_scale), int(h*self.frequency_scale)
        self.scaled_freq_pixmap = self.freq_pixmap.scaled(sw, sh)
    def render_freq(self, pixmap=None):
        """Render *pixmap* as the frequency image; defaults to the last
        known scaled frequency pixmap.

        Performs no computation.  When a brush is set, a pixmap with the
        brush marker drawn on it can be supplied instead.
        """
        if not pixmap:
            pixmap = self.scaled_freq_pixmap
        self.ui.freq_label.setPixmap(pixmap)
    def set_freq_image_angle(self, fimg):
        "Set a numpy array as the frequency-domain image angle (phase)."
        self.frequency_array_angle = fimg
    def set_yuv_image(self, img):
        """Set the spatial image from a YUV array.

        Expects a ``uint8`` array; updates the spatial-domain image in the
        UI along with all internal fields.
        """
        self.spatial_image = img
        img = util.yuv_to_rgb(self.spatial_image)
        qimage = util.numpy_to_qimage(img)
        pixmap = QPixmap.fromImage(qimage)
        self.set_image_pixmap(pixmap)
        self.invalidate_image_scale()
        self.render_image()
    def set_image_pixmap(self, pixmap):
        """Cache the pixmap to be shown for the spatial image.

        Only caches; no computation or UI updating is done.
        """
        self.image_pixmap = pixmap
    def invalidate_image_scale(self):
        """Recompute the cached scaled spatial pixmap after a change.

        Call when either ``self.image_pixmap`` or ``self.spatial_scale``
        changes.  Merely caches the scaled pixmap; no UI updating is done.
        """
        w, h = self.image_pixmap.width(), self.image_pixmap.height()
        sw, sh = int(w*self.spatial_scale), int(h*self.spatial_scale)
        self.scaled_image_pixmap = self.image_pixmap.scaled(sw, sh)
    def render_image(self, pixmap=None):
        """Render *pixmap* as the spatial image; defaults to the last known
        scaled spatial pixmap.
        """
        if not pixmap:
            pixmap = self.scaled_image_pixmap
        self.ui.image_label.setPixmap(pixmap)
    # NOTE(review): the four zoom handlers below share one pattern (adjust
    # scale, invalidate, render, mirror to the other pane when zoom is
    # linked); a single helper taking (delta, domain) would remove the
    # duplication.
    def image_zoom_in(self):
        " Zoom in the spatial domain image "
        if self.spatial_image is None:
            return
        self.spatial_scale += 0.1
        self.invalidate_image_scale()
        self.render_image()
        if self.is_zoom_linked:
            self.frequency_scale = self.spatial_scale
            self.invalidate_freq_scale()
            self.render_freq()
    def image_zoom_out(self):
        " Zoom out the spatial domain image "
        if self.spatial_image is None:
            return
        self.spatial_scale -= 0.1
        self.invalidate_image_scale()
        self.render_image()
        if self.is_zoom_linked:
            self.frequency_scale = self.spatial_scale
            self.invalidate_freq_scale()
            self.render_freq()
    def freq_zoom_out(self):
        "Zoom out the frequency domain image."
        if self.frequency_array_magnitude is None:
            return
        self.frequency_scale -= 0.1
        self.invalidate_freq_scale()
        self.render_freq()
        if self.is_zoom_linked:
            self.spatial_scale = self.frequency_scale
            self.invalidate_image_scale()
            self.render_image()
    def freq_zoom_in(self):
        "Zoom in the frequency domain image."
        if self.frequency_array_magnitude is None:
            return
        self.frequency_scale += 0.1
        self.invalidate_freq_scale()
        self.render_freq()
        if self.is_zoom_linked:
            self.spatial_scale = self.frequency_scale
            self.invalidate_image_scale()
            self.render_image()
def handle_image_move(self, event):
"Handle mouse move on the spatial image."
if self.spatial_image is None:
return
self.handle_image_stats(event)
def handle_image_stats(self, event):
"""Given an event, take care of displaying stats for spatial image.
The assumption made here is that the QLabel is exactly the size of the
image.
"""
pos = event.pos()
x, y = pos.x(), pos.y()
x, y = int(x/self.spatial_scale), int(y/self.spatial_scale)
r, c = y, x
r = np.clip(r, 0, self.spatial_image.shape[0])
c = np.clip(c, 0, self.spatial_image.shape[1])
value = self.spatial_image[r, c].astype(np.int)
msg = "X:%d Y:%d Value:" % (x, y)
msg += str(value)
self.ui.image_info_label.setText(msg)
    def handle_freq_move(self, event):
        """Handle mouse move on the frequency domain image: refresh stats,
        draw the brush marker, and apply the brush while the button is held.
        """
        if self.frequency_array_magnitude is None:
            return
        self.handle_freq_stats(event)
        if self.current_brush:
            pixmap = self.scaled_freq_pixmap.copy()
            self.current_brush.draw_marker(event.x(), event.y(), pixmap,
                                           self.frequency_scale)
            if event.buttons() & QtCore.Qt.MouseButton.LeftButton:
                self.handle_freq_modify(event)
            # We use the pre computed scaled pixmap and mark the brush on it
            # before displaying
            self.render_freq(pixmap)
    def handle_freq_stats(self, event):
        """Given an event, show frequency image stats.
        The assumption made here is that the QLabel is exactly the size of the
        image.
        """
        pos = event.pos()
        x, y = pos.x(), pos.y()
        x, y = int(x/self.frequency_scale), int(y/self.frequency_scale)
        r, c = y, x
        r = np.clip(r, 0, self.frequency_array_magnitude.shape[0] - 1)
        c = np.clip(c, 0, self.frequency_array_magnitude.shape[1] - 1)
        value = self.frequency_array_magnitude[r, c]
        msg = "X:%d Y:%d Value:%d" % (x, y, value)
        self.ui.freq_info_label.setText(msg)
    def eventFilter(self, obj, event):
        """Route mouse events from the two image labels to their handlers.

        Frequency-pane events are ignored while the label is disabled (an
        IFT worker thread is running, see recompute_spatial_image).
        """
        if obj == self.ui.image_label:
            if event.type() == QtCore.QEvent.MouseMove:
                self.handle_image_move(event)
                return True
        elif obj == self.ui.freq_label:
            if not self.ui.freq_label.isEnabled():
                return False
            if event.type() == QtCore.QEvent.MouseMove:
                self.handle_freq_move(event)
                return True
            elif event.type() == QtCore.QEvent.MouseButtonPress:
                if event.button() == QtCore.Qt.MouseButton.LeftButton:
                    self.handle_freq_modify(event)
                    return True
            elif event.type() == QtCore.QEvent.MouseButtonRelease:
                if event.button() == QtCore.Qt.MouseButton.LeftButton:
                    if self.current_brush:
                        self.recompute_spatial_image()
                    return True
        return QObject.eventFilter(self, obj, event)
    def handle_freq_modify(self, event):
        "Apply the current brush to the magnitude/phase arrays at the cursor."
        if not self.current_brush is None:
            x, y = event.x(), event.y()
            x /= self.frequency_scale
            y /= self.frequency_scale
            # NOTE(review): h and w are computed but never used.
            h, w = self.frequency_array_magnitude.shape
            magnitude = self.frequency_array_magnitude
            angle = self.frequency_array_angle
            self.current_brush.apply(x, y, magnitude, angle)
            self.set_freq_image_magnitude(self.frequency_array_magnitude)
            self.render_freq()
    def show_brush(self):
        "Show the brush dialog box and adopt the selected brush, if any."
        d = BrushDialog(self, self.current_brush)
        d.exec_()
        if d.get_brush():
            self.current_brush = d.get_brush()
    def remove_brush(self):
        "Deselect the brush and redraw the frequency pane without its marker."
        self.current_brush = None
        self.render_freq()
    def recompute_spatial_image(self):
        """Recompute the spatial image from the frequency image and render it.
        This function just launches a thread to do the task.
        """
        magnitude = self.frequency_array_magnitude
        angle = self.frequency_array_angle
        self.ift_thread = IFTThread(magnitude, angle)
        self.ift_thread.ift_done.connect(self.ift_done_recv)
        # To prevent mutiple threads modifying images
        # we disable is while one thread is working
        self.ui.freq_label.setEnabled(False)
        self.ift_thread.start()
    def ift_done_recv(self, array):
        "Receiver for the ift_done signal: install the luma and re-enable input."
        self.spatial_image[:, :, 0] = array
        self.set_yuv_image(self.spatial_image)
        self.ui.freq_label.setEnabled(True)
def save_spatial(self):
"Save the spatial domain image."
if self.spatial_image is None:
QtGui.QMessageBox.information(self, "Error", "No Image to Save")
return
filters = "Image Files (*.png)"
filename = QtGui.QFileDialog.getSaveFileName(self, "Save Image",
filter=filters)[0]
if not filename.lower().endswith('.png'):
filename += '.png'
arr = util.yuv_to_rgb(self.spatial_image)
image = util.numpy_to_qimage(arr)
success = image.save(filename)
if not success:
msg = "Could not save image at the location."
QtGui.QMessageBox.information(self, "Error", msg)
def save_both(self):
"Save image and its transofrm."
if self.spatial_image is None or \
self.frequency_array_magnitude is None:
QtGui.QMessageBox.information(self, "Error", "No Image to Save")
return
filters = "Image Files (*.png)"
filename = QtGui.QFileDialog.getSaveFileName(self, "Save Image",
filter=filters)[0]
if not filename.lower().endswith('.png'):
filename += '.png'
arr = util.yuv_to_rgb(self.spatial_image)
r, c, ch = arr.shape
out = np.zeros((r, c*2, ch), dtype=arr.dtype)
out[:, :c, :] = arr
freq_img = util.fft_to_qimage(self.frequency_array_magnitude)
freq_arr = util.qimage_to_numpy(freq_img)
out[:, c:, :] = freq_arr
image = util.numpy_to_qimage(out)
success = image.save(filename)
if not success:
msg = "Could not save image at the location."
QtGui.QMessageBox.information(self, "Error", msg)
    def show_about(self):
        "Display the about dialog."
        d = AboutDialog(self)
        d.exec_()
    def show_website(self):
        "Open the project website in a browser."
        QtGui.QDesktopServices.openUrl("http://fredo-editor.github.io")
    def new_image(self):
        "Show a dialog to create a new blank image of a user-chosen size."
        d = NewImageDialog()
        d.exec_()
        if d.get_size():
            w, h = d.get_size()
            array = np.zeros((h, w, 3), dtype=np.uint8)
            self.load_image_from_array(array)
    def link_zoom(self):
        """Toggle linked zoom; when enabling, reset both panes to scale 1.0
        so they start from the same scale.
        """
        if self.ui.action_linked_zoom.isChecked():
            self.is_zoom_linked = True
            self.spatial_scale = 1.0
            self.invalidate_image_scale()
            self.render_image()
            self.frequency_scale = 1.0
            self.invalidate_freq_scale()
            self.render_freq()
        else:
            self.is_zoom_linked = False
def run():
    "Create the Qt application and main window, then enter the event loop."
    app = QApplication(sys.argv)
    editor = EditorMainWindow()
    editor.show()
    sys.exit(app.exec_())
if __name__ == '__main__':
    run()
| StarcoderdataPython |
3218102 | """
A custom manager for working with trees of objects.
"""
from __future__ import unicode_literals
import functools
import contextlib
from itertools import groupby
from django.db import models, connections, router
from django.db.models import F, ManyToManyField, Max, Q
from django.utils.translation import ugettext as _
from mptt.compat import cached_field_value
from mptt.exceptions import CantDisableUpdates, InvalidMove
from mptt.querysets import TreeQuerySet
from mptt.utils import _get_tree_model
from mptt.signals import node_moved
__all__ = ('TreeManager',)
COUNT_SUBQUERY = """(
SELECT COUNT(*)
FROM %(rel_table)s
WHERE %(mptt_fk)s = %(mptt_table)s.%(mptt_rel_to)s
)"""
CUMULATIVE_COUNT_SUBQUERY = """(
SELECT COUNT(*)
FROM %(rel_table)s
WHERE %(mptt_fk)s IN
(
SELECT m2.%(mptt_rel_to)s
FROM %(mptt_table)s m2
WHERE m2.%(tree_id)s = %(mptt_table)s.%(tree_id)s
AND m2.%(left)s BETWEEN %(mptt_table)s.%(left)s
AND %(mptt_table)s.%(right)s
)
)"""
COUNT_SUBQUERY_M2M = """(
SELECT COUNT(*)
FROM %(rel_table)s j
INNER JOIN %(rel_m2m_table)s k ON j.%(rel_pk)s = k.%(rel_m2m_column)s
WHERE k.%(mptt_fk)s = %(mptt_table)s.%(mptt_pk)s
)"""
CUMULATIVE_COUNT_SUBQUERY_M2M = """(
SELECT COUNT(*)
FROM %(rel_table)s j
INNER JOIN %(rel_m2m_table)s k ON j.%(rel_pk)s = k.%(rel_m2m_column)s
WHERE k.%(mptt_fk)s IN
(
SELECT m2.%(mptt_pk)s
FROM %(mptt_table)s m2
WHERE m2.%(tree_id)s = %(mptt_table)s.%(tree_id)s
AND m2.%(left)s BETWEEN %(mptt_table)s.%(left)s
AND %(mptt_table)s.%(right)s
)
)"""
def delegate_manager(method):
    """
    Decorator for manager methods: forward the call to the method of the
    same name on ``self._base_manager`` when one is set, otherwise run
    the decorated method itself.
    """
    @functools.wraps(method)
    def wrapped(self, *args, **kwargs):
        base = self._base_manager
        if base:
            return getattr(base, method.__name__)(*args, **kwargs)
        return method(self, *args, **kwargs)
    return wrapped
class TreeManager(models.Manager.from_queryset(TreeQuerySet)):
"""
A manager for working with trees of objects.
"""
    def contribute_to_class(self, model, name):
        """
        Attach the manager to ``model`` and record which model in the
        inheritance chain actually carries the MPTT fields.
        """
        super(TreeManager, self).contribute_to_class(model, name)
        if not model._meta.abstract:
            self.tree_model = _get_tree_model(model)
            self._base_manager = None
            if self.tree_model is not model:
                # _base_manager is the treemanager on tree_model
                self._base_manager = self.tree_model._tree_manager
def get_queryset(self, *args, **kwargs):
"""
Ensures that this manager always returns nodes in tree order.
"""
return super(TreeManager, self).get_queryset(
*args, **kwargs
).order_by(
self.tree_id_attr, self.left_attr
)
def _get_queryset_relatives(self, queryset, direction, include_self):
"""
Returns a queryset containing either the descendants
``direction == desc`` or the ancestors ``direction == asc`` of a given
queryset.
This function is not meant to be called directly, although there is no
harm in doing so.
Instead, it should be used via ``get_queryset_descendants()`` and/or
``get_queryset_ancestors()``.
This function works by grouping contiguous siblings and using them to create
a range that selects all nodes between the range, instead of querying for each
node individually. Three variables are required when querying for ancestors or
descendants: tree_id_attr, left_attr, right_attr. If we weren't using ranges
and our queryset contained 100 results, the resulting SQL query would contain
300 variables. However, when using ranges, if the same queryset contained 10
sets of contiguous siblings, then the resulting SQL query should only contain
30 variables.
The attributes used to create the range are completely
dependent upon whether you are ascending or descending the tree.
* Ascending (ancestor nodes): select all nodes whose right_attr is greater
than (or equal to, if include_self = True) the smallest right_attr within
the set of contiguous siblings, and whose left_attr is less than (or equal
to) the largest left_attr within the set of contiguous siblings.
* Descending (descendant nodes): select all nodes whose left_attr is greater
than (or equal to, if include_self = True) the smallest left_attr within
the set of contiguous siblings, and whose right_attr is less than (or equal
to) the largest right_attr within the set of contiguous siblings.
The result is the more contiguous siblings in the original queryset, the fewer
SQL variables will be required to execute the query.
"""
assert self.model is queryset.model
opts = queryset.model._mptt_meta
filters = Q()
e = 'e' if include_self else ''
max_op = 'lt' + e
min_op = 'gt' + e
if direction == 'asc':
max_attr = opts.left_attr
min_attr = opts.right_attr
elif direction == 'desc':
max_attr = opts.right_attr
min_attr = opts.left_attr
tree_key = opts.tree_id_attr
min_key = '%s__%s' % (min_attr, min_op)
max_key = '%s__%s' % (max_attr, max_op)
q = queryset.order_by(opts.tree_id_attr, opts.parent_attr, opts.left_attr).only(
opts.tree_id_attr,
opts.left_attr,
opts.right_attr,
min_attr,
max_attr,
opts.parent_attr,
# These fields are used by MPTTModel.update_mptt_cached_fields()
*[f.lstrip('-') for f in opts.order_insertion_by]
)
if not q:
return self.none()
for group in groupby(
q,
key=lambda n: (
getattr(n, opts.tree_id_attr),
getattr(n, opts.parent_attr + '_id'),
)):
next_lft = None
for node in list(group[1]):
tree, lft, rght, min_val, max_val = (getattr(node, opts.tree_id_attr),
getattr(node, opts.left_attr),
getattr(node, opts.right_attr),
getattr(node, min_attr),
getattr(node, max_attr))
if next_lft is None:
next_lft = rght + 1
min_max = {'min': min_val, 'max': max_val}
elif lft == next_lft:
if min_val < min_max['min']:
min_max['min'] = min_val
if max_val > min_max['max']:
min_max['max'] = max_val
next_lft = rght + 1
elif lft != next_lft:
filters |= Q(**{
tree_key: tree,
min_key: min_max['min'],
max_key: min_max['max'],
})
min_max = {'min': min_val, 'max': max_val}
next_lft = rght + 1
filters |= Q(**{
tree_key: tree,
min_key: min_max['min'],
max_key: min_max['max'],
})
return self.filter(filters)
def get_queryset_descendants(self, queryset, include_self=False):
"""
Returns a queryset containing the descendants of all nodes in the
given queryset.
If ``include_self=True``, nodes in ``queryset`` will also
be included in the result.
"""
return self._get_queryset_relatives(queryset, 'desc', include_self)
def get_queryset_ancestors(self, queryset, include_self=False):
"""
Returns a queryset containing the ancestors
of all nodes in the given queryset.
If ``include_self=True``, nodes in ``queryset`` will also
be included in the result.
"""
return self._get_queryset_relatives(queryset, 'asc', include_self)
    @contextlib.contextmanager
    def disable_mptt_updates(self):
        """
        Context manager. Disables mptt updates.
        NOTE that this context manager causes inconsistencies! MPTT model
        methods are not guaranteed to return the correct results.
        When to use this method:
            If used correctly, this method can be used to speed up bulk
            updates.
            This doesn't do anything clever. It *will* mess up your tree. You
            should follow this method with a call to ``TreeManager.rebuild()``
            to ensure your tree stays sane, and you should wrap both calls in a
            transaction.
            This is best for updates that span a large part of the table. If
            you are doing localised changes (one tree, or a few trees) consider
            using ``delay_mptt_updates``.
            If you are making only minor changes to your tree, just let the
            updates happen.
        Transactions:
            This doesn't enforce any transactional behavior. You should wrap
            this in a transaction to ensure database consistency.
        If updates are already disabled on the model, this is a noop.
        Usage::
            with transaction.atomic():
                with MyNode.objects.disable_mptt_updates():
                    ## bulk updates.
                MyNode.objects.rebuild()
        """
        # Error cases:
        if self.model._meta.abstract:
            # an abstract model. Design decision needed - do we disable
            # updates for all concrete models that derive from this model? I
            # vote no - that's a bit implicit and it's a weird use-case
            # anyway. Open to further discussion :)
            raise CantDisableUpdates(
                "You can't disable/delay mptt updates on %s,"
                " it's an abstract model" % self.model.__name__
            )
        elif self.model._meta.proxy:
            # a proxy model. disabling updates would implicitly affect other
            # models using the db table. Caller should call this on the
            # manager for the concrete model instead, to make the behavior
            # explicit.
            raise CantDisableUpdates(
                "You can't disable/delay mptt updates on %s, it's a proxy"
                " model. Call the concrete model instead."
                % self.model.__name__
            )
        elif self.tree_model is not self.model:
            # a multiple-inheritance child of an MPTTModel. Disabling
            # updates may affect instances of other models in the tree.
            raise CantDisableUpdates(
                "You can't disable/delay mptt updates on %s, it doesn't"
                " contain the mptt fields."
                % self.model.__name__
            )
        if not self.model._mptt_updates_enabled:
            # already disabled, noop.
            yield
        else:
            self.model._set_mptt_updates_enabled(False)
            try:
                yield
            finally:
                # Re-enable updates even if the managed block raised.
                self.model._set_mptt_updates_enabled(True)
    @contextlib.contextmanager
    def delay_mptt_updates(self):
        """
        Context manager. Delays mptt updates until the end of a block of bulk
        processing.
        NOTE that this context manager causes inconsistencies! MPTT model
        methods are not guaranteed to return the correct results until the end
        of the context block.
        When to use this method:
            If used correctly, this method can be used to speed up bulk
            updates. This is best for updates in a localised area of the db
            table, especially if all the updates happen in a single tree and
            the rest of the forest is left untouched. No subsequent rebuild is
            necessary.
            ``delay_mptt_updates`` does a partial rebuild of the modified trees
            (not the whole table). If used indiscriminately, this can actually
            be much slower than just letting the updates occur when they're
            required.
            The worst case occurs when every tree in the table is modified just
            once. That results in a full rebuild of the table, which can be
            *very* slow.
            If your updates will modify most of the trees in the table (not a
            small number of trees), you should consider using
            ``TreeManager.disable_mptt_updates``, as it does much fewer
            queries.
        Transactions:
            This doesn't enforce any transactional behavior. You should wrap
            this in a transaction to ensure database consistency.
        Exceptions:
            If an exception occurs before the processing of the block, delayed
            updates will not be applied.
        Usage::
            with transaction.atomic():
                with MyNode.objects.delay_mptt_updates():
                    ## bulk updates.
        """
        with self.disable_mptt_updates():
            if self.model._mptt_is_tracking:
                # already tracking, noop.
                yield
            else:
                self.model._mptt_start_tracking()
                try:
                    yield
                except Exception:
                    # stop tracking, but discard results
                    self.model._mptt_stop_tracking()
                    raise
                # Success: partially rebuild every tree recorded as modified.
                results = self.model._mptt_stop_tracking()
                partial_rebuild = self.partial_rebuild
                for tree_id in results:
                    partial_rebuild(tree_id)
    @property
    def parent_attr(self):
        # Name of the model field that points at the parent node.
        return self.model._mptt_meta.parent_attr
    @property
    def left_attr(self):
        # Name of the model field holding the node's left edge value.
        return self.model._mptt_meta.left_attr
    @property
    def right_attr(self):
        # Name of the model field holding the node's right edge value.
        return self.model._mptt_meta.right_attr
    @property
    def tree_id_attr(self):
        # Name of the model field holding the tree id.
        return self.model._mptt_meta.tree_id_attr
    @property
    def level_attr(self):
        # Name of the model field holding the node's depth level.
        return self.model._mptt_meta.level_attr
def _translate_lookups(self, **lookups):
new_lookups = {}
join_parts = '__'.join
for k, v in lookups.items():
parts = k.split('__')
new_parts = []
new_parts__append = new_parts.append
for part in parts:
new_parts__append(getattr(self, part + '_attr', part))
new_lookups[join_parts(new_parts)] = v
return new_lookups
@delegate_manager
def _mptt_filter(self, qs=None, **filters):
"""
Like ``self.filter()``, but translates name-agnostic filters for MPTT
fields.
"""
if qs is None:
qs = self
return qs.filter(**self._translate_lookups(**filters))
@delegate_manager
def _mptt_update(self, qs=None, **items):
"""
Like ``self.update()``, but translates name-agnostic MPTT fields.
"""
if qs is None:
qs = self
return qs.update(**self._translate_lookups(**items))
    def _get_connection(self, **hints):
        # Database connection chosen by the router for writes on this model.
        return connections[router.db_for_write(self.model, **hints)]
    def add_related_count(self, queryset, rel_model, rel_field, count_attr,
                          cumulative=False):
        """
        Adds a related item count to a given ``QuerySet`` using its
        ``extra`` method, for a ``Model`` class which has a relation to
        this ``Manager``'s ``Model`` class.
        Arguments:
        ``rel_model``
            A ``Model`` class which has a relation to this `Manager``'s
            ``Model`` class.
        ``rel_field``
            The name of the field in ``rel_model`` which holds the
            relation.
        ``count_attr``
            The name of an attribute which should be added to each item in
            this ``QuerySet``, containing a count of how many instances
            of ``rel_model`` are related to it through ``rel_field``.
        ``cumulative``
            If ``True``, the count will be for each item and all of its
            descendants, otherwise it will be for each item itself.
        """
        connection = self._get_connection()
        qn = connection.ops.quote_name
        meta = self.model._meta
        mptt_field = rel_model._meta.get_field(rel_field)
        # Pick the SQL template matching the relation type (m2m vs FK)
        # and whether descendants are included in the count.
        if isinstance(mptt_field, ManyToManyField):
            if cumulative:
                subquery = CUMULATIVE_COUNT_SUBQUERY_M2M % {
                    'rel_table': qn(rel_model._meta.db_table),
                    'rel_pk': qn(rel_model._meta.pk.column),
                    'rel_m2m_table': qn(mptt_field.m2m_db_table()),
                    'rel_m2m_column': qn(mptt_field.m2m_column_name()),
                    'mptt_fk': qn(mptt_field.m2m_reverse_name()),
                    'mptt_table': qn(self.tree_model._meta.db_table),
                    'mptt_pk': qn(meta.pk.column),
                    'tree_id': qn(meta.get_field(self.tree_id_attr).column),
                    'left': qn(meta.get_field(self.left_attr).column),
                    'right': qn(meta.get_field(self.right_attr).column),
                }
            else:
                subquery = COUNT_SUBQUERY_M2M % {
                    'rel_table': qn(rel_model._meta.db_table),
                    'rel_pk': qn(rel_model._meta.pk.column),
                    'rel_m2m_table': qn(mptt_field.m2m_db_table()),
                    'rel_m2m_column': qn(mptt_field.m2m_column_name()),
                    'mptt_fk': qn(mptt_field.m2m_reverse_name()),
                    'mptt_table': qn(self.tree_model._meta.db_table),
                    'mptt_pk': qn(meta.pk.column),
                }
        else:
            if cumulative:
                subquery = CUMULATIVE_COUNT_SUBQUERY % {
                    'rel_table': qn(rel_model._meta.db_table),
                    'mptt_fk': qn(rel_model._meta.get_field(rel_field).column),
                    'mptt_table': qn(self.tree_model._meta.db_table),
                    'mptt_rel_to': qn(mptt_field.remote_field.field_name),
                    'tree_id': qn(meta.get_field(self.tree_id_attr).column),
                    'left': qn(meta.get_field(self.left_attr).column),
                    'right': qn(meta.get_field(self.right_attr).column),
                }
            else:
                subquery = COUNT_SUBQUERY % {
                    'rel_table': qn(rel_model._meta.db_table),
                    'mptt_fk': qn(rel_model._meta.get_field(rel_field).column),
                    'mptt_table': qn(self.tree_model._meta.db_table),
                    'mptt_rel_to': qn(mptt_field.remote_field.field_name),
                }
        return queryset.extra(select={count_attr: subquery})
    @delegate_manager
    def insert_node(self, node, target, position='last-child', save=False,
                    allow_existing_pk=False, refresh_target=True):
        """
        Sets up the tree state for ``node`` (which has not yet been
        inserted into in the database) so it will be positioned relative
        to a given ``target`` node as specified by ``position`` (when
        appropriate) it is inserted, with any necessary space already
        having been made for it.
        A ``target`` of ``None`` indicates that ``node`` should be
        the last root node.
        If ``save`` is ``True``, ``node``'s ``save()`` method will be
        called before it is returned.
        NOTE: This is a low-level method; it does NOT respect
        ``MPTTMeta.order_insertion_by``. In most cases you should just
        set the node's parent and let mptt call this during save.
        """
        if node.pk and not allow_existing_pk and self.filter(pk=node.pk).exists():
            raise ValueError(_('Cannot insert a node which has already been saved.'))
        if target is None:
            # No target: node becomes the root of a brand new last tree.
            tree_id = self._get_next_tree_id()
            setattr(node, self.left_attr, 1)
            setattr(node, self.right_attr, 2)
            setattr(node, self.level_attr, 0)
            setattr(node, self.tree_id_attr, tree_id)
            setattr(node, self.parent_attr, None)
        elif target.is_root_node() and position in ['left', 'right']:
            # Sibling of a root node: allocate a whole tree id next to the
            # target's tree rather than inserting within a tree.
            if refresh_target:
                # Ensure mptt values on target are not stale.
                target._mptt_refresh()
            target_tree_id = getattr(target, self.tree_id_attr)
            if position == 'left':
                tree_id = target_tree_id
                space_target = target_tree_id - 1
            else:
                tree_id = target_tree_id + 1
                space_target = target_tree_id
            self._create_tree_space(space_target)
            setattr(node, self.left_attr, 1)
            setattr(node, self.right_attr, 2)
            setattr(node, self.level_attr, 0)
            setattr(node, self.tree_id_attr, tree_id)
            setattr(node, self.parent_attr, None)
        else:
            # Insert inside an existing tree, relative to target.
            setattr(node, self.left_attr, 0)
            setattr(node, self.level_attr, 0)
            if refresh_target:
                # Ensure mptt values on target are not stale.
                target._mptt_refresh()
            space_target, level, left, parent, right_shift = \
                self._calculate_inter_tree_move_values(node, target, position)
            tree_id = getattr(target, self.tree_id_attr)
            self._create_space(2, space_target, tree_id)
            setattr(node, self.left_attr, -left)
            setattr(node, self.right_attr, -left + 1)
            setattr(node, self.level_attr, -level)
            setattr(node, self.tree_id_attr, tree_id)
            setattr(node, self.parent_attr, parent)
            if parent:
                self._post_insert_update_cached_parent_right(parent, right_shift)
        if save:
            node.save()
        return node
@delegate_manager
def _move_node(self, node, target, position='last-child', save=True, refresh_target=True):
if self.tree_model._mptt_is_tracking:
# delegate to insert_node and clean up the gaps later.
return self.insert_node(node, target, position=position, save=save,
allow_existing_pk=True, refresh_target=refresh_target)
else:
if target is None:
if node.is_child_node():
self._make_child_root_node(node)
elif target.is_root_node() and position in ('left', 'right'):
self._make_sibling_of_root_node(node, target, position)
else:
if node.is_root_node():
self._move_root_node(node, target, position)
else:
self._move_child_node(node, target, position)
    def move_node(self, node, target, position='last-child'):
        """
        Moves ``node`` relative to a given ``target`` node as specified
        by ``position`` (when appropriate), by examining both nodes and
        calling the appropriate method to perform the move.
        A ``target`` of ``None`` indicates that ``node`` should be
        turned into a root node.
        Valid values for ``position`` are ``'first-child'``,
        ``'last-child'``, ``'left'`` or ``'right'``.
        ``node`` will be modified to reflect its new tree state in the
        database.
        This method explicitly checks for ``node`` being made a sibling
        of a root node, as this is a special case due to our use of tree
        ids to order root nodes.
        NOTE: This is a low-level method; it does NOT respect
        ``MPTTMeta.order_insertion_by``. In most cases you should just
        move the node yourself by setting node.parent.
        """
        self._move_node(node, target, position=position)
        node.save()
        # Let listeners know the node has been moved.
        node_moved.send(sender=node.__class__, instance=node,
                        target=target, position=position)
@delegate_manager
def root_node(self, tree_id):
"""
Returns the root node of the tree with the given id.
"""
return self._mptt_filter(tree_id=tree_id, parent=None).get()
@delegate_manager
def root_nodes(self):
"""
Creates a ``QuerySet`` containing root nodes.
"""
return self._mptt_filter(parent=None)
@delegate_manager
def rebuild(self):
"""
Rebuilds all trees in the database table using `parent` link.
"""
opts = self.model._mptt_meta
qs = self._mptt_filter(parent=None)
if opts.order_insertion_by:
qs = qs.order_by(*opts.order_insertion_by)
pks = qs.values_list('pk', flat=True)
rebuild_helper = self._rebuild_helper
idx = 0
for pk in pks:
idx += 1
rebuild_helper(pk, 1, idx)
rebuild.alters_data = True
@delegate_manager
def partial_rebuild(self, tree_id):
"""
Partially rebuilds a tree i.e. It rebuilds only the tree with given
``tree_id`` in database table using ``parent`` link.
"""
opts = self.model._mptt_meta
qs = self._mptt_filter(parent=None, tree_id=tree_id)
if opts.order_insertion_by:
qs = qs.order_by(*opts.order_insertion_by)
pks = qs.values_list('pk', flat=True)
if not pks:
return
if len(pks) > 1:
raise RuntimeError(
"More than one root node with tree_id %d. That's invalid,"
" do a full rebuild." % tree_id)
self._rebuild_helper(pks[0], 1, tree_id)
def _rebuild_helper(self, pk, left, tree_id, level=0):
opts = self.model._mptt_meta
right = left + 1
qs = self._mptt_filter(parent__pk=pk)
if opts.order_insertion_by:
qs = qs.order_by(*opts.order_insertion_by)
child_ids = qs.values_list('pk', flat=True)
rebuild_helper = self._rebuild_helper
for child_id in child_ids:
right = rebuild_helper(child_id, right, tree_id, level + 1)
qs = self.model._default_manager.filter(pk=pk)
self._mptt_update(
qs,
left=left,
right=right,
level=level,
tree_id=tree_id
)
return right + 1
def _post_insert_update_cached_parent_right(self, instance, right_shift, seen=None):
setattr(instance, self.right_attr, getattr(instance, self.right_attr) + right_shift)
parent = cached_field_value(instance, self.parent_attr)
if parent:
if not seen:
seen = set()
seen.add(instance)
if parent in seen:
# detect infinite recursion and throw an error
raise InvalidMove
self._post_insert_update_cached_parent_right(parent, right_shift, seen=seen)
def _calculate_inter_tree_move_values(self, node, target, position):
"""
Calculates values required when moving ``node`` relative to
``target`` as specified by ``position``.
"""
left = getattr(node, self.left_attr)
level = getattr(node, self.level_attr)
target_left = getattr(target, self.left_attr)
target_right = getattr(target, self.right_attr)
target_level = getattr(target, self.level_attr)
if position == 'last-child' or position == 'first-child':
if position == 'last-child':
space_target = target_right - 1
else:
space_target = target_left
level_change = level - target_level - 1
parent = target
elif position == 'left' or position == 'right':
if position == 'left':
space_target = target_left - 1
else:
space_target = target_right
level_change = level - target_level
parent = getattr(target, self.parent_attr)
else:
raise ValueError(_('An invalid position was given: %s.') % position)
left_right_change = left - space_target - 1
right_shift = 0
if parent:
right_shift = 2 * (node.get_descendant_count() + 1)
return space_target, level_change, left_right_change, parent, right_shift
    def _close_gap(self, size, target, tree_id):
        """
        Closes a gap of a certain ``size`` after the given ``target``
        point in the tree identified by ``tree_id``.
        """
        # Closing a gap is simply creating negative space.
        self._manage_space(-size, target, tree_id)
    def _create_space(self, size, target, tree_id):
        """
        Creates a space of a certain ``size`` after the given ``target``
        point in the tree identified by ``tree_id``.
        """
        self._manage_space(size, target, tree_id)
    def _create_tree_space(self, target_tree_id, num_trees=1):
        """
        Creates space for a new tree by incrementing all tree ids
        greater than ``target_tree_id``.
        """
        qs = self._mptt_filter(tree_id__gt=target_tree_id)
        self._mptt_update(qs, tree_id=F(self.tree_id_attr) + num_trees)
        # Tell delayed-update tracking that the following tree ids shifted.
        self.tree_model._mptt_track_tree_insertions(target_tree_id + 1, num_trees)
def _get_next_tree_id(self):
"""
Determines the next largest unused tree id for the tree managed
by this manager.
"""
max_tree_id = list(self.aggregate(Max(self.tree_id_attr)).values())[0]
max_tree_id = max_tree_id or 0
return max_tree_id + 1
    def _inter_tree_move_and_close_gap(
            self, node, level_change,
            left_right_change, new_tree_id):
        """
        Removes ``node`` from its current tree, with the given set of
        changes being applied to ``node`` and its descendants, closing
        the gap left by moving ``node`` as it does so.
        """
        connection = self._get_connection(instance=node)
        qn = connection.ops.quote_name
        opts = self.model._meta
        # A single UPDATE both re-labels the moved subtree (level, tree_id,
        # left, right) and closes the gap it leaves behind in the old tree.
        inter_tree_move_query = """
        UPDATE %(table)s
        SET %(level)s = CASE
            WHEN %(left)s >= %%s AND %(left)s <= %%s
                THEN %(level)s - %%s
            ELSE %(level)s END,
        %(tree_id)s = CASE
            WHEN %(left)s >= %%s AND %(left)s <= %%s
                THEN %%s
            ELSE %(tree_id)s END,
        %(left)s = CASE
            WHEN %(left)s >= %%s AND %(left)s <= %%s
                THEN %(left)s - %%s
            WHEN %(left)s > %%s
                THEN %(left)s - %%s
            ELSE %(left)s END,
        %(right)s = CASE
            WHEN %(right)s >= %%s AND %(right)s <= %%s
                THEN %(right)s - %%s
            WHEN %(right)s > %%s
                THEN %(right)s - %%s
            ELSE %(right)s END
        WHERE %(tree_id)s = %%s""" % {
            'table': qn(self.tree_model._meta.db_table),
            'level': qn(opts.get_field(self.level_attr).column),
            'left': qn(opts.get_field(self.left_attr).column),
            'tree_id': qn(opts.get_field(self.tree_id_attr).column),
            'right': qn(opts.get_field(self.right_attr).column),
        }
        left = getattr(node, self.left_attr)
        right = getattr(node, self.right_attr)
        gap_size = right - left + 1
        gap_target_left = left - 1
        # Parameter order must match the %%s placeholders above exactly.
        params = [
            left, right, level_change,
            left, right, new_tree_id,
            left, right, left_right_change,
            gap_target_left, gap_size,
            left, right, left_right_change,
            gap_target_left, gap_size,
            getattr(node, self.tree_id_attr)
        ]
        cursor = connection.cursor()
        cursor.execute(inter_tree_move_query, params)
    def _make_child_root_node(self, node, new_tree_id=None):
        """
        Removes ``node`` from its tree, making it the root node of a new
        tree.
        If ``new_tree_id`` is not specified a new tree id will be
        generated.
        ``node`` will be modified to reflect its new tree state in the
        database.
        """
        left = getattr(node, self.left_attr)
        right = getattr(node, self.right_attr)
        level = getattr(node, self.level_attr)
        if not new_tree_id:
            new_tree_id = self._get_next_tree_id()
        left_right_change = left - 1
        self._inter_tree_move_and_close_gap(node, level, left_right_change, new_tree_id)
        # Update the node to be consistent with the updated
        # tree in the database.
        setattr(node, self.left_attr, left - left_right_change)
        setattr(node, self.right_attr, right - left_right_change)
        setattr(node, self.level_attr, 0)
        setattr(node, self.tree_id_attr, new_tree_id)
        setattr(node, self.parent_attr, None)
        # Keep the cached parent value consistent with the new state.
        node._mptt_cached_fields[self.parent_attr] = None
    def _make_sibling_of_root_node(self, node, target, position):
        """
        Moves ``node``, making it a sibling of the given ``target`` root
        node as specified by ``position``.
        ``node`` will be modified to reflect its new tree state in the
        database.
        Since we use tree ids to reduce the number of rows affected by
        tree mangement during insertion and deletion, root nodes are not
        true siblings; thus, making an item a sibling of a root node is
        a special case which involves shuffling tree ids around.
        """
        if node == target:
            raise InvalidMove(_('A node may not be made a sibling of itself.'))
        opts = self.model._meta
        tree_id = getattr(node, self.tree_id_attr)
        target_tree_id = getattr(target, self.tree_id_attr)
        if node.is_child_node():
            # A child node becomes the root of a brand new tree placed
            # next to the target's tree.
            if position == 'left':
                space_target = target_tree_id - 1
                new_tree_id = target_tree_id
            elif position == 'right':
                space_target = target_tree_id
                new_tree_id = target_tree_id + 1
            else:
                raise ValueError(_('An invalid position was given: %s.') % position)
            self._create_tree_space(space_target)
            if tree_id > space_target:
                # The node's tree id has been incremented in the
                # database - this change must be reflected in the node
                # object for the method call below to operate on the
                # correct tree.
                setattr(node, self.tree_id_attr, tree_id + 1)
            self._make_child_root_node(node, new_tree_id)
        else:
            # An existing root node: rotate the affected range of tree
            # ids by one so the node's tree lands next to the target.
            if position == 'left':
                if target_tree_id > tree_id:
                    left_sibling = target.get_previous_sibling()
                    if node == left_sibling:
                        return
                    new_tree_id = getattr(left_sibling, self.tree_id_attr)
                    lower_bound, upper_bound = tree_id, new_tree_id
                    shift = -1
                else:
                    new_tree_id = target_tree_id
                    lower_bound, upper_bound = new_tree_id, tree_id
                    shift = 1
            elif position == 'right':
                if target_tree_id > tree_id:
                    new_tree_id = target_tree_id
                    lower_bound, upper_bound = tree_id, target_tree_id
                    shift = -1
                else:
                    right_sibling = target.get_next_sibling()
                    if node == right_sibling:
                        return
                    new_tree_id = getattr(right_sibling, self.tree_id_attr)
                    lower_bound, upper_bound = new_tree_id, tree_id
                    shift = 1
            else:
                raise ValueError(_('An invalid position was given: %s.') % position)
            connection = self._get_connection(instance=node)
            qn = connection.ops.quote_name
            root_sibling_query = """
            UPDATE %(table)s
            SET %(tree_id)s = CASE
                WHEN %(tree_id)s = %%s
                    THEN %%s
                ELSE %(tree_id)s + %%s END
            WHERE %(tree_id)s >= %%s AND %(tree_id)s <= %%s""" % {
                'table': qn(self.tree_model._meta.db_table),
                'tree_id': qn(opts.get_field(self.tree_id_attr).column),
            }
            cursor = connection.cursor()
            cursor.execute(root_sibling_query, [tree_id, new_tree_id, shift,
                                                lower_bound, upper_bound])
            setattr(node, self.tree_id_attr, new_tree_id)
    def _manage_space(self, size, target, tree_id):
        """
        Manages spaces in the tree identified by ``tree_id`` by changing
        the values of the left and right columns by ``size`` after the
        given ``target`` point.
        """
        if self.tree_model._mptt_is_tracking:
            # Under delayed updates, only record that this tree changed;
            # the real values are fixed by the later partial rebuild.
            self.tree_model._mptt_track_tree_modified(tree_id)
        else:
            connection = self._get_connection()
            qn = connection.ops.quote_name
            opts = self.model._meta
            space_query = """
            UPDATE %(table)s
            SET %(left)s = CASE
                WHEN %(left)s > %%s
                    THEN %(left)s + %%s
                ELSE %(left)s END,
            %(right)s = CASE
                WHEN %(right)s > %%s
                    THEN %(right)s + %%s
                ELSE %(right)s END
            WHERE %(tree_id)s = %%s
            AND (%(left)s > %%s OR %(right)s > %%s)""" % {
                'table': qn(self.tree_model._meta.db_table),
                'left': qn(opts.get_field(self.left_attr).column),
                'right': qn(opts.get_field(self.right_attr).column),
                'tree_id': qn(opts.get_field(self.tree_id_attr).column),
            }
            cursor = connection.cursor()
            cursor.execute(space_query, [target, size, target, size, tree_id,
                                         target, target])
def _move_child_node(self, node, target, position):
"""
Calls the appropriate method to move child node ``node``
relative to the given ``target`` node as specified by
``position``.
"""
tree_id = getattr(node, self.tree_id_attr)
target_tree_id = getattr(target, self.tree_id_attr)
if tree_id == target_tree_id:
self._move_child_within_tree(node, target, position)
else:
self._move_child_to_new_tree(node, target, position)
    def _move_child_to_new_tree(self, node, target, position):
        """
        Moves child node ``node`` to a different tree, inserting it
        relative to the given ``target`` node in the new tree as
        specified by ``position``.
        ``node`` will be modified to reflect its new tree state in the
        database.
        """
        left = getattr(node, self.left_attr)
        right = getattr(node, self.right_attr)
        level = getattr(node, self.level_attr)
        new_tree_id = getattr(target, self.tree_id_attr)
        space_target, level_change, left_right_change, parent, new_parent_right = \
            self._calculate_inter_tree_move_values(node, target, position)
        tree_width = right - left + 1
        # Make space for the subtree which will be moved
        self._create_space(tree_width, space_target, new_tree_id)
        # Move the subtree
        self._inter_tree_move_and_close_gap(
            node, level_change, left_right_change, new_tree_id)
        # Update the node to be consistent with the updated
        # tree in the database.
        setattr(node, self.left_attr, left - left_right_change)
        setattr(node, self.right_attr, right - left_right_change)
        setattr(node, self.level_attr, level - level_change)
        setattr(node, self.tree_id_attr, new_tree_id)
        setattr(node, self.parent_attr, parent)
        # Keep the cached parent pk in sync with the new parent.
        node._mptt_cached_fields[self.parent_attr] = parent.pk
    def _move_child_within_tree(self, node, target, position):
        """
        Moves child node ``node`` within its current tree relative to
        the given ``target`` node as specified by ``position``.
        ``node`` will be modified to reflect its new tree state in the
        database.
        """
        left = getattr(node, self.left_attr)
        right = getattr(node, self.right_attr)
        level = getattr(node, self.level_attr)
        width = right - left + 1
        tree_id = getattr(node, self.tree_id_attr)
        target_left = getattr(target, self.left_attr)
        target_right = getattr(target, self.right_attr)
        target_level = getattr(target, self.level_attr)
        # Work out the node's new left/right interval, its level change,
        # and its new parent for each supported position.
        if position == 'last-child' or position == 'first-child':
            if node == target:
                raise InvalidMove(_('A node may not be made a child of itself.'))
            elif left < target_left < right:
                raise InvalidMove(_('A node may not be made a child of any of its descendants.'))
            if position == 'last-child':
                if target_right > right:
                    new_left = target_right - width
                    new_right = target_right - 1
                else:
                    new_left = target_right
                    new_right = target_right + width - 1
            else:
                if target_left > left:
                    new_left = target_left - width + 1
                    new_right = target_left
                else:
                    new_left = target_left + 1
                    new_right = target_left + width
            level_change = level - target_level - 1
            parent = target
        elif position == 'left' or position == 'right':
            if node == target:
                raise InvalidMove(_('A node may not be made a sibling of itself.'))
            elif left < target_left < right:
                raise InvalidMove(_('A node may not be made a sibling of any of its descendants.'))
            if position == 'left':
                if target_left > left:
                    new_left = target_left - width
                    new_right = target_left - 1
                else:
                    new_left = target_left
                    new_right = target_left + width - 1
            else:
                if target_right > right:
                    new_left = target_right - width + 1
                    new_right = target_right
                else:
                    new_left = target_right + 1
                    new_right = target_right + width
            level_change = level - target_level
            parent = getattr(target, self.parent_attr)
        else:
            raise ValueError(_('An invalid position was given: %s.') % position)
        left_boundary = min(left, new_left)
        right_boundary = max(right, new_right)
        left_right_change = new_left - left
        gap_size = width
        if left_right_change > 0:
            gap_size = -gap_size
        connection = self._get_connection(instance=node)
        qn = connection.ops.quote_name
        opts = self.model._meta
        # The level update must come before the left update to keep
        # MySQL happy - left seems to refer to the updated value
        # immediately after its update has been specified in the query
        # with MySQL, but not with SQLite or Postgres.
        move_subtree_query = """
        UPDATE %(table)s
        SET %(level)s = CASE
            WHEN %(left)s >= %%s AND %(left)s <= %%s
                THEN %(level)s - %%s
            ELSE %(level)s END,
        %(left)s = CASE
            WHEN %(left)s >= %%s AND %(left)s <= %%s
                THEN %(left)s + %%s
            WHEN %(left)s >= %%s AND %(left)s <= %%s
                THEN %(left)s + %%s
            ELSE %(left)s END,
        %(right)s = CASE
            WHEN %(right)s >= %%s AND %(right)s <= %%s
                THEN %(right)s + %%s
            WHEN %(right)s >= %%s AND %(right)s <= %%s
                THEN %(right)s + %%s
            ELSE %(right)s END
        WHERE %(tree_id)s = %%s""" % {
            'table': qn(self.tree_model._meta.db_table),
            'level': qn(opts.get_field(self.level_attr).column),
            'left': qn(opts.get_field(self.left_attr).column),
            'right': qn(opts.get_field(self.right_attr).column),
            'tree_id': qn(opts.get_field(self.tree_id_attr).column),
        }
        cursor = connection.cursor()
        # Parameter order must match the %%s placeholders above exactly.
        cursor.execute(move_subtree_query, [
            left, right, level_change,
            left, right, left_right_change,
            left_boundary, right_boundary, gap_size,
            left, right, left_right_change,
            left_boundary, right_boundary, gap_size,
            tree_id])
        # Update the node to be consistent with the updated
        # tree in the database.
        setattr(node, self.left_attr, new_left)
        setattr(node, self.right_attr, new_right)
        setattr(node, self.level_attr, level - level_change)
        setattr(node, self.parent_attr, parent)
        node._mptt_cached_fields[self.parent_attr] = parent.pk
    def _move_root_node(self, node, target, position):
        """
        Moves root node ``node`` to a different tree, inserting it
        relative to the given ``target`` node as specified by
        ``position``.

        ``node`` will be modified to reflect its new tree state in the
        database.
        """
        # Current nested-set bookkeeping values for the node being moved.
        left = getattr(node, self.left_attr)
        right = getattr(node, self.right_attr)
        level = getattr(node, self.level_attr)
        tree_id = getattr(node, self.tree_id_attr)
        new_tree_id = getattr(target, self.tree_id_attr)
        # Number of left/right "slots" occupied by the subtree rooted here.
        width = right - left + 1
        if node == target:
            raise InvalidMove(_('A node may not be made a child of itself.'))
        elif tree_id == new_tree_id:
            # ``node`` is a root, so any target inside the same tree is
            # necessarily one of its own descendants.
            raise InvalidMove(_('A node may not be made a child of any of its descendants.'))
        # Work out where the subtree lands in the target tree and the
        # offsets to apply to its left/right/level values.
        space_target, level_change, left_right_change, parent, right_shift = \
            self._calculate_inter_tree_move_values(node, target, position)
        # Create space for the tree which will be inserted
        self._create_space(width, space_target, new_tree_id)
        # Move the root node, making it a child node
        connection = self._get_connection(instance=node)
        qn = connection.ops.quote_name
        opts = self.model._meta
        move_tree_query = """
        UPDATE %(table)s
        SET %(level)s = %(level)s - %%s,
            %(left)s = %(left)s - %%s,
            %(right)s = %(right)s - %%s,
            %(tree_id)s = %%s
        WHERE %(left)s >= %%s AND %(left)s <= %%s
          AND %(tree_id)s = %%s""" % {
            'table': qn(self.tree_model._meta.db_table),
            'level': qn(opts.get_field(self.level_attr).column),
            'left': qn(opts.get_field(self.left_attr).column),
            'right': qn(opts.get_field(self.right_attr).column),
            'tree_id': qn(opts.get_field(self.tree_id_attr).column),
        }
        cursor = connection.cursor()
        # Parameter order matches the %%s placeholders above: level offset,
        # two left/right offsets, new tree id, then the old subtree bounds
        # and old tree id for the WHERE clause.
        cursor.execute(move_tree_query, [
            level_change, left_right_change, left_right_change,
            new_tree_id,
            left, right, tree_id])
        # Update the former root node to be consistent with the updated
        # tree in the database.
        setattr(node, self.left_attr, left - left_right_change)
        setattr(node, self.right_attr, right - left_right_change)
        setattr(node, self.level_attr, level - level_change)
        setattr(node, self.tree_id_attr, new_tree_id)
        setattr(node, self.parent_attr, parent)
        node._mptt_cached_fields[self.parent_attr] = parent.pk
| StarcoderdataPython |
4819460 | # Copyright (c) <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Unittests for pygraph.algorithms.critical
"""
import unittest
from pygraph.algorithms.critical import critical_path
from pygraph.algorithms.critical import transitive_edges,_intersection
from pygraph.classes.digraph import digraph
def generate_test_graph():
    '''
    Generates & returns a weighted digraph with
    one transitive edge and no cycles.
    '''
    G = digraph()
    G.add_nodes([1, 2, 3, 4, 5, 6])
    # Edge table: ((tail, head), weight), inserted in this exact order.
    weighted_edges = [
        ((1, 2), 1),
        ((2, 4), 4),
        ((1, 3), 1),   # transitive edge
        ((2, 3), 20),
        ((3, 5), 3),
        ((4, 6), 5),
        ((5, 6), 4),
    ]
    for edge, weight in weighted_edges:
        G.add_edge(edge, weight)
    return G
class test_critical_path_and_transitive_edges(unittest.TestCase):
    """Unit tests for critical_path, transitive_edges and _intersection.

    Bare ``assert`` statements are replaced with ``unittest`` assertion
    methods: bare asserts are silently stripped under ``python -O`` and give
    no actual-vs-expected diagnostics on failure.
    """

    # critical path algorithm
    def test_critical_path_with_cycle(self):
        G = generate_test_graph()
        G.add_edge((5, 2), 3)  # add cycle
        self.assertEqual(critical_path(G), [])

    def test_critical_path(self):
        G = generate_test_graph()
        self.assertEqual(critical_path(G), [1, 2, 3, 5, 6])

    # transitive edge detection algorithm
    def test_transitivity_with_cycle(self):
        G = generate_test_graph()
        G.add_edge((5, 2), 3)  # add cycle
        self.assertEqual(transitive_edges(G), [])

    def test_transitivity(self):
        G = generate_test_graph()
        G.add_edge((2, 5), 1)  # add another transitive edge
        self.assertEqual(transitive_edges(G), [(1, 3), (2, 5)])

    # intersection testing (used internally)
    def test_partial_intersection(self):
        self.assertEqual(_intersection([1, 2, 3, 4], [3, 4, 5, 6]), [3, 4])

    def test_empty_intersection(self):
        self.assertEqual(_intersection([1, 2, 3, 4], [5, 6]), [])
# Run the whole suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
4815310 | import cv2
import numpy as np
# Utility function to draw points in a quadrilateral polygon
# Utility function to draw points in a quadrilateral polygon
def quadrilateral_points(input_image, vertices):
    """Return a copy of *input_image* with the four corner points marked."""
    marked = np.copy(input_image)
    color = [0, 0, 255]  # Red (BGR)
    thickness = 2
    radius = 3
    # Draw exactly the first four vertices, in order.
    for corner in range(4):
        x, y = vertices[corner]
        cv2.circle(marked, (x, y), radius, color, thickness)
    return marked
# Utility function to draw points in a quadrilateral polygon
# Utility function to draw the edges of a quadrilateral polygon
def quadrilateral_lines(input_image, vertices):
    """Return a copy of *input_image* with the quadrilateral outline drawn."""
    outlined = np.copy(input_image)
    color = [0, 0, 255]  # Red (BGR)
    thickness = 2
    # Connect vertex i to vertex i+1, wrapping 3 back to 0.
    for start in range(4):
        end = (start + 1) % 4
        x0, y0 = vertices[start]
        x1, y1 = vertices[end]
        cv2.line(outlined, (x0, y0), (x1, y1), color, thickness)
    return outlined
def get_pixel_density_histogram(img):
    """Column-wise pixel sums over the bottom two thirds of *img*."""
    start_row = img.shape[0] // 3
    return img[start_row:, :].sum(axis=0)
def get_normalized_pivots(img, left_pivot, right_pivot):
    """Clamp the two lane pivots so they stay a plausible distance apart.

    ``img`` is used only for its height (``img.shape[0]``).  When one pivot
    is clearly the dominant (densest) one, the other is snapped to a fixed
    offset from it.  Returns the adjusted ``(left_pivot, right_pivot)``.
    """
    # ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24; plain
    # ``int`` truncates identically for these values.
    midpoint = int(img.shape[0] / 2)
    space_between = int(img.shape[0] - (img.shape[0] / 2.75))
    # NOTE(review): ``shape[0] - shape[0] / 1`` is always 0, so this margin
    # currently has no effect.  Preserved as-is, but the divisor looks like
    # a typo (a larger divisor was probably intended) -- TODO confirm.
    margin = int(img.shape[0] - (img.shape[0] / 1))
    most_dense = max(left_pivot, right_pivot)
    # ``&`` (not ``and``) kept deliberately: operands may be numpy bools.
    if (most_dense > midpoint) & (left_pivot < right_pivot - margin):
        left_pivot = right_pivot - space_between
    if (most_dense < midpoint) & (right_pivot > left_pivot + margin):
        right_pivot = left_pivot + space_between
    return left_pivot, right_pivot
def get_path(rectangle_stack, min_pixel_density):
    """Flatten *rectangle_stack* into a single index array.

    Returns ``None`` when too many consecutive rectangles are sparse, or
    when the merged path contains too few pixels overall to be a credible
    lane line.
    """
    num_rects = len(rectangle_stack)
    empty_limit = num_rects / 4
    consecutive_empty = 0
    for indices in rectangle_stack:
        # Too many sparse rectangles in a row => no continuous line.
        if consecutive_empty > empty_limit:
            return None
        consecutive_empty = 0 if len(indices) > min_pixel_density \
            else consecutive_empty + 1
    merged = np.concatenate(rectangle_stack)
    # Require a minimal overall pixel count as well.
    if len(merged) < (min_pixel_density * num_rects) / 3:
        return None
    return merged
def get_lane(img, rectangle_count, rectangle_width, rectangle_visible, min_pixel_density):
    """Locate left/right lane-line pixels with a sliding-window search.

    Parameters mirror ``pixel_density_curve_fit``.  Returns
    ``(left_path, right_path, out_img)``: the paths are index arrays into
    ``img.nonzero()`` (or ``None`` when a side was too sparse, see
    ``get_path``) and ``out_img`` is an RGB visualisation of ``img``.

    The only change from the original is replacing ``np.int`` (deprecated
    in NumPy 1.20, removed in 1.24) with the equivalent builtin ``int``.
    """
    nonzero = img.nonzero()
    nonzero_y = np.array(nonzero[0])
    nonzero_x = np.array(nonzero[1])
    out_img = np.dstack((img, img, img)) * 255
    pixel_density_histogram = get_pixel_density_histogram(img)
    midpoint = int(pixel_density_histogram.shape[0] / 2)
    # Initial pivots: the densest column in each half of the histogram.
    left_pivot, right_pivot = \
        get_normalized_pivots(img, np.argmax(pixel_density_histogram[:midpoint]),
                              np.argmax(pixel_density_histogram[midpoint:]) + midpoint)
    rect_height = int(img.shape[0] / rectangle_count)
    left_stack = []
    right_stack = []
    # Step through the rectangles one by one
    for rectangle in range(rectangle_count):
        # Identify rectangle boundaries in x and y (and right and left)
        rect_y_bottom = img.shape[0] - (rectangle + 1) * rect_height
        rect_y_top = img.shape[0] - rectangle * rect_height
        rect_x_left_bottom = left_pivot - rectangle_width
        rect_x_left_top = left_pivot + rectangle_width
        rect_x_right_bottom = right_pivot - rectangle_width
        rect_x_right_top = right_pivot + rectangle_width
        # Identify the nonzero pixels in x and y within the rectangle
        left_rect_area = ((nonzero_y >= rect_y_bottom) &
                          (nonzero_y < rect_y_top) &
                          (nonzero_x >= rect_x_left_bottom) &
                          (nonzero_x < rect_x_left_top)).nonzero()[0]
        right_rect_area = ((nonzero_y >= rect_y_bottom) &
                           (nonzero_y < rect_y_top) &
                           (nonzero_x >= rect_x_right_bottom) &
                           (nonzero_x < rect_x_right_top)).nonzero()[0]
        # If you found > min_pixel_density, recenter next rectangle on their mean position
        if len(left_rect_area) > min_pixel_density:
            left_pivot, right_pivot = get_normalized_pivots(
                img, int(np.mean(nonzero_x[left_rect_area])), right_pivot)
        if len(right_rect_area) > min_pixel_density:
            left_pivot, right_pivot = get_normalized_pivots(
                img, left_pivot, int(np.mean(nonzero_x[right_rect_area])))
        # append indexes skiping initial iteractions
        if rectangle >= rectangle_count / 10:
            left_stack.append(left_rect_area)
            right_stack.append(right_rect_area)
        # Draw the windows on the visualization image
        if rectangle_visible:
            cv2.rectangle(out_img, (rect_x_left_bottom, rect_y_bottom), (rect_x_left_top, rect_y_top), (0, 255, 0), 2)
            cv2.rectangle(out_img, (rect_x_right_bottom, rect_y_bottom), (rect_x_right_top, rect_y_top), (0, 255, 0), 2)
    # Flatten the rectangle stack
    left_path = get_path(left_stack, min_pixel_density)
    right_path = get_path(right_stack, min_pixel_density)
    return left_path, right_path, out_img
def pixel_density_curve_fit(img,
                            rectangle_count=20, rectangle_width=75, rectangle_visible=False,
                            lane_width=75,
                            min_pixel_density=100):
    """Fit second-order polynomials to the left/right lane lines of *img*.

    :param img: RGB image containing a (binary) lane mask.
    :param rectangle_count, rectangle_width, rectangle_visible:
        sliding-window parameters forwarded to ``get_lane``.
    :param lane_width: half-width in pixels of the shaded band drawn on the
        output image around each fitted curve.
    :param min_pixel_density: minimum nonzero pixels for a window to count.
    :returns: ``(left_fit, right_fit, out_img)`` where each fit is the
        ``np.polyfit`` coefficient array (``None`` when no reliable line was
        found) and ``out_img`` is a visualisation image.
    """
    img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    nonzero = img.nonzero()
    nonzero_y = np.array(nonzero[0])
    nonzero_x = np.array(nonzero[1])
    # get paths and output image
    left_path, right_path, out_img = \
        get_lane(img, rectangle_count, rectangle_width, rectangle_visible, min_pixel_density)
    # Generate x and y values for plotting
    plot_y = np.linspace(0, out_img.shape[0] - 1, out_img.shape[0])
    lane_img = np.zeros_like(out_img)
    left_fit = None
    right_fit = None
    if left_path is not None:
        left_x = nonzero_x[left_path]
        left_y = nonzero_y[left_path]
        out_img[nonzero_y[left_path], nonzero_x[left_path]] = [255, 255, 0]
        try:
            left_fit = np.polyfit(left_y, left_x, 2)
            left_fit_x = left_fit[0] * plot_y ** 2 + left_fit[1] * plot_y + left_fit[2]
            left_lane_rect_a = np.array([np.transpose(np.vstack([left_fit_x - lane_width, plot_y]))])
            left_lane_rect_b = np.array([np.flipud(np.transpose(np.vstack([left_fit_x + lane_width, plot_y])))])
            left_lane_pts = np.hstack((left_lane_rect_a, left_lane_rect_b))
            cv2.fillPoly(lane_img, np.int_([left_lane_pts]), (255, 0, 0))
        # ``except Exception`` (not a bare ``except``) so that
        # KeyboardInterrupt / SystemExit are not swallowed; a failed fit
        # still degrades gracefully to ``left_fit = None``.
        except Exception:
            left_fit = None
    if right_path is not None:
        right_x = nonzero_x[right_path]
        right_y = nonzero_y[right_path]
        out_img[nonzero_y[right_path], nonzero_x[right_path]] = [255, 255, 0]
        try:
            right_fit = np.polyfit(right_y, right_x, 2)
            right_fit_x = right_fit[0] * plot_y ** 2 + right_fit[1] * plot_y + right_fit[2]
            right_lane_rect_a = np.array([np.transpose(np.vstack([right_fit_x - lane_width, plot_y]))])
            right_lane_rect_b = np.array([np.flipud(np.transpose(np.vstack([right_fit_x + lane_width, plot_y])))])
            right_lane_pts = np.hstack((right_lane_rect_a, right_lane_rect_b))
            cv2.fillPoly(lane_img, np.int_([right_lane_pts]), (0, 0, 255))
        except Exception:
            right_fit = None
    # Blend the shaded lane bands over the pixel visualisation.
    out_img = cv2.addWeighted(out_img, 1, lane_img, 0.7, 0)
    return left_fit, right_fit, out_img
| StarcoderdataPython |
1702474 | # Copyright 2012 OpenStack Foundation
# Copyright 2015 Metaswitch Networks
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import etcd
import logging
import socket
import sys
import eventlet
eventlet.monkey_patch()
from oslo_config import cfg
from neutron.agent.common import config
from neutron.agent.dhcp.agent import DhcpAgent
from neutron.agent.dhcp.agent import NetworkCache
from neutron.agent.dhcp_agent import register_options
from neutron.agent.linux import dhcp
from neutron.common import config as common_config
from neutron.common import constants
from calico.datamodel_v1 import dir_for_host
from calico.datamodel_v1 import key_for_subnet
from calico.datamodel_v1 import SUBNET_DIR
from calico.etcdutils import EtcdWatcher
from calico.etcdutils import safe_decode_json
from networking_calico.agent.linux.dhcp import DnsmasqRouted
LOG = logging.getLogger(__name__)
NETWORK_ID = 'calico'
class FakePlugin(object):
    """Stand-in for the Neutron plugin RPC proxy.

    neutron.agent.linux.dhcp.Dnsmasq expects an object through which it can
    create, update and release its DHCP port in the Neutron database.  In
    the Calico setup no such database port is needed: the DHCP interface
    uses existing gateway IPs instead of fresh Neutron allocations, which is
    also why update_dhcp_port can never be called (it only fires when the
    set of Neutron-allocated IPs on the DHCP port changes) and is therefore
    deliberately not implemented here.

    Because this class never talks to the real Neutron database, the DHCP
    interface created on each compute host does not appear as a Neutron
    port.  That is harmless: no unique IP is allocated for it, so no
    addresses the database ought to track are consumed.
    """

    def create_dhcp_port(self, port):
        """Fabricate the DHCP port requested by the device manager.

        Called as: dhcp_port = self.plugin.create_dhcp_port({'port': port_dict})
        """
        LOG.debug("create_dhcp_port: %s", port)
        port_data = port['port']
        port_data['id'] = 'dhcp'
        # This MAC is assigned to the Linux dummy interface created by
        # networking_calico.agent.linux.interface.RoutedInterfaceDriver, so
        # it never participates in sending or receiving real data.  A fixed,
        # locally-administered address (the '2' bit in the first byte) is
        # therefore fine, even when repeated on every compute host.
        port_data['mac_address'] = '02:00:00:00:00:00'
        port_data['device_owner'] = constants.DEVICE_OWNER_DHCP
        return dhcp.DictModel(port_data)

    def release_dhcp_port(self, network_id, device_id):
        """No-op release of the DHCP port.

        Called as: self.plugin.release_dhcp_port(network.id,
                                                 self.get_device_id(network))
        """
        LOG.debug("release_dhcp_port: %s %s", network_id, device_id)
class CalicoEtcdWatcher(EtcdWatcher):

    NETWORK_ID = 'calico'
    """
    Calico network ID.

    Although there can in general be multiple networks and multiple
    subnets per network that need DHCP on a particular compute host,
    there's actually nothing that depends on how the subnets are
    partitioned across networks, and a single instance of Dnsmasq is
    quite capable of providing DHCP for many subnets.

    Therefore we model the DHCP requirement using a single network
    with ID 'calico', and many subnets within that network.
    """

    def _empty_network(self):
        """Construct and return an empty network model."""
        return dhcp.NetModel(False,
                             {"id": NETWORK_ID,
                              "subnets": [],
                              "ports": [],
                              "mtu": constants.DEFAULT_NETWORK_MTU})

    def __init__(self, agent):
        """Watch this host's endpoint subtree on behalf of *agent*."""
        super(CalicoEtcdWatcher, self).__init__(
            'localhost:4001',
            dir_for_host(socket.gethostname()) + "/workload"
        )
        self.agent = agent
        # Set while a snapshot is being replayed so that subnet/DHCP
        # recalculation happens once at the end, not per endpoint.
        self.suppress_on_ports_changed = False

        # Create empty Calico network object in the cache.
        self.agent.cache.put(self._empty_network())

        # Register the etcd paths that we need to watch.
        self.register_path(
            "/calico/v1/host/<hostname>/workload/<orchestrator>" +
            "/<workload_id>/endpoint/<endpoint_id>",
            on_set=self.on_endpoint_set,
            on_del=self.on_endpoint_delete
        )
        self.register_path(
            "/calico/v1/host/<hostname>/workload/<orchestrator>" +
            "/<workload_id>/endpoint",
            on_del=self.on_dir_delete
        )
        self.register_path(
            "/calico/v1/host/<hostname>/workload/<orchestrator>" +
            "/<workload_id>",
            on_del=self.on_dir_delete
        )
        self.register_path(
            "/calico/v1/host/<hostname>/workload/<orchestrator>",
            on_del=self.on_dir_delete
        )
        self.register_path(
            "/calico/v1/host/<hostname>/workload",
            on_del=self.on_dir_delete
        )

        # Also watch the etcd subnet tree.  When something in that subtree
        # changes, the subnet watcher will tell _this_ watcher to resync.
        self.subnet_watcher = SubnetWatcher(self)
        eventlet.spawn(self.subnet_watcher.loop)

    def on_endpoint_set(self, response, hostname, orchestrator,
                        workload_id, endpoint_id):
        """Handler for endpoint creations and updates.

        Endpoint data is, for example:

        { 'state': 'active' or 'inactive',
          'name': port['interface_name'],
          'mac': port['mac_address'],
          'profile_ids': port['security_groups'],
          'ipv4_nets': ['10.28.0.2/32'],
          'ipv4_gateway': '10.28.0.1',
          'ipv6_nets': ['2001:db8:1::2/128'],
          'ipv6_gateway': '2001:db8:1::1' }

        Port properties needed by DHCP code are:

        { 'id': <unique ID>,
          'network_id': <network ID>,
          'device_owner': 'calico',
          'device_id': <Linux interface name>,
          'fixed_ips': [ { 'subnet_id': <subnet ID>,
                           'ip_address': '10.28.0.2' } ],
          'mac_address: <MAC address>,
          'extra_dhcp_opts': ... (optional) }

        Network properties are:

        { 'subnets': [ <subnet object> ],
          'id': <network ID>,
          'namespace': None,
          'ports: [ <port object> ],
          'tenant_id': ? }

        Subnet properties are:

        { 'enable_dhcp': True,
          'ip_version': 4 or 6,
          'cidr': '10.28.0.0/24',
          'dns_nameservers': [],
          'id': <subnet ID>,
          'gateway_ip': <gateway IP address>,
          'host_routes': [],
          'ipv6_address_mode': 'dhcpv6-stateful' | 'dhcpv6-stateless',
          'ipv6_ra_mode': 'dhcpv6-stateful' | 'dhcpv6-stateless' }
        """
        # Get the endpoint data.
        endpoint = safe_decode_json(response.value, 'endpoint')
        if not (isinstance(endpoint, dict) and
                'ipv4_nets' in endpoint and
                'ipv4_subnet_ids' in endpoint and
                'ipv6_nets' in endpoint and
                'ipv6_subnet_ids' in endpoint and
                'name' in endpoint and
                'mac' in endpoint):
            # Endpoint data is invalid.
            LOG.warning("Invalid endpoint data: %s => %s",
                        response.value, endpoint)
            return

        # Construct NetModel port equivalent of Calico's endpoint data.
        fixed_ips = []
        dns_assignments = []
        fqdn = endpoint.get('fqdn')
        for ip_version in [4, 6]:
            # Generate the fixed IPs and DNS assignments for the current IP
            # version.
            for addrm, subnet_id in zip(endpoint['ipv%s_nets' % ip_version],
                                        endpoint['ipv%s_subnet_ids' %
                                                 ip_version]):
                # Strip the prefix length; DHCP wants the bare address.
                ip_addr = addrm.split('/')[0]
                fixed_ips.append({'subnet_id': subnet_id,
                                  'ip_address': ip_addr})
                if fqdn:
                    dns_assignments.append({'hostname': fqdn.split('.')[0],
                                            'ip_address': ip_addr,
                                            'fqdn': fqdn})
        port = {'id': endpoint_id,
                'network_id': NETWORK_ID,
                'device_owner': 'calico',
                'device_id': endpoint['name'],
                'fixed_ips': fixed_ips,
                'mac_address': endpoint['mac'],
                'extra_dhcp_opts': []}
        if fqdn:
            port['dns_assignment'] = dns_assignments

        # Add this port into the NetModel.
        LOG.debug("new port: %s", port)
        self.agent.cache.put_port(dhcp.DictModel(port))

        # Now check for impact on subnets and DHCP driver.
        self.on_ports_changed()

    def on_ports_changed(self):
        """Recompute subnets and poke the DHCP driver after a port change."""
        # Check whether we should really do the following processing.
        if self.suppress_on_ports_changed:
            LOG.debug("Don't recalculate subnets yet;"
                      " must be processing a snapshot")
            return

        # Get current NetModel description of the Calico network.
        net = self.agent.cache.get_network_by_id(NETWORK_ID)
        LOG.debug("net: %s %s %s", net.id, net.subnets, net.ports)

        # See if we need to update the subnets in the NetModel.
        new_subnets = self.calculate_new_subnets(net.ports, net.subnets)
        if new_subnets is None:
            # No change to subnets, so just need 'reload_allocations' to tell
            # Dnsmasq about the new port.
            self.agent.call_driver('reload_allocations', net)
        else:
            # Subnets changed, so need to 'restart' the DHCP driver.
            net = dhcp.NetModel(False,
                                {"id": net.id,
                                 "subnets": new_subnets,
                                 "ports": net.ports,
                                 "tenant_id": "calico",
                                 "mtu": constants.DEFAULT_NETWORK_MTU})
            LOG.debug("new net: %s %s %s", net.id, net.subnets, net.ports)

            # Next line - i.e. just discarding the existing cache - is to work
            # around Neutron bug that the DHCP port is not entered into the
            # cache's port_lookup dict.
            self.agent.cache = NetworkCache()
            self.agent.cache.put(net)
            self.agent.call_driver('restart', net)

    def on_endpoint_delete(self, response, hostname, orchestrator,
                           workload_id, endpoint_id):
        """Handler for endpoint deletion."""
        # Find the corresponding port in the DHCP agent's cache.
        port = self.agent.cache.get_port_by_id(endpoint_id)
        if port:
            LOG.debug("deleted port: %s", port)
            self.agent.cache.remove_port(port)
            # Now check for impact on subnets and DHCP driver.
            self.on_ports_changed()

    def calculate_new_subnets(self, ports, current_subnets):
        """Calculate and return subnets needed for PORTS.

        Given a current set of PORTS that we need to provide DHCP for,
        calculate all the subnets that we need for those, and get their data
        either from CURRENT_SUBNETS or from reading etcd.

        If the new set of subnets is equivalent to what we already had in
        CURRENT_SUBNETS, return None.  Otherwise return the new set of
        subnets.
        """
        # Gather required subnet IDs.
        subnet_ids = set()
        for port in ports:
            for fixed_ip in port['fixed_ips']:
                subnet_ids.add(fixed_ip['subnet_id'])
        LOG.debug("Needed subnet IDs: %s", subnet_ids)

        # Compare against the existing set of IDs.
        existing_ids = set([s.id for s in current_subnets])
        LOG.debug("Existing subnet IDs: %s", existing_ids)
        if subnet_ids == existing_ids:
            LOG.debug("Subnets unchanged")
            return None

        # Prepare required new subnet data.
        new_subnets = []
        for subnet_id in subnet_ids:
            # Check if we already have this subnet.
            existing = [s for s in current_subnets if s.id == subnet_id]
            if existing:
                # We do.  Assume subnet data hasn't changed.
                new_subnets.extend(existing)
            else:
                LOG.debug("Read subnet %s from etcd", subnet_id)
                # Read the data for this subnet.
                subnet_key = key_for_subnet(subnet_id)
                try:
                    response = self.client.read(subnet_key, consistent=True)
                    data = safe_decode_json(response.value, 'subnet')
                    LOG.debug("Subnet data: %s", data)
                    if not (isinstance(data, dict) and
                            'cidr' in data and
                            'gateway_ip' in data):
                        # Subnet data was invalid.  Raising here funnels the
                        # invalid case into the same warning/skip handling as
                        # a missing key (caught just below).
                        LOG.warning("Invalid subnet data: %s => %s",
                                    response.value, data)
                        raise etcd.EtcdKeyNotFound()

                    # Convert to form expected by NetModel.
                    ip_version = 6 if ':' in data['cidr'] else 4
                    subnet = {'enable_dhcp': True,
                              'ip_version': ip_version,
                              'cidr': data['cidr'],
                              'dns_nameservers': data.get('dns_servers') or [],
                              'id': subnet_id,
                              'gateway_ip': data['gateway_ip'],
                              'host_routes': []}
                    if ip_version == 6:
                        subnet['ipv6_address_mode'] = constants.DHCPV6_STATEFUL
                        subnet['ipv6_ra_mode'] = constants.DHCPV6_STATEFUL

                    # Add this to the set to be returned.
                    new_subnets.append(subnet)
                except etcd.EtcdKeyNotFound:
                    LOG.warning("No data for subnet %s", subnet_id)

        return new_subnets

    def _on_snapshot_loaded(self, etcd_snapshot_response):
        """Called whenever a snapshot is loaded from etcd."""
        # Reset the cache.
        LOG.debug("Reset cache for new snapshot")
        self.agent.cache = NetworkCache()
        self.agent.cache.put(self._empty_network())

        # Suppress the processing inside on_ports_changed, until we've
        # processed the whole snapshot.
        self.suppress_on_ports_changed = True

        # Now pass each snapshot node through the dispatcher, which
        # means that on_endpoint_set will be called for each endpoint.
        for etcd_node in etcd_snapshot_response.leaves:
            etcd_node.action = 'set'
            self.dispatcher.handle_event(etcd_node)
        LOG.debug("End of new snapshot")

        # Now check for impact on subnets and DHCP driver.
        self.suppress_on_ports_changed = False
        self.on_ports_changed()

    def on_dir_delete(self, response, *args, **kwargs):
        """Called if an endpoint parent directory is deleted from etcd."""
        LOG.warning("Unexpected directory deletion from etcd; triggering" +
                    " resync; %s %s %s", response, args, kwargs)
        # Handle by doing a resync.
        self.resync_after_current_poll = True
class SubnetWatcher(EtcdWatcher):
    """Watches the etcd subnet tree; any change triggers a resync of the
    endpoint watcher that owns this instance."""

    def __init__(self, endpoint_watcher):
        super(SubnetWatcher, self).__init__('localhost:4001', SUBNET_DIR)
        self.endpoint_watcher = endpoint_watcher
        self.register_path(
            SUBNET_DIR + "/<subnet_id>",
            on_set=self.on_subnet_set
        )

    def on_subnet_set(self, response, subnet_id):
        """Handler for subnet creations and updates.

        We handle this by telling the main watcher to do a resync.
        """
        LOG.info("Subnet %s created or updated", subnet_id)
        self.endpoint_watcher.resync_after_current_poll = True

    def loop(self):
        # Catch and report any exceptions that escape here.
        try:
            super(SubnetWatcher, self).loop()
        except:  # noqa
            LOG.exception("Exception in SubnetWatcher.loop()")
            raise
        finally:
            # As this thread is exiting, arrange for the agent as a whole to
            # exit.
            self.endpoint_watcher.stop()

    def _on_snapshot_loaded(self, etcd_snapshot_response):
        """Called whenever a snapshot is loaded from etcd."""
        LOG.info("New subnet snapshot, trigger endpoint watcher to resync")
        self.endpoint_watcher.resync_after_current_poll = True
class CalicoDhcpAgent(DhcpAgent):
    """Calico DHCP agent.

    This DHCP agent subclasses and overrides the standard Neutron DHCP
    agent so as to be driven by etcd endpoint data - instead of by
    Neutron RPC network, subnet and port messages - and so as not to
    provide any agent status reporting back to the Neutron server.
    This is because we have observed that the RPC exchanges between
    DHCP agents and the Neutron server will exhaust the latter once
    there are more than a few hundred agents running.
    """

    def __init__(self):
        super(CalicoDhcpAgent, self).__init__(host=socket.gethostname())

        # Override settings that Calico's DHCP agent use requires.
        self.conf.set_override('enable_isolated_metadata', False)
        self.conf.set_override('use_namespaces', False)
        self.conf.set_override(
            'interface_driver',
            'networking_calico.agent.linux.interface.RoutedInterfaceDriver'
        )

        # Override the DHCP driver class - networking-calico's
        # DnsmasqRouted class.
        self.dhcp_driver_cls = DnsmasqRouted

        # Override the RPC plugin (i.e. proxy to the Neutron database)
        # with a fake plugin.  The DHCP driver calls this when it
        # wants to tell Neutron that it is creating, updating or
        # releasing the DHCP port.
        self.plugin_rpc = FakePlugin()

        # Watch etcd for any endpoint changes for this host.
        self.etcd = CalicoEtcdWatcher(self)

    def run(self):
        """Run the EtcdWatcher loop."""
        self.etcd.loop()
def main():
    """Entry point: load options, initialise config/logging, run the agent."""
    register_options(cfg.CONF)
    common_config.init(sys.argv[1:])
    config.setup_logging()
    CalicoDhcpAgent().run()
| StarcoderdataPython |
1639294 | from setuptools import setup, PEP420PackageFinder
# Package metadata for the "illd" distribution.
setup(
    name="illd",
    version="0.0.1",
    packages=PEP420PackageFinder.find("src"),
    package_data={},
    package_dir={"": "src"},
    extras_require={
        "testing": ["hypothesis", "pytest", "pytest-mock"],
        "documentation": ["sphinx", "sphinx_rtd_theme", "sphinx-autobuild",
                          "sphinxcontrib-napoleon"],
        "ihme_databases": ["db_tools", "db_queries", "save_results",
                           "hierarchies"],
    },
    entry_points={
        # BUG FIX: console_scripts must be a flat list of
        # "name = module:callable" strings; the previous value was a nested
        # list ([["illserver=illd.app:entry"]]), which is not a valid
        # entry-point specification.
        "console_scripts": [
            "illserver=illd.app:entry",
        ]
    },
    scripts=[],
    zip_safe=False,
    classifiers=[
        # BUG FIX: "Intendend" was a typo; trove classifiers must match the
        # official PyPI list exactly.
        "Intended Audience :: Science/Research",
        "Development Status :: 3 - Alpha",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.6",
        "Topic :: Scientific/Engineering :: Statistics",
    ],
)
| StarcoderdataPython |
3374287 | import time
class Game(object):
"""井字游戏"""
def __init__(self):
# 实例化类便开始初始化游戏
self.initialize_game()
# 初始化棋盘
def initialize_game(self):
self.current_state = [['.','.','.'],
['.','.','.'],
['.','.','.']]
# 玩家X用X作为标记,作为先手
self.player_turn = 'X'
# 打印棋盘在屏幕上
def draw_board(self):
for i in range(0, 3):
for j in range(0, 3):
# 如果棋盘没有放置,显示位置坐标
if self.current_state[i][j] == ".":
val = "({},{})".format(i, j)
else:
val = self.current_state[i][j]
if j != 2:
print('%-5s|' % val, end=" ") # -5s是指定占位空间
else: # 最后一个元素输出就换行
print('{}'.format(val))
print()
# 判断棋子位置是否合理
def is_valid(self, px, py):
if px < 0 or px > 2 or py < 0 or py > 2:
return False # 坐标不在棋盘上,不通过
elif self.current_state[px][py] != '.':
return False # 坐标已经有标记了,不通过
else: # 其他情况是合理的
return True
# 每一步之后都检查游戏是否结束,给出胜利者
def is_end(self):
for i in range(0, 3):
# 水平是否连线
if (self.current_state[i] == ['X', 'X', 'X']):
return 'X'
elif (self.current_state[i] == ['O', 'O', 'O']):
return 'O'
# 垂直是否连线
if self.current_state[0][i] != '.':
if self.current_state[0][i] == self.current_state[1][i] == self.current_state[2][i]:
return self.current_state[0][i] # 返回赢家(该位置上的符号)
# 斜线是否连线
if self.current_state[0][0] != '.':
if self.current_state[0][0] == self.current_state[1][1] == self.current_state[2][2]:
return self.current_state[0][0]
# 斜线是否连线
if self.current_state[0][2] != '.':
if self.current_state[0][2] == self.current_state[1][1] == self.current_state[2][0]:
return self.current_state[0][2]
# 棋盘是否已经放满
for i in range(0, 3):
if self.current_state[i].count(".") > 0: # 若还有".",说明还有位置
return None # 还有位置,返回空,游戏继续
return '.' # 平局返回"."
# 符号 'O' 玩家是计算机,求极大值
def max(self):
# 有可能的价值为-1(失败),0(平局),1(胜利)
max_val = -2 # max_val是初始化alpha值,-2已经小于所有值
px = None # 坐标初始化
py = None
result = self.is_end() # 返回当前结果
# 如果已经结束,就是递归返回
# 这里是构建完整树图,所以不设置递归深度
# 一直递归至游戏结束,根据结果返回评估值
if result == 'X':
return (-1, 0, 0)
elif result == 'O':
return (1, 0, 0)
elif result == '.':
return (0, 0, 0)
for i in range(0, 3):
for j in range(0, 3):
if self.current_state[i][j] == '.':
# 遍历每一个位置,如果是可以放棋子,就尝试在这里放棋子
self.current_state[i][j] = 'O'
# 然后作为一个分支,在下一层求极小值中寻找极大值
(m, min_i, min_j) = self.min()
if m > max_val: # 若有极大值,则更新下棋坐标
max_val = m
px = i
py = j
self.current_state[i][j] = '.' # 尝试结束后要清空这位置
return (max_val, px, py)
# 符号 'X' 玩家是人,是计算机对手,所以是求极小值
def min(self):
# 有可能的价值为-1(胜利),0(平局),1(失败),刚好与计算机相反
min_val = 2 # min_val初始化,2已经大于所有值
qx = None # 坐标初始化
qy = None
result = self.is_end() # 返回当前结果
if result == 'X':
return (-1, 0, 0)
elif result == 'O':
return (1, 0, 0)
elif result == '.':
return (0, 0, 0)
for i in range(0, 3):
for j in range(0, 3):
if self.current_state[i][j] == '.':
# 遍历每一个位置,如果是可以放棋子,就尝试在这里放棋子
self.current_state[i][j] = 'X'
# 然后作为一个分支,在下一层求极大值中寻找极小值
(m, max_i, max_j) = self.max()
if m < min_val: # 若有极小值,则更新下棋坐标
min_val = m
qx = i
qy = j
self.current_state[i][j] = '.'
return (min_val, qx, qy)
# 开始游戏,程序入口
def play(self):
# 极大值极小值算法
while True: # 轮流下棋,直到游戏结束
self.draw_board() # 先把当前棋盘打印在屏幕上
self.result = self.is_end() # 判断是否结束游戏
if self.result != None: # 游戏结束
if self.result == 'X': # 如果是X是胜利者
print('胜者为X!')
elif self.result == 'O': # 反之亦然
print('胜者为O!')
elif self.result == '.': # 平局
print("平局")
self.initialize_game() # 初始化棋盘,结束游戏
return
# 若没有结束游戏,看到谁下棋
if self.player_turn == 'X': # 到X下棋
while True:
start = time.time() # 记录X的思考时间
# 这里可以忽略,不给人类提示也可以的
(m, qx, qy) = self.min() # X是代表人,也就是程序对手,所以找极小值
end = time.time() # 思考结束,得到下棋的坐标qx,qy
print('用时: {}s'.format(round(end - start, 7)))
print('推荐步骤: X = {}, Y = {}'.format(qx, qy))
try:
px = int(input('输入坐标值x: '))
py = int(input('输入坐标值y: '))
except:
# 若输入不能转化为整数,请再次输入
print('输入不符合要求,请再次输入。')
break
if self.is_valid(px, py):
self.current_state[px][py] = 'X'
self.player_turn = 'O'
break
else:
print('输入不符合要求,请再次输入。')
else:
(m, px, py) = self.max() # 到计算机下棋,所以要找极大值
self.current_state[px][py] = 'O'
self.player_turn = 'X'
def max_alpha_beta(self, alpha, beta):
max_val = -2
px = None
py = None
result = self.is_end()
if result == 'X':
return (-1, 0, 0)
elif result == 'O':
return (1, 0, 0)
elif result == '.':
return (0, 0, 0)
for i in range(0, 3):
for j in range(0, 3):
if self.current_state[i][j] == '.':
self.current_state[i][j] = 'O'
(m, min_i, in_j) = self.min_alpha_beta(alpha, beta)
if m > max_val:
max_val = m
px = i
py = j
self.current_state[i][j] = '.'
# 前面的思路是一样的,主要添加以下剪枝条件的判断
alpha = max(max_val, alpha)
if beta <= alpha:
return (max_val, px, py)
return (max_val, px, py)
def min_alpha_beta(self, alpha, beta):
min_val = 2
qx = None
qy = None
result = self.is_end()
if result == 'X':
return (-1, 0, 0)
elif result == 'O':
return (1, 0, 0)
elif result == '.':
return (0, 0, 0)
for i in range(0, 3):
for j in range(0, 3):
if self.current_state[i][j] == '.':
self.current_state[i][j] = 'X'
(m, max_i, max_j) = self.max_alpha_beta(alpha, beta)
if m < min_val:
min_val = m
qx = i
qy = j
self.current_state[i][j] = '.'
# 前面的思路是一样的,主要添加以下剪枝条件的判断
beta = min(min_val, beta)
if beta <= alpha:
return (min_val, qx, qy)
return (min_val, qx, qy)
    def play_alpha_beta(self):
        """Main game loop for the alpha-beta-pruned minimax variant.

        Identical flow to play(), but hints and computer moves use the
        pruned search with an initial (alpha, beta) window of (-2, 2).
        """
        # minimax with alpha-beta pruning
        while True:
            self.draw_board() # first print the current board on screen
            self.result = self.is_end() # check whether the game is over
            if self.result != None: # game over
                if self.result == 'X': # X is the winner
                    print('胜者为X!')
                elif self.result == 'O': # likewise when O wins
                    print('胜者为O!')
                elif self.result == '.': # draw
                    print("平局")
                self.initialize_game() # reset the board; this game is finished
                return
            if self.player_turn == 'X':
                while True:
                    start = time.time()
                    # X is the program's opponent, so minimize; alpha/beta start at -2/2
                    (m, qx, qy) = self.min_alpha_beta(-2, 2)
                    end = time.time()
                    print('用时: {}s'.format(round(end - start, 7)))
                    print('推荐步骤: X = {}, Y = {}'.format(qx, qy))
                    try:
                        px = int(input('输入坐标值x: '))
                        py = int(input('输入坐标值y: '))
                    except:
                        # input could not be converted to an integer; ask again
                        print('输入不符合要求,请再次输入。')
                        break
                    if self.is_valid(px, py):
                        self.current_state[px][py] = 'X'
                        self.player_turn = 'O'
                        break
                    else:
                        print('输入不符合要求,请再次输入。')
            else:
                # computer's turn, so maximize; alpha/beta start at -2/2
                (m, px, py) = self.max_alpha_beta(-2, 2)
                self.current_state[px][py] = 'O'
                self.player_turn = 'X'
if __name__ == "__main__":
g = Game()
g.play_alpha_beta() | StarcoderdataPython |
9498 | <filename>src/sage/tests/books/computational-mathematics-with-sagemath/domaines_doctest.py<gh_stars>1000+
## -*- encoding: utf-8 -*-
"""
This file (./domaines_doctest.sage) was *autogenerated* from ./domaines.tex,
with sagetex.sty version 2011/05/27 v2.3.1.
It contains the contents of all the sageexample environments from this file.
You should be able to doctest this file with:
sage -t ./domaines_doctest.sage
It is always safe to delete this file; it is not used in typesetting your
document.
Sage example in ./domaines.tex, line 10::
sage: x = var('x')
Sage example in ./domaines.tex, line 69::
sage: o = 12/35
sage: type(o)
<... 'sage.rings.rational.Rational'>
Sage example in ./domaines.tex, line 82::
sage: type(12/35)
<... 'sage.rings.rational.Rational'>
Sage example in ./domaines.tex, line 131::
sage: o = 720
sage: o.factor()
2^4 * 3^2 * 5
Sage example in ./domaines.tex, line 142::
sage: type(o).factor(o)
2^4 * 3^2 * 5
Sage example in ./domaines.tex, line 157::
sage: 720.factor()
2^4 * 3^2 * 5
Sage example in ./domaines.tex, line 166::
sage: o = 720 / 133
sage: o.numerator().factor()
2^4 * 3^2 * 5
Sage example in ./domaines.tex, line 253::
sage: 3 * 7
21
Sage example in ./domaines.tex, line 261::
sage: (2/3) * (6/5)
4/5
Sage example in ./domaines.tex, line 267::
sage: (1 + I) * (1 - I)
2
Sage example in ./domaines.tex, line 274::
sage: (x + 2) * (x + 1)
(x + 2)*(x + 1)
sage: (x + 1) * (x + 2)
(x + 2)*(x + 1)
Sage example in ./domaines.tex, line 308::
sage: def fourth_power(a):
....: a = a * a
....: a = a * a
....: return a
Sage example in ./domaines.tex, line 330::
sage: fourth_power(2)
16
sage: fourth_power(3/2)
81/16
sage: fourth_power(I)
1
sage: fourth_power(x+1)
(x + 1)^4
sage: M = matrix([[0,-1],[1,0]]); M
[ 0 -1]
[ 1 0]
sage: fourth_power(M)
[1 0]
[0 1]
Sage example in ./domaines.tex, line 375::
sage: t = type(5/1); t
<... 'sage.rings.rational.Rational'>
sage: t == type(5)
False
Sage example in ./domaines.tex, line 476::
sage: a = 5; a
5
sage: a.is_unit()
False
Sage example in ./domaines.tex, line 484::
sage: a = 5/1; a
5
sage: a.is_unit()
True
Sage example in ./domaines.tex, line 507::
sage: parent(5)
Integer Ring
sage: parent(5/1)
Rational Field
Sage example in ./domaines.tex, line 515::
sage: ZZ
Integer Ring
sage: QQ
Rational Field
Sage example in ./domaines.tex, line 525::
sage: QQ(5).parent()
Rational Field
sage: ZZ(5/1).parent()
Integer Ring
sage: ZZ(1/5)
Traceback (most recent call last):
...
TypeError: no conversion of this rational to integer
Sage example in ./domaines.tex, line 543::
sage: ZZ(1), QQ(1), RR(1), CC(1)
(1, 1, 1.00000000000000, 1.00000000000000)
Sage example in ./domaines.tex, line 568::
sage: cartesian_product([QQ, QQ])
The Cartesian product of (Rational Field, Rational Field)
Sage example in ./domaines.tex, line 574::
sage: ZZ.fraction_field()
Rational Field
Sage example in ./domaines.tex, line 580::
sage: ZZ['x']
Univariate Polynomial Ring in x over Integer Ring
Sage example in ./domaines.tex, line 591::
sage: Z5 = GF(5); Z5
Finite Field of size 5
sage: P = Z5['x']; P
Univariate Polynomial Ring in x over Finite Field of size 5
sage: M = MatrixSpace(P, 3, 3); M
Full MatrixSpace of 3 by 3 dense matrices over
Univariate Polynomial Ring in x over Finite Field of size 5
Sage example in ./domaines.tex, line 602::
sage: M.random_element() # random
[2*x^2 + 3*x + 4 4*x^2 + 2*x + 2 4*x^2 + 2*x]
[ 3*x 2*x^2 + x + 3 3*x^2 + 4*x]
[ 4*x^2 + 3 3*x^2 + 2*x + 4 2*x + 4]
Sage example in ./domaines.tex, line 697::
sage: QQ.category()
Join of Category of number fields and Category of quotient fields and Category of metric spaces
Sage example in ./domaines.tex, line 704::
sage: QQ in Fields()
True
Sage example in ./domaines.tex, line 712::
sage: QQ in CommutativeAdditiveGroups()
True
Sage example in ./domaines.tex, line 718::
sage: QQ['x'] in EuclideanDomains()
True
Sage example in ./domaines.tex, line 859::
sage: 5.parent()
Integer Ring
Sage example in ./domaines.tex, line 872::
sage: type(factor(4))
<class 'sage.structure.factorization_integer.IntegerFactorization'>
Sage example in ./domaines.tex, line 895::
sage: int(5)
5
sage: type(int(5))
<... 'int'>
Sage example in ./domaines.tex, line 909::
sage: Integer(5)
5
sage: type(Integer(5))
<... 'sage.rings.integer.Integer'>
Sage example in ./domaines.tex, line 926::
sage: factorial(99) / factorial(100) - 1 / 50
-1/100
Sage example in ./domaines.tex, line 974::
sage: 72/53 - 5/3 * 2.7
-3.14150943396227
Sage example in ./domaines.tex, line 982::
sage: cos(1), cos(1.)
(cos(1), 0.540302305868140)
Sage example in ./domaines.tex, line 1000::
sage: pi.n(digits=50) # variant: n(pi,digits=50)
3.1415926535897932384626433832795028841971693993751
Sage example in ./domaines.tex, line 1020::
sage: z = CC(1,2); z.arg()
1.10714871779409
Sage example in ./domaines.tex, line 1036::
sage: I.parent()
Number Field in I with defining polynomial x^2 + 1 with I = 1*I
Sage example in ./domaines.tex, line 1043::
sage: (1.+2.*I).parent()
Complex Field with 53 bits of precision
sage: (1.+2.*SR(I)).parent()
Symbolic Ring
Sage example in ./domaines.tex, line 1064::
sage: z = 3 * exp(I*pi/4)
sage: z.real(), z.imag(), z.abs().canonicalize_radical()
(3/2*sqrt(2), 3/2*sqrt(2), 3)
Sage example in ./domaines.tex, line 1094::
sage: a, b, c = 0, 2, 3
sage: a == 1 or (b == 2 and c == 3)
True
Sage example in ./domaines.tex, line 1147::
sage: x, y = var('x, y')
sage: bool( (x-y)*(x+y) == x^2-y^2 )
True
Sage example in ./domaines.tex, line 1171::
sage: Z4 = IntegerModRing(4); Z4
Ring of integers modulo 4
sage: m = Z4(7); m
3
Sage example in ./domaines.tex, line 1184::
sage: 3 * m + 1
2
Sage example in ./domaines.tex, line 1191::
sage: Z3 = GF(3); Z3
Finite Field of size 3
Sage example in ./domaines.tex, line 1243::
sage: a = matrix(QQ, [[1,2,3],[2,4,8],[3,9,27]])
sage: (a^2 + 1) * a^(-1)
[ -5 13/2 7/3]
[ 7 1 25/3]
[ 2 19/2 27]
Sage example in ./domaines.tex, line 1259::
sage: M = MatrixSpace(QQ,3,3); M
Full MatrixSpace of 3 by 3 dense matrices over Rational Field
sage: a = M([[1,2,3],[2,4,8],[3,9,27]])
sage: (a^2 + 1) * a^(-1)
[ -5 13/2 7/3]
[ 7 1 25/3]
[ 2 19/2 27]
Sage example in ./domaines.tex, line 1283::
sage: P = ZZ['x']; P
Univariate Polynomial Ring in x over Integer Ring
sage: F = P.fraction_field(); F
Fraction Field of Univariate Polynomial Ring in x over Integer Ring
sage: p = P(x+1) * P(x); p
x^2 + x
sage: p + 1/p
(x^4 + 2*x^3 + x^2 + 1)/(x^2 + x)
sage: parent(p + 1/p)
Fraction Field of Univariate Polynomial Ring in x over Integer Ring
Sage example in ./domaines.tex, line 1382::
sage: k.<a> = NumberField(x^3 + x + 1); a^3; a^4+3*a
-a - 1
-a^2 + 2*a
Sage example in ./domaines.tex, line 1416::
sage: parent(sin(x))
Symbolic Ring
Sage example in ./domaines.tex, line 1422::
sage: SR
Symbolic Ring
Sage example in ./domaines.tex, line 1428::
sage: SR.category()
Category of fields
Sage example in ./domaines.tex, line 1482::
sage: R = QQ['x1,x2,x3,x4']; R
Multivariate Polynomial Ring in x1, x2, x3, x4 over Rational Field
sage: x1, x2, x3, x4 = R.gens()
Sage example in ./domaines.tex, line 1489::
sage: x1 * (x2 - x3)
x1*x2 - x1*x3
Sage example in ./domaines.tex, line 1496::
sage: (x1+x2)*(x1-x2) - (x1^2 - x2^2)
0
Sage example in ./domaines.tex, line 1509::
sage: P = prod( (a-b) for (a,b) in Subsets([x1,x2,x3,x4],2) ); P * P.lc()
x1^3*x2^2*x3 - x1^2*x2^3*x3 - x1^3*x2*x3^2 + x1*x2^3*x3^2
+ x1^2*x2*x3^3 - x1*x2^2*x3^3 - x1^3*x2^2*x4 + x1^2*x2^3*x4
+ x1^3*x3^2*x4 - x2^3*x3^2*x4 - x1^2*x3^3*x4 + x2^2*x3^3*x4
+ x1^3*x2*x4^2 - x1*x2^3*x4^2 - x1^3*x3*x4^2 + x2^3*x3*x4^2
+ x1*x3^3*x4^2 - x2*x3^3*x4^2 - x1^2*x2*x4^3 + x1*x2^2*x4^3
+ x1^2*x3*x4^3 - x2^2*x3*x4^3 - x1*x3^2*x4^3 + x2*x3^2*x4^3
Sage example in ./domaines.tex, line 1531::
sage: x1, x2, x3, x4 = SR.var('x1, x2, x3, x4')
sage: got = prod( (a-b) for (a,b) in Subsets([x1,x2,x3,x4],2) )
sage: expected1 = -(x1 - x2)*(x1 - x3)*(x1 - x4)*(x2 - x3)*(x2 - x4)*(x3 - x4)
sage: expected2 = (x1 - x2)*(x1 - x3)*(x1 - x4)*(x2 - x3)*(x2 - x4)*(x3 - x4)
sage: bool(got == expected1 or got == expected2)
True
Sage example in ./domaines.tex, line 1581::
sage: x = var('x')
sage: p = 54*x^4+36*x^3-102*x^2-72*x-12
sage: factor(p)
6*(x^2 - 2)*(3*x + 1)^2
Sage example in ./domaines.tex, line 1616::
sage: R = ZZ['x']; R
Univariate Polynomial Ring in x over Integer Ring
Sage example in ./domaines.tex, line 1622::
sage: q = R(p); q
54*x^4 + 36*x^3 - 102*x^2 - 72*x - 12
Sage example in ./domaines.tex, line 1629::
sage: parent(q)
Univariate Polynomial Ring in x over Integer Ring
Sage example in ./domaines.tex, line 1635::
sage: factor(q)
2 * 3 * (3*x + 1)^2 * (x^2 - 2)
Sage example in ./domaines.tex, line 1642::
sage: R = QQ['x']; R
Univariate Polynomial Ring in x over Rational Field
sage: q = R(p); q
54*x^4 + 36*x^3 - 102*x^2 - 72*x - 12
sage: factor(q)
(54) * (x + 1/3)^2 * (x^2 - 2)
Sage example in ./domaines.tex, line 1665::
sage: R = ComplexField(16)['x']; R
Univariate Polynomial Ring in x over Complex Field
with 16 bits of precision
sage: q = R(p); q
54.00*x^4 + 36.00*x^3 - 102.0*x^2 - 72.00*x - 12.00
sage: factor(q)
(54.00) * (x - 1.414) * (x + 0.3333)^2 * (x + 1.414)
Sage example in ./domaines.tex, line 1685::
sage: R = QQ[sqrt(2)]['x']; R
Univariate Polynomial Ring in x over Number Field in sqrt2 with defining polynomial x^2 - 2 with sqrt2 = 1.414213562373095?
sage: q = R(p); q
54*x^4 + 36*x^3 - 102*x^2 - 72*x - 12
sage: factor(q)
(54) * (x - sqrt2) * (x + sqrt2) * (x + 1/3)^2
Sage example in ./domaines.tex, line 1698::
sage: R = GF(5)['x']; R
Univariate Polynomial Ring in x over Finite Field of size 5
sage: q = R(p); q
4*x^4 + x^3 + 3*x^2 + 3*x + 3
sage: factor(q)
(4) * (x + 2)^2 * (x^2 + 3)
"""
| StarcoderdataPython |
1738105 | import simplejson as json
from mongrel2 import handler
import hashlib
import base64
sender_id = "82209006-86FF-4982-B5EA-D1E29E55D481"
conn = handler.Connection(sender_id, "tcp://127.0.0.1:9999",
"tcp://127.0.0.1:9998")
users = {}  # maps Mongrel2 connection id -> user name for every connected client
user_list = []  # cached list of user names, rebuilt on join/disconnect
def wsChallenge(v):
    """Compute the Sec-WebSocket-Accept token for handshake key *v*.

    Per RFC 6455 section 4.2.2: SHA-1 of the client key concatenated with
    the fixed GUID, base64-encoded. The original passed a str straight to
    hashlib, which raises TypeError on Python 3 and the bare except then
    silently returned "" -- breaking every handshake. Encoding/decoding
    explicitly works on both Python 2 and 3.
    """
    try:
        digest = hashlib.sha1(
            (v + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11").encode("ascii")).digest()
        return base64.b64encode(digest).decode("ascii")
    except Exception:
        # Preserve the legacy best-effort behavior: any failure (e.g. a
        # missing/None key header) yields an empty token.
        return ""
def wsdeframe(data):
    """Decode one masked WebSocket frame (str) and return its payload.

    We rely on Mongrel2 to have done the length calculation and to have
    read exactly the header plus payload into *data*, so no length
    validation happens here beyond the header flags.

    Raises Exception for unmasked or fragmented frames, neither of which
    this server supports (RFC 6455 requires client frames to be masked).
    """
    # FIN bit: only unfragmented messages are handled.
    fin = (ord(data[0]) & 0x80) != 0
    # MASK bit of the second header byte; low 7 bits are the base length.
    masked = (ord(data[1]) & 0x80) != 0
    leng = ord(data[1]) & 0x7f
    if not masked:
        raise Exception("Packet not masked!")
    if not fin:
        raise Exception("Fragmentation not supported")
    # The 4 masking-key bytes sit immediately before the payload; their
    # offset grows with the extended-length field (2 bytes for 126,
    # 8 bytes for 127).
    payloadStart = 6
    if leng == 126:
        payloadStart += 2
    elif leng == 127:
        payloadStart += 8
    # List comprehension instead of map(): on Python 3 a map object is not
    # subscriptable, which made the original XOR loop crash.
    maskKey = [ord(c) for c in data[payloadStart - 4:payloadStart]]
    dataOut = []
    for index, byte in enumerate(data[payloadStart:]):
        # Unmask: payload byte XOR mask-key byte, key cycling every 4 bytes.
        dataOut.append(chr(ord(byte) ^ maskKey[index % 4]))
    return "".join(dataOut)
def wsframe(data, opcode=1):
    """Encode *data* (str) as one unfragmented, unmasked WebSocket frame.

    Payload-length encoding follows RFC 6455 section 5.2: lengths 0-125
    are stored directly, 126 signals a 2-byte extended length, 127 an
    8-byte extended length.

    Bug fixed: the original compared against 2**32 (instead of 2**16) and
    then wrote FOUR extended-length bytes for the 126 case, producing
    malformed frames for any payload of 126 bytes or more -- its own
    wsdeframe() (and every browser) expects exactly two bytes there.
    """
    header = chr(0x80 | opcode)  # FIN=1 plus opcode (1 = text frame)
    realLength = len(data)
    if realLength < 126:
        header += chr(realLength)
    elif realLength < 2 ** 16:
        header += chr(126)
        header += chr(realLength >> 8 & 0xff)
        header += chr(realLength & 0xff)
    else:
        header += chr(127)
        for shift in (56, 48, 40, 32, 24, 16, 8, 0):
            header += chr(realLength >> shift & 0xff)
    return header + data
# Main event loop (Python 2): receive Mongrel2 requests, perform the
# WebSocket handshake for plain GETs, and relay chat messages between
# every connected client.
while True:
    try:
        req = conn.recv()
    except:
        print "FAILED RECV"
        continue
    print "ID", req.conn_id
    if req.headers.get('METHOD') == 'GET':
        # Plain HTTP GET: treat it as a WebSocket upgrade request and
        # answer with the RFC 6455 "101 Switching Protocols" handshake.
        responseCode=wsChallenge(req.headers.get('sec-websocket-key'))
        response="HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: %s\r\n\r\n"%responseCode
        print response
        conn.reply(req,response)
        continue
    if req.is_disconnect():
        print "DISCONNECTED", req.conn_id
        # NOTE(review): 'data' is only assigned further down from a parsed
        # message; if the very first request is a disconnect this raises
        # NameError (otherwise it silently reuses 'data' from a previous
        # loop iteration).
        if req.conn_id in users:
            data['user'] = users[req.conn_id]
            del users[req.conn_id]
            if len(users.keys()) > 0:
                # broadcast the departure to everyone still connected
                conn.deliver(req.sender, users.keys(), wsframe(json.dumps(data)))
            user_list = [u[1] for u in users.items()]
    if req.headers.get('METHOD') != 'WEBSOCKET':
        print 'METHOD is Not GET or WEBSOCKET',req.headers.get('METHOD')
        continue
    try:
        wsdata = wsdeframe(req.body)
    except:
        print "WS Frame decode failed"
        req.reply('')
        continue
    print "DATA:",wsdata
    try:
        data = json.loads(wsdata)
    except:
        print "JSON decode failed"
        #conn.reply(req,'')
        continue
    if data["type"] == "join":
        # tell the existing users about the newcomer, then send the
        # newcomer the current user list
        if len(users.keys()) > 0:
            conn.deliver(req.sender, users.keys(), wsframe(json.dumps(data)))
        users[req.conn_id] = data['user']
        user_list = [u[1] for u in users.items()]
        conn.reply(req, wsframe(json.dumps({'type': 'userList', 'users': user_list})))
    elif data["type"] == "disconnect":
        # explicit in-band disconnect message: same cleanup as above
        print "DISCONNECTED", req.conn_id
        if req.conn_id in users:
            data['user'] = users[req.conn_id]
            del users[req.conn_id]
            if len(users.keys()) > 0:
                conn.deliver(req.sender, users.keys(), wsframe(json.dumps(data)))
            user_list = [u[1] for u in users.items()]
    elif req.conn_id not in users:
        # first message from an unknown connection: remember its user name
        users[req.conn_id] = data['user']
    elif data['type'] == "msg":
        # ordinary chat message: broadcast to every connected client
        conn.deliver(req.sender, users.keys(), wsframe(json.dumps(data)))
    print "REGISTERED USERS:", len(users)
| StarcoderdataPython |
67475 | import gym
import hydra
from ood_env import OODEnv
from util import ImageInputWrapper
import numpy as np
from stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack
from stable_baselines3.common.vec_env import DummyVecEnv
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.env_checker import check_env
from ood_model_wrapper import OodDetectorWrappedModel
import wandb
import os
from stable_baselines3 import PPO, DQN, A2C
from sb3_contrib import TQC, QRDQN, MaskablePPO
from wandb.integration.sb3 import WandbCallback
# Registry that maps the algorithm name found in the config onto its
# stable-baselines3 / sb3-contrib implementation class. Maskable PPO is
# reachable under several accepted alias spellings.
ALGO_DICT = {
    "PPO": PPO,
    "DQN": DQN,
    "A2C": A2C,
    "TQC": TQC,
    "QRDQN": QRDQN,
}
for _alias in ("PPO_MASK", "PPO_Mask", "MASKABLE_PPO"):
    ALGO_DICT[_alias] = MaskablePPO
@hydra.main(config_path='.', config_name='config')
def main(cfg):
    """Train (and optionally evaluate) an RL agent described by a Hydra config.

    Looks up the per-algorithm/per-environment hyperparameters in ``cfg``,
    logs the run to Weights & Biases, optionally wraps the environment for
    OOD injection and the model for OOD detection, trains, and saves the
    resulting policy next to the working directory.
    """
    # initialize wandb
    os.environ["WANDB_API_KEY"] = cfg.wandb.key

    model_name = cfg.model
    env_name = cfg.env
    env_key = env_name.replace("-", "_")
    try:
        policy_class = cfg.hyperparams[model_name][env_key].policy_class
    except Exception:
        # No explicit policy class configured for this algo/env pair; fall
        # back on the input type. (The original bare "except:" also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception here.)
        policy_class = "CnnPolicy" if cfg.image_input else "MlpPolicy"
    total_timesteps = cfg.hyperparams[model_name][env_key].n_timesteps

    # Flat run configuration recorded in the wandb dashboard.
    config = {
        "model": model_name,
        "policy_class": policy_class,
        "total_timesteps": total_timesteps,
        "env_name": env_name,
        "image_input": cfg.image_input,
        "n_frames_stack": cfg.n_frames_stack,
        "use_ood_detector_wrapped_model": cfg.use_ood_detector_wrapped_model,
        "eval_outlier_detection": cfg.eval_outlier_detection,
        "num_eval_rollouts": cfg.num_eval_rollouts,
        "ood_config.use": cfg.ood_config.use,
        "ood_config.prob": cfg.ood_config.prob,
        "ood_config.type": cfg.ood_config.type,
        "ood_config.random_std": cfg.ood_config.random_std,
        "ood_config.outlier_env_names": cfg.ood_config.outlier_env_names,
        "ood_config.ood_state_prob": cfg.ood_config.ood_state_prob,
    }
    try:
        # Image dimensions only exist for pixel-based configurations.
        config.update({
            "img_height": cfg.hyperparams[model_name][env_key].img_height,
            "img_width": cfg.hyperparams[model_name][env_key].img_width,
        })
    except Exception:
        pass

    run = wandb.init(
        project=cfg.wandb.project,
        entity=cfg.wandb.entity,
        config=config,
        sync_tensorboard=True,  # auto-upload sb3's tensorboard metrics
        monitor_gym=True,  # auto-upload the videos of agents playing the game
        save_code=True,  # optional
    )

    model_save_filename = f"{model_name}_{env_name}_{run.id}"
    model_save_path = os.path.join(os.getcwd(), f"{model_save_filename}.zip")
    wandb.config.update({"model_save_path": model_save_path})

    # Build the (optionally OOD-injecting) vectorized training environment.
    env = gym.make(cfg.env)
    if cfg.ood_config.use:
        env = OODEnv(env, cfg.ood_config)
    check_env(env)
    env = Monitor(env)
    env = DummyVecEnv([lambda: env])
    if cfg.image_input:
        env = VecFrameStack(env, cfg.n_frames_stack)

    # Assemble the SB3 constructor arguments from the configured hyperparams.
    params = {
        "policy": policy_class,
        "env": env,
        "verbose": cfg.stable_baselines.verbosity,
        "tensorboard_log": f"runs/{run.id}",
    }
    params = {**params, **cfg.hyperparams[model_name][env_key]}
    try:
        # SECURITY: policy_kwargs is stored as a Python expression string in
        # the config and eval'd here -- only run with trusted config files.
        params['policy_kwargs'] = eval(params['policy_kwargs'])
    except Exception:
        pass
    # Strip keys that are config metadata, not SB3 constructor arguments.
    params.pop("n_timesteps", None)
    params.pop("img_width", None)
    params.pop("img_height", None)

    # Initialize policy
    policy = ALGO_DICT[model_name](**params)

    wandb_callback = WandbCallback(
        gradient_save_freq=100,
        verbose=2
    )
    if cfg.use_ood_detector_wrapped_model:
        # Wrap the policy so that states are screened by an OOD detector.
        model = OodDetectorWrappedModel(
            policy,
            cfg.ood_detector.pretrain_timesteps,
            cfg.ood_detector.fit_outlier_detectors_every_n,
            cfg.ood_detector.k,
            cfg.ood_detector.distance_threshold_percentile,
            cfg.ood_detector.distance_metric
        )
        model.learn(total_timesteps=total_timesteps, callback=wandb_callback)
        model.eval(cfg.num_eval_rollouts, check_outlier=cfg.eval_outlier_detection)
        model.save(model_save_filename)
    else:
        policy.learn(total_timesteps=total_timesteps, callback=wandb_callback)
        policy.save(model_save_filename)

    # wandb.save(model_save_path)
    print(f"Model saved to {model_save_path}")
    run.finish()
if __name__ == '__main__':
    # Hydra parses CLI overrides and injects the composed config into main().
    main()
| StarcoderdataPython |
1663536 | import urllib2
import requests
from bs4 import BeautifulSoup
def getEricAuth():
    """Log in to the scumbag-control web UI and return its applet id.

    Posts the login form at auth.asp, then scrapes title_app.asp for the
    value of the <param name="APPLET_ID"> element with BeautifulSoup.
    """
    login_url = "http://scumbag-control/auth.asp"
    target_url = "http://scumbag-control/title_app.asp"
    # NOTE(review): credentials are hard-coded; '<PASSWORD>' looks like a
    # scrubbed placeholder -- inject the real secret before using this.
    login_params = {'login' : 'administrator', 'password' : '<PASSWORD>', 'action_login.x' : '1', 'action_login.y' : '1'}
    s = requests.Session()
    # The session keeps the auth cookie set by the login POST for the GET.
    s.post(url = login_url, data = login_params)
    r = s.get(url = target_url)
    soup = BeautifulSoup(r.text, features="lxml")
    applet_id = soup.find('param', attrs={'name' : 'APPLET_ID'}).get('value');
    return applet_id
| StarcoderdataPython |
191002 | <reponame>triffid/kiki
# .................................................................................................................
level_dict["bronze"] = {
"scheme": "bronze_scheme",
"size": (9,6,9),
"intro": "bronze",
"help": (
"$scale(1.5)mission:\nactivate the exit!\n\n" + \
"to activate the exit\nfeed it with electricity:\n\n" + \
"connect the generator\nwith the motor\n"+ \
"and close the circuit\nwith the wire stones",
),
"player": { "position": (0,1,0),
},
"exits": [
{
"name": "exit",
"active": 0,
"position": (0,0,0),
},
],
"create":
"""
s = world.getSize()
d = 2
world.addObjectAtPos (KikiMotorCylinder (KikiFace.PY), KikiPos (s.x/2, 1, s.z/2))
world.addObjectAtPos (KikiMotorGear (KikiFace.PY), KikiPos (s.x/2, 0, s.z/2))
world.addObjectAtPos (KikiGear (KikiFace.PY), KikiPos (s.x/2-1, s.y-1, s.z/2-1))
world.addObjectAtPos (KikiGenerator (KikiFace.PY), KikiPos (s.x/2+1, s.y-1, s.z/2-1))
#world.addObjectAtPos (KikiHealthAtom (), KikiPos (s.x/2+1, s.y-1, s.z/2+1))
world.addObjectAtPos (KikiBomb (), KikiPos (s.x/2-1, s.y-1, s.z/2+1))
world.addObjectAtPos (KikiWireStone (), KikiPos (s.x/2, s.y-1, s.z/2))
world.addObjectAtPos (KikiWireStone (), KikiPos (s.x/2+1, s.y-2, s.z/2))
world.addObjectAtPos (KikiWireStone (), KikiPos (s.x/2-1, s.y-2, s.z/2))
# floor wire square
world.addObjectLine ("KikiWire (KikiFace.PY, 10)", KikiPos (s.x/2-d+1, 0, s.z/2-d), KikiPos (s.x/2+d, 0, s.z/2-d))
world.addObjectLine ("KikiWire (KikiFace.PY, 10)", KikiPos (s.x/2-d+1, 0, s.z/2+d), KikiPos (s.x/2+d, 0, s.z/2+d))
world.addObjectAtPos (KikiWire (KikiFace.PY, 5), KikiPos (s.x/2-d, 0, s.z/2+1))
world.addObjectAtPos (KikiWire (KikiFace.PY, 5), KikiPos (s.x/2-d, 0, s.z/2-1))
world.addObjectAtPos (KikiWire (KikiFace.PY, 13), KikiPos (s.x/2-d, 0, s.z/2))
world.addObjectAtPos (KikiWire (KikiFace.PY, 5), KikiPos (s.x/2+d, 0, s.z/2+1))
world.addObjectAtPos (KikiWire (KikiFace.PY, 5), KikiPos (s.x/2+d, 0, s.z/2-1))
world.addObjectAtPos (KikiWire (KikiFace.PY, 7), KikiPos (s.x/2+d, 0, s.z/2))
# corners of wire square
world.addObjectAtPos (KikiWire (KikiFace.PY, 6), KikiPos (s.x/2-d, 0, s.z/2-d))
world.addObjectAtPos (KikiWire (KikiFace.PY, 3), KikiPos (s.x/2-d, 0, s.z/2+d))
world.addObjectAtPos (KikiWire (KikiFace.PY, 9), KikiPos (s.x/2+d, 0, s.z/2+d))
world.addObjectAtPos (KikiWire (KikiFace.PY, 12), KikiPos (s.x/2+d, 0, s.z/2-d))
world.addObjectLine ("KikiWire (KikiFace.PY, 10)", KikiPos (0, 0, s.z/2), KikiPos (s.x/2-d, 0, s.z/2))
world.addObjectLine ("KikiWire (KikiFace.PY, 10)", KikiPos (s.x/2+d+1, 0, s.z/2), KikiPos (s.x, 0, s.z/2))
# ceiling wire square
world.addObjectLine ("KikiWire (KikiFace.NY, 10)", KikiPos (s.x/2-d+1, s.y-1, s.z/2-d), KikiPos (s.x/2+d, s.y-1, s.z/2-d))
world.addObjectLine ("KikiWire (KikiFace.NY, 10)", KikiPos (s.x/2-d+1, s.y-1, s.z/2+d), KikiPos (s.x/2+d, s.y-1, s.z/2+d))
world.addObjectAtPos (KikiWire (KikiFace.NY, 5), KikiPos (s.x/2-d, s.y-1, s.z/2+1))
world.addObjectAtPos (KikiWire (KikiFace.NY, 5), KikiPos (s.x/2-d, s.y-1, s.z/2-1))
world.addObjectAtPos (KikiWire (KikiFace.NY, 13), KikiPos (s.x/2-d, s.y-1, s.z/2))
world.addObjectAtPos (KikiWire (KikiFace.NY, 5), KikiPos (s.x/2+d, s.y-1, s.z/2+1))
world.addObjectAtPos (KikiWire (KikiFace.NY, 5), KikiPos (s.x/2+d, s.y-1, s.z/2-1))
world.addObjectAtPos (KikiWire (KikiFace.NY, 7), KikiPos (s.x/2+d, s.y-1, s.z/2))
# corners of wire square
world.addObjectAtPos (KikiWire (KikiFace.NY, 3), KikiPos (s.x/2-d, s.y-1, s.z/2-d))
world.addObjectAtPos (KikiWire (KikiFace.NY, 6), KikiPos (s.x/2-d, s.y-1, s.z/2+d))
world.addObjectAtPos (KikiWire (KikiFace.NY, 12), KikiPos (s.x/2+d, s.y-1, s.z/2+d))
world.addObjectAtPos (KikiWire (KikiFace.NY, 9), KikiPos (s.x/2+d, s.y-1, s.z/2-d))
world.addObjectLine ("KikiWire (KikiFace.NY, 10)", KikiPos (0, s.y-1, s.z/2), KikiPos (s.x/2-d, s.y-1, s.z/2))
world.addObjectLine ("KikiWire (KikiFace.NY, 10)", KikiPos (s.x/2+d+1, s.y-1, s.z/2), KikiPos (s.x, s.y-1, s.z/2))
# wall wire lines
world.addObjectLine ("KikiWire (KikiFace.PX, 5)", KikiPos ( 0, 0, s.z/2), KikiPos ( 0, s.y, s.z/2))
world.addObjectLine ("KikiWire (KikiFace.NX, 5)", KikiPos (s.x-1, 0, s.z/2), KikiPos (s.x-1, s.y, s.z/2))
""",
} | StarcoderdataPython |
1768363 | <reponame>danielpatrickdotdev/beerfest
# Generated by Django 2.1.2 on 2018-10-26 07:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 2.1.2) for the beerfest app.

    Makes Bar.name and Brewery.name unique, and restricts Beer rows to at
    most one per (brewery, name) combination.
    """

    dependencies = [
        ('beerfest', '0003_auto_20181019_0438'),
    ]

    operations = [
        migrations.AlterField(
            model_name='bar',
            name='name',
            field=models.CharField(max_length=200, unique=True),
        ),
        migrations.AlterField(
            model_name='brewery',
            name='name',
            field=models.CharField(max_length=200, unique=True),
        ),
        migrations.AlterUniqueTogether(
            name='beer',
            unique_together={('brewery', 'name')},
        ),
    ]
| StarcoderdataPython |
# Emulate Django's built-in runserver command by re-exporting everything it
# defines...
from django.core.management.commands.runserver import *
# ... and monkey-patch it: replace the module-level run() that the command
# calls with the tulip (asyncio) server implementation.
from django.core.management.commands import runserver
from c10ktools.servers.tulip import run
runserver.run = run  # rebind run on the original module so the command picks it up
del runserver  # drop the module reference; only the patched behavior remains
| StarcoderdataPython |
1778353 | <gh_stars>0
'''
@Autor: <NAME>
@Email: <EMAIL>
@Description: Agrupar aquellas palabras que tengan el mismo anaggrama
'''
def groupAnagrams(strs):
    """Group the words in *strs* that are anagrams of each other.

    Args:
        strs: list of strings.

    Returns:
        A list of lists; each inner list holds the words sharing the same
        sorted-letter signature, in first-seen order (dicts preserve
        insertion order on Python 3.7+, matching the original behavior).
    """
    groups = {}
    for word in strs:
        # Anagrams contain the same letters, so sorting yields a shared key.
        signature = "".join(sorted(word))
        groups.setdefault(signature, []).append(word)
    return list(groups.values())
# TEST CASES
# (translated) the results of these runs matched the expected output
# NOTE(review): the disabled calls below pass a plain string, which the
# function would treat as a sequence of 1-character words.
'''
print(groupAnagrams("eat"))
print(groupAnagrams("tea"))
print(groupAnagrams("ate"))
'''
#strs = ["eat","tea","tan","ate","nat","bat"]
strs = [""]  # demo input: a single empty string groups to [['']]
#strs = ["a"]
print(groupAnagrams(strs))
| StarcoderdataPython |
1788948 | <filename>posts/admin.py
from django.contrib import admin
from posts.models import Post,Comment
# Expose the blog models in the Django admin with the default ModelAdmin.
admin.site.register(Comment)
admin.site.register(Post)
| StarcoderdataPython |
1609847 | <gh_stars>1000+
#!/usr/bin/env python
# Probe whether PyYAML was built against libyaml: the C-accelerated classes
# (CLoader & friends) are only importable when the native extension exists.
try:
    from yaml import CLoader as Loader, CSafeLoader as SafeLoader, CDumper as Dumper
except ImportError:
    from yaml import Loader, SafeLoader, Dumper

have_libyaml = Loader.__name__ == 'CLoader'
if have_libyaml:
    print("libyaml is working")
elif Loader.__name__ == 'Loader':
    print("libyaml is not working")
    print("Check the python executable and pyyaml for libyaml support")
| StarcoderdataPython |
# Constants -- their meaning is not documented in this file; the names and
# values suggest day-based windowing. TODO confirm against the callers.
DATES_NUMBER: int = 7     # presumably one week's worth of dates -- confirm
INITIAL_SHIFT: int = -1   # presumably the starting offset before the first window -- confirm
MOVEMENT_SHIFT: int = 7   # presumably the step when advancing to the next window -- confirm
| StarcoderdataPython |
3372158 | # Generated by Django 3.1.12 on 2021-07-15 00:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.1.12): add the optional
    include_climate_in_objective boolean flag to ScenarioModel."""

    dependencies = [
        ('reo', '0114_sitemodel_lifetime_emissions_cost_co2'),
    ]

    operations = [
        migrations.AddField(
            model_name='scenariomodel',
            name='include_climate_in_objective',
            field=models.BooleanField(blank=True, null=True),
        ),
    ]
| StarcoderdataPython |
51861 | from bytewax import Dataflow, run
# Minimal bytewax example: square every input item and print the results.
flow = Dataflow()
flow.map(lambda x: x * x)  # transform step: square each item
flow.capture()  # mark the transformed stream as the dataflow's output
if __name__ == "__main__":
    # run() yields (epoch, item) pairs; enumerate(range(10)) supplies the
    # epoch-tagged inputs 0..9, and sorting orders the captured output.
    for epoch, y in sorted(run(flow, enumerate(range(10)))):
        print(y)
| StarcoderdataPython |
1681189 | import glob
import os
files = glob.glob("../output/Financial Data/*.csv")  # raw downloaded data files
codeFile = "../output/StockCodesNYSE.csv"  # CSV whose first field per row is a stock code
def detect_empty(file):
    """Return True when *file* contains no '|'-delimited records.

    The downloaded financial data files use '|' as the field separator, so
    a file without any '|' on any line holds no usable data.

    The original read via a bare open() and never closed the handle; the
    with-block fixes that leak. A line containing '|' splits into more than
    one field, which is exactly the original len(fields) > 1 test.
    """
    with open(file, 'r') as handle:
        for line in handle:
            if '|' in line:
                return False
    return True
def check_count():
    """Delete per-stock temp files once all three expected files exist.

    Reads stock codes (first comma-separated field per line) from the
    module-level codeFile and, for each code, removes its "<code>-*.csv"
    files under ../output/Formatted/temp/ when exactly three are present.
    """
    data = open(codeFile,'r').read()  # NOTE(review): handle is never closed
    records = data.split('\n')
    for record in records:
        code = record.split(',')[0]  # stock code is the first CSV field
        # NOTE(review): this local 'files' shadows the module-level 'files'.
        files = glob.glob("../output/Formatted/temp/"+code+"-*.csv")
        if len(files) == 3:  # all three expected outputs are present
            for file in files:
                os.remove(file)
        # if len(files)>0 and len(files)<3:
        #     print(code)
if __name__ == "__main__":
# check_count()
"""
Uncomment the following code to check and remove empty data files
"""
# for file in files:
# if detect_empty(file) == True:
# os.remove(file)
# print(file)
| StarcoderdataPython |
1621111 | # Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
# snippet-sourcedescription:[construct_url_federated_users.py demonstrates how to programmatically construct a URL that gives federated users direct access to the AWS Management Console.]
# snippet-service:[iam]
# snippet-keyword:[Python]
# snippet-keyword:[AWS Identity and Access Management (IAM)]
# snippet-keyword:[Code Sample]
# snippet-keyword:[AssumeRole]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2019-01-24]
# snippet-sourceauthor:[stephswo (AWS)]
# snippet-start:[iam.python.construct_url_federated_users.complete]
import urllib.parse
import json
import requests
import boto3

# Create an STS client
sts_client = boto3.client('sts')

# Assume a role defined on an external account. The role specifies the
# permissions that are allowed on the account.
# Replace EXTERNAL_ACCOUNT_NUMBER with the account number of the external
# account.
# Replace ROLE_NAME with the name of the role defined on the external account.
# Optional, but recommended: Specify a unique ExternalId= string assigned by
# the external account.
response = sts_client.assume_role(RoleArn='arn:aws:iam::EXTERNAL_ACCOUNT_NUMBER:role/ROLE_NAME',
                                  RoleSessionName='AssumeRoleSession1')

# Reference the temporary credentials section of the response
tempCredentials = response['Credentials']

# Build the session document with json.dumps so any special characters in
# the credentials are escaped correctly. (The original concatenated the
# JSON by hand, which would produce an invalid document if a value ever
# contained a quote or backslash.)
session_json = json.dumps({
    'sessionId': tempCredentials['AccessKeyId'],
    'sessionKey': tempCredentials['SecretAccessKey'],
    'sessionToken': tempCredentials['SessionToken'],
})

# Make request to AWS federation endpoint to get sign-in token.
# Construct the parameter string with the sign-in action request,
# a 12-hour session duration, and the JSON document with temporary
# credentials.
request_parameters = '?Action=getSigninToken'
request_parameters += '&SessionDuration=43200'
request_parameters += '&Session=' + urllib.parse.quote_plus(session_json)
request_url = 'https://signin.aws.amazon.com/federation' + request_parameters
response = requests.get(request_url)

# Returns a JSON document with a single element named SigninToken.
signin_token = json.loads(response.text)

# Create URL where the sign-in token is used to sign into the AWS Console
request_parameters = '?Action=login'
request_parameters += '&Issuer=Example.org'
request_parameters += '&Destination=' + urllib.parse.quote_plus('https://console.aws.amazon.com/')
request_parameters += '&SigninToken=' + signin_token['SigninToken']
request_url = 'https://signin.aws.amazon.com/federation' + request_parameters

# Send final URL to stdout
print(request_url)
# snippet-end:[iam.python.construct_url_federated_users.complete]
| StarcoderdataPython |
1640317 | from rest_framework import serializers
from operations.models import Operation
class OperationSerializer(serializers.ModelSerializer):
class Meta:
model = Operation
fields = ('id', 'data') | StarcoderdataPython |
70310 | import os
import copy
import json
def get_all_files(root, ext=None):
    """Recursively collect absolute paths of every file under *root*.

    When *ext* is given (e.g. ".csv"), only files whose extension matches
    it exactly are included.
    """
    collected = []
    for dirpath, _subdirs, filenames in os.walk(root):
        for name in filenames:
            if ext is not None and os.path.splitext(name)[1] != ext:
                continue
            collected.append(os.path.abspath(os.path.join(dirpath, name)))
    return collected
def obj_to_deep_dict(obj, classkey=None):
    """Recursively convert an object graph into plain dicts/lists/values.

    - dicts are converted in place (each value replaced recursively) and
      returned
    - strings/bytes are returned unchanged
    - other iterables become lists
    - objects with a __dict__ become dicts of their public, non-callable
      attributes; when *classkey* is given, the class name is stored under
      that key as well
    - everything else is returned as-is

    Fixes over the original: __dict__.iteritems() was Python-2-only, and on
    Python 3 a str has __iter__, so strings fell into the iterable branch
    and recursed forever.
    """
    if isinstance(obj, dict):
        for k in obj.keys():
            obj[k] = obj_to_deep_dict(obj[k], classkey)
        return obj
    elif isinstance(obj, (str, bytes)):
        # Guard: strings are iterable on Python 3 and must stay atomic.
        return obj
    elif hasattr(obj, "__iter__"):
        return [obj_to_deep_dict(v, classkey) for v in obj]
    elif hasattr(obj, "__dict__"):
        data = {key: obj_to_deep_dict(value, classkey)
                for key, value in obj.__dict__.items()
                if not callable(value) and not key.startswith('_')}
        if classkey is not None and hasattr(obj, "__class__"):
            data[classkey] = obj.__class__.__name__
        return data
    else:
        return obj
def obj_to_json(obj):
return json.dumps(obj_to_deep_dict(obj)) | StarcoderdataPython |
3266113 | #!/usr/bin/env python3
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from statistics import stdev, mean
import cProfile
# Create Cross-validation
def multiclass(folds,class_data,class_column):
    """Benchmark a fixed battery of sklearn classifiers with k-fold CV.

    Args:
        folds: Number of cross-validation folds.
        class_data: Feature matrix — assumes a pandas DataFrame (indexed
            with ``.iloc`` below); TODO confirm against callers.
        class_column: Target labels — assumes a pandas Series/DataFrame.

    Returns:
        Tuple of (accuracies, classifiers) where ``accuracies`` maps each
        classifier name to ``{"total": [per-fold accuracy], "average": mean,
        "stdev": sample standard deviation}``.

    NOTE(review): KFold(shuffle=True) and several classifiers are stochastic
    and no random_state is fixed, so results vary run to run.
    """
    accuracies = {}
    kf = KFold(n_splits=folds, shuffle=True)
    # The classifier roster: (display name, unfitted estimator). The same
    # estimator instances are re-fit on every fold (fit() resets state).
    classifiers = [('Decision Tree',DecisionTreeClassifier(max_depth=5)),
                   ('kNN',KNeighborsClassifier(n_neighbors=3)),
                   ('Support Vector Linear', SVC(kernel="linear", C=0.025)),
                   ('Support Vector Radial', SVC(gamma=2, C=1)),
                   ('Gaussian',GaussianProcessClassifier(1.0 * RBF(1.0))),
                   ('Random Forest',RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1)),
                   ('Neural Net',MLPClassifier(alpha=1, max_iter=1000)),
                   ('AdaBoost',AdaBoostClassifier()),
                   ('Naive Bayes',GaussianNB()),
                   ('Quadratic Discrimation',QuadraticDiscriminantAnalysis())
                   ]
    # Seed the per-classifier accuracy accumulators.
    for clf_name, classifier in classifiers:
        accuracies[clf_name] = {}
        accuracies[clf_name]['total'] = []
    for train_index, test_index in kf.split(class_data):
        x_train, y_train = class_data.iloc[train_index], class_column.iloc[train_index]
        x_test, y_test = class_data.iloc[test_index], class_column.iloc[test_index]
        # Cycle through all the classifiers, for this dataset
        for clf_name, classifier in classifiers:
            clf = classifier
            clf = clf.fit(x_train,y_train)
            predictions = clf.predict(x_test)
            accuracy = accuracy_score(y_test,predictions)
            accuracies[clf_name]['total'].append(accuracy)
    # Summarize per-classifier results across folds.
    for clf_name, classifier in classifiers:
        accuracies[clf_name]['average'] = mean(accuracies[clf_name]['total'])
        accuracies[clf_name]['stdev'] = stdev(accuracies[clf_name]['total'])
    return accuracies, classifiers
#!/usr/bin/env python
'''
Unit tests for inventory/environ.py
'''
from __future__ import absolute_import
import os
import sys
import pytest
from mock import MagicMock, patch, mock_open

# Directory of this test file; used to locate the repo root below.
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
#FIXTURES_DIR = os.path.join(FILE_DIR, "fixtures")
REPO_DIR = os.path.join(FILE_DIR, "..", "..")
# Add environ.py into path for testing
sys.path.append(os.path.join(REPO_DIR, "inventory"))
import environ
@pytest.mark.parametrize(("regex", "result"),
    [
        # Exact-name match: the whole variable name is captured as group(1).
        (r"(FOOBAR)", {"foobar": "123"}),
        # Prefix match: only the suffix after "FOO" becomes the result key.
        (r"^FOO(.*)", {"bar": "123"}),
    ]
)
def test_getVars(regex, result):
    '''
    This method makes the assumption that there will always be a group(1),
    So if doing an exact string match, for now group the entire string
    '''
    # Replace the process environment with a known fixture for the lookup.
    with patch("os.environ", new={"FOOBAR": "123", "BARFOO": "456"}):
        r = environ.getVars(regex)
    assert r == result
@pytest.mark.skip(reason="TODO")
def test_getSplunkInventory():
    # Placeholder: coverage for environ.getSplunkInventory() not written yet.
    pass
@patch('environ.loadDefaults', return_value={"splunk": {"http_port": 8000, "build_location": None}})
@patch('environ.overrideEnvironmentVars')
@patch('environ.getSecrets')
@patch('environ.getHEC')
def test_getDefaultVars(mock_getHEC, mock_getSecrets, mock_overrideEnvironmentVars, mock_loadDefaults):
    '''
    Unit test for getting our default variables.

    Fix: stacked @patch decorators inject mocks bottom-up (the decorator
    closest to the function supplies the first argument), so the parameter
    names were previously mismatched — e.g. the first parameter, named
    mock_overrideEnvironmentVars, actually received the getHEC mock. The
    test still passed (all four are interchangeable MagicMocks), but the
    names were misleading; they now reflect the real injection order.
    '''
    retval = environ.getDefaultVars()
    # loadDefaults is mocked to return a dict with a "splunk" key, which
    # getDefaultVars must propagate into its result.
    assert "splunk" in retval
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
    [
        # Check null parameters
        ({}, {}, {"opt": None, "home": None, "exec": None, "pid": None}),
        # Check default.yml parameters
        ({"opt": "/opt"}, {}, {"opt": "/opt", "home": None, "exec": None, "pid": None}),
        ({"home": "/tmp/splunk"}, {}, {"opt": None, "home": "/tmp/splunk", "exec": None, "pid": None}),
        ({"exec": "/opt/splunk/bin/splunk"}, {}, {"opt": None, "home": None, "exec": "/opt/splunk/bin/splunk", "pid": None}),
        ({"pid": "/splunk.pid"}, {}, {"opt": None, "home": None, "exec": None, "pid": "/splunk.pid"}),
        # Check environment variable parameters
        ({}, {"SPLUNK_OPT": "/home/"}, {"opt": "/home/", "home": None, "exec": None, "pid": None}),
        ({}, {"SPLUNK_HOME": "/home/"}, {"opt": None, "home": "/home/", "exec": None, "pid": None}),
        ({}, {"SPLUNK_EXEC": "/home/splunk.exe"}, {"opt": None, "home": None, "exec": "/home/splunk.exe", "pid": None}),
        ({}, {"SPLUNK_PID": "/home/splunk.pid"}, {"opt": None, "home": None, "exec": None, "pid": "/home/splunk.pid"}),
        # Check the union combination of default.yml + environment variables and order of precedence when overwriting
        ({"opt": "/home"}, {"SPLUNK_OPT": "/opt"}, {"opt": "/opt", "home": None, "exec": None, "pid": None}),
        ({"home": "/tmp/splunk"}, {"SPLUNK_HOME": "/opt/splunk"}, {"opt": None, "home": "/opt/splunk", "exec": None, "pid": None}),
        ({"exec": "/bin/splunk"}, {"SPLUNK_EXEC": "/opt/splunk/bin/splunk"}, {"opt": None, "home": None, "exec": "/opt/splunk/bin/splunk", "pid": None}),
        ({"pid": "/splunk.pid"}, {"SPLUNK_PID": "/opt/splunk/splunk.pid"}, {"opt": None, "home": None, "exec": None, "pid": "/opt/splunk/splunk.pid"}),
    ]
)
def test_getSplunkPaths(default_yml, os_env, output):
    """getSplunkPaths merges default.yml values with SPLUNK_* env vars;
    env vars win when both are set."""
    vars_scope = {"splunk": default_yml}
    with patch("os.environ", new=os_env):
        # getSplunkPaths mutates vars_scope in place rather than returning.
        environ.getSplunkPaths(vars_scope)
    assert type(vars_scope["splunk"]) == dict
    assert vars_scope["splunk"] == output
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
    [
        # Check null parameters
        ({}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
        # Check default.yml parameters
        ({"idxc": {}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"label": None}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"label": "1234"}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": "1234", "secret": None, "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"secret": None}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"secret": "1234"}}, {}, {"pass4SymmKey": "1234", "discoveryPass4SymmKey": "1234", "label": None, "secret": "1234", "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"pass4SymmKey": None}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"pass4SymmKey": "1234"}}, {}, {"pass4SymmKey": "1234", "discoveryPass4SymmKey": "1234", "label": None, "secret": "1234", "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"discoveryPass4SymmKey": None}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"discoveryPass4SymmKey": "1234"}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": "1234", "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
        # Search factor should never exceed replication factor
        ({"idxc": {"replication_factor": 0, "search_factor": 2}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 0, "search_factor": 0}),
        ({"idxc": {"replication_factor": 1, "search_factor": 3}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"replication_factor": "2", "search_factor": 3}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 2, "search_factor": 2}),
        # This should return replication_factor=2 because there are only 2 hosts in the "splunk_indexer" group
        ({"idxc": {"replication_factor": 3, "search_factor": 1}}, {}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 2, "search_factor": 1}),
        # Check environment variable parameters
        ({}, {"SPLUNK_IDXC_LABEL": ""}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": "", "secret": None, "replication_factor": 1, "search_factor": 1}),
        ({}, {"SPLUNK_IDXC_LABEL": "abcd"}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": "abcd", "secret": None, "replication_factor": 1, "search_factor": 1}),
        ({}, {"SPLUNK_IDXC_SECRET": ""}, {"pass4SymmKey": "", "discoveryPass4SymmKey": "", "label": None, "secret": "", "replication_factor": 1, "search_factor": 1}),
        ({}, {"SPLUNK_IDXC_SECRET": "abcd"}, {"pass4SymmKey": "abcd", "discoveryPass4SymmKey": "abcd", "label": None, "secret": "abcd", "replication_factor": 1, "search_factor": 1}),
        ({}, {"SPLUNK_IDXC_PASS4SYMMKEY": "abcd"}, {"pass4SymmKey": "abcd", "discoveryPass4SymmKey": "abcd", "label": None, "secret": "abcd", "replication_factor": 1, "search_factor": 1}),
        ({}, {"SPLUNK_IDXC_REPLICATION_FACTOR": "1"}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
        ({}, {"SPLUNK_IDXC_REPLICATION_FACTOR": 2, "SPLUNK_IDXC_SEARCH_FACTOR": "1"}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 2, "search_factor": 1}),
        ({}, {"SPLUNK_IDXC_DISCOVERYPASS4SYMMKEY": "qwerty"}, {"pass4SymmKey": None, "discoveryPass4SymmKey": "qwerty", "label": None, "secret": None, "replication_factor": 1, "search_factor": 1}),
        # Check the union combination of default.yml + environment variables and order of precedence when overwriting
        ({"idxc": {"label": "1234"}}, {"SPLUNK_IDXC_LABEL": "abcd"}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": "abcd", "secret": None, "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"secret": "abcd"}}, {"SPLUNK_IDXC_SECRET": "1234"}, {"pass4SymmKey": "1234", "discoveryPass4SymmKey": "1234", "label": None, "secret": "1234", "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"pass4SymmKey": "1234"}}, {"SPLUNK_IDXC_PASS4SYMMKEY": "abcd"}, {"pass4SymmKey": "abcd", "discoveryPass4SymmKey": "abcd", "label": None, "secret": "abcd", "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"pass4SymmKey": "1234", "discoveryPass4SymmKey": "7890"}}, {"SPLUNK_IDXC_PASS4SYMMKEY": "abcd"}, {"pass4SymmKey": "abcd", "discoveryPass4SymmKey": "7890", "label": None, "secret": "abcd", "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"pass4SymmKey": "1234", "discoveryPass4SymmKey": "7890"}}, {"SPLUNK_IDXC_DISCOVERYPASS4SYMMKEY": "zxcv", "SPLUNK_IDXC_PASS4SYMMKEY": "abcd"}, {"pass4SymmKey": "abcd", "discoveryPass4SymmKey": "zxcv", "label": None, "secret": "abcd", "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"secret": "abcd"}}, {"SPLUNK_IDXC_SECRET": "1234"}, {"pass4SymmKey": "1234", "discoveryPass4SymmKey": "1234", "label": None, "secret": "1234", "replication_factor": 1, "search_factor": 1}),
        ({"idxc": {"replication_factor": 3, "search_factor": 3}}, {"SPLUNK_IDXC_REPLICATION_FACTOR": 2}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 2, "search_factor": 2}),
        ({"idxc": {"replication_factor": 2, "search_factor": 2}}, {"SPLUNK_IDXC_SEARCH_FACTOR": 1}, {"pass4SymmKey": None, "discoveryPass4SymmKey": None, "label": None, "secret": None, "replication_factor": 2, "search_factor": 1}),
    ]
)
def test_getIndexerClustering(default_yml, os_env, output):
    """getIndexerClustering builds vars_scope["splunk"]["idxc"] from default.yml
    and SPLUNK_IDXC_* env vars (env wins); replication/search factors are
    capped by the number of hosts in the patched splunk_indexer group (2)."""
    vars_scope = {"splunk": default_yml}
    # Fake inventory with exactly two indexers so the cap logic is exercised.
    with patch("environ.inventory", {"splunk_indexer": {"hosts": ["a", "b"]}}) as mock_inven:
        with patch("os.environ", new=os_env):
            environ.getIndexerClustering(vars_scope)
    assert type(vars_scope["splunk"]["idxc"]) == dict
    assert vars_scope["splunk"]["idxc"] == output
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
    [
        # Check null parameters
        ({}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1}),
        # Check default.yml parameters
        ({"shc": {}}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1}),
        ({"shc": {"label": None}}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1}),
        ({"shc": {"label": "1234"}}, {}, {"pass4SymmKey": None, "label": "1234", "secret": None, "replication_factor": 1}),
        ({"shc": {"secret": None}}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1}),
        ({"shc": {"secret": "1234"}}, {}, {"pass4SymmKey": "1234", "label": None, "secret": "1234", "replication_factor": 1}),
        ({"shc": {"pass4SymmKey": None}}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1}),
        ({"shc": {"pass4SymmKey": "1234"}}, {}, {"pass4SymmKey": "1234", "label": None, "secret": "1234", "replication_factor": 1}),
        ({"shc": {"replication_factor": 0}}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 0}),
        ({"shc": {"replication_factor": 1}}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1}),
        ({"shc": {"replication_factor": "2"}}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 2}),
        # This should return replication_factor=2 because there are only 2 hosts in the "splunk_search_head" group
        ({"shc": {"replication_factor": 3}}, {}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 2}),
        # Check environment variable parameters
        ({}, {"SPLUNK_SHC_LABEL": ""}, {"pass4SymmKey": None, "label": "", "secret": None, "replication_factor": 1}),
        ({}, {"SPLUNK_SHC_LABEL": "abcd"}, {"pass4SymmKey": None,"label": "abcd", "secret": None, "replication_factor": 1}),
        ({}, {"SPLUNK_SHC_SECRET": ""}, {"pass4SymmKey": "", "label": None, "secret": "", "replication_factor": 1}),
        ({}, {"SPLUNK_SHC_SECRET": "abcd"}, {"pass4SymmKey": "abcd", "label": None, "secret": "abcd", "replication_factor": 1}),
        ({}, {"SPLUNK_SHC_PASS4SYMMKEY": "abcd"}, {"pass4SymmKey": "abcd", "label": None, "secret": "abcd", "replication_factor": 1}),
        ({}, {"SPLUNK_SHC_REPLICATION_FACTOR": "2"}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 2}),
        # Check the union combination of default.yml + environment variables and order of precedence when overwriting
        ({"shc": {"label": "1234"}}, {"SPLUNK_SHC_LABEL": "abcd"}, {"pass4SymmKey": None, "label": "abcd", "secret": None, "replication_factor": 1}),
        ({"shc": {"secret": "abcd"}}, {"SPLUNK_SHC_SECRET": "1234"}, {"pass4SymmKey": "1234", "label": None, "secret": "1234", "replication_factor": 1}),
        ({"shc": {"pass4SymmKey": "1234"}}, {"SPLUNK_SHC_PASS4SYMMKEY": "abcd"}, {"pass4SymmKey": "abcd", "label": None, "secret": "abcd", "replication_factor": 1}),
        ({"shc": {"replication_factor": 2}}, {"SPLUNK_SHC_REPLICATION_FACTOR": "1"}, {"pass4SymmKey": None, "label": None, "secret": None, "replication_factor": 1}),
    ]
)
def test_getSearchHeadClustering(default_yml, os_env, output):
    """getSearchHeadClustering builds vars_scope["splunk"]["shc"] from
    default.yml and SPLUNK_SHC_* env vars (env wins); the replication factor
    is capped by the number of hosts in the patched splunk_search_head group (2)."""
    vars_scope = {"splunk": default_yml}
    # Fake inventory with exactly two search heads to exercise the cap.
    with patch("environ.inventory", {"splunk_search_head": {"hosts": ["a", "b"]}}) as mock_inven:
        with patch("os.environ", new=os_env):
            environ.getSearchHeadClustering(vars_scope)
    assert type(vars_scope["splunk"]["shc"]) == dict
    assert vars_scope["splunk"]["shc"] == output
@pytest.mark.skip(reason="TODO")
def test_getMultisite():
    # Placeholder: coverage for environ.getMultisite() not written yet.
    pass
@pytest.mark.skip(reason="TODO")
def test_getSplunkWebSSL():
    # Placeholder: coverage for environ.getSplunkWebSSL() not written yet.
    pass
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
    [
        # Check null parameters
        ({}, {}, {"ca": None, "cert": None, "password": None, "enable": True}),
        ({"does-not-exist": True}, {}, {"ca": None, "cert": None, "password": None, "enable": True}),
        # Check default.yml parameters
        ({"ssl": {"enable": False}}, {}, {"ca": None, "cert": None, "password": None, "enable": False}),
        ({"ssl": {"ca": "hi"}}, {}, {"ca": "hi", "cert": None, "password": None, "enable": True}),
        ({"ssl": {"cert": "hi"}}, {}, {"ca": None, "cert": "hi", "password": None, "enable": True}),
        ({"ssl": {"password": "hi"}}, {}, {"ca": None, "cert": None, "password": "hi", "enable": True}),
        ({"ssl": {"ca": "aaa", "cert": "bbb", "password": "<PASSWORD>", "enable": False}}, {}, {"ca": "aaa", "cert": "bbb", "password": "<PASSWORD>", "enable": False}),
        # Check environment variable parameters
        ({}, {"SPLUNKD_SSL_CA": "hi"}, {"ca": "hi", "cert": None, "password": None, "enable": True}),
        ({}, {"SPLUNKD_SSL_CERT": "hi"}, {"ca": None, "cert": "hi", "password": None, "enable": True}),
        ({}, {"SPLUNKD_SSL_PASSWORD": "hi"}, {"ca": None, "cert": None, "password": "hi", "enable": True}),
        ({}, {"SPLUNKD_SSL_ENABLE": "true"}, {"ca": None, "cert": None, "password": None, "enable": True}),
        ({}, {"SPLUNKD_SSL_ENABLE": "false"}, {"ca": None, "cert": None, "password": None, "enable": False}),
        ({}, {"SPLUNKD_SSL_ENABLE": "False"}, {"ca": None, "cert": None, "password": None, "enable": False}),
        # Check the union combination of default.yml + environment variables and order of precedence when overwriting
        ({"ssl": {"ca": "value1"}}, {"SPLUNKD_SSL_CA": "value2"}, {"ca": "value2", "cert": None, "password": None, "enable": True}),
        ({"ssl": {"cert": "value1"}}, {"SPLUNKD_SSL_CERT": "value2"}, {"ca": None, "cert": "value2", "password": None, "enable": True}),
        ({"ssl": {"password": "<PASSWORD>"}}, {"SPLUNKD_SSL_PASSWORD": "<PASSWORD>"}, {"ca": None, "cert": None, "password": "<PASSWORD>", "enable": True}),
        ({}, {"SPLUNKD_SSL_ENABLE": "true"}, {"ca": None, "cert": None, "password": None, "enable": True}),
        ({}, {"SPLUNKD_SSL_ENABLE": "false"}, {"ca": None, "cert": None, "password": None, "enable": False}),
        ({"ssl": {"enable": True}}, {"SPLUNKD_SSL_ENABLE": "FALSE"}, {"ca": None, "cert": None, "password": None, "enable": False}),
        ({"ssl": {"enable": True}}, {"SPLUNKD_SSL_ENABLE": "FaLsE"}, {"ca": None, "cert": None, "password": None, "enable": False}),
        ({"ssl": {"enable": False}}, {"SPLUNKD_SSL_ENABLE": ""}, {"ca": None, "cert": None, "password": None, "enable": False}),
    ]
)
def test_getSplunkdSSL(default_yml, os_env, output):
    """getSplunkdSSL merges default.yml ssl settings with SPLUNKD_SSL_* env
    vars (env wins); the "enable" flag is parsed case-insensitively from the
    env var string."""
    vars_scope = {"splunk": default_yml}
    with patch("os.environ", new=os_env):
        environ.getSplunkdSSL(vars_scope)
    assert type(vars_scope["splunk"]) == dict
    assert type(vars_scope["splunk"]["ssl"]) == dict
    assert vars_scope["splunk"]["ssl"] == output
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
    [
        # Check null parameters - Splunk password is required
        ({"password": "<PASSWORD>"}, {}, {"password": "<PASSWORD>", "declarative_admin_password": False, "pass4SymmKey": None, "secret": None}),
        # Check default.yml parameters
        ({"password": "<PASSWORD>", "pass4SymmKey": "you-will-never-guess", "secret": None}, {}, {"password": "<PASSWORD>", "declarative_admin_password": False, "pass4SymmKey": "you-will-never-guess", "secret": None}),
        ({"password": "<PASSWORD>", "pass4SymmKey": "you-will-never-guess", "secret": "1234"}, {}, {"password": "<PASSWORD>", "declarative_admin_password": False, "pass4SymmKey": "you-will-never-guess", "secret": "1234"}),
        ({"password": "<PASSWORD>", "secret": "1234"}, {}, {"password": "<PASSWORD>", "declarative_admin_password": False, "pass4SymmKey": None, "secret": "1234"}),
        ({"password": "<PASSWORD>", "declarative_admin_password": True, "pass4SymmKey": "you-will-never-guess", "secret": None}, {}, {"password": "<PASSWORD>", "declarative_admin_password": True, "pass4SymmKey": "you-will-never-guess", "secret": None}),
        ({"password": "<PASSWORD>", "declarative_admin_password": True, "pass4SymmKey": "you-will-never-guess", "secret": "1234"}, {}, {"password": "<PASSWORD>", "declarative_admin_password": True, "pass4SymmKey": "you-will-never-guess", "secret": "1234"}),
        ({"password": "<PASSWORD>", "declarative_admin_password": True, "secret": "1234"}, {}, {"password": "<PASSWORD>", "declarative_admin_password": True, "pass4SymmKey": None, "secret": "1234"}),
        # Check environment variable parameters
        ({"password": None}, {"SPLUNK_PASSWORD": "<PASSWORD>", "SPLUNK_PASS4SYMMKEY": "you-will-never-guess"}, {"password": "<PASSWORD>", "declarative_admin_password": False, "pass4SymmKey": "you-will-never-guess", "secret": None}),
        ({"password": None}, {"SPLUNK_PASSWORD": "<PASSWORD>", "SPLUNK_PASS4SYMMKEY": "you-will-never-guess", "SPLUNK_SECRET": "1234"}, {"password": "<PASSWORD>", "declarative_admin_password": False, "pass4SymmKey": "you-will-never-guess", "secret": "1234"}),
        ({"password": None}, {"SPLUNK_PASSWORD": "<PASSWORD>", "SPLUNK_SECRET": "1234"}, {"password": "<PASSWORD>", "declarative_admin_password": False, "pass4SymmKey": None, "secret": "1234"}),
        ({"password": None}, {"SPLUNK_PASSWORD": "<PASSWORD>", "SPLUNK_DECLARATIVE_ADMIN_PASSWORD": "true", "SPLUNK_PASS4SYMMKEY": "you-will-never-guess"}, {"password": "<PASSWORD>", "declarative_admin_password": True, "pass4SymmKey": "you-will-never-guess", "secret": None}),
        ({"password": None}, {"SPLUNK_PASSWORD": "<PASSWORD>", "SPLUNK_DECLARATIVE_ADMIN_PASSWORD": "TRUE", "SPLUNK_PASS4SYMMKEY": "you-will-never-guess", "SPLUNK_SECRET": "1234"}, {"password": "<PASSWORD>", "declarative_admin_password": True, "pass4SymmKey": "you-will-never-guess", "secret": "1234"}),
        # We currently don't support 'yes' as a valid boolean
        ({"password": None}, {"SPLUNK_PASSWORD": "<PASSWORD>", "SPLUNK_DECLARATIVE_ADMIN_PASSWORD": "yes", "SPLUNK_SECRET": "1234"}, {"password": "<PASSWORD>", "declarative_admin_password": False, "pass4SymmKey": None, "secret": "1234"})
    ]
)
def test_getSecrets(default_yml, os_env, output):
    """getSecrets resolves the admin password, pass4SymmKey and secret from
    default.yml and SPLUNK_* env vars. os.path.isfile is mocked to False so
    the password is always treated as a literal value, never a file path."""
    vars_scope = {"splunk": default_yml}
    with patch("environ.inventory") as mock_inven:
        with patch("os.environ", new=os_env):
            with patch("environ.os.path") as mock_os_path:
                mock_os_path.isfile = MagicMock()
                mock_os_path.isfile.return_value = False
                environ.getSecrets(vars_scope)
    assert vars_scope["splunk"] == output
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
    [
        # Check when Splunk password is a file
        ({"password": "/<PASSWORD>"}, {}, {"password": "<PASSWORD>", "pass4SymmKey": None, "secret": None}),
        ({"password": "<PASSWORD>"}, {"SPLUNK_PASSWORD": "/<PASSWORD>"}, {"password": "<PASSWORD>", "pass4SymmKey": None, "secret": None}),
    ]
)
def test_getSecrets_passwordFromFile(default_yml, os_env, output):
    """When the configured password resolves to an existing file path,
    getSecrets reads the password from that file instead of using the
    literal string. The read_data carries surrounding whitespace — the
    assertion implies getSecrets strips it; confirm against environ.getSecrets."""
    file_contents = """
worldneversayshiback
"""
    # mock_open simulates the password file being read.
    m = mock_open(read_data=file_contents)
    vars_scope = {"splunk": default_yml}
    with patch("environ.open", m, create=True) as mopen:
        with patch("environ.inventory") as mock_inven:
            with patch("os.environ", new=os_env):
                with patch("os.path") as mock_os_path:
                    # Make sure that the isfile() check returns True
                    mock_os_path.isfile = MagicMock()
                    mock_os_path.isfile.return_value = True
                    environ.getSecrets(vars_scope)
    mopen.assert_called_once()
    assert vars_scope["splunk"]["password"] == "<PASSWORD>"
@pytest.mark.parametrize(("default_yml"),
    [
        # Check null parameters
        ({}),
        ({"password": None}),
        ({"password": ""})
    ]
)
def test_noSplunkPassword(default_yml):
    """getSecrets must raise when no Splunk password is supplied at all
    (missing key, None, or empty string)."""
    vars_scope = {"splunk": default_yml}
    with pytest.raises(Exception) as exc:
        with patch("environ.inventory") as mock_inven:
            with patch("os.environ", new={}):
                environ.getSecrets(vars_scope)
    # Inspect the captured exception after the raises-block exits.
    assert "Splunk password must be supplied!" in str(exc.value)
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
    [
        # Check null parameters
        ({}, {}, {"launch": {}}),
        # Check default.yml parameters
        ({"launch": {}}, {}, {"launch": {}}),
        ({"launch": {"A": "B"}}, {}, {"launch": {"A": "B"}}),
        ({"launch": {"A": "B", "C": "D"}}, {}, {"launch": {"A": "B", "C": "D"}}),
        # Check environment variable parameters
        ({}, {"SPLUNK_LAUNCH_CONF": None}, {"launch": {}}),
        ({}, {"SPLUNK_LAUNCH_CONF": ""}, {"launch": {}}),
        ({}, {"SPLUNK_LAUNCH_CONF": "AAA=BBB"}, {"launch": {"AAA": "BBB"}}),
        ({}, {"SPLUNK_LAUNCH_CONF": "AAA=BBB,CCC=DDD"}, {"launch": {"AAA": "BBB", "CCC": "DDD"}}),
        # Only the first "=" splits key from value; later "=" stay in the value.
        ({}, {"SPLUNK_LAUNCH_CONF": "AAA=BBB=CCC,DDD=EEE=FFF"}, {"launch": {"AAA": "BBB=CCC", "DDD": "EEE=FFF"}}),
        # Check both
        ({"launch": {"A": "B", "C": "D"}}, {"SPLUNK_LAUNCH_CONF": "A=E,C=D"}, {"launch": {"A": "E", "C": "D"}}),
    ]
)
def test_getLaunchConf(default_yml, os_env, output):
    """getLaunchConf parses SPLUNK_LAUNCH_CONF ("K=V,K=V") into the launch
    dict, merging over default.yml values with env taking precedence."""
    vars_scope = {"splunk": default_yml}
    with patch("environ.inventory") as mock_inven:
        with patch("os.environ", new=os_env):
            environ.getLaunchConf(vars_scope)
    assert vars_scope["splunk"] == output
@pytest.mark.parametrize(("value", "separator", "output"),
    [
        # Check null value
        (None, ",", []),
        # Check empty value
        ("", ",", []),
        # Check string value
        ("a", ",", ["a"]),
        # Check comma separated string value
        ("a,b,c", ",", ["a", "b", "c"]),
        # Check list value
        (["a"], ",", ["a"]),
        (["a", "b", "c"], ",", ["a", "b", "c"])
    ]
)
def test_ensureListValue(value, separator, output):
    """ensureListValue normalizes None/str/list inputs into a list,
    splitting strings on the given separator."""
    result = environ.ensureListValue(value, separator)
    assert result == output
@pytest.mark.parametrize(("value", "separator", "output"),
    [
        # Check null value
        (None, ",", []),
        # Check empty value
        ("", ",", []),
        # Check string value
        ("a", ",", ["a"]),
        # Check comma separated string value
        ("a,b,c", ",", ["a", "b", "c"]),
        # Check comma separated string value with whitespaces
        (" a, b,c ", ",", ["a", "b", "c"]),
    ]
)
def test_splitAndStrip(value, separator, output):
    """splitAndStrip splits a string on the separator and strips whitespace
    from each element; None/empty input yields an empty list."""
    result = environ.splitAndStrip(value, separator)
    assert result == output
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
    [
        # Check null parameters
        ({}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {}}),
        # Check ansible_pre_tasks using defaults or env vars
        ({"ansible_pre_tasks": ""}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {}}),
        ({"ansible_pre_tasks": None}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {}}),
        ({"ansible_pre_tasks": "a"}, {}, {"ansible_pre_tasks": ["a"], "ansible_post_tasks": [], "ansible_environment": {}}),
        ({"ansible_pre_tasks": ["a"]}, {}, {"ansible_pre_tasks": ["a"], "ansible_post_tasks": [], "ansible_environment": {}}),
        ({"ansible_pre_tasks": "a,b,c"}, {}, {"ansible_pre_tasks": ["a","b","c"], "ansible_post_tasks": [], "ansible_environment": {}}),
        ({"ansible_pre_tasks": ["a","b","c"]}, {}, {"ansible_pre_tasks": ["a","b","c"], "ansible_post_tasks": [], "ansible_environment": {}}),
        ({}, {"SPLUNK_ANSIBLE_PRE_TASKS": "d"}, {"ansible_pre_tasks": ["d"], "ansible_post_tasks": [], "ansible_environment": {}}),
        ({}, {"SPLUNK_ANSIBLE_PRE_TASKS": "e,f,g"}, {"ansible_pre_tasks": ["e","f","g"], "ansible_post_tasks": [], "ansible_environment": {}}),
        ({"ansible_pre_tasks": "a,b,c"}, {"SPLUNK_ANSIBLE_PRE_TASKS": "e,f,g"}, {"ansible_pre_tasks": ["e","f","g"], "ansible_post_tasks": [], "ansible_environment": {}}),
        ({"ansible_pre_tasks": ["a","b","c"]}, {"SPLUNK_ANSIBLE_PRE_TASKS": "e,f,g"}, {"ansible_pre_tasks": ["e","f","g"], "ansible_post_tasks": [], "ansible_environment": {}}),
        # Check ansible_post_tasks using defaults or env vars
        ({"ansible_post_tasks": ""}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {}}),
        ({"ansible_post_tasks": None}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {}}),
        ({"ansible_post_tasks": "a"}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": ["a"], "ansible_environment": {}}),
        ({"ansible_post_tasks": ["a"]}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": ["a"], "ansible_environment": {}}),
        ({"ansible_post_tasks": "a,b,c"}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": ["a","b","c"], "ansible_environment": {}}),
        ({"ansible_post_tasks": ["a","b","c"]}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": ["a","b","c"], "ansible_environment": {}}),
        ({}, {"SPLUNK_ANSIBLE_POST_TASKS": "d"}, {"ansible_pre_tasks": [], "ansible_post_tasks": ["d"], "ansible_environment": {}}),
        ({}, {"SPLUNK_ANSIBLE_POST_TASKS": "e,f,g"}, {"ansible_pre_tasks": [], "ansible_post_tasks": ["e","f","g"], "ansible_environment": {}}),
        ({"ansible_post_tasks": "a,b,c"}, {"SPLUNK_ANSIBLE_POST_TASKS": "e,f,g"}, {"ansible_pre_tasks": [], "ansible_post_tasks": ["e","f","g"], "ansible_environment": {}}),
        ({"ansible_post_tasks": ["a","b","c"]}, {"SPLUNK_ANSIBLE_POST_TASKS": "e,f,g"}, {"ansible_pre_tasks": [], "ansible_post_tasks": ["e","f","g"], "ansible_environment": {}}),
        # Check ansible_environment using defaults or env vars
        ({"ansible_environment": None}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {}}),
        ({"ansible_environment": {"a": "b"}}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {"a": "b"}}),
        ({"ansible_environment": {"a": "b", "d": "e"}}, {}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {"a": "b", "d": "e"}}),
        ({}, {"SPLUNK_ANSIBLE_ENV": "a=b"}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {"a": "b"}}),
        ({}, {"SPLUNK_ANSIBLE_ENV": "a=b,x=y"}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {"a": "b", "x": "y"}}),
        # Env entries are merged key-by-key over defaults (not a wholesale replace).
        ({"ansible_environment": {"a": "c", "d": "e"}}, {"SPLUNK_ANSIBLE_ENV": "a=b,x=y"}, {"ansible_pre_tasks": [], "ansible_post_tasks": [], "ansible_environment": {"a": "b", "d": "e", "x": "y"}}),
    ]
)
def test_getAnsibleContext(default_yml, os_env, output):
    """getAnsibleContext normalizes ansible_pre_tasks/ansible_post_tasks into
    lists and ansible_environment into a dict, with SPLUNK_ANSIBLE_* env
    vars taking precedence over default.yml."""
    vars_scope = default_yml
    with patch("environ.inventory") as mock_inven:
        with patch("os.environ", new=os_env):
            environ.getAnsibleContext(vars_scope)
    assert vars_scope == output
@pytest.mark.parametrize(("default_yml", "os_env", "splunk_asan"),
    [
        # Check null parameters
        ({}, {}, False),
        # Check default.yml parameters
        ({"asan": False}, {}, False),
        ({"asan": True}, {}, True),
        # Check env var parameters
        ({}, {"SPLUNK_ENABLE_ASAN": ""}, False),
        # Any non-empty env value enables ASan.
        ({}, {"SPLUNK_ENABLE_ASAN": "anything"}, True),
        # Check both
        ({"asan": False}, {"SPLUNK_ENABLE_ASAN": ""}, False),
        ({"asan": True}, {"SPLUNK_ENABLE_ASAN": ""}, False),
        ({"asan": True}, {"SPLUNK_ENABLE_ASAN": "true"}, True),
        ({"asan": False}, {"SPLUNK_ENABLE_ASAN": "yes"}, True),
    ]
)
def test_getASan(default_yml, os_env, splunk_asan):
    """getASan resolves the AddressSanitizer flag (env var overrides
    default.yml) and, when enabled, injects ASAN_OPTIONS=detect_leaks=0
    into ansible_environment."""
    vars_scope = {"ansible_environment": {}, "splunk": {}}
    vars_scope["splunk"] = default_yml
    with patch("environ.inventory") as mock_inven:
        with patch("os.environ", new=os_env):
            environ.getASan(vars_scope)
    assert vars_scope["splunk"]["asan"] == splunk_asan
    if vars_scope["splunk"]["asan"]:
        assert vars_scope["ansible_environment"].get("ASAN_OPTIONS") == "detect_leaks=0"
    else:
        assert vars_scope["ansible_environment"].get("ASAN_OPTIONS") == None
@pytest.mark.parametrize(("default_yml", "os_env", "result"),
    [
        # Check null parameters
        ({}, {}, {"enable": True, "port": 8088, "token": None, "ssl": True}),
        # Check default.yml parameters
        ({"enable": False}, {}, {"enable": False, "port": 8088, "token": None, "ssl": True}),
        ({"port": 8099}, {}, {"enable": True, "port": 8099, "token": None, "ssl": True}),
        ({"token": "abcd"}, {}, {"enable": True, "port": 8088, "token": "abcd", "ssl": True}),
        ({"ssl": False}, {}, {"enable": True, "port": 8088, "token": None, "ssl": False}),
        # Check env var parameters
        ({}, {"SPLUNK_HEC_TOKEN": "<PASSWORD>"}, {"enable": True, "port": 8088, "token": "qw<PASSWORD>", "ssl": True}),
        ({}, {"SPLUNK_HEC_PORT": "9999"}, {"enable": True, "port": 9999, "token": None, "ssl": True}),
        ({}, {"SPLUNK_HEC_SSL": "true"}, {"enable": True, "port": 8088, "token": None, "ssl": True}),
        ({}, {"SPLUNK_HEC_SSL": "false"}, {"enable": True, "port": 8088, "token": None, "ssl": False}),
        ({}, {"SPLUNK_HEC_SSL": "FALSE"}, {"enable": True, "port": 8088, "token": None, "ssl": False}),
        # Check both
        ({"port": 8099}, {"SPLUNK_HEC_PORT": "19999"}, {"enable": True, "port": 19999, "token": None, "ssl": True}),
        ({"token": "abcd"}, {"SPLUNK_HEC_TOKEN": "<PASSWORD>"}, {"enable": True, "port": 8088, "token": "fdsa", "ssl": True}),
        ({"ssl": True}, {"SPLUNK_HEC_SSL": "fAlSe"}, {"enable": True, "port": 8088, "token": None, "ssl": False}),
    ]
)
def test_getHEC(default_yml, os_env, result):
    """getHEC resolves HTTP Event Collector settings; SPLUNK_HEC_* env vars
    override default.yml, the port is coerced to int, and the ssl flag is
    parsed case-insensitively."""
    vars_scope = {"splunk": {}}
    # Seed the documented HEC defaults, then layer the per-case overrides.
    vars_scope["splunk"] = {
        "hec": {
            "enable": True,
            "port": 8088,
            "token": None,
            "ssl": True
        }
    }
    vars_scope["splunk"]["hec"].update(default_yml)
    with patch("environ.inventory") as mock_inven:
        with patch("os.environ", new=os_env):
            environ.getHEC(vars_scope)
    assert vars_scope["splunk"]["hec"] == result
@pytest.mark.parametrize(("default_yml", "os_env", "result"),
    [
        # Check null parameters
        ({}, {}, False),
        # # Check default.yml parameters
        ({"disable_popups": False}, {}, False),
        ({"disable_popups": True}, {}, True),
        # # Check env var parameters
        ({}, {"SPLUNK_DISABLE_POPUPS": "TRUE"}, True),
        ({}, {"SPLUNK_DISABLE_POPUPS": "true"}, True),
        ({}, {"SPLUNK_DISABLE_POPUPS": "True"}, True),
        ({}, {"SPLUNK_DISABLE_POPUPS": "false"}, False),
        ({}, {"SPLUNK_DISABLE_POPUPS": "False"}, False),
        ({}, {"SPLUNK_DISABLE_POPUPS": "FALSE"}, False),
        # # Check both
        ({"disable_popups": False}, {"SPLUNK_DISABLE_POPUPS": "TRUE"}, True),
        ({"disable_popups": False}, {"SPLUNK_DISABLE_POPUPS": "True"}, True),
        ({"disable_popups": True}, {"SPLUNK_DISABLE_POPUPS": "False"}, False),
        ({"disable_popups": True}, {"SPLUNK_DISABLE_POPUPS": "FALSE"}, False),
    ]
)
def test_getDisablePopups(default_yml, os_env, result):
    """disable_popups is seeded from default.yml and overridden by SPLUNK_DISABLE_POPUPS."""
    vars_scope = {"splunk": default_yml}
    with patch("environ.inventory"), patch("os.environ", new=os_env):
        environ.getDisablePopups(vars_scope)
    assert vars_scope["splunk"]["disable_popups"] == result
@pytest.mark.parametrize(("default_yml", "os_env", "result"),
    [
        # Check null parameters
        ({}, {}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        # Check default.yml parameters
        ({"enable": True}, {}, {"enable": True, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({"server": "fwd.dsp.com:8888"}, {}, {"enable": False, "server": "fwd.dsp.com:8888", "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({"cert": "path/to/cert.pem"}, {}, {"enable": False, "server": None, "cert": "path/to/cert.pem", "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({"verify": True}, {}, {"enable": False, "server": None, "cert": None, "verify": True, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({"pipeline_name": "abcd"}, {}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": "abcd", "pipeline_desc": None, "pipeline_spec": None}),
        ({"pipeline_desc": "abcd"}, {}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": "abcd", "pipeline_spec": None}),
        ({"pipeline_spec": "abcd"}, {}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": "abcd"}),
        # Check env var parameters
        ({}, {"SPLUNK_DSP_SERVER": "fwd.dsp.com:9999"}, {"enable": False, "server": "fwd.dsp.com:9999", "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({}, {"SPLUNK_DSP_CERT": "crt.pem"}, {"enable": False, "server": None, "cert": "crt.pem", "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({}, {"SPLUNK_DSP_VERIFY": "yes"}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({}, {"SPLUNK_DSP_VERIFY": "true"}, {"enable": False, "server": None, "cert": None, "verify": True, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({}, {"SPLUNK_DSP_VERIFY": "TRUE"}, {"enable": False, "server": None, "cert": None, "verify": True, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({}, {"SPLUNK_DSP_ENABLE": "yes"}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({}, {"SPLUNK_DSP_ENABLE": "true"}, {"enable": True, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({}, {"SPLUNK_DSP_ENABLE": "TRUE"}, {"enable": True, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({}, {"SPLUNK_DSP_PIPELINE_NAME": "do"}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": "do", "pipeline_desc": None, "pipeline_spec": None}),
        ({}, {"SPLUNK_DSP_PIPELINE_DESC": "re"}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": "re", "pipeline_spec": None}),
        ({}, {"SPLUNK_DSP_PIPELINE_SPEC": "mi"}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": "mi"}),
        # Check both
        ({"enable": True}, {"SPLUNK_DSP_ENABLE": "false"}, {"enable": True, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({"enable": False}, {"SPLUNK_DSP_ENABLE": "true"}, {"enable": True, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({"server": "fwd.dsp.com:8888"}, {"SPLUNK_DSP_SERVER": "fwd.dsp.com:9999"}, {"enable": False, "server": "fwd.dsp.com:9999", "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({"cert": "path1/crt.pem"}, {"SPLUNK_DSP_CERT": "path2/cert.pem"}, {"enable": False, "server": None, "cert": "path2/cert.pem", "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({"verify": True}, {"SPLUNK_DSP_VERIFY": "false"}, {"enable": False, "server": None, "cert": None, "verify": True, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({"verify": False}, {"SPLUNK_DSP_VERIFY": "TRUE"}, {"enable": False, "server": None, "cert": None, "verify": True, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": None}),
        ({"pipeline_name": "abcd"}, {"SPLUNK_DSP_PIPELINE_NAME": "xyz"}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": "xyz", "pipeline_desc": None, "pipeline_spec": None}),
        ({"pipeline_desc": "abcd"}, {"SPLUNK_DSP_PIPELINE_DESC": "xyz"}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": "xyz", "pipeline_spec": None}),
        ({"pipeline_spec": "abcd"}, {"SPLUNK_DSP_PIPELINE_SPEC": "xyz"}, {"enable": False, "server": None, "cert": None, "verify": False, "pipeline_name": None, "pipeline_desc": None, "pipeline_spec": "xyz"}),
    ]
)
def test_getDSP(default_yml, os_env, result):
    """DSP forwarding settings resolve from built-in defaults, then default.yml,
    then SPLUNK_DSP_* env vars.

    Per the table above, SPLUNK_DSP_ENABLE/SPLUNK_DSP_VERIFY only take effect
    when set exactly to "true" (case-insensitive) — "yes" is ignored, and env
    "false" does not override a truthy default.yml value for enable/verify.
    """
    # Seed every DSP key with its built-in default, then layer default.yml on top.
    vars_scope = {"splunk": {}}
    vars_scope["splunk"] = {
        "dsp": {
            "enable": False,
            "server": None,
            "cert": None,
            "verify": False,
            "pipeline_name": None,
            "pipeline_desc": None,
            "pipeline_spec": None,
        }
    }
    vars_scope["splunk"]["dsp"].update(default_yml)
    # inventory is patched out so getDSP only sees the supplied env mapping.
    with patch("environ.inventory") as mock_inven:
        with patch("os.environ", new=os_env):
            environ.getDSP(vars_scope)
    assert vars_scope["splunk"]["dsp"] == result
@pytest.mark.parametrize(("es_enablement", "os_env", "result"),
    [
        (None, {}, ""),
        (None, {"SPLUNK_ES_SSL_ENABLEMENT":"strict"}, "--ssl_enablement strict"),
        ({"ssl_enablement":"auto"}, {}, "--ssl_enablement auto"),
        ({"ssl_enablement":"strict"}, {}, "--ssl_enablement strict"),
        ({"ssl_enablement":"ignore"}, {}, "--ssl_enablement ignore"),
        ({"ssl_enablement":"ignore"}, {"SPLUNK_ES_SSL_ENABLEMENT":"strict"}, "--ssl_enablement strict"),
        ({"ssl_enablement":"invalid"}, {}, "Exception")
    ]
)
def test_getESSplunkVariables(es_enablement, os_env, result):
    """ES ssl_enablement resolves from default.yml with SPLUNK_ES_SSL_ENABLEMENT
    overriding it; invalid values must raise.

    Fix: the original wrapped both the call and the success-path assert in one
    try/except, so an AssertionError from a wrong value was swallowed by
    `except Exception` and the test could silently pass.  The expected-exception
    case is now asserted explicitly with pytest.raises.
    """
    vars_scope = {"splunk": {}}
    if es_enablement is not None:
        vars_scope["splunk"]["es"] = es_enablement
    with patch("environ.inventory"):
        with patch("os.environ", new=os_env):
            if result == "Exception":
                # Invalid ssl_enablement values must be rejected outright.
                with pytest.raises(Exception):
                    environ.getESSplunkVariables(vars_scope)
            else:
                environ.getESSplunkVariables(vars_scope)
                assert vars_scope["es_ssl_enablement"] == result
@pytest.mark.parametrize(("os_env", "license_master_url", "deployer_url", "cluster_master_url", "search_head_captain_url"),
    [
        ({}, "", "", "", ""),
        # Check individual environment variables
        ({"SPLUNK_LICENSE_MASTER_URL": "something"}, "https://something:8089", "", "", ""),
        ({"SPLUNK_DEPLOYER_URL": "something"}, "", "something", "", ""),
        ({"SPLUNK_CLUSTER_MASTER_URL": "something"}, "", "", "something", ""),
        ({"SPLUNK_SEARCH_HEAD_CAPTAIN_URL": "something"}, "", "", "", "something"),
    ]
)
def test_getDistributedTopology(os_env, license_master_url, deployer_url, cluster_master_url, search_head_captain_url):
    """Each SPLUNK_*_URL env var populates its matching topology key as a string."""
    vars_scope = {"splunk": {}}
    with patch("os.environ", new=os_env):
        environ.getDistributedTopology(vars_scope)
    expected = {
        "license_master_url": license_master_url,
        "deployer_url": deployer_url,
        "cluster_master_url": cluster_master_url,
        "search_head_captain_url": search_head_captain_url,
    }
    # Every key must be present as a str, even when its env var is unset.
    for key, value in expected.items():
        assert type(vars_scope["splunk"][key]) == str
        assert vars_scope["splunk"][key] == value
@pytest.mark.parametrize(("default_yml", "os_env", "license_uri", "wildcard_license", "ignore_license", "license_download_dest"),
    [
        ({}, {}, "splunk.lic", False, False, "/tmp/splunk.lic"),
        # Check individual environment variables
        ({}, {"SPLUNK_LICENSE_URI": "http://web/license.lic"}, "http://web/license.lic", False, False, "/tmp/splunk.lic"),
        ({}, {"SPLUNK_LICENSE_URI": "/mnt/*.lic"}, "/mnt/*.lic", True, False, "/tmp/splunk.lic"),
        ({}, {"SPLUNK_NFR_LICENSE": "/mnt/nfr.lic"}, "splunk.lic", False, False, "/tmp/splunk.lic"),
        ({}, {"SPLUNK_IGNORE_LICENSE": ""}, "splunk.lic", False, False, "/tmp/splunk.lic"),
        ({}, {"SPLUNK_IGNORE_LICENSE": "true"}, "splunk.lic", False, True, "/tmp/splunk.lic"),
        ({}, {"SPLUNK_IGNORE_LICENSE": "TRUE"}, "splunk.lic", False, True, "/tmp/splunk.lic"),
        ({}, {"SPLUNK_IGNORE_LICENSE": "false"}, "splunk.lic", False, False, "/tmp/splunk.lic"),
        ({}, {"SPLUNK_LICENSE_INSTALL_PATH": "/Downloads/"}, "splunk.lic", False, False, "/Downloads/"),
        # Check default.yml
        ({"license_uri": None}, {}, "splunk.lic", False, False, "/tmp/splunk.lic"),
        ({"license_uri": ""}, {}, "splunk.lic", False, False, "/tmp/splunk.lic"),
        ({"license_uri": "http://web/license.lic"}, {}, "http://web/license.lic", False, False, "/tmp/splunk.lic"),
        ({"license_uri": "/mnt/*.lic"}, {}, "/mnt/*.lic", True, False, "/tmp/splunk.lic"),
        ({"license_uri": "/mnt/nfr.lic"}, {}, "/mnt/nfr.lic", False, False, "/tmp/splunk.lic"),
        ({"license_uri": "/mnt/1.lic"}, {"SPLUNK_LICENSE_URI": "/mnt/2.lic"}, "/mnt/2.lic", False, False, "/tmp/splunk.lic"),
        ({"license_download_dest": None}, {}, "splunk.lic", False, False, "/tmp/splunk.lic"),
        ({"license_download_dest": ""}, {}, "splunk.lic", False, False, "/tmp/splunk.lic"),
        ({"license_download_dest": "/Downloads/splunk.lic"}, {}, "splunk.lic", False, False, "/Downloads/splunk.lic"),
        ({"license_download_dest": "/Downloads/splunk.lic"}, {"SPLUNK_LICENSE_INSTALL_PATH": "/mnt/license.file"}, "splunk.lic", False, False, "/mnt/license.file"),
    ]
)
def test_getLicenses(default_yml, os_env, license_uri, wildcard_license, ignore_license, license_download_dest):
    """License settings: env vars beat default.yml; a URI containing '*' flips
    wildcard_license; SPLUNK_IGNORE_LICENSE must be "true" (case-insensitive)
    to set ignore_license; license_uri falls back to "splunk.lic" when unset.
    """
    vars_scope = {"splunk": default_yml}
    with patch("os.environ", new=os_env):
        environ.getLicenses(vars_scope)
    assert vars_scope["splunk"]["license_uri"] == license_uri
    # wildcard_license / ignore_license must be real booleans, not strings.
    assert type(vars_scope["splunk"]["wildcard_license"]) == bool
    assert vars_scope["splunk"]["wildcard_license"] == wildcard_license
    assert type(vars_scope["splunk"]["ignore_license"]) == bool
    assert vars_scope["splunk"]["ignore_license"] == ignore_license
    assert vars_scope["splunk"]["license_download_dest"] == license_download_dest
@pytest.mark.parametrize(("default_yml", "os_env", "java_version", "java_download_url", "java_update_version"),
    [
        ({}, {}, None, None, None),
        # Check environment variable parameters
        ({}, {"JAVA": "oracle:8"}, None, None, None),
        ({}, {"JAVA_VERSION": "openjdk:8"}, "openjdk:8", None, None),
        ({}, {"JAVA_VERSION": "openjdk:9"}, "openjdk:9", None, None),
        ({}, {"JAVA_VERSION": "oracle:8"}, "oracle:8", "https://download.oracle.com/otn-pub/java/jdk/8u141-b15/336fa29ff2bb4ef291e347e091f7f4a7/jdk-8u141-linux-x64.tar.gz", "141"),
        ({}, {"JAVA_VERSION": "ORACLE:8"}, "oracle:8", "https://download.oracle.com/otn-pub/java/jdk/8u141-b15/336fa29ff2bb4ef291e347e091f7f4a7/jdk-8u141-linux-x64.tar.gz", "141"),
        ({}, {"JAVA_VERSION": "openjdk:11"}, "openjdk:11", "https://download.java.net/java/GA/jdk11/9/GPL/openjdk-11.0.2_linux-x64_bin.tar.gz", "11.0.2"),
        ({}, {"JAVA_VERSION": "oPenJdK:11"}, "openjdk:11", "https://download.java.net/java/GA/jdk11/9/GPL/openjdk-11.0.2_linux-x64_bin.tar.gz", "11.0.2"),
        ({}, {"JAVA_VERSION": "oracle:8", "JAVA_DOWNLOAD_URL": "https://java/jdk-8u9000-linux-x64.tar.gz"}, "oracle:8", "https://java/jdk-8u9000-linux-x64.tar.gz", "9000"),
        ({}, {"JAVA_VERSION": "openjdk:11", "JAVA_DOWNLOAD_URL": "https://java/openjdk-11.11.11_linux-x64_bin.tar.gz"}, "openjdk:11", "https://java/openjdk-11.11.11_linux-x64_bin.tar.gz", "11.11.11"),
        # Check default.yml
        ({"java_version": "openjdk:11"}, {}, "openjdk:11", None, None),
        ({"java_download_url": "http://web/java.tgz"}, {}, None, "http://web/java.tgz", None),
        ({"java_update_version": "jdk11u141"}, {}, None, None, "jdk11u141"),
        # Check order of precedence
        ({"java_version": "openjdk:9", "java_download_url": "http://web/java.tgz", "java_update_version": "jdk11u141"}, {"JAVA_VERSION": "oPenJdK:11"}, "openjdk:11", "https://download.java.net/java/GA/jdk11/9/GPL/openjdk-11.0.2_linux-x64_bin.tar.gz", "11.0.2"),
    ]
)
def test_getJava(default_yml, os_env, java_version, java_update_version_url=None, java_update_version=None):
    """Java settings: JAVA_VERSION (case-insensitive, value lowercased) drives
    version/download-url/update-version resolution; JAVA_DOWNLOAD_URL overrides
    the default URL and the update version is parsed from the URL's filename.
    Env vars take precedence over default.yml (last table row)."""
@pytest.mark.parametrize(("os_env", "java_version", "java_download_url", "err_msg"),
    [
        ({"JAVA_VERSION": "oracle:3"}, None, None, "Invalid Java version supplied"),
        ({"JAVA_VERSION": "openjdk:20"}, None, None, "Invalid Java version supplied"),
        ({"JAVA_VERSION": "oracle:8", "JAVA_DOWNLOAD_URL": "https://java/jdk-8u9000.tar.gz"}, "oracle:8", "https://java/jdk-8u9000.tar.gz", "Invalid Java download URL format"),
        ({"JAVA_VERSION": "openjdk:11", "JAVA_DOWNLOAD_URL": "https://java/openjdk-11.tar.gz"}, "openjdk:11", "https://java/openjdk-11.tar.gz", "Invalid Java download URL format"),
    ]
)
def test_getJava_exception(os_env, java_version, java_download_url, err_msg):
    """Unsupported Java versions and malformed download URLs must raise.

    Fix: replaced the try/`assert False`/except pattern with pytest.raises —
    the old form caught its own AssertionError and asserted a no-op
    `assert True`, obscuring which exception was actually inspected.
    """
    vars_scope = {"splunk": {}}
    with patch("os.environ", new=os_env):
        with pytest.raises(Exception, match=err_msg):
            environ.getJava(vars_scope)
    # State written before the failure point is still expected to be present.
    assert vars_scope["java_version"] == java_version
    assert vars_scope["java_download_url"] == java_download_url
    assert vars_scope["java_update_version"] is None
@pytest.mark.parametrize(("default_yml", "os_env", "build", "build_url_bearer_token"),
    [
        ({}, {}, None, None),
        # Check default.yml parameters
        ({"buildlocation": "http://server/file.tgz"}, {}, None, None),
        ({"build_location": None}, {}, None, None),
        ({"build_location": ""}, {}, "", None),
        ({"build_location": "/path/to/file.tgz"}, {}, "/path/to/file.tgz", None),
        ({"build_location": "http://server/file.tgz"}, {}, "http://server/file.tgz", None),
        ({"build_location": "https://server/file.tgz"}, {}, "https://server/file.tgz", None),
        # Check environment variable parameters
        ({}, {"SPLUNK_BUILD": "http://server/file.tgz"}, None, None),
        ({}, {"SPLUNK_BUILD_URL": None}, None, None),
        ({}, {"SPLUNK_BUILD_URL": ""}, "", None),
        ({}, {"SPLUNK_BUILD_URL": "/path/to/file.tgz", "SPLUNK_BUILD_URL_BEARER_TOKEN": "testToken"}, "/path/to/file.tgz", "testToken"),
        ({}, {"SPLUNK_BUILD_URL": "http://server/file.tgz", "SPLUNK_BUILD_URL_BEARER_TOKEN": "testToken"}, "http://server/file.tgz", "testToken"),
        ({}, {"SPLUNK_BUILD_URL": "https://server/file.tgz", "SPLUNK_BUILD_URL_BEARER_TOKEN": "testToken"}, "https://server/file.tgz", "testToken"),
        # Check order of precedence
        ({"build_location": "http://server/file1.tgz"}, {"SPLUNK_BUILD_URL": "https://server/file2.tgz"}, "https://server/file2.tgz", None),
        ({"build_location": "http://server/file1.tgz"}, {"SPLUNK_BUILD_URL": "/path/to/file.tgz"}, "/path/to/file.tgz", None),
    ]
)
def test_getSplunkBuild(default_yml, os_env, build, build_url_bearer_token):
    """build_location comes from default.yml, overridden by SPLUNK_BUILD_URL;
    the bearer token is read only from SPLUNK_BUILD_URL_BEARER_TOKEN."""
    vars_scope = {"splunk": default_yml}
    with patch("os.environ", new=os_env):
        environ.getSplunkBuild(vars_scope)
    splunk_vars = vars_scope["splunk"]
    assert splunk_vars["build_location"] == build
    assert splunk_vars["build_url_bearer_token"] == build_url_bearer_token
@pytest.mark.parametrize(("default_yml", "response_content", "trigger_splunkbase"),
    [
        ({}, "<id>123abc</id>", False),
        ({"splunkbase_username": "ocho"}, "<id>123abc</id>", False),
        ({"splunkbase_password": "<PASSWORD>"}, "<id>123abc</id>", False),
        ({"splunkbase_username": "ocho", "splunkbase_password": "<PASSWORD>"}, "<id>123abc</id>", True),
        ({"splunkbase_username": "", "splunkbase_password": ""}, "<id>123abc</id>", False),
        ({}, "<id>123abc</id>", False),
        ({"splunkbase_username": "ocho"}, b"<id>123abc</id>", False),
        ({"splunkbase_password": "<PASSWORD>"}, b"<id>123abc</id>", False),
        ({"splunkbase_username": "ocho", "splunkbase_password": "<PASSWORD>"}, b"<id>123abc</id>", True),
        ({"splunkbase_username": "", "splunkbase_password": ""}, b"<id>123abc</id>", False),
    ]
)
def test_getSplunkbaseToken(default_yml, response_content, trigger_splunkbase):
    """The Splunkbase login endpoint is hit only when BOTH non-empty username
    and password are present; both str and bytes response bodies are accepted.
    The API token is parsed out of the <id>...</id> element of the response.
    """
    vars_scope = default_yml
    # Stub out the HTTP call so no real network request is made.
    with patch("environ.requests.post") as mock_post:
        mock_post.return_value = MagicMock(status_code=200, content=response_content)
        with patch("os.environ", new=dict()):
            environ.getSplunkbaseToken(vars_scope)
    # Make sure Splunkbase token is populated when appropriate
    assert "splunkbase_token" in vars_scope
    assert "splunkbase_username" in vars_scope
    assert "splunkbase_password" in vars_scope
    if trigger_splunkbase:
        # Credentials present: exactly one login POST with form-encoded creds.
        mock_post.assert_called_with("https://splunkbase.splunk.com/api/account:login/", data={"username": "ocho", "password": "<PASSWORD>"})
        assert vars_scope.get("splunkbase_token") == "<PASSWORD>"
    else:
        # Missing/empty credentials: no network call, token stays falsy.
        mock_post.assert_not_called()
        assert not vars_scope.get("splunkbase_token")
def test_getSplunkbaseToken_exception():
    """A non-200 response from the Splunkbase login endpoint must raise.

    Fix: replaced the try/`assert False`/except pattern with pytest.raises —
    the old form caught its own AssertionError, so the message check ran
    against the wrong exception when no error was raised.
    """
    with patch("environ.requests.post") as mock_post:
        mock_post.return_value = MagicMock(status_code=400, content="error")
        with pytest.raises(Exception, match="Invalid Splunkbase credentials"):
            environ.getSplunkbaseToken({"splunkbase_username": "ocho", "splunkbase_password": "<PASSWORD>"})
@pytest.mark.parametrize(("default_yml", "os_env", "apps_count"),
    [
        # Check null parameters
        ({}, {}, 0),
        # Check default.yml parameters
        ({"app_location": []}, {}, 0),
        ({"app_location": ["a"]}, {}, 0),
        ({"app_location": ["a", "b", "c"]}, {}, 0),
        ({"apps_location": []}, {}, 0),
        ({"apps_location": ["a"]}, {}, 1),
        ({"apps_location": ["a", "b", "c"]}, {}, 3),
        ({"apps_location": "a"}, {}, 1),
        ({"apps_location": "a,b,c,d"}, {}, 4),
        # Check environment variable parameters
        ({}, {"SPLUNK_APPS": None}, 0),
        ({}, {"SPLUNK_APPS": "hi"}, 0),
        ({}, {"SPLUNK_APPS_URL": "hi"}, 1),
        ({}, {"SPLUNK_APPS_URL": "a,b,ccccc,dd"}, 4),
        # Check the union combination of default.yml + environment variables
        ### Invalid 'app_location' variable name in default.yml
        ({"app_location": []}, {"SPLUNK_APPS_URL": None}, 0),
        ({"app_location": ["a"]}, {"SPLUNK_APPS_URL": "a"}, 1),
        ({"app_location": ["a", "b", "c"]}, {"SPLUNK_APPS_URL": "a,bb"}, 2),
        ### Invalid 'SPLUNK_APP_URL' variable name in env vars
        ({"apps_location": ["x"]}, {"SPLUNK_APP_URL": "a"}, 1),
        ({"apps_location": ["x", "y"]}, {"SPLUNK_APP_URL": "a,bb"}, 2),
        ({"apps_location": "x,y,z"}, {"SPLUNK_APP_URL": "a,bb"}, 3),
        ### Correct variable names
        ({"apps_location": ["x"]}, {"SPLUNK_APPS_URL": "a"}, 2),
        ({"apps_location": ["x", "y"]}, {"SPLUNK_APPS_URL": "a,bb"}, 4),
        ({"apps_location": "x,y,z"}, {"SPLUNK_APPS_URL": "a,bb"}, 5),
        ### Only return unique set of apps
        ({"apps_location": ["x"]}, {"SPLUNK_APPS_URL": "x"}, 1),
        ({"apps_location": ["x", "y"]}, {"SPLUNK_APPS_URL": "a,bb,y"}, 4),
        ({"apps_location": "x,y,z"}, {"SPLUNK_APPS_URL": "x,yy,a,z"}, 5),
    ]
)
def test_getSplunkApps(default_yml, os_env, apps_count):
    """apps_location is the deduplicated union of default.yml 'apps_location'
    (list or comma-separated string) and the SPLUNK_APPS_URL env var; the
    misspelled 'app_location' / 'SPLUNK_APP_URL' names are ignored, and the
    result is always normalized to a list."""
    vars_scope = dict()
    vars_scope["splunk"] = default_yml
    with patch("os.environ", new=os_env):
        environ.getSplunkApps(vars_scope)
    assert type(vars_scope["splunk"]["apps_location"]) == list
    assert len(vars_scope["splunk"]["apps_location"]) == apps_count
@pytest.mark.parametrize(("default_yml", "os_env", "key", "value"),
    [
        # Check cert_prefix
        ({}, {}, "cert_prefix", "https"),
        ({"cert_prefix": "http"}, {}, "cert_prefix", "http"),
        ({}, {"SPLUNK_CERT_PREFIX": "fakehttps"}, "cert_prefix", "fakehttps"),
        # Check splunk.user
        ({"splunk": {"user": "root"}}, {}, "splunk.user", "root"),
        ({}, {"SPLUNK_USER": "root"}, "splunk.user", "root"),
        # Check splunk.group
        ({"splunk": {"group": "root"}}, {}, "splunk.group", "root"),
        ({}, {"SPLUNK_GROUP": "root"}, "splunk.group", "root"),
        # Check splunk.root_endpoint
        ({"splunk": {"root_endpoint": "/splunk"}}, {}, "splunk.root_endpoint", "/splunk"),
        ({}, {"SPLUNK_ROOT_ENDPOINT": "/splk"}, "splunk.root_endpoint", "/splk"),
        # Check splunk.svc_port
        ({"splunk": {"svc_port": "9089"}}, {}, "splunk.svc_port", "9089"),
        ({}, {"SPLUNK_SVC_PORT": "8189"}, "splunk.svc_port", "8189"),
        # Check splunk.s2s.port
        ({"splunk": {"s2s": {"port": "9999"}}}, {}, "splunk.s2s.port", 9999),
        ({}, {"SPLUNK_S2S_PORT": "9991"}, "splunk.s2s.port", 9991),
        # Check splunk.enable_service
        ({"splunk": {"enable_service": "yes"}}, {}, "splunk.enable_service", "yes"),
        ({}, {"SPLUNK_ENABLE_SERVICE": "no"}, "splunk.enable_service", "no"),
        # Check splunk.service_name
        ({"splunk": {"service_name": "SpLuNkD"}}, {}, "splunk.service_name", "SpLuNkD"),
        ({}, {"SPLUNK_SERVICE_NAME": "sPlUnKd"}, "splunk.service_name", "sPlUnKd"),
        # Check splunk.allow_upgrade
        ({"splunk": {"allow_upgrade": "yes"}}, {}, "splunk.allow_upgrade", "yes"),
        ({}, {"SPLUNK_ALLOW_UPGRADE": "no"}, "splunk.allow_upgrade", "no"),
        # Check splunk.set_search_peers
        ({"splunk": {"set_search_peers": False}}, {}, "splunk.set_search_peers", False),
        ({}, {"SPLUNK_SET_SEARCH_PEERS": "False"}, "splunk.set_search_peers", False),
        ({"splunk": {"set_search_peers": True}}, {"SPLUNK_SET_SEARCH_PEERS": "False"}, "splunk.set_search_peers", False),
        # Check splunk.appserver.port
        ({"splunk": {"appserver": {"port": "9291"}}}, {}, "splunk.appserver.port", "9291"),
        ({}, {"SPLUNK_APPSERVER_PORT": "9391"}, "splunk.appserver.port", "9391"),
        # Check splunk.kvstore.port
        ({"splunk": {"kvstore" :{"port": "9165"}}}, {}, "splunk.kvstore.port", "9165"),
        ({}, {"SPLUNK_KVSTORE_PORT": "9265"}, "splunk.kvstore.port", "9265"),
        # Check splunk.connection_timeout
        ({"splunk": {"connection_timeout": 60}}, {}, "splunk.connection_timeout", 60),
        ({}, {"SPLUNK_CONNECTION_TIMEOUT": 200}, "splunk.connection_timeout", 200),
    ]
)
def test_overrideEnvironmentVars(default_yml, os_env, key, value):
    """overrideEnvironmentVars applies SPLUNK_* env vars on top of a merged
    baseline + default.yml scope.

    `key` is a dotted path into vars_scope ("cert_prefix", "splunk.<k>", or
    "splunk.<section>.port" for the s2s/appserver/kvstore sub-dicts); the
    assert logic below splits it accordingly.
    """
    # Baseline vars with every key overrideEnvironmentVars may touch.
    vars_scope = {
        "ansible_pre_tasks": None,
        "ansible_post_tasks": None,
        "cert_prefix": "https",
        "splunk": {
            "user": "splunk",
            "group": "splunk",
            "root_endpoint": None,
            "svc_port": 8089,
            "s2s": {"port": 9997},
            "appserver": {"port": 8065},
            "kvstore": {"port": 8191},
            "hec_token": "<KEY>",
            "enable_service": False,
            "service_name": "Splunkd",
            "allow_upgrade": True,
            "asan": None,
            "set_search_peers": True,
            "connection_timeout": 0,
        }
    }
    # TODO: Possibly remove the dependency on merge_dict() in this test
    environ.merge_dict(vars_scope, default_yml)
    with patch("os.environ", new=os_env):
        environ.overrideEnvironmentVars(vars_scope)
    # Resolve the dotted key path and compare against the expected value.
    if "splunk" in key:
        if "s2s" in key or "appserver" in key or "kvstore" in key:
            section, key = key.split(".")[-2:]
            assert vars_scope["splunk"][section][key] == value
        else:
            key = key.split(".")[-1]
            assert vars_scope["splunk"][key] == value
    else:
        assert vars_scope[key] == value
@pytest.mark.parametrize(("default_yml", "os_env", "output"),
    [
        # Check null parameters
        ({}, {}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        # Check default.yml parameters
        ({"dfs": {"enable": True}}, {}, {"enable": True, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({"dfs": {"dfw_num_slots": 20}}, {}, {"enable": False, "dfw_num_slots": 20, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({"dfs": {"dfw_num_slots": "15"}}, {}, {"enable": False, "dfw_num_slots": 15, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({"dfs": {"dfc_num_slots": 20}}, {}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 20, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({"dfs": {"dfc_num_slots": "15"}}, {}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 15, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({"dfs": {"dfw_num_slots_enabled": True}}, {}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": True, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({"dfs": {"spark_master_host": "10.0.0.1"}}, {}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "10.0.0.1", "spark_master_webui_port": 8080}),
        ({"dfs": {"spark_master_webui_port": 8081}}, {}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8081}),
        ({"dfs": {"spark_master_webui_port": "8082"}}, {}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8082}),
        # Check environment variable parameters
        ({}, {"SPLUNK_ENABLE_DFS": ""}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({}, {"SPLUNK_ENABLE_DFS": "true"}, {"enable": True, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({}, {"SPLUNK_ENABLE_DFS": "TRUE"}, {"enable": True, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({}, {"SPLUNK_DFW_NUM_SLOTS": "11"}, {"enable": False, "dfw_num_slots": 11, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({}, {"SPLUNK_DFC_NUM_SLOTS": "1"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 1, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({}, {"SPLUNK_DFW_NUM_SLOTS_ENABLED": ""}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({}, {"SPLUNK_DFW_NUM_SLOTS_ENABLED": "true"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": True, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({}, {"SPLUNK_DFW_NUM_SLOTS_ENABLED": "TRUE"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": True, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({}, {"SPARK_MASTER_HOST": "8.8.8.8"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "8.8.8.8", "spark_master_webui_port": 8080}),
        ({}, {"SPARK_MASTER_WEBUI_PORT": "8888"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8888}),
        # Check the union combination of default.yml + environment variables and order of precedence when overwriting
        ({"dfs": {"enable": False}}, {"SPLUNK_ENABLE_DFS": "true"}, {"enable": True, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({"dfs": {"dfw_num_slots": 100}}, {"SPLUNK_DFW_NUM_SLOTS": "101"}, {"enable": False, "dfw_num_slots": 101, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({"dfs": {"dfc_num_slots": 100}}, {"SPLUNK_DFC_NUM_SLOTS": "101"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 101, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({"dfs": {"dfw_num_slots_enabled": False}}, {"SPLUNK_DFW_NUM_SLOTS_ENABLED": "True"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": True, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8080}),
        ({"dfs": {"spark_master_host": "10.0.0.1"}}, {"SPARK_MASTER_HOST": "8.8.8.8"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "8.8.8.8", "spark_master_webui_port": 8080}),
        ({"dfs": {"spark_master_webui_port": 8082}}, {"SPARK_MASTER_WEBUI_PORT": "8888"}, {"enable": False, "dfw_num_slots": 10, "dfc_num_slots": 4, "dfw_num_slots_enabled": False, "spark_master_host": "127.0.0.1", "spark_master_webui_port": 8888}),
    ]
)
def test_getDFS(default_yml, os_env, output):
    """DFS settings resolve from built-in defaults, then default.yml's 'dfs'
    section, then SPLUNK_*/SPARK_* env vars; numeric values are normalized to
    int and boolean flags to bool (string "15" -> 15, "true"/"TRUE" -> True).
    """
    vars_scope = dict()
    vars_scope["splunk"] = default_yml
    with patch("os.environ", new=os_env):
        environ.getDFS(vars_scope)
    # Check typing
    assert type(vars_scope["splunk"]["dfs"]["enable"]) == bool
    assert type(vars_scope["splunk"]["dfs"]["dfw_num_slots"]) == int
    assert type(vars_scope["splunk"]["dfs"]["dfc_num_slots"]) == int
    assert type(vars_scope["splunk"]["dfs"]["dfw_num_slots_enabled"]) == bool
    assert type(vars_scope["splunk"]["dfs"]["spark_master_webui_port"]) == int
    assert vars_scope["splunk"]["dfs"] == output
@pytest.mark.parametrize(("os_env", "deployment_server", "add", "before_start_cmd", "cmd"),
    [
        ({}, None, None, None, None),
        # Check environment variable parameters
        ({"SPLUNK_DEPLOYMENT_SERVER": ""}, None, None, None, None),
        ({"SPLUNK_DEPLOYMENT_SERVER": "something"}, "something", None, None, None),
        ({"SPLUNK_ADD": ""}, None, None, None, None),
        ({"SPLUNK_ADD": "echo 1"}, None, ["echo 1"], None, None),
        ({"SPLUNK_ADD": "echo 1,echo 2"}, None, ["echo 1", "echo 2"], None, None),
        ({"SPLUNK_BEFORE_START_CMD": ""}, None, None, None, None),
        ({"SPLUNK_BEFORE_START_CMD": "echo 1"}, None, None, ["echo 1"], None),
        ({"SPLUNK_BEFORE_START_CMD": "echo 1,echo 2"}, None, None, ["echo 1", "echo 2"], None),
        ({"SPLUNK_CMD": ""}, None, None, None, None),
        ({"SPLUNK_CMD": "echo 1"}, None, None, None, ["echo 1"]),
        ({"SPLUNK_CMD": "echo 1,echo 2"}, None, None, None, ["echo 1", "echo 2"]),
    ]
)
def test_getUFSplunkVariables(os_env, deployment_server, add, before_start_cmd, cmd):
    """UF env vars: comma-separated SPLUNK_ADD/SPLUNK_BEFORE_START_CMD/SPLUNK_CMD
    become lists; empty values leave the key unset."""
    vars_scope = {"splunk": {}}
    with patch("os.environ", new=os_env):
        environ.getUFSplunkVariables(vars_scope)
    splunk_vars = vars_scope["splunk"]
    assert splunk_vars.get("deployment_server") == deployment_server
    assert splunk_vars.get("add") == add
    assert splunk_vars.get("before_start_cmd") == before_start_cmd
    assert splunk_vars.get("cmd") == cmd
def test_getRandomString():
    """Generated random strings are always exactly 6 characters long."""
    assert len(environ.getRandomString()) == 6
@pytest.mark.parametrize(("url", "vars_scope", "output"),
    [
        ("licmaster", {"splunk": {}}, "https://licmaster:8089"),
        ("http://licmaster", {"splunk": {}}, "http://licmaster:8089"),
        ("licmaster:8081", {"splunk": {}}, "https://licmaster:8081"),
        ("http://licmaster:80", {"splunk": {}}, "http://licmaster:80"),
        ("ftp://licmaster.corp.net:3333", {"splunk": {}}, "ftp://licmaster.corp.net:3333"),
        ("username:<EMAIL>", {"splunk": {}}, "https://lm.internal.net:8089"),
        ("http://username:password@lm.internal.net:3333", {"splunk": {}}, "http://lm.internal.net:3333"),
        # Check null input
        ("", {"splunk": {}}, ""),
        (None, {"splunk": {}}, ""),
        # Check vars_scope overrides
        ("licmaster", {"cert_prefix": "http", "splunk": {"svc_port": 18089}}, "http://licmaster:18089"),
        ("https://licmaster", {"cert_prefix": "http", "splunk": {"svc_port": 18089}}, "https://licmaster:18089"),
        ("licmaster:28089", {"cert_prefix": "http", "splunk": {"svc_port": 18089}}, "http://licmaster:28089"),
        ("https://licmaster:38089", {"cert_prefix": "http", "splunk": {"svc_port": 18089}}, "https://licmaster:38089"),
    ]
)
def test_parseUrl(url, vars_scope, output):
    """parseUrl normalizes a host/URL: fills in missing scheme (from
    cert_prefix) and port (from splunk.svc_port), and strips credentials."""
    assert environ.parseUrl(url, vars_scope) == output
@pytest.mark.parametrize(("dict1", "dict2", "result"),
    [
        # Check dicts
        ({}, {"a": 2}, {"a": 2}),
        ({"b": 2}, {"a": 2}, {"a": 2, "b": 2}),
        ({"a": 1, "b": 2}, {"a": 2}, {"a": 2, "b": 2}),
        ({"a": 0}, {"a": 1}, {"a": 1}),
        ({"a": 1}, {"b": 2, "c": 3}, {"a": 1, "b": 2, "c": 3}),
        # Check arrays
        ({}, {"a": []}, {"a": []}),
        ({}, {"a": [1, 2]}, {"a": [1, 2]}),
        ({"b": [0]}, {"a": [1]}, {"a": [1], "b": [0]}),
        ({"a": [0]}, {"a": [1]}, {"a": [0, 1]}),
        # Check nested dict output
        ({"nested": {}}, {"nested": {"a": 1}}, {"nested": {"a": 1}}),
        ({"nested": {"a": 1}}, {"nested": {"b": 2}}, {"nested": {"a": 1, "b": 2}}),
        ({"nested": {"a": 1, "c": 3}}, {"nested": {"b": 2}}, {"nested": {"a": 1, "b": 2, "c": 3}}),
        ({"nested": {"a": 1, "b": 3}}, {"nested": {"b": 2}}, {"nested": {"a": 1, "b": 2}}),
        # Check nested with diff value types
        ({"nested": {"x": 1}}, {"nested": {"x": {"a": 1}}}, {"nested": {"x": {"a": 1}}}),
        ({"nested": {"x": {"a": 1}}}, {"nested": {"x": 1}}, {"nested": {"x": 1}}),
        # Check nested arrays
        ({"nested": {"array": []}}, {"nested": {"array": [1]}}, {"nested": {"array": [1]}}),
        ({"nested": {"array": [1, 2, 3]}}, {"nested": {"array": []}}, {"nested": {"array": [1, 2, 3]}}),
        ({"nested": {"array": [1, 2]}}, {"nested": {"array": [3, 4, 5]}}, {"nested": {"array": [1, 2, 3, 4, 5]}}),
        ({"nested": {"x": 10, "array": [1, 2]}}, {"nested": {"y": 20, "array": [3, 4, 5]}}, {"nested": {"x": 10, "y": 20, "array": [1, 2, 3, 4, 5]}}),
        # Targeted github bug
        ({"splunk": {"conf": [{"key": "fileA", "content": {"a": "b", "c": "d"}}]}}, {"splunk": {"conf": [{"key": "fileB", "content": {"e": "f", "g": "h"}}]}}, {"splunk": {"conf": [{"key": "fileA", "content": {"a": "b", "c": "d"}}, {"key": "fileB", "content": {"e": "f", "g": "h"}}]}}),
    ]
)
def test_merge_dict(dict1, dict2, result):
    """merge_dict() deep-merges dict2 onto dict1: scalar values from dict2
    win, nested dicts are merged recursively, and lists are concatenated
    (dict1's elements first)."""
    output = environ.merge_dict(dict1, dict2)
    assert output == result
@pytest.mark.parametrize(("source", "merge_url_called", "merge_file_called"),
    [
        # Blank/whitespace sources should trigger neither merge path.
        (None, False, False),
        ("", False, False),
        (" ", False, False),
        # http(s) URLs go through the URL merger ...
        ("http://web/default.yml", True, False),
        ("https://web/default.yml", True, False),
        # ... everything else (file:// URLs and plain paths) through the file merger.
        ("file:///path/to/default.yml", False, True),
        ("/path/to/default.yml", False, True),
        ("rel/path/to/default.yml", False, True),
    ]
)
def test_mergeDefaults(source, merge_url_called, merge_file_called):
    """mergeDefaults() dispatches to mergeDefaultsFromURL for http(s)
    sources and to mergeDefaultsFromFile otherwise; blank sources call
    neither."""
    with patch("environ.mergeDefaultsFromFile") as mock_merge_file:
        with patch("environ.mergeDefaultsFromURL") as mock_merge_url:
            result = environ.mergeDefaults({"hello": "world"}, "foobar", source)
    if merge_url_called:
        mock_merge_url.assert_called_once()
        mock_merge_file.assert_not_called()
    else:
        mock_merge_url.assert_not_called()
    if merge_file_called:
        mock_merge_file.assert_called_once()
        mock_merge_url.assert_not_called()
    else:
        mock_merge_file.assert_not_called()
@pytest.mark.parametrize(("key"),
    [
        ("FOO"),
        ("BAR"),
        ("BAZ"),
    ]
)
def test_mergeDefaults_url_with_req_params(key):
    """When the config carries per-key request settings, mergeDefaults()
    must forward that key's headers and TLS-verify flag to
    mergeDefaultsFromURL; unknown keys fall back to (None, False)."""
    config = {
        "config": {
            "FOO": {
                "headers": {"HI": "MOM"},
                "verify": True
            },
            "BAR": {
                "headers": {"GOODBYE": "MOM"},
                "verify": False
            }
        }
    }
    with patch("environ.mergeDefaultsFromFile") as mock_merge_file:
        with patch("environ.mergeDefaultsFromURL") as mock_merge_url:
            result = environ.mergeDefaults(config, key, "http://website/default.yml")
    mock_merge_file.assert_not_called()
    mock_merge_url.assert_called_once()
    if key == "FOO":
        mock_merge_url.assert_called_with(config, "http://website/default.yml", {"HI": "MOM"}, True)
    elif key == "BAR":
        mock_merge_url.assert_called_with(config, "http://website/default.yml", {"GOODBYE": "MOM"}, False)
    else:
        # "BAZ" has no per-key settings, so defaults are passed through.
        mock_merge_url.assert_called_with(config, "http://website/default.yml", None, False)
@pytest.mark.skip(reason="TODO")
def test_mergeDefaultsFromURL():
    """Placeholder: environ.mergeDefaultsFromURL() has no coverage yet."""
    pass
@pytest.mark.parametrize(("file", "file_exists", "merge_called"),
    [
        # Blank paths and non-existent files must be ignored without error.
        (None, False, False),
        ("", False, False),
        (" ", False, False),
        ("/path/to/file", False, False),
        # Only an existing file is opened and merged.
        ("/path/to/file", True, True),
    ]
)
def test_mergeDefaultsFromFile(file, file_exists, merge_called):
    """mergeDefaultsFromFile() opens and merges the file only when the path
    is non-blank and exists; in every case it returns the (possibly
    unmodified) config dict."""
    mo = mock_open()
    with patch("environ.open", mo, create=True):
        with patch("environ.os") as mock_os:
            with patch("environ.merge_dict") as mock_merge:
                mock_os.path.exists = MagicMock(return_value=file_exists)
                result = environ.mergeDefaultsFromFile({"hello": "world"}, file)
    if merge_called:
        mo.assert_called_once()
        mock_merge.assert_called_once()
    else:
        mo.assert_not_called()
        mock_merge.assert_not_called()
    # merge_dict is mocked, so the input dict passes through untouched.
    assert result == {"hello": "world"}
@pytest.mark.parametrize(("mock_base", "mock_baked", "mock_env", "mock_host", "merge_call_count"),
    [
        # Null cases
        ({}, [], [], [], 0),
        ({"config": None}, [], [], [], 0),
        ({"config": {}}, [], [], [], 0),
        # Check baked
        ({"config": {"foo": "bar"}}, [{"key": "baked", "src": "file1"}], [], [], 1),
        ({"config": {"foo": "bar"}}, [{"key": "baked", "src": "f1"}, {"key": "baked", "src": "f2"}, {"key": "baked", "src": "f3"}], [], [], 3),
        # Check env
        ({"config": {"foo": "bar"}}, [], [{"key": "env", "src": "file1"}], [], 1),
        ({"config": {"foo": "bar"}}, [], [{"key": "env", "src": "f1"}, {"key": "env", "src": "f2"}, {"key": "env", "src": "f3"}], [], 3),
        # Check host
        ({"config": {"foo": "bar"}}, [], [], [{"key": "host", "src": "file1"}], 1),
        ({"config": {"foo": "bar"}}, [], [], [{"key": "host", "src": "f1"}, {"key": "host", "src": "f2"}, {"key": "host", "src": "f3"}], 3),
        # Check mixed
        ({"config": {"foo": "bar"}}, [{"key": "baked", "src": "file1"}], [{"key": "env", "src": "f1"}, {"key": "env", "src": "f2"}], [{"key": "host", "src": "f1"}, {"key": "host", "src": "f2"}], 5),
        ({"config": None}, [{"key": "baked", "src": "file1"}], [{"key": "env", "src": "f1"}, {"key": "env", "src": "f2"}], [{"key": "host", "src": "f1"}, {"key": "host", "src": "f2"}], 0),
        ({"config": {}}, [{"key": "baked", "src": "file1"}], [{"key": "env", "src": "f1"}, {"key": "env", "src": "f2"}], [{"key": "host", "src": "f1"}, {"key": "host", "src": "f2"}], 0),
    ]
)
def test_loadDefaults(mock_base, mock_baked, mock_env, mock_host, merge_call_count):
    """loadDefaults() merges every baked/env/host default source exactly
    once each -- but only when the base config has a non-empty 'config'
    section."""
    mbase = MagicMock(return_value=mock_base)
    mbaked = MagicMock(return_value=mock_baked)
    menv = MagicMock(return_value=mock_env)
    mhost = MagicMock(return_value=mock_host)
    with patch("environ.loadBaseDefaults", mbase):
        with patch("environ.loadBakedDefaults", mbaked):
            with patch("environ.loadEnvDefaults", menv):
                with patch("environ.loadHostDefaults", mhost):
                    with patch("environ.mergeDefaults") as mock_merge:
                        output = environ.loadDefaults()
    assert mock_merge.call_count == merge_call_count
@pytest.mark.parametrize(("os_env", "filename"),
    [
        # Default and standalone roles read splunk_defaults; the universal
        # forwarder role reads splunkforwarder_defaults instead.
        ({}, "splunk_defaults"),
        ({"SPLUNK_ROLE": "splunk_standalone"}, "splunk_defaults"),
        ({"SPLUNK_ROLE": "splunk_universal_forwarder"}, "splunkforwarder_defaults"),
    ]
)
def test_loadBaseDefaults(os_env, filename):
    """loadBaseDefaults() picks the defaults file from SPLUNK_ROLE, opens it
    read-only, and returns its parsed YAML as a dict."""
    sample_yml = """
    this: file
    is:
      a: yaml
    """
    mo = mock_open(read_data=sample_yml)
    with patch("environ.open", mo, create=True):
        with patch("os.environ", new=os_env):
            output = environ.loadBaseDefaults()
    mo.assert_called_once()
    args, _ = mo.call_args
    assert filename in args[0]
    assert args[1] == "r"
    assert type(output) == dict
    assert output["this"] == "file"
@pytest.mark.parametrize(("config", "output"),
    [
        # Missing/empty 'baked' values produce no sources.
        (None, []),
        ({}, []),
        ({"baked": None}, []),
        ({"baked": ""}, []),
        # A comma-separated 'baked' string becomes one source entry per file.
        ({"baked": "file1"}, [{"key": "baked", "src": "file1"}]),
        ({"baked": "file1,file2,file3"}, [{"key": "baked", "src": "file1"}, {"key": "baked", "src": "file2"}, {"key": "baked", "src": "file3"}]),
    ]
)
def test_loadBakedDefaults(config, output):
    """loadBakedDefaults() splits config['baked'] on commas into a list of
    {'key': 'baked', 'src': <file>} source descriptors."""
    result = environ.loadBakedDefaults(config)
    assert result == output
@pytest.mark.parametrize(("config", "output"),
    [
        (None, []),
        ({}, []),
        ({"env": None}, []),
        ({"env": {}}, []),
        ({"env": {"var": None}}, []),
        ({"env": {"var": ""}}, []),
        # Adding test for a key that does not exist
        ({"env": {"var": "FAKE"}}, []),
        # Adding tests for keys that exist
        ({"env": {"var": "KEY1"}}, [{"key": "env", "src": "file1"}]),
        ({"env": {"var": "KEY2"}}, [{"key": "env", "src": "file1"}, {"key": "env", "src": "file2"}, {"key": "env", "src": "file3"}]),
    ]
)
def test_loadEnvDefaults(config, output):
    """loadEnvDefaults() looks up the environment variable named by
    config['env']['var'] and splits its comma-separated value into
    {'key': 'env', 'src': <file>} descriptors; unset/unknown vars yield []."""
    with patch("os.environ", new={"KEY1": "file1", "KEY2": "file1,file2,file3"}):
        result = environ.loadEnvDefaults(config)
    assert result == output
@pytest.mark.parametrize(("config", "output"),
    [
        (None, []),
        ({}, []),
        ({"host": None}, []),
        ({"host": {}}, []),
        ({"host": {"url": None}}, []),
        ({"host": {"url": ""}}, []),
        # A comma-separated URL list becomes one source descriptor per URL.
        ({"host": {"url": "file1"}}, [{"key": "host", "src": "file1"}]),
        ({"host": {"url": "file1,file2,file3"}}, [{"key": "host", "src": "file1"}, {"key": "host", "src": "file2"}, {"key": "host", "src": "file3"}]),
    ]
)
def test_loadHostDefaults(config, output):
    """loadHostDefaults() splits config['host']['url'] on commas into
    {'key': 'host', 'src': <url>} source descriptors."""
    result = environ.loadHostDefaults(config)
    assert result == output
@pytest.mark.parametrize(("inputInventory", "outputInventory"),
    [
        # Verify null inputs
        ({}, {}),
        ({"all": {}}, {"all": {}}),
        ({"all": {"vars": {}}}, {"all": {"vars": {}}}),
        ({"all": {"vars": {"splunk": {}}}}, {"all": {"vars": {"splunk": {}}}}),
        # Verify individual keys to obfuscate
        ({"all": {"vars": {"splunk": {"password": "<PASSWORD>"}}}}, {"all": {"vars": {"splunk": {"password": "**************"}}}}),
        ({"all": {"vars": {"splunk": {"shc": {"secret": "helloworld"}}}}}, {"all": {"vars": {"splunk": {"shc": {"secret": "**************"}}}}}),
        ({"all": {"vars": {"splunk": {"smartstore": {"index": []}}}}}, {"all": {"vars": {"splunk": {"smartstore": {"index": []}}}}}),
        ({"all": {"vars": {"splunk": {"smartstore": {"index": [{"s3": {"access_key": "1234", "secret_key": "abcd"}}]}}}}}, {"all": {"vars": {"splunk": {"smartstore": {"index": [{"s3": {"access_key": "**************", "secret_key": "**************"}}]}}}}}),
    ]
)
def test_obfuscate_vars(inputInventory, outputInventory):
    """obfuscate_vars() masks known secret fields (password, shc.secret,
    smartstore s3 access/secret keys) with asterisks while leaving the rest
    of the inventory structure untouched."""
    result = environ.obfuscate_vars(inputInventory)
    assert result == outputInventory
@pytest.mark.skip(reason="TODO")
def test_create_parser():
    """Placeholder: environ.create_parser() has no coverage yet."""
    pass
@pytest.mark.skip(reason="TODO")
def test_prep_for_yaml_out():
    """Placeholder: environ.prep_for_yaml_out() has no coverage yet."""
    pass
@pytest.mark.skip(reason="TODO")
def test_main():
    """Placeholder: environ.main() has no coverage yet."""
    pass
| StarcoderdataPython |
3262927 | # pyOCD debugger
# Copyright (c) 2022 Huada Semiconductor Corporation
# Copyright (c) 2022 <NAME>
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...coresight.coresight_target import CoreSightTarget
from ...core.memory_map import (FlashRegion, RamRegion, MemoryMap)
from ...debug.svd.loader import SVDFile
class DBGMCU:
    """Debug-control register addresses and the values written on connect.

    NOTE(review): the 0xE0042xxx addresses sit past the Cortex-M private
    peripheral bus; presumably these are vendor-specific debug/trace control
    registers -- confirm against the HC32F460 reference manual.
    """
    STCTL = 0xE0042020    # debug stop-control register
    STCTL_VALUE = 0x3     # value written by post_connect_hook
    TRACECTL = 0xE0042024  # trace control register
    TRACECTL_VALUE = 0x0   # value written by post_connect_hook
# Flash programming algorithm blob for the main (program) flash.  The
# 'instructions' words are generated machine code -- do not hand-edit.
FLASH_ALGO = { 'load_address' : 0x20000000,
    'instructions' : [
    0xE00ABE00,
    0x4770ba40, 0x4770ba40, 0x4770bac0, 0x4770bac0, 0x0030ea4f, 0x00004770, 0x0030ea4f, 0x00004770,
    0x5001f24a, 0x8008490a, 0x7800480a, 0x0007f020, 0x39264908, 0x0026f881, 0x68004807, 0x00f0f020,
    0x60084905, 0x49032001, 0x70081d09, 0x00004770, 0x4005440e, 0x40054026, 0x40010408, 0x5101f24a,
    0x80114a16, 0x6a094916, 0x0170f021, 0x4a143110, 0xf2406211, 0x68094104, 0x0001f001, 0x4911b150,
    0x60114a11, 0x68094911, 0x01f0f021, 0x4a0f3140, 0xe0096011, 0x4a0c490e, 0x490c6011, 0xf0216809,
    0x315001f0, 0x60114a09, 0x4a052100, 0x7011322a, 0x4a032105, 0x1026f882, 0x00004770, 0x4005440e,
    0x40054000, 0x10102781, 0x4005410c, 0x40010408, 0x10101f81, 0x4603b570, 0xe003460c, 0x5b01f814,
    0x5b01f803, 0xf1a21e15, 0xd1f70201, 0xb510bd70, 0xf81ef000, 0xb510bd10, 0x46204604, 0xf854f000,
    0x0000bd10, 0xf000b500, 0x4808f9d5, 0xf4206800, 0x49067080, 0x20016008, 0x20006008, 0x1e406008,
    0x390c4902, 0xf0006008, 0xbd00f9c5, 0x4001040c, 0xf000b500, 0xf7fff9bf, 0x481aff73, 0xf0206800,
    0x1c400001, 0x60084917, 0x68004608, 0x0070f020, 0x60083050, 0x68004608, 0x7080f420, 0x7080f500,
    0xf2416008, 0x21002034, 0xe0016008, 0xf9a2f000, 0x1d00480c, 0xf4006800, 0x28007080, 0xf000d0f6,
    0x4808f999, 0xf0206800, 0x49060070, 0x46086008, 0xf0206800, 0x60080001, 0xff60f7ff, 0xf98af000,
    0xbd002000, 0x4001040c, 0x4604b530, 0x4540f649, 0xff36f7ff, 0xf97ef000, 0x6800481d, 0x0001f020,
    0x491b1c40, 0x46086008, 0xf0206800, 0x30400070, 0x46086008, 0xf4206800, 0xf5007080, 0x60087080,
    0xf968f000, 0x2000bf00, 0xf0006020, 0xe001f963, 0xf960f000, 0x1d00480e, 0xf4006800, 0xb9107080,
    0x1e051e68, 0xf000d1f4, 0x4809f955, 0xf0206800, 0x49070070, 0x46086008, 0xf0206800, 0x60080001,
    0xf948f000, 0xff1af7ff, 0xf944f000, 0xbd302000, 0x4001040c, 0x4811b500, 0x60084911, 0x60084811,
    0x300c480f, 0xf4206800, 0xf5007080, 0x490c7080, 0x6008310c, 0x5001f24a, 0x8008490b, 0x6a00480b,
    0x40e0f420, 0x40c0f500, 0x62084908, 0x4025f44f, 0x80084905, 0xf91ef000, 0x0000bd00, 0xffff0123,
    0x40010400, 0xffff3210, 0x4005440e, 0x40054000, 0x45f8e92d, 0x460c4680, 0xf04f4615, 0x900030ff,
    0x4740f649, 0x493d203f, 0xf0006008, 0x483bf903, 0x68003808, 0x0001f020, 0x49381c40, 0x60083908,
    0x68004608, 0x0070f020, 0x60083030, 0x68004608, 0x7080f420, 0x7080f500, 0x46466008, 0xf000bf00,
    0xf04ff8e9, 0xe0260a00, 0x4740f649, 0x60306828, 0x1d2d1d36, 0xf8def000, 0xf000e001, 0x4827f8db,
    0x68001f00, 0x0010f000, 0x1e78b910, 0xd1f41e07, 0x2001b917, 0x85f8e8bd, 0x1f004820, 0xf0006800,
    0xb118000f, 0xf8c6f000, 0xe7f32001, 0x491b2010, 0xf10a6008, 0xebba0a01, 0xd3d50f94, 0x0003f004,
    0xf004b138, 0x46290203, 0xf7ff4668, 0x9800fec3, 0xf000c601, 0x4811f8af, 0x68003808, 0x0070f020,
    0x3908490e, 0x46086008, 0xf0206800, 0x60080001, 0x4740f649, 0xf000e001, 0x4808f89d, 0x68001f00,
    0x7080f400, 0x1e78b910, 0xd1f41e07, 0x2001b90f, 0xf000e7c0, 0x2000f88f, 0x0000e7bc, 0x40010414,
    0x4604b570, 0x4616460d, 0xff44f7ff, 0xbd702000, 0x4604b570, 0x4616460d, 0x46294632, 0xf7ff4620,
    0xbd70ff67, 0x4604b510, 0xfe9cf7ff, 0xbd102000, 0x4604b5f0, 0x2300460d, 0x27002600, 0x21004626,
    0xf856e007, 0x6810cb04, 0xd0004584, 0x1d12e004, 0xebb11c49, 0xd3f40f95, 0x4637bf00, 0xe0062300,
    0xcb01f817, 0x45845cd0, 0xe004d000, 0xf0051c5b, 0x42980003, 0xbf00d8f4, 0x0081eb04, 0xbdf04418,
    0x1e01bf00, 0x0001f1a0, 0x4770d1fb, 0x481fb510, 0x481f6802, 0xf3c06800, 0x481d0481, 0xf3c06800,
    0xb90c2303, 0xe0081192, 0xd1012c01, 0xe0041292, 0xd1012c02, 0xe0001312, 0xb10b1392, 0xd1022b0f,
    0xf83af000, 0xf003e020, 0xb1180001, 0xf000b9e2, 0xe019f833, 0x0002f003, 0xd1042802, 0xd1132a01,
    0xf82af000, 0xf003e010, 0x28040004, 0x2a02d104, 0xf000d10a, 0xe007f821, 0x0008f003, 0xd1032808,
    0xd1012a03, 0xf818f000, 0x0000bd10, 0x40049404, 0x40010680, 0x4807b500, 0xf3c06800, 0xb9084000,
    0xf816f000, 0x68004803, 0x0001f000, 0xf7ffb908, 0xbd00ffad, 0x40010680, 0x49034802, 0x48036008,
    0x47706008, 0xffff0123, 0x40049408, 0xffff3210, 0x4823b510, 0xb2926842, 0x68004822, 0x4481f3c0,
    0x68004820, 0x6303f3c0, 0x1192b90c, 0x2c01e008, 0x1292d101, 0x2c02e004, 0x1312d101, 0x1392e000,
    0x2001b90b, 0x2000e000, 0xd1012b0f, 0xe0002101, 0x43082100, 0xf000b110, 0xe020f827, 0x0001f003,
    0xb9e2b118, 0xf820f000, 0xf003e019, 0x28020002, 0x2a01d104, 0xf000d113, 0xe010f817, 0x0004f003,
    0xd1042804, 0xd10a2a02, 0xf80ef000, 0xf003e007, 0x28080008, 0x2a03d103, 0xf000d101, 0xbd10f805,
    0x40049000, 0x40010680, 0x49034802, 0x48036088, 0x47706088, 0xffff0123, 0x40049000, 0xffff3210,
    0x00000000
    ],

    # Relative function addresses
    'pc_init': 0x200003a5,
    'pc_unInit': 0x200003c9,
    'pc_program_page': 0x200003b5,
    'pc_erase_sector': 0x200000fb,
    'pc_eraseAll': 0x200000f3,

    'static_base' : 0x20000000 + 0x00000020 + 0x000004b0,
    'begin_stack' : 0x20000700,
    'begin_data' : 0x20000000 + 0x1000,
    'page_size' : 0x200,
    'analyzer_supported' : False,
    'analyzer_address' : 0x00000000,
    'page_buffers' : [0x20001000, 0x20001200],   # Enable double buffering
    'min_program_length' : 0x200,
}
# Flash programming algorithm blob for the one-time-programmable (OTP)
# region.  The 'instructions' words are generated machine code -- do not
# hand-edit.
FLASH_ALGO_OTP = {
    'load_address' : 0x20000000,

    # Flash algorithm as a hex string
    'instructions': [
    0xE00ABE00,
    0x4770ba40, 0x4770bac0, 0x0030ea4f, 0x00004770, 0x5001f24a, 0x8008490a, 0x7800480a, 0x0007f020,
    0x39264908, 0x0026f881, 0x68004807, 0x00f0f020, 0x60084905, 0x49032001, 0x70081d09, 0x00004770,
    0x4005440e, 0x40054026, 0x40010408, 0x5101f24a, 0x80114a16, 0x6a094916, 0x0170f021, 0x4a143110,
    0xf2406211, 0x68094104, 0x0001f001, 0x4911b150, 0x60114a11, 0x68094911, 0x01f0f021, 0x4a0f3140,
    0xe0096011, 0x4a0c490e, 0x490c6011, 0xf0216809, 0x315001f0, 0x60114a09, 0x4a052100, 0x7011322a,
    0x4a032105, 0x1026f882, 0x00004770, 0x4005440e, 0x40054000, 0x10102781, 0x4005410c, 0x40010408,
    0x10101f81, 0x4603b570, 0xe003460c, 0x5b01f814, 0x5b01f803, 0xf1a21e15, 0xd1f70201, 0xb510bd70,
    0xf81ef000, 0xb510bd10, 0x46204604, 0xf81af000, 0x0000bd10, 0xf000b500, 0x4808f959, 0xf4206800,
    0x49067080, 0x20016008, 0x20006008, 0x1e406008, 0x390c4902, 0xf0006008, 0xbd00f949, 0x4001040c,
    0x47702000, 0x20004601, 0x00004770, 0x4811b500, 0x60084911, 0x60084811, 0x300c480f, 0xf4206800,
    0xf5007080, 0x490c7080, 0x6008310c, 0x5001f24a, 0x8008490b, 0x6a00480b, 0x40e0f420, 0x40c0f500,
    0x62084908, 0x4025f44f, 0x80084905, 0xf91ef000, 0x0000bd00, 0xffff0123, 0x40010400, 0xffff3210,
    0x4005440e, 0x40054000, 0x45f8e92d, 0x460c4680, 0xf04f4615, 0x900030ff, 0x4740f649, 0x493d203f,
    0xf0006008, 0x483bf903, 0x68003808, 0x0001f020, 0x49381c40, 0x60083908, 0x68004608, 0x0070f020,
    0x60083030, 0x68004608, 0x7080f420, 0x7080f500, 0x46466008, 0xf000bf00, 0xf04ff8e9, 0xe0260a00,
    0x4740f649, 0x60306828, 0x1d2d1d36, 0xf8def000, 0xf000e001, 0x4827f8db, 0x68001f00, 0x0010f000,
    0x1e78b910, 0xd1f41e07, 0x2001b917, 0x85f8e8bd, 0x1f004820, 0xf0006800, 0xb118000f, 0xf8c6f000,
    0xe7f32001, 0x491b2010, 0xf10a6008, 0xebba0a01, 0xd3d50f94, 0x0003f004, 0xf004b138, 0x46290203,
    0xf7ff4668, 0x9800ff3f, 0xf000c601, 0x4811f8af, 0x68003808, 0x0070f020, 0x3908490e, 0x46086008,
    0xf0206800, 0x60080001, 0x4740f649, 0xf000e001, 0x4808f89d, 0x68001f00, 0x7080f400, 0x1e78b910,
    0xd1f41e07, 0x2001b90f, 0xf000e7c0, 0x2000f88f, 0x0000e7bc, 0x40010414, 0x4604b570, 0x4616460d,
    0xff44f7ff, 0xbd702000, 0x4604b570, 0x4616460d, 0x46294632, 0xf7ff4620, 0xbd70ff67, 0x4604b510,
    0xff18f7ff, 0xbd102000, 0x4604b5f0, 0x2300460d, 0x27002600, 0x21004626, 0xf856e007, 0x6810cb04,
    0xd0004584, 0x1d12e004, 0xebb11c49, 0xd3f40f95, 0x4637bf00, 0xe0062300, 0xcb01f817, 0x45845cd0,
    0xe004d000, 0xf0051c5b, 0x42980003, 0xbf00d8f4, 0x0081eb04, 0xbdf04418, 0x1e01bf00, 0x0001f1a0,
    0x4770d1fb, 0x481fb510, 0x481f6802, 0xf3c06800, 0x481d0481, 0xf3c06800, 0xb90c2303, 0xe0081192,
    0xd1012c01, 0xe0041292, 0xd1012c02, 0xe0001312, 0xb10b1392, 0xd1022b0f, 0xf83af000, 0xf003e020,
    0xb1180001, 0xf000b9e2, 0xe019f833, 0x0002f003, 0xd1042802, 0xd1132a01, 0xf82af000, 0xf003e010,
    0x28040004, 0x2a02d104, 0xf000d10a, 0xe007f821, 0x0008f003, 0xd1032808, 0xd1012a03, 0xf818f000,
    0x0000bd10, 0x40049404, 0x40010680, 0x4807b500, 0xf3c06800, 0xb9084000, 0xf816f000, 0x68004803,
    0x0001f000, 0xf7ffb908, 0xbd00ffad, 0x40010680, 0x49034802, 0x48036008, 0x47706008, 0xffff0123,
    0x40049408, 0xffff3210, 0x4823b510, 0xb2926842, 0x68004822, 0x4481f3c0, 0x68004820, 0x6303f3c0,
    0x1192b90c, 0x2c01e008, 0x1292d101, 0x2c02e004, 0x1312d101, 0x1392e000, 0x2001b90b, 0x2000e000,
    0xd1012b0f, 0xe0002101, 0x43082100, 0xf000b110, 0xe020f827, 0x0001f003, 0xb9e2b118, 0xf820f000,
    0xf003e019, 0x28020002, 0x2a01d104, 0xf000d113, 0xe010f817, 0x0004f003, 0xd1042804, 0xd10a2a02,
    0xf80ef000, 0xf003e007, 0x28080008, 0x2a03d103, 0xf000d101, 0xbd10f805, 0x40049000, 0x40010680,
    0x49034802, 0x48036088, 0x47706088, 0xffff0123, 0x40049000, 0xffff3210, 0x00000000
    ],

    # Relative function addresses
    'pc_init': 0x2000029d,
    'pc_unInit': 0x200002c1,
    'pc_program_page': 0x200002ad,
    'pc_erase_sector': 0x200000eb,
    'pc_eraseAll': 0x200000e3,

    'static_base' : 0x20000000 + 0x00000004 + 0x00000498,
    'begin_stack' : 0x20000700,
    'begin_data' : 0x20000000 + 0x1000,
    'page_size' : 0x3fc,
    'analyzer_supported' : False,
    'analyzer_address' : 0x00000000,
    'page_buffers' : [0x20001000, 0x200013fc],   # Enable double buffering
    'min_program_length' : 0x3fc,

    # Flash information
    'flash_start': 0x3000c00,
    'flash_size': 0x3fc,
    'sector_sizes': (
        (0x0, 0x3fc),
    )
}
class HC32F451xC(CoreSightTarget):
    """HDSC HC32F451xC: 256 KiB main flash, a 1020-byte OTP block, and two
    on-chip RAM regions."""

    VENDOR = "HDSC"

    MEMORY_MAP = MemoryMap(
        # Main (boot) flash.
        FlashRegion(start=0x00000000, length=0x40000, page_size=0x200,
                    sector_size=0x2000, is_boot_memory=True, algo=FLASH_ALGO),
        # One-time-programmable flash block.
        FlashRegion(start=0x03000C00, length=0x3FC, sector_size=0x3FC,
                    is_boot_memory=False, is_default=False, algo=FLASH_ALGO_OTP),
        RamRegion(start=0x1FFF8000, length=0x2F000),
        RamRegion(start=0x200F0000, length=0x1000),
    )

    def __init__(self, session):
        super().__init__(session, self.MEMORY_MAP)
        self._svd_location = SVDFile.from_builtin("HC32F460.svd")

    def post_connect_hook(self):
        # Program the vendor debug-control registers right after connecting.
        for reg, value in ((DBGMCU.STCTL, DBGMCU.STCTL_VALUE),
                           (DBGMCU.TRACECTL, DBGMCU.TRACECTL_VALUE)):
            self.write32(reg, value)
class HC32F451xE(CoreSightTarget):
    """HDSC HC32F451xE: 512 KiB main flash, a 1020-byte OTP block, and two
    on-chip RAM regions."""

    VENDOR = "HDSC"

    MEMORY_MAP = MemoryMap(
        # Main (boot) flash.
        FlashRegion(start=0x00000000, length=0x80000, page_size=0x200,
                    sector_size=0x2000, is_boot_memory=True, algo=FLASH_ALGO),
        # One-time-programmable flash block.
        FlashRegion(start=0x03000C00, length=0x3FC, sector_size=0x3FC,
                    is_boot_memory=False, is_default=False, algo=FLASH_ALGO_OTP),
        RamRegion(start=0x1FFF8000, length=0x2F000),
        RamRegion(start=0x200F0000, length=0x1000),
    )

    def __init__(self, session):
        super().__init__(session, self.MEMORY_MAP)
        self._svd_location = SVDFile.from_builtin("HC32F460.svd")

    def post_connect_hook(self):
        # Program the vendor debug-control registers right after connecting.
        for reg, value in ((DBGMCU.STCTL, DBGMCU.STCTL_VALUE),
                           (DBGMCU.TRACECTL, DBGMCU.TRACECTL_VALUE)):
            self.write32(reg, value)
class HC32F452xC(CoreSightTarget):
    """HDSC HC32F452xC: 256 KiB main flash, a 1020-byte OTP block, and two
    on-chip RAM regions."""

    VENDOR = "HDSC"

    MEMORY_MAP = MemoryMap(
        # Main (boot) flash.
        FlashRegion(start=0x00000000, length=0x40000, page_size=0x200,
                    sector_size=0x2000, is_boot_memory=True, algo=FLASH_ALGO),
        # One-time-programmable flash block.
        FlashRegion(start=0x03000C00, length=0x3FC, sector_size=0x3FC,
                    is_boot_memory=False, is_default=False, algo=FLASH_ALGO_OTP),
        RamRegion(start=0x1FFF8000, length=0x2F000),
        RamRegion(start=0x200F0000, length=0x1000),
    )

    def __init__(self, session):
        super().__init__(session, self.MEMORY_MAP)
        self._svd_location = SVDFile.from_builtin("HC32F460.svd")

    def post_connect_hook(self):
        # Program the vendor debug-control registers right after connecting.
        for reg, value in ((DBGMCU.STCTL, DBGMCU.STCTL_VALUE),
                           (DBGMCU.TRACECTL, DBGMCU.TRACECTL_VALUE)):
            self.write32(reg, value)
class HC32F452xE(CoreSightTarget):
    """HDSC HC32F452xE: 512 KiB main flash, a 1020-byte OTP block, and two
    on-chip RAM regions."""

    VENDOR = "HDSC"

    MEMORY_MAP = MemoryMap(
        # Main (boot) flash.
        FlashRegion(start=0x00000000, length=0x80000, page_size=0x200,
                    sector_size=0x2000, is_boot_memory=True, algo=FLASH_ALGO),
        # One-time-programmable flash block.
        FlashRegion(start=0x03000C00, length=0x3FC, sector_size=0x3FC,
                    is_boot_memory=False, is_default=False, algo=FLASH_ALGO_OTP),
        RamRegion(start=0x1FFF8000, length=0x2F000),
        RamRegion(start=0x200F0000, length=0x1000),
    )

    def __init__(self, session):
        super().__init__(session, self.MEMORY_MAP)
        self._svd_location = SVDFile.from_builtin("HC32F460.svd")

    def post_connect_hook(self):
        # Program the vendor debug-control registers right after connecting.
        for reg, value in ((DBGMCU.STCTL, DBGMCU.STCTL_VALUE),
                           (DBGMCU.TRACECTL, DBGMCU.TRACECTL_VALUE)):
            self.write32(reg, value)
| StarcoderdataPython |
1768750 | <gh_stars>0
# -*- coding: utf-8 -*-
# speechrate.py - determines the speech rate of data from the Buckeye corpus.
#
# Copyright 2019, <NAME> (<EMAIL>)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# The following list of labels is used to delimit the context windows for
# determining the speech rate:

BREAK_LABELS = ("<SIL>", "<LAUGH>", "<IVER>", "<UNKNOWN>", "<VOCNOISE>",
                "<NOISE>",
                "{B_TRANS}", "{E_TRANS}")

# Labels are described in Table 4 of the Buckeye Manual
# https://buckeyecorpus.osu.edu/BuckeyeCorpusmanual.pdf
#
# <SIL>       pause
# <LAUGH>     laughter without producing a word
# <IVER>      interviewer's turn
# <VOCNOISE>  non-speech vocalizations
# <UNKNOWN>   audible but unintelligible speech
# <NOISE>     environmental noise
#
# {B_TRANS}   beginning of transcript
# {E_TRANS}   end of transcript
#
# You can add more labels to BREAK_LABELS. For example, if you want to end the
# context window also if the speaker produces a word hesitantly, you'd add
# "<HES-" (with quotation marks, with only an opening '<' but no closing one)
# to the variable.


def get_context(lines, ref_pos, span):
    """
    Create two lists representing the left and the right context window,
    respectively. Each list represents the temporal distance of the tokens in
    the context window from the reference token.

    The context windows will include up to `span` tokens, but can contain fewer
    tokens if the start or the end of the recording is reached, or if a token
    is encountered that may be considered a disfluency. These tokens are
    specified by the labels defined in the global variable BREAK_LABELS.

    Arguments
    ---------
    lines : list of strings
        The content of a Buckeye .words file, either with or without the
        header information.
    ref_pos : int
        The index of the reference token in the list `lines`.
    span : int
        The maximum size of the left and the right context window.

    Returns
    -------
    l_dist, r_dist : lists of float
        The temporal distance between the reference token and the tokens in the
        left and the right context window, respectively.
        If a context window doesn't contain a valid token (e.g. because the
        reference token occurs at the start or the end of the recording, or
        because it is preceded or followed by a pause), the respective list can
        be empty. Otherwise, it will contain up to `span` values (the length of
        the two lists can differ).
    """
    # Skip the header information from the Buckeye .words files if it is
    # included in the list of lines:
    if "#" in lines:
        header_pos = lines.index("#")
        lines = lines[(header_pos + 1):]
    elif "#\n" in lines:
        header_pos = lines.index("#\n")
        lines = lines[(header_pos + 1):]
    else:
        # BUGFIX: this used to be header_pos = 0, which made the adjustment
        # below shift ref_pos one position to the left even though no header
        # line was removed.  -1 keeps ref_pos unchanged in the no-header case.
        header_pos = -1

    # Re-index the reference token relative to the truncated list (header_pos
    # lines of preamble plus the "#" separator line were dropped):
    ref_pos = ref_pos - header_pos - 1

    # Determine the extent of the left and the right context windows around the
    # reference token. Each window will contain up to 'span' tokens, but can
    # contain fewer if the start or end of the recording is included.
    #
    # In order to correctly calculate the start and end times of the tokens,
    # the left context window will include one additional token, and the right
    # context window will include the reference token.
    #
    l_start = max(0, ref_pos - span - 2)
    r_end = min(len(lines), ref_pos + span + 1)

    # l_dist and r_dist will contain the temporal distance of the tokens in the
    # left and right context window, respectively, from the reference token.
    l_dist = []
    r_dist = []

    # obtain left context window (which may be empty):
    l_win = [s.strip().split(" ", 1) for s in lines[l_start:ref_pos]]
    if l_win:
        # for each token in the left window, create a list entry that contains
        # the end time and the word string:
        l_dat = [{"t": float(time), "word": trans.split(" ", 2)[1]}
                 for time, trans in l_win]
        # the start time of the reference token equates the end time of the
        # last token in the left window:
        start_time = l_dat[-1]["t"]
        # go through the tokens left context window in reverse order, excluding
        # the very first context token (this token is only included so that the
        # start_time can always be determined correctly):
        for token in l_dat[1:][::-1]:
            # calculate the temporal distance between the context token and the
            # current token, and add the distance to a list:
            l_dist.append(start_time - token["t"])
            # stop if the context token matches one of the break labels:
            if token["word"].upper().startswith(BREAK_LABELS):
                break

    # obtain right context window, including the reference token as the first
    # element:
    r_win = [s.strip().split(" ", 1) for s in lines[ref_pos:r_end]]
    # for each token in the right window, create a list entry that contains
    # the end time and the word string:
    r_dat = [{"t": float(time.strip()), "word": trans.split(" ", 2)[1]}
             for time, trans in r_win]
    # the end time of the reference token is stored in the first element of
    # the right context window:
    end_time = r_dat[0]["t"]
    # go through the tokens in the right window (excluding the reference token)
    for token in r_dat[1:]:
        # stop if the context token matches one of the break labels:
        if token["word"].upper().startswith(BREAK_LABELS):
            break
        # calculate the temporal distance between the context token and the
        # current token, and add the distance to a list:
        r_dist.append(token["t"] - end_time)

    # l_dist[0] is always the zero distance of the token directly preceding
    # the reference token, so it is dropped:
    return (l_dist[1:], r_dist)
def get_speechrate(l_dist, r_dist):
    """
    Calculate the speech rate from the context windows built by get_context().

    The rate D is the combined duration of the left and right context windows
    divided by the total number of tokens in them.  Because each list holds
    temporal distances from the reference token, a window's duration is simply
    the maximum of its list.  An empty window contributes nothing; if both
    windows are empty the rate is undefined.

    Arguments
    ---------
    l_dist, r_dist : list of floats
        The temporal distance between the reference token and the tokens in the
        left and the right context window, respectively.
        These lists are typically produced by the `get_context()` function.

    Returns
    -------
    D : float or None
        The speech rate based on the two context windows, or None if both
        context windows are empty.
    """
    # Guard-clause form.  (The original second branch tested
    # `r_dist and not l_dist`; the `not l_dist` part was redundant because
    # the first branch already handles the case where both are non-empty.)
    if l_dist and r_dist:
        return (max(l_dist) + max(r_dist)) / (len(l_dist) + len(r_dist))
    if r_dist:
        return max(r_dist) / len(r_dist)
    if l_dist:
        return max(l_dist) / len(l_dist)
    return None
| StarcoderdataPython |
143906 | <reponame>argriffing/matplotlib
#!/usr/bin/env python
import numpy as np
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
# Sample a square grid over [-3, 3] x [-3, 3].
delta = 0.025
x = y = np.arange(-3.0, 3.0, delta)
X, Y = np.meshgrid(x, y)
# Two 2-D Gaussian surfaces with different centers and spreads.
# NOTE(review): mlab.bivariate_normal was removed in matplotlib 3.1 -- this
# example requires an older matplotlib release.
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
Z = Z2 - Z1  # difference of Gaussians
# Render with a diverging colormap centered on zero (symmetric vmin/vmax).
im = plt.imshow(Z, interpolation='bilinear', cmap=cm.RdYlGn,
                origin='lower', extent=[-3, 3, -3, 3],
                vmax=abs(Z).max(), vmin=-abs(Z).max())
plt.show()
| StarcoderdataPython |
1637424 | import pymath
# Read displacement and elapsed time from stdin and print the velocity
# computed by the project's pymath.physics helper.
disp = float(input())
time = float(input())  # note: 'time' also names a stdlib module; harmless here
print("velocity = {}".format(pymath.physics.velocity(disp, time)))
| StarcoderdataPython |
166713 | <gh_stars>0
import fst
import codecs
import sys
import cPickle as pickle
import subprocess
from kitchen.text.converters import getwriter
# Command-line arguments: directory of per-utterance transcript files and a
# file listing the utterance names to process.
DATA_DIR = sys.argv[1]
FILE_LIST = sys.argv[2]
# Hand-built Dutch orthography->phoneme FST (text arc listing).
DUTCH_FST_FILE = '/export/ws15-pt-data/rsloan/Dutch_ref_orthography_fst.txt'
# Pickled pronunciation lexicons (Dutch and English).
DUTCH_DICT = '/export/ws15-pt-data/rsloan/dt_pron.p'
EN_DICT = '/export/ws15-pt-data/rsloan/en_pron.p'
# Phonetisaurus G2P model used as a fallback for out-of-vocabulary words.
PHONETISAURUS_FILE = '/export/ws15-pt-data/rsloan/phonetisaurus-0.8a/phonetisaurus/script/dt_data/dt_pron.fst'
#OUTFILE = '/export/ws15-pt-data/rsloan/dt_oov.txt'
def create_dt_fst():
    """Build the Dutch orthography->phoneme transducer from DUTCH_FST_FILE.

    Each 4-field line of the file describes one arc: source state, target
    state, input symbol, output symbol.  States 1 and 2 are marked final.

    Fix over the original: the arc file is now closed via a context manager
    instead of being leaked.
    """
    dt_fst = fst.Transducer(isyms=fst.SymbolTable('eps'),
                            osyms=fst.SymbolTable('eps'))
    with codecs.open(DUTCH_FST_FILE, 'r', encoding='utf-8') as fst_file:
        for l in fst_file:
            # Strip a possible BOM before splitting into fields.
            entry = l.replace(u'\ufeff', '').split()
            if len(entry) == 4:
                if entry[3] == 'ks':
                    # 'ks' is expanded into two separate phone symbols.
                    entry[3] = 'k s'
                dt_fst.add_arc(int(entry[0]), int(entry[1]), entry[2], entry[3])
    dt_fst[1].final = True
    dt_fst[2].final = True
    return dt_fst
def process_numerals(l):
    '''Replace every ASCII digit in `l` with its Dutch word, padded with
    spaces so each numeral becomes a separate word when the line is split
    on whitespace later.'''
    dutch_digits = {
        '0': 'zero', '1': 'een', '2': 'twee', '3': 'drie', '4': 'vier',
        '5': 'vijf', '6': 'zes', '7': 'zeven', '8': 'acht', '9': 'negen',
    }
    # Replacement order is irrelevant: the substituted words contain no digits.
    for digit, word in dutch_digits.items():
        l = l.replace(digit, ' %s ' % word)
    return l
# Module-level lookup resources used by word_to_pron() below.
dt_fst = create_dt_fst()  # Dutch g2p FST (its fallback use is commented out below)
dt_dict = pickle.load(open(DUTCH_DICT, 'rb'))  # Dutch word -> pronunciation
en_dict = pickle.load(open(EN_DICT, 'rb'))  # English word -> pronunciation (lookup currently disabled)
#outfile = codecs.open(OUTFILE, 'w', encoding='utf-8')
def word_to_pron(word):
    '''Return a pronunciation for a Dutch word as space-separated phonemes.

    The word is looked up in the pronunciation dictionary first; on a miss
    the function falls back to Phonetisaurus g2p and maps its phoneme
    symbols onto the dictionary's inventory.  '<silence>' maps to 'SIL'.
    '''
    if word == '<silence>':
        return 'SIL'
    try:
        return dt_dict[word]
    except KeyError:
        # OOV word: ask Phonetisaurus.  Its output is "<word>\t<phonemes>",
        # so keep only what follows the tab.
        pron = subprocess.check_output(
            ['phonetisaurus-g2p', '--model=' + PHONETISAURUS_FILE, '--input=' + word])
        pron = pron.rstrip()
        endnum = pron.index('\t')
        pron = pron[endnum + 1:]
        pron = unicode(pron, 'utf-8')
        # Normalize Phonetisaurus symbols to the dictionary's phoneme set.
        pron = pron.replace(u"\u0279", 'r')
        pron = pron.replace('h', u"\u0266")
        pron = pron.replace(u"\u0289", u"\u0259")
        pron = pron.replace(u"\u0264", u"\u0263")
        return pron
# Transcription driver: for each utterance name listed in FILE_LIST, read the
# matching text file under DATA_DIR and print one phoneme string per line.
utts = open(FILE_LIST, 'r')
for name in utts:
    name = name.strip()
    utt = codecs.open(DATA_DIR+'/'+name+'.txt', 'r', encoding='utf-8')
    for l in utt:
        outstring = ''
        l = l.replace('-', ' ') # treat hyphens as spaces when splitting words
        l = l.replace('/', ' ')
        l = process_numerals(l)
        words = l.split()
        for w in words:
            # Normalize diaereses and expand "&" before punctuation cleanup.
            w = w.replace(u"\u00EB", 'e')
            w = w.replace(u"\u00EF", 'i')
            w = w.replace('&', 'en')
            extra_chars = [',', '\'',u'\ufeff',':','\"','(',')']
            end_punc = ['.', '?', '!']
            for c in extra_chars:
                w = w.replace(c, '')
            # Sentence-final punctuation becomes an explicit SIL token below.
            end_sil = False
            for c in end_punc:
                if c in w:
                    w = w.replace(c, '')
                    end_sil = True
            if len(w) > 0:
                outstring += word_to_pron(w.lower()) + ' '
                if end_sil:
                    outstring += 'SIL '
        outstring = outstring.replace(u'eps', '')
        outstring = ' '.join(outstring.split()) # remove extra spaces, just in case
        # NOTE(review): stdout is re-wrapped on every line; wrapping it once
        # before the loops would suffice — verify before changing.
        UTF8Writer = getwriter('utf8')
        sys.stdout = UTF8Writer(sys.stdout)
        print outstring
74170 | <reponame>QwaddleMan/XNATSlicer
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2014, Washington University in St. Louis"
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__license__ = "XNAT Software License Agreement " + \
"(see: http://xnat.org/about/license.php)"
__version__ = "2.1.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
# application
from __main__ import qt
# module
from Settings import *
from FontSetting import *
from MetadataEditorSetting import *
from CheckBoxSetting import *
from XnatSlicerUtils import *
class Settings_View(FontSetting, MetadataEditorSetting, CheckBoxSetting,
                    Settings):
    """
    Settings_View is the Settings pertaining to
    the 'View_Tree' class.  It combines the font, metadata-editor and
    check-box setting mixins into a single settings page.
    """
    # Default label font for this settings page.
    LABEL_FONT = qt.QFont('Arial', 10, 10, False)
    # Section labels.
    LABEL_FONT_SIZE = 'Font Size'
    LABEL_METADATA = 'Info. Metadata'
    # Metadata tags shown by default in the editor.
    DEFAULT_METADATA = Xnat.metadata.DEFAULT_TAGS_LITE
    # Check-box descriptors consumed by CheckBoxSetting (createCheckBoxes).
    CHECKBOXES = OrderedDict([
        ('lastAccessed', {
            'tag': 'showLastAccessedOnly',
            'desc': 'Show only accessed projects.',
            'checked': False,
            'event': 'FILTERTOGGLED'
        })
    ])
    def setup(self):
        """
        Build the settings page: the 'Filters' check-box section, the
        font-size dropdown, and the metadata editor sets.
        """
        self.addSection('Filters')
        self.createCheckBoxes()
        self.addSpacing()
        self.addSpacing()
        self.createFontSizeDropdown(Settings_View.LABEL_FONT_SIZE)
        self.addSpacing()
        self.createMetadataEditorSets(Settings_View.LABEL_METADATA,
                                      itemType = 'checkbox',
                                      editVisible = True,
                                      customEditVisible = False)
147179 | <filename>qa/rpc-tests/elysium_property_creation_fee.py
#!/usr/bin/env python3
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import ElysiumTestFramework
from test_framework.util import assert_raises_message
class ElysiumPropertyCreationFeeTest(ElysiumTestFramework):
    """Checks the FIRO fee rules for Elysium property creation before and
    after the creation-fee activation block height."""

    def get_new_address(self, default_balance = 0):
        """Return a fresh address, funded with `default_balance` FIRO when > 0."""
        addr = self.nodes[0].getnewaddress()
        if default_balance > 0:
            self.nodes[0].sendtoaddress(addr, default_balance)
            self.nodes[0].generate(1)
            self.sync_all()
        return addr

    def test(self, balance = 1, ecosystem = 1, amount = None, expected_error = None):
        """Issue a property (managed when `amount` is None, fixed otherwise)
        from an address holding `balance`; expect `expected_error` if given."""
        addr = self.get_new_address(balance)
        args = [addr, ecosystem, 1, 0, "", "", "Foo", "", ""]
        if amount is None:
            issue = self.nodes[0].elysium_sendissuancemanaged
        else:
            issue = self.nodes[0].elysium_sendissuancefixed
            args.append(amount)
        if expected_error is not None:
            assert_raises_message(
                JSONRPCException,
                expected_error,
                issue,
                *args)
        else:
            issue(*args)
            self.nodes[0].generate(1)
            self.sync_all()

    def test_insufficient(self, balance = 1, ecosystem = 1, amount = None):
        """Shortcut: issuance must fail with the insufficient-fee error."""
        self.test(balance, ecosystem, amount, 'fees may not be sufficient')

    def run_test(self):
        super().run_test()
        creation_fee_start_block = 500

        # Before the fee activates, every property type is cheap to create.
        self.test(ecosystem = 1)
        self.test(ecosystem = 1, amount = "10000")
        self.test(ecosystem = 2)
        self.test(ecosystem = 2, amount = "10000")

        # Mine up to the activation height so the fee rule is in force.
        self.nodes[0].generate(creation_fee_start_block - self.nodes[0].getblockcount())

        # Main-ecosystem creation now requires a 100 FIRO fee.
        self.test_insufficient(ecosystem = 1)
        self.test_insufficient(ecosystem = 1, amount = "10000")

        # The test ecosystem stays cheap.
        self.test(ecosystem = 2)
        self.test(ecosystem = 2, amount = "10000")

        # With a 101 FIRO balance the 100 FIRO fee can be paid.
        self.test(balance = 101, ecosystem = 1)
        self.test(balance = 101, ecosystem = 1, amount = "10000")

if __name__ == '__main__':
    ElysiumPropertyCreationFeeTest().main()
3246360 | #-*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, include, url
from shop.models.productmodel import Product
from shop.views import ShopListView, ShopTemplateView
from shop.views.cart import CartDetails, CartItemDetail
from shop.views.checkout import ThankYouView, CheckoutSelectionView, \
ShippingBackendRedirectView, PaymentBackendRedirectView#, SelectShippingView, SelectPaymentView,
from shop.views.order import OrderListView, OrderDetailView
from shop.views.product import ProductDetailView
# URL configuration for the shop app (Django 1.x style `patterns`).
# Payment and shipping backend modules are mounted under pay/ and ship/.
urlpatterns = patterns('',
    (r'^pay/', include('shop.payment.urls')),
    (r'^ship/', include('shop.shipping.urls')),
    #Home
    url(r'^$', ShopTemplateView.as_view(template_name="shop/welcome.html"),
        name='shop_welcome'),
    # Cart
    url(r'^cart/delete/$', CartDetails.as_view(action='delete'), # DELETE
        name='cart_delete'),
    url('^cart/item/$', CartDetails.as_view(action='post'), # POST
        name='cart_item_add' ),
    url(r'^cart/$', CartDetails.as_view(), name='cart'), # GET
    url(r'^cart/update/$', CartDetails.as_view(action='put'),
        name='cart_update'),
    # CartItems
    url('^cart/item/(?P<id>[0-9A-Za-z-_.//]+)$', CartItemDetail.as_view(),
        name='cart_item' ),
    # Checkout
    url(r'^checkout/$', CheckoutSelectionView.as_view(),
        name='checkout_selection' # First step of the checkout process
        ),
    #url(r'^checkout/ship/$', SelectShippingView.as_view(),
    #    name='checkout_shipping' # First step of the checkout process
    #    ),
    url(r'^checkout/ship/$', ShippingBackendRedirectView.as_view(),
        name='checkout_shipping' # First step of the checkout process
        ),
    #url(r'^checkout/pay/$', SelectPaymentView.as_view(),
    #    name='checkout_payment' # Second step of the checkout process
    #    ),
    url(r'^checkout/pay/$', PaymentBackendRedirectView.as_view(),
        name='checkout_payment' # First step of the checkout process
        ),
    url(r'^checkout/thank_you/$', ThankYouView.as_view(),
        name='thank_you_for_your_order' # Second step of the checkout process
        ),
    # Products
    url(r'^products/$',
        ShopListView.as_view(model=Product),
        name='product_list'
        ),
    url(r'^products/(?P<slug>[0-9A-Za-z-_.//]+)/$',
        ProductDetailView.as_view(),
        name='product_detail'
        ),
    # Orders
    url(r'^orders/$',
        OrderListView.as_view(),
        name='order_list'),
    url(r'^orders/(?P<pk>\d+)/$',
        OrderDetailView.as_view(),
        name='order_detail'),
    )
| StarcoderdataPython |
115848 | <filename>python-scripts/gt_path_manager.py
"""
GT Path Manager - A script for quickly repathing many elements in Maya.
<NAME> - <EMAIL> - 2020-08-26 - github.com/TrevisanGMW
0.1a - 2020-08-26
Created initial setup, added table and icons for file nodes
1.0 - 2020-12-02
Initial Release
Added support for UDIMS and Image Sequences to the "file" node
Added support for a lot of common nodes:
audio, cacheFile, AlembicNode, BifMeshImportNode, gpuCache, MASH_Audio
Added support for Arnold Lights
aiPhotometricLight, aiStandIn, aiVolume
Added support for Redshift Lights
RedshiftProxyMesh, RedshiftVolumeShape, RedshiftNormalMap, RedshiftDomeLight, RedshiftIESLight
Added support for Reference Files through OpenMaya API (Instead of PyMEL)
1.1 - 2020-12-03
Added support for Image Planes
1.2 - 2021-05-11
Made script compatible with Python 3 (Maya 2022+)
Todo:
Add support for Goalem Nodes
'SimulationCacheProxyManager', 'destinationTerrainFile', accepts_empty=True
'SimulationCacheProxyManager', 'skinningShaderFile', accepts_empty=True
'CrowdEntityTypeNode', 'characterFile', accepts_empty=True
'CharacterMakerLocator', 'currentFile', accepts_empty=True
'TerrainLocator', 'navMeshFile', accepts_empty=True
'SimulationCacheProxy', 'inputCacheDir', accepts_empty=True
# Manage Multiple Files?
'SimulationCacheProxy', 'characterFiles', accepts_empty=True, checks_multiple_paths=True
'CrowdManagerNode', 'characterFiles', accepts_empty=True, checks_multiple_paths=True
Add:
Only Files Checkbox
"""
from PySide2 import QtCore
from PySide2 import QtGui
from PySide2 import QtWidgets
from shiboken2 import wrapInstance
import maya.OpenMaya as om
import maya.OpenMayaUI as omui
import maya.cmds as cmds
import maya.mel as mel
import time
import sys
import os
import re
# Script Name
script_name = "GT Path Manager"

# Version
script_version = '1.2'

# Python Version (major); selects the pointer cast used by maya_main_window()
python_version = sys.version_info.major
def maya_main_window():
    '''
    Return the Maya main window widget as a Python (Qt) object.
    '''
    main_window_ptr = omui.MQtUtil.mainWindow()
    # Python 3 has no "long"; Python 2 shiboken expects one.
    pointer = int(main_window_ptr) if python_version == 3 else long(main_window_ptr)
    return wrapInstance(pointer, QtWidgets.QWidget)
def list_reference_pairs():
    '''
    Returns all references and their paths. Used to get a reference path when the file is not found.
    cmds.referenceQuery would return an error.

    Returns:
        reference_list (list): A list of pairs, containing reference name and reference path
    '''
    # Collect every reference MObject in the scene.
    it = om.MItDependencyNodes(om.MFn.kReference)
    ref_nodes = om.MObjectArray()
    while not it.isDone():
        ref_nodes.append(it.thisNode())
        it.next()

    ref_pairs = []
    for i in range(ref_nodes.length()):
        try:
            mfn_ref = om.MFnReference(ref_nodes[i])  # was ref_nodes.__getitem__(i)
            ref_pairs.append([mfn_ref.absoluteName(),
                              mfn_ref.fileName(False, False, False)])
        except Exception:
            # Best-effort listing: some reference nodes can't be queried
            # (e.g. unloaded/shared ones); skip them instead of aborting.
            # (Was a bare "except:", which also swallowed SystemExit etc.)
            pass
    return ref_pairs
class GTPathManagerDialog(QtWidgets.QDialog):
''' Main GT Path Manager Class '''
ATTR_ROLE = QtCore.Qt.UserRole
VALUE_ROLE = QtCore.Qt.UserRole + 1
    def __init__(self, parent=maya_main_window()):
        ''' Create the main dialog: title, flags, icon, widgets, layout,
        signal connections, and an initial table refresh.

        NOTE(review): the default parent is evaluated once, at function
        definition time (standard Python default-argument behavior).
        '''
        super(GTPathManagerDialog, self).__init__(parent)
        self.setWindowTitle(script_name + ' - (v' + str(script_version) + ')')
        self.setWindowFlags(self.windowFlags() ^ QtCore.Qt.WindowContextHelpButtonHint)
        self.setMinimumWidth(700)
        self.resize(self.width() + 250,500)
        # Set Icon
        self.setWindowIcon(QtGui.QIcon(':/annotation.png'))
        # Setup Window Content and Signals
        self.create_widgets()
        self.create_layout()
        self.create_connections()
        # Remove Focus from Line Edit
        self.setFocus()
        # Initial Table Refresh
        self.refresh_table()
    def create_widgets(self):
        ''' Create Widgets (title bar, search-path row, 4-column table, buttons) '''
        # Title
        self.title_label = QtWidgets.QLabel(script_name)
        self.title_label.setStyleSheet('background-color: rgb(93, 93, 93); \
                                        border: 0px solid rgb(93, 93, 93); \
                                        color: rgb(255, 255, 255);\
                                        font: bold 12px; \
                                        padding: 5px;')
        self.help_btn = QtWidgets.QPushButton('Help')
        self.help_btn.setStyleSheet('color: rgb(255, 255, 255); font: bold 12px;')
        # Search Path
        self.search_path_label = QtWidgets.QLabel("Search Path: ")
        self.filepath_le = QtWidgets.QLineEdit()
        self.filepath_le.setPlaceholderText('Path to a Directory')
        self.filepath_le.setMinimumSize(QtCore.QSize(380, 0))
        self.select_dir_path_btn = QtWidgets.QPushButton()
        self.select_dir_path_btn.setIcon(QtGui.QIcon(':fileOpen.png'))
        self.select_dir_path_btn.setToolTip('Select Directory')
        # Table columns: 0 = status icon, 1 = node, 2 = node type, 3 = path
        self.table_wdg = QtWidgets.QTableWidget()
        self.table_wdg.setColumnCount(4)
        self.table_wdg.setColumnWidth(0, 22)
        self.table_wdg.setColumnWidth(1, 80)
        self.table_wdg.setColumnWidth(3, 280)
        header_view = self.table_wdg.horizontalHeader()
        header_view.setSectionResizeMode(3, QtWidgets.QHeaderView.Stretch)
        self.table_wdg.setHorizontalHeaderLabels(["", "Node", "Node Type", "Path"])
        self.refresh_btn = QtWidgets.QPushButton("Refresh")
        self.refresh_btn.setFixedWidth(55)
        self.start_repair_btn = QtWidgets.QPushButton("Auto Path Repair")
        #self.start_repair_btn.setFixedWidth(120)
        self.search_replace_btn = QtWidgets.QPushButton("Search and Replace")
        self.only_files_cb = QtWidgets.QCheckBox("Only File Nodes")
    def create_layout(self):
        ''' Arrange widgets: title row on top, table in the middle, buttons below '''
        # Build File Path Layout
        file_path_layout = QtWidgets.QHBoxLayout()
        file_path_layout.addWidget(self.search_path_label)
        file_path_layout.addWidget(self.filepath_le)
        file_path_layout.addWidget(self.select_dir_path_btn)
        # Build Title Layout
        title_layout = QtWidgets.QHBoxLayout()
        title_layout.setSpacing(0)
        title_layout.addWidget(self.title_label,5)
        title_layout.addWidget(self.help_btn)
        # Bottom Left Buttons (Search Path)
        button_layout = QtWidgets.QHBoxLayout()
        button_layout.addLayout(file_path_layout)
        # Bottom Right Buttons (Main Buttons)
        button_layout.setSpacing(2)
        button_layout.addStretch()
        #button_layout.addWidget(self.only_files_cb)
        button_layout.addWidget(self.start_repair_btn)
        button_layout.addWidget(self.search_replace_btn)
        button_layout.addWidget(self.refresh_btn)
        # Build Main Layout
        main_layout = QtWidgets.QVBoxLayout(self)
        main_layout.addLayout(title_layout)
        main_layout.setContentsMargins(15, 15, 15, 11) # Make Margins Uniform LTRB
        main_layout.addWidget(self.table_wdg)
        main_layout.addLayout(button_layout)
    def create_connections(self):
        ''' Connect widget signals to their handlers '''
        self.refresh_btn.clicked.connect(self.refresh_table)
        # Cell edits drive rename/repath; cell clicks select the Maya node.
        self.table_wdg.cellChanged.connect(self.on_cell_changed)
        self.table_wdg.cellClicked.connect(self.select_clicked_item)
        # Auto Path Repair Btn
        self.start_repair_btn.clicked.connect(self.start_attempt_repair)
        self.help_btn.clicked.connect(self.build_gui_help_path_manager)
        self.search_replace_btn.clicked.connect(self.build_gui_search_replace_path_manager)
        self.select_dir_path_btn.clicked.connect(self.show_dir_select_dialog)
    def show_dir_select_dialog(self):
        ''' Invoke open file dialog so the user can select a search directory (Populate filepath_le with user input) '''
        multiple_filters = "Directories Only (.donotshowfiles)" # dummy filter so no files are listed
        file_path = cmds.fileDialog2(fileFilter=multiple_filters, dialogStyle=2, fm=3, caption='Select Search Directory', okc='Select Directory')
        if file_path:
            self.filepath_le.setText(file_path[0])
def set_cell_changed_connection_enabled(self, enabled):
''' To turn on and off the connection so it doesn't update unnecessarily '''
if enabled:
self.table_wdg.cellChanged.connect(self.on_cell_changed)
else:
self.table_wdg.cellChanged.disconnect(self.on_cell_changed)
def select_clicked_item(self, row, column):
'''
Executed when clicking on a table item, it tries to select the node clicked
'''
item = self.table_wdg.item(row, 1)
node_name = self.get_item_value(item)
try:
if cmds.objExists(node_name):
cmds.select(node_name)
except:
pass
def showEvent(self, e):
''' Cause it to refresh when opening. I might have to change this for heavy projects '''
super(GTPathManagerDialog, self).showEvent(e)
self.refresh_table
    def keyPressEvent(self, e):
        ''' Key presses should not be passed to the parent '''
        super(GTPathManagerDialog, self).keyPressEvent(e)
        e.accept() # mark the event as handled so it stops propagating
    def get_path_items(self, obj):
        '''
        Get a tuple containing file_path, is_valid_path, obj_type, obj_icon, obj_attr

                Parameters:
                    obj (string): Name of the object.

                Returns:
                    None when "obj" doesn't exist; otherwise a tuple of:
                    file_path (string): The path extracted from the object.
                    is_valid_path (bool): Whether or not the file exists in the system (or directory).
                    obj_type (string): Display label for the node type. E.g. "File".
                    obj_icon (string): Icon path for the Node Type cell.
                    obj_attr (string): Attribute used to get/set the new path (leading ".").
        '''
        if cmds.objExists(obj):
            file_path = ''
            obj_type = cmds.objectType(obj) or ''
            obj_icon = ''
            obj_attr = ''
            is_dir = False # True when the path attribute points at a folder, not a file
            try:
                # Common Types
                if obj_type == 'file':
                    obj_icon = ':file.svg'
                    obj_type = obj_type.capitalize()
                    obj_attr = '.fileTextureName'
                    file_path = cmds.getAttr(obj + obj_attr)
                elif obj_type == 'audio':
                    obj_icon = ':audio.svg'
                    obj_type = obj_type.capitalize()
                    obj_attr = '.filename'
                    file_path = cmds.getAttr(obj + obj_attr)
                elif obj_type == 'cacheFile':
                    obj_icon = ':cachedPlayback.png'
                    obj_type = 'Cache File'
                    obj_attr = '.cachePath'
                    # cacheFile splits the path across two attributes; rebuild it.
                    path_no_file = cmds.getAttr(obj + obj_attr) or ''
                    file_path = path_no_file + '/' + cmds.getAttr(obj + '.cacheName') + '.xml'
                    file_path = file_path.replace('//', '/')
                elif obj_type == 'AlembicNode':
                    obj_icon = ':enableAllCaches.png'
                    obj_type = 'Alembic File'
                    obj_attr = '.abc_File'
                    file_path = cmds.getAttr(obj + obj_attr)
                elif obj_type == 'BifMeshImportNode':
                    obj_icon = ':bifrostContainer.svg'
                    obj_type = 'Bifrost Cache'
                    obj_attr = '.bifMeshDirectory'
                    is_dir = True
                    file_path = cmds.getAttr(obj + obj_attr)
                elif obj_type == 'gpuCache':
                    obj_icon = ':importCache.png'
                    obj_type = 'GPU Cache'
                    obj_attr = '.cacheFileName'
                    file_path = cmds.getAttr(obj + obj_attr)
                # Arnold
                elif obj_type == 'aiPhotometricLight':
                    obj_icon = ':LM_spotLight.png'
                    obj_type = 'aiPhotometricLight'
                    obj_attr = '.aiFilename'
                    file_path = cmds.getAttr(obj + obj_attr)
                elif obj_type == 'aiStandIn':
                    obj_icon = ':envCube.svg'
                    obj_type = 'aiStandIn'
                    obj_attr = '.dso'
                    file_path = cmds.getAttr(obj + obj_attr)
                elif obj_type == 'aiVolume':
                    obj_icon = ':cube.png'
                    obj_type = 'aiVolume'
                    obj_attr = '.filename'
                    file_path = cmds.getAttr(obj + obj_attr)
                # Redshift
                elif obj_type == 'RedshiftProxyMesh':
                    obj_icon = ':envCube.svg'
                    obj_type = 'rsProxyMesh'
                    obj_attr = '.fileName'
                    file_path = cmds.getAttr(obj + obj_attr)
                elif obj_type == 'RedshiftVolumeShape':
                    obj_icon = ':cube.png'
                    obj_type = 'rsVolumeShape'
                    obj_attr = '.fileName'
                    file_path = cmds.getAttr(obj + obj_attr)
                elif obj_type == 'RedshiftNormalMap':
                    obj_icon = ':normalDetails.svg'
                    obj_type = 'rsNormalMap'
                    obj_attr = '.tex0'
                    file_path = cmds.getAttr(obj + obj_attr)
                elif obj_type == 'RedshiftDomeLight':
                    obj_icon = ':ambientLight.svg'
                    obj_type = 'rsDomeLight'
                    obj_attr = '.tex0'
                    file_path = cmds.getAttr(obj + obj_attr)
                elif obj_type == 'RedshiftIESLight':
                    obj_icon = ':LM_spotLight.png'
                    obj_type = 'rsIESLight'
                    obj_attr = '.profile'
                    file_path = cmds.getAttr(obj + obj_attr)
                # MASH
                elif obj_type == 'MASH_Audio':
                    obj_icon = ':audio.svg'
                    obj_type = 'MASH Audio'
                    obj_attr = '.filename'
                    file_path = cmds.getAttr(obj + obj_attr)
                # Image Plane
                elif obj_type == 'imagePlane':
                    obj_icon = ':imagePlane.svg'
                    obj_type = 'Image Plane'
                    obj_attr = '.imageName'
                    file_path = cmds.getAttr(obj + obj_attr)
                # References
                elif obj_type == 'reference':
                    obj_icon = ':reference.png'
                    obj_type = 'Reference'
                    obj_attr = '.fileNames' # Not used
                    try:
                        # Use the API helper because cmds.referenceQuery errors
                        # out when the referenced file is missing.
                        ref_pairs = list_reference_pairs()
                        r_file = ''
                        reference_name = ''
                        for i in range(len(ref_pairs)):
                            reference_name = ref_pairs[i][0]
                            if reference_name.startswith(':'):
                                reference_name = reference_name[1:]
                            if reference_name == obj:
                                r_file = ref_pairs[i][1]
                        if reference_name == '':
                            r_file = 'Unknown'
                    except Exception as e:
                        r_file = 'Unknown'
                        print(e)
                    file_path = r_file
                is_valid_path = os.path.isfile(file_path)
                if is_dir:
                    is_valid_path = os.path.isdir(file_path)
                return (file_path, is_valid_path, obj_type, obj_icon, obj_attr)
            except:
                # NOTE(review): bare except; any failure above reports the
                # path as invalid instead of raising — verify before narrowing.
                return (file_path, False, obj_type, obj_icon, obj_attr)
        else:
            return None
    def refresh_table(self, is_repair_attempt=False, is_search_replace=False):
        '''
        Main Refresh Function: collects all path-carrying nodes (plus
        references), optionally repairs or search-and-replaces their paths,
        and rebuilds the table.

                Parameters:
                    is_repair_attempt (bool): Is attempting to auto repair paths? (Called by the Auto Path Repair Button)
                    is_search_replace (bool): Is it doing a search and replace? (Called by the Search and Replace Button)
        '''
        common_locations = [] # Locations where files were found
        is_search_dir_valid = False
        if is_repair_attempt:
            search_dir = self.filepath_le.text()
            if os.path.isdir(search_dir):
                is_search_dir_valid = True
            else:
                cmds.warning('The search directory doesn\'t exist. Please select a valid path and try again.')
        self.set_cell_changed_connection_enabled(False) # So it doesn't update unnecessarily
        self.table_wdg.setRowCount(0) # Remove all rows
        # Used to detect installed plugins
        node_types = cmds.ls(nodeTypes=True)
        # Common Nodes
        file_nodes = cmds.ls(type='file')
        path_nodes = file_nodes
        # Available Types
        available_node_types = ['audio', 'cacheFile', 'AlembicNode', 'gpuCache','BifMeshImportNode',\
                                'RedshiftProxyMesh','RedshiftVolumeShape','RedshiftNormalMap','RedshiftDomeLight','RedshiftIESLight', \
                                'MASH_Audio','aiPhotometricLight','aiStandIn','aiVolume', 'imagePlane']
        # Add Types for Loaded Plugins
        path_node_types = []
        for obj_type in available_node_types:
            if obj_type in node_types:
                path_node_types.append(obj_type)
        # Add Extra Nodes to Path Nodes
        for node_type in path_node_types:
            try:
                nodes_list = cmds.ls(type=node_type)
                path_nodes += nodes_list
            except:
                pass
        # Add References
        refs = cmds.ls(rf=True)
        path_nodes += refs
        # Populate Table
        for i in range(len(path_nodes)):
            ################ Start Directory Search ################
            if is_repair_attempt and is_search_dir_valid:
                try:
                    file_items = self.get_path_items(path_nodes[i]) # (path, is_path_valid, node_type_string, icon, node_attr)
                    progress_bar_name = 'Searching'
                    query_path = file_items[0]
                    initial_result = os.path.exists(query_path)
                    query_path = query_path.replace('\\','/') # Format it - The main Query
                    desired_file = query_path.split('/')[-1] # Extract file name (short_name)
                    accept_dir = False
                    is_udim_file = False
                    is_image_sequence = False
                    # Check if using UDIMs or Image Sequences
                    if file_items[2] == 'File':
                        try:
                            uv_tiling_mode = cmds.getAttr(path_nodes[i] + '.uvTilingMode') # Is it using UDIM?
                            use_frame_extension = cmds.getAttr(path_nodes[i] + '.useFrameExtension') # Is it an image sequence?
                            is_image_sequence = use_frame_extension
                            if uv_tiling_mode != 0:
                                # NOTE(review): 'maya.app.general' is never imported at the top of
                                # this file ('import maya.cmds as cmds' does not bind 'maya'), so
                                # this line may raise NameError, silently caught below — verify.
                                udim_file_pattern = maya.app.general.fileTexturePathResolver.getFilePatternString(query_path, use_frame_extension, uv_tiling_mode)
                                query_path = udim_file_pattern#.replace('<UDIM>','1001') Handled later using regex
                                is_udim_file = True
                        except:
                            pass
                    # Handle desired folder (instead of file)
                    if file_items[2] == 'Bifrost Cache':
                        if query_path.endswith('/'):
                            desired_file = query_path.split('/')[-2]
                        else:
                            desired_file = query_path.split('/')[-1]
                        accept_dir = True
                    is_found = False
                    if (initial_result != True) and (len(common_locations) != 0): # If common locations are available try them first
                        for loc in common_locations:
                            # Strip the file name off the known location (reverse, drop
                            # the first path component, reverse back).
                            formatted_path = loc.replace("\\","/")
                            formatted_path = formatted_path[::-1]
                            formatted_path = formatted_path.split("/", 1)[-1]
                            formatted_path = formatted_path[::-1]
                            common_path_result = os.path.exists(formatted_path + "/" + desired_file)
                            if common_path_result == True:
                                resolved_path = (formatted_path + "/" + desired_file).replace('/','\\')
                                #print(path_nodes[i] + ' found using known location.') # Debugging
                                self.set_attr_enhanced(path_nodes[i], file_items[4], resolved_path)
                                is_found = True
                    # Full Search/Walk
                    if (initial_result != True) and (is_found == False):
                        search_count = 0 # How many folders to look into (walk) for the progress bar
                        for path in os.walk(search_dir): # generates the file names in a directory tree by walking the tree either top-b or b-top
                            search_count += 1
                        resolved_path = query_path
                        self.make_progress_bar(progress_bar_name, search_count) # make_progress_bar(name, maxVal) - Max value is the number of folders
                        for path, dirs, files in os.walk(search_dir): # root_dir_path, sub_dirs, files in os.walk(my_dir)
                            self.move_progress_bar(progress_bar_name, 1)
                            path = path.replace('/','\\')
                            # Handle Files
                            if desired_file in files:
                                resolved_path = (path + '\\' + desired_file).replace('/','\\')
                                common_locations.append(resolved_path)
                                is_found = True
                            # Handle Folders (instead of files)
                            if accept_dir and desired_file in dirs:
                                resolved_path = (path + '\\' + desired_file).replace('/','\\')
                                common_locations.append(resolved_path)
                                is_found = True
                            # Handle UDIMs
                            if is_udim_file and is_found == False:
                                file_name = os.path.splitext(desired_file)[0].replace('<UDIM>', '')
                                extension = os.path.splitext(desired_file)[1]
                                pattern = re.compile(file_name + '\\d\\d\\d\\d' + extension)
                                first_found_file = ''
                                if any(pattern.match(line) for line in files):
                                    lines_to_log = [line for line in files if pattern.match(line)]
                                    first_found_file = lines_to_log[0]
                                if first_found_file != '':
                                    resolved_path = (path + '\\' + first_found_file).replace('/','\\')
                                    if os.path.exists(resolved_path):
                                        common_locations.append(resolved_path)
                                        is_found = True
                            # Handle Image sequences
                            if is_image_sequence and is_found == False:
                                file_name = os.path.splitext(desired_file)[0].replace('<f>', '').replace('<F>', '')
                                extension = os.path.splitext(desired_file)[1]
                                pattern = re.compile(file_name + '\\d+' + extension)
                                first_found_file = ''
                                if any(pattern.match(line) for line in files):
                                    lines_to_log = [line for line in files if pattern.match(line)]
                                    first_found_file = lines_to_log[0]
                                if first_found_file != '':
                                    resolved_path = (path + '\\' + first_found_file).replace('/','\\')
                                    if os.path.exists(resolved_path):
                                        common_locations.append(resolved_path)
                                        is_found = True
                        if is_found:
                            #print(path_nodes[i] + ' has a valid path.') # Debugging
                            self.set_attr_enhanced(path_nodes[i], file_items[4], resolved_path)
                        self.kill_progress_window(progress_bar_name) # Kill progress bar
                except:
                    self.kill_progress_window(progress_bar_name)
            ################ End Directory Search ################
            # Search and Replace
            if is_search_replace:
                try:
                    file_items = self.get_path_items(path_nodes[i])
                    old_path = file_items[0]
                    new_path = old_path.replace(self.search_string, self.replace_string)
                    self.set_attr_enhanced(path_nodes[i], file_items[4], new_path) #(path, is_path_valid, node_type_string, icon, node_attr)
                except:
                    pass
            # Refresh Table
            file_items = self.get_path_items(path_nodes[i])
            self.table_wdg.insertRow(i)
            self.table_wdg.setFocusPolicy(QtCore.Qt.NoFocus) # No highlight
            self.insert_item(i, 1, path_nodes[i], None, path_nodes[i])
            if file_items: # (path, is_path_valid, node_type_string, icon, node_attr)
                if file_items[1]:
                    self.insert_item(i, 2, file_items[2], None, cmds.objectType(path_nodes[i]), icon_path=file_items[3], editable=False )
                    self.insert_icon(i, 0, ':confirm.png')
                else:
                    self.insert_item(i, 2, file_items[2], None, cmds.objectType(path_nodes[i]), icon_path=file_items[3], editable=False )
                    self.insert_icon(i, 0, ':error.png')
                self.insert_item(i, 3, file_items[0], file_items[4], file_items[0])
        self.set_cell_changed_connection_enabled(True)
def insert_item(self, row, column, node_name, attr, value, icon_path='', editable=True, centered=True):
item = QtWidgets.QTableWidgetItem(node_name)
#item.setBackgroundColor(QtGui.QColor(255,0,0, 10)) Make the background of the cells green/red?
self.set_item_value(item, value)
self.set_item_attr(item, attr)
if icon_path != '':
item.setIcon(QtGui.QIcon(icon_path))
if centered:
item.setTextAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
if not editable:
item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
self.table_wdg.setItem(row, column, item)
def insert_icon(self, row, column, icon_path):
item = QtWidgets.QWidget()
label = QtWidgets.QLabel()
label.setScaledContents(True)
label.maximumSize()
label.setPixmap(QtGui.QPixmap(icon_path))
layout = QtWidgets.QHBoxLayout(item)
layout.addWidget(label)
layout.setAlignment(QtCore.Qt.AlignHCenter)
layout.setContentsMargins(0,5,0,5)
item.setLayout(layout)
self.table_wdg.setCellWidget(row, column, item)
    def set_item_text(self, item, text):
        ''' Set the visible text of a table item '''
        item.setText(text)
    def get_item_text(self, item):
        ''' Get the visible (possibly user-edited) text of a table item '''
        return item.text()
    def set_item_attr(self, item, attr):
        ''' Store the Maya attribute name (e.g. ".fileTextureName") in ATTR_ROLE '''
        item.setData(self.ATTR_ROLE, attr)
    def get_item_attr(self, item):
        ''' Retrieve the Maya attribute name stored in ATTR_ROLE '''
        return item.data(self.ATTR_ROLE)
    def set_item_value(self, item, value):
        ''' Store the last committed value (node name or path) in VALUE_ROLE '''
        item.setData(self.VALUE_ROLE, value)
    def get_item_value(self, item):
        ''' Retrieve the last committed value stored in VALUE_ROLE '''
        return item.data(self.VALUE_ROLE)
    def on_cell_changed(self, row, column):
        ''' Route manual cell edits: column 1 renames the node, column 3
        repaths it.  The cellChanged signal is suspended while handling to
        avoid recursive updates. '''
        self.set_cell_changed_connection_enabled(False)
        item = self.table_wdg.item(row, column)
        if column == 1:
            self.rename(item)
        if column == 3:
            self.repath(item)
        self.set_cell_changed_connection_enabled(True)
def rename(self, item):
old_name = self.get_item_value(item)
new_name = self.get_item_text(item)
if old_name != new_name:
actual_new_name = cmds.rename(old_name, new_name)
if actual_new_name != new_name:
self.set_item_text(item, actual_new_name)
self.set_item_value(item, actual_new_name)
    def repath(self, item):
        ''' Apply an edited path cell to the Maya node.  Rolls the cell back
        when set_attr_enhanced() reports failure (returns False), updates the
        row's status icon, and triggers a full table refresh. '''
        old_path = self.get_item_value(item)
        new_path = self.get_item_text(item)
        attr_to_change = self.get_item_attr(item)
        object_name = self.get_item_value(self.table_wdg.item(item.row(), 1))
        if old_path != new_path:
            try:
                is_valid_path = os.path.isfile(new_path)
                # set_attr_enhanced returns True/False for "complex" nodes
                # (cacheFile) and None for simple setAttr targets.
                complex_output = self.set_attr_enhanced(object_name, attr_to_change, new_path)
                if complex_output != None and complex_output == False:
                    # Complex update failed: restore the previous path.
                    self.set_item_value(item, old_path)
                    self.set_item_text(item, old_path)
                    is_valid_path = os.path.isfile(old_path)
                else:
                    self.set_item_text(item, new_path)
                if is_valid_path:
                    self.insert_icon(item.row(), 0, ':confirm.png')
                else:
                    self.insert_icon(item.row(), 0, ':error.png')
                self.set_cell_changed_connection_enabled(True)
                self.refresh_table()
            except Exception as e:
                # Roll back the stored value, restore the signal connection,
                # refresh, then surface the error to the caller.
                self.set_item_value(item, old_path)
                self.set_cell_changed_connection_enabled(True)
                self.refresh_table()
                raise e
    def set_attr_enhanced(self, obj, attribute, new_value):
        '''
        Set attribute for the provided object using different methods depending on its type

                Parameters:
                    obj (string): Name of the node/object.
                    attribute (string): Name of the attribute to set. E.g. ".cacheFile"
                    new_value (string): New value to update

                Returns:
                    True/False for "cacheFile" nodes (success/failure);
                    otherwise None (simple setAttr and reference handling).
        '''
        #print(obj + ' ' + attribute + ' ' + new_value) # Debugging
        if cmds.objExists(obj):
            obj_type = cmds.objectType(obj) or ''
        else:
            obj_type = ''
        # These node types need more than a plain setAttr.
        complex_types = ['cacheFile', 'reference']
        if obj_type not in complex_types:
            cmds.setAttr( obj + attribute , new_value, type='string')
        else:
            if obj_type == 'cacheFile':
                # cacheFile stores directory and file name in two attributes;
                # split the incoming path accordingly.
                format_path = os.path.splitext(new_value)[0].replace("\\","/")
                file_name = format_path.split('/')[-1]
                format_path_no_file = format_path[::-1].split("/", 1)[-1][::-1]
                try:
                    # Only commit when the .xml descriptor actually exists.
                    if os.path.isfile(format_path_no_file + '/' + file_name.replace('.xml','') + '.xml'):
                        cmds.setAttr(obj + '.cachePath', format_path_no_file, type='string')
                        cmds.setAttr(obj + '.cacheName', file_name, type='string')
                        return True
                    else:
                        return False
                except:
                    return False
            if obj_type == 'reference':
                # referenceQuery raises on broken/inaccessible references.
                not_skipped = True
                try:
                    cmds.referenceQuery(obj,isLoaded=True)
                except:
                    not_skipped = False
                if not_skipped:
                    if os.path.isfile(new_value):
                        try:
                            cmds.file(new_value, loadReference=obj)
                        except:
                            return False
                    else:
                        cmds.warning('Provided reference path : "' + new_value + '" doesn\'t lead to a valid file. Previous path was retained.')
                else:
                    cmds.warning('Reference file inaccessible.')
    def start_attempt_repair(self):
        ''' Run the table refresh in auto-repair mode (searches the directory
        in filepath_le for missing files). '''
        self.refresh_table(is_repair_attempt=True)
def make_progress_bar(self, prog_win_name, max_value):
    '''
    Create Progress Window

    Parameters:
        prog_win_name (string): Name of the window
        max_value (int): The maximum or "ending" value of the progress indicator.
    '''
    # Recreate the window from scratch: delete any existing instance and
    # its stored window preferences (saved size/position) first.
    if(cmds.window(prog_win_name, q=1, ex=1)):
        cmds.deleteUI(prog_win_name)
    if(cmds.windowPref(prog_win_name, q=1, ex=1)):
        cmds.windowPref(prog_win_name, r=1)
    prog_window = cmds.window(prog_win_name, title=prog_win_name, widthHeight=(300, 50))
    cmds.columnLayout(p=prog_win_name)
    # The bar is named "<window>_progress" so move_progress_bar can find it.
    progress_control = cmds.progressBar(prog_win_name + '_progress', maxValue=max_value, width=300, height=50)
    cmds.showWindow( prog_window )
def move_progress_bar(self, prog_win_name, step_size):
    '''
    Advance an existing progress bar created by make_progress_bar.

    Parameters:
        prog_win_name (string): Name of the progress window.
        step_size (int): Amount to increment the progress indicator by.
    '''
    cmds.progressBar(prog_win_name + '_progress', edit=True, step=step_size)
def kill_progress_window(self, prog_win_name):
    '''
    Close progress window in case it exists

    Parameters:
        prog_win_name (string): Name of the window
    '''
    if(cmds.window(prog_win_name, q=1, ex=1)):
        cmds.deleteUI(prog_win_name)
    # Also remove the stored window preferences (saved size/position).
    if(cmds.windowPref(prog_win_name, q=1, ex=1)):
        cmds.windowPref(prog_win_name, r=1)
def build_gui_help_path_manager(self):
    ''' Creates the Help GUI for GT Path Manager (modal-style info window). '''
    window_name = "build_gui_help_path_manager"
    # Recreate the window if it is already open.
    if cmds.window(window_name, exists=True):
        cmds.deleteUI(window_name, window=True)
    cmds.window(window_name, title= script_name + " Help", mnb=False, mxb=False, s=True)
    cmds.window(window_name, e=True, s=True, wh=[1,1])
    main_column = cmds.columnLayout(p= window_name)
    # Title Text
    cmds.separator(h=12, style='none') # Empty Space
    cmds.rowColumnLayout(nc=1, cw=[(1, 310)], cs=[(1, 10)], p=main_column) # Window Size Adjustment
    cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1, 10)], p=main_column) # Title Column
    cmds.text(script_name + " Help", bgc=[.4,.4,.4], fn="boldLabelFont", align="center")
    cmds.separator(h=10, style='none', p=main_column) # Empty Space
    # Body ====================
    cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1,10)], p=main_column)
    # cmds.text(l='Script for managing paths', align="center")
    # cmds.separator(h=15, style='none') # Empty Space
    cmds.text(l='This script displays a list with the name, type and path\n of any common nodes found in Maya.', align="center")
    cmds.separator(h=10, style='none') # Empty Space
    cmds.text(l='You can select the node listed by clicking on it or \nchange its name or path by double clicking the cell.', align="center")
    cmds.separator(h=10, style='none') # Empty Space
    cmds.text(l='The icon on the left describes the validity of the path.\nIf the file or directory is found in the system it shows\n a green confirm icon otherwise it shows a red icon.', align="center")
    cmds.separator(h=10, style='none') # Empty Space
    cmds.text(l='Auto Path Repair', align="center", font='boldLabelFont')
    cmds.text(l='This function walks through the folders under the\nprovided directory looking for missing files. \nIf it finds a match, the path is updated.', align="center")
    cmds.separator(h=10, style='none') # Empty Space
    cmds.text(l='Search and Replace', align="center", font='boldLabelFont')
    cmds.text(l='This function allows you to search and replace strings\nin the listed paths.', align="center")
    cmds.separator(h=10, style='none') # Empty Space
    cmds.text(l='Refresh', align="center", font='boldLabelFont')
    cmds.text(l='Re-populates the list while re-checking for path validity.', align="center")
    cmds.separator(h=10, style='none') # Empty Space
    cmds.text(l='Search Path', align="center", font='boldLabelFont')
    cmds.text(l='A directory path used when looking for missing files.', align="center")
    cmds.separator(h=15, style='none') # Empty Space
    cmds.rowColumnLayout(nc=2, cw=[(1, 140),(2, 140)], cs=[(1,10),(2, 0)], p=main_column)
    cmds.text('<NAME> ')
    cmds.text(l='<a href="mailto:<EMAIL>"><EMAIL></a>', hl=True, highlightColor=[1,1,1])
    cmds.rowColumnLayout(nc=2, cw=[(1, 140),(2, 140)], cs=[(1,10),(2, 0)], p=main_column)
    cmds.separator(h=10, style='none') # Empty Space
    cmds.text(l='<a href="https://github.com/TrevisanGMW">Github</a>', hl=True, highlightColor=[1,1,1])
    cmds.separator(h=7, style='none') # Empty Space
    # Close Button
    cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1,10)], p=main_column)
    cmds.separator(h=5, style='none')
    # close_help_gui is defined below; the lambda resolves it at click time.
    cmds.button(l='OK', h=30, c=lambda args: close_help_gui())
    cmds.separator(h=8, style='none')
    # Show and Lock Window
    cmds.showWindow(window_name)
    cmds.window(window_name, e=True, s=False)
    # Set Window Icon
    qw = omui.MQtUtil.findWindow(window_name)
    # wrapInstance takes the window pointer as int (Py3) or long (Py2).
    if python_version == 3:
        widget = wrapInstance(int(qw), QtWidgets.QWidget)
    else:
        widget = wrapInstance(long(qw), QtWidgets.QWidget)
    icon = QtGui.QIcon(':/question.png')
    widget.setWindowIcon(icon)

    def close_help_gui():
        ''' Closes Help UI in case it's opened. '''
        if cmds.window(window_name, exists=True):
            cmds.deleteUI(window_name, window=True)
def build_gui_search_replace_path_manager(self):
    ''' Creates the GUI for Searching and Replacing Paths '''
    window_name = "build_gui_search_replace_path_manager"
    # Recreate the window if it is already open.
    if cmds.window(window_name, exists=True):
        cmds.deleteUI(window_name, window=True)
    cmds.window(window_name, title= 'Search and Replace', mnb=False, mxb=False, s=True)
    cmds.window(window_name, e=True, s=True, wh=[1,1])
    main_column = cmds.columnLayout(p= window_name)
    # Body
    cmds.separator(h=12, style='none') # Empty Space
    cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1,10)], p=main_column)
    cmds.text(l='This will search and replace strings in your paths', align="center")
    cmds.separator(h=12, style='none') # Empty Space
    cmds.rowColumnLayout(nc=1, cw=[(1, 310)], cs=[(1, 10)], p=main_column) # Window Size Adjustment
    cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1, 10)], p=main_column) # Title Column
    cmds.text('Search for:', bgc=[.4,.4,.4], fn="boldLabelFont", align="center")
    search_txtfield = cmds.textField(placeholderText='Type search here')
    cmds.separator(h=10, style='none') # Empty Space
    cmds.text('Replace with:', bgc=[.4,.4,.4], fn="boldLabelFont", align="center")
    replace_txtfield = cmds.textField(placeholderText='Type replace here')
    # Close Button
    cmds.separator(h=5, style='none')
    cmds.rowColumnLayout(nc=2, cw=[(1, 148),(2, 148)], cs=[(1,10),(2,4)], p=main_column)
    # Apply Button (apply_search_replace is defined below; resolved at click time)
    cmds.button(l='Search and Replace', h=30, c=lambda args: apply_search_replace())
    #cmds.separator(h=10, style='none')
    cmds.button(l='Cancel', h=30, c=lambda args: close_snr_gui())
    cmds.separator(h=8, style='none')
    # Show and Lock Window
    cmds.showWindow(window_name)
    cmds.window(window_name, e=True, s=False)
    # Set Window Icon
    qw = omui.MQtUtil.findWindow(window_name)
    # wrapInstance takes the window pointer as int (Py3) or long (Py2).
    if python_version == 3:
        widget = wrapInstance(int(qw), QtWidgets.QWidget)
    else:
        widget = wrapInstance(long(qw), QtWidgets.QWidget)
    icon = QtGui.QIcon(':/search.png')
    widget.setWindowIcon(icon)

    def apply_search_replace():
        ''' Reads both text fields and runs the search/replace refresh. '''
        self.search_string = cmds.textField(search_txtfield, q=True, text=True)
        self.replace_string = cmds.textField(replace_txtfield, q=True, text=True)
        if self.search_string != '':
            try:
                # Bring the main Path Manager window forward if it exists.
                gt_path_manager_dialog.show()
            except:
                pass
            self.refresh_table(is_search_replace=True)
            # Close this search/replace dialog once the refresh ran.
            if cmds.window(window_name, exists=True):
                cmds.deleteUI(window_name, window=True)
        else:
            cmds.warning('"Search for" string can\'t be empty.')

    def close_snr_gui():
        ''' Closes Search and Replace GUI in case it's opened. '''
        if cmds.window(window_name, exists=True):
            cmds.deleteUI(window_name, window=True)
def try_to_close_gt_path_manager():
    ''' Attempts to close GT Path Manager in case it's opened (best-effort). '''
    try:
        gt_path_manager_dialog.close() # pylint: disable=E0601
        gt_path_manager_dialog.deleteLater()
    except Exception:
        # The dialog may not have been created yet (NameError) or may
        # already be deleted; either way there is nothing to close.
        # A bare `except:` here would also swallow SystemExit/KeyboardInterrupt.
        pass
# Build GUI only when run as a script (not on import).
if __name__ == "__main__":
    # Close any previously opened instance before creating a new one.
    try_to_close_gt_path_manager()
    gt_path_manager_dialog = GTPathManagerDialog()
    # BUG FIX: removed stray dataset artifact ("| StarcoderdataPython")
    # that was fused onto this line and would raise a NameError at runtime.
    gt_path_manager_dialog.show()
91295 | <reponame>fakela/mindee-api-python
import pytest
from mindee import Client, Response, Receipt, Passport
from mindee.http import HTTPException
@pytest.fixture
def empty_client():
    """Client created with no API tokens configured."""
    return Client()
@pytest.fixture
def dummy_client():
    """Client configured with placeholder (invalid) tokens for every document type."""
    return Client(
        expense_receipt_token="dummy",
        invoice_token="<PASSWORD>",
        passport_token="<PASSWORD>",
        license_plate_token="<PASSWORD>",
    )
@pytest.fixture
def dummy_client_dont_raise():
    """Client with placeholder tokens and raise_on_error disabled (failures yield empty results)."""
    return Client(
        expense_receipt_token="dummy",
        invoice_token="dummy",
        passport_token="dummy",
        license_plate_token="<PASSWORD>",
        raise_on_error=False,
    )
@pytest.fixture
def response():
    """Pre-recorded v3 receipt API response loaded from disk."""
    return Response.load("./tests/data/expense_receipts/v3/receipt.json")
def test_parse_receipt_without_token(empty_client):
    """Parsing a receipt without a configured token must raise."""
    with pytest.raises(Exception):
        empty_client.parse_receipt("./tests/data/expense_receipts/receipt.jpg")
def test_parse_invoice_without_token(empty_client):
    """Parsing an invoice without a configured token must raise."""
    with pytest.raises(Exception):
        empty_client.parse_invoice("./tests/data/expense_receipts/receipt.jpg")
def test_parse_financial_doc_without_token(empty_client):
    """Parsing a financial document without a configured token must raise."""
    with pytest.raises(Exception):
        empty_client.parse_financial_document(
            "./tests/data/expense_receipts/receipt.jpg"
        )
def test_parse_passport_without_token(empty_client):
    """Parsing a passport without a configured token must raise."""
    with pytest.raises(Exception):
        empty_client.parse_passport("./tests/data/expense_receipts/receipt.jpg")
def test_parse_license_plate_without_token(empty_client):
    """Parsing a license plate without a configured token must raise."""
    with pytest.raises(Exception):
        empty_client.parse_license_plate("./tests/data/license_plates/plate.png")
def test_parse_receipt_with_wrong_filetype(dummy_client):
    """An unsupported file extension (.jpga) must be rejected with AssertionError."""
    with pytest.raises(AssertionError):
        dummy_client.parse_receipt("./tests/data/expense_receipts/receipt.jpga")
def test_parse_invoice_with_wrong_filetype(dummy_client):
    """An unsupported file extension (.jpga) must be rejected with AssertionError."""
    with pytest.raises(AssertionError):
        dummy_client.parse_invoice("./tests/data/expense_receipts/receipt.jpga")
def test_parse_financial_doc_with_wrong_filetype(dummy_client):
    """An unsupported file extension (.jpga) must be rejected with AssertionError."""
    with pytest.raises(AssertionError):
        dummy_client.parse_financial_document(
            "./tests/data/expense_receipts/receipt.jpga"
        )
def test_parse_passport_with_wrong_filetype(dummy_client):
    """An unsupported file extension (.jpga) must be rejected with AssertionError."""
    with pytest.raises(AssertionError):
        dummy_client.parse_passport("./tests/data/expense_receipts/receipt.jpga")
def test_parse_plate_with_wrong_filetype(dummy_client):
    """An unsupported file extension (.jpga) must be rejected with AssertionError."""
    with pytest.raises(AssertionError):
        dummy_client.parse_license_plate("./tests/data/expense_receipts/receipt.jpga")
def test_parse_receipt_with_wrong_token(dummy_client):
    """The API must reject the placeholder token with an HTTPException."""
    with pytest.raises(HTTPException):
        dummy_client.parse_receipt("./tests/data/expense_receipts/receipt.jpg")
def test_parse_receipt_with_wrong_version(dummy_client):
    """A nonexistent API version must produce an HTTPException."""
    with pytest.raises(HTTPException):
        dummy_client.parse_receipt(
            "./tests/data/expense_receipts/receipt.jpg", version="4000"
        )
def test_parse_invoice_with_wrong_token(dummy_client):
    """The API must reject the placeholder token with an HTTPException."""
    with pytest.raises(HTTPException):
        dummy_client.parse_invoice("./tests/data/expense_receipts/receipt.jpg")
def test_parse_financial_doc_with_wrong_token_jpg(dummy_client):
    """Financial-doc parsing of a JPEG with a bad token must raise HTTPException."""
    with pytest.raises(HTTPException):
        dummy_client.parse_financial_document(
            "./tests/data/expense_receipts/receipt.jpg"
        )
def test_parse_financial_doc_with_wrong_token_pdf(dummy_client):
    """Financial-doc parsing of a PDF with a bad token must raise HTTPException."""
    with pytest.raises(HTTPException):
        dummy_client.parse_financial_document("./tests/data/invoices/invoice.pdf")
def test_parse_passport_with_wrong_token(dummy_client):
    """The API must reject the placeholder token with an HTTPException."""
    with pytest.raises(HTTPException):
        dummy_client.parse_passport("./tests/data/expense_receipts/receipt.jpg")
def test_parse_license_plate_with_wrong_token(dummy_client):
    """The API must reject the placeholder token with an HTTPException."""
    with pytest.raises(HTTPException):
        dummy_client.parse_license_plate("./tests/data/license_plates/plate.png")
def test_response_dump(response):
    """A loaded response exposes a Receipt and can be dumped back to JSON."""
    assert isinstance(response.receipt, Receipt)
    response.dump("./tests/data/response_dump.json")
def test_response_dump_failure(response):
    """Dumping to a path in a non-existent directory must fail.

    NOTE(review): open() itself raises here before dump() ever runs, so
    this exercises the failing setup rather than dump()'s own error
    handling — confirm this is the intended coverage.
    """
    with pytest.raises(Exception):
        response.dump(open("./tests/pathDoesNotExist/aaa"))
def test_response_load_failure():
    """Loading a nonexistent file must raise."""
    with pytest.raises(Exception):
        Response.load("notAFile")
def test_response_with_passport_type():
    """A passport response must expose a Passport document object."""
    response = Response.load("./tests/data/passport/v1/passport.json")
    assert isinstance(response.passport, Passport)
def test_request_with_filepath(dummy_client):
    """Parsing from a file path with an invalid token must raise HTTPException."""
    with pytest.raises(HTTPException):
        dummy_client.parse_receipt(
            "./tests/data/expense_receipts/receipt.jpg", input_type="path"
        )
def test_request_with_file(dummy_client):
    """Parsing from an open binary stream with an invalid token must raise HTTPException."""
    # Use a context manager so the file handle is closed even when the
    # expected exception is raised (the original leaked the handle).
    with open("./tests/data/expense_receipts/receipt.jpg", "rb") as fh:
        with pytest.raises(HTTPException):
            dummy_client.parse_receipt(fh, input_type="stream")
def test_request_with_base64_no_filename(dummy_client):
    """Base64 input without a filename must be rejected with AssertionError."""
    with open("./tests/data/expense_receipts/receipt.txt", "r") as fh:
        b64 = fh.read()
    with pytest.raises(AssertionError):
        dummy_client.parse_receipt(b64, input_type="base64")
def test_request_with_base64(dummy_client):
    """Base64 input with a filename is accepted but the bad token must raise HTTPException."""
    with open("./tests/data/expense_receipts/receipt.txt", "r") as fh:
        b64 = fh.read()
    with pytest.raises(HTTPException):
        dummy_client.parse_receipt(b64, input_type="base64", filename="receipt.txt")
def test_request_without_raise_on_error(dummy_client_dont_raise):
    """With raise_on_error=False, a failed request yields an empty result instead of raising."""
    result = dummy_client_dont_raise.parse_receipt(
        "./tests/data/expense_receipts/receipt.jpg", input_type="path"
    )
    assert result.receipt is None
    assert len(result.receipts) == 0
def test_request_without_raise_on_error_include_words(dummy_client_dont_raise):
    """Same as above, with the include_words option enabled."""
    result = dummy_client_dont_raise.parse_receipt(
        "./tests/data/expense_receipts/receipt.jpg",
        input_type="path",
        include_words=True,
    )
    assert result.receipt is None
    assert len(result.receipts) == 0
def test_request_with_file_wrong_type(dummy_client):
    """Non-image inputs must be rejected regardless of input_type."""
    # Use a context manager so the handle is closed even when the
    # expected exception is raised (the original leaked the handle).
    with open("./tests/data/test.txt") as fh:
        with pytest.raises(AssertionError):
            dummy_client.parse_receipt(fh, input_type="file")
    with pytest.raises(AssertionError):
        dummy_client.parse_receipt("./tests/data/test.txt", input_type="path")
def test_pdf_reconstruct(dummy_client):
    """Multi-page PDF handling still fails with HTTPException on a bad token."""
    with pytest.raises(HTTPException):
        dummy_client.parse_invoice("./tests/data/invoices/invoice_6p.pdf")
| StarcoderdataPython |
1790279 | <reponame>voocel/wechat
import itchat
import time
import datetime
def timer_handle(exec_time):
    """Poll the clock and fire send_to() once per scheduled time, hourly.

    The loop watches for `now` to enter the one-second window starting at
    `exec_time`; after sending, the schedule is pushed back one hour so the
    message repeats.

    Parameters:
        exec_time (datetime.datetime): first time at which to send.
    """
    sent_this_slot = 0  # 1 while we have fired for the current window
    while True:
        now = datetime.datetime.now()
        if exec_time < now < exec_time + datetime.timedelta(seconds=1):
            send_to()
            time.sleep(1)  # step past the 1-second firing window
            sent_this_slot = 1
        else:
            if sent_this_slot == 1:
                # BUG FIX: the original computed
                # `exec_time + datetime.timedelta(hours=1)` but discarded
                # the result, so the task was never rescheduled.
                exec_time = exec_time + datetime.timedelta(hours=1)
                sent_this_slot = 0
            # Avoid a 100% CPU busy-wait; 0.2s polling still guarantees
            # we observe the 1-second-wide firing window.
            time.sleep(0.2)
def send_to():
    """Send a wake-up message to a specific WeChat friend via itchat."""
    # Find the friend to send to (matched by remark/nickname).
    users = itchat.search_friends(name="忘")
    print(users)
    # Extract UserName, which is the address used when sending messages.
    userName = users[0]['UserName']
    itchat.send("起床了", toUserName=userName) # leaving toUserName empty sends to yourself
    # itchat.send("@fil@%s" % './file/test.text', toUserName=userName) # send a file
    # itchat.send("@img@%s" % './img/0.jpg', toUserName=userName) # send an image
    # itchat.send("@vid@%s" % '/video/test.mp4', toUserName=userName) # send a video
    print('发送成功!')
if __name__ == '__main__':
    # Log in by scanning the QR code that pops up; hotReload caches the
    # session so subsequent runs do not require re-scanning.
    itchat.auto_login(hotReload=True)
    # First scheduled send time (hard-coded).
    exec_time = datetime.datetime(2017, 11, 19, 22, 50, 10)
    print('任务将执行于 {0}'.format(exec_time))
    timer_handle(exec_time)
| StarcoderdataPython |
4040 | <reponame>RobotLocomotion/drake-python3.7
import numpy as np
from pydrake.common.value import AbstractValue
from pydrake.math import RigidTransform
from pydrake.perception import BaseField, Fields, PointCloud
from pydrake.systems.framework import LeafSystem
def _TransformPoints(points_Ci, X_CiSi):
# Make homogeneous copy of points.
points_h_Ci = np.vstack((points_Ci,
np.ones((1, points_Ci.shape[1]))))
return X_CiSi.dot(points_h_Ci)[:3, :]
def _TileColors(color, dim):
# Need manual broadcasting.
return np.tile(np.array([color]).T, (1, dim))
def _ConcatenatePointClouds(points_dict, colors_dict):
scene_points = None
scene_colors = None
for id in points_dict:
if scene_points is None:
scene_points = points_dict[id]
else:
scene_points = np.hstack((points_dict[id], scene_points))
if scene_colors is None:
scene_colors = colors_dict[id]
else:
scene_colors = np.hstack((colors_dict[id], scene_colors))
valid_indices = np.logical_not(np.isnan(scene_points))
scene_points = scene_points[:, valid_indices[0, :]]
scene_colors = scene_colors[:, valid_indices[0, :]]
return scene_points, scene_colors
class PointCloudConcatenation(LeafSystem):
    """
    .. pydrake_system::

        name: PointCloudConcatenation
        input_ports:
        - point_cloud_CiSi_id0
        - X_FCi_id0
        - ...
        - point_cloud_CiSi_idN
        - X_FCi_idN
        output_ports:
        - point_cloud_FS
    """

    def __init__(self, id_list, default_rgb=[255., 255., 255.]):
        """
        A system that takes in N point clouds of points Si in frame Ci, and N
        RigidTransforms from frame Ci to F, to put each point cloud in a common
        frame F. The system returns one point cloud combining all of the
        transformed point clouds. Each point cloud must have XYZs. RGBs are
        optional. If absent, those points will be the provided default color.

        @param id_list A list containing the string IDs of all of the point
            clouds. This is often the serial number of the camera they came
            from, such as "1" for a simulated camera or "805212060373" for a
            real camera.
        @param default_rgb A list of length 3 containing the RGB values to use
            in the absence of PointCloud.rgbs. Values should be between 0 and
            255. The default is white.
        """
        # NOTE: the mutable default argument is only ever read (copied into
        # an np.array below), so it is not mutated across instances.
        LeafSystem.__init__(self)
        self._point_cloud_ports = {}
        self._transform_ports = {}
        self._id_list = id_list
        self._default_rgb = np.array(default_rgb)
        output_fields = Fields(BaseField.kXYZs | BaseField.kRGBs)
        # Declare one point-cloud input and one transform input per camera id.
        for id in self._id_list:
            self._point_cloud_ports[id] = self.DeclareAbstractInputPort(
                "point_cloud_CiSi_{}".format(id),
                AbstractValue.Make(PointCloud(fields=output_fields)))
            self._transform_ports[id] = self.DeclareAbstractInputPort(
                "X_FCi_{}".format(id),
                AbstractValue.Make(RigidTransform.Identity()))
        self.DeclareAbstractOutputPort("point_cloud_FS",
                                       lambda: AbstractValue.Make(
                                           PointCloud(fields=output_fields)),
                                       self.DoCalcOutput)

    def _AlignPointClouds(self, context):
        """Evaluate all input ports, transform each cloud into frame F, and
        return the merged (points, colors) arrays."""
        points = {}
        colors = {}
        for id in self._id_list:
            point_cloud = self.EvalAbstractInput(
                context, self._point_cloud_ports[id].get_index()).get_value()
            X_CiSi = self.EvalAbstractInput(
                context, self._transform_ports[id].get_index()).get_value()
            points[id] = _TransformPoints(
                point_cloud.xyzs(), X_CiSi.GetAsMatrix4())
            # Fall back to the default color when the cloud carries no RGBs.
            if point_cloud.has_rgbs():
                colors[id] = point_cloud.rgbs()
            else:
                colors[id] = _TileColors(
                    self._default_rgb, point_cloud.xyzs().shape[1])
        return _ConcatenatePointClouds(points, colors)

    def DoCalcOutput(self, context, output):
        """Output-port calc callback: write the merged cloud into `output`."""
        scene_points, scene_colors = self._AlignPointClouds(context)
        output.get_mutable_value().resize(scene_points.shape[1])
        output.get_mutable_value().mutable_xyzs()[:] = scene_points
        output.get_mutable_value().mutable_rgbs()[:] = scene_colors
| StarcoderdataPython |
1615049 | <filename>gazoo_device/primary_devices/esp32_matter_locking.py
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Device class for ESP32 M5Stack locking device."""
from gazoo_device import decorators
from gazoo_device import detect_criteria
from gazoo_device import gdm_logger
from gazoo_device.base_classes import esp32_matter_device
from gazoo_device.capabilities import pwrpc_lock_default
from gazoo_device.protos import device_service_pb2
from gazoo_device.protos import locking_service_pb2
from gazoo_device.utility import pwrpc_utils
# Module-level logger shared by this device class.
logger = gdm_logger.get_logger()
class Esp32MatterLocking(esp32_matter_device.Esp32MatterDevice):
  """Device class for Esp32MatterLocking devices.

  Matter locking application running on the Espressif ESP32 M5Stack platform:
  https://github.com/project-chip/connectedhomeip/tree/master/examples/lock-app/esp32
  """
  # Pigweed/serial properties used to recognize this device during detection.
  DETECT_MATCH_CRITERIA = {
      detect_criteria.PigweedQuery.product_name:
          "cp2104 usb to uart bridge controller",
      detect_criteria.PigweedQuery.manufacturer_name: r"silicon(_| )labs",
      detect_criteria.PigweedQuery.app_type:
          pwrpc_utils.PigweedAppType.LOCKING.value,
  }
  # GDM device-type identifier for this class.
  DEVICE_TYPE = "esp32matterlocking"
  _OWNER_EMAIL = "<EMAIL>"
  # Protobuf modules and serial settings used by the Pigweed RPC transport.
  _COMMUNICATION_KWARGS = {"protobufs": (locking_service_pb2,
                                         device_service_pb2,),
                           "baudrate": esp32_matter_device.BAUDRATE}

  @decorators.CapabilityDecorator(pwrpc_lock_default.PwRPCLockDefault)
  def pw_rpc_lock(self):
    """PwRPCLock instance to send RPC commands."""
    # lazy_init constructs the capability on first access and caches it.
    return self.lazy_init(
        pwrpc_lock_default.PwRPCLockDefault,
        device_name=self.name,
        switchboard_call=self.switchboard.call)
| StarcoderdataPython |
131099 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, models, transforms
class STN3D(nn.Module):
    """Spatial transformer (PointNet T-Net) predicting an
    input_channels x input_channels alignment matrix for a point cloud.

    Input:  (B, input_channels, N) point features.
    Output: (B, input_channels, input_channels) transform matrices.
    """

    def __init__(self, input_channels=3):
        super(STN3D, self).__init__()
        self.input_channels = input_channels
        # Shared per-point MLP implemented with 1x1 convolutions.
        self.mlp1 = nn.Sequential(
            nn.Conv1d(input_channels, 64, 1),
            nn.BatchNorm1d(64),
            nn.ReLU(),
            nn.Conv1d(64, 128, 1),
            nn.BatchNorm1d(128),
            nn.ReLU(),
            nn.Conv1d(128, 1024, 1),
            nn.BatchNorm1d(1024),
            nn.ReLU(),
        )
        # Regressor from the global feature to the flattened matrix.
        self.mlp2 = nn.Sequential(
            nn.Linear(1024, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(),
            nn.Linear(256, input_channels * input_channels),
        )

    def forward(self, x):
        features = self.mlp1(x)
        # Symmetric max over the point dimension -> per-cloud global feature.
        global_feature = torch.max(features, dim=2).values
        matrix = self.mlp2(global_feature)
        # Bias towards identity so training starts near a no-op transform.
        identity = torch.eye(self.input_channels, device=x.device).reshape(-1)
        matrix = matrix + identity
        return matrix.view(-1, self.input_channels, self.input_channels)
class PointNetEncoder(nn.Module):
    """PointNet global-feature encoder: (B, N, C) points -> (B, embedding_size)."""

    def __init__(self, embedding_size, input_channels=3):
        super(PointNetEncoder, self).__init__()
        self.input_channels = input_channels
        # NOTE: the spatial transformers are instantiated but never called
        # in forward(); they are kept to preserve the original interface
        # and parameter registration order.
        self.stn1 = STN3D(input_channels)
        self.stn2 = STN3D(64)
        self.mlp1 = nn.Sequential(
            nn.Conv1d(input_channels, 64, 1),
            nn.BatchNorm1d(64),
            nn.ReLU(),
            nn.Conv1d(64, 64, 1),
            nn.BatchNorm1d(64),
            nn.ReLU(),
        )
        self.mlp2 = nn.Sequential(
            nn.Conv1d(64, 64, 1),
            nn.BatchNorm1d(64),
            nn.ReLU(),
            nn.Conv1d(64, 128, 1),
            nn.BatchNorm1d(128),
            nn.ReLU(),
            nn.Conv1d(128, 1024, 1),
            nn.BatchNorm1d(1024),
            nn.ReLU(),
        )
        self.fc = nn.Linear(1024, embedding_size)

    def forward(self, x):
        # Keep only the first input_channels features, then move channels
        # to dim 1 for Conv1d: (B, N, C) -> (B, C, N).
        points = x[:, :, : self.input_channels].transpose(2, 1)
        features = self.mlp2(self.mlp1(points))
        # Symmetric max over points -> order-invariant global feature.
        global_feature = torch.max(features, dim=2).values
        return self.fc(global_feature)
class TargetEncoder(nn.Module):
    """Point-cloud encoder for targets: (B, N, C) -> (B, embedding_size).

    NOTE: structurally identical to PointNetEncoder; kept as a separate
    class so the two encoders can be trained with independent weights.
    """

    def __init__(self, embedding_size, input_channels=3):
        super(TargetEncoder, self).__init__()
        self.input_channels = input_channels
        # Instantiated but unused in forward() — preserved for interface
        # and parameter-registration compatibility.
        self.stn1 = STN3D(input_channels)
        self.stn2 = STN3D(64)
        self.mlp1 = nn.Sequential(
            nn.Conv1d(input_channels, 64, 1),
            nn.BatchNorm1d(64),
            nn.ReLU(),
            nn.Conv1d(64, 64, 1),
            nn.BatchNorm1d(64),
            nn.ReLU(),
        )
        self.mlp2 = nn.Sequential(
            nn.Conv1d(64, 64, 1),
            nn.BatchNorm1d(64),
            nn.ReLU(),
            nn.Conv1d(64, 128, 1),
            nn.BatchNorm1d(128),
            nn.ReLU(),
            nn.Conv1d(128, 1024, 1),
            nn.BatchNorm1d(1024),
            nn.ReLU(),
        )
        self.fc = nn.Linear(1024, embedding_size)

    def forward(self, x):
        # (B, N, C) -> keep first input_channels -> (B, C, N) for Conv1d.
        points = x[:, :, : self.input_channels].transpose(2, 1)
        features = self.mlp2(self.mlp1(points))
        global_feature = torch.max(features, dim=2).values
        return self.fc(global_feature)
class Classification_Layer(nn.Module):
    """Single linear layer + log-softmax classification head.

    Parameters:
        input_dim (int): size of the incoming feature vector.
        num_class (int): number of output classes.
        use_bn (bool): if True, create a BatchNorm1d that forward() can
            apply (with use_bn=True) to the linear output.
    """

    def __init__(self, input_dim, num_class, use_bn=False):
        super(Classification_Layer, self).__init__()
        self.input_dim = input_dim
        self.fc1 = nn.Linear(input_dim, num_class)
        if use_bn:
            # BUG FIX: the original referenced the undefined name
            # `intermediate_layer` here, raising NameError whenever
            # use_bn=True. fc1 outputs num_class features, so that is
            # the correct BatchNorm width.
            self.bn1 = nn.BatchNorm1d(num_class)

    def forward(self, x, use_bn=False):
        """Return per-class log-probabilities of shape (B, num_class)."""
        if use_bn:
            x = F.relu(self.bn1(self.fc1(x)))
        else:
            x = self.fc1(x)
        return F.log_softmax(x, dim=1)
class ParamDecoder(nn.Module):
    """Two-layer MLP decoder: input_dim -> intermediate_layer -> embedding_size."""

    def __init__(self, input_dim, intermediate_layer, embedding_size, use_bn=False):
        super(ParamDecoder, self).__init__()
        self.input_dim = input_dim
        self.fc1 = nn.Linear(input_dim, intermediate_layer)
        self.fc2 = nn.Linear(intermediate_layer, embedding_size)
        if use_bn:
            self.bn1 = nn.BatchNorm1d(intermediate_layer)

    def forward(self, x, use_bn=False):
        # NOTE: the ReLU runs only on the BatchNorm path, mirroring the
        # original behavior (the plain path is purely linear).
        hidden = F.relu(self.bn1(self.fc1(x))) if use_bn else self.fc1(x)
        return self.fc2(hidden)
class ParamDecoder2(nn.Module):
    """Three-layer MLP decoder: input_dim -> 512 -> intermediate_layer -> embedding_size."""

    def __init__(self, input_dim, intermediate_layer, embedding_size, use_bn=False):
        super(ParamDecoder2, self).__init__()
        self.input_dim = input_dim
        self.fc1 = nn.Linear(input_dim, 512)
        self.fc2 = nn.Linear(512, intermediate_layer)
        self.fc3 = nn.Linear(intermediate_layer, embedding_size)
        if use_bn:
            self.bn1 = nn.BatchNorm1d(512)
            self.bn2 = nn.BatchNorm1d(intermediate_layer)

    def forward(self, x, use_bn=False):
        # NOTE: ReLUs run only on the BatchNorm path, mirroring the
        # original behavior (the plain path is purely linear).
        if use_bn:
            x = F.relu(self.bn1(self.fc1(x)))
            x = F.relu(self.bn2(self.fc2(x)))
        else:
            x = self.fc2(self.fc1(x))
        return self.fc3(x)
class LatentDecoder(nn.Module):
    """Decode a feature vector into (mu, sigma) heads for a latent Gaussian."""

    def __init__(self, input_dim, embedding_size):
        super(LatentDecoder, self).__init__()
        self.input_dim = input_dim
        self.fc1 = nn.Linear(input_dim, 256)
        self.bn1 = nn.BatchNorm1d(256)
        self.fc_mu = nn.Linear(256, embedding_size)
        self.fc_sigma = nn.Linear(256, embedding_size)

    def forward(self, x, use_bn=False):
        # NOTE: BatchNorm is always applied here; the use_bn flag is
        # accepted but ignored (same as the original implementation).
        hidden = F.relu(self.bn1(self.fc1(x)))
        return self.fc_mu(hidden), self.fc_sigma(hidden)
class TargetDecoder(nn.Module):
    """Decode an embedding into an (N, num_points, 3) point cloud."""

    def __init__(self, input_dim, num_points):
        super(TargetDecoder, self).__init__()
        self.input_dim = input_dim
        self.num_points = num_points
        self.fc1 = nn.Linear(input_dim, 1024)
        self.fc2 = nn.Linear(1024, 1024)
        self.fc3 = nn.Linear(1024, num_points * 3)

    def forward(self, x):
        # Three stacked linear layers with no nonlinearities in between
        # (matching the original implementation), then reshape to points.
        out = self.fc3(self.fc2(self.fc1(x)))
        return out.view(-1, self.num_points, 3)
class ECCV(nn.Module):
    """PointNet-style variational encoder producing (mu, sigma) heads.

    Input:  (B, N, C) point clouds. Output: two (B, embedding_size) tensors.
    """

    def __init__(self, embedding_size, input_channels=3):
        super(ECCV, self).__init__()
        self.input_channels = input_channels
        # Instantiated but unused in forward() — preserved for interface
        # and parameter-registration compatibility.
        self.stn1 = STN3D(input_channels)
        self.stn2 = STN3D(64)
        self.mlp1 = nn.Sequential(
            nn.Conv1d(input_channels, 64, 1),
            nn.BatchNorm1d(64),
            nn.ReLU(),
            nn.Conv1d(64, 64, 1),
            nn.BatchNorm1d(64),
            nn.ReLU(),
        )
        self.mlp2 = nn.Sequential(
            nn.Conv1d(64, 64, 1),
            nn.BatchNorm1d(64),
            nn.ReLU(),
            nn.Conv1d(64, 128, 1),
            nn.BatchNorm1d(128),
            nn.ReLU(),
            nn.Conv1d(128, 1024, 1),
            nn.BatchNorm1d(1024),
            nn.ReLU(),
        )
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc_mu = nn.Linear(256, embedding_size)
        self.fc_sigma = nn.Linear(256, embedding_size)

    def forward(self, x):
        # (B, N, C) -> keep first input_channels -> (B, C, N) for Conv1d.
        points = x[:, :, : self.input_channels].transpose(2, 1)
        features = self.mlp2(self.mlp1(points))
        # Symmetric max over points -> global feature, then two heads.
        global_feature = torch.max(features, dim=2).values
        hidden = self.fc2(self.fc1(global_feature))
        return self.fc_mu(hidden), self.fc_sigma(hidden)
### For Images ###
#
def set_parameter_requires_grad(model, is_fixed):
    """Freeze a ResNet-style backbone except its final residual stage.

    When is_fixed is True every parameter is frozen, then the two
    sub-blocks model.layer4[0] and model.layer4[1] are re-enabled for
    training. When is_fixed is False the model is left untouched.
    """
    if not is_fixed:
        return
    for param in model.parameters():
        param.requires_grad = False
    # Keep layer4 (the final residual stage) trainable.
    for block_index in (0, 1):
        for _name, param in model.layer4[block_index].named_parameters():
            param.requires_grad = True
class ImageEncoder(nn.Module):
    """ResNet18-based image encoder producing a fixed-size embedding.

    Parameters:
        embedding_size (int): dimension of the output embedding.
        is_fixed (bool): if True, freeze the backbone except layer4
            (see set_parameter_requires_grad).
        use_pretrained (bool): load pretrained ImageNet weights when True.
    """

    def __init__(self, embedding_size, is_fixed, use_pretrained=True):
        super(ImageEncoder, self).__init__()
        backbone = models.resnet18(pretrained=use_pretrained)
        # Choose which parameters stay trainable.
        set_parameter_requires_grad(backbone, is_fixed)
        # Replace the classification head with an embedding projection.
        in_features = backbone.fc.in_features
        backbone.fc = nn.Linear(in_features, embedding_size)
        self.input_size = 224  # expected input resolution for ResNet18
        self.model = backbone
        # ###Debug
        # for name, param in backbone.named_parameters():
        #     print(name, param.requires_grad)

    def forward(self, x):
        return self.model(x)
| StarcoderdataPython |
1773945 | from typing import Iterable, Iterator, List, Optional, Tuple
from pypict import capi
class Task:
    """Wrapper around a PICT task handle; owns a root _Model."""

    def __init__(self, seed: Optional[int] = None):
        self.handle = capi.createTask()
        self._model = _Model(seed)
        capi.setRootModel(self.handle, self._model.handle)

    def __del__(self) -> None:
        # Release the native task handle (0 means "no handle").
        if self.handle != 0:
            capi.deleteTask(self.handle)

    @property
    def model(self) -> '_Model':
        """The root _Model attached to this task."""
        return self._model

    def add_exclusion(self, items: Iterable[Tuple[int, int]]) -> None:
        """Forbid the given (parameter, value) combination from results."""
        capi.addExclusion(self.handle, tuple(items))

    def add_seed(self, items: Iterable[Tuple[int, int]]) -> None:
        """Seed the generation with a required (parameter, value) combination."""
        capi.addSeed(self.handle, tuple(items))

    def generate(self) -> '_ResultSet':
        """Run generation and return an iterable over the result rows."""
        capi.generate(self.handle)
        return _ResultSet(self)

    def get_total_parameter_count(self) -> int:
        """Total number of parameters across the task's models."""
        return capi.getTotalParameterCount(self.handle)
class _Model:
    """Wrapper around a PICT model handle.

    Ownership: a model deletes its native handle on __del__ only while
    self._owned is True; attach_child_model transfers ownership of the
    child handle to the native parent model.
    """

    def __init__(self, seed: Optional[int] = None):
        if seed is None:
            seed = capi.DEFAULT_RANDOM_SEED
        self.handle = capi.createModel(seed)
        self._owned = True

    def __del__(self) -> None:
        if self.handle != 0 and self._owned:
            capi.deleteModel(self.handle)

    def add_parameter(
            self,
            count: int,
            order: int = capi.PAIRWISE_GENERATION,
            weights: Optional[Iterable[int]] = None) -> int:
        """Add a parameter with `count` values; returns the native result."""
        if weights is not None:
            # The C API expects a concrete sequence, not a lazy iterable.
            weights = tuple(weights)
        return capi.addParameter(self.handle, count, order, weights)

    def attach_child_model(
            self,
            order: int,
            seed: Optional[int] = None) -> '_Model':
        """Create a sub-model attached to this model and return it."""
        if seed is None:
            seed = capi.DEFAULT_RANDOM_SEED
        childModel = _Model(seed)
        capi.attachChildModel(self.handle, childModel.handle, order)
        # The native parent now owns the child's handle; prevent the
        # Python wrapper from double-freeing it.
        childModel._owned = False
        return childModel
class _ResultSet:
    """Iterable over the rows produced by Task.generate()."""

    def __init__(self, task: Task):
        self._task = task

    def __iter__(self) -> Iterator[List[int]]:
        capi.resetResultFetching(self._task.handle)
        buf = capi.allocateResultBuffer(self._task.handle)
        try:
            while True:
                remaining = capi.getNextResultRow(self._task.handle, buf)
                # 0 remaining rows signals the end of the result set.
                if remaining == 0:
                    break
                # Copy the reused buffer into a fresh list per row.
                yield list(buf)
        finally:
            # Always release the native buffer, even if iteration is
            # abandoned early or an error occurs.
            capi.freeResultBuffer(buf)
| StarcoderdataPython |
154954 | <gh_stars>0
# Copyright (c) 2019 <NAME>.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import Any, Dict, TYPE_CHECKING
from . import VersionUpgrade43to44
if TYPE_CHECKING:
    from UM.Application import Application
# Single shared upgrader instance used by both getMetaData() and register().
upgrade = VersionUpgrade43to44.VersionUpgrade43to44()
def getMetaData() -> Dict[str, Any]:
    """Metadata for the 4.3 -> 4.4 version-upgrade plugin.

    "version_upgrade" maps each (container type, old version) pair to its
    new version plus the upgrade function; "sources" tells the framework
    where each container type lives on disk and how to read its version.
    """
    return {
        "version_upgrade": {
            # From To Upgrade function
            ("preferences", 6000009): ("preferences", 6000010, upgrade.upgradePreferences),
            ("machine_stack", 4000009): ("machine_stack", 4000010, upgrade.upgradeStack),
            ("extruder_train", 4000009): ("extruder_train", 4000010, upgrade.upgradeStack),
            ("definition_changes", 4000009): ("definition_changes", 4000010, upgrade.upgradeInstanceContainer),
            ("quality_changes", 4000009): ("quality_changes", 4000010, upgrade.upgradeInstanceContainer),
            ("quality", 4000009): ("quality", 4000010, upgrade.upgradeInstanceContainer),
            ("user", 4000009): ("user", 4000010, upgrade.upgradeInstanceContainer),
        },
        "sources": {
            "preferences": {
                "get_version": upgrade.getCfgVersion,
                "location": {"."}
            },
            "machine_stack": {
                "get_version": upgrade.getCfgVersion,
                "location": {"./machine_instances"}
            },
            "extruder_train": {
                "get_version": upgrade.getCfgVersion,
                "location": {"./extruders"}
            },
            "definition_changes": {
                "get_version": upgrade.getCfgVersion,
                "location": {"./definition_changes"}
            },
            "quality_changes": {
                "get_version": upgrade.getCfgVersion,
                "location": {"./quality_changes"}
            },
            "quality": {
                "get_version": upgrade.getCfgVersion,
                "location": {"./quality"}
            },
            "user": {
                "get_version": upgrade.getCfgVersion,
                "location": {"./user"}
            }
        }
    }
def register(app: "Application") -> Dict[str, Any]:
    """Register the shared upgrade object with the application framework."""
    return {"version_upgrade": upgrade}
| StarcoderdataPython |
3353362 | import os
import operator
import json
import requests
import graphql_queries
PATH_TO_DATA = "_data"
# Credentials come from the environment; os.environ[...] raises KeyError
# at import time if either variable is unset.
GITHUB_USERNAME = os.environ["GH_USERNAME"]
GITHUB_OAUTH_TOKEN = os.environ["OAUTH_TOKEN"]
GITHUB_API_ENDPOINT = "https://api.github.com/graphql"
print("LOG: Assuming the current path to be the root of the metrics repository.")
# Placeholders shown until real org stats are fetched below.
SVG_NO_OF_MEMBERS = 'N/A'
SVG_NO_OF_REPOS = 'N/A'
def fetch_one_page(query_string, variables):
    """
    Request one page from the GitHub GraphQL API.

    query_string -- the GraphQL query text.
    variables -- JSON-encoded string of GraphQL variables.
    Returns the decoded JSON response; raises Exception on any non-200 status.
    """
    headers = {
        "Content-Type": "application/json",
    }
    # Bug fix: ``headers`` was previously built but never passed to the
    # request, so the Content-Type header was silently dropped.
    r = requests.post(GITHUB_API_ENDPOINT, json={"query": query_string, "variables": variables}, headers=headers, auth=(GITHUB_USERNAME, GITHUB_OAUTH_TOKEN))
    if r.status_code == 200:
        return r.json()
    else:
        raise Exception("Error in GitHub API query. Status Code : {}, Response: {}".format(r.status_code, r.json()))
all_org_edges = [] # All the repos in the org with their stats
# Read repos-to-include.txt: "owner/*" tracks a whole org, "owner/repo" one repo.
all_orgs = [] # Track orgs and all its repos e.g. twitter, twitter
all_repos = [] # Track specific repositories e.g. ('pantsbuild', 'pants')
with open("repos-to-include.txt", "r") as f:
	for line in f:
		owner, repo = line.split("/")
		repo = repo.rstrip("\n")
		if repo == "*":
			all_orgs.append(owner)
		else:
			all_repos.append((owner, repo))
print("LOG: Orgs to track", all_orgs)
print("Repos to track", all_repos)
# Page through every tracked org's repository list via the GraphQL cursor API.
for org in all_orgs:
	# Combine the paginated responses from the API
	has_next_page = False
	end_cursor = None
	num_of_pages = 0
	while True:
		print("Num of pages", num_of_pages)
		variables = json.dumps({"owner": org, "endCursor": end_cursor})
		print("Sending request for", org)
		response = fetch_one_page(graphql_queries.org_all_repos, variables)
		print("Received request for", org)
		# The member count shown in the SVG is taken from the 'twitter' org only.
		if org == 'twitter':
			SVG_NO_OF_MEMBERS = response["data"]["organization"]["membersWithRole"]["totalCount"]
		repository_edges = response["data"]["organization"]["repositories"]["edges"]
		all_org_edges.extend(repository_edges)
		pageInfo = response["data"]["organization"]["repositories"]["pageInfo"]
		has_next_page = pageInfo["hasNextPage"]
		print("has_next_page", has_next_page)
		end_cursor = pageInfo["endCursor"]
		print("end_cursor", end_cursor)
		num_of_pages += 1
		if not has_next_page:
			break
print("LOG: Fetched all the org repositories. Count:", len(all_org_edges))
# print("LOG: First record")
# print(all_org_edges[0])
# Fetch individual repositories' data
all_repo_edges = [] # All individual repos
for repo in all_repos:
	variables = json.dumps({"owner": repo[0], "repo": repo[1], "endCursor": None})
	response = fetch_one_page(graphql_queries.repo_wise, variables)
	all_repo_edges.append(response["data"])
print("LOG: Fetched all the individual repos as well. Count:", len(all_repo_edges))
# Repos to exclude (one "owner/repo" full name per line).
repos_to_exclude = set()
with open("repos-to-exclude.txt", "r") as f:
	for line in f:
		repo = line.rstrip("\n")
		repos_to_exclude.add(repo)
print("LOG: Removing private repositories")
public_repos = []
for edge in all_org_edges:
	if not edge["node"]["isPrivate"]:
		public_repos.append(edge)
# Individually-fetched repos use a different response shape; normalise to {'node': ...}.
for edge in all_repo_edges:
	if not edge["repository"]["isPrivate"]:
		public_repos.append({'node': edge['repository']})
SVG_NO_OF_REPOS = len(public_repos)
print("LOG: Number of public repos", len(public_repos))
DATA_JSON = {}
for repo in public_repos:
	repo_full_name = repo["node"]["nameWithOwner"]
	if repo_full_name in repos_to_exclude:
		print("LOG: Excluding", repo_full_name)
		continue
	DATA_JSON[repo_full_name] = repo["node"]
	# Flatten list of languages
	languages_dict = {}
	for item in DATA_JSON[repo_full_name]["languages"]["edges"]:
		languages_dict[item["node"]["name"]] = item["size"]
	# NOTE(review): if a repo reports no language bytes, total_bytes is 0 and the
	# division below raises ZeroDivisionError — confirm whether that can happen.
	total_bytes = sum(languages_dict.values())
	for lang in languages_dict:
		languages_dict[lang] /= total_bytes # This is got to be a float, so use Python 3
	# Use languages which have more than 5% code
	languages = []
	for item, value in languages_dict.items():
		if value > 0.05:
			languages.append(item)
	DATA_JSON[repo_full_name]["languages"] = " ".join(languages)
	# Flatten list of repository topics
	_topics = DATA_JSON[repo_full_name]["repositoryTopics"]["edges"]
	topics = []
	for item in _topics:
		topics.append(item["node"]["topic"]["name"])
	DATA_JSON[repo_full_name]["repositoryTopics"] = " ".join(topics)
	# Flatten stars count and watch count
	DATA_JSON[repo_full_name]["stargazers"] = DATA_JSON[repo_full_name]["stargazers"]["totalCount"]
	DATA_JSON[repo_full_name]["watchers"] = DATA_JSON[repo_full_name]["watchers"]["totalCount"]
# Save to _data directory
file_path = PATH_TO_DATA + "/" + "projects.json"
with open(file_path, "w+") as f:
	json.dump(DATA_JSON, f)
print("LOG: Saved to", file_path)
# Update the SVG: substitute the member/repo counts into the raw template.
print("No of members", SVG_NO_OF_MEMBERS)
print("No of repos", SVG_NO_OF_REPOS)
# NOTE(review): this file handle is never closed explicitly.
network_svg = open("assets/network_raw.svg").read()
network_svg = network_svg.replace("{$members}", str(SVG_NO_OF_MEMBERS))
network_svg = network_svg.replace("{$Repos}", str(SVG_NO_OF_REPOS))
with open("assets/network.svg", "w+") as f:
	f.write(network_svg)
print("LOG: assets/network.svg updated!")
4839416 | <reponame>fjsaezm/mcd-maaa<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>
Color changes made by: <NAME>
"""
import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score, accuracy_score
from matplotlib import pyplot as plt, colors, lines
from sklearn.linear_model import LinearRegression, Perceptron, Ridge
# For coloring (https://coolors.co/4effef-f6ae2d-f26419-6f8f72-504b43)
c1 = '#4EFFEF'  # colour used for the negative / non-"1" class
c2 = '#F6AE2D'  # colour used for the positive class
def plot_dataset(x, y):
    """Scatter-plot a regression dataset; one subplot per feature when x is 2-D."""
    if (len(x.shape) == 1):
        plt.plot(x, y, "*")
        plt.xlabel("$x$")
        plt.ylabel("$y$")
        plt.title("Data")
    else:
        n_plot = x.shape[1]
        fig, axs = plt.subplots(ncols=n_plot, sharey=True)
        for i in range(n_plot):
            ax = axs[i]
            ax.plot(x[:, i], y, "*")
            ax.set_xlabel("$x_{%d}$" % (i + 1))
            if (i == 0):
                ax.set_ylabel("$y$")
        plt.suptitle("Data")
def plot_linear_model(x, y_r, w_e, b_e, w_r=None, b_r=None):
    """Plot a fitted linear model.

    For 1-D input: observations, prediction line, dotted per-point residuals
    and optionally the true line (w_r, b_r).  For multi-D input: a bar chart
    of estimated (and optionally real) coefficients instead.  Titles report
    MSE, MAE and R2.
    """
    if (np.isscalar(w_e) or (len(w_e) == 1)):
        y_p = w_e * x + b_e
        plt.plot(x, y_r, "*", label="Obs.")
        plt.plot(x, y_p, "-", label="Pred")
        # Dotted segments connect each prediction to its observation (residual).
        for i in range(len(x)):
            plt.plot([x[i].item(), x[i].item()], [y_p[i].item(), y_r[i].item()], ":k")
        if (w_r is not None) and (b_r is not None):
            plt.plot(x, w_r * x + b_r, "--k", label="Real")
        plt.legend()
        plt.xlabel("$x$")
        plt.ylabel("$y$")
        plt.title("$y = %.2f x + %.2f$ (MSE: %.2f, MAE: %.2f, R2: %.2f)" % (w_e, b_e, mean_squared_error(y_r, y_p), mean_absolute_error(y_r, y_p), r2_score(y_r, y_p)))
    else:
        y_p = x @ w_e + b_e
        pos = np.arange(len(w_e) + 1)
        plt.bar(pos, np.append(w_e, b_e), alpha=0.5, label="Est.")
        if (w_r is not None) and (b_r is not None):
            plt.bar(pos, np.append(w_r, b_r), alpha=0.5, label="Real")
        plt.legend()
        plt.grid()
        labels = []
        for i in range(len(w_e)):
            labels.append("$w_%d$" % (i + 1))
        labels.append("$b$")
        plt.xticks(pos, labels)
        plt.title("MSE: %.2f, MAE: %.2f, R2: %.2f" % (mean_squared_error(y_r, y_p), mean_absolute_error(y_r, y_p), r2_score(y_r, y_p)))
def evaluate_linear_model(x_tr, y_tr_r, x_te, y_te_r, w, b, plot=False):
    """Print a train/test table of MSE, MAE and R2 for the linear model (w, b).

    When ``plot`` is True, also draws the model on the test data.
    """
    if (np.isscalar(w)):
        y_tr_p = w * x_tr + b
        y_te_p = w * x_te + b
    else:
        y_tr_p = x_tr @ w.ravel() + b
        y_te_p = x_te @ w.ravel() + b
    er_tr = [mean_squared_error(y_tr_r, y_tr_p), mean_absolute_error(y_tr_r, y_tr_p), r2_score(y_tr_r, y_tr_p)]
    er_te = [mean_squared_error(y_te_r, y_te_p), mean_absolute_error(y_te_r, y_te_p), r2_score(y_te_r, y_te_p)]
    ers = [er_tr, er_te]
    headers=["MSE", "MAE", "R2"]
    print("%10s" % "", end="")
    for h in headers:
        print("%10s" % h, end="")
    print("")
    headersc = ["Train", "Test"]
    cnt = 0
    for er in ers:
        hc = headersc[cnt]
        cnt = cnt + 1
        print("%10s" % hc, end="")
        for e in er:
            print("%10.2f" % e, end="")
        print("")
    if plot:
        plot_linear_model(x_te, y_te_r, w.ravel(), b)
def plot_dataset_clas(x, y):
    """Scatter-plot a classification dataset (1-D: x vs label; 2-D: coloured classes)."""
    if (len(x.shape) == 1):
        plt.plot(x, y, "*")
        plt.xlabel("$x$")
        clas = np.unique(y)
        plt.yticks(clas)
    else:
        # NOTE(review): this evaluates len(np.unique(y) == 2) — the length of a
        # boolean array, truthy for any non-empty y — not a two-class test.
        # Probably intended len(np.unique(y)) == 2; confirm before changing.
        if (len(np.unique(y) == 2)):
            ind = y == 1
            plt.scatter(x[ind, 0], x[ind, 1], c=c1, zorder=100)
            ind = y != 1
            plt.scatter(x[ind, 0], x[ind, 1], c=c2, zorder=100)
        plt.xlabel("$x_1$")
        plt.ylabel("$x_2$")
        plt.axis("equal")
        plt.title("Data")
def order_points(points):
    """Order 2-D points angularly around their centroid and close the loop.

    Returns [] for empty input; otherwise an array with the first point
    repeated at the end so the outline can be drawn as a closed polygon.
    """
    if len(points) == 0:
        return []
    centroid = points.mean(axis=0)
    offsets = points - centroid
    angle = np.arctan2(offsets[:, 0], offsets[:, 1])
    ordered = points[np.argsort(angle)]
    return np.vstack((ordered, ordered[:1]))
def plot_linear_model_clas(x, y_r, w, b):
    """Plot a 2-D linear classifier: data, separating line and shaded class regions.

    x -- (n, 2) inputs; y_r -- true labels; w, b -- hyperplane parameters.
    Raises ValueError for anything other than two-dimensional inputs.
    """
    if (len(x.shape) == 1) or (x.shape[1] != 2):
        # Bug fix: the message previously read "only_r ...", an artefact of a
        # variable rename (y -> y_r) applied inside the string literal.
        raise ValueError("only two-dimensional problems can be represented")
    y_p = np.sign(x @ w + b)
    plot_dataset_clas(x, y_r)
    ax = plt.axis("equal")
    # Pad the axis limits far out so the filled half-planes cover the view.
    lims = np.array([ax[0] - 100, ax[1] + 100, ax[2] - 100, ax[3] + 100])
    if (w[1] != 0):
        x1 = lims[0:2]
        x2 = - (w[0] * x1 + b) / w[1]
    else:
        x2 = lims[2:]
        x1 = - (w[1] * x2 + b) / w[0]
    points = np.column_stack((np.append(x1, [lims[0], lims[1], lims[0], lims[1]]), np.append(x2, [lims[2], lims[3], lims[3], lims[2]])))
    # Fill each side of the hyperplane with its class colour.
    points_p = order_points(points[points @ w + b >= - 1e-2])
    if (len(points_p) > 0):
        plt.fill(points_p[:, 0], points_p[:, 1], "b", alpha=0.3)
    points_n = order_points(points[points @ w + b <= + 1e-2])
    if (len(points_n) > 0):
        plt.fill(points_n[:, 0], points_n[:, 1], "r", alpha=0.3)
    plt.plot(x1, x2, "-k")
    plot_dataset_clas(x, y_r)
    plt.axis(ax)
    plt.title("$y = %.2f x_1 + %.2f x_2 + %.2f$ (Acc: %.2f%%)" % (w[0], w[1], b, 100 * accuracy_score(y_r, y_p)))
def fun_cross_entropy(X, y, w):
    """Binary cross-entropy of a logistic model with labels encoded as {-1, +1}."""
    labels = np.where(y == -1, 0, y)  # map -1 -> 0, keep +1
    probs = 1 / (1 + np.exp(-(X @ w)))
    per_sample = -(1 - labels) * np.log(1 - probs) - labels * np.log(probs)
    return per_sample.sum()
def grad_cross_entropy(X, y, w):
    """Gradient of fun_cross_entropy with respect to w (labels in {-1, +1})."""
    labels = np.where(y == -1, 0, y)  # map -1 -> 0, keep +1
    probs = 1 / (1 + np.exp(-(X @ w)))
    return X.T @ (probs - labels)
def fit_polylinear_regression(x, y, deg=1):
    """Fit a degree-``deg`` polynomial by linear regression on powers x^1..x^deg."""
    X = np.power(np.reshape(x, (len(x), 1)), np.arange(1, deg + 1))
    model = LinearRegression()
    model.fit(X, y)
    return model
def pred_polylinear_regression(model, x):
    """Predict with a fit_polylinear_regression model; degree inferred from coef_."""
    X = np.power(np.reshape(x, (len(x), 1)), np.arange(1, len(model.coef_) + 1))
    return model.predict(X)
def plot_polylinear_model(x, y_r, model):
    """Plot observations, the fitted polynomial curve and dotted per-point residuals."""
    xv = np.linspace(x.min(), x.max())
    plt.plot(x, y_r, "*", label="Obs.")
    plt.plot(xv, pred_polylinear_regression(model, xv), "-", label="Pred")
    y_p = pred_polylinear_regression(model, x)
    for i in range(len(x)):
        plt.plot([x[i].item(), x[i].item()], [y_p[i].item(), y_r[i].item()], ":k")
    plt.xlabel("$x$")
    plt.ylabel("$y$")
    plt.title("Degree: %d (MSE: %.2f, MAE: %.2f, R2: %.2f)" % (len(model.coef_), mean_squared_error(y_r, y_p), mean_absolute_error(y_r, y_p), r2_score(y_r, y_p)))
def norm_p(w, p):
    """Generalised l_p "norm" used by the contour plots.

    p == 0 counts the non-zero entries; p == inf is the max norm.  For
    0 < p <= 1 the p-th root is deliberately skipped (sum of |w_i|^p),
    matching the quasi-norm contours; for p > 1 the usual root is applied.
    """
    if p == 0:
        return np.count_nonzero(w)
    if p == np.inf:
        return np.max(np.abs(w))
    total = np.sum(np.abs(w) ** p)
    return total ** (1 / p) if p > 1 else total
def plot_contour_lp(p, mini=-3, maxi=3, npoi = 21):
    """Contour plot of the l_p norm (as defined by norm_p) over a square grid."""
    x = np.linspace(mini, maxi, npoi)
    y = np.linspace(mini, maxi, npoi)
    x, y = np.meshgrid(x, y)
    z = np.apply_along_axis(norm_p, 2, np.stack([x, y], axis = 2), p)
    plt.contour(x, y, z)
    plt.xlabel("$x_1$")
    plt.ylabel("$x_2$")
    plt.gca().set_aspect("equal", "box")
    plt.title("Norm $\ell_{%g}$" % p)
    plt.grid()
    plt.show()
def plot_contour_l1_l2(l1_ratio=0.5, mini=-3, maxi=3, npoi = 21):
    """Contour plot of the elastic-net penalty l1_ratio * l1 + (1 - l1_ratio) * l2."""
    x = np.linspace(mini, maxi, npoi)
    y = np.linspace(mini, maxi, npoi)
    x, y = np.meshgrid(x, y)
    z = l1_ratio * np.apply_along_axis(norm_p, 2, np.stack([x, y], axis = 2), 1) + (1 - l1_ratio) * np.apply_along_axis(norm_p, 2, np.stack([x, y], axis = 2), 2)
    plt.contour(x, y, z)
    plt.xlabel("$x_1$")
    plt.ylabel("$x_2$")
    plt.gca().set_aspect("equal", "box")
    plt.title("%g * Norm $\ell_1$ + %g * Norm $\ell_2$" % (l1_ratio, 1 - l1_ratio))
    plt.grid()
    plt.show()
def plot_contour_linear_lp(X, y, p=None, mini=-3, maxi=3, npoi=51):
    """Contour plot of the MSE of a two-weight linear model over weight space.

    When ``p`` is given, overlays the unit l_p ball and restricts the
    minimiser (red star) to lie inside it — illustrating constrained /
    regularised least squares.
    """
    def mse_linear(w):
        return mean_squared_error(y, X @ w)
    x1 = np.linspace(mini, maxi, npoi)
    x2 = np.linspace(mini, maxi, npoi)
    x1, x2 = np.meshgrid(x1, x2)
    z = np.apply_along_axis(mse_linear, 2, np.stack([x1, x2], axis = 2))
    plt.contour(x1, x2, z, 30)
    if p is not None:
        x = np.linspace(-1, 1, 101)
        if (p == 0):
            plt.plot([-1, 1], [0, 0], "-k")
            plt.plot([0, 0], [-1, 1], "-k")
            ball = np.abs(x1) + np.abs(x2) <= 1
        elif (p == np.inf):
            plt.plot([-1, 1, 1, -1, -1], [1, 1, -1, -1, 1, ], "-k")
            plt.fill([-1, 1, 1, -1, -1], [1, 1, -1, -1, 1, ], "k")
            ball = np.maximum(x1, x2) <= 1
        else:
            # NOTE(review): this rebinding shadows the ``y`` argument captured by
            # mse_linear above; it is harmless only because z was already
            # computed — confirm before refactoring.
            y = np.power(1 - np.power(np.abs(x), p), 1 / p)
            plt.plot(np.concatenate((x, np.flip(x))), np.concatenate((y, np.flip(-y))), "-k")
            plt.fill(np.concatenate((x, np.flip(x))), np.concatenate((y, np.flip(-y))), "k")
            ball = np.power(np.abs(x1), p) + np.power(np.abs(x2), p) <= 1
        obj = z
        obj[ball == False] = np.inf
    else:
        obj = z
    ind = np.unravel_index(np.argmin(obj), obj.shape)
    plt.plot(x1[ind[0], ind[1]], x2[ind[0], ind[1]], "r*")
    plt.xlabel("Feature 1")
    plt.ylabel("Feature 2")
    plt.gca().set_aspect("equal", "box")
    if (p is not None):
        plt.title("Norm $\ell_{%g}$ + MSE" % p)
    else:
        plt.title("MSE")
    plt.grid()
    plt.show()
def generate_bv_example(n_rep=250, n_mod=15, n_dim=10, noise=3e-1, seed=1234):
    """Simulate the bias-variance trade-off with Ridge regression.

    For each of ``n_mod`` log-spaced alpha values, fits Ridge on ``n_rep``
    fresh random training sets and evaluates on one fixed test set.
    Returns (distances, predictions, y_te): per-(model, repetition) test MSEs,
    the corresponding test predictions, and the fixed test targets.
    """
    n_pat = n_dim
    alpha_v = np.logspace(-4, 4, n_mod)
    np.random.seed(seed)
    w = np.random.randn(n_dim)
    x_te = np.random.randn(n_pat, n_dim)
    y_te = x_te @ w + noise * np.random.randn(n_pat)
    distances = np.zeros((n_mod, n_rep))
    predictions = np.zeros((n_mod, n_rep, n_pat))
    for i, alpha in enumerate(alpha_v):
        for j in range(n_rep):
            x_tr = np.random.randn(n_pat, n_dim)
            y_tr = x_tr @ w + noise * np.random.randn(n_pat)
            y_te_p = Ridge(alpha=alpha, fit_intercept=False).fit(x_tr, y_tr).predict(x_te)
            predictions[i, j, :] = y_te_p
            distances[i, j] = mean_squared_error(y_te, y_te_p)
    return distances, predictions, y_te
def plot_perceptron_evo_epochs(x, y, max_epochs=5):
    """Plot the perceptron decision boundary after each training epoch.

    Trains a fresh ``Perceptron`` capped at ``i + 1`` epochs for every column,
    so subplot ``i`` shows the boundary (and accuracy) after epoch ``i + 1``.
    """
    import warnings
    warnings.filterwarnings("ignore", category=Warning)
    # Bug fix: the original referenced the undefined name ``max_iters`` here,
    # raising NameError; the parameter is ``max_epochs``.
    fig, ax = plt.subplots(nrows=1, ncols=max_epochs)
    for i in range(max_epochs):
        model = Perceptron(tol=-1, max_iter=i + 1)
        model.fit(x, y)
        plt.sca(ax[i])
        plot_linear_model_clas(x, y, model.coef_[0], model.intercept_)
        if (i > 0):
            ax[i].set_yticklabels("")
            ax[i].set_ylabel("")
        plt.title("Epoch %d (Acc: %.2f%%)" % (i + 1, 100 * accuracy_score(y, model.predict(x))))
    plt.tight_layout()
def plot_perceptron_evo_iter(x, y, max_iters=5):
    """Visualise individual perceptron updates, one subplot per iteration.

    Runs the classic perceptron rule by hand (bias folded in as the first
    weight) and circles the pattern used for each update.
    """
    import warnings
    warnings.filterwarnings("ignore", category=Warning)
    n_pat = x.shape[0]
    n_dim = x.shape[1]
    w = np.zeros(n_dim + 1)
    w[0] = 1e-1
    # Prepend a column of ones so w[0] acts as the bias term.
    x_b = np.column_stack((np.ones(n_pat), x))
    nrows = int(np.ceil(max_iters / 5))
    ncols = 5
    fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(15, 3 * nrows))
    ax = ax.ravel()
    for i in range(max_iters):
        x_i = x_b[i % n_pat, :]
        y_i = y[i % n_pat]
        pred = np.sign(w @ x_i)
        plt.sca(ax[i])
        plot_linear_model_clas(x, y, w[1:], w[0])
        plt.scatter(x_i[1], x_i[2], s=200, linewidth=4, facecolors="none", edgecolors="k", zorder=100)
        if (i % ncols > 0):
            ax[i].set_yticklabels("")
            ax[i].set_ylabel("")
        if (i < (nrows - 1) * ncols):
            ax[i].set_xticklabels("")
            ax[i].set_xlabel("")
        plt.title("Iter. %d (Acc: %.2f%%)" % (i + 1, 100 * accuracy_score(y, np.sign(x_b @ w))))
        # Perceptron rule: update only when the prediction is wrong.
        w += (y_i - pred) / 2 * x_i
    plt.tight_layout()
def plot_nonlinear_model(x, y_r, model, phi=None):
    """Plot a (possibly nonlinear) regression model with residuals.

    ``phi`` maps raw inputs to the model's feature space; by default it just
    reshapes x into a column vector.
    """
    if phi is None:
        phi = lambda x: np.reshape(x, (-1, 1))
    y_p = model.predict(phi(x))
    plt.plot(x, y_r, "*", label="Obs.")
    plt.plot(x, y_p, "-", label="Pred")
    for i in range(len(x)):
        plt.plot([x[i].item(), x[i].item()], [y_p[i].item(), y_r[i].item()], ":k")
    plt.xlabel("$x$")
    plt.ylabel("$y$")
    plt.title("(MSE: %.2f, MAE: %.2f, R2: %.2f)" % (mean_squared_error(y_r, y_p), mean_absolute_error(y_r, y_p), r2_score(y_r, y_p)))
def plot_nonlinear_model_clas(x, y_r, model, phi=None, n_points=31):
    """Plot a 2-D classifier's decision regions plus the data points.

    ``phi`` maps raw inputs to the model's feature space (default: reshape
    to a column vector).  Regions are shaded with washed-out class colours.
    """
    if phi is None:
        phi = lambda x: np.reshape(x, (-1, 1))
    alpha = 0.3
    col_1 = np.array([31, 119, 180]) / 255
    col_2 = np.array([214, 39, 40]) / 255
    y_p = model.predict(phi(x))
    ind = y_r < 0
    plt.scatter(x[ind, 0], x[ind, 1], c=[col_1], zorder=100)
    ind = y_r >= 0
    plt.scatter(x[ind, 0], x[ind, 1], c=[col_2], zorder=100)
    ax = plt.axis("equal")
    # Extend the evaluation grid well past the visible axes so the shaded
    # regions always cover the whole view.
    x_1 = np.linspace(plt.xlim()[0], plt.xlim()[1], n_points)
    x_1 = np.hstack((x_1[0] - 100, x_1, x_1[-1] + 100))
    x_2 = np.linspace(plt.ylim()[0], plt.ylim()[1], n_points)
    x_2 = np.hstack((x_2[0] - 100, x_2, x_2[-1] + 100))
    x_1, x_2 = np.meshgrid(x_1, x_2, indexing="ij")
    plt.pcolormesh(x_1, x_2, np.reshape(model.predict(phi(np.column_stack((x_1.ravel(), x_2.ravel())))), x_1.shape), shading="auto", cmap=colors.ListedColormap([alpha * col_1 + 1 - alpha, alpha * col_2 + 1 - alpha]))
    plt.axis(ax)
    plt.xlabel("$x_1$")
    plt.ylabel("$x_2$")
    plt.title("(Acc: %.2f%%)" % (100 * accuracy_score(y_r, y_p)))
def polynomial_basis(X, deg):
    """Expand each scalar sample into its powers [x^0, x^1, ..., x^deg]."""
    exponents = np.arange(0, deg + 1)
    rows = [np.power(sample, exponents) for sample in X]
    return np.array(rows)
def gaussian_basis(X, mu, sigma):
    """Evaluate Gaussian (RBF) features exp(-(x - mu)^2 / sigma^2) per sample."""
    variance = np.power(sigma, 2)
    rows = [np.exp(-np.power(sample - mu, 2) / variance) for sample in X]
    return np.array(rows)
def sigmoidal_basis(X, a, b):
    """Evaluate sigmoid features 1 / (1 + exp(-(a*x - b))) for each sample."""
    rows = [1 / (1 + np.exp(- (a * sample - b))) for sample in X]
    return np.array(rows)
def plot_krr_coefficients(model, label_gap=5):
    """Bar-plot the dual coefficients of a fitted kernel ridge model.

    Only every ``label_gap``-th tick is labelled to keep the axis readable.
    """
    coef = model.dual_coef_
    pos = np.arange(len(coef))
    plt.bar(pos, coef, alpha=0.5)
    plt.grid()
    labels = []
    for i in range(len(coef)):
        labels.append("$\\alpha_{%d}$" % (i + 1))
    plt.xticks(pos[::label_gap], labels[::label_gap])
    plt.title("Dual Coefficients")
def plot_svc(x, y, model, n_points=151, plot_slack=False,
             plot_support_vectors=True):
    """Plot a fitted 2-D SVC: data, decision regions, margins and support vectors.

    ``plot_slack`` additionally draws, for linear kernels, the dotted slack
    segments from each support vector to its margin hyperplane.
    """
    alpha = 0.2
    col_2 = np.array([78, 255, 239]) / 255
    col_1 = np.array([246, 174, 45]) / 255
    ind = y != 1
    plt.scatter(x[ind, 0], x[ind, 1], c=c1, s=30, zorder=100)
    ind = y == 1
    plt.scatter(x[ind, 0], x[ind, 1], c=c2, s=30, zorder=100)
    lims = plt.axis("equal")
    # Grid extends beyond the axes so the shaded regions fill the view.
    xx = np.linspace(lims[0] - 1.1 * (lims[1] - lims[0]), lims[1] + 1.1 * (lims[1] - lims[0]), n_points)
    yy = np.linspace(lims[2], lims[3], n_points)
    yy, xx = np.meshgrid(yy, xx)
    xy = np.vstack([xx.ravel(), yy.ravel()]).T
    zz = model.decision_function(xy).reshape(xx.shape)
    plt.pcolormesh(xx, yy, np.sign(zz), shading="auto",
                   cmap=colors.ListedColormap([alpha * col_2 + 1 - alpha, alpha * col_1 + 1 - alpha]))
    # Levels -1/0/+1 are the two margin hyperplanes and the decision boundary.
    plt.contour(xx, yy, zz, colors=[c1, "k", c2], levels=[-1, 0, 1], linestyles=["--", "-", "--"], linewidths=[2, 4, 2])
    plt.legend(handles=[
        lines.Line2D([], [], color=c1, linestyle="--", label="Support Hyp. $-1$"),
        lines.Line2D([], [], color="k", linestyle="-", label="Sepparating Hyp."),
        lines.Line2D([], [], color=c2, linestyle="--", label="Support Hyp. $+1$")
    ])
    # Plot black circles around the support vectors
    if plot_support_vectors:
        plt.scatter(model.support_vectors_[:, 0],
                    model.support_vectors_[:, 1],
                    s=100, linewidth=3, facecolors="none", edgecolors="k")
    if (plot_slack):
        w = model.coef_[0]
        b = model.intercept_
        nws = np.linalg.norm(w)**2
        # Project each support vector onto its margin hyperplane and join them.
        for i in model.support_:
            p = x[i, :] - (w @ x[i, :] + b - y[i]) / nws * w
            c = c2 if y[i] == 1 else c1
            plt.plot([p[0], x[i, 0]], [p[1], x[i, 1]], ":", color=c)
    plt.axis(lims)
    plt.xlabel("$x_1$")
    plt.ylabel("$x_2$")
    plt.title("SVM (%s, C=%.2g)" % (model.kernel, model.C))
def plot_svc_animation_frame(x, y, ax, model, lims, subtitle, n_points=151, alpha=0.2):
    """Draw one frame of an SVC animation onto the given axes.

    Plots the data, decision regions, margin/separating contours and support
    vectors of ``model`` on ``ax``.  ``lims`` may be None for the first frame;
    the computed axis limits are returned so later frames can reuse them.
    """
    # Bug fix: the original body immediately re-assigned ``alpha = 0.2``,
    # shadowing the keyword argument so callers could never change it.
    col_2 = np.array([78, 255, 239]) / 255
    col_1 = np.array([246, 174, 45]) / 255
    ind = y != 1
    ax.scatter(x[ind, 0], x[ind, 1], c=c1, s=30, zorder=100)
    ind = y == 1
    ax.scatter(x[ind, 0], x[ind, 1], c=c2, s=30, zorder=100)
    if lims is None:  # idiom fix: identity comparison with None (was ``== None``)
        lims = ax.axis("equal")
    xx = np.linspace(lims[0] - 1.1 * (lims[1] - lims[0]),
                     lims[1] + 1.1 * (lims[1] - lims[0]),
                     n_points)
    yy = np.linspace(lims[2], lims[3], n_points)
    yy, xx = np.meshgrid(yy, xx)
    xy = np.vstack([xx.ravel(), yy.ravel()]).T
    zz = model.decision_function(xy).reshape(xx.shape)
    ax.pcolormesh(xx, yy, np.sign(zz), shading="auto",
                  cmap=colors.ListedColormap([alpha * col_2 + 1 - alpha,
                                              alpha * col_1 + 1 - alpha]))
    ax.contour(xx, yy, zz, colors=[c1, "k", c2], levels=[-1, 0, 1],
               linestyles=["--", "-", "--"], linewidths=[2, 4, 2])
    ax.legend(handles=[
        lines.Line2D([], [], color=c1, linestyle="--", label="Support Hyp. $-1$"),
        lines.Line2D([], [], color="k", linestyle="-", label="Sepparating Hyp."),
        lines.Line2D([], [], color=c2, linestyle="--", label="Support Hyp. $+1$")
    ])
    # Plot black circles around the support vectors
    ax.scatter(model.support_vectors_[:, 0],
               model.support_vectors_[:, 1],
               s=100, linewidth=3, facecolors="none", edgecolors="k")
    ax.axis(lims)
    ax.set_xlabel("$x_1$")
    ax.set_ylabel("$x_2$")
    ax.set_title(subtitle)
    return lims
def plot_all_linear_separators(x, y, plot_support_vectors=True, n_points=51):
    """Plot every linear classifier on an (angle, bias) grid that separates the data.

    With ``plot_best`` True, only the maximum-margin separator is drawn in
    full opacity and its three closest points are circled.
    """
    ang_vec = np.linspace(0, 2 * np.pi, n_points)
    b_vec = np.linspace(-5, 5, n_points)
    ang_mat, b_mat = np.meshgrid(ang_vec, b_vec, indexing="ij")
    ws = []
    bs = []
    ms = []
    svs = []
    for i_ang in range(len(ang_vec)):
        ang = ang_vec[i_ang]
        for i_b in range(len(b_vec)):
            b = b_vec[i_b]
            w = np.array([np.sin(ang), np.cos(ang)])
            d = (np.abs(x @ w + b) / np.linalg.norm(w))
            m = d.min()
            sv = np.argsort(d)[:3]
            y_p = np.sign(x @ w + b)
            # Keep only hyperplanes that classify every point correctly.
            if (accuracy_score(y, y_p) == 1):
                ws.append(w)
                bs.append(b)
                ms.append(m)
                svs.append(sv)
    plot_dataset_clas(x, y)
    lims = plt.axis()
    max_m = np.array(ms).max()
    for w, b, m, sv in zip(ws, bs, ms, svs):
        if (w[1] != 0):
            x1 = np.asarray(lims[0:2])
            x1[0] -= 1.1 * (lims[1] - lims[0])
            x1[1] += 1.1 * (lims[1] - lims[0])
            x2 = - (w[0] * x1 + b) / w[1]
        else:
            x2 = lims[2:]
            x1 = - (w[1] * x2 + b) / w[0]
        if (plot_best):
            if (m == max_m):
                plt.plot(x1, x2, "-k", alpha=1.0)
                plt.scatter(x[sv, 0], x[sv, 1], s=100, linewidth=3, facecolors="none", edgecolors="k")
            else:
                plt.plot(x1, x2, "-k", alpha=0.3)
        else:
            plt.plot(x1, x2, "-k", alpha=0.3)
    plt.axis(lims)
    plt.xlabel("$x_1$")
    plt.ylabel("$x_2$")
    plt.title("Linear Classifiers")
def plot_svr(x, y, model, n_points=151, plot_slack=False):
    """Plot a fitted 1-D SVR: data, prediction curve, epsilon tube and support vectors.

    ``plot_slack`` additionally draws dotted segments from points outside the
    epsilon-tube to the tube boundary.
    """
    x_e = np.linspace(x.min(), x.max(), n_points)
    np.append(x_e, x)
    x_e.sort()
    y_p = model.predict(x_e.reshape(-1, 1))
    y_pi = model.predict(x.reshape(-1, 1))
    plt.plot(x, y, "*", label="Obs.")
    plt.plot(x_e, y_p, "-", label="Model")
    # Dashed lines mark the epsilon-insensitive tube around the model.
    plt.plot(x_e, y_p + model.epsilon, "--k")
    plt.plot(x_e, y_p - model.epsilon, "--k")
    plt.scatter(x[model.support_], y[model.support_], s=100, linewidth=3, facecolors="none", edgecolors="k")
    if (plot_slack):
        for i in range(len(x)):
            if (y_pi[i] > y[i] + model.epsilon):
                plt.plot([x[i].item(), x[i].item()], [y_pi[i].item() - model.epsilon, y[i].item()], ":k")
            if (y_pi[i] < y[i] - model.epsilon):
                plt.plot([x[i].item(), x[i].item()], [y_pi[i].item() + model.epsilon, y[i].item()], ":k")
    plt.xlabel("$x$")
    plt.ylabel("$y$")
    plt.title("SVM (%s, C=%.2g)" % (model.kernel, model.C))
def evaluate_nonlinear_model(x_tr, y_tr_r, x_te, y_te_r, model):
    """Print a train/test table of MSE, MAE and R2 for any fitted model with .predict."""
    y_tr_p = model.predict(x_tr)
    y_te_p = model.predict(x_te)
    er_tr = [mean_squared_error(y_tr_r, y_tr_p), mean_absolute_error(y_tr_r, y_tr_p), r2_score(y_tr_r, y_tr_p)]
    er_te = [mean_squared_error(y_te_r, y_te_p), mean_absolute_error(y_te_r, y_te_p), r2_score(y_te_r, y_te_p)]
    ers = [er_tr, er_te]
    headers=["MSE", "MAE", "R2"]
    print("%10s" % "", end="")
    for h in headers:
        print("%10s" % h, end="")
    print("")
    headersc = ["Train", "Test"]
    cnt = 0
    for er in ers:
        hc = headersc[cnt]
        cnt = cnt + 1
        print("%10s" % hc, end="")
        for e in er:
            print("%10.2f" % e, end="")
        print("")
def generate_sequence(env, early_stop=True, model=None, n_steps=500):
    """Roll out up to ``n_steps`` in a Gym-style ``env``, rendering each step.

    With ``model is None`` actions are sampled uniformly from the action
    space; otherwise an action is sampled from the model's predicted action
    probabilities.  With ``early_stop`` the rollout ends when ``done`` is set.

    NOTE(review): the model branch references ``tf`` (tensorflow), which is
    never imported in this module — calling with a model will raise NameError
    unless ``tf`` is provided elsewhere; confirm the intended import.
    """
    state = env.reset()
    for i in range(n_steps):
        env.render()
        if model is None:
            action = env.action_space.sample()
        else:
            action_probs, _ = model.predict(tf.expand_dims(tf.convert_to_tensor(state), 0))
            p = np.squeeze(action_probs)
            action = np.random.choice(len(p), p=p)
        state, _, done, _ = env.step(action)
        if early_stop and done:
            print("Finished after %d steps" % i)
            break
| StarcoderdataPython |
4817482 | # -*- coding:utf-8 -*-
"""
Copyright (c) 2013-2016 SYPH, All Rights Reserved.
-----------------------------------------------------------
Author: S.JunPeng
Date: 2016/12/22
Change Activity:
_==/ i i \==_
/XX/ |\___/| \XX\
/XXXX\ |XXXXX| /XXXX\
|XXXXXX\_ _XXXXXXX_ _/XXXXXX|
XXXXXXXXXXXxxxxxxxXXXXXXXXXXXxxxxxxxXXXXXXXXXXX
|XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX|
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
|XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX|
XXXXXX/^^^^^\XXXXXXXXXXXXXXXXXXXXX/^^^^^\XXXXXX
|XXX| \XXX/^^\XXXXX/^^\XXX/ |XXX|
\XX\ \X/ \XXX/ \X/ /XX/
"\ " \X/ " /"
"""
import json
import logging
import datetime
import re
from braces.views import CsrfExemptMixin
from django.http.response import HttpResponse
from django.views.generic import View
from apps.etl.models import *
from apps.common.models import FeatureCodeMapping
from apps.datasource.models import *
from vendor.utils.pagination import ExtPaginator
from vendor.utils.commons import json_response
from studio.feature_comment_handle.exec_chain_handle import func_exec_chain
from studio.feature_comment_handle.jsonparse_handle import JSONPathParser
from vendor.utils.defaults import *
logger = logging.getLogger('apps.interface')
class FeatureConfig(CsrfExemptMixin, View):
    """CRUD endpoints for feature base configuration (``FeatureConf`` rows).

    NOTE: this module targets Python 2 — it relies on ``e.message`` and on
    eager ``map()`` side effects; both are no-ops or errors under Python 3.
    Responses use ``status`` 1 (int) on success and ``'0'`` (string) on
    failure, mirroring the legacy contract.
    """

    def get(self, request, featurename, page, *args, **kwargs):
        """Return one page of feature configurations.

        ``featurename == 'all'`` lists every feature; otherwise only the
        matching feature name is returned.
        """
        data = {
            'status': 1,
            'message': 'success'
        }
        try:
            current_page = page
            page_size = 10
            if featurename == 'all':
                feature_config_obj = FeatureConf.objects.values()
            else:
                feature_config_obj = FeatureConf.objects.filter(feature_name=featurename).values()
            feature_config_count = feature_config_obj.count()
            paginator = ExtPaginator(list(feature_config_obj), page_size, feature_config_count)
            object_list = paginator.page(current_page)
            # Flatten FK ids into display strings in-place.
            # NOTE: relies on Python 2's eager map(); under Python 3 this lazy
            # map would never run.
            map(lambda x: [
                x.update({"created_on": x["created_on"].strftime('%Y-%m-%d %H:%M:%S') if x["created_on"] else ''}),
                x.update({"updated_on": x["updated_on"].strftime('%Y-%m-%d %H:%M:%S') if x["updated_on"] else ''}),
                x.update(
                    {"feature_select_value": x["feature_select_value"] if x["feature_select_value"] else ''}),
                x.update({"feature_type_desc": FeatureType.objects.get(pk=x["feature_type_id"]).feature_type_desc if x[
                    "feature_type_id"] else ''}),
                x.update(
                    {"feature_rule_type_desc": FeatureRuleType.objects.get(
                        pk=x["feature_rule_type_id"]).feature_type_desc if x[
                        "feature_rule_type_id"] else ''}),
                x.update(
                    {"feature_card_type_desc": FeatureCardType.objects.get(
                        pk=x["feature_card_type_id"]).feature_type_desc if x[
                        "feature_card_type_id"] else ''}),
            ], object_list)
            page_num = paginator.num_pages
            page_range = paginator.page_range
            res_data = dict(
                total_count=feature_config_count,
                page_num=page_num,
                current_page=current_page,
                config_list=list(object_list),
                page_range=page_range
            )
            data.update({"res_data": res_data})
        except Exception as e:
            logger.error(e.message)
            data = {
                'status': '0',
                'message': e.message
            }
        return json_response(data)

    def put(self, request, item, featureid, *args, **kwargs):
        """Update one feature configuration.

        ``item`` selects which group of fields to update: ``feature_info``
        (names/types/flags) or ``feature_source`` (collection settings).
        """
        data = {
            'status': 1,
            'message': 'success'
        }
        try:
            body = json.loads(request.body)
            logger.info("update feature config request data=%s", body)
            updated_on = datetime.datetime.now()
            if item == 'feature_info':
                feature_name_cn = body.get('feature_name_cn')
                if not feature_name_cn:
                    raise Exception(u'特征中文名不能为空')
                feature_type = body.get('feature_type_id')
                feature_rule_type = body.get('feature_rule_type_id')
                feature_card_type = body.get('feature_card_type_id')
                feature_type = FeatureType.objects.get(pk=feature_type) if feature_type else None
                feature_rule_type = FeatureRuleType.objects.get(pk=feature_rule_type) if feature_rule_type else None
                feature_card_type = FeatureCardType.objects.get(pk=feature_card_type) if feature_card_type else None
                FeatureConf.objects.filter(pk=int(featureid)).update(
                    feature_name=body.get('feature_name'),
                    feature_name_cn=body.get('feature_name_cn'),
                    feature_type=feature_type,
                    feature_rule_type=feature_rule_type,
                    feature_card_type=feature_card_type,
                    feature_select_value=body.get('feature_select_value'),
                    updated_on=updated_on,
                    is_delete=body.get('is_delete')
                )
            elif item == 'feature_source':
                data_identity = body.get('data_identity')
                if not data_identity:
                    raise Exception(u'特征数据源不能为空!')
                FeatureConf.objects.filter(pk=int(featureid)).update(
                    collect_type=body.get('collect_type'),
                    data_identity=body.get('data_identity'),
                    updated_on=updated_on
                )
            else:
                raise Exception('url error')
        except Exception as e:
            logger.error(e.message)
            data = {
                'status': '0',
                'message': e.message
            }
        return json_response(data)

    def post(self, request, item, *args, **kwargs):
        """Create a new feature configuration (``item`` must be 'feature_info')."""
        data = {
            'status': 1,
            'message': 'success'
        }
        try:
            body = json.loads(request.body)
            logger.info("update feature config request data=%s", body)
            created_on = datetime.datetime.now()
            if item == 'feature_info':
                feature_name = body.get('feature_name')
                feature_name_cn = body.get('feature_name_cn')
                # Bug fix: the original tested ``feature_name_cn`` twice and
                # never validated ``feature_name``, although the error message
                # requires both fields to be non-empty.
                if not feature_name or not feature_name_cn:
                    raise Exception(u'特征名和特征中文名不能为空!')
                count = FeatureConf.objects.filter(feature_name=feature_name)
                if count:
                    raise Exception('%s already exists!' % feature_name)
                feature_type = body.get('feature_type_id')
                feature_rule_type = body.get('feature_rule_type_id')
                feature_card_type = body.get('feature_card_type_id')
                feature_type = FeatureType.objects.get(pk=feature_type) if feature_type else None
                feature_rule_type = FeatureRuleType.objects.get(pk=feature_rule_type) if feature_rule_type else None
                feature_card_type = FeatureCardType.objects.get(pk=feature_card_type) if feature_card_type else None
                FeatureConf(
                    feature_name=body.get('feature_name'),
                    feature_name_cn=body.get('feature_name_cn'),
                    feature_type=feature_type,
                    feature_rule_type=feature_rule_type,
                    feature_card_type=feature_card_type,
                    feature_select_value=body.get('feature_select_value'),
                    is_delete=body.get('is_delete'),
                    updated_on=created_on,
                    created_on=created_on
                ).save()
            else:
                raise Exception('url error')
        except Exception as e:
            logger.error(e.message)
            data = {
                'status': '0',
                'message': e.message
            }
        return json_response(data)
class FeatureShuntConfig(CsrfExemptMixin, View):
    """CRUD endpoints for shunt (traffic-splitting) feature configuration.

    NOTE: this module targets Python 2 (``e.message``); responses use
    ``status`` 1 (int) on success and ``'0'`` (string) on failure.
    """

    def get(self, request, featurename, page, *args, **kwargs):
        """Return one page of shunt feature configurations ('all' lists every row)."""
        data = {
            'status': 1,
            'message': 'success'
        }
        try:
            current_page = page
            page_size = 500
            if featurename == 'all':
                feature_config_obj = FeatureShuntConf.objects.values()
            else:
                feature_config_obj = FeatureShuntConf.objects.filter(feature_name=featurename).values()
            feature_config_count = feature_config_obj.count()
            paginator = ExtPaginator(list(feature_config_obj), page_size, feature_config_count)
            object_list = paginator.page(current_page)
            # NOTE: relies on Python 2's eager map() for these in-place updates.
            map(lambda x: [
                x.update({"created_on": x["created_on"].strftime('%Y-%m-%d %H:%M:%S') if x["created_on"] else ''}),
                x.update({"updated_on": x["updated_on"].strftime('%Y-%m-%d %H:%M:%S') if x["updated_on"] else ''}),
                x.update({"shunt_value": x["shunt_value"] if x["shunt_value"] else ''}),
            ], object_list)
            page_num = paginator.num_pages
            page_range = paginator.page_range
            res_data = dict(
                total_count=feature_config_count,
                page_num=page_num,
                current_page=current_page,
                config_list=list(object_list),
                page_range=page_range
            )
            data.update({"res_data": res_data})
        except Exception as e:
            logger.error(e.message)
            data = {
                'status': '0',
                'message': e.message
            }
        return json_response(data)

    def put(self, request, featureid, *args, **kwargs):
        """Update one shunt feature configuration; all fields are required."""
        data = {
            'status': 1,
            'message': 'success'
        }
        try:
            body = json.loads(request.body)
            logger.info("update shunt feature config request data=%s", body)
            feature_name = body.get('feature_name')
            shunt_key = body.get('shunt_key')
            shunt_type = body.get('shunt_type')
            shunt_value = body.get('shunt_value')
            data_identity = body.get('data_identity')
            is_delete = body.get('is_delete')
            if not (feature_name and shunt_key and data_identity and shunt_type and shunt_value):
                raise Exception("all values don't is null !")
            # SECURITY: eval() on request-supplied data can execute arbitrary
            # code; consider ast.literal_eval() instead.
            # Consistency fix: post() accepts tuple OR list here, so put()
            # now does too (previously tuple-only).
            if not isinstance(eval(body['shunt_value']), (tuple, list)):
                raise Exception(u'数据源适应范围必须为元组类型!')
            updated_on = datetime.datetime.now()
            FeatureShuntConf.objects.filter(pk=int(featureid)).update(
                feature_name=feature_name,
                shunt_key=shunt_key,
                data_identity=data_identity,
                shunt_type=shunt_type,
                shunt_value=shunt_value,
                updated_on=updated_on,
                is_delete=is_delete
            )
        except Exception as e:
            logger.error(e.message)
            data = {
                'status': '0',
                'message': e.message
            }
        return json_response(data)

    def post(self, request, *args, **kwargs):
        """Create a new shunt feature configuration; feature_name must be unique."""
        data = {
            'status': 1,
            'message': 'success'
        }
        try:
            body = json.loads(request.body)
            logger.info("add shunt feature config request data=%s", body)
            feature_name = body.get('feature_name')
            shunt_key = body.get('shunt_key')
            shunt_type = body.get('shunt_type')
            shunt_value = body.get('shunt_value')
            data_identity = body.get('data_identity')
            is_delete = body.get('is_delete')
            if not (feature_name and shunt_key and data_identity and shunt_type and shunt_value):
                raise Exception("all values don't is null !")
            # SECURITY: eval() on request-supplied data can execute arbitrary
            # code; consider ast.literal_eval() instead.
            if not isinstance(eval(body['shunt_value']), (tuple, list)):
                raise Exception(u'数据源适应范围必须为元组类型!')
            if FeatureShuntConf.objects.filter(feature_name=feature_name).count():
                raise Exception('this feature_name already exists!')
            FeatureShuntConf(
                feature_name=feature_name,
                shunt_key=shunt_key,
                data_identity=data_identity,
                shunt_type=shunt_type,
                shunt_value=tuple(eval(shunt_value)),
                is_delete=is_delete
            ).save()
        except Exception as e:
            logger.error(e.message)
            data = {
                'status': '0',
                'message': e.message
            }
        return json_response(data)
class FeatureRelevanceConfig(CsrfExemptMixin, View):
    """CRUD endpoints for dependent-feature (依赖特征) configuration records."""

    def get(self, request, featurename, page, *args, **kwargs):
        """获取依赖特征基础配置信息 -- return one page of FeatureRelevanceConf rows.

        ``featurename`` may be the literal 'all' to list every record.
        """
        data = {
            'status': 1,
            'message': 'success'
        }
        try:
            current_page = page
            page_size = 100
            if featurename == 'all':
                feature_config_obj = FeatureRelevanceConf.objects.values()
            else:
                feature_config_obj = FeatureRelevanceConf.objects.filter(
                    feature_name=featurename
                ).values()
            feature_config_count = feature_config_obj.count()
            paginator = ExtPaginator(list(feature_config_obj), page_size, feature_config_count)
            object_list = paginator.page(current_page)
            # Format datetimes as strings so the rows serialize cleanly to
            # JSON (plain loop replaces the old side-effecting map()).
            for row in object_list:
                row.update({"created_on": row["created_on"].strftime('%Y-%m-%d %H:%M:%S') if row["created_on"] else ''})
                row.update({"updated_on": row["updated_on"].strftime('%Y-%m-%d %H:%M:%S') if row["updated_on"] else ''})
            page_num = paginator.num_pages
            page_range = paginator.page_range
            res_data = dict(
                total_count=feature_config_count,
                page_num=page_num,
                current_page=current_page,
                config_list=list(object_list),
                page_range=page_range
            )
            data.update({"res_data": res_data})
        except Exception as e:
            logger.error(e.message)
            data = {
                'status': '0',
                'message': e.message
            }
        return json_response(data)

    def put(self, request, featureid, *args, **kwargs):
        """更新依赖特征基础配置信息 -- update the record with pk ``featureid``."""
        data = {
            'status': 1,
            'message': 'success'
        }
        try:
            body = json.loads(request.body)
            logger.info("update relevance feature config request data=%s", body)
            feature_name = body['feature_name']
            depend_feature = body['depend_feature']
            data_identity = body['data_identity']
            is_delete = body['is_delete']
            updated_on = datetime.datetime.now()
            FeatureRelevanceConf.objects.filter(pk=int(featureid)).update(
                feature_name=feature_name,
                depend_feature=depend_feature,
                data_identity=data_identity,
                updated_on=updated_on,
                is_delete=is_delete
            )
        except Exception as e:
            logger.error(e.message)
            data = {
                'status': '0',
                'message': e.message
            }
        return json_response(data)

    def post(self, request, *args, **kwargs):
        """添加依赖特征基本信息配置 -- create a record, or validate the table.

        When the URL path contains 'check', verify that every name listed in
        each record's comma-separated ``depend_feature`` exists as an active
        record; otherwise insert a new FeatureRelevanceConf row from the
        JSON body.
        """
        data = {
            'status': 1,
            'message': 'success'
        }
        try:
            path = request.path
            if 'check' in path:
                # BUGFIX: the original bound both the iterated queryset and
                # the per-dependency count to the same name ``con``,
                # shadowing the outer variable mid-loop.
                configs = FeatureRelevanceConf.objects.filter(is_delete=False)
                for i in configs:
                    if i.depend_feature:
                        depend_feature_list = i.depend_feature.split(',')
                        for j in depend_feature_list:
                            exists_count = FeatureRelevanceConf.objects.filter(is_delete=False, feature_name=j).count()
                            if not exists_count:
                                raise Exception("%s dependent feature %s, %s is not available in relevance table !" % (
                                    i.feature_name, j, j))
            else:
                body = json.loads(request.body)
                logger.info("add shunt feature config request data=%s", body)
                feature_name = body['feature_name']
                depend_feature = body['depend_feature']
                data_identity = body['data_identity']
                is_delete = body['is_delete']
                updated_on = datetime.datetime.now()
                if FeatureRelevanceConf.objects.filter(feature_name=feature_name).count():
                    raise Exception('this feature_name already exists!')
                FeatureRelevanceConf(
                    feature_name=feature_name,
                    depend_feature=depend_feature,
                    data_identity=data_identity,
                    updated_on=updated_on,
                    is_delete=is_delete
                ).save()
        except Exception as e:
            logger.error(e.message)
            data = {
                'status': '0',
                'message': e.message
            }
        return json_response(data)
class RemoteConfig(CsrfExemptMixin, View):
    """CRUD endpoints for data-source interface (DsInterfaceInfo) records."""

    def get(self, request, data_identity, page, *args, **kwargs):
        """获取数据源基础配置信息 -- return one page of DsInterfaceInfo rows.

        ``data_identity`` may be the literal 'all' to list every record.
        """
        data = {
            'status': 1,
            'message': 'success'
        }
        try:
            current_page = page
            page_size = 100
            if data_identity == 'all':
                data_identity_obj = DsInterfaceInfo.objects.values()
            else:
                data_identity_obj = DsInterfaceInfo.objects.filter(
                    data_identity=data_identity
                ).values()
            data_identity_count = data_identity_obj.count()
            paginator = ExtPaginator(list(data_identity_obj), page_size, data_identity_count)
            object_list = paginator.page(current_page)
            # Format datetimes as strings so the rows serialize cleanly to
            # JSON (plain loop replaces the old side-effecting map()).
            for row in object_list:
                row.update({"created_on": row["created_on"].strftime('%Y-%m-%d %H:%M:%S') if row["created_on"] else ''})
                row.update({"updated_on": row["updated_on"].strftime('%Y-%m-%d %H:%M:%S') if row["updated_on"] else ''})
            page_num = paginator.num_pages
            page_range = paginator.page_range
            res_data = dict(
                total_count=data_identity_count,
                page_num=page_num,
                current_page=current_page,
                config_list=list(object_list),
                page_range=page_range
            )
            data.update({"res_data": res_data})
        except Exception as e:
            logger.error(e.message)
            data = {
                'status': '0',
                'message': e.message
            }
        return json_response(data)

    def put(self, request, id, *args, **kwargs):
        """更新数据源基础配置信息 -- update the DsInterfaceInfo row with pk ``id``."""
        data = {
            'status': 1,
            'message': 'success'
        }
        try:
            body = json.loads(request.body)
            logger.info("update datasource config request data=%s", body)
            name = body['name']
            data_identity = body['data_identity']
            # NOTE(review): the raw id from the body is written straight into
            # the ``data_source`` FK below, while post() resolves it to a
            # DataSourceInfo instance first -- confirm update() accepts a pk.
            data_source = body['data_source_id']
            data_origin_type = body['data_origin_type']
            route = body['route']
            method = body['method']
            comment = body['comment']
            common_data = body['common_data']
            must_data = body['must_data']
            is_need_token = body['is_need_token']
            is_need_encrypt = body['is_need_encrypt']
            is_async = body['is_async']
            encrypt_type = body['encrypt_type']
            is_delete = body['is_delete']
            updated_on = datetime.datetime.now()
            DsInterfaceInfo.objects.filter(pk=int(id)).update(
                name=name,
                data_identity=data_identity,
                data_source=data_source,
                data_origin_type=data_origin_type,
                route=route,
                method=method,
                comment=comment,
                common_data=common_data,
                must_data=must_data,
                is_need_token=is_need_token,
                is_need_encrypt=is_need_encrypt,
                is_async=is_async,
                encrypt_type=encrypt_type,
                is_delete=is_delete,
                updated_on=updated_on
            )
        except Exception as e:
            logger.error(e.message)
            # CONSISTENCY FIX: surface the real error message like every
            # other endpoint in this module (the old opaque 'error' hid
            # the actual cause from the caller).
            data = {
                'status': '0',
                'message': e.message
            }
        return json_response(data)

    def post(self, request, *args, **kwargs):
        """添加数据源基本信息配置 -- create a new DsInterfaceInfo row."""
        data = {'status': 1,
                'message': 'success'}
        try:
            body = json.loads(request.body)
            logger.info("add datasource config request data=%s", body)
            name = body['name']
            data_identity = body['data_identity']
            data_source_id = int(body['data_source_id'])
            data_origin_type = body['data_origin_type']
            route = body['route']
            method = body['method']
            comment = body['comment']
            common_data = body['common_data']
            must_data = body['must_data']
            is_need_token = body['is_need_token']
            is_need_encrypt = body['is_need_encrypt']
            is_async = body['is_async']
            encrypt_type = body['encrypt_type']
            is_delete = body['is_delete']
            # Resolve the FK id to a model instance; an unknown id raises
            # DoesNotExist, which is reported through the except branch.
            data_source = DataSourceInfo.objects.get(pk=data_source_id)
            DsInterfaceInfo(
                name=name,
                data_identity=data_identity,
                data_source=data_source,
                data_origin_type=data_origin_type,
                route=route,
                method=method,
                comment=comment,
                common_data=common_data,
                must_data=must_data,
                is_need_token=is_need_token,
                is_need_encrypt=is_need_encrypt,
                is_async=is_async,
                encrypt_type=encrypt_type,
                is_delete=is_delete,
            ).save()
        except Exception as e:
            logger.error(e.message)
            # CONSISTENCY FIX: report e.message instead of the opaque 'error'.
            data = {
                'status': '0',
                'message': e.message
            }
        return json_response(data)
class PreFieldInfoConfig(CsrfExemptMixin, View):
    """CRUD endpoints for data-source parameter (PreFieldInfo) records."""

    def get(self, request, fieldname, page, *args, **kwargs):
        """获取数据源参数配置信息 -- return one page of PreFieldInfo rows.

        ``fieldname`` may be the literal 'all' to list every record.
        """
        data = {
            'status': 1,
            'message': 'success'
        }
        try:
            current_page = page
            page_size = 100
            if fieldname == 'all':
                config_obj = PreFieldInfo.objects.values()
            else:
                config_obj = PreFieldInfo.objects.filter(field_name=fieldname).values()
            config_count = config_obj.count()
            paginator = ExtPaginator(list(config_obj), page_size, config_count)
            object_list = paginator.page(current_page)
            # Format datetimes as strings so the rows serialize cleanly to
            # JSON (plain loop replaces the old side-effecting map()).
            for row in object_list:
                row.update({"created_on": row["created_on"].strftime('%Y-%m-%d %H:%M:%S') if row["created_on"] else ''})
                row.update({"updated_on": row["updated_on"].strftime('%Y-%m-%d %H:%M:%S') if row["updated_on"] else ''})
            page_num = paginator.num_pages
            page_range = paginator.page_range
            res_data = dict(
                total_count=config_count,
                page_num=page_num,
                current_page=current_page,
                config_list=list(object_list),
                page_range=page_range
            )
            data.update({"res_data": res_data})
        except Exception as e:
            logger.error(e.message)
            data = {
                'status': '0',
                'message': e.message
            }
        return json_response(data)

    def put(self, request, fieldid, *args, **kwargs):
        """更新数据源参数配置信息 -- update the PreFieldInfo row with pk ``fieldid``.

        ``source`` and ``path`` must be both present or both absent.
        """
        data = {
            'status': 1,
            'message': 'success'
        }
        try:
            body = json.loads(request.body)
            logger.info("update feature config request data=%s", body)
            updated_on = datetime.datetime.now()
            source = body.get('source')
            path = body.get('path')
            # source (参数来源) and path (jsonpath) must be set together.
            if not ((source and path) or (not source and not path)):
                raise Exception(u'参数来源和jsonpath只能同时为空或同时不为空!')
            PreFieldInfo.objects.filter(pk=int(fieldid)).update(
                field_name=body.get('field_name'),
                field_name_cn=body.get('field_name_cn'),
                source=body.get('source'),
                path=body.get('path'),
                updated_on=updated_on,
                is_delete=body.get('is_delete')
            )
        except Exception as e:
            logger.error(e.message)
            data = {
                'status': '0',
                'message': e.message
            }
        return json_response(data)

    def post(self, request, *args, **kwargs):
        """添加数据源参数配置信息 -- create a new PreFieldInfo row."""
        data = {
            'status': 1,
            'message': 'success'
        }
        try:
            body = json.loads(request.body)
            logger.info("update feature config request data=%s", body)
            created_on = datetime.datetime.now()
            field_name = body.get('field_name')
            source = body.get('source')
            path = body.get('path')
            # CLARITY FIX: the old code bound a queryset to a variable named
            # ``count``; use an explicit (and cheaper) existence check.
            if PreFieldInfo.objects.filter(field_name=field_name).exists():
                raise Exception('%s already exists' % field_name)
            if not ((source and path) or (not source and not path)):
                raise Exception(u'参数来源和jsonpath只能同时为空或同时不为空!')
            PreFieldInfo(
                field_name=field_name,
                field_name_cn=body.get('field_name_cn'),
                source=source,
                path=path,
                updated_on=created_on,
                created_on=created_on,
                is_delete=body.get('is_delete')
            ).save()
        except Exception as e:
            logger.error(e.message)
            data = {
                'status': '0',
                'message': e.message
            }
        return json_response(data)
class GetItemList(CsrfExemptMixin, View):
    # Serves dropdown/select data for the admin UI: given an ``item``
    # keyword in the URL, returns an {id: label} mapping (the 'funcname'
    # branch returns a differently-shaped payload and exits early).

    def get(self, request, item, *args, **kwargs):
        """获取下拉列表信息 -- return dropdown options for the given ``item``.

        Known items: feature_name, feature_type, feature_card_type,
        feature_rule_type, args, funcname, data_identity,
        data_identity_args.  Anything else produces an error payload.
        """
        try:
            if item == 'feature_name':
                data = FeatureConf.objects.filter(is_delete=False).values_list('id', 'feature_name').order_by(
                    'feature_name')
            elif item == 'feature_type':
                data = FeatureType.objects.filter(is_delete=False).values_list('id', 'feature_type_desc')
            elif item == 'feature_card_type':
                data = FeatureCardType.objects.filter(is_delete=False).values_list('id', 'feature_type_desc')
            elif item == 'feature_rule_type':
                data = FeatureRuleType.objects.filter(is_delete=False).values_list('id', 'feature_type_desc')
            elif item == 'args':
                data = PreFieldInfo.objects.filter(is_delete=False).values_list('id', 'field_name')
            elif item == 'funcname':
                data = FuncLibSource.objects.values_list('func_name', 'func_type', 'func_desc').order_by('func_type')
                tmp = {}
                for func_name, func_type, func_desc in data:
                    # func_name is stored as "name(arg0,arg1,...)"; split it
                    # into the bare name and the argument list.
                    f = re.search("(.*)\\((.*)\\)", func_name)
                    f_name = f.group(1)
                    f_args = f.group(2)
                    f_s_args = ''
                    # Keeps only the second comma-separated argument when
                    # present -- presumably the first is the piped-in value;
                    # TODO confirm against func_exec_chain's calling code.
                    if len(f_args.split(',')) > 1:
                        f_s_args = f_args.split(',')[1]
                    tmp['%s' % f_name] = [func_name, f_s_args, func_type, func_desc]
                # NOTE: early return with a different payload shape
                # ({name: [func_name, second_arg, type, desc]}).
                return json_response(tmp)
            elif item == 'data_identity':
                data = DsInterfaceInfo.objects.values_list('id', 'data_identity').order_by('data_identity')
            elif item == 'data_identity_args':
                data = DsInterfaceInfo.objects.values_list('id', 'data_identity', 'must_data').order_by('data_identity')
                tmp = []
                for id, data_identity, must_data in data:
                    if must_data:
                        args_list = []
                        # must_data is a dict-literal string whose values may
                        # embed "%(arg)s" placeholders; collect the names.
                        # NOTE(review): eval() on a DB-stored string --
                        # assumed trusted admin data, but ast.literal_eval
                        # would be safer; confirm.
                        for args in eval(must_data).values():
                            if re.search("%\\((.*)\\)s", args):
                                args_list.append(re.search("%\\((.*)\\)s", args).group(1))
                        tmp.append([id, "'%s':%s" % (data_identity, str(args_list))])
                data = tmp
            else:
                raise Exception('url error')
            # Collapse the (id, label) pairs into a plain mapping for the UI.
            data = {id: value for id, value in data}
        except Exception as e:
            logger.error(e.message)
            data = {
                'status': '0',
                'message': e.message
            }
        return json_response(data)
class FeatureProcessAPI(CsrfExemptMixin, View):
    # Endpoints for feature-computation (FeatureProcess) configs: paged
    # listing via get(), delete/dry-run-test via post(), upsert via put().

    def get(self, request, featurename, page, *args, **kwargs):
        """获取特征计算配置信息 -- return one page of FeatureProcess rows.

        ``featurename`` may be the literal 'all' to list every record.
        """
        data = {
            'status': 1,
            'message': 'success'
        }
        try:
            current_page = page
            page_size = 10
            if featurename == 'all':
                feature_config_obj = FeatureProcess.objects.values()
            else:
                feature_config_obj = FeatureProcess.objects.filter(feature_name=featurename).values()
            feature_config_count = feature_config_obj.count()
            paginator = ExtPaginator(list(feature_config_obj), page_size, feature_config_count)
            object_list = paginator.page(current_page)
            page_num = paginator.num_pages
            page_range = paginator.page_range
            res_data = dict(
                total_count=feature_config_count,
                page_num=page_num,
                current_page=current_page,
                config_list=list(object_list),
                page_range=page_range
            )
            data.update({"res_data": res_data})
        except Exception as e:
            logger.error(e.message)
            data = {
                'status': '0',
                'message': e.message
            }
        return json_response(data)

    def post(self, request):
        """Dispatch on the URL path: 'delete' removes a config by name,
        'test' dry-runs a config against caller-supplied origin data.

        NOTE(review): if the path contains neither 'delete' nor 'test'
        this method falls through and implicitly returns None -- presumably
        the URLconf only routes those two paths here; confirm.
        """
        path = request.path
        body = request.body
        datas = json.loads(body)
        if 'delete' in path:
            data = {
                'status': 1,
                'message': 'success'
            }
            try:
                feature_name = datas.get('feature_name')
                FeatureProcess.objects.filter(feature_name=feature_name).delete()
            except Exception as e:
                logger.error(e.message)
                data = {
                    'status': '0',
                    'message': e.message
                }
            return json_response(data)
        elif 'test' in path:
            origin_data = datas.get('origin_data', None)
            configx = datas.get('config', None)
            # Pre-seed so the except branch can build a response even if the
            # failure happens before these are read from the config.
            feature_name = 'feature_name'
            default_value = ''
            try:
                feature_name = configx.get('feature_name', 'null')
                json_path_list = configx.get('json_path_list', None)
                default_value = configx.get('default_value', None)
                if not (feature_name and default_value and json_path_list):
                    raise Exception("feature_name,default_value, json_path_list can't are null !")
                f_map_and_filter_chain = configx.get('f_map_and_filter_chain', None)
                reduce_chain = configx.get('reduce_chain', None)
                l_map_and_filter_chain = configx.get('l_map_and_filter_chain', None)
                json_path_parser = JSONPathParser()
                # Extract the raw values matched by each configured JSON path.
                value_list = json_path_parser.parsex(origin_data, json_path_list)
                result = []
                # i[3] appears to hold the matched values for one path --
                # concatenate them all.  TODO confirm parsex() tuple layout.
                for i in value_list:
                    result = result + i[3]
                # Unwrap a single nested list so the chains see a flat list.
                if len(result) == 1 and isinstance(result[0], list):
                    result = result[0]
                # Apply the three optional processing stages in order:
                # first map/filter, then reduce, then last map/filter.
                if f_map_and_filter_chain:
                    result = func_exec_chain(result, f_map_and_filter_chain)
                if reduce_chain:
                    result = func_exec_chain(result, reduce_chain)
                if l_map_and_filter_chain:
                    result = func_exec_chain(result, l_map_and_filter_chain)
                res = {'message': 'success', 'result': {feature_name: result}}
            except Exception as e:
                # On any failure, fall back to the configured default value.
                # SECURITY NOTE(review): eval() on request-supplied
                # default_value -- consider ast.literal_eval.
                res = {
                    'message': e.message,
                    'result': {feature_name: eval(default_value) if default_value else ''}
                }
            return json_response(res)

    def put(self, request):
        """Upsert: update the FeatureProcess config named in the body's
        ``config.feature_name``, or create it if absent."""
        data = {
            'status': 1,
            'message': 'success'
        }
        try:
            body = request.body
            datas = json.loads(body, encoding='utf8')
            configx = datas.get('config', None)
            feature_name = configx.get('feature_name')
            # Upsert keyed on feature_name.
            if FeatureProcess.objects.filter(feature_name=feature_name).count():
                FeatureProcess.objects.filter(feature_name=feature_name).update(
                    feature_data_type=configx.get('feature_data_type'),
                    default_value=configx.get('default_value'),
                    json_path_list=configx.get('json_path_list'),
                    reduce_chain=configx.get('reduce_chain'),
                    f_map_and_filter_chain=configx.get('f_map_and_filter_chain'),
                    l_map_and_filter_chain=configx.get('l_map_and_filter_chain')
                )
            else:
                FeatureProcess(
                    feature_name=configx.get('feature_name'),
                    feature_data_type=configx.get('feature_data_type'),
                    default_value=configx.get('default_value'),
                    json_path_list=configx.get('json_path_list'),
                    reduce_chain=configx.get('reduce_chain'),
                    f_map_and_filter_chain=configx.get('f_map_and_filter_chain'),
                    l_map_and_filter_chain=configx.get('l_map_and_filter_chain')
                ).save()
        except Exception as e:
            logger.error(e.message)
            data = {
                'status': '0',
                'message': e.message
            }
        return json_response(data)
class TypeInfoConfig(CsrfExemptMixin, View):
    """CRUD endpoints for the three type-dictionary tables
    (feature type / score-card type / rule type)."""

    # URL ``item`` segment -> backing model.  The old code duplicated the
    # if/elif chain in every method with inconsistent fall-through defaults
    # (FeatureType in get/put, '' in post), crashing with an unhelpful
    # AttributeError for unknown items.
    TYPE_MODELS = {
        'feature_type': FeatureType,
        'feature_card_type': FeatureCardType,
        'feature_rule_type': FeatureRuleType,
    }

    def _get_model(self, item):
        """Resolve ``item`` to a model class; raise a clear error otherwise."""
        model = self.TYPE_MODELS.get(item)
        if model is None:
            raise Exception('url error')
        return model

    def get(self, request, item, page, *args, **kwargs):
        """获取规则类型、特征类型、打分卡类型 配置信息 -- paged type listing."""
        data = {
            'status': 1,
            'message': 'success'
        }
        try:
            current_page = page
            page_size = 100
            obj = self._get_model(item).objects.values()
            count = obj.count()
            paginator = ExtPaginator(list(obj), page_size, count)
            object_list = paginator.page(current_page)
            page_num = paginator.num_pages
            page_range = paginator.page_range
            res_data = dict(
                total_count=count,
                page_num=page_num,
                current_page=current_page,
                config_list=list(object_list),
                page_range=page_range
            )
            data.update({"res_data": res_data})
        except Exception as e:
            logger.error(e.message)
            data = {
                'status': '0',
                'message': e.message
            }
        return json_response(data)

    def put(self, request, item, id, *args, **kwargs):
        """更新类型配置信息 -- update the type row with pk ``id``."""
        data = {
            'status': 1,
            'message': 'success'
        }
        try:
            body = json.loads(request.body)
            logger.info("update type config request data=%s", body)
            is_delete = body.get('is_delete', 0)
            feature_type_desc = body.get('feature_type_desc', '')
            if not feature_type_desc:
                raise Exception(u'类型描述不能为空!')
            model = self._get_model(item)
            model.objects.filter(pk=id).update(is_delete=is_delete, feature_type_desc=feature_type_desc)
        except Exception as e:
            logger.error(e.message)
            data = {
                'status': '0',
                'message': e.message
            }
        return json_response(data)

    def post(self, request, item, *args, **kwargs):
        """添加类型配置信息 -- create a new type row (rejects duplicates)."""
        data = {
            'status': 1,
            'message': 'success'
        }
        try:
            body = json.loads(request.body)
            is_delete = body.get('is_delete', 0)
            feature_type_desc = body.get('feature_type_desc', '')
            if not feature_type_desc:
                raise Exception(u'类型描述不能为空!')
            model = self._get_model(item)
            if model.objects.filter(feature_type_desc=feature_type_desc).count():
                raise Exception(u"此类型已经存在!")
            model(is_delete=is_delete, feature_type_desc=feature_type_desc).save()
        except Exception as e:
            logger.error(e.message)
            data = {
                'status': '0',
                'message': e.message
            }
        return json_response(data)
class MapCodeConfig(CsrfExemptMixin, View):
    """CRUD endpoints for feature code-mapping (FeatureCodeMapping) records."""

    def get(self, request, featurename, page, *args, **kwargs):
        """获取map code表配置信息 -- return one page of mapping rows.

        ``featurename`` may be the literal 'all' to list every record.
        """
        data = {
            'status': 1,
            'message': 'success'
        }
        try:
            current_page = page
            page_size = 10
            if featurename == 'all':
                obj = FeatureCodeMapping.objects.values()
            else:
                obj = FeatureCodeMapping.objects.filter(feature_name=featurename).values()
            count = obj.count()
            paginator = ExtPaginator(list(obj), page_size, count)
            object_list = paginator.page(current_page)
            # Format datetimes as strings so the rows serialize cleanly to
            # JSON (plain loop replaces the old side-effecting map()).
            for row in object_list:
                row.update({"created_on": row["created_on"].strftime('%Y-%m-%d %H:%M:%S') if row["created_on"] else ''})
                row.update({"updated_on": row["updated_on"].strftime('%Y-%m-%d %H:%M:%S') if row["updated_on"] else ''})
            page_num = paginator.num_pages
            page_range = paginator.page_range
            res_data = dict(
                total_count=count,
                page_num=page_num,
                current_page=current_page,
                config_list=list(object_list),
                page_range=page_range
            )
            data.update({"res_data": res_data})
        except Exception as e:
            logger.error(e.message)
            data = {
                'status': '0',
                'message': e.message
            }
        return json_response(data)

    def put(self, request, id, *args, **kwargs):
        """更新mapcode配置信息 -- update the mapping row with pk ``id``."""
        data = {
            'status': 1,
            'message': 'success'
        }
        try:
            body = json.loads(request.body)
            updated_on = datetime.datetime.now()
            is_delete = body.get('is_delete', 0)
            feature_name = body.get('feature_name', '')
            feature_desc = body.get('feature_desc', '')
            unitary_value = str(body.get('unitary_value', ''))
            dual_value = body.get('dual_value', '')
            mapped_value = str(body.get('mapped_value', ''))
            value_type = body.get('value_type', '')
            arithmetic_type = body.get('arithmetic_type', '')
            if not (feature_name and unitary_value and mapped_value and arithmetic_type):
                raise Exception("all values don't is null !")
            FeatureCodeMapping.objects.filter(pk=int(id)).update(
                feature_name=feature_name,
                feature_desc=feature_desc,
                unitary_value=unitary_value,
                dual_value=dual_value,
                mapped_value=mapped_value,
                value_type=value_type,
                arithmetic_type=arithmetic_type,
                updated_on=updated_on,
                is_delete=is_delete
            )
        except Exception as e:
            logger.error(e.message)
            data = {
                'status': '0',
                'message': e.message
            }
        return json_response(data)

    def post(self, request, *args, **kwargs):
        """添加mapcode配置 -- create a new mapping row (rejects duplicates)."""
        data = {
            'status': 1,
            'message': 'success'
        }
        try:
            body = json.loads(request.body)
            updated_on = datetime.datetime.now()
            is_delete = body.get('is_delete', 0)
            feature_name = body.get('feature_name', '')
            feature_desc = body.get('feature_desc', '')
            unitary_value = str(body.get('unitary_value', ''))
            dual_value = body.get('dual_value', '')
            mapped_value = str(body.get('mapped_value', ''))
            value_type = body.get('value_type', '')
            arithmetic_type = body.get('arithmetic_type', '')
            if not (feature_name and unitary_value and mapped_value and arithmetic_type):
                raise Exception("all values don't is null !")
            # BUGFIX: the original ran a debug ``print t[0].id`` *before*
            # checking whether the queryset was empty, so every new
            # (non-duplicate) insert raised IndexError and failed.
            duplicates = FeatureCodeMapping.objects.filter(feature_name=feature_name, unitary_value=unitary_value,
                                                           dual_value=dual_value)
            if duplicates.count():
                raise Exception(u'此配置的map值已经存在!')
            FeatureCodeMapping(
                feature_name=feature_name,
                feature_desc=feature_desc,
                unitary_value=unitary_value,
                dual_value=dual_value,
                mapped_value=mapped_value,
                value_type=value_type,
                arithmetic_type=arithmetic_type,
                updated_on=updated_on,
                created_on=updated_on,
                is_delete=is_delete
            ).save()
        except Exception as e:
            logger.error(e.message)
            # CONSISTENCY FIX: status '0' (string) like every other
            # error payload in this module (was the int 0 here only).
            data = {
                'status': '0',
                'message': e.message
            }
        return json_response(data)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.